/*
 * lib/typesafe.h — type-safe container macros (list, dlist, sorted list,
 * hash table, skiplist)
 */
1 /*
2 * Copyright (c) 2016-2019 David Lamparter, for NetDEF, Inc.
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #ifndef _FRR_TYPESAFE_H
18 #define _FRR_TYPESAFE_H
19
20 #include <stddef.h>
21 #include <stdint.h>
22 #include <stdbool.h>
23 #include <assert.h>
24 #include "compiler.h"
25
26 /* generic macros for all list-like types */
27
/* iterate over a list; the current item must NOT be removed inside the
 * loop body (use for_each_safe for that)
 */
#define for_each(prefix, head, item) \
	for (item = prefix##_first(head); item; \
			item = prefix##_next(head, item))
/* deletion-safe iteration: the successor is fetched before the body runs,
 * so the *current* item may be removed/freed (but no other items)
 */
#define for_each_safe(prefix, head, item) \
	for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
			prefix##_next_safe(head, \
					(item = prefix##_first(head))); \
		item; \
		item = prefix##_safe, \
			prefix##_safe = prefix##_next_safe(head, prefix##_safe))
/* deletion-safe iteration resuming at "from"; "from" is advanced to track
 * the next item and is clobbered by the loop
 */
#define for_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_next_safe(head, item); \
		item; \
		item = from, from = prefix##_next_safe(head, from))
42
43 /* single-linked list, unsorted/arbitrary.
44 * can be used as queue with add_tail / pop
45 */
46
/* don't use these structs directly */
struct slist_item {
	struct slist_item *next;	/* NULL terminates the list */
};

struct slist_head {
	/* last_next points at the terminating NULL slot, i.e. at
	 * &head->first while the list is empty — enables O(1) add_tail
	 */
	struct slist_item *first, **last_next;
	size_t count;
};

/* internal: splice "item" into a singly-linked list.  "pos" is the address
 * of the predecessor's next pointer (or of head->first for insertion at the
 * head); the tail pointer and element count are kept consistent.
 */
static inline void typesafe_list_add(struct slist_head *head,
		struct slist_item **pos, struct slist_item *item)
{
	bool appending_at_tail = (pos == head->last_next);

	item->next = *pos;
	*pos = item;
	head->count++;

	if (appending_at_tail)
		head->last_next = &item->next;
}
66
/* use as:
 *
 * PREDECL_LIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_LIST(namelist, struct name, nlitem)
 */
/* declares the wrapper types "struct prefix_head" / "struct prefix_item";
 * the wrappers give each container instantiation a distinct C type
 */
#define PREDECL_LIST(prefix) \
struct prefix ## _head { struct slist_head sh; }; \
struct prefix ## _item { struct slist_item si; };

/* static initializer for an empty list ("var" is the head variable name;
 * needed because last_next must point at the head's own first pointer)
 */
#define INIT_LIST(var) { .sh = { .last_next = &var.sh.first, }, }
80
/* generates the unsorted single-linked list API for "prefix"
 * (see PREDECL_LIST above for usage)
 */
#define DECLARE_LIST(prefix, type, field) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->sh.last_next = &h->sh.first; \
} \
/* cleanup; does not free items */ \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
/* O(1) insert at head */ \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, &h->sh.first, &item->field.si); \
} \
/* O(1) insert at tail (via the cached last_next pointer) */ \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si); \
} \
/* insert after "after"; NULL "after" means insert at head */ \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct slist_item **nextp; \
	nextp = after ? &after->field.si.next : &h->sh.first; \
	typesafe_list_add(&h->sh, nextp, &item->field.si); \
} \
/* TODO: del_hint */ \
/* O(n) delete: single-linked, so the predecessor must be searched; \
 * silently a no-op if the item is not on the list \
 */ \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct slist_item **iter = &h->sh.first; \
	while (*iter && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (!*iter) \
		return; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	if (!item->field.si.next) \
		h->sh.last_next = iter; \
} \
/* remove & return the head item, or NULL if the list is empty */ \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct slist_item *sitem = h->sh.first; \
	if (!sitem) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	if (h->sh.first == NULL) \
		h->sh.last_next = &h->sh.first; \
	return container_of(sitem, type, field.si); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure type *prefix ## _next(struct prefix##_head * h, type *item) \
{ \
	struct slist_item *sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
/* like _next, but accepts NULL "item" (returns NULL) */ \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct slist_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
/* ... */
153
/* don't use these structs directly */
struct dlist_item {
	struct dlist_item *next;
	struct dlist_item *prev;
};

struct dlist_head {
	/* sentinel node: the ring is empty when hitem points at itself */
	struct dlist_item hitem;
	size_t count;
};

/* internal: insert "item" immediately after "prev" in the circular
 * doubly-linked ring; the head's sentinel is a valid "prev" for insertion
 * at the front.
 */
static inline void typesafe_dlist_add(struct dlist_head *head,
		struct dlist_item *prev, struct dlist_item *item)
{
	struct dlist_item *follower = prev->next;

	item->prev = prev;
	item->next = follower;
	follower->prev = item;
	prev->next = item;
	head->count++;
}
174
/* double-linked list, for fast item deletion
 */
#define PREDECL_DLIST(prefix) \
struct prefix ## _head { struct dlist_head dh; }; \
struct prefix ## _item { struct dlist_item di; };

/* static initializer: sentinel linked to itself == empty ring */
#define INIT_DLIST(var) { .dh = { \
	.hitem = { &var.dh.hitem, &var.dh.hitem }, }, }
183
/* generates the doubly-linked list API for "prefix"
 * (see PREDECL_DLIST above)
 */
#define DECLARE_DLIST(prefix, type, field) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->dh.hitem.prev = &h->dh.hitem; \
	h->dh.hitem.next = &h->dh.hitem; \
} \
/* cleanup; does not free items */ \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, &h->dh.hitem, &item->field.di); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, h->dh.hitem.prev, &item->field.di); \
} \
/* insert after "after"; NULL "after" means insert at head */ \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct dlist_item *prev; \
	prev = after ? &after->field.di : &h->dh.hitem; \
	typesafe_dlist_add(&h->dh, prev, &item->field.di); \
} \
/* O(1) delete -- no search needed thanks to the prev pointer; \
 * the item's link pointers are cleared \
 */ \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct dlist_item *ditem = &item->field.di; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
} \
/* remove & return the first item, or NULL if the list is empty */ \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	return container_of(ditem, type, field.di); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure type *prefix ## _next(struct prefix##_head * h, type *item) \
{ \
	struct dlist_item *ditem = &item->field.di; \
	if (ditem->next == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem->next, type, field.di); \
} \
/* like _next, but accepts NULL "item" (returns NULL) */ \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return h->dh.count; \
} \
/* ... */
254
/* single-linked list, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
struct ssort_item {
	struct ssort_item *next;	/* NULL terminates the list */
};

struct ssort_head {
	struct ssort_item *first;	/* smallest item per cmpfn ordering */
	size_t count;
};
268
/* use as:
 *
 * PREDECL_SORTLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SORTLIST(namelist, struct name, nlitem)
 */
#define _PREDECL_SORTLIST(prefix) \
struct prefix ## _head { struct ssort_head sh; }; \
struct prefix ## _item { struct ssort_item si; };

/* zero-initialization is sufficient (no tail pointer to set up) */
#define INIT_SORTLIST_UNIQ(var) { }
#define INIT_SORTLIST_NONUNIQ(var) { }

#define PREDECL_SORTLIST_UNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
#define PREDECL_SORTLIST_NONUNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
288
/* generates the sorted-list API for "prefix".  cmpfn_nuq is used for
 * lookups and may consider distinct items equal; cmpfn_uq is used for
 * insert/delete and must totally order distinct items (see the pointer
 * tiebreak in DECLARE_SORTLIST_NONUNIQ).
 */
#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
/* cleanup; does not free items */ \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
/* O(n) sorted insert; returns NULL on success, or the already-present \
 * equal item (in which case nothing was inserted) \
 */ \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **np = &h->sh.first; \
	int c = 1; \
	while (*np && (c = cmpfn_uq( \
			container_of(*np, type, field.si), item)) < 0) \
		np = &(*np)->next; \
	if (c == 0) \
		return container_of(*np, type, field.si); \
	item->field.si.next = *np; \
	*np = &item->field.si; \
	h->sh.count++; \
	return NULL; \
} \
/* first item that does not sort before "item" (>=), or NULL. \
 * FIX: parenthesize the assignment so cmpval holds the actual comparison \
 * result rather than the boolean (result < 0). \
 */ \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
		const type *item) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	return container_of_null(sitem, type, field.si); \
} \
/* last item that sorts strictly before "item", or NULL (same paren fix) */ \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
		const type *item) \
{ \
	struct ssort_item *prev = NULL, *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = (prev = sitem)->next; \
	return container_of_null(prev, type, field.si); \
} \
/* TODO: del_hint */ \
/* O(n) delete; silently a no-op if the item is not on the list */ \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **iter = &h->sh.first; \
	while (*iter && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (!*iter) \
		return; \
	h->sh.count--; \
	*iter = item->field.si.next; \
} \
/* remove & return the smallest item, or NULL if the list is empty */ \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	if (!sitem) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	return container_of(sitem, type, field.si); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item *sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
/* like _next, but accepts NULL "item" (returns NULL) */ \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
/* ... */
375
/* unique sorted list: one cmpfn serves both lookup and insert/delete */
#define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn) \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn) \
\
/* exact-match lookup; returns NULL if no equal item is on the list. \
 * BUGFIX: the assignment must be parenthesized -- previously cmpval got \
 * the boolean (cmpfn(...) < 0), so it was always 0 after the loop and \
 * the "cmpval > 0" miss check never fired, making _find return the \
 * successor item for keys that are NOT on the list. \
 */ \
macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	if (!sitem || cmpval > 0) \
		return NULL; \
	return container_of(sitem, type, field.si); \
} \
/* ... */
391
/* non-unique sorted list: insert/delete break ties between equal-sorting
 * items by comparing their addresses, so _add never reports a duplicate.
 * NOTE(review): ordered comparison of pointers into distinct objects is
 * formally undefined in ISO C; works on the flat-address-space targets
 * this is built for -- confirm if porting.
 */
#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn) \
macro_inline int _ ## prefix ## _cmp(const type *a, const type *b) \
{ \
	int cmpval = cmpfn(a, b); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
_DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp) \
/* ... */
406
407
/* hash, "sorted" by hash value
 */

/* don't use these structs directly */
struct thash_item {
	struct thash_item *next;	/* per-bucket chain, kept sorted by
					 * hashval */
	uint32_t hashval;		/* cached hash of this item */
};

struct thash_head {
	struct thash_item **entries;	/* bucket array; unused while
					 * tabshift == 0 (no table yet) */
	uint32_t count;

	uint8_t tabshift;		/* log2(table size) + 1; 0 = no table
					 * (see _HASH_SIZE below) */
	uint8_t minshift, maxshift;	/* resize bounds for grow/shrink */
};
424
/* bucket count for a given tabshift ((1 << tabshift) / 2; 0 when no table) */
#define _HASH_SIZE(tabshift) \
	((1U << (tabshift)) >> 1)
#define HASH_SIZE(head) \
	_HASH_SIZE((head).tabshift)
/* map a 32-bit hash value to a bucket index using its top bits.
 * NOTE(review): the shift is (33 - tabshift), which is out of range for a
 * 32-bit value when tabshift <= 1; callers guard on tabshift != 0 before
 * using this -- confirm the resize logic keeps tabshift >= 2.
 */
#define _HASH_KEY(tabshift, val) \
	((val) >> (33 - (tabshift)))
#define HASH_KEY(head, val) \
	_HASH_KEY((head).tabshift, val)
/* grow at 100% load factor, shrink below roughly 50% */
#define HASH_GROW_THRESHOLD(head) \
	((head).count >= HASH_SIZE(head))
#define HASH_SHRINK_THRESHOLD(head) \
	((head).count <= (HASH_SIZE(head) - 1) / 2)

/* table (re)allocation helpers, implemented out of line */
extern void typesafe_hash_grow(struct thash_head *head);
extern void typesafe_hash_shrink(struct thash_head *head);
440
/* use as:
 *
 * PREDECL_HASH(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
 */
#define PREDECL_HASH(prefix) \
struct prefix ## _head { struct thash_head hh; }; \
struct prefix ## _item { struct thash_item hi; };

/* zero-init suffices; the bucket table is allocated on first _add() */
#define INIT_HASH(var) { }
454
/* generates the hash table API for "prefix" (see PREDECL_HASH above);
 * buckets chain items in ascending hashval order
 */
#define DECLARE_HASH(prefix, type, field, cmpfn, hashfn) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
/* cleanup; all items must have been removed beforehand */ \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	h->hh.minshift = 0; \
	typesafe_hash_shrink(&h->hh); \
	memset(h, 0, sizeof(*h)); \
} \
/* insert; returns NULL on success, or the already-present equal item \
 * (nothing inserted in that case) \
 */ \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	h->hh.count++; \
	if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh)) \
		typesafe_hash_grow(&h->hh); \
\
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	item->field.hi.hashval = hval; \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	/* BUGFIX: scan *all* chained items with an equal hash value, not \
	 * just the first one -- otherwise a hashval collision between \
	 * distinct keys let a duplicate of a later chain entry slip in \
	 * (_find and _del already loop over the equal-hash run). \
	 */ \
	while (*np && (*np)->hashval == hval) { \
		if (cmpfn(container_of(*np, type, field.hi), item) == 0) { \
			h->hh.count--; \
			return container_of(*np, type, field.hi); \
		} \
		np = &(*np)->next; \
	} \
	item->field.hi.next = *np; \
	*np = &item->field.hi; \
	return NULL; \
} \
/* exact-match lookup; returns NULL if absent (or no table allocated) */ \
macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
{ \
	if (!h->hh.tabshift) \
		return NULL; \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	struct thash_item *hitem = h->hh.entries[hbits]; \
	while (hitem && hitem->hashval < hval) \
		hitem = hitem->next; \
	while (hitem && hitem->hashval == hval) { \
		if (!cmpfn(container_of(hitem, type, field.hi), item)) \
			return container_of(hitem, type, field.hi); \
		hitem = hitem->next; \
	} \
	return NULL; \
} \
/* delete by identity (pointer match, using the cached hashval); silently \
 * a no-op if the item is not in the table \
 */ \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	if (!h->hh.tabshift) \
		return; \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	while (*np && *np != &item->field.hi && (*np)->hashval == hval) \
		np = &(*np)->next; \
	if (*np != &item->field.hi) \
		return; \
	*np = item->field.hi.next; \
	item->field.hi.next = NULL; \
	h->hh.count--; \
	if (HASH_SHRINK_THRESHOLD(h->hh)) \
		typesafe_hash_shrink(&h->hh); \
} \
/* remove & return some item (first non-empty bucket), or NULL if empty */ \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) { \
			struct thash_item *hitem = h->hh.entries[i]; \
			h->hh.entries[i] = hitem->next; \
			h->hh.count--; \
			hitem->next = NULL; \
			if (HASH_SHRINK_THRESHOLD(h->hh)) \
				typesafe_hash_shrink(&h->hh); \
			return container_of(hitem, type, field.hi); \
		} \
	return NULL; \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
/* iteration order is hash order, not insertion order */ \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct thash_item *hitem = &item->field.hi; \
	if (hitem->next) \
		return container_of(hitem->next, type, field.hi); \
	uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1; \
	for (; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
/* like _next, but accepts NULL "item" (returns NULL) */ \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
/* ... */
565
/* skiplist, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
#define SKIPLIST_MAXDEPTH	16	/* maximum number of levels */
#define SKIPLIST_EMBED		4	/* next[] slots stored in the item */
#define SKIPLIST_OVERFLOW	(SKIPLIST_EMBED - 1)

struct sskip_item {
	/* next[0] is the full ordered chain (used by _first/_next); the
	 * last embedded slot is repurposed for deeper levels -- NOTE(review):
	 * presumably via the tagged pointer set up in _init; confirm against
	 * the out-of-line implementation.
	 */
	struct sskip_item *next[SKIPLIST_EMBED];
};

struct sskip_overflow {
	struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
};

struct sskip_head {
	struct sskip_item hitem;	/* head sentinel */
	struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
	size_t count;
};
588
/* use as:
 *
 * PREDECL_SKIPLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SKIPLIST(namelist, struct name, nlitem, cmpfunc)
 */
#define _PREDECL_SKIPLIST(prefix) \
struct prefix ## _head { struct sskip_head sh; }; \
struct prefix ## _item { struct sskip_item si; };

/* NOTE(review): {} leaves the overflow tag pointer unset, so prefix##_init()
 * must still be called before use -- confirm intended usage
 */
#define INIT_SKIPLIST_UNIQ(var) { }
#define INIT_SKIPLIST_NONUNIQ(var) { }
603
/* generates the skiplist API for "prefix"; the actual algorithm lives in
 * the out-of-line typesafe_skiplist_* functions declared below
 */
#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	/* last embedded level slot doubles as a tagged (low bit set) \
	 * pointer to the overflow array for the deeper levels \
	 */ \
	h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
			((uintptr_t)h->sh.overflow | 1); \
} \
/* cleanup; does not free items */ \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
/* insert; returns NULL on success, or the already-present duplicate */ \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *si; \
	si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq); \
	return container_of_null(si, type, field.si); \
} \
/* first item that does not sort before "item" (>=), or NULL */ \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
		const type *item) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh, \
			&item->field.si, cmpfn_nuq); \
	return container_of_null(sitem, type, field.si); \
} \
/* last item that sorts strictly before "item", or NULL */ \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
		const type *item) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh, \
			&item->field.si, cmpfn_nuq); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline void prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	typesafe_skiplist_del(&h->sh, &item->field.si, cmpfn_uq); \
} \
/* remove & return the smallest item, or NULL if empty */ \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct sskip_item *sitem = h->sh.hitem.next[0]; \
	if (!sitem) \
		return NULL; \
	typesafe_skiplist_del(&h->sh, sitem, cmpfn_uq); \
	return container_of(sitem, type, field.si); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	struct sskip_item *first = h->sh.hitem.next[0]; \
	return container_of_null(first, type, field.si); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *next = item->field.si.next[0]; \
	return container_of_null(next, type, field.si); \
} \
/* like _next, but accepts NULL "item" (returns NULL) */ \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *next; \
	next = item ? item->field.si.next[0] : NULL; \
	return container_of_null(next, type, field.si); \
} \
macro_pure size_t prefix ## _count(struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
/* ... */
669
#define PREDECL_SKIPLIST_UNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
/* unique skiplist: one cmpfn serves both lookup and insert/delete */
#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn) \
\
/* adapt the user's typed cmpfn to the raw sskip_item level */ \
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	return cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
} \
/* exact-match lookup; returns NULL if no equal item is present */ \
macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_find(&h->sh, \
			&item->field.si, &prefix ## __cmp); \
	return container_of_null(sitem, type, field.si); \
} \
\
_DECLARE_SKIPLIST(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp) \
/* ... */
690
#define PREDECL_SKIPLIST_NONUNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
/* non-unique skiplist: insert/delete use a pointer-address tiebreak so
 * equal-sorting items are still totally ordered and _add never reports a
 * duplicate.  NOTE(review): ordered comparison of unrelated pointers is
 * formally undefined in ISO C; fine on flat-address-space targets.
 */
#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn) \
\
/* lookup comparison: user ordering only */ \
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	return cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
} \
/* insert/delete comparison: user ordering + address tiebreak */ \
macro_inline int prefix ## __cmp_uq(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	int cmpval = cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
\
_DECLARE_SKIPLIST(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp_uq) \
/* ... */
718
719
/* skiplist core, implemented out of line.  cmpfn must match the ordering
 * the list was built with (the DECLARE_* macros pass it consistently).
 */
extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
		struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_find(struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_find_gteq(struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_find_lt(struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern void typesafe_skiplist_del(struct sskip_head *head,
		struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
740
741 /* this needs to stay at the end because both files include each other.
742 * the resolved order is typesafe.h before typerb.h
743 */
744 #include "typerb.h"
745
746 #endif /* _FRR_TYPESAFE_H */