]> git.proxmox.com Git - mirror_frr.git/blob - lib/typesafe.h
Merge pull request #8301 from donaldsharp/isis_spacing
[mirror_frr.git] / lib / typesafe.h
1 /*
2 * Copyright (c) 2016-2019 David Lamparter, for NetDEF, Inc.
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #ifndef _FRR_TYPESAFE_H
18 #define _FRR_TYPESAFE_H
19
20 #include <stddef.h>
21 #include <stdint.h>
22 #include <stdbool.h>
23 #include <assert.h>
24 #include "compiler.h"
25
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29
/* generic macros for all list-like types */

/* iterate over all items; the list must not be modified while iterating
 * (use frr_each_safe for that.)
 */
#define frr_each(prefix, head, item)                                           \
	for (item = prefix##_first(head); item;                                \
			item = prefix##_next(head, item))
/* iteration that tolerates unlinking/freeing the *current* item: a hidden
 * lookahead variable (named prefix##_safe) always stays one step ahead.
 */
#define frr_each_safe(prefix, head, item)                                      \
	for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe =            \
			prefix##_next_safe(head,                               \
					   (item = prefix##_first(head)));     \
			item;                                                  \
			item = prefix##_safe,                                  \
			prefix##_safe = prefix##_next_safe(head, prefix##_safe))
/* resumable + deletion-safe iteration: starts at "from", which is kept
 * pointing at the following item so the loop can be continued after a break.
 */
#define frr_each_from(prefix, head, item, from)                                \
	for (item = from, from = prefix##_next_safe(head, item);               \
			item;                                                  \
			item = from, from = prefix##_next_safe(head, from))
46
47
/* non-const variants. these wrappers are the same for all the types, so
 * bundle them together here.
 */

/* cast-away-const wrappers around the _const_first/_const_next functions
 * generated by the per-type DECLARE_* macros below.
 */
#define TYPESAFE_FIRST_NEXT(prefix, type)                                      \
macro_pure type *prefix ## _first(struct prefix##_head *h)                     \
{                                                                              \
	return (type *)prefix ## _const_first(h);                              \
}                                                                              \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item)          \
{                                                                              \
	return (type *)prefix ## _const_next(h, item);                         \
}                                                                              \
/* ... */
/* non-const wrapper for exact-match lookup */
#define TYPESAFE_FIND(prefix, type)                                            \
macro_inline type *prefix ## _find(struct prefix##_head *h,                    \
				   const type *item)                           \
{                                                                              \
	return (type *)prefix ## _const_find(h, item);                         \
}                                                                              \
/* ... */
/* non-const wrappers for the ordered lookups (strictly-less-than and
 * greater-or-equal) of the sorted container types.
 */
#define TYPESAFE_FIND_CMP(prefix, type)                                        \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h,                 \
				      const type *item)                        \
{                                                                              \
	return (type *)prefix ## _const_find_lt(h, item);                      \
}                                                                              \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h,               \
					const type *item)                      \
{                                                                              \
	return (type *)prefix ## _const_find_gteq(h, item);                    \
}                                                                              \
/* ... */
80
81
/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 */

/* internal representation - don't use these structs directly */
struct slist_item {
	struct slist_item *next;
};

struct slist_head {
	struct slist_item *first, **last_next;
	size_t count;
};

/* link "item" into the list at position "pos", where "pos" is a pointer to
 * the "next" slot that should end up pointing at the new item (i.e.
 * &head->first, or &some_item->next).  If the insertion happens at the very
 * end, the last_next tail shortcut is advanced to the new item's next slot.
 */
static inline void typesafe_list_add(struct slist_head *head,
		struct slist_item **pos, struct slist_item *item)
{
	struct slist_item *successor = *pos;

	item->next = successor;
	*pos = item;
	/* inserted at the tail?  keep the O(1) append pointer valid */
	if (head->last_next == pos)
		head->last_next = &item->next;
	head->count++;
}
105
/* use as:
 *
 * PREDECL_LIST(namelist);
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_LIST(namelist, struct name, nlitem);
 */
#define PREDECL_LIST(prefix)                                                   \
struct prefix ## _head { struct slist_head sh; };                              \
struct prefix ## _item { struct slist_item si; };                              \
MACRO_REQUIRE_SEMICOLON() /* end */

/* static initializer; last_next must point at the (empty) first slot */
#define INIT_LIST(var) { .sh = { .last_next = &var.sh.first, }, }

#define DECLARE_LIST(prefix, type, field)                                      \
                                                                               \
/* initialize a list head to the empty state */                                \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
	h->sh.last_next = &h->sh.first;                                        \
}                                                                              \
/* tear down; does not free items - caller must have emptied the list */       \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item)     \
{                                                                              \
	typesafe_list_add(&h->sh, &h->sh.first, &item->field.si);              \
}                                                                              \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item)     \
{                                                                              \
	typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si);           \
}                                                                              \
/* insert after "after"; after == NULL inserts at the head */                  \
macro_inline void prefix ## _add_after(struct prefix##_head *h,                \
				       type *after, type *item)                \
{                                                                              \
	struct slist_item **nextp;                                             \
	nextp = after ? &after->field.si.next : &h->sh.first;                  \
	typesafe_list_add(&h->sh, nextp, &item->field.si);                     \
}                                                                              \
/* TODO: del_hint */                                                           \
/* unlink item (O(n) walk to find its predecessor); returns the item, or       \
 * NULL if it was not on this list */                                          \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct slist_item **iter = &h->sh.first;                               \
	while (*iter && *iter != &item->field.si)                              \
		iter = &(*iter)->next;                                         \
	if (!*iter)                                                            \
		return NULL;                                                   \
	h->sh.count--;                                                         \
	*iter = item->field.si.next;                                           \
	/* removed the last item => tail shortcut moves back */                \
	if (!item->field.si.next)                                              \
		h->sh.last_next = iter;                                        \
	return item;                                                           \
}                                                                              \
/* remove & return the first item, NULL if the list is empty */                \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	struct slist_item *sitem = h->sh.first;                                \
	if (!sitem)                                                            \
		return NULL;                                                   \
	h->sh.count--;                                                         \
	h->sh.first = sitem->next;                                             \
	if (h->sh.first == NULL)                                               \
		h->sh.last_next = &h->sh.first;                                \
	return container_of(sitem, type, field.si);                            \
}                                                                              \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	return container_of_null(h->sh.first, type, field.si);                 \
}                                                                              \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	const struct slist_item *sitem = &item->field.si;                      \
	return container_of_null(sitem->next, type, field.si);                 \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
/* like _next, but tolerates item == NULL (returns NULL then) */               \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	struct slist_item *sitem;                                              \
	if (!item)                                                             \
		return NULL;                                                   \
	sitem = &item->field.si;                                               \
	return container_of_null(sitem->next, type, field.si);                 \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->sh.count;                                                    \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
196
/* internal representation - don't use these structs directly */
struct dlist_item {
	struct dlist_item *next;
	struct dlist_item *prev;
};

struct dlist_head {
	struct dlist_item hitem;
	size_t count;
};

/* splice "item" into the ring immediately after "prev".  The head's
 * embedded hitem acts as sentinel, so this works for empty lists too.
 */
static inline void typesafe_dlist_add(struct dlist_head *head,
		struct dlist_item *prev, struct dlist_item *item)
{
	struct dlist_item *follower = prev->next;

	item->prev = prev;
	item->next = follower;
	follower->prev = item;
	prev->next = item;
	head->count++;
}
217
/* double-linked list, for fast item deletion
 */
#define PREDECL_DLIST(prefix)                                                  \
struct prefix ## _head { struct dlist_head dh; };                              \
struct prefix ## _item { struct dlist_item di; };                              \
MACRO_REQUIRE_SEMICOLON() /* end */

/* static initializer: sentinel hitem points at itself => empty ring */
#define INIT_DLIST(var) { .dh = { \
	.hitem = { &var.dh.hitem, &var.dh.hitem }, }, }

#define DECLARE_DLIST(prefix, type, field)                                     \
                                                                               \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
	h->dh.hitem.prev = &h->dh.hitem;                                       \
	h->dh.hitem.next = &h->dh.hitem;                                       \
}                                                                              \
/* tear down; does not free items - caller must have emptied the list */       \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item)     \
{                                                                              \
	typesafe_dlist_add(&h->dh, &h->dh.hitem, &item->field.di);             \
}                                                                              \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item)     \
{                                                                              \
	typesafe_dlist_add(&h->dh, h->dh.hitem.prev, &item->field.di);         \
}                                                                              \
/* insert after "after"; after == NULL inserts at the head */                  \
macro_inline void prefix ## _add_after(struct prefix##_head *h,                \
				       type *after, type *item)                \
{                                                                              \
	struct dlist_item *prev;                                               \
	prev = after ? &after->field.di : &h->dh.hitem;                        \
	typesafe_dlist_add(&h->dh, prev, &item->field.di);                     \
}                                                                              \
/* O(1) unlink; prev/next are NULLed to catch stale pointer use */             \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct dlist_item *ditem = &item->field.di;                            \
	ditem->prev->next = ditem->next;                                       \
	ditem->next->prev = ditem->prev;                                       \
	h->dh.count--;                                                         \
	ditem->prev = ditem->next = NULL;                                      \
	return item;                                                           \
}                                                                              \
/* remove & return the first item, NULL if the list is empty */                \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	struct dlist_item *ditem = h->dh.hitem.next;                           \
	if (ditem == &h->dh.hitem)                                             \
		return NULL;                                                   \
	ditem->prev->next = ditem->next;                                       \
	ditem->next->prev = ditem->prev;                                       \
	h->dh.count--;                                                         \
	return container_of(ditem, type, field.di);                            \
}                                                                              \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	const struct dlist_item *ditem = h->dh.hitem.next;                     \
	if (ditem == &h->dh.hitem)                                             \
		return NULL;                                                   \
	return container_of(ditem, type, field.di);                            \
}                                                                              \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	const struct dlist_item *ditem = &item->field.di;                      \
	if (ditem->next == &h->dh.hitem)                                       \
		return NULL;                                                   \
	return container_of(ditem->next, type, field.di);                      \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
/* like _next, but tolerates item == NULL (returns NULL then) */               \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	if (!item)                                                             \
		return NULL;                                                   \
	return prefix ## _next(h, item);                                       \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->dh.count;                                                    \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
301
/* note: heap currently caps out at 4G items */

/* 8-ary heap stored as a flat array of item pointers */
#define HEAP_NARY 8U
typedef uint32_t heap_index_i;

/* embedded in each item; "index" is the item's current slot in the array */
struct heap_item {
	uint32_t index;
};

struct heap_head {
	struct heap_item **array;
	uint32_t arraysz, count;
};

/* grow when the array is full ... */
#define HEAP_RESIZE_TRESH_UP(h) \
	(h->hh.count + 1 >= h->hh.arraysz)
/* ... shrink when the free space exceeds half of (count + 1024); the 1024
 * adds hysteresis so small heaps don't thrash */
#define HEAP_RESIZE_TRESH_DN(h) \
	(h->hh.count == 0 || \
		h->hh.arraysz - h->hh.count > (h->hh.count + 1024) / 2)

#define PREDECL_HEAP(prefix)                                                   \
struct prefix ## _head { struct heap_head hh; };                               \
struct prefix ## _item { struct heap_item hi; };                               \
MACRO_REQUIRE_SEMICOLON() /* end */

/* all-zero head is valid; the array is allocated on first add */
#define INIT_HEAP(var) { }
328
#define DECLARE_HEAP(prefix, type, field, cmpfn)                               \
                                                                               \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
/* tear down; the heap must already be empty */                                \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	assert(h->hh.count == 0);                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
/* adapter: compare embedded heap_items through the user's cmpfn */            \
macro_inline int prefix ## __cmp(const struct heap_item *a,                    \
				 const struct heap_item *b)                    \
{                                                                              \
	return cmpfn(container_of(a, type, field.hi),                          \
		     container_of(b, type, field.hi));                         \
}                                                                              \
/* insert; always succeeds - returns NULL for API consistency with the         \
 * other containers' _add */                                                   \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item)         \
{                                                                              \
	if (HEAP_RESIZE_TRESH_UP(h))                                           \
		typesafe_heap_resize(&h->hh, true);                            \
	typesafe_heap_pullup(&h->hh, h->hh.count, &item->field.hi,             \
			     prefix ## __cmp);                                 \
	h->hh.count++;                                                         \
	return NULL;                                                           \
}                                                                              \
/* remove an arbitrary item: the last array slot refills the hole, then        \
 * gets moved up or down to restore heap order */                              \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct heap_item *other;                                               \
	uint32_t index = item->field.hi.index;                                 \
	assert(h->hh.array[index] == &item->field.hi);                         \
	h->hh.count--;                                                         \
	other = h->hh.array[h->hh.count];                                      \
	if (cmpfn(container_of(other, type, field.hi), item) < 0)              \
		typesafe_heap_pullup(&h->hh, index, other, prefix ## __cmp);   \
	else                                                                   \
		typesafe_heap_pushdown(&h->hh, index, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h))                                           \
		typesafe_heap_resize(&h->hh, false);                           \
	return item;                                                           \
}                                                                              \
/* remove & return the root item (lowest-ordered per cmpfn), NULL if empty */  \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	struct heap_item *hitem, *other;                                       \
	if (h->hh.count == 0)                                                  \
		return NULL;                                                   \
	hitem = h->hh.array[0];                                                \
	h->hh.count--;                                                         \
	other = h->hh.array[h->hh.count];                                      \
	typesafe_heap_pushdown(&h->hh, 0, other, prefix ## __cmp);             \
	if (HEAP_RESIZE_TRESH_DN(h))                                           \
		typesafe_heap_resize(&h->hh, false);                           \
	return container_of(hitem, type, field.hi);                            \
}                                                                              \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	if (h->hh.count == 0)                                                  \
		return NULL;                                                   \
	return container_of(h->hh.array[0], type, field.hi);                   \
}                                                                              \
/* NOTE: iteration walks the backing array, i.e. NOT in sorted order */        \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	uint32_t idx = item->field.hi.index + 1;                               \
	if (idx >= h->hh.count)                                                \
		return NULL;                                                   \
	return container_of(h->hh.array[idx], type, field.hi);                 \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
/* like _next, but tolerates item == NULL (returns NULL then) */               \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	if (!item)                                                             \
		return NULL;                                                   \
	return prefix ## _next(h, item);                                       \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->hh.count;                                                    \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
409
/* out-of-line heap helpers, shared by all DECLARE_HEAP instantiations
 * (implemented in the accompanying .c file)
 */
extern void typesafe_heap_resize(struct heap_head *head, bool grow);
extern void typesafe_heap_pushdown(struct heap_head *head, uint32_t index,
				   struct heap_item *item,
				   int (*cmpfn)(const struct heap_item *a,
						const struct heap_item *b));
extern void typesafe_heap_pullup(struct heap_head *head, uint32_t index,
				 struct heap_item *item,
				 int (*cmpfn)(const struct heap_item *a,
					      const struct heap_item *b));
419
/* single-linked list, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
struct ssort_item {
	struct ssort_item *next;
};

struct ssort_head {
	struct ssort_item *first;	/* kept in ascending cmpfn order */
	size_t count;
};

/* use as:
 *
 * PREDECL_SORTLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SORTLIST(namelist, struct name, nlitem)
 */
#define _PREDECL_SORTLIST(prefix)                                              \
struct prefix ## _head { struct ssort_head sh; };                              \
struct prefix ## _item { struct ssort_item si; };                              \
MACRO_REQUIRE_SEMICOLON() /* end */

/* an all-zero head is a valid empty sorted list */
#define INIT_SORTLIST_UNIQ(var) { }
#define INIT_SORTLIST_NONUNIQ(var) { }

#define PREDECL_SORTLIST_UNIQ(prefix)                                          \
	_PREDECL_SORTLIST(prefix)
#define PREDECL_SORTLIST_NONUNIQ(prefix)                                       \
	_PREDECL_SORTLIST(prefix)
454
/* cmpfn_nuq: "plain" compare, used for ordered lookups.
 * cmpfn_uq: compare with tiebreaker, used for add/del; for the NONUNIQ
 * variant it never returns 0 for distinct items.
 */
#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq)            \
                                                                               \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
/* tear down; does not free items - caller must have emptied the list */       \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
/* O(n) sorted insert; returns an existing equal item (nothing inserted        \
 * then), NULL on successful insert */                                         \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct ssort_item **np = &h->sh.first;                                 \
	int c = 1;                                                             \
	while (*np && (c = cmpfn_uq(                                           \
			container_of(*np, type, field.si), item)) < 0)         \
		np = &(*np)->next;                                             \
	if (c == 0)                                                            \
		return container_of(*np, type, field.si);                      \
	item->field.si.next = *np;                                             \
	*np = &item->field.si;                                                 \
	h->sh.count++;                                                         \
	return NULL;                                                           \
}                                                                              \
/* first item >= "item" (per cmpfn_nuq), NULL if none */                       \
macro_inline const type *prefix ## _const_find_gteq(                           \
		const struct prefix##_head *h, const type *item)               \
{                                                                              \
	const struct ssort_item *sitem = h->sh.first;                          \
	int cmpval = 0;                                                        \
	while (sitem && (cmpval = cmpfn_nuq(                                   \
			container_of(sitem, type, field.si), item)) < 0)       \
		sitem = sitem->next;                                           \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
/* last item < "item" (per cmpfn_nuq), NULL if none */                         \
macro_inline const type *prefix ## _const_find_lt(                             \
		const struct prefix##_head *h, const type *item)               \
{                                                                              \
	const struct ssort_item *prev = NULL, *sitem = h->sh.first;            \
	int cmpval = 0;                                                        \
	while (sitem && (cmpval = cmpfn_nuq(                                   \
			container_of(sitem, type, field.si), item)) < 0)       \
		sitem = (prev = sitem)->next;                                  \
	return container_of_null(prev, type, field.si);                        \
}                                                                              \
TYPESAFE_FIND_CMP(prefix, type)                                                \
/* TODO: del_hint */                                                           \
/* O(n) unlink; returns the item, or NULL if it was not on this list */        \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct ssort_item **iter = &h->sh.first;                               \
	while (*iter && *iter != &item->field.si)                              \
		iter = &(*iter)->next;                                         \
	if (!*iter)                                                            \
		return NULL;                                                   \
	h->sh.count--;                                                         \
	*iter = item->field.si.next;                                           \
	return item;                                                           \
}                                                                              \
/* remove & return the lowest-ordered item, NULL if empty */                   \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	struct ssort_item *sitem = h->sh.first;                                \
	if (!sitem)                                                            \
		return NULL;                                                   \
	h->sh.count--;                                                         \
	h->sh.first = sitem->next;                                             \
	return container_of(sitem, type, field.si);                            \
}                                                                              \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	return container_of_null(h->sh.first, type, field.si);                 \
}                                                                              \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	const struct ssort_item *sitem = &item->field.si;                      \
	return container_of_null(sitem->next, type, field.si);                 \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
/* like _next, but tolerates item == NULL (returns NULL then) */               \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	struct ssort_item *sitem;                                              \
	if (!item)                                                             \
		return NULL;                                                   \
	sitem = &item->field.si;                                               \
	return container_of_null(sitem->next, type, field.si);                 \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->sh.count;                                                    \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
545
/* unique sorted list: _add rejects duplicates, _find is available */
#define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn)                      \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn);                  \
                                                                               \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h,  \
					       const type *item)               \
{                                                                              \
	const struct ssort_item *sitem = h->sh.first;                          \
	int cmpval = 0;                                                        \
	while (sitem && (cmpval = cmpfn(                                       \
			container_of(sitem, type, field.si), item)) < 0)       \
		sitem = sitem->next;                                           \
	if (!sitem || cmpval > 0)                                              \
		return NULL;                                                   \
	return container_of(sitem, type, field.si);                            \
}                                                                              \
TYPESAFE_FIND(prefix, type)                                                    \
MACRO_REQUIRE_SEMICOLON() /* end */

/* non-unique sorted list: ties are broken by item address so equal items
 * can coexist; no _find for the same reason
 */
#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn)                   \
macro_inline int _ ## prefix ## _cmp(const type *a, const type *b)             \
{                                                                              \
	int cmpval = cmpfn(a, b);                                              \
	if (cmpval)                                                            \
		return cmpval;                                                 \
	if (a < b)                                                             \
		return -1;                                                     \
	if (a > b)                                                             \
		return 1;                                                      \
	return 0;                                                              \
}                                                                              \
_DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp);            \
MACRO_REQUIRE_SEMICOLON() /* end */
578
579
/* hash, "sorted" by hash value
 */

/* don't use these structs directly */
struct thash_item {
	/* per-bucket chain, kept in ascending hashval order */
	struct thash_item *next;
	uint32_t hashval;
};

struct thash_head {
	struct thash_item **entries;
	uint32_t count;

	/* tabshift is 1-based: table size is _HASH_SIZE(tabshift) buckets;
	 * 0 means "no table allocated yet" */
	uint8_t tabshift;
	uint8_t minshift, maxshift;
};

#define _HASH_SIZE(tabshift) \
	((1U << (tabshift)) >> 1)
#define HASH_SIZE(head) \
	_HASH_SIZE((head).tabshift)
/* bucket = top bits of the 32-bit hash value.  NOTE(review): a tabshift
 * of 1 would shift by 32 (UB on uint32_t) - presumably grow/shrink keep
 * tabshift either 0 or >= 2; confirm in the implementation file. */
#define _HASH_KEY(tabshift, val) \
	((val) >> (33 - (tabshift)))
#define HASH_KEY(head, val) \
	_HASH_KEY((head).tabshift, val)
/* grow at 100% load factor ... */
#define HASH_GROW_THRESHOLD(head) \
	((head).count >= HASH_SIZE(head))
/* ... shrink when dropping to roughly half */
#define HASH_SHRINK_THRESHOLD(head) \
	((head).count <= (HASH_SIZE(head) - 1) / 2)

extern void typesafe_hash_grow(struct thash_head *head);
extern void typesafe_hash_shrink(struct thash_head *head);
612
/* use as:
 *
 * PREDECL_HASH(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
 */
#define PREDECL_HASH(prefix)                                                   \
struct prefix ## _head { struct thash_head hh; };                              \
struct prefix ## _item { struct thash_item hi; };                              \
MACRO_REQUIRE_SEMICOLON() /* end */

/* all-zero head is valid; the table is allocated on first _add */
#define INIT_HASH(var) { }
627
#define DECLARE_HASH(prefix, type, field, cmpfn, hashfn)                       \
                                                                               \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
/* tear down; the table must already be empty.  minshift is cleared so the     \
 * shrink call releases the bucket array. */                                   \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	assert(h->hh.count == 0);                                              \
	h->hh.minshift = 0;                                                    \
	typesafe_hash_shrink(&h->hh);                                          \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
/* insert; returns an existing equal item (nothing inserted then), NULL on     \
 * success.  Chains are kept sorted by hashval. */                             \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item)         \
{                                                                              \
	h->hh.count++;                                                         \
	if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh))                     \
		typesafe_hash_grow(&h->hh);                                    \
                                                                               \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval);           \
	item->field.hi.hashval = hval;                                         \
	struct thash_item **np = &h->hh.entries[hbits];                        \
	while (*np && (*np)->hashval < hval)                                   \
		np = &(*np)->next;                                             \
	/* bugfix: compare against ALL chained items sharing this hashval      \
	 * (mirrors _const_find); previously only the first one was checked,   \
	 * so an equal item hidden behind a hashval collision could be         \
	 * inserted a second time. */                                          \
	while (*np && (*np)->hashval == hval) {                                \
		if (cmpfn(container_of(*np, type, field.hi), item) == 0) {     \
			h->hh.count--;                                         \
			return container_of(*np, type, field.hi);              \
		}                                                              \
		np = &(*np)->next;                                             \
	}                                                                      \
	item->field.hi.next = *np;                                             \
	*np = &item->field.hi;                                                 \
	return NULL;                                                           \
}                                                                              \
/* look up an item equal to "item" (per cmpfn), NULL if not present */         \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h,  \
					       const type *item)               \
{                                                                              \
	if (!h->hh.tabshift)                                                   \
		return NULL;                                                   \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval);           \
	const struct thash_item *hitem = h->hh.entries[hbits];                 \
	while (hitem && hitem->hashval < hval)                                 \
		hitem = hitem->next;                                           \
	while (hitem && hitem->hashval == hval) {                              \
		if (!cmpfn(container_of(hitem, type, field.hi), item))         \
			return container_of(hitem, type, field.hi);            \
		hitem = hitem->next;                                           \
	}                                                                      \
	return NULL;                                                           \
}                                                                              \
TYPESAFE_FIND(prefix, type)                                                    \
/* unlink item by pointer identity; NULL if it was not in the table */         \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	if (!h->hh.tabshift)                                                   \
		return NULL;                                                   \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	struct thash_item **np = &h->hh.entries[hbits];                        \
	while (*np && (*np)->hashval < hval)                                   \
		np = &(*np)->next;                                             \
	while (*np && *np != &item->field.hi && (*np)->hashval == hval)        \
		np = &(*np)->next;                                             \
	if (*np != &item->field.hi)                                            \
		return NULL;                                                   \
	*np = item->field.hi.next;                                             \
	item->field.hi.next = NULL;                                            \
	h->hh.count--;                                                         \
	if (HASH_SHRINK_THRESHOLD(h->hh))                                      \
		typesafe_hash_shrink(&h->hh);                                  \
	return item;                                                           \
}                                                                              \
/* remove & return some item (first non-empty bucket), NULL if empty */        \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	uint32_t i;                                                            \
	for (i = 0; i < HASH_SIZE(h->hh); i++)                                 \
		if (h->hh.entries[i]) {                                        \
			struct thash_item *hitem = h->hh.entries[i];           \
			h->hh.entries[i] = hitem->next;                        \
			h->hh.count--;                                         \
			hitem->next = NULL;                                    \
			if (HASH_SHRINK_THRESHOLD(h->hh))                      \
				typesafe_hash_shrink(&h->hh);                  \
			return container_of(hitem, type, field.hi);            \
		}                                                              \
	return NULL;                                                           \
}                                                                              \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	uint32_t i;                                                            \
	for (i = 0; i < HASH_SIZE(h->hh); i++)                                 \
		if (h->hh.entries[i])                                          \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL;                                                           \
}                                                                              \
/* iterate: rest of current chain, then subsequent non-empty buckets */        \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	const struct thash_item *hitem = &item->field.hi;                      \
	if (hitem->next)                                                       \
		return container_of(hitem->next, type, field.hi);              \
	uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1;                      \
	for (; i < HASH_SIZE(h->hh); i++)                                      \
		if (h->hh.entries[i])                                          \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL;                                                           \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
/* like _next, but tolerates item == NULL (returns NULL then) */               \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	if (!item)                                                             \
		return NULL;                                                   \
	return prefix ## _next(h, item);                                       \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->hh.count;                                                    \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
743
/* skiplist, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
#define SKIPLIST_MAXDEPTH 16
#define SKIPLIST_EMBED 4
#define SKIPLIST_OVERFLOW (SKIPLIST_EMBED - 1)

/* each item embeds the first SKIPLIST_EMBED level pointers; for taller
 * nodes the last embedded slot is a pointer to a separately allocated
 * sskip_overflow, tagged by setting its low bit (see _init below)
 */
struct sskip_item {
	struct sskip_item *next[SKIPLIST_EMBED];
};

struct sskip_overflow {
	struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
};

struct sskip_head {
	struct sskip_item hitem;
	struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
	size_t count;
};

/* use as:
 *
 * PREDECL_SKIPLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SKIPLIST(namelist, struct name, nlitem, cmpfunc)
 */
#define _PREDECL_SKIPLIST(prefix)                                              \
struct prefix ## _head { struct sskip_head sh; };                              \
struct prefix ## _item { struct sskip_item si; };                              \
MACRO_REQUIRE_SEMICOLON() /* end */

/* NOTE(review): unlike _init, these zero initializers do not set up the
 * tagged overflow pointer on hitem - presumably the implementation treats
 * an all-zero head as valid; confirm before relying on it */
#define INIT_SKIPLIST_UNIQ(var) { }
#define INIT_SKIPLIST_NONUNIQ(var) { }
782
/* cmpfn_nuq: plain compare for ordered lookups; cmpfn_uq: compare with
 * tiebreaker used for add/del (never 0 for distinct items in the NONUNIQ
 * variant)
 */
#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq)            \
                                                                               \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
	/* low bit tags this slot as pointing at the overflow array */         \
	h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *)            \
		((uintptr_t)h->sh.overflow | 1);                               \
}                                                                              \
/* tear down; does not free items - caller must have emptied the list */       \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
/* insert; returns an existing equal item (per cmpfn_uq, nothing inserted      \
 * then), NULL on success */                                                   \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct sskip_item *si;                                                 \
	si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq);         \
	return container_of_null(si, type, field.si);                          \
}                                                                              \
/* first item >= "item" (per cmpfn_nuq), NULL if none */                       \
macro_inline const type *prefix ## _const_find_gteq(                           \
		const struct prefix##_head *h, const type *item)               \
{                                                                              \
	const struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh,   \
			&item->field.si, cmpfn_nuq);                           \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
/* last item < "item" (per cmpfn_nuq), NULL if none */                         \
macro_inline const type *prefix ## _const_find_lt(                             \
		const struct prefix##_head *h, const type *item)               \
{                                                                              \
	const struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh,     \
			&item->field.si, cmpfn_nuq);                           \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
TYPESAFE_FIND_CMP(prefix, type)                                                \
/* unlink; returns the item, NULL if it was not on the list */                 \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct sskip_item *sitem = typesafe_skiplist_del(&h->sh,               \
			&item->field.si, cmpfn_uq);                            \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
/* remove & return the lowest-ordered item, NULL if empty */                   \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	struct sskip_item *sitem = typesafe_skiplist_pop(&h->sh);              \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
/* level 0 is a plain sorted linked list, so first/next are O(1) */            \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	const struct sskip_item *first = h->sh.hitem.next[0];                  \
	return container_of_null(first, type, field.si);                       \
}                                                                              \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	const struct sskip_item *next = item->field.si.next[0];                \
	return container_of_null(next, type, field.si);                        \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
/* like _next, but tolerates item == NULL (returns NULL then) */               \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	struct sskip_item *next;                                               \
	next = item ? item->field.si.next[0] : NULL;                           \
	return container_of_null(next, type, field.si);                        \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->sh.count;                                                    \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
850
#define PREDECL_SKIPLIST_UNIQ(prefix)                                          \
	_PREDECL_SKIPLIST(prefix)
/* unique skiplist: _add rejects duplicates, _find is available */
#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn)                      \
                                                                               \
/* adapter: compare embedded sskip_items through the user's cmpfn */           \
macro_inline int prefix ## __cmp(const struct sskip_item *a,                   \
				 const struct sskip_item *b)                   \
{                                                                              \
	return cmpfn(container_of(a, type, field.si),                          \
		     container_of(b, type, field.si));                         \
}                                                                              \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h,  \
					       const type *item)               \
{                                                                              \
	const struct sskip_item *sitem = typesafe_skiplist_find(&h->sh,        \
			&item->field.si, &prefix ## __cmp);                    \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
TYPESAFE_FIND(prefix, type)                                                    \
                                                                               \
_DECLARE_SKIPLIST(prefix, type, field,                                         \
		  prefix ## __cmp, prefix ## __cmp);                           \
MACRO_REQUIRE_SEMICOLON() /* end */

#define PREDECL_SKIPLIST_NONUNIQ(prefix)                                       \
	_PREDECL_SKIPLIST(prefix)
/* non-unique skiplist: add/del break ties by item address so equal items
 * can coexist; no _find for the same reason
 */
#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn)                   \
                                                                               \
macro_inline int prefix ## __cmp(const struct sskip_item *a,                   \
				 const struct sskip_item *b)                   \
{                                                                              \
	return cmpfn(container_of(a, type, field.si),                          \
		     container_of(b, type, field.si));                         \
}                                                                              \
/* tiebreaking variant used for add/del */                                     \
macro_inline int prefix ## __cmp_uq(const struct sskip_item *a,                \
				    const struct sskip_item *b)                \
{                                                                              \
	int cmpval = cmpfn(container_of(a, type, field.si),                    \
			   container_of(b, type, field.si));                   \
	if (cmpval)                                                            \
		return cmpval;                                                 \
	if (a < b)                                                             \
		return -1;                                                     \
	if (a > b)                                                             \
		return 1;                                                      \
	return 0;                                                              \
}                                                                              \
                                                                               \
_DECLARE_SKIPLIST(prefix, type, field,                                         \
		  prefix ## __cmp, prefix ## __cmp_uq);                        \
MACRO_REQUIRE_SEMICOLON() /* end */
901
902
/* out-of-line skiplist implementation, shared by all instantiations
 * (implemented in the accompanying .c file)
 */
extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
		struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_gteq(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_lt(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_del(
		struct sskip_head *head, struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_pop(struct sskip_head *head);
927
928 #ifdef __cplusplus
929 }
930 #endif
931
932 /* this needs to stay at the end because both files include each other.
933 * the resolved order is typesafe.h before typerb.h
934 */
935 #include "typerb.h"
936
937 #endif /* _FRR_TYPESAFE_H */