// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2016-2019  David Lamparter, for NetDEF, Inc.
 */

#ifndef _FRR_TYPESAFE_H
#define _FRR_TYPESAFE_H

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include "compiler.h"

#ifdef __cplusplus
extern "C" {
#endif

/* generic macros for all list-like types */

#define frr_each(prefix, head, item) \
	for (item = prefix##_first(head); item; \
	     item = prefix##_next(head, item))
#define frr_each_const(prefix, head, item) \
	for (item = prefix##_const_first(head); item; \
	     item = prefix##_const_next(head, item))
#define frr_each_safe(prefix, head, item) \
	for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
		prefix##_next_safe(head, \
				   (item = prefix##_first(head))); \
	     item; \
	     item = prefix##_safe, \
		prefix##_safe = prefix##_next_safe(head, prefix##_safe))
#define frr_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_next_safe(head, item); \
	     item; \
	     item = from, from = prefix##_next_safe(head, from))

/* reverse direction, only supported by a few containers */

#define frr_rev_each(prefix, head, item) \
	for (item = prefix##_last(head); item; \
	     item = prefix##_prev(head, item))
#define frr_rev_each_safe(prefix, head, item) \
	for (typeof(prefix##_prev_safe(head, NULL)) prefix##_safe = \
		prefix##_prev_safe(head, \
				   (item = prefix##_last(head))); \
	     item; \
	     item = prefix##_safe, \
		prefix##_safe = prefix##_prev_safe(head, prefix##_safe))
#define frr_rev_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_prev_safe(head, item); \
	     item; \
	     item = from, from = prefix##_prev_safe(head, from))
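
/* illustrative sketch (hypothetical "itemlist" / struct item names): plain
 * iteration vs. the _safe variant, which must be used when the current item
 * may be deleted mid-walk.  The frr_rev_each* macros work the same way on
 * containers that provide _last / _prev.
 *
 *	struct item *i;
 *
 *	frr_each (itemlist, &head, i)
 *		process(i);
 *
 *	frr_each_safe (itemlist, &head, i)
 *		if (expired(i))
 *			itemlist_del(&head, i);
 */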

/* non-const variants. these wrappers are the same for all the types, so
 * bundle them together here.
 */
#define TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	return (type *)prefix ## _const_first(h); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	return (type *)prefix ## _const_next(h, item); \
} \
/* ... */
#define TYPESAFE_LAST_PREV(prefix, type) \
macro_pure type *prefix ## _last(struct prefix##_head *h) \
{ \
	return (type *)prefix ## _const_last(h); \
} \
macro_pure type *prefix ## _prev(struct prefix##_head *h, type *item) \
{ \
	return (type *)prefix ## _const_prev(h, item); \
} \
/* ... */
#define TYPESAFE_FIND(prefix, type) \
macro_inline type *prefix ## _find(struct prefix##_head *h, \
				   const type *item) \
{ \
	return (type *)prefix ## _const_find(h, item); \
} \
/* ... */
#define TYPESAFE_FIND_CMP(prefix, type) \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
				      const type *item) \
{ \
	return (type *)prefix ## _const_find_lt(h, item); \
} \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
					const type *item) \
{ \
	return (type *)prefix ## _const_find_gteq(h, item); \
} \
/* ... */

/* *_member via find - when there is no better membership check than find() */
#define TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
macro_inline bool prefix ## _member(struct prefix##_head *h, \
				    const type *item) \
{ \
	return item == prefix ## _const_find(h, item); \
} \
/* ... */

/* *_member via find_gteq - same for non-unique containers */
#define TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
macro_inline bool prefix ## _member(struct prefix##_head *h, \
				    const type *item) \
{ \
	const type *iter; \
	for (iter = prefix ## _const_find_gteq(h, item); iter; \
	     iter = prefix ## _const_next(h, iter)) { \
		if (iter == item) \
			return true; \
		if (cmpfn(iter, item) > 0) \
			break; \
	} \
	return false; \
} \
/* ... */

/* SWAP_ALL_SIMPLE = for containers where the items don't point back to the
 * head *AND* the head doesn't point to itself (= everything except LIST,
 * DLIST and SKIPLIST), just switch out the entire head
 */
#define TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
				      struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
} \
/* ... */

/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 */

/* don't use these structs directly */
struct slist_item {
	struct slist_item *next;
};

struct slist_head {
	struct slist_item *first, **last_next;
	size_t count;
};

/* this replaces NULL as the value for ->next on the last item. */
extern struct slist_item typesafe_slist_sentinel;
#define _SLIST_LAST &typesafe_slist_sentinel

static inline void typesafe_list_add(struct slist_head *head,
		struct slist_item **pos, struct slist_item *item)
{
	item->next = *pos;
	*pos = item;
	if (pos == head->last_next)
		head->last_next = &item->next;
	head->count++;
}
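
/* how this works: "pos" points at the ->next field (or head->first) that the
 * new item is spliced in behind, so there is no special case for inserting
 * at the start.  E.g. with the list [A, B], calling with pos = &A->next
 * links item X as [A, X, B]; last_next is only updated when X becomes the
 * new tail.
 */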

extern bool typesafe_list_member(const struct slist_head *head,
				 const struct slist_item *item);

/* use as:
 *
 * PREDECL_LIST(namelist);
 * struct name {
 *   struct namelist_item nlitem;
 * };
 * DECLARE_LIST(namelist, struct name, nlitem);
 */
#define PREDECL_LIST(prefix) \
struct prefix ## _head { struct slist_head sh; }; \
struct prefix ## _item { struct slist_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_LIST(var) { .sh = { .first = _SLIST_LAST, \
	.last_next = &var.sh.first, }, }

#define DECLARE_LIST(prefix, type, field) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->sh.first = _SLIST_LAST; \
	h->sh.last_next = &h->sh.first; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, &h->sh.first, &item->field.si); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
				       type *after, type *item) \
{ \
	struct slist_item **nextp; \
	nextp = after ? &after->field.si.next : &h->sh.first; \
	typesafe_list_add(&h->sh, nextp, &item->field.si); \
} \
/* TODO: del_hint */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct slist_item **iter = &h->sh.first; \
	while (*iter != _SLIST_LAST && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (*iter == _SLIST_LAST) \
		return NULL; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	if (item->field.si.next == _SLIST_LAST) \
		h->sh.last_next = iter; \
	item->field.si.next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct slist_item *sitem = h->sh.first; \
	if (sitem == _SLIST_LAST) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	if (h->sh.first == _SLIST_LAST) \
		h->sh.last_next = &h->sh.first; \
	sitem->next = NULL; \
	return container_of(sitem, type, field.si); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
				      struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
	if (a->sh.last_next == &b->sh.first) \
		a->sh.last_next = &a->sh.first; \
	if (b->sh.last_next == &a->sh.first) \
		b->sh.last_next = &b->sh.first; \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	if (h->sh.first != _SLIST_LAST) \
		return container_of(h->sh.first, type, field.si); \
	return NULL; \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct slist_item *sitem = &item->field.si; \
	if (sitem->next != _SLIST_LAST) \
		return container_of(sitem->next, type, field.si); \
	return NULL; \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct slist_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	if (sitem->next != _SLIST_LAST) \
		return container_of(sitem->next, type, field.si); \
	return NULL; \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
macro_pure bool prefix ## _anywhere(const type *item) \
{ \
	return item->field.si.next != NULL; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
				  const type *item) \
{ \
	return typesafe_list_member(&h->sh, &item->field.si); \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
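
/* usage sketch (hypothetical "eventlist" / struct event names), using the
 * list as a FIFO queue via add_tail / pop as noted above:
 *
 *	PREDECL_LIST(eventlist);
 *	struct event {
 *		struct eventlist_item itm;
 *	};
 *	DECLARE_LIST(eventlist, struct event, itm);
 *
 *	struct eventlist_head head = INIT_LIST(head);
 *	struct event *ev;
 *
 *	eventlist_add_tail(&head, new_event);
 *	while ((ev = eventlist_pop(&head)))
 *		handle(ev);
 */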

/* don't use these structs directly */
struct dlist_item {
	struct dlist_item *next;
	struct dlist_item *prev;
};

struct dlist_head {
	struct dlist_item hitem;
	size_t count;
};

static inline void typesafe_dlist_add(struct dlist_head *head,
		struct dlist_item *prev, struct dlist_item *item)
{
	/* SA on clang-11 thinks this can happen, but in reality -assuming no
	 * memory corruption- it can't.  DLIST uses a "closed" ring, i.e. the
	 * termination at the end of the list is not NULL but rather a pointer
	 * back to the head.  (This eliminates special-casing the first or
	 * last item.)
	 *
	 * Sadly, can't use assert() here since the libfrr assert / xref code
	 * uses typesafe lists itself...  that said, if an assert tripped here
	 * we'd already be way past some memory corruption, so we might as
	 * well just take the SEGV.  (In the presence of corruption, we'd see
	 * random SEGVs from places that make no sense at all anyway, an
	 * assert might actually be a red herring.)
	 *
	 * ("assume()" tells the compiler to produce code as if the condition
	 * will always hold;  it doesn't have any actual effect here, it'll
	 * just SEGV out on "item->next->prev = item".)
	 */
	assume(prev->next != NULL);

	item->next = prev->next;
	item->next->prev = item;
	item->prev = prev;
	prev->next = item;
	head->count++;
}

static inline void typesafe_dlist_swap_all(struct dlist_head *a,
					   struct dlist_head *b)
{
	struct dlist_head tmp = *a;

	a->count = b->count;
	if (a->count) {
		a->hitem.next = b->hitem.next;
		a->hitem.prev = b->hitem.prev;
		a->hitem.next->prev = &a->hitem;
		a->hitem.prev->next = &a->hitem;
	} else {
		a->hitem.next = &a->hitem;
		a->hitem.prev = &a->hitem;
	}

	b->count = tmp.count;
	if (b->count) {
		b->hitem.next = tmp.hitem.next;
		b->hitem.prev = tmp.hitem.prev;
		b->hitem.next->prev = &b->hitem;
		b->hitem.prev->next = &b->hitem;
	} else {
		b->hitem.next = &b->hitem;
		b->hitem.prev = &b->hitem;
	}
}

extern bool typesafe_dlist_member(const struct dlist_head *head,
				  const struct dlist_item *item);

/* double-linked list, for fast item deletion
 */
#define PREDECL_DLIST(prefix) \
struct prefix ## _head { struct dlist_head dh; }; \
struct prefix ## _item { struct dlist_item di; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_DLIST(var) { .dh = { \
	.hitem = { &var.dh.hitem, &var.dh.hitem }, }, }

#define DECLARE_DLIST(prefix, type, field) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->dh.hitem.prev = &h->dh.hitem; \
	h->dh.hitem.next = &h->dh.hitem; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, &h->dh.hitem, &item->field.di); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, h->dh.hitem.prev, &item->field.di); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
				       type *after, type *item) \
{ \
	struct dlist_item *prev; \
	prev = after ? &after->field.di : &h->dh.hitem; \
	typesafe_dlist_add(&h->dh, prev, &item->field.di); \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct dlist_item *ditem = &item->field.di; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
				      struct prefix##_head *b) \
{ \
	typesafe_dlist_swap_all(&a->dh, &b->dh); \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	const struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	if (ditem->next == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem->next, type, field.di); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure const type *prefix ## _const_last(const struct prefix##_head *h) \
{ \
	const struct dlist_item *ditem = h->dh.hitem.prev; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure const type *prefix ## _const_prev(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	if (ditem->prev == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem->prev, type, field.di); \
} \
TYPESAFE_LAST_PREV(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure type *prefix ## _prev_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _prev(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->dh.count; \
} \
macro_pure bool prefix ## _anywhere(const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	return ditem->next && ditem->prev; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
				  const type *item) \
{ \
	return typesafe_dlist_member(&h->dh, &item->field.di); \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
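
/* usage sketch (hypothetical "conns" / struct conn names); since each item
 * carries prev/next pointers, _del needs no list walk:
 *
 *	PREDECL_DLIST(conns);
 *	struct conn {
 *		struct conns_item itm;
 *	};
 *	DECLARE_DLIST(conns, struct conn, itm);
 *
 *	struct conns_head head = INIT_DLIST(head);
 *
 *	conns_add_tail(&head, c);
 *	...
 *	conns_del(&head, c);	// O(1), no search
 */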

/* note: heap currently caps out at 4G items */

#define HEAP_NARY 8U
typedef uint32_t heap_index_i;

struct heap_item {
	uint32_t index;
};

struct heap_head {
	struct heap_item **array;
	uint32_t arraysz, count;
};

#define HEAP_RESIZE_TRESH_UP(h) \
	(h->hh.count + 1 >= h->hh.arraysz)
#define HEAP_RESIZE_TRESH_DN(h) \
	(h->hh.count == 0 || \
	 h->hh.arraysz - h->hh.count > (h->hh.count + 1024) / 2)

#define PREDECL_HEAP(prefix) \
struct prefix ## _head { struct heap_head hh; }; \
struct prefix ## _item { struct heap_item hi; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_HEAP(var) { }

#define DECLARE_HEAP(prefix, type, field, cmpfn) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline int prefix ## __cmp(const struct heap_item *a, \
				 const struct heap_item *b) \
{ \
	return cmpfn(container_of(a, type, field.hi), \
		     container_of(b, type, field.hi)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	if (HEAP_RESIZE_TRESH_UP(h)) \
		typesafe_heap_resize(&h->hh, true); \
	typesafe_heap_pullup(&h->hh, h->hh.count, &item->field.hi, \
			     prefix ## __cmp); \
	h->hh.count++; \
	return NULL; \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct heap_item *other; \
	uint32_t index = item->field.hi.index; \
	assert(h->hh.array[index] == &item->field.hi); \
	h->hh.count--; \
	other = h->hh.array[h->hh.count]; \
	if (cmpfn(container_of(other, type, field.hi), item) < 0) \
		typesafe_heap_pullup(&h->hh, index, other, prefix ## __cmp); \
	else \
		typesafe_heap_pushdown(&h->hh, index, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h)) \
		typesafe_heap_resize(&h->hh, false); \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct heap_item *hitem, *other; \
	if (h->hh.count == 0) \
		return NULL; \
	hitem = h->hh.array[0]; \
	h->hh.count--; \
	other = h->hh.array[h->hh.count]; \
	typesafe_heap_pushdown(&h->hh, 0, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h)) \
		typesafe_heap_resize(&h->hh, false); \
	return container_of(hitem, type, field.hi); \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	if (h->hh.count == 0) \
		return NULL; \
	return container_of(h->hh.array[0], type, field.hi); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	uint32_t idx = item->field.hi.index + 1; \
	if (idx >= h->hh.count) \
		return NULL; \
	return container_of(h->hh.array[idx], type, field.hi); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
				  const type *item) \
{ \
	uint32_t idx = item->field.hi.index; \
	if (idx >= h->hh.count) \
		return false; \
	return h->hh.array[idx] == &item->field.hi; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */

extern void typesafe_heap_resize(struct heap_head *head, bool grow);
extern void typesafe_heap_pushdown(struct heap_head *head, uint32_t index,
				   struct heap_item *item,
				   int (*cmpfn)(const struct heap_item *a,
						const struct heap_item *b));
extern void typesafe_heap_pullup(struct heap_head *head, uint32_t index,
				 struct heap_item *item,
				 int (*cmpfn)(const struct heap_item *a,
					      const struct heap_item *b));
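
/* usage sketch (hypothetical "tmrheap" / struct timer names); _first/_pop
 * always yield the item that compares smallest, so the heap serves as a
 * priority queue.  Note _next iterates in array order, not sorted order.
 *
 *	PREDECL_HEAP(tmrheap);
 *	struct timer {
 *		struct tmrheap_item itm;
 *		uint64_t deadline;
 *	};
 *	static int timer_cmp(const struct timer *a, const struct timer *b)
 *	{
 *		if (a->deadline < b->deadline)
 *			return -1;
 *		return a->deadline > b->deadline;
 *	}
 *	DECLARE_HEAP(tmrheap, struct timer, itm, timer_cmp);
 *
 *	tmrheap_init(&heap);
 *	tmrheap_add(&heap, t);
 *	t = tmrheap_pop(&heap);	// earliest deadline
 */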

/* single-linked list, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
struct ssort_item {
	struct ssort_item *next;
};

struct ssort_head {
	struct ssort_item *first;
	size_t count;
};

/* use as:
 *
 * PREDECL_SORTLIST_UNIQ(namelist);
 * struct name {
 *   struct namelist_item nlitem;
 * };
 * DECLARE_SORTLIST_UNIQ(namelist, struct name, nlitem, cmpfunc);
 *
 * (or the _NONUNIQ variants, if duplicate keys may be added.)
 */
#define _PREDECL_SORTLIST(prefix) \
struct prefix ## _head { struct ssort_head sh; }; \
struct prefix ## _item { struct ssort_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_SORTLIST_UNIQ(var) { }
#define INIT_SORTLIST_NONUNIQ(var) { }

#define PREDECL_SORTLIST_UNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
#define PREDECL_SORTLIST_NONUNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)

#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **np = &h->sh.first; \
	int c = 1; \
	while (*np && (c = cmpfn_uq( \
			container_of(*np, type, field.si), item)) < 0) \
		np = &(*np)->next; \
	if (c == 0) \
		return container_of(*np, type, field.si); \
	item->field.si.next = *np; \
	*np = &item->field.si; \
	h->sh.count++; \
	return NULL; \
} \
macro_inline const type *prefix ## _const_find_gteq( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline const type *prefix ## _const_find_lt( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct ssort_item *prev = NULL, *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = (prev = sitem)->next; \
	return container_of_null(prev, type, field.si); \
} \
TYPESAFE_FIND_CMP(prefix, type) \
/* TODO: del_hint */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **iter = &h->sh.first; \
	while (*iter && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (!*iter) \
		return NULL; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	item->field.si.next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	if (!sitem) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	return container_of(sitem, type, field.si); \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct ssort_item *sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */

#define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn) \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn); \
\
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
					       const type *item) \
{ \
	const struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	if (!sitem || cmpval > 0) \
		return NULL; \
	return container_of(sitem, type, field.si); \
} \
TYPESAFE_FIND(prefix, type) \
TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
MACRO_REQUIRE_SEMICOLON() /* end */

#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn) \
macro_inline int _ ## prefix ## _cmp(const type *a, const type *b) \
{ \
	int cmpval = cmpfn(a, b); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
_DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp); \
TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
MACRO_REQUIRE_SEMICOLON() /* end */
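
/* usage sketch (hypothetical "rmap" names); _add keeps the list ordered by
 * cmpfn, and the _UNIQ variant returns the existing duplicate instead of
 * inserting:
 *
 *	PREDECL_SORTLIST_UNIQ(rmap);
 *	struct rmap {
 *		struct rmap_item itm;
 *		int seq;
 *	};
 *	static int rmap_cmp(const struct rmap *a, const struct rmap *b)
 *	{
 *		return (a->seq > b->seq) - (a->seq < b->seq);
 *	}
 *	DECLARE_SORTLIST_UNIQ(rmap, struct rmap, itm, rmap_cmp);
 *
 *	struct rmap ref = { .seq = 10 };
 *
 *	exact = rmap_find(&head, &ref);		// NULL unless seq 10 exists
 *	next = rmap_find_gteq(&head, &ref);	// first item with seq >= 10
 */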


/* hash, "sorted" by hash value
 */

/* don't use these structs directly */
struct thash_item {
	struct thash_item *next;
	uint32_t hashval;
};

struct thash_head {
	struct thash_item **entries;
	uint32_t count;

	/* tabshift can be 0 if the hash table is empty and entries is NULL.
	 * otherwise it will always be 2 or larger because it contains
	 * the shift value *plus 1*.  This is a trick to make HASH_SIZE return
	 * the correct value (with the >> 1) for tabshift == 0, without
	 * needing a conditional branch.
	 */
	uint8_t tabshift;
	uint8_t minshift, maxshift;
};

#define _HASH_SIZE(tabshift) \
	((1U << (tabshift)) >> 1)
#define HASH_SIZE(head) \
	_HASH_SIZE((head).tabshift)
#define _HASH_KEY(tabshift, val) \
	({ \
		assume((tabshift) >= 2 && (tabshift) <= 33); \
		(val) >> (33 - (tabshift)); \
	})
#define HASH_KEY(head, val) \
	_HASH_KEY((head).tabshift, val)
#define HASH_GROW_THRESHOLD(head) \
	((head).count >= HASH_SIZE(head))
#define HASH_SHRINK_THRESHOLD(head) \
	((head).count <= (HASH_SIZE(head) - 1) / 2)
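
/* worked example of the tabshift trick: a table of 16 buckets has a real
 * shift of 4, stored as tabshift = 5.  HASH_SIZE gives (1 << 5) >> 1 = 16,
 * and HASH_KEY takes the top 4 bits of the 32-bit hash value,
 * (val) >> (33 - 5) = (val) >> 28.  For an empty hash, tabshift == 0 and
 * HASH_SIZE gives (1 << 0) >> 1 = 0 without a conditional branch.
 */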

extern void typesafe_hash_grow(struct thash_head *head);
extern void typesafe_hash_shrink(struct thash_head *head);

/* use as:
 *
 * PREDECL_HASH(namelist);
 * struct name {
 *   struct namelist_item nlitem;
 * };
 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc);
 */
#define PREDECL_HASH(prefix) \
struct prefix ## _head { struct thash_head hh; }; \
struct prefix ## _item { struct thash_item hi; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_HASH(var) { }

#define DECLARE_HASH(prefix, type, field, cmpfn, hashfn) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	h->hh.minshift = 0; \
	typesafe_hash_shrink(&h->hh); \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	h->hh.count++; \
	if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh)) \
		typesafe_hash_grow(&h->hh); \
\
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	item->field.hi.hashval = hval; \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	while (*np && (*np)->hashval == hval) { \
		if (cmpfn(container_of(*np, type, field.hi), item) == 0) { \
			h->hh.count--; \
			return container_of(*np, type, field.hi); \
		} \
		np = &(*np)->next; \
	} \
	item->field.hi.next = *np; \
	*np = &item->field.hi; \
	return NULL; \
} \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
					       const type *item) \
{ \
	if (!h->hh.tabshift) \
		return NULL; \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	const struct thash_item *hitem = h->hh.entries[hbits]; \
	while (hitem && hitem->hashval < hval) \
		hitem = hitem->next; \
	while (hitem && hitem->hashval == hval) { \
		if (!cmpfn(container_of(hitem, type, field.hi), item)) \
			return container_of(hitem, type, field.hi); \
		hitem = hitem->next; \
	} \
	return NULL; \
} \
TYPESAFE_FIND(prefix, type) \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	if (!h->hh.tabshift) \
		return NULL; \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	while (*np && *np != &item->field.hi && (*np)->hashval == hval) \
		np = &(*np)->next; \
	if (*np != &item->field.hi) \
		return NULL; \
	*np = item->field.hi.next; \
	item->field.hi.next = NULL; \
	h->hh.count--; \
	if (HASH_SHRINK_THRESHOLD(h->hh)) \
		typesafe_hash_shrink(&h->hh); \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) { \
			struct thash_item *hitem = h->hh.entries[i]; \
			h->hh.entries[i] = hitem->next; \
			h->hh.count--; \
			hitem->next = NULL; \
			if (HASH_SHRINK_THRESHOLD(h->hh)) \
				typesafe_hash_shrink(&h->hh); \
			return container_of(hitem, type, field.hi); \
		} \
	return NULL; \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct thash_item *hitem = &item->field.hi; \
	if (hitem->next) \
		return container_of(hitem->next, type, field.hi); \
	uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1; \
	for (; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
				  const type *item) \
{ \
	if (!h->hh.tabshift) \
		return false; \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	const struct thash_item *hitem = h->hh.entries[hbits]; \
	while (hitem && hitem->hashval < hval) \
		hitem = hitem->next; \
	for (; hitem && hitem->hashval == hval; hitem = hitem->next) \
		if (hitem == &item->field.hi) \
			return true; \
	return false; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
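
/* usage sketch (hypothetical "nhash" / struct node names); cmpfunc and
 * hashfunc must agree, i.e. items that compare equal must hash to the same
 * value.  _add returns NULL on success or the already-present equal item.
 * (jhash_1word is from lib/jhash.h.)
 *
 *	PREDECL_HASH(nhash);
 *	struct node {
 *		struct nhash_item itm;
 *		uint32_t id;
 *	};
 *	static int node_cmp(const struct node *a, const struct node *b)
 *	{
 *		return (a->id > b->id) - (a->id < b->id);
 *	}
 *	static uint32_t node_hash(const struct node *i)
 *	{
 *		return jhash_1word(i->id, 0xdeadbeef);
 *	}
 *	DECLARE_HASH(nhash, struct node, itm, node_cmp, node_hash);
 *
 *	struct node ref = { .id = 1234 };
 *	struct node *n = nhash_find(&head, &ref);
 */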

/* skiplist, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
#define SKIPLIST_MAXDEPTH 16
#define SKIPLIST_EMBED 4
#define SKIPLIST_OVERFLOW (SKIPLIST_EMBED - 1)

struct sskip_item {
	struct sskip_item *next[SKIPLIST_EMBED];
};

struct sskip_overflow {
	struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
};

struct sskip_head {
	struct sskip_item hitem;
	struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
	size_t count;
};

/* use as:
 *
 * PREDECL_SKIPLIST_UNIQ(namelist);
 * struct name {
 *   struct namelist_item nlitem;
 * };
 * DECLARE_SKIPLIST_UNIQ(namelist, struct name, nlitem, cmpfunc);
 *
 * (or the _NONUNIQ variants, if duplicate keys may be added.)
 */
#define _PREDECL_SKIPLIST(prefix) \
struct prefix ## _head { struct sskip_head sh; }; \
struct prefix ## _item { struct sskip_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_SKIPLIST_UNIQ(var) { }
#define INIT_SKIPLIST_NONUNIQ(var) { }

#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
		((uintptr_t)h->sh.overflow | 1); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *si; \
	si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq); \
	return container_of_null(si, type, field.si); \
} \
macro_inline const type *prefix ## _const_find_gteq( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh, \
			&item->field.si, cmpfn_nuq); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline const type *prefix ## _const_find_lt( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh, \
			&item->field.si, cmpfn_nuq); \
	return container_of_null(sitem, type, field.si); \
} \
TYPESAFE_FIND_CMP(prefix, type) \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_del(&h->sh, \
			&item->field.si, cmpfn_uq); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_pop(&h->sh); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
				      struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
	a->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
		((uintptr_t)a->sh.overflow | 1); \
	b->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
		((uintptr_t)b->sh.overflow | 1); \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	const struct sskip_item *first = h->sh.hitem.next[0]; \
	return container_of_null(first, type, field.si); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
					     const type *item) \
{ \
	const struct sskip_item *next = item->field.si.next[0]; \
	return container_of_null(next, type, field.si); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *next; \
	next = item ? item->field.si.next[0] : NULL; \
	return container_of_null(next, type, field.si); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */

#define PREDECL_SKIPLIST_UNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn) \
\
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
				 const struct sskip_item *b) \
{ \
	return cmpfn(container_of(a, type, field.si), \
		     container_of(b, type, field.si)); \
} \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
					       const type *item) \
{ \
	const struct sskip_item *sitem = typesafe_skiplist_find(&h->sh, \
			&item->field.si, &prefix ## __cmp); \
	return container_of_null(sitem, type, field.si); \
} \
TYPESAFE_FIND(prefix, type) \
TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
\
_DECLARE_SKIPLIST(prefix, type, field, \
		  prefix ## __cmp, prefix ## __cmp); \
MACRO_REQUIRE_SEMICOLON() /* end */

#define PREDECL_SKIPLIST_NONUNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn) \
\
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
				 const struct sskip_item *b) \
{ \
	return cmpfn(container_of(a, type, field.si), \
		     container_of(b, type, field.si)); \
} \
macro_inline int prefix ## __cmp_uq(const struct sskip_item *a, \
				    const struct sskip_item *b) \
{ \
	int cmpval = cmpfn(container_of(a, type, field.si), \
			   container_of(b, type, field.si)); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
\
_DECLARE_SKIPLIST(prefix, type, field, \
		  prefix ## __cmp, prefix ## __cmp_uq); \
TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
MACRO_REQUIRE_SEMICOLON() /* end */
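
/* usage sketch (hypothetical "prefixes" / struct pfx names); same sorted
 * API as the sorted list (add / find / find_gteq / find_lt / pop), but with
 * O(log n) expected cost per operation.  Iteration follows cmpfn order:
 *
 *	PREDECL_SKIPLIST_UNIQ(prefixes);
 *	struct pfx {
 *		struct prefixes_item itm;
 *	};
 *	DECLARE_SKIPLIST_UNIQ(prefixes, struct pfx, itm, pfx_cmp);
 *
 *	struct pfx *p;
 *
 *	frr_each (prefixes, &head, p)
 *		show(p);
 */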


extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
		struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_gteq(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_lt(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_del(
		struct sskip_head *head, struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_pop(struct sskip_head *head);

#ifdef __cplusplus
}
#endif

/* this needs to stay at the end because both files include each other.
 * the resolved order is typesafe.h before typerb.h
 */
#include "typerb.h"

#endif /* _FRR_TYPESAFE_H */