/* lib/typesafe.h — FRR typesafe container macros
 * (provenance: git.proxmox.com mirror of mirror_frr.git)
 */
1 // SPDX-License-Identifier: ISC
2 /*
3 * Copyright (c) 2016-2019 David Lamparter, for NetDEF, Inc.
4 */
5
6 #ifndef _FRR_TYPESAFE_H
7 #define _FRR_TYPESAFE_H
8
9 #include <stddef.h>
10 #include <stdint.h>
11 #include <stdbool.h>
12 #include "compiler.h"
13
14 #ifdef __cplusplus
15 extern "C" {
16 #endif
17
/* generic iteration macros for all list-like types.
 *
 * "prefix" is the name given to PREDECL_*/DECLARE_*, "head" a pointer to
 * the container head, "item" the user's iteration variable.
 */

/* plain iteration; "item" must NOT be deleted from the container while
 * iterating (use frr_each_safe for that.)
 */
#define frr_each(prefix, head, item) \
	for (item = prefix##_first(head); item; \
	     item = prefix##_next(head, item))
/* deletion-safe iteration: the successor is fetched into a hidden
 * "prefix_safe" variable before the loop body runs, so deleting "item"
 * inside the body is fine.  (Deleting other items is not.)
 */
#define frr_each_safe(prefix, head, item) \
	for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
		prefix##_next_safe(head, \
				   (item = prefix##_first(head))); \
	     item; \
	     item = prefix##_safe, \
	     prefix##_safe = prefix##_next_safe(head, prefix##_safe))
/* resumable deletion-safe iteration starting at "from"; "from" tracks the
 * successor, so the loop can be broken out of and restarted later.
 */
#define frr_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_next_safe(head, item); \
	     item; \
	     item = from, from = prefix##_next_safe(head, from))
34
/* reverse-direction counterparts of the frr_each macros above.  Only
 * supported by containers that generate _last/_prev (e.g. DLIST); see
 * TYPESAFE_LAST_PREV below.
 */

#define frr_rev_each(prefix, head, item) \
	for (item = prefix##_last(head); item; \
	     item = prefix##_prev(head, item))
/* predecessor is prefetched into "prefix_safe"; deleting "item" is OK */
#define frr_rev_each_safe(prefix, head, item) \
	for (typeof(prefix##_prev_safe(head, NULL)) prefix##_safe = \
		prefix##_prev_safe(head, \
				   (item = prefix##_last(head))); \
	     item; \
	     item = prefix##_safe, \
	     prefix##_safe = prefix##_prev_safe(head, prefix##_safe))
#define frr_rev_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_prev_safe(head, item); \
	     item; \
	     item = from, from = prefix##_prev_safe(head, from))
51
/* non-const variants.  these wrappers are the same for all the types, so
 * bundle them together here.  Each DECLARE_* implements the _const_*
 * accessors and then instantiates the matching wrapper macro below, which
 * just casts the constness away for use on non-const heads.
 */
#define TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	return (type *)prefix ## _const_first(h); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	return (type *)prefix ## _const_next(h, item); \
} \
/* ... */
#define TYPESAFE_LAST_PREV(prefix, type) \
macro_pure type *prefix ## _last(struct prefix##_head *h) \
{ \
	return (type *)prefix ## _const_last(h); \
} \
macro_pure type *prefix ## _prev(struct prefix##_head *h, type *item) \
{ \
	return (type *)prefix ## _const_prev(h, item); \
} \
/* ... */
#define TYPESAFE_FIND(prefix, type) \
macro_inline type *prefix ## _find(struct prefix##_head *h, \
		const type *item) \
{ \
	return (type *)prefix ## _const_find(h, item); \
} \
/* ... */
#define TYPESAFE_FIND_CMP(prefix, type) \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
		const type *item) \
{ \
	return (type *)prefix ## _const_find_lt(h, item); \
} \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
		const type *item) \
{ \
	return (type *)prefix ## _const_find_gteq(h, item); \
} \
/* ... */
94
/* *_member via find - when there is no better membership check than find().
 * Works for unique containers only: find() returns the one item equal to
 * "item", so pointer identity proves membership.
 */
#define TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
macro_inline bool prefix ## _member(struct prefix##_head *h, \
		const type *item) \
{ \
	return item == prefix ## _const_find(h, item); \
} \
/* ... */
103
/* *_member via find_gteq - same for non-unique containers.  Several items
 * may compare equal to "item", so walk all of them starting at the first
 * greater-or-equal position and check for pointer identity; stop as soon
 * as the compare result goes positive (past the run of equal items).
 */
#define TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
macro_inline bool prefix ## _member(struct prefix##_head *h, \
		const type *item) \
{ \
	const type *iter; \
	for (iter = prefix ## _const_find_gteq(h, item); iter; \
	     iter = prefix ## _const_next(h, iter)) { \
		if (iter == item) \
			return true; \
		if (cmpfn(iter, item) > 0) \
			break; \
	} \
	return false; \
} \
/* ... */
120
/* SWAP_ALL_SIMPLE = for containers where the items don't point back to the
 * head *AND* the head doesn't point to itself (= everything except LIST,
 * DLIST and SKIPLIST), just switch out the entire head
 */
#define TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
		struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
} \
/* ... */
134
/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 */

/* don't use these structs directly */
struct slist_item {
	struct slist_item *next;	/* _SLIST_LAST sentinel at tail, NULL
					 * when the item is not on any list */
};

struct slist_head {
	/* "first" points at the first item (or the sentinel when empty);
	 * "last_next" points at the tail item's ->next field (or at "first"
	 * when empty), giving O(1) add_tail.
	 */
	struct slist_item *first, **last_next;
	size_t count;			/* number of items on the list */
};

/* this replaces NULL as the value for ->next on the last item. */
extern struct slist_item typesafe_slist_sentinel;
#define _SLIST_LAST &typesafe_slist_sentinel
152
153 static inline void typesafe_list_add(struct slist_head *head,
154 struct slist_item **pos, struct slist_item *item)
155 {
156 item->next = *pos;
157 *pos = item;
158 if (pos == head->last_next)
159 head->last_next = &item->next;
160 head->count++;
161 }
162
/* O(n) membership check - walks the list comparing item pointers */
extern bool typesafe_list_member(const struct slist_head *head,
				 const struct slist_item *item);
165
/* use as:
 *
 * PREDECL_LIST(namelist);
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_LIST(namelist, struct name, nlitem);
 */

/* declares the per-list wrapper types (struct prefix_head / prefix_item);
 * put this in headers, DECLARE_LIST where the accessors are needed.
 */
#define PREDECL_LIST(prefix) \
struct prefix ## _head { struct slist_head sh; }; \
struct prefix ## _item { struct slist_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */
178
/* static initializer for a list head variable.  Must mirror prefix##_init():
 * ->first holds the _SLIST_LAST sentinel (NOT NULL) on an empty list, since
 * all generated accessors (_first, _pop, _del, _next) compare against the
 * sentinel rather than NULL.  Without it, a statically initialized list
 * would hand container_of(NULL, ...) garbage to the caller.
 */
#define INIT_LIST(var) { .sh = { .first = _SLIST_LAST, .last_next = &var.sh.first, }, }
180
/* DECLARE_LIST - generate the accessor functions for an unsorted
 * single-linked list declared with PREDECL_LIST.
 *
 * prefix: list name, type: user item struct, field: the prefix##_item
 * member inside "type".  Generates: _init/_fini, _add_head/_add_tail/
 * _add_after (O(1)), _del (O(n)), _pop (O(1)), _swap_all, _first/_next/
 * _next_safe (+const variants), _count, _anywhere, _member.
 */
#define DECLARE_LIST(prefix, type, field) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->sh.first = _SLIST_LAST; \
	h->sh.last_next = &h->sh.first; \
} \
/* the list must be empty when _fini is called */ \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, &h->sh.first, &item->field.si); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si); \
} \
/* after == NULL inserts at the head */ \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct slist_item **nextp; \
	nextp = after ? &after->field.si.next : &h->sh.first; \
	typesafe_list_add(&h->sh, nextp, &item->field.si); \
} \
/* TODO: del_hint */ \
/* O(n); returns item if it was on the list, NULL otherwise */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct slist_item **iter = &h->sh.first; \
	while (*iter != _SLIST_LAST && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (*iter == _SLIST_LAST) \
		return NULL; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	if (item->field.si.next == _SLIST_LAST) \
		h->sh.last_next = iter; \
	/* ->next = NULL marks "not on a list" for _anywhere() */ \
	item->field.si.next = NULL; \
	return item; \
} \
/* remove & return the first item, NULL if empty */ \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct slist_item *sitem = h->sh.first; \
	if (sitem == _SLIST_LAST) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	if (h->sh.first == _SLIST_LAST) \
		h->sh.last_next = &h->sh.first; \
	sitem->next = NULL; \
	return container_of(sitem, type, field.si); \
} \
/* heads point into themselves via last_next, so fix those up after swap */ \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
		struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
	if (a->sh.last_next == &b->sh.first) \
		a->sh.last_next = &a->sh.first; \
	if (b->sh.last_next == &a->sh.first) \
		b->sh.last_next = &b->sh.first; \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	if (h->sh.first != _SLIST_LAST) \
		return container_of(h->sh.first, type, field.si); \
	return NULL; \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct slist_item *sitem = &item->field.si; \
	if (sitem->next != _SLIST_LAST) \
		return container_of(sitem->next, type, field.si); \
	return NULL; \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
/* like _next, but tolerates item == NULL (for frr_each_safe) */ \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct slist_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	if (sitem->next != _SLIST_LAST) \
		return container_of(sitem->next, type, field.si); \
	return NULL; \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
/* true if the item is on *some* list of this type (->next != NULL) */ \
macro_pure bool prefix ## _anywhere(const type *item) \
{ \
	return item->field.si.next != NULL; \
} \
/* true if the item is on *this* list (O(n) walk) */ \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
		const type *item) \
{ \
	return typesafe_list_member(&h->sh, &item->field.si); \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
285
/* don't use these structs directly */
struct dlist_item {
	struct dlist_item *next;	/* closed ring: tail links back to the
					 * head's hitem, never NULL while on
					 * a list; NULL when off-list */
	struct dlist_item *prev;
};

struct dlist_head {
	struct dlist_item hitem;	/* ring anchor; hitem.next == first,
					 * hitem.prev == last */
	size_t count;
};
296
297 static inline void typesafe_dlist_add(struct dlist_head *head,
298 struct dlist_item *prev, struct dlist_item *item)
299 {
300 /* SA on clang-11 thinks this can happen, but in reality -assuming no
301 * memory corruption- it can't. DLIST uses a "closed" ring, i.e. the
302 * termination at the end of the list is not NULL but rather a pointer
303 * back to the head. (This eliminates special-casing the first or last
304 * item.)
305 *
306 * Sadly, can't use assert() here since the libfrr assert / xref code
307 * uses typesafe lists itself... that said, if an assert tripped here
308 * we'd already be way past some memory corruption, so we might as
309 * well just take the SEGV. (In the presence of corruption, we'd see
310 * random SEGVs from places that make no sense at all anyway, an
311 * assert might actually be a red herring.)
312 *
313 * ("assume()" tells the compiler to produce code as if the condition
314 * will always hold; it doesn't have any actual effect here, it'll
315 * just SEGV out on "item->next->prev = item".)
316 */
317 assume(prev->next != NULL);
318
319 item->next = prev->next;
320 item->next->prev = item;
321 item->prev = prev;
322 prev->next = item;
323 head->count++;
324 }
325
326 static inline void typesafe_dlist_swap_all(struct dlist_head *a,
327 struct dlist_head *b)
328 {
329 struct dlist_head tmp = *a;
330
331 a->count = b->count;
332 if (a->count) {
333 a->hitem.next = b->hitem.next;
334 a->hitem.prev = b->hitem.prev;
335 a->hitem.next->prev = &a->hitem;
336 a->hitem.prev->next = &a->hitem;
337 } else {
338 a->hitem.next = &a->hitem;
339 a->hitem.prev = &a->hitem;
340 }
341
342 b->count = tmp.count;
343 if (b->count) {
344 b->hitem.next = tmp.hitem.next;
345 b->hitem.prev = tmp.hitem.prev;
346 b->hitem.next->prev = &b->hitem;
347 b->hitem.prev->next = &b->hitem;
348 } else {
349 b->hitem.next = &b->hitem;
350 b->hitem.prev = &b->hitem;
351 }
352 }
353
/* O(n) membership check - walks the ring comparing item pointers */
extern bool typesafe_dlist_member(const struct dlist_head *head,
				  const struct dlist_item *item);

/* double-linked list, for fast item deletion
 */
#define PREDECL_DLIST(prefix) \
struct prefix ## _head { struct dlist_head dh; }; \
struct prefix ## _item { struct dlist_item di; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* static initializer: empty ring has the anchor pointing at itself,
 * matching prefix##_init().
 */
#define INIT_DLIST(var) { .dh = { \
	.hitem = { &var.dh.hitem, &var.dh.hitem }, }, }
366
/* DECLARE_DLIST - generate the accessor functions for a double-linked
 * list declared with PREDECL_DLIST.
 *
 * Same API as DECLARE_LIST, plus reverse iteration (_last/_prev/
 * _prev_safe) and O(1) _del.
 */
#define DECLARE_DLIST(prefix, type, field) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->dh.hitem.prev = &h->dh.hitem; \
	h->dh.hitem.next = &h->dh.hitem; \
} \
/* the list must be empty when _fini is called */ \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, &h->dh.hitem, &item->field.di); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, h->dh.hitem.prev, &item->field.di); \
} \
/* after == NULL inserts at the head */ \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct dlist_item *prev; \
	prev = after ? &after->field.di : &h->dh.hitem; \
	typesafe_dlist_add(&h->dh, prev, &item->field.di); \
} \
/* O(1) unlink; item must be on this list (not checked) */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct dlist_item *ditem = &item->field.di; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
	return item; \
} \
/* remove & return the first item, NULL if empty */ \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
		struct prefix##_head *b) \
{ \
	typesafe_dlist_swap_all(&a->dh, &b->dh); \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	const struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	if (ditem->next == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem->next, type, field.di); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure const type *prefix ## _const_last(const struct prefix##_head *h) \
{ \
	const struct dlist_item *ditem = h->dh.hitem.prev; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure const type *prefix ## _const_prev(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	if (ditem->prev == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem->prev, type, field.di); \
} \
TYPESAFE_LAST_PREV(prefix, type) \
/* NULL-tolerant variants for the frr_each_safe machinery */ \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure type *prefix ## _prev_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _prev(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->dh.count; \
} \
/* true if on *some* list of this type (_del/_pop NULL the pointers) */ \
macro_pure bool prefix ## _anywhere(const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	return ditem->next && ditem->prev; \
} \
/* true if on *this* list (O(n) ring walk) */ \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
		const type *item) \
{ \
	return typesafe_dlist_member(&h->dh, &item->field.di); \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
478
/* note: heap currently caps out at 4G items */

#define HEAP_NARY 8U			/* 8-ary heap (fan-out per node) */
typedef uint32_t heap_index_i;

struct heap_item {
	uint32_t index;			/* position of this item in ->array */
};

struct heap_head {
	struct heap_item **array;	/* heap-ordered array of item ptrs */
	uint32_t arraysz, count;	/* allocated slots / items in use */
};

/* grow when full; shrink when well under half full (with 1024 slack to
 * avoid thrashing on small heaps)
 */
#define HEAP_RESIZE_TRESH_UP(h) \
	(h->hh.count + 1 >= h->hh.arraysz)
#define HEAP_RESIZE_TRESH_DN(h) \
	(h->hh.count == 0 || \
	 h->hh.arraysz - h->hh.count > (h->hh.count + 1024) / 2)

#define PREDECL_HEAP(prefix) \
struct prefix ## _head { struct heap_head hh; }; \
struct prefix ## _item { struct heap_item hi; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_HEAP(var) { }
505
/* DECLARE_HEAP - generate accessors for a binary-ish (HEAP_NARY) heap
 * keyed by cmpfn.  Iteration order (_first/_next) is array order, NOT
 * sorted order; only the top element (_pop/_const_first) is the minimum.
 */
#define DECLARE_HEAP(prefix, type, field, cmpfn) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	memset(h, 0, sizeof(*h)); \
} \
/* adapter: compare heap_items by delegating to the user's cmpfn */ \
macro_inline int prefix ## __cmp(const struct heap_item *a, \
		const struct heap_item *b) \
{ \
	return cmpfn(container_of(a, type, field.hi), \
			container_of(b, type, field.hi)); \
} \
/* always succeeds (returns NULL); heap allows duplicate keys */ \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	if (HEAP_RESIZE_TRESH_UP(h)) \
		typesafe_heap_resize(&h->hh, true); \
	typesafe_heap_pullup(&h->hh, h->hh.count, &item->field.hi, \
			prefix ## __cmp); \
	h->hh.count++; \
	return NULL; \
} \
/* remove "item"; the former last element is re-seated at its slot, \
 * sifting up or down depending on how it compares against "item" */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct heap_item *other; \
	uint32_t index = item->field.hi.index; \
	assert(h->hh.array[index] == &item->field.hi); \
	h->hh.count--; \
	other = h->hh.array[h->hh.count]; \
	if (cmpfn(container_of(other, type, field.hi), item) < 0) \
		typesafe_heap_pullup(&h->hh, index, other, prefix ## __cmp); \
	else \
		typesafe_heap_pushdown(&h->hh, index, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h)) \
		typesafe_heap_resize(&h->hh, false); \
	return item; \
} \
/* remove & return the minimum element, NULL if empty */ \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct heap_item *hitem, *other; \
	if (h->hh.count == 0) \
		return NULL; \
	hitem = h->hh.array[0]; \
	h->hh.count--; \
	other = h->hh.array[h->hh.count]; \
	typesafe_heap_pushdown(&h->hh, 0, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h)) \
		typesafe_heap_resize(&h->hh, false); \
	return container_of(hitem, type, field.hi); \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	if (h->hh.count == 0) \
		return NULL; \
	return container_of(h->hh.array[0], type, field.hi); \
} \
/* walks the backing array; order is arbitrary apart from element 0 */ \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	uint32_t idx = item->field.hi.index + 1; \
	if (idx >= h->hh.count) \
		return NULL; \
	return container_of(h->hh.array[idx], type, field.hi); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
/* O(1): the item's stored index must point back at itself */ \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
		const type *item) \
{ \
	uint32_t idx = item->field.hi.index; \
	if (idx >= h->hh.count) \
		return false; \
	return h->hh.array[idx] == &item->field.hi; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
595
/* out-of-line heap helpers (implemented in typesafe.c):
 * resize grows/shrinks ->array; pushdown/pullup sift "item" into place
 * starting at "index", updating each touched item's ->index.
 */
extern void typesafe_heap_resize(struct heap_head *head, bool grow);
extern void typesafe_heap_pushdown(struct heap_head *head, uint32_t index,
				   struct heap_item *item,
				   int (*cmpfn)(const struct heap_item *a,
						const struct heap_item *b));
extern void typesafe_heap_pullup(struct heap_head *head, uint32_t index,
				 struct heap_item *item,
				 int (*cmpfn)(const struct heap_item *a,
					      const struct heap_item *b));
605
/* single-linked list, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
struct ssort_item {
	struct ssort_item *next;	/* NULL-terminated, kept sorted */
};

struct ssort_head {
	struct ssort_item *first;
	size_t count;
};

/* use as:
 *
 * PREDECL_SORTLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SORTLIST(namelist, struct name, nlitem)
 */
#define _PREDECL_SORTLIST(prefix) \
struct prefix ## _head { struct ssort_head sh; }; \
struct prefix ## _item { struct ssort_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* all-zero head is a valid empty sorted list */
#define INIT_SORTLIST_UNIQ(var) { }
#define INIT_SORTLIST_NONUNIQ(var) { }

#define PREDECL_SORTLIST_UNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
#define PREDECL_SORTLIST_NONUNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
640
/* _DECLARE_SORTLIST - shared implementation for UNIQ and NONUNIQ sorted
 * lists.  cmpfn_uq is used for insertion/deletion ordering (for NONUNIQ
 * it includes a pointer tie-break so equal keys get a stable order);
 * cmpfn_nuq is the user's comparison, used for find_gteq/find_lt.
 */
#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
/* O(n) sorted insert; returns the existing duplicate (and does NOT \
 * insert) if cmpfn_uq reports equality, NULL on success */ \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **np = &h->sh.first; \
	int c = 1; \
	while (*np && (c = cmpfn_uq( \
			container_of(*np, type, field.si), item)) < 0) \
		np = &(*np)->next; \
	if (c == 0) \
		return container_of(*np, type, field.si); \
	item->field.si.next = *np; \
	*np = &item->field.si; \
	h->sh.count++; \
	return NULL; \
} \
/* first item >= "item" per cmpfn_nuq, NULL if none */ \
macro_inline const type *prefix ## _const_find_gteq( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	return container_of_null(sitem, type, field.si); \
} \
/* last item < "item" per cmpfn_nuq, NULL if none */ \
macro_inline const type *prefix ## _const_find_lt( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct ssort_item *prev = NULL, *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = (prev = sitem)->next; \
	return container_of_null(prev, type, field.si); \
} \
TYPESAFE_FIND_CMP(prefix, type) \
/* TODO: del_hint */ \
/* O(n); returns item if it was on the list, NULL otherwise */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **iter = &h->sh.first; \
	while (*iter && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (!*iter) \
		return NULL; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	item->field.si.next = NULL; \
	return item; \
} \
/* remove & return the smallest item, NULL if empty */ \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	if (!sitem) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	return container_of(sitem, type, field.si); \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct ssort_item *sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
733
/* unique sorted list: cmpfn doubles as both orderings, and exact-match
 * _find / identity-based _member are generated on top.
 */
#define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn) \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn); \
\
/* O(n) scan; NULL if no equal item (cmpval > 0 = walked past it) */ \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	if (!sitem || cmpval > 0) \
		return NULL; \
	return container_of(sitem, type, field.si); \
} \
TYPESAFE_FIND(prefix, type) \
TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
MACRO_REQUIRE_SEMICOLON() /* end */
752
/* non-unique sorted list: wrap the user's cmpfn with a pointer-value
 * tie-break so _add never sees "equal" items (which would make it reject
 * the insert) and duplicates get a stable, deterministic order.
 */
#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn) \
macro_inline int _ ## prefix ## _cmp(const type *a, const type *b) \
{ \
	int cmpval = cmpfn(a, b); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
_DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp); \
TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
MACRO_REQUIRE_SEMICOLON() /* end */
768
769
/* hash, "sorted" by hash value
 */

/* don't use these structs directly */
struct thash_item {
	struct thash_item *next;	/* bucket chain, sorted by hashval */
	uint32_t hashval;		/* cached hashfn() result */
};

struct thash_head {
	struct thash_item **entries;	/* bucket array, _HASH_SIZE slots;
					 * NULL while tabshift == 0 */
	uint32_t count;

	uint8_t tabshift;		/* encodes table size (see below);
					 * 0 = no table allocated */
	uint8_t minshift, maxshift;	/* resize bounds */
};

/* table size is 2^(tabshift-1); keys are the top tabshift-1 bits of the
 * 32-bit hash value, so chains within a bucket stay hashval-sorted.
 */
#define _HASH_SIZE(tabshift) \
	((1U << (tabshift)) >> 1)
#define HASH_SIZE(head) \
	_HASH_SIZE((head).tabshift)
#define _HASH_KEY(tabshift, val) \
	((val) >> (33 - (tabshift)))
#define HASH_KEY(head, val) \
	_HASH_KEY((head).tabshift, val)
#define HASH_GROW_THRESHOLD(head) \
	((head).count >= HASH_SIZE(head))
#define HASH_SHRINK_THRESHOLD(head) \
	((head).count <= (HASH_SIZE(head) - 1) / 2)

/* out-of-line rehash helpers (typesafe.c); honor min/maxshift */
extern void typesafe_hash_grow(struct thash_head *head);
extern void typesafe_hash_shrink(struct thash_head *head);
802
/* use as:
 *
 * PREDECL_HASH(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
 */
#define PREDECL_HASH(prefix) \
struct prefix ## _head { struct thash_head hh; }; \
struct prefix ## _item { struct thash_item hi; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* all-zero head is valid; the table is allocated on first _add */
#define INIT_HASH(var) { }
817
/* DECLARE_HASH - generate accessors for a hash table keyed by hashfn with
 * cmpfn resolving collisions.  Unique container: _add returns the existing
 * equal item (without inserting) if there is one, NULL on success.
 * Iteration order is bucket order, i.e. by hash value.
 */
#define DECLARE_HASH(prefix, type, field, cmpfn, hashfn) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
/* must be empty; drops minshift so the table can be freed entirely */ \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	h->hh.minshift = 0; \
	typesafe_hash_shrink(&h->hh); \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	/* count is bumped first so GROW_THRESHOLD sees the new size */ \
	h->hh.count++; \
	if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh)) \
		typesafe_hash_grow(&h->hh); \
\
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	item->field.hi.hashval = hval; \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	/* check the run of equal hashvals for a genuine duplicate */ \
	while (*np && (*np)->hashval == hval) { \
		if (cmpfn(container_of(*np, type, field.hi), item) == 0) { \
			h->hh.count--; \
			return container_of(*np, type, field.hi); \
		} \
		np = &(*np)->next; \
	} \
	item->field.hi.next = *np; \
	*np = &item->field.hi; \
	return NULL; \
} \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
		const type *item) \
{ \
	if (!h->hh.tabshift) \
		return NULL; \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	const struct thash_item *hitem = h->hh.entries[hbits]; \
	while (hitem && hitem->hashval < hval) \
		hitem = hitem->next; \
	while (hitem && hitem->hashval == hval) { \
		if (!cmpfn(container_of(hitem, type, field.hi), item)) \
			return container_of(hitem, type, field.hi); \
		hitem = hitem->next; \
	} \
	return NULL; \
} \
TYPESAFE_FIND(prefix, type) \
/* unlink by pointer identity; returns NULL if item isn't in the table */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	if (!h->hh.tabshift) \
		return NULL; \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	while (*np && *np != &item->field.hi && (*np)->hashval == hval) \
		np = &(*np)->next; \
	if (*np != &item->field.hi) \
		return NULL; \
	*np = item->field.hi.next; \
	item->field.hi.next = NULL; \
	h->hh.count--; \
	if (HASH_SHRINK_THRESHOLD(h->hh)) \
		typesafe_hash_shrink(&h->hh); \
	return item; \
} \
/* remove & return an arbitrary item (first non-empty bucket) */ \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) { \
			struct thash_item *hitem = h->hh.entries[i]; \
			h->hh.entries[i] = hitem->next; \
			h->hh.count--; \
			hitem->next = NULL; \
			if (HASH_SHRINK_THRESHOLD(h->hh)) \
				typesafe_hash_shrink(&h->hh); \
			return container_of(hitem, type, field.hi); \
		} \
	return NULL; \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct thash_item *hitem = &item->field.hi; \
	if (hitem->next) \
		return container_of(hitem->next, type, field.hi); \
	uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1; \
	for (; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
/* identity-based membership check: skip the hashval-sorted bucket prefix, \
 * then scan the run of entries with hashval <= item's for the pointer. \
 * (fixed: the for loop previously reset hitem to the bucket head, making \
 * the skip-ahead while loop dead code and rescanning the whole bucket.) */ \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
		const type *item) \
{ \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	const struct thash_item *hitem = h->hh.entries[hbits]; \
	while (hitem && hitem->hashval < hval) \
		hitem = hitem->next; \
	for (; hitem && hitem->hashval <= hval; \
			hitem = hitem->next) \
		if (hitem == &item->field.hi) \
			return true; \
	return false; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
950
/* skiplist, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
#define SKIPLIST_MAXDEPTH 16		/* max level count */
#define SKIPLIST_EMBED 4		/* levels stored inline in each item */
#define SKIPLIST_OVERFLOW (SKIPLIST_EMBED - 1)

struct sskip_item {
	/* next[0..SKIPLIST_OVERFLOW-1] are plain level pointers;
	 * next[SKIPLIST_OVERFLOW] either is a level pointer too, or (with
	 * its low bit tagged 1) points at a separately allocated
	 * sskip_overflow holding the deeper levels.
	 */
	struct sskip_item *next[SKIPLIST_EMBED];
};

struct sskip_overflow {
	struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
};

struct sskip_head {
	struct sskip_item hitem;	/* level heads (embedded part) */
	/* head always has all levels; its overflow is embedded here */
	struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
	size_t count;
};

/* use as:
 *
 * PREDECL_SKIPLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SKIPLIST(namelist, struct name, nlitem, cmpfunc)
 */
#define _PREDECL_SKIPLIST(prefix) \
struct prefix ## _head { struct sskip_head sh; }; \
struct prefix ## _item { struct sskip_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* NB: unlike _init(), these zero initializers do not set up the tagged
 * overflow pointer in hitem.next[SKIPLIST_OVERFLOW] (it cannot reference
 * "var" portably here) -- NOTE(review): confirm statically-initialized
 * skiplists are _init()ed before use.
 */
#define INIT_SKIPLIST_UNIQ(var) { }
#define INIT_SKIPLIST_NONUNIQ(var) { }
989
/* generates the implementation shared by the _UNIQ and _NONUNIQ skiplist
 * variants.  All functions are thin typed wrappers around the
 * typesafe_skiplist_* functions declared near the end of this file.
 *
 * cmpfn_nuq: key comparison, ties allowed -- used for ordered lookups.
 * cmpfn_uq:  total order (UNIQ: same as cmpfn_nuq; NONUNIQ: tie-broken
 *            by item identity) -- used by add/del to pin one exact item.
 */
#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq)            \
									       \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
	/* hook up the overflow level array via an LSB-tagged pointer */       \
	h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *)            \
		((uintptr_t)h->sh.overflow | 1);                               \
}                                                                              \
/* NB: does not free or unlink items; list should already be empty */          \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
/* insert item.  NOTE(review): presumably returns NULL on success and the     \
 * pre-existing duplicate when a UNIQ insert collides -- confirm in the        \
 * typesafe_skiplist_add implementation.                                       \
 */                                                                            \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct sskip_item *si;                                                 \
	si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq);         \
	return container_of_null(si, type, field.si);                          \
}                                                                              \
/* smallest on-list item >= *item (item need not be on the list), or NULL */   \
macro_inline const type *prefix ## _const_find_gteq(                           \
		const struct prefix##_head *h, const type *item)               \
{                                                                              \
	const struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh,   \
			&item->field.si, cmpfn_nuq);                           \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
/* largest on-list item < *item, or NULL */                                    \
macro_inline const type *prefix ## _const_find_lt(                             \
		const struct prefix##_head *h, const type *item)               \
{                                                                              \
	const struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh,     \
			&item->field.si, cmpfn_nuq);                           \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
TYPESAFE_FIND_CMP(prefix, type)                                                \
/* unlink a specific item; returns it, or NULL if it wasn't on the list */     \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct sskip_item *sitem = typesafe_skiplist_del(&h->sh,               \
			&item->field.si, cmpfn_uq);                            \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
/* unlink & return the first (lowest) item -- priority-queue style pop */      \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	struct sskip_item *sitem = typesafe_skiplist_pop(&h->sh);              \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
/* exchange the entire contents of two lists */                                \
macro_inline void prefix ## _swap_all(struct prefix##_head *a,                 \
				      struct prefix##_head *b)                 \
{                                                                              \
	struct prefix##_head tmp = *a;                                         \
	*a = *b;                                                               \
	*b = tmp;                                                              \
	/* the tagged overflow pointers must reference their own head's        \
	 * embedded array, so re-derive them after the struct swap             \
	 */                                                                    \
	a->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *)            \
		((uintptr_t)a->sh.overflow | 1);                               \
	b->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *)            \
		((uintptr_t)b->sh.overflow | 1);                               \
}                                                                              \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	/* level 0 links every item in sorted order */                         \
	const struct sskip_item *first = h->sh.hitem.next[0];                  \
	return container_of_null(first, type, field.si);                       \
}                                                                              \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	const struct sskip_item *next = item->field.si.next[0];                \
	return container_of_null(next, type, field.si);                        \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
/* NULL-tolerant variant of _next, for frr_each_safe */                        \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	struct sskip_item *next;                                               \
	next = item ? item->field.si.next[0] : NULL;                           \
	return container_of_null(next, type, field.si);                        \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->sh.count;                                                    \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
1068
/* unique-key skiplist: cmpfn must never report a tie between two items
 * that are both on the list, and is used unchanged both for lookups and
 * for add/del.  Adds exact-match _find() / _member() on top of the
 * common implementation.
 */
#define PREDECL_SKIPLIST_UNIQ(prefix)                                          \
	_PREDECL_SKIPLIST(prefix)
#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn)                      \
									       \
/* adapt the user's typed cmpfn to the embedded sskip_item level */            \
macro_inline int prefix ## __cmp(const struct sskip_item *a,                   \
		const struct sskip_item *b)                                    \
{                                                                              \
	return cmpfn(container_of(a, type, field.si),                          \
			container_of(b, type, field.si));                      \
}                                                                              \
/* exact-match lookup (possible only because keys are unique) */               \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h,  \
		const type *item)                                              \
{                                                                              \
	const struct sskip_item *sitem = typesafe_skiplist_find(&h->sh,        \
			&item->field.si, &prefix ## __cmp);                    \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
TYPESAFE_FIND(prefix, type)                                                    \
TYPESAFE_MEMBER_VIA_FIND(prefix, type)                                         \
									       \
_DECLARE_SKIPLIST(prefix, type, field,                                         \
		prefix ## __cmp, prefix ## __cmp);                             \
MACRO_REQUIRE_SEMICOLON() /* end */
1092
/* non-unique skiplist: several items may compare equal under cmpfn.
 * Lookups use cmpfn as-is; add/del use prefix##__cmp_uq, which breaks
 * ties by item identity so one specific item can be located.  There is
 * no exact-match _find() (which of the duplicates would it return?).
 */
#define PREDECL_SKIPLIST_NONUNIQ(prefix)                                       \
	_PREDECL_SKIPLIST(prefix)
#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn)                   \
									       \
/* adapt the user's typed cmpfn to the embedded sskip_item level */            \
macro_inline int prefix ## __cmp(const struct sskip_item *a,                   \
		const struct sskip_item *b)                                    \
{                                                                              \
	return cmpfn(container_of(a, type, field.si),                          \
			container_of(b, type, field.si));                      \
}                                                                              \
/* total order for add/del: user comparison first, then item identity */       \
macro_inline int prefix ## __cmp_uq(const struct sskip_item *a,                \
		const struct sskip_item *b)                                    \
{                                                                              \
	int cmpval = cmpfn(container_of(a, type, field.si),                    \
			container_of(b, type, field.si));                      \
	if (cmpval)                                                            \
		return cmpval;                                                 \
	/* tie-break via uintptr_t rather than raw pointer relationals:        \
	 * "<" / ">" on pointers to distinct objects is undefined behavior     \
	 * (C11 6.5.8p5); the integer comparison is well-defined and gives     \
	 * the same stable per-run ordering.                                   \
	 */                                                                    \
	if ((uintptr_t)a < (uintptr_t)b)                                       \
		return -1;                                                     \
	if ((uintptr_t)a > (uintptr_t)b)                                       \
		return 1;                                                      \
	return 0;                                                              \
}                                                                              \
									       \
_DECLARE_SKIPLIST(prefix, type, field,                                         \
		prefix ## __cmp, prefix ## __cmp_uq);                          \
TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn)                             \
MACRO_REQUIRE_SEMICOLON() /* end */
1121
1122
/* out-of-line backing implementation (see the accompanying typesafe.c).
 * These operate on the embedded sskip_head/sskip_item structs; the
 * DECLARE_ macros above wrap them in type-safe inline functions.  cmpfn
 * compares two embedded items.
 */
extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
		struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
/* exact-match lookup; only meaningful with a unique (tie-broken) cmpfn */
extern const struct sskip_item *typesafe_skiplist_find(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
/* first on-list item >= *item under cmpfn, or NULL */
extern const struct sskip_item *typesafe_skiplist_find_gteq(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
/* last on-list item < *item under cmpfn, or NULL */
extern const struct sskip_item *typesafe_skiplist_find_lt(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
/* unlink *item; returns it, or NULL if it was not on the list */
extern struct sskip_item *typesafe_skiplist_del(
		struct sskip_head *head, struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
/* unlink & return the first (lowest) item, NULL if the list is empty */
extern struct sskip_item *typesafe_skiplist_pop(struct sskip_head *head);
1147
1148 #ifdef __cplusplus
1149 }
1150 #endif
1151
1152 /* this needs to stay at the end because both files include each other.
1153 * the resolved order is typesafe.h before typerb.h
1154 */
1155 #include "typerb.h"
1156
1157 #endif /* _FRR_TYPESAFE_H */