/* lib/typesafe.h - typesafe intrusive container macros (FRR) */
1 /*
2 * Copyright (c) 2016-2019 David Lamparter, for NetDEF, Inc.
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #ifndef _FRR_TYPESAFE_H
18 #define _FRR_TYPESAFE_H
19
20 #include <stddef.h>
21 #include <stdint.h>
22 #include <stdbool.h>
23 #include <assert.h>
24 #include "compiler.h"
25
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29
/* generic iteration macros for all list-like types.
 *
 * frr_each      - plain iteration; the list must not be modified.
 * frr_each_safe - the current item may be deleted during iteration
 *                 (its successor is fetched before the loop body runs).
 * frr_each_from - resumable iteration starting at "from"; "from" always
 *                 holds the successor, so the current item may be freed
 *                 and the loop can later be re-entered at "from".
 */

#define frr_each(prefix, head, item) \
	for (item = prefix##_first(head); item; \
			item = prefix##_next(head, item))
#define frr_each_safe(prefix, head, item) \
	for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
			prefix##_next_safe(head, \
					(item = prefix##_first(head))); \
		item; \
		item = prefix##_safe, \
			prefix##_safe = prefix##_next_safe(head, prefix##_safe))
#define frr_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_next_safe(head, item); \
		item; \
		item = from, from = prefix##_next_safe(head, from))
46
47
/* non-const variants. these wrappers are the same for all the types, so
 * bundle them together here.  the _const_* functions are the actual
 * implementation; casting the const away here is fine since the caller
 * holds a non-const head.
 */
#define TYPESAFE_FIRST_NEXT(prefix, type)                                      \
macro_pure type *prefix ## _first(struct prefix##_head *h)                     \
{                                                                              \
	return (type *)prefix ## _const_first(h);                              \
}                                                                              \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item)          \
{                                                                              \
	return (type *)prefix ## _const_next(h, item);                         \
}                                                                              \
/* ... */
#define TYPESAFE_FIND(prefix, type)                                            \
macro_inline type *prefix ## _find(struct prefix##_head *h,                    \
				   const type *item)                           \
{                                                                              \
	return (type *)prefix ## _const_find(h, item);                         \
}                                                                              \
/* ... */
#define TYPESAFE_FIND_CMP(prefix, type)                                        \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h,                 \
				      const type *item)                        \
{                                                                              \
	return (type *)prefix ## _const_find_lt(h, item);                      \
}                                                                              \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h,               \
					const type *item)                      \
{                                                                              \
	return (type *)prefix ## _const_find_gteq(h, item);                    \
}                                                                              \
/* ... */
80
/* *_member via find - when there is no better membership check than find() */
#define TYPESAFE_MEMBER_VIA_FIND(prefix, type)                                 \
macro_inline bool prefix ## _member(struct prefix##_head *h,                   \
				    const type *item)                          \
{                                                                              \
	/* pointer identity: find() must return this exact item */             \
	return item == prefix ## _const_find(h, item);                         \
}                                                                              \
/* ... */

/* *_member via find_gteq - same for non-unique containers; every item
 * that compares equal must be checked for pointer identity */
#define TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn)                     \
macro_inline bool prefix ## _member(struct prefix##_head *h,                   \
				    const type *item)                          \
{                                                                              \
	const type *iter;                                                      \
	for (iter = prefix ## _const_find_gteq(h, item); iter;                 \
	     iter = prefix ## _const_next(h, iter)) {                          \
		if (iter == item)                                              \
			return true;                                           \
		/* past the run of equal items - not in this container */      \
		if (cmpfn(iter, item) > 0)                                     \
			break;                                                 \
	}                                                                      \
	return false;                                                          \
}                                                                              \
/* ... */
106
/* SWAP_ALL_SIMPLE = for containers where the items don't point back to the
 * head *AND* the head doesn't point to itself (= everything except LIST,
 * DLIST and SKIPLIST), just switch out the entire head
 */
#define TYPESAFE_SWAP_ALL_SIMPLE(prefix)                                       \
macro_inline void prefix ## _swap_all(struct prefix##_head *a,                 \
				      struct prefix##_head *b)                 \
{                                                                              \
	/* plain struct swap is sufficient - no self/back references */        \
	struct prefix##_head tmp = *a;                                         \
	*a = *b;                                                               \
	*b = tmp;                                                              \
}                                                                              \
/* ... */
120
/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 */

/* don't use these structs directly */
struct slist_item {
	struct slist_item *next;
};

struct slist_head {
	struct slist_item *first, **last_next;
	size_t count;
};

/* this replaces NULL as the value for ->next on the last item. */
extern struct slist_item typesafe_slist_sentinel;
#define _SLIST_LAST &typesafe_slist_sentinel

/* insert "item" at the position referenced by "pos" (pointer to the
 * predecessor's ->next, or to head->first for the front).  ->last_next
 * is kept up to date so tail insertion stays O(1).
 */
static inline void typesafe_list_add(struct slist_head *head,
		struct slist_item **pos, struct slist_item *item)
{
	bool tail_insert = (pos == head->last_next);

	item->next = *pos;
	*pos = item;
	if (tail_insert)
		head->last_next = &item->next;
	head->count++;
}

/* linear-scan membership check, implemented out-of-line */
extern bool typesafe_list_member(const struct slist_head *head,
				 const struct slist_item *item);
151
/* use as:
 *
 * PREDECL_LIST(namelist);
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_LIST(namelist, struct name, nlitem);
 */
#define PREDECL_LIST(prefix)                                                   \
struct prefix ## _head { struct slist_head sh; };                              \
struct prefix ## _item { struct slist_item si; };                              \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_LIST(var) { .sh = { .last_next = &var.sh.first, }, }

#define DECLARE_LIST(prefix, type, field)                                      \
                                                                               \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
	h->sh.first = _SLIST_LAST;                                             \
	h->sh.last_next = &h->sh.first;                                        \
}                                                                              \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item)     \
{                                                                              \
	typesafe_list_add(&h->sh, &h->sh.first, &item->field.si);              \
}                                                                              \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item)     \
{                                                                              \
	typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si);           \
}                                                                              \
macro_inline void prefix ## _add_after(struct prefix##_head *h,                \
		type *after, type *item)                                       \
{                                                                              \
	struct slist_item **nextp;                                             \
	nextp = after ? &after->field.si.next : &h->sh.first;                  \
	typesafe_list_add(&h->sh, nextp, &item->field.si);                     \
}                                                                              \
/* TODO: del_hint */                                                           \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	/* O(n): must walk the list to find the predecessor pointer */         \
	struct slist_item **iter = &h->sh.first;                               \
	while (*iter != _SLIST_LAST && *iter != &item->field.si)               \
		iter = &(*iter)->next;                                         \
	if (*iter == _SLIST_LAST)                                              \
		return NULL;                                                   \
	h->sh.count--;                                                         \
	*iter = item->field.si.next;                                           \
	if (item->field.si.next == _SLIST_LAST)                                \
		h->sh.last_next = iter;                                        \
	/* NULL ->next marks the item as off-list (cf. _anywhere) */           \
	item->field.si.next = NULL;                                            \
	return item;                                                           \
}                                                                              \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	struct slist_item *sitem = h->sh.first;                                \
	if (sitem == _SLIST_LAST)                                              \
		return NULL;                                                   \
	h->sh.count--;                                                         \
	h->sh.first = sitem->next;                                             \
	if (h->sh.first == _SLIST_LAST)                                        \
		h->sh.last_next = &h->sh.first;                                \
	sitem->next = NULL;                                                    \
	return container_of(sitem, type, field.si);                            \
}                                                                              \
macro_inline void prefix ## _swap_all(struct prefix##_head *a,                 \
				      struct prefix##_head *b)                 \
{                                                                              \
	struct prefix##_head tmp = *a;                                         \
	*a = *b;                                                               \
	*b = tmp;                                                              \
	/* an empty head's last_next points at its own ->first; after the */   \
	/* struct swap it points into the *other* head - re-point it */        \
	if (a->sh.last_next == &b->sh.first)                                   \
		a->sh.last_next = &a->sh.first;                                \
	if (b->sh.last_next == &a->sh.first)                                   \
		b->sh.last_next = &b->sh.first;                                \
}                                                                              \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	if (h->sh.first != _SLIST_LAST)                                        \
		return container_of(h->sh.first, type, field.si);              \
	return NULL;                                                           \
}                                                                              \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	const struct slist_item *sitem = &item->field.si;                      \
	if (sitem->next != _SLIST_LAST)                                        \
		return container_of(sitem->next, type, field.si);              \
	return NULL;                                                           \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	struct slist_item *sitem;                                              \
	if (!item)                                                             \
		return NULL;                                                   \
	sitem = &item->field.si;                                               \
	if (sitem->next != _SLIST_LAST)                                        \
		return container_of(sitem->next, type, field.si);              \
	return NULL;                                                           \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->sh.count;                                                    \
}                                                                              \
macro_pure bool prefix ## _anywhere(const type *item)                          \
{                                                                              \
	/* ->next is only NULL while not on any list (sentinel otherwise) */   \
	return item->field.si.next != NULL;                                    \
}                                                                              \
macro_pure bool prefix ## _member(const struct prefix##_head *h,               \
				  const type *item)                            \
{                                                                              \
	return typesafe_list_member(&h->sh, &item->field.si);                  \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
271
/* don't use these structs directly */
struct dlist_item {
	struct dlist_item *next;
	struct dlist_item *prev;
};

struct dlist_head {
	struct dlist_item hitem;
	size_t count;
};

/* insert "item" immediately after "prev" ("prev" may be the head's
 * embedded sentinel &head->hitem for insertion at the front).
 */
static inline void typesafe_dlist_add(struct dlist_head *head,
		struct dlist_item *prev, struct dlist_item *item)
{
	struct dlist_item *next = prev->next;

	item->prev = prev;
	item->next = next;
	prev->next = item;
	next->prev = item;
	head->count++;
}

/* exchange the contents of two lists.  The embedded sentinel items make
 * this more involved than a plain struct swap: after copying a head, the
 * first/last items (or the sentinel itself, when empty) must be
 * re-pointed at the *new* head's sentinel.
 */
static inline void typesafe_dlist_swap_all(struct dlist_head *a,
		struct dlist_head *b)
{
	struct dlist_head tmp = *a;

	*a = *b;
	if (a->count) {
		a->hitem.next->prev = &a->hitem;
		a->hitem.prev->next = &a->hitem;
	} else {
		a->hitem.next = &a->hitem;
		a->hitem.prev = &a->hitem;
	}

	*b = tmp;
	if (b->count) {
		b->hitem.next->prev = &b->hitem;
		b->hitem.prev->next = &b->hitem;
	} else {
		b->hitem.next = &b->hitem;
		b->hitem.prev = &b->hitem;
	}
}
320
extern bool typesafe_dlist_member(const struct dlist_head *head,
				  const struct dlist_item *item);

/* double-linked list, for fast item deletion
 */
#define PREDECL_DLIST(prefix)                                                  \
struct prefix ## _head { struct dlist_head dh; };                              \
struct prefix ## _item { struct dlist_item di; };                              \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_DLIST(var) { .dh = { \
	.hitem = { &var.dh.hitem, &var.dh.hitem }, }, }

#define DECLARE_DLIST(prefix, type, field)                                     \
                                                                               \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
	/* sentinel pointing at itself == empty list */                        \
	h->dh.hitem.prev = &h->dh.hitem;                                       \
	h->dh.hitem.next = &h->dh.hitem;                                       \
}                                                                              \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item)     \
{                                                                              \
	typesafe_dlist_add(&h->dh, &h->dh.hitem, &item->field.di);             \
}                                                                              \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item)     \
{                                                                              \
	typesafe_dlist_add(&h->dh, h->dh.hitem.prev, &item->field.di);         \
}                                                                              \
macro_inline void prefix ## _add_after(struct prefix##_head *h,                \
		type *after, type *item)                                       \
{                                                                              \
	struct dlist_item *prev;                                               \
	prev = after ? &after->field.di : &h->dh.hitem;                        \
	typesafe_dlist_add(&h->dh, prev, &item->field.di);                     \
}                                                                              \
/* O(1) deletion is the point of using a DLIST */                              \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct dlist_item *ditem = &item->field.di;                            \
	ditem->prev->next = ditem->next;                                       \
	ditem->next->prev = ditem->prev;                                       \
	h->dh.count--;                                                         \
	/* cleared pointers make _anywhere return false */                     \
	ditem->prev = ditem->next = NULL;                                      \
	return item;                                                           \
}                                                                              \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	struct dlist_item *ditem = h->dh.hitem.next;                           \
	if (ditem == &h->dh.hitem)                                             \
		return NULL;                                                   \
	ditem->prev->next = ditem->next;                                       \
	ditem->next->prev = ditem->prev;                                       \
	h->dh.count--;                                                         \
	ditem->prev = ditem->next = NULL;                                      \
	return container_of(ditem, type, field.di);                            \
}                                                                              \
macro_inline void prefix ## _swap_all(struct prefix##_head *a,                 \
				      struct prefix##_head *b)                 \
{                                                                              \
	typesafe_dlist_swap_all(&a->dh, &b->dh);                               \
}                                                                              \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	const struct dlist_item *ditem = h->dh.hitem.next;                     \
	if (ditem == &h->dh.hitem)                                             \
		return NULL;                                                   \
	return container_of(ditem, type, field.di);                            \
}                                                                              \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	const struct dlist_item *ditem = &item->field.di;                      \
	if (ditem->next == &h->dh.hitem)                                       \
		return NULL;                                                   \
	return container_of(ditem->next, type, field.di);                      \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	if (!item)                                                             \
		return NULL;                                                   \
	return prefix ## _next(h, item);                                       \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->dh.count;                                                    \
}                                                                              \
macro_pure bool prefix ## _anywhere(const type *item)                          \
{                                                                              \
	const struct dlist_item *ditem = &item->field.di;                      \
	return ditem->next && ditem->prev;                                     \
}                                                                              \
macro_pure bool prefix ## _member(const struct prefix##_head *h,               \
				  const type *item)                            \
{                                                                              \
	return typesafe_dlist_member(&h->dh, &item->field.di);                 \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
423
/* d-ary heap over a flat array of item pointers (pop yields the item
 * that compares smallest per the user's cmpfn - see DECLARE_HEAP).
 * note: heap currently caps out at 4G items */

#define HEAP_NARY 8U
typedef uint32_t heap_index_i;

struct heap_item {
	/* position of this item in heap_head->array; maintained by the
	 * pullup/pushdown helpers in typesafe.c */
	uint32_t index;
};

struct heap_head {
	struct heap_item **array;
	uint32_t arraysz, count;
};

/* grow when the array is about to be full... */
#define HEAP_RESIZE_TRESH_UP(h) \
	(h->hh.count + 1 >= h->hh.arraysz)
/* ...shrink when more than roughly half (plus some slack) is unused */
#define HEAP_RESIZE_TRESH_DN(h) \
	(h->hh.count == 0 || \
	 h->hh.arraysz - h->hh.count > (h->hh.count + 1024) / 2)

#define PREDECL_HEAP(prefix)                                                   \
struct prefix ## _head { struct heap_head hh; };                               \
struct prefix ## _item { struct heap_item hi; };                               \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_HEAP(var) { }
450
#define DECLARE_HEAP(prefix, type, field, cmpfn)                               \
                                                                               \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	assert(h->hh.count == 0);                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
/* glue: compare the embedded heap_items through the user's cmpfn */           \
macro_inline int prefix ## __cmp(const struct heap_item *a,                    \
				 const struct heap_item *b)                    \
{                                                                              \
	return cmpfn(container_of(a, type, field.hi),                          \
		     container_of(b, type, field.hi));                         \
}                                                                              \
/* always returns NULL - duplicates are allowed in a heap */                   \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item)         \
{                                                                              \
	if (HEAP_RESIZE_TRESH_UP(h))                                           \
		typesafe_heap_resize(&h->hh, true);                            \
	typesafe_heap_pullup(&h->hh, h->hh.count, &item->field.hi,             \
			     prefix ## __cmp);                                 \
	h->hh.count++;                                                         \
	return NULL;                                                           \
}                                                                              \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct heap_item *other;                                               \
	uint32_t index = item->field.hi.index;                                 \
	assert(h->hh.array[index] == &item->field.hi);                         \
	h->hh.count--;                                                         \
	/* the last array slot fills the hole left by "item"; sift it in */    \
	/* whichever direction the heap condition is violated */               \
	other = h->hh.array[h->hh.count];                                      \
	if (cmpfn(container_of(other, type, field.hi), item) < 0)              \
		typesafe_heap_pullup(&h->hh, index, other, prefix ## __cmp);   \
	else                                                                   \
		typesafe_heap_pushdown(&h->hh, index, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h))                                           \
		typesafe_heap_resize(&h->hh, false);                           \
	return item;                                                           \
}                                                                              \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	struct heap_item *hitem, *other;                                       \
	if (h->hh.count == 0)                                                  \
		return NULL;                                                   \
	hitem = h->hh.array[0];                                                \
	h->hh.count--;                                                         \
	other = h->hh.array[h->hh.count];                                      \
	typesafe_heap_pushdown(&h->hh, 0, other, prefix ## __cmp);             \
	if (HEAP_RESIZE_TRESH_DN(h))                                           \
		typesafe_heap_resize(&h->hh, false);                           \
	return container_of(hitem, type, field.hi);                            \
}                                                                              \
TYPESAFE_SWAP_ALL_SIMPLE(prefix)                                               \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	if (h->hh.count == 0)                                                  \
		return NULL;                                                   \
	return container_of(h->hh.array[0], type, field.hi);                   \
}                                                                              \
/* iteration is in array order, i.e. only the first item is "sorted" */        \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	uint32_t idx = item->field.hi.index + 1;                               \
	if (idx >= h->hh.count)                                                \
		return NULL;                                                   \
	return container_of(h->hh.array[idx], type, field.hi);                 \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	if (!item)                                                             \
		return NULL;                                                   \
	return prefix ## _next(h, item);                                       \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->hh.count;                                                    \
}                                                                              \
macro_pure bool prefix ## _member(const struct prefix##_head *h,               \
				  const type *item)                            \
{                                                                              \
	uint32_t idx = item->field.hi.index;                                   \
	if (idx >= h->hh.count)                                                \
		return false;                                                  \
	return h->hh.array[idx] == &item->field.hi;                            \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */

extern void typesafe_heap_resize(struct heap_head *head, bool grow);
extern void typesafe_heap_pushdown(struct heap_head *head, uint32_t index,
				   struct heap_item *item,
				   int (*cmpfn)(const struct heap_item *a,
						const struct heap_item *b));
extern void typesafe_heap_pullup(struct heap_head *head, uint32_t index,
				 struct heap_item *item,
				 int (*cmpfn)(const struct heap_item *a,
					      const struct heap_item *b));
550
/* single-linked list, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
struct ssort_item {
	struct ssort_item *next;
};

struct ssort_head {
	struct ssort_item *first;
	size_t count;
};

/* use as:
 *
 * PREDECL_SORTLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SORTLIST(namelist, struct name, nlitem)
 */
#define _PREDECL_SORTLIST(prefix)                                              \
struct prefix ## _head { struct ssort_head sh; };                              \
struct prefix ## _item { struct ssort_item si; };                              \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_SORTLIST_UNIQ(var) { }
#define INIT_SORTLIST_NONUNIQ(var) { }

#define PREDECL_SORTLIST_UNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
#define PREDECL_SORTLIST_NONUNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
585
/* cmpfn_uq is used for add/insert and must be a total order (the NONUNIQ
 * variant breaks ties by item address);  cmpfn_nuq is the user's plain
 * comparison, used for the find_gteq / find_lt range lookups.
 */
#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq)            \
                                                                               \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
/* returns NULL on success, or the already-present equal item */               \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct ssort_item **np = &h->sh.first;                                 \
	int c = 1;                                                             \
	while (*np && (c = cmpfn_uq(                                           \
			container_of(*np, type, field.si), item)) < 0)         \
		np = &(*np)->next;                                             \
	if (c == 0)                                                            \
		return container_of(*np, type, field.si);                      \
	item->field.si.next = *np;                                             \
	*np = &item->field.si;                                                 \
	h->sh.count++;                                                         \
	return NULL;                                                           \
}                                                                              \
/* first item comparing greater-than-or-equal, NULL if none */                 \
macro_inline const type *prefix ## _const_find_gteq(                           \
		const struct prefix##_head *h, const type *item)               \
{                                                                              \
	const struct ssort_item *sitem = h->sh.first;                          \
	int cmpval = 0;                                                        \
	while (sitem && (cmpval = cmpfn_nuq(                                   \
			container_of(sitem, type, field.si), item)) < 0)       \
		sitem = sitem->next;                                           \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
/* last item comparing strictly less-than, NULL if none */                     \
macro_inline const type *prefix ## _const_find_lt(                             \
		const struct prefix##_head *h, const type *item)               \
{                                                                              \
	const struct ssort_item *prev = NULL, *sitem = h->sh.first;            \
	int cmpval = 0;                                                        \
	while (sitem && (cmpval = cmpfn_nuq(                                   \
			container_of(sitem, type, field.si), item)) < 0)       \
		sitem = (prev = sitem)->next;                                  \
	return container_of_null(prev, type, field.si);                        \
}                                                                              \
TYPESAFE_FIND_CMP(prefix, type)                                                \
/* TODO: del_hint */                                                           \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	/* O(n): linear scan for the predecessor pointer */                    \
	struct ssort_item **iter = &h->sh.first;                               \
	while (*iter && *iter != &item->field.si)                              \
		iter = &(*iter)->next;                                         \
	if (!*iter)                                                            \
		return NULL;                                                   \
	h->sh.count--;                                                         \
	*iter = item->field.si.next;                                           \
	item->field.si.next = NULL;                                            \
	return item;                                                           \
}                                                                              \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	struct ssort_item *sitem = h->sh.first;                                \
	if (!sitem)                                                            \
		return NULL;                                                   \
	h->sh.count--;                                                         \
	h->sh.first = sitem->next;                                             \
	return container_of(sitem, type, field.si);                            \
}                                                                              \
TYPESAFE_SWAP_ALL_SIMPLE(prefix)                                               \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	return container_of_null(h->sh.first, type, field.si);                 \
}                                                                              \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	const struct ssort_item *sitem = &item->field.si;                      \
	return container_of_null(sitem->next, type, field.si);                 \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	struct ssort_item *sitem;                                              \
	if (!item)                                                             \
		return NULL;                                                   \
	sitem = &item->field.si;                                               \
	return container_of_null(sitem->next, type, field.si);                 \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->sh.count;                                                    \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
678
#define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn)                      \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn);                  \
                                                                               \
/* exact-match lookup; O(n) scan, stops early on the sorted order */           \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h,  \
					       const type *item)               \
{                                                                              \
	const struct ssort_item *sitem = h->sh.first;                          \
	int cmpval = 0;                                                        \
	while (sitem && (cmpval = cmpfn(                                       \
			container_of(sitem, type, field.si), item)) < 0)       \
		sitem = sitem->next;                                           \
	if (!sitem || cmpval > 0)                                              \
		return NULL;                                                   \
	return container_of(sitem, type, field.si);                            \
}                                                                              \
TYPESAFE_FIND(prefix, type)                                                    \
TYPESAFE_MEMBER_VIA_FIND(prefix, type)                                         \
MACRO_REQUIRE_SEMICOLON() /* end */
697
#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn)                   \
/* break cmpfn ties by item address so add/del address a specific item */      \
macro_inline int _ ## prefix ## _cmp(const type *a, const type *b)             \
{                                                                              \
	int cmpval = cmpfn(a, b);                                              \
	if (cmpval)                                                            \
		return cmpval;                                                 \
	if (a < b)                                                             \
		return -1;                                                     \
	if (a > b)                                                             \
		return 1;                                                      \
	return 0;                                                              \
}                                                                              \
_DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp);            \
TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn)                             \
MACRO_REQUIRE_SEMICOLON() /* end */
713
714
/* hash, "sorted" by hash value
 */

/* don't use these structs directly */
struct thash_item {
	struct thash_item *next;
	uint32_t hashval;
};

struct thash_head {
	struct thash_item **entries;
	uint32_t count;

	/* tabshift == 0 means no table is allocated; otherwise the table
	 * has _HASH_SIZE(tabshift) buckets.  min/maxshift bound resizing. */
	uint8_t tabshift;
	uint8_t minshift, maxshift;
};

#define _HASH_SIZE(tabshift) \
	((1U << (tabshift)) >> 1)
#define HASH_SIZE(head) \
	_HASH_SIZE((head).tabshift)
/* bucket index = top (tabshift - 1) bits of the 32-bit hash value */
#define _HASH_KEY(tabshift, val) \
	((val) >> (33 - (tabshift)))
#define HASH_KEY(head, val) \
	_HASH_KEY((head).tabshift, val)
#define HASH_GROW_THRESHOLD(head) \
	((head).count >= HASH_SIZE(head))
#define HASH_SHRINK_THRESHOLD(head) \
	((head).count <= (HASH_SIZE(head) - 1) / 2)

extern void typesafe_hash_grow(struct thash_head *head);
extern void typesafe_hash_shrink(struct thash_head *head);
747
/* use as:
 *
 * PREDECL_HASH(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
 */
#define PREDECL_HASH(prefix)                                                   \
struct prefix ## _head { struct thash_head hh; };                              \
struct prefix ## _item { struct thash_item hi; };                              \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_HASH(var) { }
762
#define DECLARE_HASH(prefix, type, field, cmpfn, hashfn)                       \
                                                                               \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	assert(h->hh.count == 0);                                              \
	h->hh.minshift = 0;                                                    \
	typesafe_hash_shrink(&h->hh);                                          \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
/* returns NULL on success, or the already-present item that compares */       \
/* equal.  buckets are kept ordered by hashval. */                             \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item)         \
{                                                                              \
	h->hh.count++;                                                         \
	if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh))                     \
		typesafe_hash_grow(&h->hh);                                    \
									       \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval);           \
	item->field.hi.hashval = hval;                                         \
	struct thash_item **np = &h->hh.entries[hbits];                        \
	while (*np && (*np)->hashval < hval)                                   \
		np = &(*np)->next;                                             \
	if (*np && cmpfn(container_of(*np, type, field.hi), item) == 0) {      \
		h->hh.count--;                                                 \
		return container_of(*np, type, field.hi);                      \
	}                                                                      \
	item->field.hi.next = *np;                                             \
	*np = &item->field.hi;                                                 \
	return NULL;                                                           \
}                                                                              \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h,  \
					       const type *item)               \
{                                                                              \
	if (!h->hh.tabshift)                                                   \
		return NULL;                                                   \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval);           \
	const struct thash_item *hitem = h->hh.entries[hbits];                 \
	while (hitem && hitem->hashval < hval)                                 \
		hitem = hitem->next;                                           \
	while (hitem && hitem->hashval == hval) {                              \
		if (!cmpfn(container_of(hitem, type, field.hi), item))         \
			return container_of(hitem, type, field.hi);            \
		hitem = hitem->next;                                           \
	}                                                                      \
	return NULL;                                                           \
}                                                                              \
TYPESAFE_FIND(prefix, type)                                                    \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	if (!h->hh.tabshift)                                                   \
		return NULL;                                                   \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	struct thash_item **np = &h->hh.entries[hbits];                        \
	while (*np && (*np)->hashval < hval)                                   \
		np = &(*np)->next;                                             \
	while (*np && *np != &item->field.hi && (*np)->hashval == hval)        \
		np = &(*np)->next;                                             \
	if (*np != &item->field.hi)                                            \
		return NULL;                                                   \
	*np = item->field.hi.next;                                             \
	item->field.hi.next = NULL;                                            \
	h->hh.count--;                                                         \
	if (HASH_SHRINK_THRESHOLD(h->hh))                                      \
		typesafe_hash_shrink(&h->hh);                                  \
	return item;                                                           \
}                                                                              \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	uint32_t i;                                                            \
	for (i = 0; i < HASH_SIZE(h->hh); i++)                                 \
		if (h->hh.entries[i]) {                                        \
			struct thash_item *hitem = h->hh.entries[i];           \
			h->hh.entries[i] = hitem->next;                        \
			h->hh.count--;                                         \
			hitem->next = NULL;                                    \
			if (HASH_SHRINK_THRESHOLD(h->hh))                      \
				typesafe_hash_shrink(&h->hh);                  \
			return container_of(hitem, type, field.hi);            \
		}                                                              \
	return NULL;                                                           \
}                                                                              \
TYPESAFE_SWAP_ALL_SIMPLE(prefix)                                               \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	uint32_t i;                                                            \
	for (i = 0; i < HASH_SIZE(h->hh); i++)                                 \
		if (h->hh.entries[i])                                          \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL;                                                           \
}                                                                              \
/* iteration order: bucket by bucket, hashval-sorted within a bucket */        \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	const struct thash_item *hitem = &item->field.hi;                      \
	if (hitem->next)                                                       \
		return container_of(hitem->next, type, field.hi);              \
	uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1;                      \
	for (; i < HASH_SIZE(h->hh); i++)                                      \
		if (h->hh.entries[i])                                          \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL;                                                           \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	if (!item)                                                             \
		return NULL;                                                   \
	return prefix ## _next(h, item);                                       \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->hh.count;                                                    \
}                                                                              \
/* pointer-identity membership check, using the cached hashval */              \
macro_pure bool prefix ## _member(const struct prefix##_head *h,               \
				  const type *item)                            \
{                                                                              \
	/* guard: no table allocated yet -> nothing can be a member */         \
	if (!h->hh.tabshift)                                                   \
		return false;                                                  \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	const struct thash_item *hitem = h->hh.entries[hbits];                 \
	/* buckets are hashval-ordered; skip entries below ours... */          \
	while (hitem && hitem->hashval < hval)                                 \
		hitem = hitem->next;                                           \
	/* ...then continue from there (don't rescan from the bucket head; */  \
	/* a matching node necessarily has hashval == hval) */                 \
	for (; hitem && hitem->hashval <= hval; hitem = hitem->next)           \
		if (hitem == &item->field.hi)                                  \
			return true;                                           \
	return false;                                                          \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
892
/* skiplist, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
#define SKIPLIST_MAXDEPTH 16
#define SKIPLIST_EMBED 4
#define SKIPLIST_OVERFLOW (SKIPLIST_EMBED - 1)

struct sskip_item {
	/* the last embedded slot can hold a tagged (low bit set) pointer
	 * to an overflow array for levels beyond SKIPLIST_OVERFLOW - the
	 * head is set up this way in _init; item-side overflow handling
	 * lives in typesafe.c (NOTE(review): confirm there) */
	struct sskip_item *next[SKIPLIST_EMBED];
};

struct sskip_overflow {
	struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
};

struct sskip_head {
	struct sskip_item hitem;
	struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
	size_t count;
};

/* use as:
 *
 * PREDECL_SKIPLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SKIPLIST(namelist, struct name, nlitem, cmpfunc)
 */
#define _PREDECL_SKIPLIST(prefix)                                              \
struct prefix ## _head { struct sskip_head sh; };                              \
struct prefix ## _item { struct sskip_item si; };                              \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_SKIPLIST_UNIQ(var) { }
#define INIT_SKIPLIST_NONUNIQ(var) { }
931
#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq)            \
                                                                               \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
	/* low bit set = slot points at this head's overflow array */          \
	h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *)            \
		((uintptr_t)h->sh.overflow | 1);                               \
}                                                                              \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
/* returns NULL on success, or the item that blocked the insert */             \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct sskip_item *si;                                                 \
	si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq);         \
	return container_of_null(si, type, field.si);                          \
}                                                                              \
macro_inline const type *prefix ## _const_find_gteq(                           \
		const struct prefix##_head *h, const type *item)               \
{                                                                              \
	const struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh,   \
			&item->field.si, cmpfn_nuq);                           \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
macro_inline const type *prefix ## _const_find_lt(                             \
		const struct prefix##_head *h, const type *item)               \
{                                                                              \
	const struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh,     \
			&item->field.si, cmpfn_nuq);                           \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
TYPESAFE_FIND_CMP(prefix, type)                                                \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct sskip_item *sitem = typesafe_skiplist_del(&h->sh,               \
			&item->field.si, cmpfn_uq);                            \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	struct sskip_item *sitem = typesafe_skiplist_pop(&h->sh);              \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
macro_inline void prefix ## _swap_all(struct prefix##_head *a,                 \
				      struct prefix##_head *b)                 \
{                                                                              \
	struct prefix##_head tmp = *a;                                         \
	*a = *b;                                                               \
	*b = tmp;                                                              \
	/* re-tag the overflow pointers to reference their own head */         \
	a->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *)            \
		((uintptr_t)a->sh.overflow | 1);                               \
	b->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *)            \
		((uintptr_t)b->sh.overflow | 1);                               \
}                                                                              \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	const struct sskip_item *first = h->sh.hitem.next[0];                  \
	return container_of_null(first, type, field.si);                       \
}                                                                              \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
					     const type *item)                 \
{                                                                              \
	/* level 0 is a plain sorted singly-linked list */                     \
	const struct sskip_item *next = item->field.si.next[0];                \
	return container_of_null(next, type, field.si);                        \
}                                                                              \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	struct sskip_item *next;                                               \
	next = item ? item->field.si.next[0] : NULL;                           \
	return container_of_null(next, type, field.si);                        \
}                                                                              \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->sh.count;                                                    \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
1010
#define PREDECL_SKIPLIST_UNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn)                      \
                                                                               \
/* glue: compare the embedded sskip_items through the user's cmpfn */          \
macro_inline int prefix ## __cmp(const struct sskip_item *a,                   \
				 const struct sskip_item *b)                   \
{                                                                              \
	return cmpfn(container_of(a, type, field.si),                          \
		     container_of(b, type, field.si));                         \
}                                                                              \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h,  \
					       const type *item)               \
{                                                                              \
	const struct sskip_item *sitem = typesafe_skiplist_find(&h->sh,        \
			&item->field.si, &prefix ## __cmp);                    \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
TYPESAFE_FIND(prefix, type)                                                    \
TYPESAFE_MEMBER_VIA_FIND(prefix, type)                                         \
                                                                               \
_DECLARE_SKIPLIST(prefix, type, field,                                         \
		  prefix ## __cmp, prefix ## __cmp);                           \
MACRO_REQUIRE_SEMICOLON() /* end */
1034
#define PREDECL_SKIPLIST_NONUNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn)                   \
                                                                               \
macro_inline int prefix ## __cmp(const struct sskip_item *a,                   \
				 const struct sskip_item *b)                   \
{                                                                              \
	return cmpfn(container_of(a, type, field.si),                          \
		     container_of(b, type, field.si));                         \
}                                                                              \
/* break cmpfn ties by item address so add/del address a specific item */      \
macro_inline int prefix ## __cmp_uq(const struct sskip_item *a,                \
				    const struct sskip_item *b)                \
{                                                                              \
	int cmpval = cmpfn(container_of(a, type, field.si),                    \
			   container_of(b, type, field.si));                   \
	if (cmpval)                                                            \
		return cmpval;                                                 \
	if (a < b)                                                             \
		return -1;                                                     \
	if (a > b)                                                             \
		return 1;                                                      \
	return 0;                                                              \
}                                                                              \
                                                                               \
_DECLARE_SKIPLIST(prefix, type, field,                                         \
		  prefix ## __cmp, prefix ## __cmp_uq);                        \
TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn)                             \
MACRO_REQUIRE_SEMICOLON() /* end */
1063
1064
/* out-of-line implementations of the skiplist primitives */
extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
		struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_gteq(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_lt(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_del(
		struct sskip_head *head, struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_pop(struct sskip_head *head);
1089
1090 #ifdef __cplusplus
1091 }
1092 #endif
1093
1094 /* this needs to stay at the end because both files include each other.
1095 * the resolved order is typesafe.h before typerb.h
1096 */
1097 #include "typerb.h"
1098
1099 #endif /* _FRR_TYPESAFE_H */