/*
 * Copyright (c) 2016-2019  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _FRR_TYPESAFE_H
#define _FRR_TYPESAFE_H

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#include "compiler.h"

#ifdef __cplusplus
extern "C" {
#endif

/* generic macros for all list-like types */

#define frr_each(prefix, head, item) \
        for (item = prefix##_first(head); item; \
             item = prefix##_next(head, item))
#define frr_each_safe(prefix, head, item) \
        for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
                        prefix##_next_safe(head, \
                                           (item = prefix##_first(head))); \
             item; \
             item = prefix##_safe, \
                        prefix##_safe = prefix##_next_safe(head, prefix##_safe))
#define frr_each_from(prefix, head, item, from) \
        for (item = from, from = prefix##_next_safe(head, item); \
             item; \
             item = from, from = prefix##_next_safe(head, from))
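
/* sketch of typical iteration, assuming a list type "itemlist" with member
 * "litem" declared as shown further below; process() and expired() are
 * placeholders.  frr_each_safe permits deleting the current item while
 * iterating; frr_each does not.
 *
 *   struct item *i;
 *
 *   frr_each (itemlist, &head, i)          // plain iteration
 *           process(i);
 *
 *   frr_each_safe (itemlist, &head, i) {   // current item may be deleted
 *           if (expired(i))
 *                   itemlist_del(&head, i);
 *   }
 */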


/* non-const variants. these wrappers are the same for all the types, so
 * bundle them together here.
 */
#define TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
        return (type *)prefix ## _const_first(h); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
        return (type *)prefix ## _const_next(h, item); \
} \
/* ... */
#define TYPESAFE_FIND(prefix, type) \
macro_inline type *prefix ## _find(struct prefix##_head *h, \
                                   const type *item) \
{ \
        return (type *)prefix ## _const_find(h, item); \
} \
/* ... */
#define TYPESAFE_FIND_CMP(prefix, type) \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
                                      const type *item) \
{ \
        return (type *)prefix ## _const_find_lt(h, item); \
} \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
                                        const type *item) \
{ \
        return (type *)prefix ## _const_find_gteq(h, item); \
} \
/* ... */

/* SWAP_ALL_SIMPLE = for containers where the items don't point back to the
 * head *AND* the head doesn't point to itself (i.e. everything except LIST,
 * DLIST and SKIPLIST): just switch out the entire head
 */
#define TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
                                      struct prefix##_head *b) \
{ \
        struct prefix##_head tmp = *a; \
        *a = *b; \
        *b = tmp; \
} \
/* ... */

/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 */

/* don't use these structs directly */
struct slist_item {
        struct slist_item *next;
};

struct slist_head {
        struct slist_item *first, **last_next;
        size_t count;
};

static inline void typesafe_list_add(struct slist_head *head,
                struct slist_item **pos, struct slist_item *item)
{
        item->next = *pos;
        *pos = item;
        if (pos == head->last_next)
                head->last_next = &item->next;
        head->count++;
}

/* use as:
 *
 * PREDECL_LIST(namelist);
 * struct name {
 *         struct namelist_item nlitem;
 * };
 * DECLARE_LIST(namelist, struct name, nlitem);
 */
#define PREDECL_LIST(prefix) \
struct prefix ## _head { struct slist_head sh; }; \
struct prefix ## _item { struct slist_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_LIST(var) { .sh = { .last_next = &var.sh.first, }, }

#define DECLARE_LIST(prefix, type, field) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
        memset(h, 0, sizeof(*h)); \
        h->sh.last_next = &h->sh.first; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
        memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
        typesafe_list_add(&h->sh, &h->sh.first, &item->field.si); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
        typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
                type *after, type *item) \
{ \
        struct slist_item **nextp; \
        nextp = after ? &after->field.si.next : &h->sh.first; \
        typesafe_list_add(&h->sh, nextp, &item->field.si); \
} \
/* TODO: del_hint */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
        struct slist_item **iter = &h->sh.first; \
        while (*iter && *iter != &item->field.si) \
                iter = &(*iter)->next; \
        if (!*iter) \
                return NULL; \
        h->sh.count--; \
        *iter = item->field.si.next; \
        if (!item->field.si.next) \
                h->sh.last_next = iter; \
        return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
        struct slist_item *sitem = h->sh.first; \
        if (!sitem) \
                return NULL; \
        h->sh.count--; \
        h->sh.first = sitem->next; \
        if (h->sh.first == NULL) \
                h->sh.last_next = &h->sh.first; \
        return container_of(sitem, type, field.si); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
                struct prefix##_head *b) \
{ \
        struct prefix##_head tmp = *a; \
        *a = *b; \
        *b = tmp; \
        if (a->sh.last_next == &b->sh.first) \
                a->sh.last_next = &a->sh.first; \
        if (b->sh.last_next == &a->sh.first) \
                b->sh.last_next = &b->sh.first; \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
        return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
                const type *item) \
{ \
        const struct slist_item *sitem = &item->field.si; \
        return container_of_null(sitem->next, type, field.si); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
        struct slist_item *sitem; \
        if (!item) \
                return NULL; \
        sitem = &item->field.si; \
        return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
        return h->sh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
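
/* sketch of FIFO-queue usage; "itemlist" and "struct item" are illustrative
 * names, error handling omitted.  add_tail and pop are both O(1) thanks to
 * the head's last_next pointer.
 *
 *   PREDECL_LIST(itemlist);
 *
 *   struct item {
 *           int value;
 *           struct itemlist_item litem;
 *   };
 *
 *   DECLARE_LIST(itemlist, struct item, litem);
 *
 *   struct itemlist_head head;
 *   itemlist_init(&head);
 *   itemlist_add_tail(&head, item);         // enqueue, O(1)
 *   struct item *i = itemlist_pop(&head);   // dequeue, NULL when empty
 */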

/* don't use these structs directly */
struct dlist_item {
        struct dlist_item *next;
        struct dlist_item *prev;
};

struct dlist_head {
        struct dlist_item hitem;
        size_t count;
};

static inline void typesafe_dlist_add(struct dlist_head *head,
                struct dlist_item *prev, struct dlist_item *item)
{
        item->next = prev->next;
        item->next->prev = item;
        item->prev = prev;
        prev->next = item;
        head->count++;
}

static inline void typesafe_dlist_swap_all(struct dlist_head *a,
                                           struct dlist_head *b)
{
        struct dlist_head tmp = *a;

        a->count = b->count;
        if (a->count) {
                a->hitem.next = b->hitem.next;
                a->hitem.prev = b->hitem.prev;
                a->hitem.next->prev = &a->hitem;
                a->hitem.prev->next = &a->hitem;
        } else {
                a->hitem.next = &a->hitem;
                a->hitem.prev = &a->hitem;
        }

        b->count = tmp.count;
        if (b->count) {
                b->hitem.next = tmp.hitem.next;
                b->hitem.prev = tmp.hitem.prev;
                b->hitem.next->prev = &b->hitem;
                b->hitem.prev->next = &b->hitem;
        } else {
                b->hitem.next = &b->hitem;
                b->hitem.prev = &b->hitem;
        }
}

/* double-linked list, for fast item deletion
 */
#define PREDECL_DLIST(prefix) \
struct prefix ## _head { struct dlist_head dh; }; \
struct prefix ## _item { struct dlist_item di; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_DLIST(var) { .dh = { \
        .hitem = { &var.dh.hitem, &var.dh.hitem }, }, }

#define DECLARE_DLIST(prefix, type, field) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
        memset(h, 0, sizeof(*h)); \
        h->dh.hitem.prev = &h->dh.hitem; \
        h->dh.hitem.next = &h->dh.hitem; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
        memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
        typesafe_dlist_add(&h->dh, &h->dh.hitem, &item->field.di); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
        typesafe_dlist_add(&h->dh, h->dh.hitem.prev, &item->field.di); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
                type *after, type *item) \
{ \
        struct dlist_item *prev; \
        prev = after ? &after->field.di : &h->dh.hitem; \
        typesafe_dlist_add(&h->dh, prev, &item->field.di); \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
        struct dlist_item *ditem = &item->field.di; \
        ditem->prev->next = ditem->next; \
        ditem->next->prev = ditem->prev; \
        h->dh.count--; \
        ditem->prev = ditem->next = NULL; \
        return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
        struct dlist_item *ditem = h->dh.hitem.next; \
        if (ditem == &h->dh.hitem) \
                return NULL; \
        ditem->prev->next = ditem->next; \
        ditem->next->prev = ditem->prev; \
        h->dh.count--; \
        return container_of(ditem, type, field.di); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
                struct prefix##_head *b) \
{ \
        typesafe_dlist_swap_all(&a->dh, &b->dh); \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
        const struct dlist_item *ditem = h->dh.hitem.next; \
        if (ditem == &h->dh.hitem) \
                return NULL; \
        return container_of(ditem, type, field.di); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
                const type *item) \
{ \
        const struct dlist_item *ditem = &item->field.di; \
        if (ditem->next == &h->dh.hitem) \
                return NULL; \
        return container_of(ditem->next, type, field.di); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
        if (!item) \
                return NULL; \
        return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
        return h->dh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
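
/* sketch of why DLIST exists: _del() is O(1) because every item carries
 * prev/next pointers, whereas the single-linked LIST above has to walk the
 * list to delete.  "evlist" and "struct event" are illustrative names.
 *
 *   PREDECL_DLIST(evlist);
 *   struct event { struct evlist_item ei; };
 *   DECLARE_DLIST(evlist, struct event, ei);
 *
 *   evlist_del(&head, ev);   // unlinks in constant time, no traversal
 */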

/* note: heap currently caps out at 4G items */

#define HEAP_NARY 8U
typedef uint32_t heap_index_i;

struct heap_item {
        uint32_t index;
};

struct heap_head {
        struct heap_item **array;
        uint32_t arraysz, count;
};

#define HEAP_RESIZE_TRESH_UP(h) \
        (h->hh.count + 1 >= h->hh.arraysz)
#define HEAP_RESIZE_TRESH_DN(h) \
        (h->hh.count == 0 || \
         h->hh.arraysz - h->hh.count > (h->hh.count + 1024) / 2)

#define PREDECL_HEAP(prefix) \
struct prefix ## _head { struct heap_head hh; }; \
struct prefix ## _item { struct heap_item hi; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_HEAP(var) { }

#define DECLARE_HEAP(prefix, type, field, cmpfn) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
        memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
        assert(h->hh.count == 0); \
        memset(h, 0, sizeof(*h)); \
} \
macro_inline int prefix ## __cmp(const struct heap_item *a, \
                const struct heap_item *b) \
{ \
        return cmpfn(container_of(a, type, field.hi), \
                     container_of(b, type, field.hi)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
        if (HEAP_RESIZE_TRESH_UP(h)) \
                typesafe_heap_resize(&h->hh, true); \
        typesafe_heap_pullup(&h->hh, h->hh.count, &item->field.hi, \
                             prefix ## __cmp); \
        h->hh.count++; \
        return NULL; \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
        struct heap_item *other; \
        uint32_t index = item->field.hi.index; \
        assert(h->hh.array[index] == &item->field.hi); \
        h->hh.count--; \
        other = h->hh.array[h->hh.count]; \
        if (cmpfn(container_of(other, type, field.hi), item) < 0) \
                typesafe_heap_pullup(&h->hh, index, other, prefix ## __cmp); \
        else \
                typesafe_heap_pushdown(&h->hh, index, other, prefix ## __cmp); \
        if (HEAP_RESIZE_TRESH_DN(h)) \
                typesafe_heap_resize(&h->hh, false); \
        return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
        struct heap_item *hitem, *other; \
        if (h->hh.count == 0) \
                return NULL; \
        hitem = h->hh.array[0]; \
        h->hh.count--; \
        other = h->hh.array[h->hh.count]; \
        typesafe_heap_pushdown(&h->hh, 0, other, prefix ## __cmp); \
        if (HEAP_RESIZE_TRESH_DN(h)) \
                typesafe_heap_resize(&h->hh, false); \
        return container_of(hitem, type, field.hi); \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
        if (h->hh.count == 0) \
                return NULL; \
        return container_of(h->hh.array[0], type, field.hi); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
                const type *item) \
{ \
        uint32_t idx = item->field.hi.index + 1; \
        if (idx >= h->hh.count) \
                return NULL; \
        return container_of(h->hh.array[idx], type, field.hi); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
        if (!item) \
                return NULL; \
        return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
        return h->hh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
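
/* sketch of the heap as a priority queue: cmpfn defines the order, _pop()
 * always returns the smallest item under cmpfn.  "tmrheap" and "struct tmr"
 * are illustrative names.
 *
 *   PREDECL_HEAP(tmrheap);
 *   struct tmr {
 *           int64_t deadline;
 *           struct tmrheap_item hi;
 *   };
 *   static int tmr_cmp(const struct tmr *a, const struct tmr *b)
 *   {
 *           return (a->deadline > b->deadline) - (a->deadline < b->deadline);
 *   }
 *   DECLARE_HEAP(tmrheap, struct tmr, hi, tmr_cmp);
 *
 *   tmrheap_add(&head, t);                  // O(log n)
 *   struct tmr *due = tmrheap_pop(&head);   // earliest deadline first
 */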

extern void typesafe_heap_resize(struct heap_head *head, bool grow);
extern void typesafe_heap_pushdown(struct heap_head *head, uint32_t index,
                                   struct heap_item *item,
                                   int (*cmpfn)(const struct heap_item *a,
                                                const struct heap_item *b));
extern void typesafe_heap_pullup(struct heap_head *head, uint32_t index,
                                 struct heap_item *item,
                                 int (*cmpfn)(const struct heap_item *a,
                                              const struct heap_item *b));

/* single-linked list, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
struct ssort_item {
        struct ssort_item *next;
};

struct ssort_head {
        struct ssort_item *first;
        size_t count;
};

/* use as:
 *
 * PREDECL_SORTLIST_UNIQ(namelist);
 * struct name {
 *         struct namelist_item nlitem;
 * };
 * DECLARE_SORTLIST_UNIQ(namelist, struct name, nlitem, cmpfunc);
 */
#define _PREDECL_SORTLIST(prefix) \
struct prefix ## _head { struct ssort_head sh; }; \
struct prefix ## _item { struct ssort_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_SORTLIST_UNIQ(var) { }
#define INIT_SORTLIST_NONUNIQ(var) { }

#define PREDECL_SORTLIST_UNIQ(prefix) \
        _PREDECL_SORTLIST(prefix)
#define PREDECL_SORTLIST_NONUNIQ(prefix) \
        _PREDECL_SORTLIST(prefix)

#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
        memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
        memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
        struct ssort_item **np = &h->sh.first; \
        int c = 1; \
        while (*np && (c = cmpfn_uq( \
                        container_of(*np, type, field.si), item)) < 0) \
                np = &(*np)->next; \
        if (c == 0) \
                return container_of(*np, type, field.si); \
        item->field.si.next = *np; \
        *np = &item->field.si; \
        h->sh.count++; \
        return NULL; \
} \
macro_inline const type *prefix ## _const_find_gteq( \
                const struct prefix##_head *h, const type *item) \
{ \
        const struct ssort_item *sitem = h->sh.first; \
        int cmpval = 0; \
        while (sitem && (cmpval = cmpfn_nuq( \
                        container_of(sitem, type, field.si), item)) < 0) \
                sitem = sitem->next; \
        return container_of_null(sitem, type, field.si); \
} \
macro_inline const type *prefix ## _const_find_lt( \
                const struct prefix##_head *h, const type *item) \
{ \
        const struct ssort_item *prev = NULL, *sitem = h->sh.first; \
        int cmpval = 0; \
        while (sitem && (cmpval = cmpfn_nuq( \
                        container_of(sitem, type, field.si), item)) < 0) \
                sitem = (prev = sitem)->next; \
        return container_of_null(prev, type, field.si); \
} \
TYPESAFE_FIND_CMP(prefix, type) \
/* TODO: del_hint */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
        struct ssort_item **iter = &h->sh.first; \
        while (*iter && *iter != &item->field.si) \
                iter = &(*iter)->next; \
        if (!*iter) \
                return NULL; \
        h->sh.count--; \
        *iter = item->field.si.next; \
        return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
        struct ssort_item *sitem = h->sh.first; \
        if (!sitem) \
                return NULL; \
        h->sh.count--; \
        h->sh.first = sitem->next; \
        return container_of(sitem, type, field.si); \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
        return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
                const type *item) \
{ \
        const struct ssort_item *sitem = &item->field.si; \
        return container_of_null(sitem->next, type, field.si); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
        struct ssort_item *sitem; \
        if (!item) \
                return NULL; \
        sitem = &item->field.si; \
        return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
        return h->sh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */

#define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn) \
        _DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn); \
\
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
                const type *item) \
{ \
        const struct ssort_item *sitem = h->sh.first; \
        int cmpval = 0; \
        while (sitem && (cmpval = cmpfn( \
                        container_of(sitem, type, field.si), item)) < 0) \
                sitem = sitem->next; \
        if (!sitem || cmpval > 0) \
                return NULL; \
        return container_of(sitem, type, field.si); \
} \
TYPESAFE_FIND(prefix, type) \
MACRO_REQUIRE_SEMICOLON() /* end */

#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn) \
macro_inline int _ ## prefix ## _cmp(const type *a, const type *b) \
{ \
        int cmpval = cmpfn(a, b); \
        if (cmpval) \
                return cmpval; \
        if (a < b) \
                return -1; \
        if (a > b) \
                return 1; \
        return 0; \
} \
_DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp); \
MACRO_REQUIRE_SEMICOLON() /* end */
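
/* sketch of sorted-list usage; in the _UNIQ variant, _add() returns the
 * already-present duplicate instead of inserting.  "plist" and "struct pfx"
 * are illustrative names.
 *
 *   PREDECL_SORTLIST_UNIQ(plist);
 *   struct pfx {
 *           uint32_t id;
 *           struct plist_item pi;
 *   };
 *   static int pfx_cmp(const struct pfx *a, const struct pfx *b)
 *   {
 *           return (a->id > b->id) - (a->id < b->id);
 *   }
 *   DECLARE_SORTLIST_UNIQ(plist, struct pfx, pi, pfx_cmp);
 *
 *   struct pfx *dup = plist_add(&head, p);  // NULL on success, else the
 *                                           // existing duplicate
 */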


/* hash, "sorted" by hash value
 */

/* don't use these structs directly */
struct thash_item {
        struct thash_item *next;
        uint32_t hashval;
};

struct thash_head {
        struct thash_item **entries;
        uint32_t count;

        uint8_t tabshift;
        uint8_t minshift, maxshift;
};

#define _HASH_SIZE(tabshift) \
        ((1U << (tabshift)) >> 1)
#define HASH_SIZE(head) \
        _HASH_SIZE((head).tabshift)
#define _HASH_KEY(tabshift, val) \
        ((val) >> (33 - (tabshift)))
#define HASH_KEY(head, val) \
        _HASH_KEY((head).tabshift, val)
#define HASH_GROW_THRESHOLD(head) \
        ((head).count >= HASH_SIZE(head))
#define HASH_SHRINK_THRESHOLD(head) \
        ((head).count <= (HASH_SIZE(head) - 1) / 2)

extern void typesafe_hash_grow(struct thash_head *head);
extern void typesafe_hash_shrink(struct thash_head *head);

/* use as:
 *
 * PREDECL_HASH(namelist);
 * struct name {
 *         struct namelist_item nlitem;
 * };
 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc);
 */
#define PREDECL_HASH(prefix) \
struct prefix ## _head { struct thash_head hh; }; \
struct prefix ## _item { struct thash_item hi; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_HASH(var) { }

#define DECLARE_HASH(prefix, type, field, cmpfn, hashfn) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
        memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
        assert(h->hh.count == 0); \
        h->hh.minshift = 0; \
        typesafe_hash_shrink(&h->hh); \
        memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
        h->hh.count++; \
        if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh)) \
                typesafe_hash_grow(&h->hh); \
\
        uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
        item->field.hi.hashval = hval; \
        struct thash_item **np = &h->hh.entries[hbits]; \
        while (*np && (*np)->hashval < hval) \
                np = &(*np)->next; \
        if (*np && cmpfn(container_of(*np, type, field.hi), item) == 0) { \
                h->hh.count--; \
                return container_of(*np, type, field.hi); \
        } \
        item->field.hi.next = *np; \
        *np = &item->field.hi; \
        return NULL; \
} \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
                const type *item) \
{ \
        if (!h->hh.tabshift) \
                return NULL; \
        uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
        const struct thash_item *hitem = h->hh.entries[hbits]; \
        while (hitem && hitem->hashval < hval) \
                hitem = hitem->next; \
        while (hitem && hitem->hashval == hval) { \
                if (!cmpfn(container_of(hitem, type, field.hi), item)) \
                        return container_of(hitem, type, field.hi); \
                hitem = hitem->next; \
        } \
        return NULL; \
} \
TYPESAFE_FIND(prefix, type) \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
        if (!h->hh.tabshift) \
                return NULL; \
        uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
        struct thash_item **np = &h->hh.entries[hbits]; \
        while (*np && (*np)->hashval < hval) \
                np = &(*np)->next; \
        while (*np && *np != &item->field.hi && (*np)->hashval == hval) \
                np = &(*np)->next; \
        if (*np != &item->field.hi) \
                return NULL; \
        *np = item->field.hi.next; \
        item->field.hi.next = NULL; \
        h->hh.count--; \
        if (HASH_SHRINK_THRESHOLD(h->hh)) \
                typesafe_hash_shrink(&h->hh); \
        return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
        uint32_t i; \
        for (i = 0; i < HASH_SIZE(h->hh); i++) \
                if (h->hh.entries[i]) { \
                        struct thash_item *hitem = h->hh.entries[i]; \
                        h->hh.entries[i] = hitem->next; \
                        h->hh.count--; \
                        hitem->next = NULL; \
                        if (HASH_SHRINK_THRESHOLD(h->hh)) \
                                typesafe_hash_shrink(&h->hh); \
                        return container_of(hitem, type, field.hi); \
                } \
        return NULL; \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
        uint32_t i; \
        for (i = 0; i < HASH_SIZE(h->hh); i++) \
                if (h->hh.entries[i]) \
                        return container_of(h->hh.entries[i], type, field.hi); \
        return NULL; \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
                const type *item) \
{ \
        const struct thash_item *hitem = &item->field.hi; \
        if (hitem->next) \
                return container_of(hitem->next, type, field.hi); \
        uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1; \
        for (; i < HASH_SIZE(h->hh); i++) \
                if (h->hh.entries[i]) \
                        return container_of(h->hh.entries[i], type, field.hi); \
        return NULL; \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
        if (!item) \
                return NULL; \
        return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
        return h->hh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
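
/* sketch of hash-table lookup using a key-only "reference" item; cmpfn and
 * hashfn only need to look at the key fields.  "nodehash"/"struct node" are
 * illustrative names; the multiplicative hash below is a placeholder (note
 * HASH_KEY uses the *top* bits of the hash value, so the hash function
 * should distribute them well).
 *
 *   PREDECL_HASH(nodehash);
 *   struct node {
 *           uint32_t id;
 *           struct nodehash_item nh;
 *   };
 *   static int node_cmp(const struct node *a, const struct node *b)
 *   {
 *           return (a->id > b->id) - (a->id < b->id);
 *   }
 *   static uint32_t node_hash(const struct node *n)
 *   {
 *           return n->id * 2654435761u;  // Knuth multiplicative hash
 *   }
 *   DECLARE_HASH(nodehash, struct node, nh, node_cmp, node_hash);
 *
 *   struct node ref = { .id = 1234 };
 *   struct node *n = nodehash_find(&head, &ref);  // NULL if absent
 */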

/* skiplist, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
#define SKIPLIST_MAXDEPTH 16
#define SKIPLIST_EMBED 4
#define SKIPLIST_OVERFLOW (SKIPLIST_EMBED - 1)

struct sskip_item {
        struct sskip_item *next[SKIPLIST_EMBED];
};

struct sskip_overflow {
        struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
};

struct sskip_head {
        struct sskip_item hitem;
        struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
        size_t count;
};

/* use as:
 *
 * PREDECL_SKIPLIST_UNIQ(namelist);
 * struct name {
 *         struct namelist_item nlitem;
 * };
 * DECLARE_SKIPLIST_UNIQ(namelist, struct name, nlitem, cmpfunc);
 */
#define _PREDECL_SKIPLIST(prefix) \
struct prefix ## _head { struct sskip_head sh; }; \
struct prefix ## _item { struct sskip_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_SKIPLIST_UNIQ(var) { }
#define INIT_SKIPLIST_NONUNIQ(var) { }

#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
        memset(h, 0, sizeof(*h)); \
        h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
                ((uintptr_t)h->sh.overflow | 1); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
        memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
        struct sskip_item *si; \
        si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq); \
        return container_of_null(si, type, field.si); \
} \
macro_inline const type *prefix ## _const_find_gteq( \
                const struct prefix##_head *h, const type *item) \
{ \
        const struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh, \
                        &item->field.si, cmpfn_nuq); \
        return container_of_null(sitem, type, field.si); \
} \
macro_inline const type *prefix ## _const_find_lt( \
                const struct prefix##_head *h, const type *item) \
{ \
        const struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh, \
                        &item->field.si, cmpfn_nuq); \
        return container_of_null(sitem, type, field.si); \
} \
TYPESAFE_FIND_CMP(prefix, type) \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
        struct sskip_item *sitem = typesafe_skiplist_del(&h->sh, \
                        &item->field.si, cmpfn_uq); \
        return container_of_null(sitem, type, field.si); \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
        struct sskip_item *sitem = typesafe_skiplist_pop(&h->sh); \
        return container_of_null(sitem, type, field.si); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
                struct prefix##_head *b) \
{ \
        struct prefix##_head tmp = *a; \
        *a = *b; \
        *b = tmp; \
        a->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
                ((uintptr_t)a->sh.overflow | 1); \
        b->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
                ((uintptr_t)b->sh.overflow | 1); \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
        const struct sskip_item *first = h->sh.hitem.next[0]; \
        return container_of_null(first, type, field.si); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
                const type *item) \
{ \
        const struct sskip_item *next = item->field.si.next[0]; \
        return container_of_null(next, type, field.si); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
        struct sskip_item *next; \
        next = item ? item->field.si.next[0] : NULL; \
        return container_of_null(next, type, field.si); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
        return h->sh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */

#define PREDECL_SKIPLIST_UNIQ(prefix) \
        _PREDECL_SKIPLIST(prefix)
#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn) \
\
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
                const struct sskip_item *b) \
{ \
        return cmpfn(container_of(a, type, field.si), \
                     container_of(b, type, field.si)); \
} \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
                const type *item) \
{ \
        const struct sskip_item *sitem = typesafe_skiplist_find(&h->sh, \
                        &item->field.si, &prefix ## __cmp); \
        return container_of_null(sitem, type, field.si); \
} \
TYPESAFE_FIND(prefix, type) \
\
_DECLARE_SKIPLIST(prefix, type, field, \
                  prefix ## __cmp, prefix ## __cmp); \
MACRO_REQUIRE_SEMICOLON() /* end */

#define PREDECL_SKIPLIST_NONUNIQ(prefix) \
        _PREDECL_SKIPLIST(prefix)
#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn) \
\
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
                const struct sskip_item *b) \
{ \
        return cmpfn(container_of(a, type, field.si), \
                     container_of(b, type, field.si)); \
} \
macro_inline int prefix ## __cmp_uq(const struct sskip_item *a, \
                const struct sskip_item *b) \
{ \
        int cmpval = cmpfn(container_of(a, type, field.si), \
                           container_of(b, type, field.si)); \
        if (cmpval) \
                return cmpval; \
        if (a < b) \
                return -1; \
        if (a > b) \
                return 1; \
        return 0; \
} \
\
_DECLARE_SKIPLIST(prefix, type, field, \
                  prefix ## __cmp, prefix ## __cmp_uq); \
MACRO_REQUIRE_SEMICOLON() /* end */
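
/* sketch of skiplist usage: expected O(log n) add/find/del on a sorted
 * container, with the same API shape as the sorted list above.  "seqlist"
 * and "struct seq" are illustrative names.
 *
 *   PREDECL_SKIPLIST_UNIQ(seqlist);
 *   struct seq {
 *           uint32_t num;
 *           struct seqlist_item si;
 *   };
 *   static int seq_cmp(const struct seq *a, const struct seq *b)
 *   {
 *           return (a->num > b->num) - (a->num < b->num);
 *   }
 *   DECLARE_SKIPLIST_UNIQ(seqlist, struct seq, si, seq_cmp);
 *
 *   struct seq *ge = seqlist_find_gteq(&head, &ref);  // first item >= ref
 */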


extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
                struct sskip_item *item, int (*cmpfn)(
                                const struct sskip_item *a,
                                const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find(
                const struct sskip_head *head,
                const struct sskip_item *item, int (*cmpfn)(
                                const struct sskip_item *a,
                                const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_gteq(
                const struct sskip_head *head,
                const struct sskip_item *item, int (*cmpfn)(
                                const struct sskip_item *a,
                                const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_lt(
                const struct sskip_head *head,
                const struct sskip_item *item, int (*cmpfn)(
                                const struct sskip_item *a,
                                const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_del(
                struct sskip_head *head, struct sskip_item *item, int (*cmpfn)(
                                const struct sskip_item *a,
                                const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_pop(struct sskip_head *head);

#ifdef __cplusplus
}
#endif

/* this needs to stay at the end because both files include each other.
 * the resolved order is typesafe.h before typerb.h
 */
#include "typerb.h"

#endif /* _FRR_TYPESAFE_H */