]> git.proxmox.com Git - mirror_frr.git/blob - lib/typesafe.h
tools: fix frr-reload.py daemon option
[mirror_frr.git] / lib / typesafe.h
1 /*
2 * Copyright (c) 2016-2019 David Lamparter, for NetDEF, Inc.
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #ifndef _FRR_TYPESAFE_H
18 #define _FRR_TYPESAFE_H
19
20 #include <stddef.h>
21 #include <stdint.h>
22 #include <stdbool.h>
23 #include <assert.h>
24 #include "compiler.h"
25
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29
30 /* generic macros for all list-like types */
31
32 #define frr_each(prefix, head, item) \
33 for (item = prefix##_first(head); item; \
34 item = prefix##_next(head, item))
35 #define frr_each_safe(prefix, head, item) \
36 for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
37 prefix##_next_safe(head, \
38 (item = prefix##_first(head))); \
39 item; \
40 item = prefix##_safe, \
41 prefix##_safe = prefix##_next_safe(head, prefix##_safe))
42 #define frr_each_from(prefix, head, item, from) \
43 for (item = from, from = prefix##_next_safe(head, item); \
44 item; \
45 item = from, from = prefix##_next_safe(head, from))
46
47
48 /* non-const variants. these wrappers are the same for all the types, so
49 * bundle them together here.
50 */
51 #define TYPESAFE_FIRST_NEXT(prefix, type) \
52 macro_pure type *prefix ## _first(struct prefix##_head *h) \
53 { \
54 return (type *)prefix ## _const_first(h); \
55 } \
56 macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
57 { \
58 return (type *)prefix ## _const_next(h, item); \
59 } \
60 /* ... */
61 #define TYPESAFE_FIND(prefix, type) \
62 macro_inline type *prefix ## _find(struct prefix##_head *h, \
63 const type *item) \
64 { \
65 return (type *)prefix ## _const_find(h, item); \
66 } \
67 /* ... */
68 #define TYPESAFE_FIND_CMP(prefix, type) \
69 macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
70 const type *item) \
71 { \
72 return (type *)prefix ## _const_find_lt(h, item); \
73 } \
74 macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
75 const type *item) \
76 { \
77 return (type *)prefix ## _const_find_gteq(h, item); \
78 } \
79 /* ... */
80
81
82 /* single-linked list, unsorted/arbitrary.
83 * can be used as queue with add_tail / pop
84 */
85
86 /* don't use these structs directly */
87 struct slist_item {
88 struct slist_item *next;
89 };
90
91 struct slist_head {
92 struct slist_item *first, **last_next;
93 size_t count;
94 };
95
96 static inline void typesafe_list_add(struct slist_head *head,
97 struct slist_item **pos, struct slist_item *item)
98 {
99 item->next = *pos;
100 *pos = item;
101 if (pos == head->last_next)
102 head->last_next = &item->next;
103 head->count++;
104 }
105
106 /* use as:
107 *
108 * PREDECL_LIST(namelist)
109 * struct name {
110 * struct namelist_item nlitem;
111 * }
112 * DECLARE_LIST(namelist, struct name, nlitem)
113 */
114 #define PREDECL_LIST(prefix) \
115 struct prefix ## _head { struct slist_head sh; }; \
116 struct prefix ## _item { struct slist_item si; };
117
118 #define INIT_LIST(var) { .sh = { .last_next = &var.sh.first, }, }
119
120 #define DECLARE_LIST(prefix, type, field) \
121 \
122 macro_inline void prefix ## _init(struct prefix##_head *h) \
123 { \
124 memset(h, 0, sizeof(*h)); \
125 h->sh.last_next = &h->sh.first; \
126 } \
127 macro_inline void prefix ## _fini(struct prefix##_head *h) \
128 { \
129 memset(h, 0, sizeof(*h)); \
130 } \
131 macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
132 { \
133 typesafe_list_add(&h->sh, &h->sh.first, &item->field.si); \
134 } \
135 macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
136 { \
137 typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si); \
138 } \
139 macro_inline void prefix ## _add_after(struct prefix##_head *h, \
140 type *after, type *item) \
141 { \
142 struct slist_item **nextp; \
143 nextp = after ? &after->field.si.next : &h->sh.first; \
144 typesafe_list_add(&h->sh, nextp, &item->field.si); \
145 } \
146 /* TODO: del_hint */ \
147 macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
148 { \
149 struct slist_item **iter = &h->sh.first; \
150 while (*iter && *iter != &item->field.si) \
151 iter = &(*iter)->next; \
152 if (!*iter) \
153 return NULL; \
154 h->sh.count--; \
155 *iter = item->field.si.next; \
156 if (!item->field.si.next) \
157 h->sh.last_next = iter; \
158 return item; \
159 } \
160 macro_inline type *prefix ## _pop(struct prefix##_head *h) \
161 { \
162 struct slist_item *sitem = h->sh.first; \
163 if (!sitem) \
164 return NULL; \
165 h->sh.count--; \
166 h->sh.first = sitem->next; \
167 if (h->sh.first == NULL) \
168 h->sh.last_next = &h->sh.first; \
169 return container_of(sitem, type, field.si); \
170 } \
171 macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
172 { \
173 return container_of_null(h->sh.first, type, field.si); \
174 } \
175 macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
176 const type *item) \
177 { \
178 const struct slist_item *sitem = &item->field.si; \
179 return container_of_null(sitem->next, type, field.si); \
180 } \
181 TYPESAFE_FIRST_NEXT(prefix, type) \
182 macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
183 { \
184 struct slist_item *sitem; \
185 if (!item) \
186 return NULL; \
187 sitem = &item->field.si; \
188 return container_of_null(sitem->next, type, field.si); \
189 } \
190 macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
191 { \
192 return h->sh.count; \
193 } \
194 /* ... */
195
196 /* don't use these structs directly */
197 struct dlist_item {
198 struct dlist_item *next;
199 struct dlist_item *prev;
200 };
201
202 struct dlist_head {
203 struct dlist_item hitem;
204 size_t count;
205 };
206
207 static inline void typesafe_dlist_add(struct dlist_head *head,
208 struct dlist_item *prev, struct dlist_item *item)
209 {
210 item->next = prev->next;
211 item->next->prev = item;
212 item->prev = prev;
213 prev->next = item;
214 head->count++;
215 }
216
217 /* double-linked list, for fast item deletion
218 */
219 #define PREDECL_DLIST(prefix) \
220 struct prefix ## _head { struct dlist_head dh; }; \
221 struct prefix ## _item { struct dlist_item di; };
222
223 #define INIT_DLIST(var) { .dh = { \
224 .hitem = { &var.dh.hitem, &var.dh.hitem }, }, }
225
226 #define DECLARE_DLIST(prefix, type, field) \
227 \
228 macro_inline void prefix ## _init(struct prefix##_head *h) \
229 { \
230 memset(h, 0, sizeof(*h)); \
231 h->dh.hitem.prev = &h->dh.hitem; \
232 h->dh.hitem.next = &h->dh.hitem; \
233 } \
234 macro_inline void prefix ## _fini(struct prefix##_head *h) \
235 { \
236 memset(h, 0, sizeof(*h)); \
237 } \
238 macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
239 { \
240 typesafe_dlist_add(&h->dh, &h->dh.hitem, &item->field.di); \
241 } \
242 macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
243 { \
244 typesafe_dlist_add(&h->dh, h->dh.hitem.prev, &item->field.di); \
245 } \
246 macro_inline void prefix ## _add_after(struct prefix##_head *h, \
247 type *after, type *item) \
248 { \
249 struct dlist_item *prev; \
250 prev = after ? &after->field.di : &h->dh.hitem; \
251 typesafe_dlist_add(&h->dh, prev, &item->field.di); \
252 } \
253 macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
254 { \
255 struct dlist_item *ditem = &item->field.di; \
256 ditem->prev->next = ditem->next; \
257 ditem->next->prev = ditem->prev; \
258 h->dh.count--; \
259 ditem->prev = ditem->next = NULL; \
260 return item; \
261 } \
262 macro_inline type *prefix ## _pop(struct prefix##_head *h) \
263 { \
264 struct dlist_item *ditem = h->dh.hitem.next; \
265 if (ditem == &h->dh.hitem) \
266 return NULL; \
267 ditem->prev->next = ditem->next; \
268 ditem->next->prev = ditem->prev; \
269 h->dh.count--; \
270 return container_of(ditem, type, field.di); \
271 } \
272 macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
273 { \
274 const struct dlist_item *ditem = h->dh.hitem.next; \
275 if (ditem == &h->dh.hitem) \
276 return NULL; \
277 return container_of(ditem, type, field.di); \
278 } \
279 macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
280 const type *item) \
281 { \
282 const struct dlist_item *ditem = &item->field.di; \
283 if (ditem->next == &h->dh.hitem) \
284 return NULL; \
285 return container_of(ditem->next, type, field.di); \
286 } \
287 TYPESAFE_FIRST_NEXT(prefix, type) \
288 macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
289 { \
290 if (!item) \
291 return NULL; \
292 return prefix ## _next(h, item); \
293 } \
294 macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
295 { \
296 return h->dh.count; \
297 } \
298 /* ... */
299
300 /* note: heap currently caps out at 4G items */
301
302 #define HEAP_NARY 8U
303 typedef uint32_t heap_index_i;
304
305 struct heap_item {
306 uint32_t index;
307 };
308
309 struct heap_head {
310 struct heap_item **array;
311 uint32_t arraysz, count;
312 };
313
314 #define HEAP_RESIZE_TRESH_UP(h) \
315 (h->hh.count + 1 >= h->hh.arraysz)
316 #define HEAP_RESIZE_TRESH_DN(h) \
317 (h->hh.count == 0 || \
318 h->hh.arraysz - h->hh.count > (h->hh.count + 1024) / 2)
319
320 #define PREDECL_HEAP(prefix) \
321 struct prefix ## _head { struct heap_head hh; }; \
322 struct prefix ## _item { struct heap_item hi; };
323
324 #define INIT_HEAP(var) { }
325
326 #define DECLARE_HEAP(prefix, type, field, cmpfn) \
327 \
328 macro_inline void prefix ## _init(struct prefix##_head *h) \
329 { \
330 memset(h, 0, sizeof(*h)); \
331 } \
332 macro_inline void prefix ## _fini(struct prefix##_head *h) \
333 { \
334 assert(h->hh.count == 0); \
335 memset(h, 0, sizeof(*h)); \
336 } \
337 macro_inline int prefix ## __cmp(const struct heap_item *a, \
338 const struct heap_item *b) \
339 { \
340 return cmpfn(container_of(a, type, field.hi), \
341 container_of(b, type, field.hi)); \
342 } \
343 macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
344 { \
345 if (HEAP_RESIZE_TRESH_UP(h)) \
346 typesafe_heap_resize(&h->hh, true); \
347 typesafe_heap_pullup(&h->hh, h->hh.count, &item->field.hi, \
348 prefix ## __cmp); \
349 h->hh.count++; \
350 return NULL; \
351 } \
352 macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
353 { \
354 struct heap_item *other; \
355 uint32_t index = item->field.hi.index; \
356 assert(h->hh.array[index] == &item->field.hi); \
357 h->hh.count--; \
358 other = h->hh.array[h->hh.count]; \
359 if (cmpfn(container_of(other, type, field.hi), item) < 0) \
360 typesafe_heap_pullup(&h->hh, index, other, prefix ## __cmp); \
361 else \
362 typesafe_heap_pushdown(&h->hh, index, other, prefix ## __cmp); \
363 if (HEAP_RESIZE_TRESH_DN(h)) \
364 typesafe_heap_resize(&h->hh, false); \
365 return item; \
366 } \
367 macro_inline type *prefix ## _pop(struct prefix##_head *h) \
368 { \
369 struct heap_item *hitem, *other; \
370 if (h->hh.count == 0) \
371 return NULL; \
372 hitem = h->hh.array[0]; \
373 h->hh.count--; \
374 other = h->hh.array[h->hh.count]; \
375 typesafe_heap_pushdown(&h->hh, 0, other, prefix ## __cmp); \
376 if (HEAP_RESIZE_TRESH_DN(h)) \
377 typesafe_heap_resize(&h->hh, false); \
378 return container_of(hitem, type, field.hi); \
379 } \
380 macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
381 { \
382 if (h->hh.count == 0) \
383 return NULL; \
384 return container_of(h->hh.array[0], type, field.hi); \
385 } \
386 macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
387 const type *item) \
388 { \
389 uint32_t idx = item->field.hi.index + 1; \
390 if (idx >= h->hh.count) \
391 return NULL; \
392 return container_of(h->hh.array[idx], type, field.hi); \
393 } \
394 TYPESAFE_FIRST_NEXT(prefix, type) \
395 macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
396 { \
397 if (!item) \
398 return NULL; \
399 return prefix ## _next(h, item); \
400 } \
401 macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
402 { \
403 return h->hh.count; \
404 } \
405 /* ... */
406
407 extern void typesafe_heap_resize(struct heap_head *head, bool grow);
408 extern void typesafe_heap_pushdown(struct heap_head *head, uint32_t index,
409 struct heap_item *item,
410 int (*cmpfn)(const struct heap_item *a,
411 const struct heap_item *b));
412 extern void typesafe_heap_pullup(struct heap_head *head, uint32_t index,
413 struct heap_item *item,
414 int (*cmpfn)(const struct heap_item *a,
415 const struct heap_item *b));
416
417 /* single-linked list, sorted.
418 * can be used as priority queue with add / pop
419 */
420
421 /* don't use these structs directly */
422 struct ssort_item {
423 struct ssort_item *next;
424 };
425
426 struct ssort_head {
427 struct ssort_item *first;
428 size_t count;
429 };
430
431 /* use as:
432 *
433 * PREDECL_SORTLIST(namelist)
434 * struct name {
435 * struct namelist_item nlitem;
436 * }
437 * DECLARE_SORTLIST(namelist, struct name, nlitem)
438 */
439 #define _PREDECL_SORTLIST(prefix) \
440 struct prefix ## _head { struct ssort_head sh; }; \
441 struct prefix ## _item { struct ssort_item si; };
442
443 #define INIT_SORTLIST_UNIQ(var) { }
444 #define INIT_SORTLIST_NONUNIQ(var) { }
445
446 #define PREDECL_SORTLIST_UNIQ(prefix) \
447 _PREDECL_SORTLIST(prefix)
448 #define PREDECL_SORTLIST_NONUNIQ(prefix) \
449 _PREDECL_SORTLIST(prefix)
450
451 #define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
452 \
453 macro_inline void prefix ## _init(struct prefix##_head *h) \
454 { \
455 memset(h, 0, sizeof(*h)); \
456 } \
457 macro_inline void prefix ## _fini(struct prefix##_head *h) \
458 { \
459 memset(h, 0, sizeof(*h)); \
460 } \
461 macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
462 { \
463 struct ssort_item **np = &h->sh.first; \
464 int c = 1; \
465 while (*np && (c = cmpfn_uq( \
466 container_of(*np, type, field.si), item)) < 0) \
467 np = &(*np)->next; \
468 if (c == 0) \
469 return container_of(*np, type, field.si); \
470 item->field.si.next = *np; \
471 *np = &item->field.si; \
472 h->sh.count++; \
473 return NULL; \
474 } \
475 macro_inline const type *prefix ## _const_find_gteq( \
476 const struct prefix##_head *h, const type *item) \
477 { \
478 const struct ssort_item *sitem = h->sh.first; \
479 int cmpval = 0; \
480 while (sitem && (cmpval = cmpfn_nuq( \
481 container_of(sitem, type, field.si), item)) < 0) \
482 sitem = sitem->next; \
483 return container_of_null(sitem, type, field.si); \
484 } \
485 macro_inline const type *prefix ## _const_find_lt( \
486 const struct prefix##_head *h, const type *item) \
487 { \
488 const struct ssort_item *prev = NULL, *sitem = h->sh.first; \
489 int cmpval = 0; \
490 while (sitem && (cmpval = cmpfn_nuq( \
491 container_of(sitem, type, field.si), item)) < 0) \
492 sitem = (prev = sitem)->next; \
493 return container_of_null(prev, type, field.si); \
494 } \
495 TYPESAFE_FIND_CMP(prefix, type) \
496 /* TODO: del_hint */ \
497 macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
498 { \
499 struct ssort_item **iter = &h->sh.first; \
500 while (*iter && *iter != &item->field.si) \
501 iter = &(*iter)->next; \
502 if (!*iter) \
503 return NULL; \
504 h->sh.count--; \
505 *iter = item->field.si.next; \
506 return item; \
507 } \
508 macro_inline type *prefix ## _pop(struct prefix##_head *h) \
509 { \
510 struct ssort_item *sitem = h->sh.first; \
511 if (!sitem) \
512 return NULL; \
513 h->sh.count--; \
514 h->sh.first = sitem->next; \
515 return container_of(sitem, type, field.si); \
516 } \
517 macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
518 { \
519 return container_of_null(h->sh.first, type, field.si); \
520 } \
521 macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
522 const type *item) \
523 { \
524 const struct ssort_item *sitem = &item->field.si; \
525 return container_of_null(sitem->next, type, field.si); \
526 } \
527 TYPESAFE_FIRST_NEXT(prefix, type) \
528 macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
529 { \
530 struct ssort_item *sitem; \
531 if (!item) \
532 return NULL; \
533 sitem = &item->field.si; \
534 return container_of_null(sitem->next, type, field.si); \
535 } \
536 macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
537 { \
538 return h->sh.count; \
539 } \
540 /* ... */
541
542 #define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn) \
543 _DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn) \
544 \
545 macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
546 const type *item) \
547 { \
548 const struct ssort_item *sitem = h->sh.first; \
549 int cmpval = 0; \
550 while (sitem && (cmpval = cmpfn( \
551 container_of(sitem, type, field.si), item)) < 0) \
552 sitem = sitem->next; \
553 if (!sitem || cmpval > 0) \
554 return NULL; \
555 return container_of(sitem, type, field.si); \
556 } \
557 TYPESAFE_FIND(prefix, type) \
558 /* ... */
559
560 #define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn) \
561 macro_inline int _ ## prefix ## _cmp(const type *a, const type *b) \
562 { \
563 int cmpval = cmpfn(a, b); \
564 if (cmpval) \
565 return cmpval; \
566 if (a < b) \
567 return -1; \
568 if (a > b) \
569 return 1; \
570 return 0; \
571 } \
572 _DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp) \
573 /* ... */
574
575
576 /* hash, "sorted" by hash value
577 */
578
579 /* don't use these structs directly */
580 struct thash_item {
581 struct thash_item *next;
582 uint32_t hashval;
583 };
584
585 struct thash_head {
586 struct thash_item **entries;
587 uint32_t count;
588
589 uint8_t tabshift;
590 uint8_t minshift, maxshift;
591 };
592
593 #define _HASH_SIZE(tabshift) \
594 ((1U << (tabshift)) >> 1)
595 #define HASH_SIZE(head) \
596 _HASH_SIZE((head).tabshift)
597 #define _HASH_KEY(tabshift, val) \
598 ((val) >> (33 - (tabshift)))
599 #define HASH_KEY(head, val) \
600 _HASH_KEY((head).tabshift, val)
601 #define HASH_GROW_THRESHOLD(head) \
602 ((head).count >= HASH_SIZE(head))
603 #define HASH_SHRINK_THRESHOLD(head) \
604 ((head).count <= (HASH_SIZE(head) - 1) / 2)
605
606 extern void typesafe_hash_grow(struct thash_head *head);
607 extern void typesafe_hash_shrink(struct thash_head *head);
608
609 /* use as:
610 *
611 * PREDECL_HASH(namelist)
612 * struct name {
613 * struct namelist_item nlitem;
614 * }
615 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
616 */
617 #define PREDECL_HASH(prefix) \
618 struct prefix ## _head { struct thash_head hh; }; \
619 struct prefix ## _item { struct thash_item hi; };
620
621 #define INIT_HASH(var) { }
622
623 #define DECLARE_HASH(prefix, type, field, cmpfn, hashfn) \
624 \
625 macro_inline void prefix ## _init(struct prefix##_head *h) \
626 { \
627 memset(h, 0, sizeof(*h)); \
628 } \
629 macro_inline void prefix ## _fini(struct prefix##_head *h) \
630 { \
631 assert(h->hh.count == 0); \
632 h->hh.minshift = 0; \
633 typesafe_hash_shrink(&h->hh); \
634 memset(h, 0, sizeof(*h)); \
635 } \
636 macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
637 { \
638 h->hh.count++; \
639 if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh)) \
640 typesafe_hash_grow(&h->hh); \
641 \
642 uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
643 item->field.hi.hashval = hval; \
644 struct thash_item **np = &h->hh.entries[hbits]; \
645 while (*np && (*np)->hashval < hval) \
646 np = &(*np)->next; \
647 if (*np && cmpfn(container_of(*np, type, field.hi), item) == 0) { \
648 h->hh.count--; \
649 return container_of(*np, type, field.hi); \
650 } \
651 item->field.hi.next = *np; \
652 *np = &item->field.hi; \
653 return NULL; \
654 } \
655 macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
656 const type *item) \
657 { \
658 if (!h->hh.tabshift) \
659 return NULL; \
660 uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
661 const struct thash_item *hitem = h->hh.entries[hbits]; \
662 while (hitem && hitem->hashval < hval) \
663 hitem = hitem->next; \
664 while (hitem && hitem->hashval == hval) { \
665 if (!cmpfn(container_of(hitem, type, field.hi), item)) \
666 return container_of(hitem, type, field.hi); \
667 hitem = hitem->next; \
668 } \
669 return NULL; \
670 } \
671 TYPESAFE_FIND(prefix, type) \
672 macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
673 { \
674 if (!h->hh.tabshift) \
675 return NULL; \
676 uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
677 struct thash_item **np = &h->hh.entries[hbits]; \
678 while (*np && (*np)->hashval < hval) \
679 np = &(*np)->next; \
680 while (*np && *np != &item->field.hi && (*np)->hashval == hval) \
681 np = &(*np)->next; \
682 if (*np != &item->field.hi) \
683 return NULL; \
684 *np = item->field.hi.next; \
685 item->field.hi.next = NULL; \
686 h->hh.count--; \
687 if (HASH_SHRINK_THRESHOLD(h->hh)) \
688 typesafe_hash_shrink(&h->hh); \
689 return item; \
690 } \
691 macro_inline type *prefix ## _pop(struct prefix##_head *h) \
692 { \
693 uint32_t i; \
694 for (i = 0; i < HASH_SIZE(h->hh); i++) \
695 if (h->hh.entries[i]) { \
696 struct thash_item *hitem = h->hh.entries[i]; \
697 h->hh.entries[i] = hitem->next; \
698 h->hh.count--; \
699 hitem->next = NULL; \
700 if (HASH_SHRINK_THRESHOLD(h->hh)) \
701 typesafe_hash_shrink(&h->hh); \
702 return container_of(hitem, type, field.hi); \
703 } \
704 return NULL; \
705 } \
706 macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
707 { \
708 uint32_t i; \
709 for (i = 0; i < HASH_SIZE(h->hh); i++) \
710 if (h->hh.entries[i]) \
711 return container_of(h->hh.entries[i], type, field.hi); \
712 return NULL; \
713 } \
714 macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
715 const type *item) \
716 { \
717 const struct thash_item *hitem = &item->field.hi; \
718 if (hitem->next) \
719 return container_of(hitem->next, type, field.hi); \
720 uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1; \
721 for (; i < HASH_SIZE(h->hh); i++) \
722 if (h->hh.entries[i]) \
723 return container_of(h->hh.entries[i], type, field.hi); \
724 return NULL; \
725 } \
726 TYPESAFE_FIRST_NEXT(prefix, type) \
727 macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
728 { \
729 if (!item) \
730 return NULL; \
731 return prefix ## _next(h, item); \
732 } \
733 macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
734 { \
735 return h->hh.count; \
736 } \
737 /* ... */
738
739 /* skiplist, sorted.
740 * can be used as priority queue with add / pop
741 */
742
743 /* don't use these structs directly */
744 #define SKIPLIST_MAXDEPTH 16
745 #define SKIPLIST_EMBED 4
746 #define SKIPLIST_OVERFLOW (SKIPLIST_EMBED - 1)
747
748 struct sskip_item {
749 struct sskip_item *next[SKIPLIST_EMBED];
750 };
751
752 struct sskip_overflow {
753 struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
754 };
755
756 struct sskip_head {
757 struct sskip_item hitem;
758 struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
759 size_t count;
760 };
761
762 /* use as:
763 *
764 * PREDECL_SKIPLIST(namelist)
765 * struct name {
766 * struct namelist_item nlitem;
767 * }
768 * DECLARE_SKIPLIST(namelist, struct name, nlitem, cmpfunc)
769 */
770 #define _PREDECL_SKIPLIST(prefix) \
771 struct prefix ## _head { struct sskip_head sh; }; \
772 struct prefix ## _item { struct sskip_item si; };
773
774 #define INIT_SKIPLIST_UNIQ(var) { }
775 #define INIT_SKIPLIST_NONUNIQ(var) { }
776
777 #define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
778 \
779 macro_inline void prefix ## _init(struct prefix##_head *h) \
780 { \
781 memset(h, 0, sizeof(*h)); \
782 h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
783 ((uintptr_t)h->sh.overflow | 1); \
784 } \
785 macro_inline void prefix ## _fini(struct prefix##_head *h) \
786 { \
787 memset(h, 0, sizeof(*h)); \
788 } \
789 macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
790 { \
791 struct sskip_item *si; \
792 si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq); \
793 return container_of_null(si, type, field.si); \
794 } \
795 macro_inline const type *prefix ## _const_find_gteq( \
796 const struct prefix##_head *h, const type *item) \
797 { \
798 const struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh, \
799 &item->field.si, cmpfn_nuq); \
800 return container_of_null(sitem, type, field.si); \
801 } \
802 macro_inline const type *prefix ## _const_find_lt( \
803 const struct prefix##_head *h, const type *item) \
804 { \
805 const struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh, \
806 &item->field.si, cmpfn_nuq); \
807 return container_of_null(sitem, type, field.si); \
808 } \
809 TYPESAFE_FIND_CMP(prefix, type) \
810 macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
811 { \
812 struct sskip_item *sitem = typesafe_skiplist_del(&h->sh, \
813 &item->field.si, cmpfn_uq); \
814 return container_of_null(sitem, type, field.si); \
815 } \
816 macro_inline type *prefix ## _pop(struct prefix##_head *h) \
817 { \
818 struct sskip_item *sitem = typesafe_skiplist_pop(&h->sh); \
819 return container_of_null(sitem, type, field.si); \
820 } \
821 macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
822 { \
823 const struct sskip_item *first = h->sh.hitem.next[0]; \
824 return container_of_null(first, type, field.si); \
825 } \
826 macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
827 const type *item) \
828 { \
829 const struct sskip_item *next = item->field.si.next[0]; \
830 return container_of_null(next, type, field.si); \
831 } \
832 TYPESAFE_FIRST_NEXT(prefix, type) \
833 macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
834 { \
835 struct sskip_item *next; \
836 next = item ? item->field.si.next[0] : NULL; \
837 return container_of_null(next, type, field.si); \
838 } \
839 macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
840 { \
841 return h->sh.count; \
842 } \
843 /* ... */
844
845 #define PREDECL_SKIPLIST_UNIQ(prefix) \
846 _PREDECL_SKIPLIST(prefix)
847 #define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn) \
848 \
849 macro_inline int prefix ## __cmp(const struct sskip_item *a, \
850 const struct sskip_item *b) \
851 { \
852 return cmpfn(container_of(a, type, field.si), \
853 container_of(b, type, field.si)); \
854 } \
855 macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
856 const type *item) \
857 { \
858 const struct sskip_item *sitem = typesafe_skiplist_find(&h->sh, \
859 &item->field.si, &prefix ## __cmp); \
860 return container_of_null(sitem, type, field.si); \
861 } \
862 TYPESAFE_FIND(prefix, type) \
863 \
864 _DECLARE_SKIPLIST(prefix, type, field, \
865 prefix ## __cmp, prefix ## __cmp) \
866 /* ... */
867
868 #define PREDECL_SKIPLIST_NONUNIQ(prefix) \
869 _PREDECL_SKIPLIST(prefix)
870 #define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn) \
871 \
872 macro_inline int prefix ## __cmp(const struct sskip_item *a, \
873 const struct sskip_item *b) \
874 { \
875 return cmpfn(container_of(a, type, field.si), \
876 container_of(b, type, field.si)); \
877 } \
878 macro_inline int prefix ## __cmp_uq(const struct sskip_item *a, \
879 const struct sskip_item *b) \
880 { \
881 int cmpval = cmpfn(container_of(a, type, field.si), \
882 container_of(b, type, field.si)); \
883 if (cmpval) \
884 return cmpval; \
885 if (a < b) \
886 return -1; \
887 if (a > b) \
888 return 1; \
889 return 0; \
890 } \
891 \
892 _DECLARE_SKIPLIST(prefix, type, field, \
893 prefix ## __cmp, prefix ## __cmp_uq) \
894 /* ... */
895
896
897 extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
898 struct sskip_item *item, int (*cmpfn)(
899 const struct sskip_item *a,
900 const struct sskip_item *b));
901 extern const struct sskip_item *typesafe_skiplist_find(
902 const struct sskip_head *head,
903 const struct sskip_item *item, int (*cmpfn)(
904 const struct sskip_item *a,
905 const struct sskip_item *b));
906 extern const struct sskip_item *typesafe_skiplist_find_gteq(
907 const struct sskip_head *head,
908 const struct sskip_item *item, int (*cmpfn)(
909 const struct sskip_item *a,
910 const struct sskip_item *b));
911 extern const struct sskip_item *typesafe_skiplist_find_lt(
912 const struct sskip_head *head,
913 const struct sskip_item *item, int (*cmpfn)(
914 const struct sskip_item *a,
915 const struct sskip_item *b));
916 extern struct sskip_item *typesafe_skiplist_del(
917 struct sskip_head *head, struct sskip_item *item, int (*cmpfn)(
918 const struct sskip_item *a,
919 const struct sskip_item *b));
920 extern struct sskip_item *typesafe_skiplist_pop(struct sskip_head *head);
921
922 #ifdef __cplusplus
923 }
924 #endif
925
926 /* this needs to stay at the end because both files include each other.
927 * the resolved order is typesafe.h before typerb.h
928 */
929 #include "typerb.h"
930
931 #endif /* _FRR_TYPESAFE_H */