/* lib/typesafe.h - FRR typesafe container macros
 * (snapshot from mirror_frr.git, commit "lib: add `_last` and `_prev` on
 * typesafe RB/DLIST")
 */
1 /*
2 * Copyright (c) 2016-2019 David Lamparter, for NetDEF, Inc.
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #ifndef _FRR_TYPESAFE_H
18 #define _FRR_TYPESAFE_H
19
20 #include <stddef.h>
21 #include <stdint.h>
22 #include <stdbool.h>
23 #include "compiler.h"
24
25 #ifdef __cplusplus
26 extern "C" {
27 #endif
28
29 /* generic macros for all list-like types */
30
/* plain forward iteration; the list must not be modified from the body */
#define frr_each(prefix, head, item) \
	for (item = prefix##_first(head); item; \
		item = prefix##_next(head, item))
/* deletion-safe forward iteration: the successor is fetched before the body
 * runs, so the *current* item may be removed from the container
 */
#define frr_each_safe(prefix, head, item) \
	for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
			prefix##_next_safe(head, \
				(item = prefix##_first(head))); \
		item; \
		item = prefix##_safe, \
			prefix##_safe = prefix##_next_safe(head, prefix##_safe))
/* deletion-safe iteration resuming at "from"; "from" is advanced before each
 * pass so it can be stashed away to continue the walk later
 */
#define frr_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_next_safe(head, item); \
		item; \
		item = from, from = prefix##_next_safe(head, from))

/* reverse direction, only supported by a few containers
 * (those that provide _last/_prev, cf. TYPESAFE_LAST_PREV below)
 */

#define frr_rev_each(prefix, head, item) \
	for (item = prefix##_last(head); item; \
		item = prefix##_prev(head, item))
/* reverse deletion-safe iteration, mirror of frr_each_safe */
#define frr_rev_each_safe(prefix, head, item) \
	for (typeof(prefix##_prev_safe(head, NULL)) prefix##_safe = \
			prefix##_prev_safe(head, \
				(item = prefix##_last(head))); \
		item; \
		item = prefix##_safe, \
			prefix##_safe = prefix##_prev_safe(head, prefix##_safe))
/* reverse deletion-safe iteration resuming at "from" */
#define frr_rev_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_prev_safe(head, item); \
		item; \
		item = from, from = prefix##_prev_safe(head, from))
62
/* non-const variants. these wrappers are the same for all the types, so
 * bundle them together here.
 * (the actual implementations are the _const_* functions; these just cast
 * the result back to non-const for use with a mutable head)
 */
#define TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	return (type *)prefix ## _const_first(h); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	return (type *)prefix ## _const_next(h, item); \
} \
/* ... */
/* same, for containers that also support reverse iteration */
#define TYPESAFE_LAST_PREV(prefix, type) \
macro_pure type *prefix ## _last(struct prefix##_head *h) \
{ \
	return (type *)prefix ## _const_last(h); \
} \
macro_pure type *prefix ## _prev(struct prefix##_head *h, type *item) \
{ \
	return (type *)prefix ## _const_prev(h, item); \
} \
/* ... */
/* non-const _find wrapper for containers with exact lookup */
#define TYPESAFE_FIND(prefix, type) \
macro_inline type *prefix ## _find(struct prefix##_head *h, \
		const type *item) \
{ \
	return (type *)prefix ## _const_find(h, item); \
} \
/* ... */
/* non-const _find_lt/_find_gteq wrappers for sorted containers */
#define TYPESAFE_FIND_CMP(prefix, type) \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
		const type *item) \
{ \
	return (type *)prefix ## _const_find_lt(h, item); \
} \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
		const type *item) \
{ \
	return (type *)prefix ## _const_find_gteq(h, item); \
} \
/* ... */

/* *_member via find - when there is no better membership check than find() */
#define TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
macro_inline bool prefix ## _member(struct prefix##_head *h, \
		const type *item) \
{ \
	return item == prefix ## _const_find(h, item); \
} \
/* ... */

/* *_member via find_gteq - same for non-unique containers
 * (several items may compare equal; walk the equal run looking for pointer
 * identity, stop once the iterator compares greater than "item")
 */
#define TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
macro_inline bool prefix ## _member(struct prefix##_head *h, \
		const type *item) \
{ \
	const type *iter; \
	for (iter = prefix ## _const_find_gteq(h, item); iter; \
			iter = prefix ## _const_next(h, iter)) { \
		if (iter == item) \
			return true; \
		if (cmpfn(iter, item) > 0) \
			break; \
	} \
	return false; \
} \
/* ... */

/* SWAP_ALL_SIMPLE = for containers where the items don't point back to the
 * head *AND* the head doesn't point to itself (= everything except LIST,
 * DLIST and SKIPLIST), just switch out the entire head
 */
#define TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
		struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
} \
/* ... */
145
/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 */

/* don't use these structs directly */
struct slist_item {
	/* _SLIST_LAST when this item is the tail; NULL when not on a list */
	struct slist_item *next;
};

struct slist_head {
	/* last_next points at the tail item's "next" pointer, or at "first"
	 * itself while the list is empty - makes add_tail O(1) */
	struct slist_item *first, **last_next;
	size_t count;
};

/* this replaces NULL as the value for ->next on the last item.
 * (a distinct sentinel keeps NULL free to mean "not on any list", which is
 * what the generated _anywhere() checks)
 */
extern struct slist_item typesafe_slist_sentinel;
#define _SLIST_LAST &typesafe_slist_sentinel
163
164 static inline void typesafe_list_add(struct slist_head *head,
165 struct slist_item **pos, struct slist_item *item)
166 {
167 item->next = *pos;
168 *pos = item;
169 if (pos == head->last_next)
170 head->last_next = &item->next;
171 head->count++;
172 }
173
174 extern bool typesafe_list_member(const struct slist_head *head,
175 const struct slist_item *item);
176
177 /* use as:
178 *
179 * PREDECL_LIST(namelist);
180 * struct name {
181 * struct namelist_item nlitem;
182 * }
183 * DECLARE_LIST(namelist, struct name, nlitem);
184 */
185 #define PREDECL_LIST(prefix) \
186 struct prefix ## _head { struct slist_head sh; }; \
187 struct prefix ## _item { struct slist_item si; }; \
188 MACRO_REQUIRE_SEMICOLON() /* end */
189
190 #define INIT_LIST(var) { .sh = { .last_next = &var.sh.first, }, }
191
/* DECLARE_LIST - generate the access functions for an unsorted single-linked
 * list.
 *
 * O(1): _add_head, _add_tail, _add_after, _pop, _first, _next, _count
 * O(n): _del (must walk the list to find the predecessor), _member
 * Not available: _last/_prev (no back pointers).
 *
 * _del and _pop reset the removed item's ->next to NULL so _anywhere()
 * reports it as off-list afterwards.
 */
#define DECLARE_LIST(prefix, type, field) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->sh.first = _SLIST_LAST; \
	h->sh.last_next = &h->sh.first; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, &h->sh.first, &item->field.si); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct slist_item **nextp; \
	nextp = after ? &after->field.si.next : &h->sh.first; \
	typesafe_list_add(&h->sh, nextp, &item->field.si); \
} \
/* TODO: del_hint */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct slist_item **iter = &h->sh.first; \
	while (*iter != _SLIST_LAST && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (*iter == _SLIST_LAST) \
		return NULL; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	if (item->field.si.next == _SLIST_LAST) \
		h->sh.last_next = iter; \
	item->field.si.next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct slist_item *sitem = h->sh.first; \
	if (sitem == _SLIST_LAST) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	if (h->sh.first == _SLIST_LAST) \
		h->sh.last_next = &h->sh.first; \
	sitem->next = NULL; \
	return container_of(sitem, type, field.si); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
		struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
	/* an empty head's last_next points into the *other* head after the \
	 * struct copy above - repoint it at its own ->first */ \
	if (a->sh.last_next == &b->sh.first) \
		a->sh.last_next = &a->sh.first; \
	if (b->sh.last_next == &a->sh.first) \
		b->sh.last_next = &b->sh.first; \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	if (h->sh.first != _SLIST_LAST) \
		return container_of(h->sh.first, type, field.si); \
	return NULL; \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct slist_item *sitem = &item->field.si; \
	if (sitem->next != _SLIST_LAST) \
		return container_of(sitem->next, type, field.si); \
	return NULL; \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct slist_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	if (sitem->next != _SLIST_LAST) \
		return container_of(sitem->next, type, field.si); \
	return NULL; \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
macro_pure bool prefix ## _anywhere(const type *item) \
{ \
	return item->field.si.next != NULL; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
		const type *item) \
{ \
	return typesafe_list_member(&h->sh, &item->field.si); \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
296
/* don't use these structs directly */
struct dlist_item {
	/* NULL/NULL when the item is not on any list */
	struct dlist_item *next;
	struct dlist_item *prev;
};

struct dlist_head {
	/* sentinel node; next/prev point back at hitem itself when the list
	 * is empty, so the ring is never NULL-terminated */
	struct dlist_item hitem;
	size_t count;
};
307
308 static inline void typesafe_dlist_add(struct dlist_head *head,
309 struct dlist_item *prev, struct dlist_item *item)
310 {
311 item->next = prev->next;
312 item->next->prev = item;
313 item->prev = prev;
314 prev->next = item;
315 head->count++;
316 }
317
/* exchange the contents of two dlists.
 * A plain struct swap is not enough here: the sentinel "hitem" is itself
 * linked into the ring, so the first/last items' prev/next pointers must be
 * re-pointed at the new owner's sentinel (or, for an empty list, the
 * sentinel must point at itself).
 */
static inline void typesafe_dlist_swap_all(struct dlist_head *a,
		struct dlist_head *b)
{
	/* keep a copy of a's links; they are overwritten below */
	struct dlist_head tmp = *a;

	a->count = b->count;
	if (a->count) {
		a->hitem.next = b->hitem.next;
		a->hitem.prev = b->hitem.prev;
		/* relink neighbours back to a's own sentinel */
		a->hitem.next->prev = &a->hitem;
		a->hitem.prev->next = &a->hitem;
	} else {
		/* empty ring: sentinel points at itself */
		a->hitem.next = &a->hitem;
		a->hitem.prev = &a->hitem;
	}

	b->count = tmp.count;
	if (b->count) {
		b->hitem.next = tmp.hitem.next;
		b->hitem.prev = tmp.hitem.prev;
		b->hitem.next->prev = &b->hitem;
		b->hitem.prev->next = &b->hitem;
	} else {
		b->hitem.next = &b->hitem;
		b->hitem.prev = &b->hitem;
	}
}
345
/* O(n) membership check, implemented in typesafe.c */
extern bool typesafe_dlist_member(const struct dlist_head *head,
				  const struct dlist_item *item);

/* double-linked list, for fast item deletion
 */
#define PREDECL_DLIST(prefix) \
struct prefix ## _head { struct dlist_head dh; }; \
struct prefix ## _item { struct dlist_item di; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* static initializer for an empty dlist - the sentinel's next/prev must
 * point at itself (positional init: { next, prev }) */
#define INIT_DLIST(var) { .dh = { \
	.hitem = { &var.dh.hitem, &var.dh.hitem }, }, }
358
/* DECLARE_DLIST - generate the access functions for a double-linked list.
 *
 * O(1): _add_head, _add_tail, _add_after, _del, _pop, _first/_next,
 *       _last/_prev, _count
 * O(n): _member
 * Supports reverse iteration (frr_rev_each) via _last/_prev.
 *
 * _del and _pop NULL out the removed item's prev/next, which is what
 * _anywhere() checks.
 */
#define DECLARE_DLIST(prefix, type, field) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->dh.hitem.prev = &h->dh.hitem; \
	h->dh.hitem.next = &h->dh.hitem; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, &h->dh.hitem, &item->field.di); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, h->dh.hitem.prev, &item->field.di); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct dlist_item *prev; \
	prev = after ? &after->field.di : &h->dh.hitem; \
	typesafe_dlist_add(&h->dh, prev, &item->field.di); \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct dlist_item *ditem = &item->field.di; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
		struct prefix##_head *b) \
{ \
	typesafe_dlist_swap_all(&a->dh, &b->dh); \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	const struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	if (ditem->next == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem->next, type, field.di); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure const type *prefix ## _const_last(const struct prefix##_head *h) \
{ \
	const struct dlist_item *ditem = h->dh.hitem.prev; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure const type *prefix ## _const_prev(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	if (ditem->prev == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem->prev, type, field.di); \
} \
TYPESAFE_LAST_PREV(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure type *prefix ## _prev_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _prev(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->dh.count; \
} \
macro_pure bool prefix ## _anywhere(const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	return ditem->next && ditem->prev; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
		const type *item) \
{ \
	return typesafe_dlist_member(&h->dh, &item->field.di); \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
470
/* note: heap currently caps out at 4G items */

/* n-ary heap, stored in a resizable array of item pointers */
#define HEAP_NARY 8U
typedef uint32_t heap_index_i;

struct heap_item {
	/* position of this item in heap_head.array; kept up to date by the
	 * pullup/pushdown helpers so _del/_next can locate items in O(1) */
	uint32_t index;
};

struct heap_head {
	struct heap_item **array;
	/* arraysz = allocated slots, count = slots in use */
	uint32_t arraysz, count;
};

/* grow when the array is full; shrink when roughly half (plus some slack)
 * is unused.  ("TRESH" [sic] is the established spelling in this API.) */
#define HEAP_RESIZE_TRESH_UP(h) \
	(h->hh.count + 1 >= h->hh.arraysz)
#define HEAP_RESIZE_TRESH_DN(h) \
	(h->hh.count == 0 || \
		h->hh.arraysz - h->hh.count > (h->hh.count + 1024) / 2)

#define PREDECL_HEAP(prefix) \
struct prefix ## _head { struct heap_head hh; }; \
struct prefix ## _item { struct heap_item hi; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* all-zero is a valid empty heap */
#define INIT_HEAP(var) { }
497
/* DECLARE_HEAP - generate the access functions for an n-ary heap.
 * _first/_pop return the smallest item (per cmpfn); _next iterates in array
 * order, NOT sorted order.  _add never rejects duplicates (returns NULL).
 */
#define DECLARE_HEAP(prefix, type, field, cmpfn) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	memset(h, 0, sizeof(*h)); \
} \
/* adapter: compare two embedded heap_items via the user's cmpfn */ \
macro_inline int prefix ## __cmp(const struct heap_item *a, \
		const struct heap_item *b) \
{ \
	return cmpfn(container_of(a, type, field.hi), \
			container_of(b, type, field.hi)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	if (HEAP_RESIZE_TRESH_UP(h)) \
		typesafe_heap_resize(&h->hh, true); \
	typesafe_heap_pullup(&h->hh, h->hh.count, &item->field.hi, \
			prefix ## __cmp); \
	h->hh.count++; \
	return NULL; \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct heap_item *other; \
	uint32_t index = item->field.hi.index; \
	assert(h->hh.array[index] == &item->field.hi); \
	h->hh.count--; \
	/* move the last array entry into the vacated slot, then restore \
	 * the heap property in whichever direction it was violated */ \
	other = h->hh.array[h->hh.count]; \
	if (cmpfn(container_of(other, type, field.hi), item) < 0) \
		typesafe_heap_pullup(&h->hh, index, other, prefix ## __cmp); \
	else \
		typesafe_heap_pushdown(&h->hh, index, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h)) \
		typesafe_heap_resize(&h->hh, false); \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct heap_item *hitem, *other; \
	if (h->hh.count == 0) \
		return NULL; \
	hitem = h->hh.array[0]; \
	h->hh.count--; \
	other = h->hh.array[h->hh.count]; \
	typesafe_heap_pushdown(&h->hh, 0, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h)) \
		typesafe_heap_resize(&h->hh, false); \
	return container_of(hitem, type, field.hi); \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	if (h->hh.count == 0) \
		return NULL; \
	return container_of(h->hh.array[0], type, field.hi); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	uint32_t idx = item->field.hi.index + 1; \
	if (idx >= h->hh.count) \
		return NULL; \
	return container_of(h->hh.array[idx], type, field.hi); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
		const type *item) \
{ \
	uint32_t idx = item->field.hi.index; \
	if (idx >= h->hh.count) \
		return false; \
	return h->hh.array[idx] == &item->field.hi; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
587
/* heap internals, implemented in typesafe.c.
 * pullup/pushdown place "item" at "index" and move it toward the root /
 * toward the leaves (respectively) until the heap property holds again.
 * NOTE(review): exact resize policy lives in typesafe.c - not visible here.
 */
extern void typesafe_heap_resize(struct heap_head *head, bool grow);
extern void typesafe_heap_pushdown(struct heap_head *head, uint32_t index,
		struct heap_item *item,
		int (*cmpfn)(const struct heap_item *a,
				const struct heap_item *b));
extern void typesafe_heap_pullup(struct heap_head *head, uint32_t index,
		struct heap_item *item,
		int (*cmpfn)(const struct heap_item *a,
				const struct heap_item *b));
597
/* single-linked list, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
struct ssort_item {
	struct ssort_item *next;
};

struct ssort_head {
	/* NULL-terminated chain, kept in ascending cmpfn order */
	struct ssort_item *first;
	size_t count;
};

/* use as:
 *
 * PREDECL_SORTLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SORTLIST(namelist, struct name, nlitem)
 */
#define _PREDECL_SORTLIST(prefix) \
struct prefix ## _head { struct ssort_head sh; }; \
struct prefix ## _item { struct ssort_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* all-zero is a valid empty sortlist */
#define INIT_SORTLIST_UNIQ(var) { }
#define INIT_SORTLIST_NONUNIQ(var) { }

#define PREDECL_SORTLIST_UNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
#define PREDECL_SORTLIST_NONUNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
632
/* _DECLARE_SORTLIST - shared implementation for sorted single-linked lists.
 * cmpfn_uq is used for insert/delete ordering (the NONUNIQ variant passes a
 * pointer-tiebreaking wrapper there); cmpfn_nuq is the user's comparator,
 * used for find_lt/find_gteq.  All mutating/search operations are O(n).
 */
#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
/* insert in sorted position; returns an existing equal item (and does NOT \
 * insert) if cmpfn_uq reports a duplicate, NULL on successful insert */ \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **np = &h->sh.first; \
	int c = 1; /* "1" so an empty list falls through to the insert */ \
	while (*np && (c = cmpfn_uq( \
			container_of(*np, type, field.si), item)) < 0) \
		np = &(*np)->next; \
	if (c == 0) \
		return container_of(*np, type, field.si); \
	item->field.si.next = *np; \
	*np = &item->field.si; \
	h->sh.count++; \
	return NULL; \
} \
macro_inline const type *prefix ## _const_find_gteq( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline const type *prefix ## _const_find_lt( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct ssort_item *prev = NULL, *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = (prev = sitem)->next; \
	return container_of_null(prev, type, field.si); \
} \
TYPESAFE_FIND_CMP(prefix, type) \
/* TODO: del_hint */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **iter = &h->sh.first; \
	while (*iter && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (!*iter) \
		return NULL; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	item->field.si.next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	if (!sitem) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	return container_of(sitem, type, field.si); \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct ssort_item *sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
725
/* sorted list with unique values: _add rejects duplicates (returns the
 * existing item); _find does an exact O(n) lookup */
#define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn) \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn); \
\
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	/* walked past the insert position without an exact match */ \
	if (!sitem || cmpval > 0) \
		return NULL; \
	return container_of(sitem, type, field.si); \
} \
TYPESAFE_FIND(prefix, type) \
TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
MACRO_REQUIRE_SEMICOLON() /* end */
744
/* sorted list allowing equal values.  Ties from the user's cmpfn are broken
 * by item address so every item still has a distinct ordering position and
 * _add/_del can use the same "stop on 0" logic as the UNIQ variant. */
#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn) \
macro_inline int _ ## prefix ## _cmp(const type *a, const type *b) \
{ \
	int cmpval = cmpfn(a, b); \
	if (cmpval) \
		return cmpval; \
	/* tiebreak by pointer value - 0 only for the very same item */ \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
_DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp); \
TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
MACRO_REQUIRE_SEMICOLON() /* end */
760
761
/* hash, "sorted" by hash value
 */

/* don't use these structs directly */
struct thash_item {
	/* singly-linked bucket chain, kept sorted by hashval */
	struct thash_item *next;
	uint32_t hashval;
};

struct thash_head {
	struct thash_item **entries;
	uint32_t count;

	/* tabshift = number of bits used to index "entries", PLUS ONE;
	 * 0 means "no table allocated".  This +1 encoding is why
	 * _HASH_SIZE shifts back down by 1 and _HASH_KEY uses 33 - n. */
	uint8_t tabshift;
	uint8_t minshift, maxshift;
};

#define _HASH_SIZE(tabshift) \
	((1U << (tabshift)) >> 1)
#define HASH_SIZE(head) \
	_HASH_SIZE((head).tabshift)
/* bucket index = topmost (tabshift - 1) bits of the 32-bit hash value.
 * Only valid for tabshift >= 1 (a shift by 33 would be UB) - callers must
 * check ->tabshift != 0 first. */
#define _HASH_KEY(tabshift, val) \
	((val) >> (33 - (tabshift)))
#define HASH_KEY(head, val) \
	_HASH_KEY((head).tabshift, val)
#define HASH_GROW_THRESHOLD(head) \
	((head).count >= HASH_SIZE(head))
#define HASH_SHRINK_THRESHOLD(head) \
	((head).count <= (HASH_SIZE(head) - 1) / 2)

/* table resizing, implemented in typesafe.c */
extern void typesafe_hash_grow(struct thash_head *head);
extern void typesafe_hash_shrink(struct thash_head *head);

/* use as:
 *
 * PREDECL_HASH(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
 */
#define PREDECL_HASH(prefix) \
struct prefix ## _head { struct thash_head hh; }; \
struct prefix ## _item { struct thash_item hi; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* all-zero (tabshift == 0, no table) is a valid empty hash */
#define INIT_HASH(var) { }
809
/* DECLARE_HASH - generate the access functions for a hash table with
 * hashval-sorted bucket chains.  _add returns an existing equal item instead
 * of inserting; the table grows/shrinks automatically (bounded by
 * minshift/maxshift in typesafe.c).  Iteration order is by hash value.
 */
#define DECLARE_HASH(prefix, type, field, cmpfn, hashfn) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	/* drop minshift so shrink can free the table entirely */ \
	h->hh.minshift = 0; \
	typesafe_hash_shrink(&h->hh); \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	h->hh.count++; \
	if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh)) \
		typesafe_hash_grow(&h->hh); \
\
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	item->field.hi.hashval = hval; \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	/* check the whole equal-hashval run for a duplicate - with a hash \
	 * collision the match need not be the first item of the run */ \
	while (*np && (*np)->hashval == hval) { \
		if (cmpfn(container_of(*np, type, field.hi), item) == 0) { \
			h->hh.count--; \
			return container_of(*np, type, field.hi); \
		} \
		np = &(*np)->next; \
	} \
	item->field.hi.next = *np; \
	*np = &item->field.hi; \
	return NULL; \
} \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
		const type *item) \
{ \
	if (!h->hh.tabshift) \
		return NULL; \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	const struct thash_item *hitem = h->hh.entries[hbits]; \
	while (hitem && hitem->hashval < hval) \
		hitem = hitem->next; \
	while (hitem && hitem->hashval == hval) { \
		if (!cmpfn(container_of(hitem, type, field.hi), item)) \
			return container_of(hitem, type, field.hi); \
		hitem = hitem->next; \
	} \
	return NULL; \
} \
TYPESAFE_FIND(prefix, type) \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	if (!h->hh.tabshift) \
		return NULL; \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	while (*np && *np != &item->field.hi && (*np)->hashval == hval) \
		np = &(*np)->next; \
	if (*np != &item->field.hi) \
		return NULL; \
	*np = item->field.hi.next; \
	item->field.hi.next = NULL; \
	h->hh.count--; \
	if (HASH_SHRINK_THRESHOLD(h->hh)) \
		typesafe_hash_shrink(&h->hh); \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) { \
			struct thash_item *hitem = h->hh.entries[i]; \
			h->hh.entries[i] = hitem->next; \
			h->hh.count--; \
			hitem->next = NULL; \
			if (HASH_SHRINK_THRESHOLD(h->hh)) \
				typesafe_hash_shrink(&h->hh); \
			return container_of(hitem, type, field.hi); \
		} \
	return NULL; \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct thash_item *hitem = &item->field.hi; \
	if (hitem->next) \
		return container_of(hitem->next, type, field.hi); \
	uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1; \
	for (; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
		const type *item) \
{ \
	/* guard like _find/_del: empty table has no entries array and \
	 * HASH_KEY with tabshift == 0 would shift by 33 (UB) */ \
	if (!h->hh.tabshift) \
		return false; \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	const struct thash_item *hitem = h->hh.entries[hbits]; \
	while (hitem && hitem->hashval < hval) \
		hitem = hitem->next; \
	for (; hitem && hitem->hashval == hval; hitem = hitem->next) \
		if (hitem == &item->field.hi) \
			return true; \
	return false; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
939
/* skiplist, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
#define SKIPLIST_MAXDEPTH 16
#define SKIPLIST_EMBED 4
#define SKIPLIST_OVERFLOW (SKIPLIST_EMBED - 1)

/* the first SKIPLIST_OVERFLOW levels are embedded in the item itself; for
 * taller nodes, next[SKIPLIST_OVERFLOW] carries a pointer to an overflow
 * block, tagged by setting its low bit (cf. the "| 1" in _init below). */
struct sskip_item {
	struct sskip_item *next[SKIPLIST_EMBED];
};

struct sskip_overflow {
	struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
};

struct sskip_head {
	struct sskip_item hitem;
	/* the head always has full depth; its overflow levels live inline
	 * here instead of in a separately allocated sskip_overflow */
	struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
	size_t count;
};

/* use as:
 *
 * PREDECL_SKIPLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SKIPLIST(namelist, struct name, nlitem, cmpfunc)
 */
#define _PREDECL_SKIPLIST(prefix) \
struct prefix ## _head { struct sskip_head sh; }; \
struct prefix ## _item { struct sskip_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_SKIPLIST_UNIQ(var) { }
#define INIT_SKIPLIST_NONUNIQ(var) { }
978
/* accessors shared by the UNIQ and NONUNIQ skiplist variants.
 *
 * cmpfn_nuq: comparison used for ordered lookups; may report two distinct
 *            items as equal (non-unique keys).
 * cmpfn_uq:  comparison used by add/del; must provide a total order so one
 *            specific item can be located.
 */
#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq)            \
                                                                               \
/* initialize a head before first use */                                       \
macro_inline void prefix ## _init(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
	/* tagged pointer (low bit set) to the embedded overflow array,        \
	 * providing anchor levels >= SKIPLIST_OVERFLOW                        \
	 */                                                                    \
	h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *)            \
		((uintptr_t)h->sh.overflow | 1);                               \
}                                                                              \
/* tear down a head.  NOTE(review): only zeroes the head — items are not      \
 * unlinked or freed, so the list should already be empty here               \
 */                                                                            \
macro_inline void prefix ## _fini(struct prefix##_head *h)                     \
{                                                                              \
	memset(h, 0, sizeof(*h));                                              \
}                                                                              \
/* insert item in sorted position; the container_of_null() return suggests     \
 * NULL on success and a colliding pre-existing item otherwise (UNIQ           \
 * variant) — confirm in typesafe.c                                            \
 */                                                                            \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct sskip_item *si;                                                 \
	si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq);         \
	return container_of_null(si, type, field.si);                          \
}                                                                              \
/* first item comparing greater-than-or-equal to *item, NULL if none */        \
macro_inline const type *prefix ## _const_find_gteq(                           \
		const struct prefix##_head *h, const type *item)               \
{                                                                              \
	const struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh,   \
			&item->field.si, cmpfn_nuq);                           \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
/* last item comparing less-than *item, NULL if none */                        \
macro_inline const type *prefix ## _const_find_lt(                             \
		const struct prefix##_head *h, const type *item)               \
{                                                                              \
	const struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh,     \
			&item->field.si, cmpfn_nuq);                           \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
/* generate the non-const find_gteq()/find_lt() wrappers */                    \
TYPESAFE_FIND_CMP(prefix, type)                                                \
/* remove item; returns it via container_of_null (NULL if not found) */        \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item)         \
{                                                                              \
	struct sskip_item *sitem = typesafe_skiplist_del(&h->sh,               \
			&item->field.si, cmpfn_uq);                            \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
/* remove and return the lowest item (priority-queue pop), NULL if empty */    \
macro_inline type *prefix ## _pop(struct prefix##_head *h)                     \
{                                                                              \
	struct sskip_item *sitem = typesafe_skiplist_pop(&h->sh);              \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
/* exchange the entire contents of two lists */                                \
macro_inline void prefix ## _swap_all(struct prefix##_head *a,                 \
		struct prefix##_head *b)                                       \
{                                                                              \
	struct prefix##_head tmp = *a;                                         \
	*a = *b;                                                               \
	*b = tmp;                                                              \
	/* the copied tagged overflow pointers still reference the other       \
	 * head's embedded array; re-point each at its own storage             \
	 */                                                                    \
	a->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *)            \
		((uintptr_t)a->sh.overflow | 1);                               \
	b->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *)            \
		((uintptr_t)b->sh.overflow | 1);                               \
}                                                                              \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h)   \
{                                                                              \
	/* level 0 links every item in sorted order */                         \
	const struct sskip_item *first = h->sh.hitem.next[0];                  \
	return container_of_null(first, type, field.si);                       \
}                                                                              \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h,    \
		const type *item)                                              \
{                                                                              \
	const struct sskip_item *next = item->field.si.next[0];                \
	return container_of_null(next, type, field.si);                        \
}                                                                              \
/* generate the non-const first()/next() wrappers */                           \
TYPESAFE_FIRST_NEXT(prefix, type)                                              \
/* like _next() but tolerates item == NULL; used by frr_each_safe */           \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item)     \
{                                                                              \
	struct sskip_item *next;                                               \
	next = item ? item->field.si.next[0] : NULL;                           \
	return container_of_null(next, type, field.si);                        \
}                                                                              \
/* O(1) item count */                                                          \
macro_pure size_t prefix ## _count(const struct prefix##_head *h)              \
{                                                                              \
	return h->sh.count;                                                    \
}                                                                              \
MACRO_REQUIRE_SEMICOLON() /* end */
1057
/* unique-key variant: the user's cmpfn must never consider two distinct
 * list members equal, so it can serve both as lookup and insert order
 */
#define PREDECL_SKIPLIST_UNIQ(prefix)                                          \
	_PREDECL_SKIPLIST(prefix)
#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn)                      \
                                                                               \
/* adapt the user's typed cmpfn to the untyped sskip_item signature */         \
macro_inline int prefix ## __cmp(const struct sskip_item *a,                   \
		const struct sskip_item *b)                                    \
{                                                                              \
	return cmpfn(container_of(a, type, field.si),                          \
			container_of(b, type, field.si));                      \
}                                                                              \
/* exact-match lookup; only provided here since it needs unique keys */        \
macro_inline const type *prefix ## _const_find(const struct prefix##_head *h,  \
		const type *item)                                              \
{                                                                              \
	const struct sskip_item *sitem = typesafe_skiplist_find(&h->sh,        \
			&item->field.si, &prefix ## __cmp);                    \
	return container_of_null(sitem, type, field.si);                       \
}                                                                              \
/* generate the non-const find() and member() wrappers */                      \
TYPESAFE_FIND(prefix, type)                                                    \
TYPESAFE_MEMBER_VIA_FIND(prefix, type)                                         \
                                                                               \
/* keys are unique, so the same cmpfn works for lookup and add/del */          \
_DECLARE_SKIPLIST(prefix, type, field,                                         \
		prefix ## __cmp, prefix ## __cmp);                             \
MACRO_REQUIRE_SEMICOLON() /* end */
1081
/* non-unique-key variant: the user's cmpfn may consider distinct members
 * equal, so add/del get a wrapped comparison that breaks ties by item
 * address to establish the total order the implementation requires
 */
#define PREDECL_SKIPLIST_NONUNIQ(prefix)                                       \
	_PREDECL_SKIPLIST(prefix)
#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn)                   \
                                                                               \
/* adapt the user's typed cmpfn to the untyped sskip_item signature */         \
macro_inline int prefix ## __cmp(const struct sskip_item *a,                   \
		const struct sskip_item *b)                                    \
{                                                                              \
	return cmpfn(container_of(a, type, field.si),                          \
			container_of(b, type, field.si));                      \
}                                                                              \
/* total-order comparison for add/del: user cmpfn first, then break ties       \
 * by item address.  The addresses are compared as uintptr_t because           \
 * relational comparison of pointers into different objects is undefined       \
 * behavior (C11 6.5.8p5 / CERT ARR36-C); the integer comparison is            \
 * well-formed and yields the same result on flat-address targets.             \
 */                                                                            \
macro_inline int prefix ## __cmp_uq(const struct sskip_item *a,                \
		const struct sskip_item *b)                                    \
{                                                                              \
	int cmpval = cmpfn(container_of(a, type, field.si),                    \
			container_of(b, type, field.si));                      \
	if (cmpval)                                                            \
		return cmpval;                                                 \
	if ((uintptr_t)a < (uintptr_t)b)                                       \
		return -1;                                                     \
	if ((uintptr_t)a > (uintptr_t)b)                                       \
		return 1;                                                      \
	return 0;                                                              \
}                                                                              \
                                                                               \
_DECLARE_SKIPLIST(prefix, type, field,                                         \
		prefix ## __cmp, prefix ## __cmp_uq);                          \
/* generate the member() wrapper on top of find_gteq() */                      \
TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn)                             \
MACRO_REQUIRE_SEMICOLON() /* end */
1110
1111
/* out-of-line skiplist implementation (presumably lib/typesafe.c).  Do not
 * call these directly — use the typed prefix##_* wrappers generated by
 * DECLARE_SKIPLIST_UNIQ / DECLARE_SKIPLIST_NONUNIQ above.
 */
extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
		struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_gteq(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern const struct sskip_item *typesafe_skiplist_find_lt(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_del(
		struct sskip_head *head, struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_pop(struct sskip_head *head);
1136
1137 #ifdef __cplusplus
1138 }
1139 #endif
1140
1141 /* this needs to stay at the end because both files include each other.
1142 * the resolved order is typesafe.h before typerb.h
1143 */
1144 #include "typerb.h"
1145
1146 #endif /* _FRR_TYPESAFE_H */