/* lib/typesafe.h (FRR) */
1 /*
2 * Copyright (c) 2016-2019 David Lamparter, for NetDEF, Inc.
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #ifndef _FRR_TYPESAFE_H
18 #define _FRR_TYPESAFE_H
19
20 #include <stddef.h>
21 #include <stdint.h>
22 #include <stdbool.h>
23 #include <assert.h>
24 #include "compiler.h"
25
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29
/* generic iteration macros for all list-like types.
 *
 * frr_each:      plain iteration; the container must not be modified
 *                while iterating.
 * frr_each_safe: the *current* item may be deleted during iteration;
 *                the successor is saved beforehand in a hidden local
 *                named "prefix_safe" (don't nest two of these with the
 *                same prefix in the same scope.)
 * frr_each_from: resumable iteration starting at "from"; the current
 *                item may be deleted.  "from" is advanced to the
 *                successor on every step.
 */

#define frr_each(prefix, head, item) \
	for (item = prefix##_first(head); item; \
			item = prefix##_next(head, item))
#define frr_each_safe(prefix, head, item) \
	for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
			prefix##_next_safe(head, \
					(item = prefix##_first(head))); \
			item; \
			item = prefix##_safe, \
			prefix##_safe = prefix##_next_safe(head, prefix##_safe))
#define frr_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_next_safe(head, item); \
			item; \
			item = from, from = prefix##_next_safe(head, from))
46
/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 */

/* don't use these structs directly */
struct slist_item {
	struct slist_item *next;	/* NULL: end of list */
};

struct slist_head {
	/* "first" is NULL iff the list is empty.  "last_next" always
	 * points at the NULL terminating the list (&first while empty,
	 * &last_item->next otherwise), making add_tail O(1) — cf.
	 * typesafe_list_add() and INIT_LIST below.
	 */
	struct slist_item *first, **last_next;
	size_t count;			/* number of items on the list */
};
60
61 static inline void typesafe_list_add(struct slist_head *head,
62 struct slist_item **pos, struct slist_item *item)
63 {
64 item->next = *pos;
65 *pos = item;
66 if (pos == head->last_next)
67 head->last_next = &item->next;
68 head->count++;
69 }
70
/* use as:
 *
 * PREDECL_LIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_LIST(namelist, struct name, nlitem)
 */

/* declares "struct prefix_head" and "struct prefix_item" as typed
 * wrappers around the generic slist structs; the accessor functions
 * are generated by DECLARE_LIST.
 */
#define PREDECL_LIST(prefix) \
struct prefix ## _head { struct slist_head sh; }; \
struct prefix ## _item { struct slist_item si; };

/* static initializer for a list head; equivalent to prefix_init() */
#define INIT_LIST(var) { .sh = { .last_next = &var.sh.first, }, }
84
/* generates the accessor functions for a single-linked list; the
 * matching PREDECL_LIST(prefix) must be in scope.
 *
 *   _init / _fini             -- zero the head (fini frees no items)
 *   _add_head / _add_tail /
 *   _add_after                -- O(1) insertion
 *   _del                      -- O(n): walks the list to unlink item;
 *                                returns NULL if item was not found
 *   _pop                      -- O(1): unlink & return first item,
 *                                NULL if the list is empty
 *   _first / _next            -- iteration (_next_safe allows NULL item)
 *   _count                    -- number of items currently on the list
 *
 * (only change vs. previous revision: "_next" now spells its head
 * parameter "*h" like every other accessor here.)
 */
#define DECLARE_LIST(prefix, type, field) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->sh.last_next = &h->sh.first; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, &h->sh.first, &item->field.si); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct slist_item **nextp; \
	nextp = after ? &after->field.si.next : &h->sh.first; \
	typesafe_list_add(&h->sh, nextp, &item->field.si); \
} \
/* TODO: del_hint */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct slist_item **iter = &h->sh.first; \
	while (*iter && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (!*iter) \
		return NULL; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	if (!item->field.si.next) \
		h->sh.last_next = iter; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct slist_item *sitem = h->sh.first; \
	if (!sitem) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	if (h->sh.first == NULL) \
		h->sh.last_next = &h->sh.first; \
	return container_of(sitem, type, field.si); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct slist_item *sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct slist_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
/* ... */
158
/* don't use these structs directly */
struct dlist_item {
	struct dlist_item *next;
	struct dlist_item *prev;
};

struct dlist_head {
	/* sentinel element; the list is circular through it, so an
	 * empty list has hitem.next == hitem.prev == &hitem (set up by
	 * prefix_init() / INIT_DLIST.)
	 */
	struct dlist_item hitem;
	size_t count;		/* number of items, sentinel excluded */
};
169
170 static inline void typesafe_dlist_add(struct dlist_head *head,
171 struct dlist_item *prev, struct dlist_item *item)
172 {
173 item->next = prev->next;
174 item->next->prev = item;
175 item->prev = prev;
176 prev->next = item;
177 head->count++;
178 }
179
/* double-linked list, for fast item deletion
 */

/* typed head/item wrappers; accessors come from DECLARE_DLIST */
#define PREDECL_DLIST(prefix) \
struct prefix ## _head { struct dlist_head dh; }; \
struct prefix ## _item { struct dlist_item di; };

/* static initializer; links the sentinel to itself (= empty list) */
#define INIT_DLIST(var) { .dh = { \
	.hitem = { &var.dh.hitem, &var.dh.hitem }, }, }
188
/* generates the accessor functions for a double-linked list; the
 * matching PREDECL_DLIST(prefix) must be in scope.
 *
 *   _init / _fini             -- reset the head (fini frees no items)
 *   _add_head / _add_tail /
 *   _add_after                -- O(1) insertion
 *   _del                      -- O(1) unlink (this is the point of the
 *                                dlist); item's pointers are NULLed
 *   _pop                      -- O(1): unlink & return first item,
 *                                NULL if the list is empty
 *   _first / _next            -- iteration (_next_safe allows NULL item)
 *   _count                    -- number of items currently on the list
 *
 * (only change vs. previous revision: "_next" now spells its head
 * parameter "*h" like every other accessor here.)
 */
#define DECLARE_DLIST(prefix, type, field) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->dh.hitem.prev = &h->dh.hitem; \
	h->dh.hitem.next = &h->dh.hitem; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, &h->dh.hitem, &item->field.di); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, h->dh.hitem.prev, &item->field.di); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct dlist_item *prev; \
	prev = after ? &after->field.di : &h->dh.hitem; \
	typesafe_dlist_add(&h->dh, prev, &item->field.di); \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct dlist_item *ditem = &item->field.di; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	return container_of(ditem, type, field.di); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct dlist_item *ditem = &item->field.di; \
	if (ditem->next == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem->next, type, field.di); \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->dh.count; \
} \
/* ... */
260
/* note: heap currently caps out at 4G items */

#define HEAP_NARY 8U
typedef uint32_t heap_index_i;

struct heap_item {
	uint32_t index;		/* this item's slot in heap_head.array */
};

struct heap_head {
	struct heap_item **array;	/* resizable pointer array */
	uint32_t arraysz, count;	/* allocated slots / items in use */
};

/* "TRESH" (sic, threshold.)  grow when the array would be full ... */
#define HEAP_RESIZE_TRESH_UP(h) \
	(h->hh.count + 1 >= h->hh.arraysz)
/* ... shrink when empty, or when more than ~half (+512) is unused */
#define HEAP_RESIZE_TRESH_DN(h) \
	(h->hh.count == 0 || \
	 h->hh.arraysz - h->hh.count > (h->hh.count + 1024) / 2)

/* typed head/item wrappers; accessors come from DECLARE_HEAP */
#define PREDECL_HEAP(prefix) \
struct prefix ## _head { struct heap_head hh; }; \
struct prefix ## _item { struct heap_item hi; };

/* static initializer; all-zero is a valid empty heap */
#define INIT_HEAP(var) { }
286
/* generates the accessor functions for the N-ary heap; the matching
 * PREDECL_HEAP(prefix) must be in scope.
 *
 *   _add   -- insert; always returns NULL (no duplicate detection)
 *   _del   -- remove a specific item (asserts it is actually present)
 *   _pop   -- remove & return the root item (array[0]), NULL if empty
 *   _first -- peek at array[0] without removing
 *   _next  -- walks the backing array in index order, which is NOT
 *             sorted order; only _pop yields items in cmpfn order
 *   _fini  -- asserts the heap has been emptied first
 *
 * the heap invariant itself is maintained out-of-line by
 * typesafe_heap_pullup/_pushdown/_resize (declared below, defined in
 * the companion .c file — not visible in this header.)
 */
#define DECLARE_HEAP(prefix, type, field, cmpfn) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline int prefix ## __cmp(const struct heap_item *a, \
		const struct heap_item *b) \
{ \
	return cmpfn(container_of(a, type, field.hi), \
			container_of(b, type, field.hi)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	if (HEAP_RESIZE_TRESH_UP(h)) \
		typesafe_heap_resize(&h->hh, true); \
	typesafe_heap_pullup(&h->hh, h->hh.count, &item->field.hi, \
			prefix ## __cmp); \
	h->hh.count++; \
	return NULL; \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct heap_item *other; \
	uint32_t index = item->field.hi.index; \
	assert(h->hh.array[index] == &item->field.hi); \
	h->hh.count--; \
	other = h->hh.array[h->hh.count]; \
	if (cmpfn(container_of(other, type, field.hi), item) < 0) \
		typesafe_heap_pullup(&h->hh, index, other, prefix ## __cmp); \
	else \
		typesafe_heap_pushdown(&h->hh, index, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h)) \
		typesafe_heap_resize(&h->hh, false); \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct heap_item *hitem, *other; \
	if (h->hh.count == 0) \
		return NULL; \
	hitem = h->hh.array[0]; \
	h->hh.count--; \
	other = h->hh.array[h->hh.count]; \
	typesafe_heap_pushdown(&h->hh, 0, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h)) \
		typesafe_heap_resize(&h->hh, false); \
	return container_of(hitem, type, field.hi); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	if (h->hh.count == 0) \
		return NULL; \
	return container_of(h->hh.array[0], type, field.hi); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	uint32_t idx = item->field.hi.index + 1; \
	if (idx >= h->hh.count) \
		return NULL; \
	return container_of(h->hh.array[idx], type, field.hi); \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
/* ... */

/* out-of-line heap maintenance (implementation not in this header) */
extern void typesafe_heap_resize(struct heap_head *head, bool grow);
extern void typesafe_heap_pushdown(struct heap_head *head, uint32_t index,
		struct heap_item *item,
		int (*cmpfn)(const struct heap_item *a,
			     const struct heap_item *b));
extern void typesafe_heap_pullup(struct heap_head *head, uint32_t index,
		struct heap_item *item,
		int (*cmpfn)(const struct heap_item *a,
			     const struct heap_item *b));
375
/* single-linked list, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
struct ssort_item {
	struct ssort_item *next;	/* NULL: end of list */
};

struct ssort_head {
	struct ssort_item *first;	/* smallest item per cmpfn */
	size_t count;
};

/* use as:
 *
 * PREDECL_SORTLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SORTLIST(namelist, struct name, nlitem)
 */
#define _PREDECL_SORTLIST(prefix) \
struct prefix ## _head { struct ssort_head sh; }; \
struct prefix ## _item { struct ssort_item si; };

/* all-zero is a valid empty sorted list */
#define INIT_SORTLIST_UNIQ(var) { }
#define INIT_SORTLIST_NONUNIQ(var) { }

#define PREDECL_SORTLIST_UNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
#define PREDECL_SORTLIST_NONUNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
409
/* generates the accessors for a sorted single-linked list.
 *
 * two comparison functions are used:
 *   cmpfn_nuq -- key comparison only (used by _find_gteq / _find_lt)
 *   cmpfn_uq  -- uniqueness comparison (the NONUNIQ variant appends a
 *                pointer tie-break); used by _add, which returns the
 *                already-present item instead of inserting when this
 *                reports equality
 *
 * _add / _del / _find* are O(n) list walks; _pop / _first are O(1).
 */
#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **np = &h->sh.first; \
	int c = 1; \
	while (*np && (c = cmpfn_uq( \
			container_of(*np, type, field.si), item)) < 0) \
		np = &(*np)->next; \
	if (c == 0) \
		return container_of(*np, type, field.si); \
	item->field.si.next = *np; \
	*np = &item->field.si; \
	h->sh.count++; \
	return NULL; \
} \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
		const type *item) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
		const type *item) \
{ \
	struct ssort_item *prev = NULL, *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = (prev = sitem)->next; \
	return container_of_null(prev, type, field.si); \
} \
/* TODO: del_hint */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **iter = &h->sh.first; \
	while (*iter && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (!*iter) \
		return NULL; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	if (!sitem) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	return container_of(sitem, type, field.si); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item *sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
/* ... */
497
/* sorted list with unique keys: cmpfn serves for both ordering and
 * uniqueness, and an O(n) exact-match _find is generated in addition.
 */
#define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn) \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn) \
\
macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	if (!sitem || cmpval > 0) \
		return NULL; \
	return container_of(sitem, type, field.si); \
} \
/* ... */
513
/* sorted list allowing duplicate keys: ties from cmpfn are broken by
 * comparing item addresses so _add/_del can still identify the exact
 * item (no _find is generated — "equal key" is ambiguous here.)
 * NOTE(review): relational comparison of unrelated pointers is
 * formally unspecified in ISO C; this relies on a flat address space.
 */
#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn) \
macro_inline int _ ## prefix ## _cmp(const type *a, const type *b) \
{ \
	int cmpval = cmpfn(a, b); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
	_DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp) \
/* ... */
528
529
/* hash, "sorted" by hash value
 */

/* don't use these structs directly */
struct thash_item {
	struct thash_item *next;	/* bucket chain, sorted by hashval */
	uint32_t hashval;		/* full 32-bit hash of this item */
};

struct thash_head {
	struct thash_item **entries;	/* bucket array */
	uint32_t count;			/* number of items in the table */

	/* table size is encoded as a shift: 2^(tabshift - 1) buckets;
	 * tabshift == 0 means no table is allocated yet.
	 */
	uint8_t tabshift;
	uint8_t minshift, maxshift;	/* grow/shrink bounds */
};

#define _HASH_SIZE(tabshift) \
	((1U << (tabshift)) >> 1)
#define HASH_SIZE(head) \
	_HASH_SIZE((head).tabshift)
/* bucket index = topmost (tabshift - 1) bits of the hash value, so
 * chains stay sorted across grow/shrink operations
 */
#define _HASH_KEY(tabshift, val) \
	((val) >> (33 - (tabshift)))
#define HASH_KEY(head, val) \
	_HASH_KEY((head).tabshift, val)
#define HASH_GROW_THRESHOLD(head) \
	((head).count >= HASH_SIZE(head))
#define HASH_SHRINK_THRESHOLD(head) \
	((head).count <= (HASH_SIZE(head) - 1) / 2)

/* out-of-line resize helpers (implementation not in this header) */
extern void typesafe_hash_grow(struct thash_head *head);
extern void typesafe_hash_shrink(struct thash_head *head);
562
/* use as:
 *
 * PREDECL_HASH(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
 */

/* typed head/item wrappers; accessors come from DECLARE_HASH */
#define PREDECL_HASH(prefix) \
struct prefix ## _head { struct thash_head hh; }; \
struct prefix ## _item { struct thash_item hi; };

/* all-zero is a valid empty hash (table allocated on first _add) */
#define INIT_HASH(var) { }
576
/* generates the accessor functions for the hash table; the matching
 * PREDECL_HASH(prefix) must be in scope.
 *
 * items are kept in bucket chains sorted by their full hash value;
 * cmpfn is only consulted to distinguish items whose hashes collide.
 *
 *   _add   -- insert; if cmpfn reports an equal item already present,
 *             nothing is inserted and that existing item is returned
 *             (NULL on successful insert.)  grows the table as needed.
 *   _find  -- lookup by hashing the (possibly stack-local) key item
 *   _del   -- unlink by stored hashval; returns NULL if not present
 *   _pop   -- remove & return an item from the first non-empty bucket
 *             (arbitrary order); NULL when empty
 *   _first/_next -- iterate in bucket order, NOT key order
 *   _fini  -- asserts the hash has been emptied, then frees the table
 *             via typesafe_hash_shrink with minshift cleared
 */
#define DECLARE_HASH(prefix, type, field, cmpfn, hashfn) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	h->hh.minshift = 0; \
	typesafe_hash_shrink(&h->hh); \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	h->hh.count++; \
	if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh)) \
		typesafe_hash_grow(&h->hh); \
\
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	item->field.hi.hashval = hval; \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	if (*np && cmpfn(container_of(*np, type, field.hi), item) == 0) { \
		h->hh.count--; \
		return container_of(*np, type, field.hi); \
	} \
	item->field.hi.next = *np; \
	*np = &item->field.hi; \
	return NULL; \
} \
macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
{ \
	if (!h->hh.tabshift) \
		return NULL; \
	uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval); \
	struct thash_item *hitem = h->hh.entries[hbits]; \
	while (hitem && hitem->hashval < hval) \
		hitem = hitem->next; \
	while (hitem && hitem->hashval == hval) { \
		if (!cmpfn(container_of(hitem, type, field.hi), item)) \
			return container_of(hitem, type, field.hi); \
		hitem = hitem->next; \
	} \
	return NULL; \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	if (!h->hh.tabshift) \
		return NULL; \
	uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
	struct thash_item **np = &h->hh.entries[hbits]; \
	while (*np && (*np)->hashval < hval) \
		np = &(*np)->next; \
	while (*np && *np != &item->field.hi && (*np)->hashval == hval) \
		np = &(*np)->next; \
	if (*np != &item->field.hi) \
		return NULL; \
	*np = item->field.hi.next; \
	item->field.hi.next = NULL; \
	h->hh.count--; \
	if (HASH_SHRINK_THRESHOLD(h->hh)) \
		typesafe_hash_shrink(&h->hh); \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) { \
			struct thash_item *hitem = h->hh.entries[i]; \
			h->hh.entries[i] = hitem->next; \
			h->hh.count--; \
			hitem->next = NULL; \
			if (HASH_SHRINK_THRESHOLD(h->hh)) \
				typesafe_hash_shrink(&h->hh); \
			return container_of(hitem, type, field.hi); \
		} \
	return NULL; \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	uint32_t i; \
	for (i = 0; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct thash_item *hitem = &item->field.hi; \
	if (hitem->next) \
		return container_of(hitem->next, type, field.hi); \
	uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1; \
	for (; i < HASH_SIZE(h->hh); i++) \
		if (h->hh.entries[i]) \
			return container_of(h->hh.entries[i], type, field.hi); \
	return NULL; \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
/* ... */
688
/* skiplist, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
#define SKIPLIST_MAXDEPTH 16
#define SKIPLIST_EMBED 4
#define SKIPLIST_OVERFLOW (SKIPLIST_EMBED - 1)

struct sskip_item {
	/* the first SKIPLIST_OVERFLOW levels are stored inline; taller
	 * nodes keep the remaining level pointers in a separate
	 * sskip_overflow, referenced through next[SKIPLIST_OVERFLOW]
	 * with bit 0 set as a tag (see prefix_init() below for how the
	 * head does this; item handling is in the out-of-line code —
	 * confirm in the companion .c file.)
	 */
	struct sskip_item *next[SKIPLIST_EMBED];
};

struct sskip_overflow {
	struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
};

struct sskip_head {
	struct sskip_item hitem;	/* sentinel; per-level entry pointers */
	struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
	size_t count;
};
711
/* use as:
 *
 * PREDECL_SKIPLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SKIPLIST(namelist, struct name, nlitem, cmpfunc)
 */

/* typed head/item wrappers; accessors come from DECLARE_SKIPLIST_* */
#define _PREDECL_SKIPLIST(prefix) \
struct prefix ## _head { struct sskip_head sh; }; \
struct prefix ## _item { struct sskip_item si; };

/* NOTE(review): unlike the other containers, a skiplist head is NOT
 * fully set up by zero-init alone — prefix_init() also tags the
 * overflow pointer.  these initializers appear to rely on the
 * out-of-line code treating an all-zero head as empty; verify before
 * using them without calling _init().
 */
#define INIT_SKIPLIST_UNIQ(var) { }
#define INIT_SKIPLIST_NONUNIQ(var) { }
726
/* generates the accessor functions for the sorted skiplist.  the heavy
 * lifting is done out-of-line by the typesafe_skiplist_* functions
 * (declared at the end of this header); these wrappers only convert
 * between "type *" and the embedded sskip_item.
 *
 * as with the sorted list, cmpfn_nuq compares keys only (_find_gteq /
 * _find_lt) while cmpfn_uq decides uniqueness (_add returns the
 * pre-existing equal item instead of inserting; _del identifies the
 * exact item.)
 */
#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
\
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *) \
		((uintptr_t)h->sh.overflow | 1); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *si; \
	si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq); \
	return container_of_null(si, type, field.si); \
} \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
		const type *item) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh, \
			&item->field.si, cmpfn_nuq); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
		const type *item) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh, \
			&item->field.si, cmpfn_nuq); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_del(&h->sh, \
			&item->field.si, cmpfn_uq); \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_pop(&h->sh); \
	return container_of_null(sitem, type, field.si); \
} \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	struct sskip_item *first = h->sh.hitem.next[0]; \
	return container_of_null(first, type, field.si); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *next = item->field.si.next[0]; \
	return container_of_null(next, type, field.si); \
} \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct sskip_item *next; \
	next = item ? item->field.si.next[0] : NULL; \
	return container_of_null(next, type, field.si); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
/* ... */
791
/* skiplist with unique keys: one cmpfn serves for both ordering and
 * uniqueness, and an exact-match _find is generated in addition.
 */
#define PREDECL_SKIPLIST_UNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn) \
\
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	return cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
} \
macro_inline type *prefix ## _find(struct prefix##_head *h, const type *item) \
{ \
	struct sskip_item *sitem = typesafe_skiplist_find(&h->sh, \
			&item->field.si, &prefix ## __cmp); \
	return container_of_null(sitem, type, field.si); \
} \
\
_DECLARE_SKIPLIST(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp) \
/* ... */
812
/* skiplist allowing duplicate keys: __cmp_uq breaks cmpfn ties by item
 * address (same caveat as DECLARE_SORTLIST_NONUNIQ: relational pointer
 * comparison of unrelated objects is formally unspecified in ISO C.)
 * no _find is generated — "equal key" would be ambiguous.
 */
#define PREDECL_SKIPLIST_NONUNIQ(prefix) \
	_PREDECL_SKIPLIST(prefix)
#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn) \
\
macro_inline int prefix ## __cmp(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	return cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
} \
macro_inline int prefix ## __cmp_uq(const struct sskip_item *a, \
		const struct sskip_item *b) \
{ \
	int cmpval = cmpfn(container_of(a, type, field.si), \
			container_of(b, type, field.si)); \
	if (cmpval) \
		return cmpval; \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
\
_DECLARE_SKIPLIST(prefix, type, field, \
		prefix ## __cmp, prefix ## __cmp_uq) \
/* ... */
840
841
/* out-of-line skiplist primitives (implementation not in this header.)
 * cmpfn returns <0 / 0 / >0, strcmp-style, as the callers above show.
 * _add returns an existing equal item (insert refused) or NULL;
 * _find*/_del/_pop return the affected item or NULL.
 */
extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
		struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_find(struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_find_gteq(struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_find_lt(struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_del(
		struct sskip_head *head, struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
extern struct sskip_item *typesafe_skiplist_pop(struct sskip_head *head);
863
864 #ifdef __cplusplus
865 }
866 #endif
867
868 /* this needs to stay at the end because both files include each other.
869 * the resolved order is typesafe.h before typerb.h
870 */
871 #include "typerb.h"
872
873 #endif /* _FRR_TYPESAFE_H */