/*
 * Copyright (c) 2016-2019  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
16 | ||
17 | #ifndef _FRR_TYPESAFE_H | |
18 | #define _FRR_TYPESAFE_H | |
19 | ||
20 | #include <stddef.h> | |
21 | #include <stdint.h> | |
22 | #include <stdbool.h> | |
abd71baa DL |
23 | #include "compiler.h" |
24 | ||
eea3c899 RW |
25 | #ifdef __cplusplus |
26 | extern "C" { | |
27 | #endif | |
28 | ||
/* generic iteration macros for all list-like container types.
 *
 * frr_each:      plain forward iteration; the container must not be
 *                modified inside the loop body.
 * frr_each_safe: keeps a one-item lookahead (in a hidden prefix##_safe
 *                variable), so the *current* item may be deleted.
 * frr_each_safe works by fetching the successor before the body runs.
 * frr_each_from: resumable variant; "from" always holds the successor
 *                and is updated as the loop runs, so deleting "item" is
 *                safe and iteration can be restarted from "from".
 */

#define frr_each(prefix, head, item) \
	for (item = prefix##_first(head); item; \
			item = prefix##_next(head, item))
#define frr_each_safe(prefix, head, item) \
	for (typeof(prefix##_next_safe(head, NULL)) prefix##_safe = \
			prefix##_next_safe(head, \
					(item = prefix##_first(head))); \
			item; \
			item = prefix##_safe, \
			prefix##_safe = prefix##_next_safe(head, prefix##_safe))
#define frr_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_next_safe(head, item); \
			item; \
			item = from, from = prefix##_next_safe(head, from))
/* reverse-direction counterparts of frr_each / frr_each_safe /
 * frr_each_from.  Only containers that provide prefix##_last/_prev
 * (e.g. DLIST; see TYPESAFE_LAST_PREV below) support these.
 */

#define frr_rev_each(prefix, head, item) \
	for (item = prefix##_last(head); item; \
			item = prefix##_prev(head, item))
#define frr_rev_each_safe(prefix, head, item) \
	for (typeof(prefix##_prev_safe(head, NULL)) prefix##_safe = \
			prefix##_prev_safe(head, \
					(item = prefix##_last(head))); \
			item; \
			item = prefix##_safe, \
			prefix##_safe = prefix##_prev_safe(head, prefix##_safe))
#define frr_rev_each_from(prefix, head, item, from) \
	for (item = from, from = prefix##_prev_safe(head, item); \
			item; \
			item = from, from = prefix##_prev_safe(head, from))
/* non-const variants.  these wrappers are the same for all the types, so
 * bundle them together here.  They cast away const from the _const_*
 * accessors that each DECLARE_* macro generates.
 */
#define TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _first(struct prefix##_head *h) \
{ \
	return (type *)prefix ## _const_first(h); \
} \
macro_pure type *prefix ## _next(struct prefix##_head *h, type *item) \
{ \
	return (type *)prefix ## _const_next(h, item); \
} \
/* ... */
/* non-const wrappers for _last/_prev, for containers that support
 * reverse iteration (cf. frr_rev_each above).
 */
#define TYPESAFE_LAST_PREV(prefix, type) \
macro_pure type *prefix ## _last(struct prefix##_head *h) \
{ \
	return (type *)prefix ## _const_last(h); \
} \
macro_pure type *prefix ## _prev(struct prefix##_head *h, type *item) \
{ \
	return (type *)prefix ## _const_prev(h, item); \
} \
/* ... */
/* non-const wrapper for _find (exact-match lookup on sorted/hashed
 * containers).
 */
#define TYPESAFE_FIND(prefix, type) \
macro_inline type *prefix ## _find(struct prefix##_head *h, \
		const type *item) \
{ \
	return (type *)prefix ## _const_find(h, item); \
} \
/* ... */
/* non-const wrappers for the ordered lookups:  _find_lt (last item
 * strictly before "item") and _find_gteq (first item at-or-after
 * "item").
 */
#define TYPESAFE_FIND_CMP(prefix, type) \
macro_inline type *prefix ## _find_lt(struct prefix##_head *h, \
		const type *item) \
{ \
	return (type *)prefix ## _const_find_lt(h, item); \
} \
macro_inline type *prefix ## _find_gteq(struct prefix##_head *h, \
		const type *item) \
{ \
	return (type *)prefix ## _const_find_gteq(h, item); \
} \
/* ... */
/* *_member via find - when there is no better membership check than find().
 * Only valid for unique containers:  find() must return the identical
 * pointer if (and only if) the item is in the container.
 */
#define TYPESAFE_MEMBER_VIA_FIND(prefix, type) \
macro_inline bool prefix ## _member(struct prefix##_head *h, \
		const type *item) \
{ \
	return item == prefix ## _const_find(h, item); \
} \
/* ... */
/* *_member via find_gteq - same for non-unique containers.  Several
 * items may compare equal to "item", so walk forward from the first
 * at-or-after position until the pointer matches or the compare result
 * goes past "item".
 */
#define TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
macro_inline bool prefix ## _member(struct prefix##_head *h, \
		const type *item) \
{ \
	const type *iter; \
	for (iter = prefix ## _const_find_gteq(h, item); iter; \
			iter = prefix ## _const_next(h, iter)) { \
		if (iter == item) \
			return true; \
		if (cmpfn(iter, item) > 0) \
			break; \
	} \
	return false; \
} \
/* ... */
/* SWAP_ALL_SIMPLE = for containers where the items don't point back to the
 * head *AND* the head doesn't point to itself (= everything except LIST,
 * DLIST and SKIPLIST), just switch out the entire head
 */
#define TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
		struct prefix##_head *b) \
{ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
} \
/* ... */
/* single-linked list, unsorted/arbitrary.
 * can be used as queue with add_tail / pop
 */

/* don't use these structs directly */
struct slist_item {
	struct slist_item *next;
};

struct slist_head {
	/* last_next points at the terminating ->next slot, giving O(1)
	 * append without a back pointer on each item.
	 */
	struct slist_item *first, **last_next;
	size_t count;
};

/* this replaces NULL as the value for ->next on the last item. */
extern struct slist_item typesafe_slist_sentinel;
#define _SLIST_LAST &typesafe_slist_sentinel

/* splice "item" into the list at "pos", which is either &head->first or
 * some item's &->next slot.  Records up front whether we're inserting at
 * the tail so last_next can be advanced afterwards.
 */
static inline void typesafe_list_add(struct slist_head *head,
		struct slist_item **pos, struct slist_item *item)
{
	bool at_tail = (pos == head->last_next);

	item->next = *pos;
	*pos = item;
	head->count++;
	if (at_tail)
		head->last_next = &item->next;
}

/* O(n) membership scan, implemented out-of-line (typesafe.c) */
extern bool typesafe_list_member(const struct slist_head *head,
				 const struct slist_item *item);
/* use as:
 *
 * PREDECL_LIST(namelist);
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_LIST(namelist, struct name, nlitem);
 */
#define PREDECL_LIST(prefix) \
struct prefix ## _head { struct slist_head sh; }; \
struct prefix ## _item { struct slist_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* static initializer; must self-reference for last_next */
#define INIT_LIST(var) { .sh = { .last_next = &var.sh.first, }, }
/* generates the full API for one unsorted single-linked list type.
 * "field" is the member of "type" that holds the prefix##_item.
 * add_head/add_tail/pop are O(1);  del is O(n) (must find predecessor).
 */
#define DECLARE_LIST(prefix, type, field) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->sh.first = _SLIST_LAST; \
	h->sh.last_next = &h->sh.first; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, &h->sh.first, &item->field.si); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_list_add(&h->sh, h->sh.last_next, &item->field.si); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct slist_item **nextp; \
	nextp = after ? &after->field.si.next : &h->sh.first; \
	typesafe_list_add(&h->sh, nextp, &item->field.si); \
} \
/* TODO: del_hint */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	/* O(n): walk to find the ->next slot pointing at item */ \
	struct slist_item **iter = &h->sh.first; \
	while (*iter != _SLIST_LAST && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (*iter == _SLIST_LAST) \
		return NULL; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	if (item->field.si.next == _SLIST_LAST) \
		h->sh.last_next = iter; \
	item->field.si.next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct slist_item *sitem = h->sh.first; \
	if (sitem == _SLIST_LAST) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	if (h->sh.first == _SLIST_LAST) \
		h->sh.last_next = &h->sh.first; \
	sitem->next = NULL; \
	return container_of(sitem, type, field.si); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
		struct prefix##_head *b) \
{ \
	/* can't use SWAP_ALL_SIMPLE: last_next may point into the head */ \
	struct prefix##_head tmp = *a; \
	*a = *b; \
	*b = tmp; \
	if (a->sh.last_next == &b->sh.first) \
		a->sh.last_next = &a->sh.first; \
	if (b->sh.last_next == &a->sh.first) \
		b->sh.last_next = &b->sh.first; \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	if (h->sh.first != _SLIST_LAST) \
		return container_of(h->sh.first, type, field.si); \
	return NULL; \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct slist_item *sitem = &item->field.si; \
	if (sitem->next != _SLIST_LAST) \
		return container_of(sitem->next, type, field.si); \
	return NULL; \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct slist_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	if (sitem->next != _SLIST_LAST) \
		return container_of(sitem->next, type, field.si); \
	return NULL; \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
/* works because del/pop NULL out ->next, while on-list items hold either \
 * a real next pointer or _SLIST_LAST \
 */ \
macro_pure bool prefix ## _anywhere(const type *item) \
{ \
	return item->field.si.next != NULL; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
		const type *item) \
{ \
	return typesafe_list_member(&h->sh, &item->field.si); \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
/* don't use these structs directly */
struct dlist_item {
	struct dlist_item *next;
	struct dlist_item *prev;
};

struct dlist_head {
	/* hitem doubles as both-ends anchor; the list forms a closed ring
	 * through it, so no ->next/->prev on a linked item is ever NULL
	 * (cf. comment in typesafe_dlist_add below)
	 */
	struct dlist_item hitem;
	size_t count;
};
308 | static inline void typesafe_dlist_add(struct dlist_head *head, | |
309 | struct dlist_item *prev, struct dlist_item *item) | |
310 | { | |
255a76a5 DL |
311 | /* SA on clang-11 thinks this can happen, but in reality -assuming no |
312 | * memory corruption- it can't. DLIST uses a "closed" ring, i.e. the | |
313 | * termination at the end of the list is not NULL but rather a pointer | |
314 | * back to the head. (This eliminates special-casing the first or last | |
315 | * item.) | |
316 | * | |
317 | * Sadly, can't use assert() here since the libfrr assert / xref code | |
318 | * uses typesafe lists itself... that said, if an assert tripped here | |
319 | * we'd already be way past some memory corruption, so we might as | |
320 | * well just take the SEGV. (In the presence of corruption, we'd see | |
321 | * random SEGVs from places that make no sense at all anyway, an | |
322 | * assert might actually be a red herring.) | |
323 | * | |
324 | * ("assume()" tells the compiler to produce code as if the condition | |
325 | * will always hold; it doesn't have any actual effect here, it'll | |
326 | * just SEGV out on "item->next->prev = item".) | |
327 | */ | |
328 | assume(prev->next != NULL); | |
329 | ||
fdad523b | 330 | item->next = prev->next; |
255a76a5 | 331 | item->next->prev = item; |
fdad523b DL |
332 | item->prev = prev; |
333 | prev->next = item; | |
334 | head->count++; | |
335 | } | |
336 | ||
507e0e5d DL |
337 | static inline void typesafe_dlist_swap_all(struct dlist_head *a, |
338 | struct dlist_head *b) | |
339 | { | |
340 | struct dlist_head tmp = *a; | |
341 | ||
342 | a->count = b->count; | |
343 | if (a->count) { | |
344 | a->hitem.next = b->hitem.next; | |
345 | a->hitem.prev = b->hitem.prev; | |
346 | a->hitem.next->prev = &a->hitem; | |
347 | a->hitem.prev->next = &a->hitem; | |
348 | } else { | |
349 | a->hitem.next = &a->hitem; | |
350 | a->hitem.prev = &a->hitem; | |
351 | } | |
352 | ||
353 | b->count = tmp.count; | |
354 | if (b->count) { | |
355 | b->hitem.next = tmp.hitem.next; | |
356 | b->hitem.prev = tmp.hitem.prev; | |
357 | b->hitem.next->prev = &b->hitem; | |
358 | b->hitem.prev->next = &b->hitem; | |
359 | } else { | |
360 | b->hitem.next = &b->hitem; | |
361 | b->hitem.prev = &b->hitem; | |
362 | } | |
363 | } | |
364 | ||
/* O(n) membership scan, implemented out-of-line (typesafe.c) */
extern bool typesafe_dlist_member(const struct dlist_head *head,
				  const struct dlist_item *item);

/* double-linked list, for fast item deletion
 */
#define PREDECL_DLIST(prefix) \
struct prefix ## _head { struct dlist_head dh; }; \
struct prefix ## _item { struct dlist_item di; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

/* static initializer; anchor must self-reference (closed ring) */
#define INIT_DLIST(var) { .dh = { \
	.hitem = { &var.dh.hitem, &var.dh.hitem }, }, }
/* generates the full API for one double-linked list type.  All of
 * add_head/add_tail/add_after/del/pop are O(1);  supports reverse
 * iteration (_last/_prev).
 */
#define DECLARE_DLIST(prefix, type, field) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
	h->dh.hitem.prev = &h->dh.hitem; \
	h->dh.hitem.next = &h->dh.hitem; \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _add_head(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, &h->dh.hitem, &item->field.di); \
} \
macro_inline void prefix ## _add_tail(struct prefix##_head *h, type *item) \
{ \
	typesafe_dlist_add(&h->dh, h->dh.hitem.prev, &item->field.di); \
} \
macro_inline void prefix ## _add_after(struct prefix##_head *h, \
		type *after, type *item) \
{ \
	struct dlist_item *prev; \
	prev = after ? &after->field.di : &h->dh.hitem; \
	typesafe_dlist_add(&h->dh, prev, &item->field.di); \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct dlist_item *ditem = &item->field.di; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	/* NULLed out so _anywhere() can tell the item is off-list */ \
	ditem->prev = ditem->next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	ditem->prev->next = ditem->next; \
	ditem->next->prev = ditem->prev; \
	h->dh.count--; \
	ditem->prev = ditem->next = NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_inline void prefix ## _swap_all(struct prefix##_head *a, \
		struct prefix##_head *b) \
{ \
	typesafe_dlist_swap_all(&a->dh, &b->dh); \
} \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	const struct dlist_item *ditem = h->dh.hitem.next; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	if (ditem->next == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem->next, type, field.di); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure const type *prefix ## _const_last(const struct prefix##_head *h) \
{ \
	const struct dlist_item *ditem = h->dh.hitem.prev; \
	if (ditem == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem, type, field.di); \
} \
macro_pure const type *prefix ## _const_prev(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	if (ditem->prev == &h->dh.hitem) \
		return NULL; \
	return container_of(ditem->prev, type, field.di); \
} \
TYPESAFE_LAST_PREV(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure type *prefix ## _prev_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _prev(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->dh.count; \
} \
/* relies on del/pop NULLing both pointers out */ \
macro_pure bool prefix ## _anywhere(const type *item) \
{ \
	const struct dlist_item *ditem = &item->field.di; \
	return ditem->next && ditem->prev; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
		const type *item) \
{ \
	return typesafe_dlist_member(&h->dh, &item->field.di); \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
/* note: heap currently caps out at 4G items */

#define HEAP_NARY 8U
typedef uint32_t heap_index_i;

struct heap_item {
	/* position of this item in heap_head.array;  lets del/member work
	 * without searching
	 */
	uint32_t index;
};

struct heap_head {
	struct heap_item **array;
	uint32_t arraysz, count;
};

/* grow when the array is (nearly) full ... */
#define HEAP_RESIZE_TRESH_UP(h) \
	(h->hh.count + 1 >= h->hh.arraysz)
/* ... shrink when well under half used (1024 slack damps thrashing) */
#define HEAP_RESIZE_TRESH_DN(h) \
	(h->hh.count == 0 || \
	 h->hh.arraysz - h->hh.count > (h->hh.count + 1024) / 2)

#define PREDECL_HEAP(prefix) \
struct prefix ## _head { struct heap_head hh; }; \
struct prefix ## _item { struct heap_item hi; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_HEAP(var) { }
/* generates the API for one heap type ordered by cmpfn;  usable as a
 * priority queue with add / pop.  Iteration (_first/_next) walks the
 * backing array, i.e. NOT in sorted order.
 */
#define DECLARE_HEAP(prefix, type, field, cmpfn) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	assert(h->hh.count == 0); \
	memset(h, 0, sizeof(*h)); \
} \
/* adapt user cmpfn (on "type") to the heap_item level */ \
macro_inline int prefix ## __cmp(const struct heap_item *a, \
		const struct heap_item *b) \
{ \
	return cmpfn(container_of(a, type, field.hi), \
			container_of(b, type, field.hi)); \
} \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	if (HEAP_RESIZE_TRESH_UP(h)) \
		typesafe_heap_resize(&h->hh, true); \
	typesafe_heap_pullup(&h->hh, h->hh.count, &item->field.hi, \
			prefix ## __cmp); \
	h->hh.count++; \
	return NULL; \
} \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	/* move the last array entry into item's slot, then restore heap \
	 * order in whichever direction it is out of place \
	 */ \
	struct heap_item *other; \
	uint32_t index = item->field.hi.index; \
	assert(h->hh.array[index] == &item->field.hi); \
	h->hh.count--; \
	other = h->hh.array[h->hh.count]; \
	if (cmpfn(container_of(other, type, field.hi), item) < 0) \
		typesafe_heap_pullup(&h->hh, index, other, prefix ## __cmp); \
	else \
		typesafe_heap_pushdown(&h->hh, index, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h)) \
		typesafe_heap_resize(&h->hh, false); \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct heap_item *hitem, *other; \
	if (h->hh.count == 0) \
		return NULL; \
	hitem = h->hh.array[0]; \
	h->hh.count--; \
	other = h->hh.array[h->hh.count]; \
	typesafe_heap_pushdown(&h->hh, 0, other, prefix ## __cmp); \
	if (HEAP_RESIZE_TRESH_DN(h)) \
		typesafe_heap_resize(&h->hh, false); \
	return container_of(hitem, type, field.hi); \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	if (h->hh.count == 0) \
		return NULL; \
	return container_of(h->hh.array[0], type, field.hi); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	uint32_t idx = item->field.hi.index + 1; \
	if (idx >= h->hh.count) \
		return NULL; \
	return container_of(h->hh.array[idx], type, field.hi); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	if (!item) \
		return NULL; \
	return prefix ## _next(h, item); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->hh.count; \
} \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
		const type *item) \
{ \
	uint32_t idx = item->field.hi.index; \
	if (idx >= h->hh.count) \
		return false; \
	return h->hh.array[idx] == &item->field.hi; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */

/* out-of-line heap machinery (typesafe.c) */
extern void typesafe_heap_resize(struct heap_head *head, bool grow);
extern void typesafe_heap_pushdown(struct heap_head *head, uint32_t index,
				   struct heap_item *item,
				   int (*cmpfn)(const struct heap_item *a,
						const struct heap_item *b));
extern void typesafe_heap_pullup(struct heap_head *head, uint32_t index,
				 struct heap_item *item,
				 int (*cmpfn)(const struct heap_item *a,
					      const struct heap_item *b));
/* single-linked list, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
struct ssort_item {
	struct ssort_item *next;
};

struct ssort_head {
	struct ssort_item *first;
	size_t count;
};

/* use as:
 *
 * PREDECL_SORTLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SORTLIST(namelist, struct name, nlitem)
 */
#define _PREDECL_SORTLIST(prefix) \
struct prefix ## _head { struct ssort_head sh; }; \
struct prefix ## _item { struct ssort_item si; }; \
MACRO_REQUIRE_SEMICOLON() /* end */

#define INIT_SORTLIST_UNIQ(var) { }
#define INIT_SORTLIST_NONUNIQ(var) { }

#define PREDECL_SORTLIST_UNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
#define PREDECL_SORTLIST_NONUNIQ(prefix) \
	_PREDECL_SORTLIST(prefix)
/* common backend for both sorted list variants.  cmpfn_uq is used for
 * insertion (for NONUNIQ it includes a pointer tie-breaker so duplicates
 * insert instead of bouncing);  cmpfn_nuq is the user's comparison, used
 * for lookups.  All operations are O(n) list walks.
 *
 * fix vs. previous revision: _pop now NULLs out the removed item's
 * ->next, consistent with _del here and with the unsorted LIST's pop,
 * so no stale pointer into the list is left behind.
 */
#define _DECLARE_SORTLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq) \
 \
macro_inline void prefix ## _init(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
macro_inline void prefix ## _fini(struct prefix##_head *h) \
{ \
	memset(h, 0, sizeof(*h)); \
} \
/* returns NULL on success, or the colliding item (UNIQ variant) */ \
macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **np = &h->sh.first; \
	int c = 1; \
	while (*np && (c = cmpfn_uq( \
			container_of(*np, type, field.si), item)) < 0) \
		np = &(*np)->next; \
	if (c == 0) \
		return container_of(*np, type, field.si); \
	item->field.si.next = *np; \
	*np = &item->field.si; \
	h->sh.count++; \
	return NULL; \
} \
macro_inline const type *prefix ## _const_find_gteq( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct ssort_item *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = sitem->next; \
	return container_of_null(sitem, type, field.si); \
} \
macro_inline const type *prefix ## _const_find_lt( \
		const struct prefix##_head *h, const type *item) \
{ \
	const struct ssort_item *prev = NULL, *sitem = h->sh.first; \
	int cmpval = 0; \
	while (sitem && (cmpval = cmpfn_nuq( \
			container_of(sitem, type, field.si), item)) < 0) \
		sitem = (prev = sitem)->next; \
	return container_of_null(prev, type, field.si); \
} \
TYPESAFE_FIND_CMP(prefix, type) \
/* TODO: del_hint */ \
macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item **iter = &h->sh.first; \
	while (*iter && *iter != &item->field.si) \
		iter = &(*iter)->next; \
	if (!*iter) \
		return NULL; \
	h->sh.count--; \
	*iter = item->field.si.next; \
	item->field.si.next = NULL; \
	return item; \
} \
macro_inline type *prefix ## _pop(struct prefix##_head *h) \
{ \
	struct ssort_item *sitem = h->sh.first; \
	if (!sitem) \
		return NULL; \
	h->sh.count--; \
	h->sh.first = sitem->next; \
	sitem->next = NULL; \
	return container_of(sitem, type, field.si); \
} \
TYPESAFE_SWAP_ALL_SIMPLE(prefix) \
macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
{ \
	return container_of_null(h->sh.first, type, field.si); \
} \
macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
		const type *item) \
{ \
	const struct ssort_item *sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
TYPESAFE_FIRST_NEXT(prefix, type) \
macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
{ \
	struct ssort_item *sitem; \
	if (!item) \
		return NULL; \
	sitem = &item->field.si; \
	return container_of_null(sitem->next, type, field.si); \
} \
macro_pure size_t prefix ## _count(const struct prefix##_head *h) \
{ \
	return h->sh.count; \
} \
MACRO_REQUIRE_SEMICOLON() /* end */
744 | |
745 | #define DECLARE_SORTLIST_UNIQ(prefix, type, field, cmpfn) \ | |
960b9a53 | 746 | _DECLARE_SORTLIST(prefix, type, field, cmpfn, cmpfn); \ |
daf3441d DL |
747 | \ |
748 | macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \ | |
749 | const type *item) \ | |
abd71baa | 750 | { \ |
daf3441d | 751 | const struct ssort_item *sitem = h->sh.first; \ |
abd71baa DL |
752 | int cmpval = 0; \ |
753 | while (sitem && (cmpval = cmpfn( \ | |
4d5cf6bc | 754 | container_of(sitem, type, field.si), item)) < 0) \ |
abd71baa DL |
755 | sitem = sitem->next; \ |
756 | if (!sitem || cmpval > 0) \ | |
757 | return NULL; \ | |
758 | return container_of(sitem, type, field.si); \ | |
759 | } \ | |
daf3441d | 760 | TYPESAFE_FIND(prefix, type) \ |
f45897e4 | 761 | TYPESAFE_MEMBER_VIA_FIND(prefix, type) \ |
960b9a53 | 762 | MACRO_REQUIRE_SEMICOLON() /* end */ |
abd71baa DL |
/* sorted list allowing duplicates:  insertion uses a wrapped compare
 * that tie-breaks equal items by their pointer value, so _add never
 * reports a collision.  (No _find, since "the" match is ambiguous.)
 */
#define DECLARE_SORTLIST_NONUNIQ(prefix, type, field, cmpfn) \
macro_inline int _ ## prefix ## _cmp(const type *a, const type *b) \
{ \
	int cmpval = cmpfn(a, b); \
	if (cmpval) \
		return cmpval; \
	/* pointer-value tie-break to order duplicates deterministically */ \
	if (a < b) \
		return -1; \
	if (a > b) \
		return 1; \
	return 0; \
} \
_DECLARE_SORTLIST(prefix, type, field, cmpfn, _ ## prefix ## _cmp); \
TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn) \
MACRO_REQUIRE_SEMICOLON() /* end */
779 | |
780 | ||
/* hash, "sorted" by hash value
 *
 * open hashing (separate chaining); each bucket's chain is kept ordered by
 * hashval so lookups can stop early.
 */

/* don't use these structs directly */
struct thash_item {
	struct thash_item *next;	/* next item in this bucket's chain */
	uint32_t hashval;		/* cached full 32-bit hash of the item */
};

struct thash_head {
	struct thash_item **entries;	/* bucket array; NULL while tabshift == 0 */
	uint32_t count;			/* number of items in the table */

	uint8_t tabshift;		/* table size = 2^(tabshift-1); 0 = no table */
	uint8_t minshift, maxshift;	/* resize bounds used by grow/shrink */
};
797 | ||
/* table size is 2^(tabshift - 1); tabshift == 0 yields size 0 ("no table") */
#define _HASH_SIZE(tabshift) \
	((1U << (tabshift)) >> 1)
#define HASH_SIZE(head) \
	_HASH_SIZE((head).tabshift)
/* bucket index = top (tabshift - 1) bits of the 32-bit hash value.
 * NOTE(review): the shift is 33 - tabshift, so this is only well-defined for
 * tabshift >= 2 — presumably typesafe_hash_grow() guarantees a minimum
 * tabshift; verify in typesafe.c.  Callers must not use it while tabshift==0.
 */
#define _HASH_KEY(tabshift, val) \
	((val) >> (33 - (tabshift)))
#define HASH_KEY(head, val) \
	_HASH_KEY((head).tabshift, val)
/* grow at 100% load factor, shrink below roughly 50% */
#define HASH_GROW_THRESHOLD(head) \
	((head).count >= HASH_SIZE(head))
#define HASH_SHRINK_THRESHOLD(head) \
	((head).count <= (HASH_SIZE(head) - 1) / 2)

/* out-of-line resize helpers (implemented in typesafe.c) */
extern void typesafe_hash_grow(struct thash_head *head);
extern void typesafe_hash_shrink(struct thash_head *head);
813 | ||
/* use as:
 *
 * PREDECL_HASH(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_HASH(namelist, struct name, nlitem, cmpfunc, hashfunc)
 */
/* forward declaration of the per-hash head/item wrapper types; put this in
 * the header, DECLARE_HASH where the accessors should be emitted.
 */
#define PREDECL_HASH(prefix)                                                   \
struct prefix ## _head { struct thash_head hh; };                              \
struct prefix ## _item { struct thash_item hi; };                              \
MACRO_REQUIRE_SEMICOLON() /* end */

/* static initializer; all-zero is a valid empty hash */
#define INIT_HASH(var) { }
828 | ||
/* emits the typesafe accessor functions for a PREDECL_HASH'd hash table.
 * cmpfn is an equality-style comparison (0 = equal), hashfn maps an item to
 * its 32-bit hash.  Iteration order is by hash value, i.e. effectively
 * arbitrary but stable between modifications.
 */
#define DECLARE_HASH(prefix, type, field, cmpfn, hashfn)                       \
									       \
	macro_inline void prefix ## _init(struct prefix##_head *h)             \
	{                                                                      \
		memset(h, 0, sizeof(*h));                                      \
	}                                                                      \
	/* table must be empty; drops minshift so shrink can free the array */ \
	macro_inline void prefix ## _fini(struct prefix##_head *h)             \
	{                                                                      \
		assert(h->hh.count == 0);                                      \
		h->hh.minshift = 0;                                            \
		typesafe_hash_shrink(&h->hh);                                  \
		memset(h, 0, sizeof(*h));                                      \
	}                                                                      \
	/* returns NULL on success, or the already-present equal item */       \
	macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
	{                                                                      \
		h->hh.count++;                                                 \
		if (!h->hh.tabshift || HASH_GROW_THRESHOLD(h->hh))             \
			typesafe_hash_grow(&h->hh);                            \
									       \
		uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval);   \
		item->field.hi.hashval = hval;                                 \
		struct thash_item **np = &h->hh.entries[hbits];                \
		/* chains are sorted by hashval: skip smaller hashes, then     \
		 * check the equal-hash run for a duplicate                    \
		 */                                                            \
		while (*np && (*np)->hashval < hval)                           \
			np = &(*np)->next;                                     \
		while (*np && (*np)->hashval == hval) {                        \
			if (cmpfn(container_of(*np, type, field.hi), item) == 0) { \
				h->hh.count--;                                 \
				return container_of(*np, type, field.hi);      \
			}                                                      \
			np = &(*np)->next;                                     \
		}                                                              \
		item->field.hi.next = *np;                                     \
		*np = &item->field.hi;                                         \
		return NULL;                                                   \
	}                                                                      \
	macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
						       const type *item)       \
	{                                                                      \
		if (!h->hh.tabshift)                                           \
			return NULL;                                           \
		uint32_t hval = hashfn(item), hbits = HASH_KEY(h->hh, hval);   \
		const struct thash_item *hitem = h->hh.entries[hbits];         \
		while (hitem && hitem->hashval < hval)                         \
			hitem = hitem->next;                                   \
		while (hitem && hitem->hashval == hval) {                      \
			if (!cmpfn(container_of(hitem, type, field.hi), item)) \
				return container_of(hitem, type, field.hi);    \
			hitem = hitem->next;                                   \
		}                                                              \
		return NULL;                                                   \
	}                                                                      \
	TYPESAFE_FIND(prefix, type)                                            \
	/* returns item if it was on the hash, NULL otherwise */               \
	macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
	{                                                                      \
		if (!h->hh.tabshift)                                           \
			return NULL;                                           \
		uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
		struct thash_item **np = &h->hh.entries[hbits];                \
		while (*np && (*np)->hashval < hval)                           \
			np = &(*np)->next;                                     \
		while (*np && *np != &item->field.hi && (*np)->hashval == hval) \
			np = &(*np)->next;                                     \
		if (*np != &item->field.hi)                                    \
			return NULL;                                           \
		*np = item->field.hi.next;                                     \
		item->field.hi.next = NULL;                                    \
		h->hh.count--;                                                 \
		if (HASH_SHRINK_THRESHOLD(h->hh))                              \
			typesafe_hash_shrink(&h->hh);                          \
		return item;                                                   \
	}                                                                      \
	/* unlink and return the head of the first non-empty bucket */         \
	macro_inline type *prefix ## _pop(struct prefix##_head *h)             \
	{                                                                      \
		uint32_t i;                                                    \
		for (i = 0; i < HASH_SIZE(h->hh); i++)                         \
			if (h->hh.entries[i]) {                                \
				struct thash_item *hitem = h->hh.entries[i];   \
				h->hh.entries[i] = hitem->next;                \
				h->hh.count--;                                 \
				hitem->next = NULL;                            \
				if (HASH_SHRINK_THRESHOLD(h->hh))              \
					typesafe_hash_shrink(&h->hh);          \
				return container_of(hitem, type, field.hi);    \
			}                                                      \
		return NULL;                                                   \
	}                                                                      \
	TYPESAFE_SWAP_ALL_SIMPLE(prefix)                                       \
	macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
	{                                                                      \
		uint32_t i;                                                    \
		for (i = 0; i < HASH_SIZE(h->hh); i++)                         \
			if (h->hh.entries[i])                                  \
				return container_of(h->hh.entries[i], type, field.hi); \
		return NULL;                                                   \
	}                                                                      \
	macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
						     const type *item)         \
	{                                                                      \
		const struct thash_item *hitem = &item->field.hi;              \
		if (hitem->next)                                               \
			return container_of(hitem->next, type, field.hi);      \
		/* end of chain: continue with the next non-empty bucket */    \
		uint32_t i = HASH_KEY(h->hh, hitem->hashval) + 1;              \
		for (; i < HASH_SIZE(h->hh); i++)                              \
			if (h->hh.entries[i])                                  \
				return container_of(h->hh.entries[i], type, field.hi); \
		return NULL;                                                   \
	}                                                                      \
	TYPESAFE_FIRST_NEXT(prefix, type)                                      \
	macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
	{                                                                      \
		if (!item)                                                     \
			return NULL;                                           \
		return prefix ## _next(h, item);                               \
	}                                                                      \
	macro_pure size_t prefix ## _count(const struct prefix##_head *h)      \
	{                                                                      \
		return h->hh.count;                                            \
	}                                                                      \
	/* pointer-identity membership test.  Fixes vs. previous version:      \
	 * (1) guard the no-table case (entries == NULL and HASH_KEY would     \
	 *     shift a uint32_t by 33, both undefined), matching _const_find;  \
	 * (2) continue the equal-hash scan from the already-advanced          \
	 *     position instead of rescanning the bucket from its head,        \
	 *     which silently discarded the first walk's work.                 \
	 */                                                                    \
	macro_pure bool prefix ## _member(const struct prefix##_head *h,       \
					  const type *item)                    \
	{                                                                      \
		if (!h->hh.tabshift)                                           \
			return false;                                          \
		uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
		const struct thash_item *hitem = h->hh.entries[hbits];         \
		while (hitem && hitem->hashval < hval)                         \
			hitem = hitem->next;                                   \
		for (; hitem && hitem->hashval == hval; hitem = hitem->next)   \
			if (hitem == &item->field.hi)                          \
				return true;                                   \
		return false;                                                  \
	}                                                                      \
	MACRO_REQUIRE_SEMICOLON() /* end */
abd71baa DL |
961 | |
/* skiplist, sorted.
 * can be used as priority queue with add / pop
 */

/* don't use these structs directly */
#define SKIPLIST_MAXDEPTH	16
#define SKIPLIST_EMBED		4
#define SKIPLIST_OVERFLOW	(SKIPLIST_EMBED - 1)

/* per-item forward pointers: only the first SKIPLIST_EMBED levels are
 * embedded.  For deeper items, the remaining levels live in a separately
 * allocated sskip_overflow whose pointer is stored, tagged with bit 0 set,
 * in next[SKIPLIST_OVERFLOW] (see _init below; detagging happens in
 * typesafe.c).
 */
struct sskip_item {
	struct sskip_item *next[SKIPLIST_EMBED];
};

struct sskip_overflow {
	struct sskip_item *next[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
};

struct sskip_head {
	struct sskip_item hitem;	/* head-of-list level pointers */
	/* head always has full depth; its overflow array is embedded here */
	struct sskip_item *overflow[SKIPLIST_MAXDEPTH - SKIPLIST_OVERFLOW];
	size_t count;			/* number of items on the list */
};
984 | ||
/* use as:
 *
 * PREDECL_SKIPLIST(namelist)
 * struct name {
 *   struct namelist_item nlitem;
 * }
 * DECLARE_SKIPLIST(namelist, struct name, nlitem, cmpfunc)
 */
/* shared between the UNIQ and NONUNIQ variants */
#define _PREDECL_SKIPLIST(prefix)                                              \
struct prefix ## _head { struct sskip_head sh; };                              \
struct prefix ## _item { struct sskip_item si; };                              \
MACRO_REQUIRE_SEMICOLON() /* end */

/* static initializers; NB: prefix##_init() must still run before use since
 * the head's overflow pointer needs to be set up.
 */
#define INIT_SKIPLIST_UNIQ(var)		{ }
#define INIT_SKIPLIST_NONUNIQ(var)	{ }
1000 | ||
/* common skiplist accessors.  cmpfn_nuq is used for value lookups (find_gteq
 * / find_lt); cmpfn_uq (which may tie-break on address) is used by add/del
 * so one specific item can be addressed even among equal keys.
 */
#define _DECLARE_SKIPLIST(prefix, type, field, cmpfn_nuq, cmpfn_uq)            \
									       \
	macro_inline void prefix ## _init(struct prefix##_head *h)             \
	{                                                                      \
		memset(h, 0, sizeof(*h));                                      \
		/* store the overflow array pointer tagged with bit 0 set in   \
		 * the last embedded next slot                                 \
		 */                                                            \
		h->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *)    \
			((uintptr_t)h->sh.overflow | 1);                       \
	}                                                                      \
	macro_inline void prefix ## _fini(struct prefix##_head *h)             \
	{                                                                      \
		memset(h, 0, sizeof(*h));                                      \
	}                                                                      \
	/* returns NULL on success, or the conflicting item (unique lists) */  \
	macro_inline type *prefix ## _add(struct prefix##_head *h, type *item) \
	{                                                                      \
		struct sskip_item *si;                                         \
		si = typesafe_skiplist_add(&h->sh, &item->field.si, cmpfn_uq); \
		return container_of_null(si, type, field.si);                  \
	}                                                                      \
	/* first item comparing >= the given (possibly synthetic) item */      \
	macro_inline const type *prefix ## _const_find_gteq(                   \
		const struct prefix##_head *h, const type *item)               \
	{                                                                      \
		const struct sskip_item *sitem = typesafe_skiplist_find_gteq(&h->sh, \
				&item->field.si, cmpfn_nuq);                   \
		return container_of_null(sitem, type, field.si);               \
	}                                                                      \
	/* last item comparing < the given item */                             \
	macro_inline const type *prefix ## _const_find_lt(                     \
		const struct prefix##_head *h, const type *item)               \
	{                                                                      \
		const struct sskip_item *sitem = typesafe_skiplist_find_lt(&h->sh, \
				&item->field.si, cmpfn_nuq);                   \
		return container_of_null(sitem, type, field.si);               \
	}                                                                      \
	TYPESAFE_FIND_CMP(prefix, type)                                        \
	/* returns item if it was on the list, NULL otherwise */               \
	macro_inline type *prefix ## _del(struct prefix##_head *h, type *item) \
	{                                                                      \
		struct sskip_item *sitem = typesafe_skiplist_del(&h->sh,       \
				&item->field.si, cmpfn_uq);                    \
		return container_of_null(sitem, type, field.si);               \
	}                                                                      \
	macro_inline type *prefix ## _pop(struct prefix##_head *h)             \
	{                                                                      \
		struct sskip_item *sitem = typesafe_skiplist_pop(&h->sh);      \
		return container_of_null(sitem, type, field.si);               \
	}                                                                      \
	macro_inline void prefix ## _swap_all(struct prefix##_head *a,         \
					      struct prefix##_head *b)         \
	{                                                                      \
		struct prefix##_head tmp = *a;                                 \
		*a = *b;                                                       \
		*b = tmp;                                                      \
		/* the copied tagged pointers still reference the other        \
		 * head's embedded overflow array; re-point them               \
		 */                                                            \
		a->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *)    \
			((uintptr_t)a->sh.overflow | 1);                       \
		b->sh.hitem.next[SKIPLIST_OVERFLOW] = (struct sskip_item *)    \
			((uintptr_t)b->sh.overflow | 1);                       \
	}                                                                      \
	/* iteration uses level 0, which links every item in sorted order */   \
	macro_pure const type *prefix ## _const_first(const struct prefix##_head *h) \
	{                                                                      \
		const struct sskip_item *first = h->sh.hitem.next[0];          \
		return container_of_null(first, type, field.si);               \
	}                                                                      \
	macro_pure const type *prefix ## _const_next(const struct prefix##_head *h, \
						     const type *item)         \
	{                                                                      \
		const struct sskip_item *next = item->field.si.next[0];        \
		return container_of_null(next, type, field.si);                \
	}                                                                      \
	TYPESAFE_FIRST_NEXT(prefix, type)                                      \
	macro_pure type *prefix ## _next_safe(struct prefix##_head *h, type *item) \
	{                                                                      \
		struct sskip_item *next;                                       \
		next = item ? item->field.si.next[0] : NULL;                   \
		return container_of_null(next, type, field.si);                \
	}                                                                      \
	macro_pure size_t prefix ## _count(const struct prefix##_head *h)      \
	{                                                                      \
		return h->sh.count;                                            \
	}                                                                      \
	MACRO_REQUIRE_SEMICOLON() /* end */
abd71baa DL |
1079 | |
#define PREDECL_SKIPLIST_UNIQ(prefix)                                          \
	_PREDECL_SKIPLIST(prefix)
/* unique-key skiplist: the user cmpfn is used directly for both ordering
 * and equality, so duplicates are rejected by _add.
 */
#define DECLARE_SKIPLIST_UNIQ(prefix, type, field, cmpfn)                      \
									       \
	/* adapt the typed cmpfn to the untyped sskip_item callback */         \
	macro_inline int prefix ## __cmp(const struct sskip_item *a,           \
					 const struct sskip_item *b)           \
	{                                                                      \
		return cmpfn(container_of(a, type, field.si),                  \
			     container_of(b, type, field.si));                 \
	}                                                                      \
	macro_inline const type *prefix ## _const_find(const struct prefix##_head *h, \
						       const type *item)       \
	{                                                                      \
		const struct sskip_item *sitem = typesafe_skiplist_find(&h->sh, \
				&item->field.si, &prefix ## __cmp);            \
		return container_of_null(sitem, type, field.si);               \
	}                                                                      \
	TYPESAFE_FIND(prefix, type)                                            \
	TYPESAFE_MEMBER_VIA_FIND(prefix, type)                                 \
									       \
	_DECLARE_SKIPLIST(prefix, type, field,                                 \
			  prefix ## __cmp, prefix ## __cmp);                   \
	MACRO_REQUIRE_SEMICOLON() /* end */
abd71baa DL |
1103 | |
#define PREDECL_SKIPLIST_NONUNIQ(prefix)                                       \
	_PREDECL_SKIPLIST(prefix)
/* non-unique skiplist: equal keys may coexist.  Lookups use the plain
 * comparison; add/del use an address tie-broken variant so they operate on
 * one specific item among equals.
 */
#define DECLARE_SKIPLIST_NONUNIQ(prefix, type, field, cmpfn)                   \
									       \
	macro_inline int prefix ## __cmp(const struct sskip_item *a,           \
					 const struct sskip_item *b)           \
	{                                                                      \
		return cmpfn(container_of(a, type, field.si),                  \
			     container_of(b, type, field.si));                 \
	}                                                                      \
	/* like __cmp, but tie-break on pointer identity */                    \
	macro_inline int prefix ## __cmp_uq(const struct sskip_item *a,        \
					    const struct sskip_item *b)        \
	{                                                                      \
		int cmpval = cmpfn(container_of(a, type, field.si),            \
				   container_of(b, type, field.si));           \
		if (cmpval)                                                    \
			return cmpval;                                         \
		if (a < b)                                                     \
			return -1;                                             \
		if (a > b)                                                     \
			return 1;                                              \
		return 0;                                                      \
	}                                                                      \
									       \
	_DECLARE_SKIPLIST(prefix, type, field,                                 \
			  prefix ## __cmp, prefix ## __cmp_uq);                \
	TYPESAFE_MEMBER_VIA_FIND_GTEQ(prefix, type, cmpfn)                     \
	MACRO_REQUIRE_SEMICOLON() /* end */
abd71baa DL |
1132 | |
1133 | ||
/* out-of-line skiplist implementation (in typesafe.c).  All functions take
 * the untyped sskip structs plus an item-comparison callback; the typed
 * wrappers above supply the callback and convert to/from the user type.
 */

/* insert; returns NULL on success or the existing item that compared equal */
extern struct sskip_item *typesafe_skiplist_add(struct sskip_head *head,
		struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
/* exact-match lookup (NULL if not found) */
extern const struct sskip_item *typesafe_skiplist_find(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
/* first item comparing >= item (NULL if none) */
extern const struct sskip_item *typesafe_skiplist_find_gteq(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
/* last item comparing < item (NULL if none) */
extern const struct sskip_item *typesafe_skiplist_find_lt(
		const struct sskip_head *head,
		const struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
/* unlink item; returns it, or NULL if it was not on the list */
extern struct sskip_item *typesafe_skiplist_del(
		struct sskip_head *head, struct sskip_item *item, int (*cmpfn)(
				const struct sskip_item *a,
				const struct sskip_item *b));
/* unlink and return the smallest item (NULL if empty) */
extern struct sskip_item *typesafe_skiplist_pop(struct sskip_head *head);
abd71baa | 1158 | |
eea3c899 RW |
1159 | #ifdef __cplusplus |
1160 | } | |
1161 | #endif | |
1162 | ||
80911bc2 DL |
1163 | /* this needs to stay at the end because both files include each other. |
1164 | * the resolved order is typesafe.h before typerb.h | |
1165 | */ | |
1166 | #include "typerb.h" | |
1167 | ||
abd71baa | 1168 | #endif /* _FRR_TYPESAFE_H */ |