1 // SPDX-License-Identifier: ISC
/*
3 * Copyright (c) 2019 David Lamparter, for NetDEF, Inc.
*/
18 DEFINE_MTYPE_STATIC(LIB
, TYPEDHASH_BUCKET
, "Typed-hash bucket");
19 DEFINE_MTYPE_STATIC(LIB
, SKIPLIST_OFLOW
, "Skiplist overflow");
20 DEFINE_MTYPE_STATIC(LIB
, HEAP_ARRAY
, "Typed-heap array");
22 struct slist_item typesafe_slist_sentinel
= { NULL
};
/* Membership test for the typesafe singly-linked list: returns whether
 * @item is on @head's list.  Two cursors advance in lockstep - "fromhead"
 * walks forward from head->first, "fromnext" chases the chain hanging off
 * item's own next pointer - so the scan terminates even for items that
 * are not on this list.
 * NOTE(review): interior lines (opening brace, return statements, loop
 * close) are missing from this extract; code below is kept verbatim.
 */
24 bool typesafe_list_member(const struct slist_head
*head
,
25 const struct slist_item
*item
)
27 struct slist_item
*fromhead
= head
->first
;
/* cast drops const only to take the address of item->next; item itself
 * is not written through this pointer */
28 struct slist_item
**fromnext
= (struct slist_item
**)&item
->next
;
30 while (fromhead
!= _SLIST_LAST
) {
/* hit: head cursor reached the item, or the item's own chain reached
 * the list's tail anchor */
31 if (fromhead
== item
|| fromnext
== head
->last_next
)
34 fromhead
= fromhead
->next
;
35 if (!*fromnext
|| *fromnext
== _SLIST_LAST
)
37 fromnext
= &(*fromnext
)->next
;
/* Membership test for the typesafe doubly-linked list: returns whether
 * @item is linked on @head.  Walks forward from the head anchor and
 * forward from the item at the same time; either cursor reaching the
 * other's endpoint decides the answer.
 * NOTE(review): braces and return statements are missing from this
 * extract; code below is kept verbatim.
 */
43 bool typesafe_dlist_member(const struct dlist_head
*head
,
44 const struct dlist_item
*item
)
46 const struct dlist_item
*fromhead
= head
->hitem
.next
;
47 const struct dlist_item
*fromitem
= item
->next
;
/* an item with a NULL prev or next pointer is not linked anywhere */
49 if (!item
->prev
|| !item
->next
)
52 while (fromhead
!= &head
->hitem
&& fromitem
!= item
) {
53 if (fromitem
== &head
->hitem
|| fromhead
== item
)
55 fromhead
= fromhead
->next
;
56 fromitem
= fromitem
->next
;
/* Debug helper: asserts the typed-hash invariants - every item sits in
 * the bucket matching HASH_KEY() of its stored hashval, and each bucket
 * chain is sorted by ascending hashval.  The empty #define below is the
 * disabled variant; the surrounding #if/#else wrapper is missing from
 * this extract - TODO confirm against upstream.
 */
63 static void hash_consistency_check(struct thash_head
*head
)
66 struct thash_item
*item
, *prev
;
68 for (i
= 0; i
< HASH_SIZE(*head
); i
++) {
69 item
= head
->entries
[i
];
/* bucket index must match the key derived from the stored hash value */
72 assert(HASH_KEY(*head
, item
->hashval
) == i
);
/* chains are kept sorted by hashval (see grow/shrink redistribution) */
73 assert(!prev
|| item
->hashval
>= prev
->hashval
);
80 #define hash_consistency_check(x)
/* Grow the hash table: derive a new power-of-two table size from
 * head->count (capped by head->maxshift), reallocate the bucket array,
 * and redistribute every old bucket chain across its 2^delta successor
 * buckets.
 * NOTE(review): interior lines are missing from this extract - e.g. the
 * increment to the next power of two expected before __builtin_ctz, the
 * early return when the shift is unchanged, and parts of the per-bucket
 * split loop.  Code is kept verbatim; verify against upstream.
 */
83 void typesafe_hash_grow(struct thash_head
*head
)
85 uint32_t newsize
= head
->count
, i
, j
;
86 uint8_t newshift
, delta
;
88 hash_consistency_check(head
);
/* smear bits downward: newsize becomes (next power of two) - 1 */
90 newsize
|= newsize
>> 1;
91 newsize
|= newsize
>> 2;
92 newsize
|= newsize
>> 4;
93 newsize
|= newsize
>> 8;
94 newsize
|= newsize
>> 16;
96 newshift
= __builtin_ctz(newsize
) + 1;
/* respect the configured upper bound on table size */
98 if (head
->maxshift
&& newshift
> head
->maxshift
)
99 newshift
= head
->maxshift
;
/* nothing to do if the table is already at the target size */
100 if (newshift
== head
->tabshift
)
102 newsize
= _HASH_SIZE(newshift
);
104 head
->entries
= XREALLOC(MTYPE_TYPEDHASH_BUCKET
, head
->entries
,
105 sizeof(head
->entries
[0]) * newsize
);
/* zero only the newly appended tail of the bucket array */
106 memset(head
->entries
+ HASH_SIZE(*head
), 0,
107 sizeof(head
->entries
[0]) *
108 (newsize
- HASH_SIZE(*head
)));
110 delta
= newshift
- head
->tabshift
;
112 i
= HASH_SIZE(*head
);
116 struct thash_item
**apos
, *item
;
119 apos
= &head
->entries
[i
];
/* each old bucket i fans out into new buckets (i << delta) + j */
121 for (j
= 0; j
< (1U << delta
); j
++) {
125 head
->entries
[(i
<< delta
) + j
] = item
;
126 apos
= &head
->entries
[(i
<< delta
) + j
];
128 while ((item
= *apos
)) {
/* midbits = the delta key bits selecting the destination sub-bucket */
130 midbits
= _HASH_KEY(newshift
, item
->hashval
);
131 midbits
&= (1 << delta
) - 1;
140 head
->tabshift
= newshift
;
141 hash_consistency_check(head
);
/* Shrink the hash table back toward head->count (bounded below by
 * head->minshift), merging each group of 2^delta old buckets into one.
 * Chains are kept sorted by hashval, so the sub-bucket chains are simply
 * concatenated in order.
 * NOTE(review): interior lines are missing from this extract - e.g. the
 * empty-table early-out guarding the XFREE, and the expected increment
 * to the next power of two before __builtin_ctz.  Code kept verbatim;
 * verify against upstream.
 */
144 void typesafe_hash_shrink(struct thash_head
*head
)
146 uint32_t newsize
= head
->count
, i
, j
;
147 uint8_t newshift
, delta
;
149 hash_consistency_check(head
);
/* presumably the count == 0 path: drop the bucket array entirely */
152 XFREE(MTYPE_TYPEDHASH_BUCKET
, head
->entries
);
/* smear bits downward: newsize becomes (next power of two) - 1 */
157 newsize
|= newsize
>> 1;
158 newsize
|= newsize
>> 2;
159 newsize
|= newsize
>> 4;
160 newsize
|= newsize
>> 8;
161 newsize
|= newsize
>> 16;
163 newshift
= __builtin_ctz(newsize
) + 1;
/* respect the configured lower bound on table size */
165 if (head
->minshift
&& newshift
< head
->minshift
)
166 newshift
= head
->minshift
;
167 if (newshift
== head
->tabshift
)
169 newsize
= _HASH_SIZE(newshift
);
171 delta
= head
->tabshift
- newshift
;
173 for (i
= 0; i
< newsize
; i
++) {
174 struct thash_item
**apos
= &head
->entries
[i
];
/* append each of the 2^delta old chains onto the merged bucket */
176 for (j
= 0; j
< (1U << delta
); j
++) {
177 *apos
= head
->entries
[(i
<< delta
) + j
];
179 apos
= &(*apos
)->next
;
/* table only shrinks here, so realloc after the merge is safe */
182 head
->entries
= XREALLOC(MTYPE_TYPEDHASH_BUCKET
, head
->entries
,
183 sizeof(head
->entries
[0]) * newsize
);
184 head
->tabshift
= newshift
;
186 hash_consistency_check(head
);
/* Read the skiplist next pointer of @item at a given level.  Levels below
 * SKIPLIST_OVERFLOW live directly in item->next[]; at the overflow slot,
 * bit 0 of the stored pointer tags whether it is still a plain next
 * pointer or points to an out-of-line struct sskip_overflow holding the
 * deeper levels.
 * NOTE(review): the second parameter line ("size_t level)") and braces
 * are missing from this extract; code kept verbatim.
 */
191 static inline struct sskip_item
*sl_level_get(const struct sskip_item
*item
,
194 if (level
< SKIPLIST_OVERFLOW
)
195 return item
->next
[level
];
/* overflow slot without the tag bit set: still a plain next pointer */
196 if (level
== SKIPLIST_OVERFLOW
&& !((uintptr_t)item
->next
[level
] & 1))
197 return item
->next
[level
];
199 uintptr_t ptrval
= (uintptr_t)item
->next
[SKIPLIST_OVERFLOW
];
/* clear the low tag bits to recover the overflow block's address */
200 ptrval
&= UINTPTR_MAX
- 3;
201 struct sskip_overflow
*oflow
= (struct sskip_overflow
*)ptrval
;
202 return oflow
->next
[level
- SKIPLIST_OVERFLOW
];
/* Write the skiplist next pointer of @item at @level to @value; mirror
 * of sl_level_get() with the same tagged-pointer scheme for the overflow
 * slot.
 * NOTE(review): the "else" branch braces around the overflow handling
 * appear to be missing from this extract; code kept verbatim.
 */
205 static inline void sl_level_set(struct sskip_item
*item
, size_t level
,
206 struct sskip_item
*value
)
208 if (level
< SKIPLIST_OVERFLOW
)
209 item
->next
[level
] = value
;
210 else if (level
== SKIPLIST_OVERFLOW
&& !((uintptr_t)item
->next
[level
] & 1))
211 item
->next
[level
] = value
;
/* tagged pointer: store into the out-of-line overflow block instead */
213 uintptr_t ptrval
= (uintptr_t)item
->next
[SKIPLIST_OVERFLOW
];
214 ptrval
&= UINTPTR_MAX
- 3;
215 struct sskip_overflow
*oflow
= (struct sskip_overflow
*)ptrval
;
216 oflow
->next
[level
- SKIPLIST_OVERFLOW
] = value
;
/* Insert @item into the skiplist in cmpfn order.  The new item's level
 * count is drawn geometrically via __builtin_ctz of a weak random value;
 * levels beyond SKIPLIST_EMBED go into an XMALLOC'd overflow block whose
 * pointer is stored with bit 0 set as a tag.  The cmpval == 0 path
 * appears to detect and return an existing duplicate instead of
 * inserting - TODO confirm (return statements missing from extract).
 * NOTE(review): many interior lines (braces, returns, loop bodies) are
 * missing from this extract; code kept verbatim.
 */
220 struct sskip_item
*typesafe_skiplist_add(struct sskip_head
*head
,
221 struct sskip_item
*item
,
222 int (*cmpfn
)(const struct sskip_item
*a
,
223 const struct sskip_item
*b
))
225 size_t level
= SKIPLIST_MAXDEPTH
, newlevel
, auxlevel
;
226 struct sskip_item
*prev
= &head
->hitem
, *next
, *auxprev
, *auxnext
;
229 /* level / newlevel are 1-counted here */
/* geometric distribution: each extra level is half as likely */
230 newlevel
= __builtin_ctz(frr_weak_random()) + 1;
231 if (newlevel
> SKIPLIST_MAXDEPTH
)
232 newlevel
= SKIPLIST_MAXDEPTH
;
/* descend from the top until reaching the item's own top level */
235 while (level
>= newlevel
) {
236 next
= sl_level_get(prev
, level
- 1);
241 cmpval
= cmpfn(next
, item
);
245 } else if (cmpval
== 0) {
251 /* check for duplicate item - could be removed if code doesn't rely
252 * on it, but not really work the complication. */
257 auxnext
= sl_level_get(auxprev
, auxlevel
);
259 while (auxnext
&& (cmpval
= cmpfn(auxnext
, item
)) < 0) {
261 auxnext
= sl_level_get(auxprev
, auxlevel
);
/* clear the item's level pointers before linking it in */
268 memset(item
, 0, sizeof(*item
));
269 if (newlevel
> SKIPLIST_EMBED
) {
270 struct sskip_overflow
*oflow
;
271 oflow
= XMALLOC(MTYPE_SKIPLIST_OFLOW
, sizeof(void *)
272 * (newlevel
- SKIPLIST_OVERFLOW
));
/* bit 0 set = overflow block present (see sl_level_get/set) */
273 item
->next
[SKIPLIST_OVERFLOW
] = (struct sskip_item
*)
274 ((uintptr_t)oflow
| 1);
277 sl_level_set(item
, level
, next
);
278 sl_level_set(prev
, level
, item
);
279 /* level is now 0-counted and < newlevel*/
/* splice the item in at each remaining (lower) level */
282 next
= sl_level_get(prev
, level
);
283 while (next
&& cmpfn(next
, item
) < 0) {
285 next
= sl_level_get(prev
, level
);
288 sl_level_set(item
, level
, next
);
289 sl_level_set(prev
, level
, item
);
294 /* NOTE: level counting below is 1-based since that makes the code simpler! */
/* Exact-match lookup: descend from the top level, moving right while the
 * next item compares less than the reference, dropping one level
 * otherwise.
 * NOTE(review): the loop framework and return statements are missing
 * from this extract; code kept verbatim.
 */
296 const struct sskip_item
*typesafe_skiplist_find(
297 const struct sskip_head
*head
,
298 const struct sskip_item
*item
, int (*cmpfn
)(
299 const struct sskip_item
*a
,
300 const struct sskip_item
*b
))
302 size_t level
= SKIPLIST_MAXDEPTH
;
303 const struct sskip_item
*prev
= &head
->hitem
, *next
;
307 next
= sl_level_get(prev
, level
- 1);
312 cmpval
= cmpfn(next
, item
);
/* Lookup variant: find the first item comparing greater than or equal
 * to the reference @item (same descend-and-scan structure as
 * typesafe_skiplist_find).
 * NOTE(review): loop framework and return statements are missing from
 * this extract; code kept verbatim.
 */
324 const struct sskip_item
*typesafe_skiplist_find_gteq(
325 const struct sskip_head
*head
,
326 const struct sskip_item
*item
, int (*cmpfn
)(
327 const struct sskip_item
*a
,
328 const struct sskip_item
*b
))
330 size_t level
= SKIPLIST_MAXDEPTH
;
331 const struct sskip_item
*prev
= &head
->hitem
, *next
;
335 next
= sl_level_get(prev
, level
- 1);
340 cmpval
= cmpfn(next
, item
);
/* Lookup variant: find the last item comparing strictly less than the
 * reference @item; presumably tracked via the "best" cursor while
 * descending - return statements are missing from this extract.
 * NOTE(review): code kept verbatim.
 */
352 const struct sskip_item
*typesafe_skiplist_find_lt(
353 const struct sskip_head
*head
,
354 const struct sskip_item
*item
, int (*cmpfn
)(
355 const struct sskip_item
*a
,
356 const struct sskip_item
*b
))
358 size_t level
= SKIPLIST_MAXDEPTH
;
359 const struct sskip_item
*prev
= &head
->hitem
, *next
, *best
= NULL
;
363 next
= sl_level_get(prev
, level
- 1);
368 cmpval
= cmpfn(next
, item
);
/* Remove @item from the skiplist: descend the levels, unlinking the item
 * at every level where it is linked (prev's pointer is redirected to the
 * item's own successor), then free the item's overflow block if present
 * and zero the item.  Presumably returns the removed item, or NULL when
 * it was not found - return lines are missing from this extract.
 * NOTE(review): interior lines missing; code kept verbatim.
 */
378 struct sskip_item
*typesafe_skiplist_del(
379 struct sskip_head
*head
, struct sskip_item
*item
,
380 int (*cmpfn
)(const struct sskip_item
*a
, const struct sskip_item
*b
))
382 size_t level
= SKIPLIST_MAXDEPTH
;
383 struct sskip_item
*prev
= &head
->hitem
, *next
;
388 next
= sl_level_get(prev
, level
- 1);
/* unlink: bypass the item at this level */
394 sl_level_set(prev
, level
- 1,
395 sl_level_get(item
, level
- 1));
400 cmpval
= cmpfn(next
, item
);
411 /* TBD: assert when trying to remove non-existing item? */
/* bit 0 set on the overflow slot = out-of-line block to free */
414 if ((uintptr_t)item
->next
[SKIPLIST_OVERFLOW
] & 1) {
415 uintptr_t ptrval
= (uintptr_t)item
->next
[SKIPLIST_OVERFLOW
];
416 ptrval
&= UINTPTR_MAX
- 3;
417 struct sskip_overflow
*oflow
= (struct sskip_overflow
*)ptrval
;
418 XFREE(MTYPE_SKIPLIST_OFLOW
, oflow
);
/* scrub the removed item's level pointers */
420 memset(item
, 0, sizeof(*item
));
/* Remove and return the first item of the skiplist (item at level 0
 * directly after the head anchor), unlinking it from every level it
 * participates in, freeing its overflow block if present, and zeroing
 * it.  NOTE(review): loop framework and return lines are missing from
 * this extract; code kept verbatim.
 */
425 struct sskip_item
*typesafe_skiplist_pop(struct sskip_head
*head
)
427 size_t level
= SKIPLIST_MAXDEPTH
;
428 struct sskip_item
*prev
= &head
->hitem
, *next
, *item
;
/* the list's first element is the head's level-0 successor */
430 item
= sl_level_get(prev
, 0);
437 next
= sl_level_get(prev
, level
);
/* unlink: redirect this level past the popped item */
441 sl_level_set(prev
, level
, sl_level_get(item
, level
));
/* free the out-of-line overflow block, if the tag bit marks one */
446 if ((uintptr_t)item
->next
[SKIPLIST_OVERFLOW
] & 1) {
447 uintptr_t ptrval
= (uintptr_t)item
->next
[SKIPLIST_OVERFLOW
];
448 ptrval
&= UINTPTR_MAX
- 3;
449 struct sskip_overflow
*oflow
= (struct sskip_overflow
*)ptrval
;
450 XFREE(MTYPE_SKIPLIST_OFLOW
, oflow
);
452 memset(item
, 0, sizeof(*item
));
/* Debug helper: recursively asserts the n-ary heap order starting at
 * @pos - the right neighbor inside the same NARY block and the first
 * entry of the child block must both compare >= the current item.  The
 * empty #define below is the disabled variant; the #if/#else wrapper is
 * missing from this extract - TODO confirm.
 */
460 static void heap_consistency_check(struct heap_head
*head
,
461 int (*cmpfn
)(const struct heap_item
*a
,
462 const struct heap_item
*b
),
465 uint32_t rghtpos
= pos
+ 1;
466 uint32_t downpos
= HEAP_NARY
* (pos
+ 1);
/* guard against 32-bit overflow when computing the child index */
468 if (pos
+ 1 > ~0U / HEAP_NARY
)
/* right neighbor exists only when pos is not last in its NARY block */
471 if ((pos
& (HEAP_NARY
- 1)) != HEAP_NARY
- 1 && rghtpos
< head
->count
) {
472 assert(cmpfn(head
->array
[rghtpos
], head
->array
[pos
]) >= 0);
473 heap_consistency_check(head
, cmpfn
, rghtpos
);
475 if (downpos
< head
->count
) {
476 assert(cmpfn(head
->array
[downpos
], head
->array
[pos
]) >= 0);
477 heap_consistency_check(head
, cmpfn
, downpos
);
481 #define heap_consistency_check(head, cmpfn, pos)
/* Resize the heap's backing pointer array.  Growth is dampened as the
 * array gets large (+50% below 262144 entries, +33% up to 0xaaaa0000);
 * shrinking targets head->count, and the array is freed outright on the
 * path containing the XFREE (presumably when count reached 0 - guard
 * lines missing from this extract).  The result is rounded up to a
 * multiple of HEAP_NARY, and a no-op resize returns early.
 * NOTE(review): several interior lines (initial small-size growth branch,
 * early returns) are missing; code kept verbatim.
 */
484 void typesafe_heap_resize(struct heap_head
*head
, bool grow
)
489 newsize
= head
->arraysz
;
492 else if (newsize
< 262144)
493 newsize
+= newsize
/ 2;
494 else if (newsize
< 0xaaaa0000)
495 newsize
+= newsize
/ 3;
498 } else if (head
->count
> 0) {
499 newsize
= head
->count
;
501 XFREE(MTYPE_HEAP_ARRAY
, head
->array
);
/* round up to a multiple of HEAP_NARY */
506 newsize
+= HEAP_NARY
- 1;
507 newsize
&= ~(HEAP_NARY
- 1);
508 if (newsize
== head
->arraysz
)
511 head
->array
= XREALLOC(MTYPE_HEAP_ARRAY
, head
->array
,
512 newsize
* sizeof(struct heap_item
*));
513 head
->arraysz
= newsize
;
/* Sift @item downward from @pos to restore heap order: at each step the
 * candidate successors are the right neighbor inside the current NARY
 * block (rghtpos) and the first entry of the child block (downpos); the
 * smaller of the valid candidates is moved up into @pos and the walk
 * continues from its slot, until neither candidate compares below @item.
 * NOTE(review): interior lines (loop framework, rghtpos computation,
 * moveto assignments, break/returns) are missing from this extract; code
 * kept verbatim.
 */
516 void typesafe_heap_pushdown(struct heap_head
*head
, uint32_t pos
,
517 struct heap_item
*item
,
518 int (*cmpfn
)(const struct heap_item
*a
,
519 const struct heap_item
*b
))
521 uint32_t rghtpos
, downpos
, moveto
;
524 /* rghtpos: neighbor to the "right", inside block of NARY.
525 * may be invalid if last in block, check nary_last()
526 * downpos: first neighbor in the "downwards" block further
531 /* make sure we can use the full 4G items */
532 downpos
= HEAP_NARY
* (pos
+ 1);
533 if (pos
+ 1 > ~0U / HEAP_NARY
)
534 /* multiplication overflowed. ~0U is guaranteed
535 * to be an invalid index; size limit is enforced in
540 /* only used on break */
543 #define nary_last(x) (((x) & (HEAP_NARY - 1)) == HEAP_NARY - 1)
544 if (downpos
>= head
->count
545 || cmpfn(head
->array
[downpos
], item
) >= 0) {
546 /* not moving down; either at end or down is >= item */
547 if (nary_last(pos
) || rghtpos
>= head
->count
548 || cmpfn(head
->array
[rghtpos
], item
) >= 0)
549 /* not moving right either - got our spot */
554 /* else: downpos is valid and < item. choose between down
555 * or right (if the latter is an option) */
556 } else if (nary_last(pos
) || cmpfn(head
->array
[rghtpos
],
557 head
->array
[downpos
]) >= 0)
/* promote the chosen successor into the vacated slot */
563 head
->array
[pos
] = head
->array
[moveto
];
564 head
->array
[pos
]->index
= pos
;
/* final resting place for the sifted item */
568 head
->array
[moveto
] = item
;
569 item
->index
= moveto
;
571 heap_consistency_check(head
, cmpfn
, 0);
/* Sift @item upward from @pos to restore heap order.  The predecessor is
 * the previous slot inside the same NARY block, or - when pos is the
 * first slot of its block ((pos & (HEAP_NARY-1)) == 0) - the parent
 * block's entry at pos / HEAP_NARY - 1.  Predecessors comparing greater
 * than @item are moved down until the order holds, then @item lands at
 * the final slot.
 * NOTE(review): interior lines (loop framework, the in-block "moveto =
 * pos - 1" branch, final index update) are missing from this extract;
 * code kept verbatim.
 */
574 void typesafe_heap_pullup(struct heap_head
*head
, uint32_t pos
,
575 struct heap_item
*item
,
576 int (*cmpfn
)(const struct heap_item
*a
,
577 const struct heap_item
*b
))
/* first slot of a NARY block: predecessor lives in the parent block */
582 if ((pos
& (HEAP_NARY
- 1)) == 0)
583 moveto
= pos
/ HEAP_NARY
- 1;
/* predecessor already <= item: order restored */
587 if (cmpfn(head
->array
[moveto
], item
) <= 0)
590 head
->array
[pos
] = head
->array
[moveto
];
591 head
->array
[pos
]->index
= pos
;
596 head
->array
[pos
] = item
;
599 heap_consistency_check(head
, cmpfn
, 0);