/*
 * qht.c - QEMU Hash Table, designed to scale for read-mostly workloads.
 *
 * Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * Assumptions:
 * - NULL cannot be inserted/removed as a pointer value.
 * - Trying to insert an already-existing hash-pointer pair is OK. However,
 *   it is not OK to insert into the same hash table different hash-pointer
 *   pairs that have the same pointer value, but not the hashes.
 * - Lookups are performed under an RCU read-critical section; removals
 *   must wait for a grace period to elapse before freeing removed objects.
 *
 * Features:
 * - Reads (i.e. lookups and iterators) can be concurrent with other reads.
 *   Lookups that are concurrent with writes to the same bucket will retry
 *   via a seqlock; iterators acquire all bucket locks and therefore can be
 *   concurrent with lookups and are serialized wrt writers.
 * - Writes (i.e. insertions/removals) can be concurrent with writes to
 *   different buckets; writes to the same bucket are serialized through a lock.
 * - Optional auto-resizing: the hash table resizes up if the load surpasses
 *   a certain threshold. Resizing is done concurrently with readers; writes
 *   are serialized with the resize operation.
 *
 * The key structure is the bucket, which is cacheline-sized. Buckets
 * contain a few hash values and pointers; the u32 hash values are stored in
 * full so that resizing is fast. Having this structure instead of directly
 * chaining items has two advantages:
 * - Failed lookups fail fast, and touch a minimum number of cache lines.
 * - Resizing the hash table with concurrent lookups is easy.
 *
 * There are two types of buckets:
 * 1. "head" buckets are the ones allocated in the array of buckets in qht_map.
 * 2. all "non-head" buckets (i.e. all others) are members of a chain that
 *    starts from a head bucket.
 * Note that the seqlock and spinlock of a head bucket apply to all buckets
 * chained to it; these two fields are unused in non-head buckets.
 *
 * On removals, we move the last valid item in the chain to the position of the
 * just-removed entry. This makes lookups slightly faster, since the moment an
 * invalid entry is found, the (failed) lookup is over.
 *
 * Resizing is done by taking all bucket spinlocks (so that no other writers can
 * race with us) and then copying all entries into a new hash map. Then, the
 * ht->map pointer is set, and the old map is freed once no RCU readers can see
 * it anymore.
 *
 * Writers check for concurrent resizes by comparing ht->map before and after
 * acquiring their bucket lock. If they don't match, a resize has occurred
 * while the bucket spinlock was being acquired.
 *
 * Related Work:
 * - Idea of cacheline-sized buckets with full hashes taken from:
 *   David, Guerraoui & Trigonakis, "Asynchronized Concurrency:
 *   The Secret to Scaling Concurrent Search Data Structures", ASPLOS'15.
 * - Why not RCU-based hash tables? They would allow us to get rid of the
 *   seqlock, but resizing would take forever since RCU read critical
 *   sections in QEMU take quite a long time.
 *   More info on relativistic hash tables:
 *   + Triplett, McKenney & Walpole, "Resizable, Scalable, Concurrent Hash
 *     Tables via Relativistic Programming", USENIX ATC'11.
 *   + Corbet, "Relativistic hash tables, part 1: Algorithms", @ lwn.net, 2014.
 *     https://lwn.net/Articles/612021/
 */
#include "qemu/osdep.h"
#include "qemu/qht.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"

//#define QHT_DEBUG

/*
 * We want to avoid false sharing of cache lines. Most systems have 64-byte
 * cache lines so we go with it for simplicity.
 *
 * Note that systems with smaller cache lines will be fine (the struct is
 * almost 64 bytes); systems with larger cache lines might suffer from
 * some false sharing.
 */
#define QHT_BUCKET_ALIGN 64

/* define these to keep sizeof(qht_bucket) within QHT_BUCKET_ALIGN */
#if HOST_LONG_BITS == 32
#define QHT_BUCKET_ENTRIES 6
#else /* 64-bit */
#define QHT_BUCKET_ENTRIES 4
#endif

enum qht_iter_type {
    QHT_ITER_VOID,    /* do nothing; use retvoid */
    QHT_ITER_RM,      /* remove element if retbool returns true */
};

struct qht_iter {
    union {
        qht_iter_func_t retvoid;
        qht_iter_bool_func_t retbool;
    } f;
    enum qht_iter_type type;
};

/*
 * Do _not_ use qemu_mutex_[try]lock directly! Use these macros, otherwise
 * the profiler (QSP) will deadlock.
 */
static inline void qht_lock(struct qht *ht)
{
    if (ht->mode & QHT_MODE_RAW_MUTEXES) {
        qemu_mutex_lock__raw(&ht->lock);
    } else {
        qemu_mutex_lock(&ht->lock);
    }
}

static inline int qht_trylock(struct qht *ht)
{
    if (ht->mode & QHT_MODE_RAW_MUTEXES) {
        return qemu_mutex_trylock__raw(&(ht)->lock);
    }
    return qemu_mutex_trylock(&(ht)->lock);
}

/* this inline is not really necessary, but it helps keep code consistent */
static inline void qht_unlock(struct qht *ht)
{
    qemu_mutex_unlock(&ht->lock);
}

/*
 * Note: reading partially-updated pointers in @pointers could lead to
 * segfaults. We thus access them with atomic_read/set; this guarantees
 * that the compiler makes all those accesses atomic. We also need the
 * volatile-like behavior in atomic_read, since otherwise the compiler
 * might refetch the pointer.
 * atomic_read's are of course not necessary when the bucket lock is held.
 *
 * If both ht->lock and b->lock are grabbed, ht->lock should always
 * be grabbed first.
 */
struct qht_bucket {
    QemuSpin lock;
    QemuSeqLock sequence;
    uint32_t hashes[QHT_BUCKET_ENTRIES];
    void *pointers[QHT_BUCKET_ENTRIES];
    struct qht_bucket *next;
} QEMU_ALIGNED(QHT_BUCKET_ALIGN);

QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);
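
/*
 * A size sketch for the build assertion above, assuming 4-byte QemuSpin and
 * QemuSeqLock fields (their exact layout is host-dependent):
 *   64-bit host: 4 + 4 + 4*4 (hashes) + 8*4 (pointers) + 8 (next) = 64 bytes;
 *   32-bit host: 4 + 4 + 4*6 (hashes) + 4*6 (pointers) + 4 (next) = 60 bytes.
 * This is why QHT_BUCKET_ENTRIES is 4 on 64-bit hosts and 6 on 32-bit ones.
 */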

/**
 * struct qht_map - structure to track an array of buckets
 * @rcu: used by RCU. Keep it as the top field in the struct to help valgrind
 *       find the whole struct.
 * @buckets: array of head buckets. It is constant once the map is created.
 * @n_buckets: number of head buckets. It is constant once the map is created.
 * @n_added_buckets: number of added (i.e. "non-head") buckets
 * @n_added_buckets_threshold: threshold to trigger an upward resize once the
 *                             number of added buckets surpasses it.
 *
 * Buckets are tracked in what we call a "map", i.e. this structure.
 */
struct qht_map {
    struct rcu_head rcu;
    struct qht_bucket *buckets;
    size_t n_buckets;
    size_t n_added_buckets;
    size_t n_added_buckets_threshold;
};

/* trigger a resize when n_added_buckets > n_buckets / div */
#define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8

static void qht_do_resize_reset(struct qht *ht, struct qht_map *new,
                                bool reset);
static void qht_grow_maybe(struct qht *ht);

#ifdef QHT_DEBUG

#define qht_debug_assert(X) do { assert(X); } while (0)

static void qht_bucket_debug__locked(struct qht_bucket *b)
{
    bool seen_empty = false;
    bool corrupt = false;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                seen_empty = true;
                continue;
            }
            if (seen_empty) {
                fprintf(stderr, "%s: b: %p, pos: %i, hash: 0x%x, p: %p\n",
                        __func__, b, i, b->hashes[i], b->pointers[i]);
                corrupt = true;
            }
        }
        b = b->next;
    } while (b);
    qht_debug_assert(!corrupt);
}

static void qht_map_debug__all_locked(struct qht_map *map)
{
    int i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_debug__locked(&map->buckets[i]);
    }
}
#else

#define qht_debug_assert(X) do { (void)(X); } while (0)

static inline void qht_bucket_debug__locked(struct qht_bucket *b)
{ }

static inline void qht_map_debug__all_locked(struct qht_map *map)
{ }
#endif /* QHT_DEBUG */

static inline size_t qht_elems_to_buckets(size_t n_elems)
{
    return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
}
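
/*
 * For illustration: on a 64-bit host (QHT_BUCKET_ENTRIES == 4), asking for
 * 1000 elements yields pow2ceil(250) == 256 head buckets. The power-of-two
 * bucket count is what lets qht_map_to_bucket() below index with a simple
 * "hash & (n_buckets - 1)" mask instead of a modulo.
 */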

static inline void qht_head_init(struct qht_bucket *b)
{
    memset(b, 0, sizeof(*b));
    qemu_spin_init(&b->lock);
    seqlock_init(&b->sequence);
}

static inline
struct qht_bucket *qht_map_to_bucket(struct qht_map *map, uint32_t hash)
{
    return &map->buckets[hash & (map->n_buckets - 1)];
}

/* acquire all bucket locks from a map */
static void qht_map_lock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_lock(&b->lock);
    }
}

static void qht_map_unlock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_unlock(&b->lock);
    }
}

/*
 * Call with at least a bucket lock held.
 * @map should be the value read before acquiring the lock (or locks).
 */
static inline bool qht_map_is_stale__locked(struct qht *ht, struct qht_map *map)
{
    return map != ht->map;
}

/*
 * Grab all bucket locks, and set @pmap after making sure the map isn't stale.
 *
 * Pairs with qht_map_unlock_buckets(), hence the pass-by-reference.
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
{
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return;
    }
    qht_map_unlock_buckets(map);

    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qht_lock(ht);
    map = ht->map;
    qht_map_lock_buckets(map);
    qht_unlock(ht);
    *pmap = map;
    return;
}

/*
 * Get a head bucket and lock it, making sure its parent map is not stale.
 * @pmap is filled with a pointer to the bucket's parent map.
 *
 * Unlock with qemu_spin_unlock(&b->lock).
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
                                             struct qht_map **pmap)
{
    struct qht_bucket *b;
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    qemu_spin_lock(&b->lock);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return b;
    }
    qemu_spin_unlock(&b->lock);

    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qht_lock(ht);
    map = ht->map;
    b = qht_map_to_bucket(map, hash);
    qemu_spin_lock(&b->lock);
    qht_unlock(ht);
    *pmap = map;
    return b;
}

static inline bool qht_map_needs_resize(struct qht_map *map)
{
    return atomic_read(&map->n_added_buckets) > map->n_added_buckets_threshold;
}

static inline void qht_chain_destroy(struct qht_bucket *head)
{
    struct qht_bucket *curr = head->next;
    struct qht_bucket *prev;

    while (curr) {
        prev = curr;
        curr = curr->next;
        qemu_vfree(prev);
    }
}

/* pass only an orphan map */
static void qht_map_destroy(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_chain_destroy(&map->buckets[i]);
    }
    qemu_vfree(map->buckets);
    g_free(map);
}

static struct qht_map *qht_map_create(size_t n_buckets)
{
    struct qht_map *map;
    size_t i;

    map = g_malloc(sizeof(*map));
    map->n_buckets = n_buckets;

    map->n_added_buckets = 0;
    map->n_added_buckets_threshold = n_buckets /
        QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV;

    /* let tiny hash tables add at least one non-head bucket */
    if (unlikely(map->n_added_buckets_threshold == 0)) {
        map->n_added_buckets_threshold = 1;
    }

    map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
                                 sizeof(*map->buckets) * n_buckets);
    for (i = 0; i < n_buckets; i++) {
        qht_head_init(&map->buckets[i]);
    }
    return map;
}

void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems,
              unsigned int mode)
{
    struct qht_map *map;
    size_t n_buckets = qht_elems_to_buckets(n_elems);

    g_assert(cmp);
    ht->cmp = cmp;
    ht->mode = mode;
    qemu_mutex_init(&ht->lock);
    map = qht_map_create(n_buckets);
    atomic_rcu_set(&ht->map, map);
}
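
/*
 * A minimal usage sketch, assuming an object that is looked up by its own
 * pointer value with a caller-provided hash; the obj_cmp helper is
 * hypothetical:
 *
 *     static bool obj_cmp(const void *a, const void *b)
 *     {
 *         return a == b;
 *     }
 *
 *     struct qht ht;
 *
 *     qht_init(&ht, obj_cmp, 1 << 10, QHT_MODE_AUTO_RESIZE);
 *     qht_insert(&ht, obj, hash, NULL);
 *     rcu_read_lock();
 *     found = qht_lookup(&ht, obj, hash);
 *     rcu_read_unlock();
 */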

/* call only when there are no readers/writers left */
void qht_destroy(struct qht *ht)
{
    qht_map_destroy(ht->map);
    memset(ht, 0, sizeof(*ht));
}

static void qht_bucket_reset__locked(struct qht_bucket *head)
{
    struct qht_bucket *b = head;
    int i;

    seqlock_write_begin(&head->sequence);
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                goto done;
            }
            atomic_set(&b->hashes[i], 0);
            atomic_set(&b->pointers[i], NULL);
        }
        b = b->next;
    } while (b);
 done:
    seqlock_write_end(&head->sequence);
}

/* call with all bucket locks held */
static void qht_map_reset__all_locked(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_reset__locked(&map->buckets[i]);
    }
    qht_map_debug__all_locked(map);
}

void qht_reset(struct qht *ht)
{
    struct qht_map *map;

    qht_map_lock_buckets__no_stale(ht, &map);
    qht_map_reset__all_locked(map);
    qht_map_unlock_buckets(map);
}

static inline void qht_do_resize(struct qht *ht, struct qht_map *new)
{
    qht_do_resize_reset(ht, new, false);
}

static inline void qht_do_resize_and_reset(struct qht *ht, struct qht_map *new)
{
    qht_do_resize_reset(ht, new, true);
}

bool qht_reset_size(struct qht *ht, size_t n_elems)
{
    struct qht_map *new = NULL;
    struct qht_map *map;
    size_t n_buckets;

    n_buckets = qht_elems_to_buckets(n_elems);

    qht_lock(ht);
    map = ht->map;
    if (n_buckets != map->n_buckets) {
        new = qht_map_create(n_buckets);
    }
    qht_do_resize_and_reset(ht, new);
    qht_unlock(ht);

    return !!new;
}

static inline
void *qht_do_lookup(struct qht_bucket *head, qht_lookup_func_t func,
                    const void *userp, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (atomic_read(&b->hashes[i]) == hash) {
                /* The pointer is dereferenced before seqlock_read_retry,
                 * so (unlike qht_insert__locked) we need to use
                 * atomic_rcu_read here.
                 */
                void *p = atomic_rcu_read(&b->pointers[i]);

                if (likely(p) && likely(func(p, userp))) {
                    return p;
                }
            }
        }
        b = atomic_rcu_read(&b->next);
    } while (b);

    return NULL;
}

static __attribute__((noinline))
void *qht_lookup__slowpath(struct qht_bucket *b, qht_lookup_func_t func,
                           const void *userp, uint32_t hash)
{
    unsigned int version;
    void *ret;

    do {
        version = seqlock_read_begin(&b->sequence);
        ret = qht_do_lookup(b, func, userp, hash);
    } while (seqlock_read_retry(&b->sequence, version));
    return ret;
}
void *qht_lookup_custom(struct qht *ht, const void *userp, uint32_t hash,
                        qht_lookup_func_t func)
{
    struct qht_bucket *b;
    struct qht_map *map;
    unsigned int version;
    void *ret;

    map = atomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    version = seqlock_read_begin(&b->sequence);
    ret = qht_do_lookup(b, func, userp, hash);
    if (likely(!seqlock_read_retry(&b->sequence, version))) {
        return ret;
    }
    /*
     * Removing the do/while from the fastpath gives a 4% perf. increase when
     * running a 100%-lookup microbenchmark.
     */
    return qht_lookup__slowpath(b, func, userp, hash);
}

void *qht_lookup(struct qht *ht, const void *userp, uint32_t hash)
{
    return qht_lookup_custom(ht, userp, hash, ht->cmp);
}
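
/*
 * Sketch of a lookup by embedded key via qht_lookup_custom(); 'struct entry'
 * and its fields are hypothetical:
 *
 *     static bool entry_matches_key(const void *obj, const void *userp)
 *     {
 *         const struct entry *e = obj;
 *         const uint64_t *key = userp;
 *
 *         return e->key == *key;
 *     }
 *
 *     e = qht_lookup_custom(ht, &key, hash, entry_matches_key);
 *
 * The hash passed in must match the one used when the entry was inserted.
 */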

/* call with head->lock held */
static void *qht_insert__locked(struct qht *ht, struct qht_map *map,
                                struct qht_bucket *head, void *p, uint32_t hash,
                                bool *needs_resize)
{
    struct qht_bucket *b = head;
    struct qht_bucket *prev = NULL;
    struct qht_bucket *new = NULL;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                if (unlikely(b->hashes[i] == hash &&
                             ht->cmp(b->pointers[i], p))) {
                    return b->pointers[i];
                }
            } else {
                goto found;
            }
        }
        prev = b;
        b = b->next;
    } while (b);

    b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b));
    memset(b, 0, sizeof(*b));
    new = b;
    i = 0;
    atomic_inc(&map->n_added_buckets);
    if (unlikely(qht_map_needs_resize(map)) && needs_resize) {
        *needs_resize = true;
    }

 found:
    /* found an empty slot: acquire the seqlock and write */
    seqlock_write_begin(&head->sequence);
    if (new) {
        atomic_rcu_set(&prev->next, b);
    }
    /* smp_wmb() implicit in seqlock_write_begin. */
    atomic_set(&b->hashes[i], hash);
    atomic_set(&b->pointers[i], p);
    seqlock_write_end(&head->sequence);
    return NULL;
}

static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
{
    struct qht_map *map;

    /*
     * If the lock is taken it probably means there's an ongoing resize,
     * so bail out.
     */
    if (qht_trylock(ht)) {
        return;
    }
    map = ht->map;
    /* another thread might have just performed the resize we were after */
    if (qht_map_needs_resize(map)) {
        struct qht_map *new = qht_map_create(map->n_buckets * 2);

        qht_do_resize(ht, new);
    }
    qht_unlock(ht);
}

bool qht_insert(struct qht *ht, void *p, uint32_t hash, void **existing)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool needs_resize = false;
    void *prev;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    prev = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);

    if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
        qht_grow_maybe(ht);
    }
    if (likely(prev == NULL)) {
        return true;
    }
    if (existing) {
        *existing = prev;
    }
    return false;
}
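
/*
 * Sketch of the insert-or-lookup pattern the @existing argument enables:
 *
 *     void *existing;
 *
 *     if (!qht_insert(ht, obj, hash, &existing)) {
 *         obj = existing;    // equivalent entry already there; reuse it
 *     }
 *
 * Callers that do not care about duplicates can pass NULL for @existing.
 */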

static inline bool qht_entry_is_last(struct qht_bucket *b, int pos)
{
    if (pos == QHT_BUCKET_ENTRIES - 1) {
        if (b->next == NULL) {
            return true;
        }
        return b->next->pointers[0] == NULL;
    }
    return b->pointers[pos + 1] == NULL;
}

static void
qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
{
    qht_debug_assert(!(to == from && i == j));
    qht_debug_assert(to->pointers[i]);
    qht_debug_assert(from->pointers[j]);

    atomic_set(&to->hashes[i], from->hashes[j]);
    atomic_set(&to->pointers[i], from->pointers[j]);

    atomic_set(&from->hashes[j], 0);
    atomic_set(&from->pointers[j], NULL);
}

/*
 * Find the last valid entry in @head, and swap it with @orig[pos], which has
 * just been invalidated.
 */
static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
{
    struct qht_bucket *b = orig;
    struct qht_bucket *prev = NULL;
    int i;

    if (qht_entry_is_last(orig, pos)) {
        orig->hashes[pos] = 0;
        atomic_set(&orig->pointers[pos], NULL);
        return;
    }
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                continue;
            }
            if (i > 0) {
                return qht_entry_move(orig, pos, b, i - 1);
            }
            qht_debug_assert(prev);
            return qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
        }
        prev = b;
        b = b->next;
    } while (b);
    /* no free entries other than orig[pos], so swap it with the last one */
    qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
}
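
/*
 * For illustration, with QHT_BUCKET_ENTRIES == 4: removing B from a bucket
 * holding [A, B, C, NULL] moves C into B's slot, leaving [A, C, NULL, NULL].
 * Keeping valid entries contiguous is what lets lookups stop at the first
 * NULL pointer they encounter.
 */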

/* call with b->lock held */
static inline
bool qht_remove__locked(struct qht_bucket *head, const void *p, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            void *q = b->pointers[i];

            if (unlikely(q == NULL)) {
                return false;
            }
            if (q == p) {
                qht_debug_assert(b->hashes[i] == hash);
                seqlock_write_begin(&head->sequence);
                qht_bucket_remove_entry(b, i);
                seqlock_write_end(&head->sequence);
                return true;
            }
        }
        b = b->next;
    } while (b);
    return false;
}

bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool ret;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    ret = qht_remove__locked(b, p, hash);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);
    return ret;
}

static inline void qht_bucket_iter(struct qht_bucket *head,
                                   const struct qht_iter *iter, void *userp)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                return;
            }
            switch (iter->type) {
            case QHT_ITER_VOID:
                iter->f.retvoid(b->pointers[i], b->hashes[i], userp);
                break;
            case QHT_ITER_RM:
                if (iter->f.retbool(b->pointers[i], b->hashes[i], userp)) {
                    /* replace i with the last valid element in the bucket */
                    seqlock_write_begin(&head->sequence);
                    qht_bucket_remove_entry(b, i);
                    seqlock_write_end(&head->sequence);
                    qht_bucket_debug__locked(b);
                    /* reevaluate i, since it just got replaced */
                    i--;
                    continue;
                }
                break;
            default:
                g_assert_not_reached();
            }
        }
        b = b->next;
    } while (b);
}

/* call with all of the map's locks held */
static inline void qht_map_iter__all_locked(struct qht_map *map,
                                            const struct qht_iter *iter,
                                            void *userp)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_iter(&map->buckets[i], iter, userp);
    }
}

static inline void
do_qht_iter(struct qht *ht, const struct qht_iter *iter, void *userp)
{
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    qht_map_iter__all_locked(map, iter, userp);
    qht_map_unlock_buckets(map);
}

void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp)
{
    const struct qht_iter iter = {
        .f.retvoid = func,
        .type = QHT_ITER_VOID,
    };

    do_qht_iter(ht, &iter, userp);
}

void qht_iter_remove(struct qht *ht, qht_iter_bool_func_t func, void *userp)
{
    const struct qht_iter iter = {
        .f.retbool = func,
        .type = QHT_ITER_RM,
    };

    do_qht_iter(ht, &iter, userp);
}
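
/*
 * A qht_iter_remove() sketch that prunes entries matching a predicate; the
 * entry_is_stale helper and 'struct entry' are hypothetical. Per the
 * assumptions at the top of this file, removed objects may only be freed
 * after an RCU grace period has elapsed:
 *
 *     static bool entry_is_stale(void *p, uint32_t hash, void *userp)
 *     {
 *         return ((struct entry *)p)->generation < *(uint64_t *)userp;
 *     }
 *
 *     qht_iter_remove(ht, entry_is_stale, &current_generation);
 */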

struct qht_map_copy_data {
    struct qht *ht;
    struct qht_map *new;
};

static void qht_map_copy(void *p, uint32_t hash, void *userp)
{
    struct qht_map_copy_data *data = userp;
    struct qht *ht = data->ht;
    struct qht_map *new = data->new;
    struct qht_bucket *b = qht_map_to_bucket(new, hash);

    /* no need to acquire b->lock because no thread has seen this map yet */
    qht_insert__locked(ht, new, b, p, hash, NULL);
}

/*
 * Atomically perform a resize and/or reset.
 * Call with ht->lock held.
 */
static void qht_do_resize_reset(struct qht *ht, struct qht_map *new, bool reset)
{
    struct qht_map *old;
    const struct qht_iter iter = {
        .f.retvoid = qht_map_copy,
        .type = QHT_ITER_VOID,
    };
    struct qht_map_copy_data data;

    old = ht->map;
    qht_map_lock_buckets(old);

    if (reset) {
        qht_map_reset__all_locked(old);
    }

    if (new == NULL) {
        qht_map_unlock_buckets(old);
        return;
    }

    g_assert(new->n_buckets != old->n_buckets);
    data.ht = ht;
    data.new = new;
    qht_map_iter__all_locked(old, &iter, &data);
    qht_map_debug__all_locked(new);

    atomic_rcu_set(&ht->map, new);
    qht_map_unlock_buckets(old);
    call_rcu(old, qht_map_destroy, rcu);
}

bool qht_resize(struct qht *ht, size_t n_elems)
{
    size_t n_buckets = qht_elems_to_buckets(n_elems);
    bool ret = false;

    qht_lock(ht);
    if (n_buckets != ht->map->n_buckets) {
        struct qht_map *new;

        new = qht_map_create(n_buckets);
        qht_do_resize(ht, new);
        ret = true;
    }
    qht_unlock(ht);

    return ret;
}

/* pass @stats to qht_statistics_destroy() when done */
void qht_statistics_init(struct qht *ht, struct qht_stats *stats)
{
    struct qht_map *map;
    int i;

    map = atomic_rcu_read(&ht->map);

    stats->used_head_buckets = 0;
    stats->entries = 0;
    qdist_init(&stats->chain);
    qdist_init(&stats->occupancy);
    /* bail out if the qht has not yet been initialized */
    if (unlikely(map == NULL)) {
        stats->head_buckets = 0;
        return;
    }
    stats->head_buckets = map->n_buckets;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *head = &map->buckets[i];
        struct qht_bucket *b;
        unsigned int version;
        size_t buckets;
        size_t entries;
        int j;

        do {
            version = seqlock_read_begin(&head->sequence);
            buckets = 0;
            entries = 0;
            b = head;
            do {
                for (j = 0; j < QHT_BUCKET_ENTRIES; j++) {
                    if (atomic_read(&b->pointers[j]) == NULL) {
                        break;
                    }
                    entries++;
                }
                buckets++;
                b = atomic_rcu_read(&b->next);
            } while (b);
        } while (seqlock_read_retry(&head->sequence, version));

        if (entries) {
            qdist_inc(&stats->chain, buckets);
            qdist_inc(&stats->occupancy,
                      (double)entries / QHT_BUCKET_ENTRIES / buckets);
            stats->used_head_buckets++;
            stats->entries += entries;
        } else {
            qdist_inc(&stats->occupancy, 0);
        }
    }
}
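
/*
 * A sketch of dumping the gathered statistics; the exact report format is
 * up to the caller:
 *
 *     struct qht_stats stats;
 *
 *     qht_statistics_init(ht, &stats);
 *     fprintf(stderr, "entries: %zu, head buckets used: %zu/%zu\n",
 *             stats.entries, stats.used_head_buckets, stats.head_buckets);
 *     qht_statistics_destroy(&stats);
 */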

void qht_statistics_destroy(struct qht_stats *stats)
{
    qdist_destroy(&stats->occupancy);
    qdist_destroy(&stats->chain);
}