/*
 * qht.c - QEMU Hash Table, designed to scale for read-mostly workloads.
 *
 * Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * Assumptions:
 * - NULL cannot be inserted/removed as a pointer value.
 * - Trying to insert an already-existing hash-pointer pair is OK. However,
 *   it is not OK to insert into the same hash table different hash-pointer
 *   pairs that have the same pointer value, but not the hashes.
 * - Lookups are performed under an RCU read-critical section; removals
 *   must wait for a grace period to elapse before freeing removed objects.
 *
 * Features:
 * - Reads (i.e. lookups and iterators) can be concurrent with other reads.
 *   Lookups that are concurrent with writes to the same bucket will retry
 *   via a seqlock; iterators acquire all bucket locks and therefore can be
 *   concurrent with lookups and are serialized wrt writers.
 * - Writes (i.e. insertions/removals) can be concurrent with writes to
 *   different buckets; writes to the same bucket are serialized through a lock.
 * - Optional auto-resizing: the hash table resizes up if the load surpasses
 *   a certain threshold. Resizing is done concurrently with readers; writes
 *   are serialized with the resize operation.
 *
 * The key structure is the bucket, which is cacheline-sized. Buckets
 * contain a few hash values and pointers; the u32 hash values are stored in
 * full so that resizing is fast. Having this structure instead of directly
 * chaining items has two advantages:
 * - Failed lookups fail fast, and touch a minimum number of cache lines.
 * - Resizing the hash table with concurrent lookups is easy.
 *
 * There are two types of buckets:
 * 1. "head" buckets are the ones allocated in the array of buckets in qht_map.
 * 2. all "non-head" buckets (i.e. all others) are members of a chain that
 *    starts from a head bucket.
 * Note that the seqlock and spinlock of a head bucket apply to all buckets
 * chained to it; these two fields are unused in non-head buckets.
 *
 * On removals, we move the last valid item in the chain to the position of the
 * just-removed entry. This makes lookups slightly faster, since the moment an
 * invalid entry is found, the (failed) lookup is over.
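 *
 * Worked example (illustrative only, with QHT_BUCKET_ENTRIES == 4): given the
 * chain [A B C D] -> [E F _ _], removing B moves the last valid item, F, into
 * B's slot, leaving [A F C D] -> [E _ _ _]. Valid entries thus stay packed at
 * the front of the chain, so removal and iteration scans can stop at the first
 * NULL pointer they see.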
 *
 * Resizing is done by taking all bucket spinlocks (so that no other writers can
 * race with us) and then copying all entries into a new hash map. Then, the
 * ht->map pointer is set, and the old map is freed once no RCU readers can see
 * it anymore.
 *
 * Writers check for concurrent resizes by comparing ht->map before and after
 * acquiring their bucket lock. If they don't match, a resize has occurred
 * while the bucket spinlock was being acquired.
 *
 * Related Work:
 * - Idea of cacheline-sized buckets with full hashes taken from:
 *   David, Guerraoui & Trigonakis, "Asynchronized Concurrency:
 *   The Secret to Scaling Concurrent Search Data Structures", ASPLOS'15.
 * - Why not RCU-based hash tables? They would allow us to get rid of the
 *   seqlock, but resizing would take forever since RCU read critical
 *   sections in QEMU take quite a long time.
 *   More info on relativistic hash tables:
 *   + Triplett, McKenney & Walpole, "Resizable, Scalable, Concurrent Hash
 *     Tables via Relativistic Programming", USENIX ATC'11.
 *   + Corbet, "Relativistic hash tables, part 1: Algorithms", @ lwn.net, 2014.
 *     https://lwn.net/Articles/612021/
 */
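
/*
 * Example usage (an illustrative sketch only; see qemu/qht.h for the
 * authoritative prototypes). It assumes a caller-supplied hash function
 * ("my_hash" below is hypothetical) and follows the assumptions above:
 * no NULL values, and lookups inside an RCU read-side critical section.
 *
 *     static bool entry_cmp(const void *obj, const void *userp)
 *     {
 *         return obj == userp; // or compare a key stored inside *obj
 *     }
 *
 *     struct qht ht;
 *     uint32_t hash;
 *     void *p;
 *
 *     qht_init(&ht, 1 << 16, QHT_MODE_AUTO_RESIZE);
 *
 *     hash = my_hash(elem);
 *     qht_insert(&ht, elem, hash);
 *
 *     rcu_read_lock();
 *     p = qht_lookup(&ht, entry_cmp, elem, hash);
 *     rcu_read_unlock();
 *
 *     qht_remove(&ht, elem, hash);
 *     // free elem only after an RCU grace period has elapsed
 *
 *     qht_destroy(&ht);
 */
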
#include "qemu/osdep.h"
#include "qemu/qht.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"

//#define QHT_DEBUG

/*
 * We want to avoid false sharing of cache lines. Most systems have 64-byte
 * cache lines so we go with it for simplicity.
 *
 * Note that systems with smaller cache lines will be fine (the struct is
 * almost 64-bytes); systems with larger cache lines might suffer from
 * some false sharing.
 */
#define QHT_BUCKET_ALIGN 64

/* define these to keep sizeof(qht_bucket) within QHT_BUCKET_ALIGN */
#if HOST_LONG_BITS == 32
#define QHT_BUCKET_ENTRIES 6
#else /* 64-bit */
#define QHT_BUCKET_ENTRIES 4
#endif
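
/*
 * Back-of-the-envelope check (illustrative; assumes 4-byte QemuSpin and
 * QemuSeqLock): on 64-bit hosts, 4 + 4 + 4 * 4 (hashes) + 4 * 8 (pointers)
 * + 8 (next) = 64 bytes; on 32-bit hosts, 4 + 4 + 6 * 4 + 6 * 4 + 4 = 60
 * bytes, i.e. within QHT_BUCKET_ALIGN in both cases.
 */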

/*
 * Note: reading partially-updated pointers in @pointers could lead to
 * segfaults. We thus access them with atomic_read/set; this guarantees
 * that the compiler makes all those accesses atomic. We also need the
 * volatile-like behavior in atomic_read, since otherwise the compiler
 * might refetch the pointer.
 * atomic_read's are of course not necessary when the bucket lock is held.
 *
 * If both ht->lock and b->lock are grabbed, ht->lock should always
 * be grabbed first.
 */
struct qht_bucket {
    QemuSpin lock;
    QemuSeqLock sequence;
    uint32_t hashes[QHT_BUCKET_ENTRIES];
    void *pointers[QHT_BUCKET_ENTRIES];
    struct qht_bucket *next;
} QEMU_ALIGNED(QHT_BUCKET_ALIGN);

QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);

/**
 * struct qht_map - structure to track an array of buckets
 * @rcu: used by RCU. Keep it as the top field in the struct to help valgrind
 *       find the whole struct.
 * @buckets: array of head buckets. It is constant once the map is created.
 * @n_buckets: number of head buckets. It is constant once the map is created.
 * @n_added_buckets: number of added (i.e. "non-head") buckets
 * @n_added_buckets_threshold: threshold to trigger an upward resize once the
 *                             number of added buckets surpasses it.
 *
 * Buckets are tracked in what we call a "map", i.e. this structure.
 */
struct qht_map {
    struct rcu_head rcu;
    struct qht_bucket *buckets;
    size_t n_buckets;
    size_t n_added_buckets;
    size_t n_added_buckets_threshold;
};

/* trigger a resize when n_added_buckets > n_buckets / div */
#define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8
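
/*
 * For example, with a divisor of 8 a map with 1024 head buckets becomes
 * eligible for an upward resize (when QHT_MODE_AUTO_RESIZE is set) once
 * more than 128 non-head buckets have been chained to it.
 */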

static void qht_do_resize(struct qht *ht, struct qht_map *new);
static void qht_grow_maybe(struct qht *ht);

#ifdef QHT_DEBUG

#define qht_debug_assert(X) do { assert(X); } while (0)

static void qht_bucket_debug__locked(struct qht_bucket *b)
{
    bool seen_empty = false;
    bool corrupt = false;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                seen_empty = true;
                continue;
            }
            if (seen_empty) {
                fprintf(stderr, "%s: b: %p, pos: %i, hash: 0x%x, p: %p\n",
                        __func__, b, i, b->hashes[i], b->pointers[i]);
                corrupt = true;
            }
        }
        b = b->next;
    } while (b);
    qht_debug_assert(!corrupt);
}

static void qht_map_debug__all_locked(struct qht_map *map)
{
    int i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_debug__locked(&map->buckets[i]);
    }
}
#else

#define qht_debug_assert(X) do { (void)(X); } while (0)

static inline void qht_bucket_debug__locked(struct qht_bucket *b)
{ }

static inline void qht_map_debug__all_locked(struct qht_map *map)
{ }
#endif /* QHT_DEBUG */

static inline size_t qht_elems_to_buckets(size_t n_elems)
{
    return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
}
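
/*
 * For instance, qht_elems_to_buckets(10000) on a 64-bit host (4 entries per
 * bucket) computes 10000 / 4 = 2500, which pow2ceil() rounds up to 4096 head
 * buckets.
 */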

static inline void qht_head_init(struct qht_bucket *b)
{
    memset(b, 0, sizeof(*b));
    qemu_spin_init(&b->lock);
    seqlock_init(&b->sequence);
}

static inline
struct qht_bucket *qht_map_to_bucket(struct qht_map *map, uint32_t hash)
{
    return &map->buckets[hash & (map->n_buckets - 1)];
}
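
/*
 * Since n_buckets is always a power of two (see qht_elems_to_buckets), the
 * mask above selects a bucket exactly; e.g. with 4096 head buckets, hash
 * 0x12345678 maps to head bucket 0x678.
 */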

/* acquire all bucket locks from a map */
static void qht_map_lock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_lock(&b->lock);
    }
}

static void qht_map_unlock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_unlock(&b->lock);
    }
}

/*
 * Call with at least a bucket lock held.
 * @map should be the value read before acquiring the lock (or locks).
 */
static inline bool qht_map_is_stale__locked(struct qht *ht, struct qht_map *map)
{
    return map != ht->map;
}

/*
 * Grab all bucket locks, and set @pmap after making sure the map isn't stale.
 *
 * Pairs with qht_map_unlock_buckets(), hence the pass-by-reference.
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
{
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return;
    }
    qht_map_unlock_buckets(map);

    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qemu_mutex_lock(&ht->lock);
    map = ht->map;
    qht_map_lock_buckets(map);
    qemu_mutex_unlock(&ht->lock);
    *pmap = map;
    return;
}

/*
 * Get a head bucket and lock it, making sure its parent map is not stale.
 * @pmap is filled with a pointer to the bucket's parent map.
 *
 * Unlock with qemu_spin_unlock(&b->lock).
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
                                             struct qht_map **pmap)
{
    struct qht_bucket *b;
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    qemu_spin_lock(&b->lock);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return b;
    }
    qemu_spin_unlock(&b->lock);

    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qemu_mutex_lock(&ht->lock);
    map = ht->map;
    b = qht_map_to_bucket(map, hash);
    qemu_spin_lock(&b->lock);
    qemu_mutex_unlock(&ht->lock);
    *pmap = map;
    return b;
}
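
/*
 * Illustrative interleaving behind the stale-map checks above: a writer reads
 * ht->map and starts spinning on a bucket lock of that (old) map; meanwhile a
 * resize completes and publishes a new map via ht->map. When the writer
 * finally acquires the lock it notices map != ht->map, unlocks, and retries
 * against the current map under ht->lock, so its update is never applied to a
 * map that has already been replaced.
 */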

static inline bool qht_map_needs_resize(struct qht_map *map)
{
    return atomic_read(&map->n_added_buckets) > map->n_added_buckets_threshold;
}

static inline void qht_chain_destroy(struct qht_bucket *head)
{
    struct qht_bucket *curr = head->next;
    struct qht_bucket *prev;

    while (curr) {
        prev = curr;
        curr = curr->next;
        qemu_vfree(prev);
    }
}

/* pass only an orphan map */
static void qht_map_destroy(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_chain_destroy(&map->buckets[i]);
    }
    qemu_vfree(map->buckets);
    g_free(map);
}

static struct qht_map *qht_map_create(size_t n_buckets)
{
    struct qht_map *map;
    size_t i;

    map = g_malloc(sizeof(*map));
    map->n_buckets = n_buckets;

    map->n_added_buckets = 0;
    map->n_added_buckets_threshold = n_buckets /
        QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV;
    /* let tiny hash tables add at least one non-head bucket */
    if (unlikely(map->n_added_buckets_threshold == 0)) {
        map->n_added_buckets_threshold = 1;
    }

    map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
                                 sizeof(*map->buckets) * n_buckets);
    for (i = 0; i < n_buckets; i++) {
        qht_head_init(&map->buckets[i]);
    }
    return map;
}

void qht_init(struct qht *ht, size_t n_elems, unsigned int mode)
{
    struct qht_map *map;
    size_t n_buckets = qht_elems_to_buckets(n_elems);

    ht->mode = mode;
    qemu_mutex_init(&ht->lock);
    map = qht_map_create(n_buckets);
    atomic_rcu_set(&ht->map, map);
}

/* call only when there are no readers/writers left */
void qht_destroy(struct qht *ht)
{
    qht_map_destroy(ht->map);
    memset(ht, 0, sizeof(*ht));
}

static void qht_bucket_reset__locked(struct qht_bucket *head)
{
    struct qht_bucket *b = head;
    int i;

    seqlock_write_begin(&head->sequence);
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                goto done;
            }
            b->hashes[i] = 0;
            atomic_set(&b->pointers[i], NULL);
        }
        b = b->next;
    } while (b);
 done:
    seqlock_write_end(&head->sequence);
}

/* call with all bucket locks held */
static void qht_map_reset__all_locked(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_reset__locked(&map->buckets[i]);
    }
    qht_map_debug__all_locked(map);
}

void qht_reset(struct qht *ht)
{
    struct qht_map *map;

    qht_map_lock_buckets__no_stale(ht, &map);
    qht_map_reset__all_locked(map);
    qht_map_unlock_buckets(map);
}

bool qht_reset_size(struct qht *ht, size_t n_elems)
{
    struct qht_map *new;
    struct qht_map *map;
    size_t n_buckets;
    bool resize = false;

    n_buckets = qht_elems_to_buckets(n_elems);

    qemu_mutex_lock(&ht->lock);
    map = ht->map;
    if (n_buckets != map->n_buckets) {
        new = qht_map_create(n_buckets);
        resize = true;
    }

    qht_map_lock_buckets(map);
    qht_map_reset__all_locked(map);
    if (resize) {
        qht_do_resize(ht, new);
    }
    qht_map_unlock_buckets(map);
    qemu_mutex_unlock(&ht->lock);

    return resize;
}

static inline
void *qht_do_lookup(struct qht_bucket *head, qht_lookup_func_t func,
                    const void *userp, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->hashes[i] == hash) {
                void *p = atomic_read(&b->pointers[i]);

                if (likely(p) && likely(func(p, userp))) {
                    return p;
                }
            }
        }
        b = atomic_rcu_read(&b->next);
    } while (b);

    return NULL;
}

static __attribute__((noinline))
void *qht_lookup__slowpath(struct qht_bucket *b, qht_lookup_func_t func,
                           const void *userp, uint32_t hash)
{
    unsigned int version;
    void *ret;

    do {
        version = seqlock_read_begin(&b->sequence);
        ret = qht_do_lookup(b, func, userp, hash);
    } while (seqlock_read_retry(&b->sequence, version));
    return ret;
}

void *qht_lookup(struct qht *ht, qht_lookup_func_t func, const void *userp,
                 uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    unsigned int version;
    void *ret;

    map = atomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    version = seqlock_read_begin(&b->sequence);
    ret = qht_do_lookup(b, func, userp, hash);
    if (likely(!seqlock_read_retry(&b->sequence, version))) {
        return ret;
    }
    /*
     * Removing the do/while from the fastpath gives a 4% perf. increase when
     * running a 100%-lookup microbenchmark.
     */
    return qht_lookup__slowpath(b, func, userp, hash);
}

/* call with head->lock held */
static bool qht_insert__locked(struct qht *ht, struct qht_map *map,
                               struct qht_bucket *head, void *p, uint32_t hash,
                               bool *needs_resize)
{
    struct qht_bucket *b = head;
    struct qht_bucket *prev = NULL;
    struct qht_bucket *new = NULL;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                if (unlikely(b->pointers[i] == p)) {
                    return false;
                }
            } else {
                goto found;
            }
        }
        prev = b;
        b = b->next;
    } while (b);

    b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b));
    memset(b, 0, sizeof(*b));
    new = b;
    i = 0;
    atomic_inc(&map->n_added_buckets);
    if (unlikely(qht_map_needs_resize(map)) && needs_resize) {
        *needs_resize = true;
    }

 found:
    /* found an empty slot: acquire the seqlock and write */
    seqlock_write_begin(&head->sequence);
    if (new) {
        atomic_rcu_set(&prev->next, b);
    }
    b->hashes[i] = hash;
    atomic_set(&b->pointers[i], p);
    seqlock_write_end(&head->sequence);
    return true;
}

static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
{
    struct qht_map *map;

    /*
     * If the lock is taken it probably means there's an ongoing resize,
     * so bail out.
     */
    if (qemu_mutex_trylock(&ht->lock)) {
        return;
    }
    map = ht->map;
    /* another thread might have just performed the resize we were after */
    if (qht_map_needs_resize(map)) {
        struct qht_map *new = qht_map_create(map->n_buckets * 2);

        qht_map_lock_buckets(map);
        qht_do_resize(ht, new);
        qht_map_unlock_buckets(map);
    }
    qemu_mutex_unlock(&ht->lock);
}

bool qht_insert(struct qht *ht, void *p, uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool needs_resize = false;
    bool ret;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    ret = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);

    if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
        qht_grow_maybe(ht);
    }
    return ret;
}

static inline bool qht_entry_is_last(struct qht_bucket *b, int pos)
{
    if (pos == QHT_BUCKET_ENTRIES - 1) {
        if (b->next == NULL) {
            return true;
        }
        return b->next->pointers[0] == NULL;
    }
    return b->pointers[pos + 1] == NULL;
}

static void
qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
{
    qht_debug_assert(!(to == from && i == j));
    qht_debug_assert(to->pointers[i]);
    qht_debug_assert(from->pointers[j]);

    to->hashes[i] = from->hashes[j];
    atomic_set(&to->pointers[i], from->pointers[j]);

    from->hashes[j] = 0;
    atomic_set(&from->pointers[j], NULL);
}

/*
 * Find the last valid entry in @head, and swap it with @orig[pos], which has
 * just been invalidated.
 */
static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
{
    struct qht_bucket *b = orig;
    struct qht_bucket *prev = NULL;
    int i;

    if (qht_entry_is_last(orig, pos)) {
        orig->hashes[pos] = 0;
        atomic_set(&orig->pointers[pos], NULL);
        return;
    }
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                continue;
            }
            if (i > 0) {
                return qht_entry_move(orig, pos, b, i - 1);
            }
            qht_debug_assert(prev);
            return qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
        }
        prev = b;
        b = b->next;
    } while (b);
    /* no free entries other than orig[pos], so swap it with the last one */
    qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
}

/* call with b->lock held */
static inline
bool qht_remove__locked(struct qht_map *map, struct qht_bucket *head,
                        const void *p, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            void *q = b->pointers[i];

            if (unlikely(q == NULL)) {
                return false;
            }
            if (q == p) {
                qht_debug_assert(b->hashes[i] == hash);
                seqlock_write_begin(&head->sequence);
                qht_bucket_remove_entry(b, i);
                seqlock_write_end(&head->sequence);
                return true;
            }
        }
        b = b->next;
    } while (b);
    return false;
}

bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool ret;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    ret = qht_remove__locked(map, b, p, hash);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);
    return ret;
}

static inline void qht_bucket_iter(struct qht *ht, struct qht_bucket *b,
                                   qht_iter_func_t func, void *userp)
{
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                return;
            }
            func(ht, b->pointers[i], b->hashes[i], userp);
        }
        b = b->next;
    } while (b);
}

/* call with all of the map's locks held */
static inline void qht_map_iter__all_locked(struct qht *ht, struct qht_map *map,
                                            qht_iter_func_t func, void *userp)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_iter(ht, &map->buckets[i], func, userp);
    }
}

void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp)
{
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    /* Note: ht here is merely for carrying ht->mode; ht->map won't be read */
    qht_map_iter__all_locked(ht, map, func, userp);
    qht_map_unlock_buckets(map);
}

static void qht_map_copy(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    struct qht_map *new = userp;
    struct qht_bucket *b = qht_map_to_bucket(new, hash);

    /* no need to acquire b->lock because no thread has seen this map yet */
    qht_insert__locked(ht, new, b, p, hash, NULL);
}

/*
 * Call with ht->lock and all bucket locks held.
 *
 * Creating the @new map here would add unnecessary delay while all the locks
 * are held--holding up the bucket locks is particularly bad, since no writes
 * can occur while these are held. Thus, we let callers create the new map,
 * hopefully without the bucket locks held.
 */
static void qht_do_resize(struct qht *ht, struct qht_map *new)
{
    struct qht_map *old;

    old = ht->map;
    g_assert_cmpuint(new->n_buckets, !=, old->n_buckets);

    qht_map_iter__all_locked(ht, old, qht_map_copy, new);
    qht_map_debug__all_locked(new);

    atomic_rcu_set(&ht->map, new);
    call_rcu(old, qht_map_destroy, rcu);
}

bool qht_resize(struct qht *ht, size_t n_elems)
{
    size_t n_buckets = qht_elems_to_buckets(n_elems);
    bool ret = false;

    qemu_mutex_lock(&ht->lock);
    if (n_buckets != ht->map->n_buckets) {
        struct qht_map *new;
        struct qht_map *old = ht->map;

        new = qht_map_create(n_buckets);
        qht_map_lock_buckets(old);
        qht_do_resize(ht, new);
        qht_map_unlock_buckets(old);
        ret = true;
    }
    qemu_mutex_unlock(&ht->lock);

    return ret;
}

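/*
 * Typical use of the statistics interface below (illustrative sketch):
 *
 *     struct qht_stats stats;
 *
 *     qht_statistics_init(ht, &stats);
 *     // inspect stats.entries, stats.head_buckets, stats.used_head_buckets,
 *     // and the qdist histograms stats.chain and stats.occupancy
 *     qht_statistics_destroy(&stats);
 */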
/* pass @stats to qht_statistics_destroy() when done */
void qht_statistics_init(struct qht *ht, struct qht_stats *stats)
{
    struct qht_map *map;
    int i;

    map = atomic_rcu_read(&ht->map);

    stats->head_buckets = map->n_buckets;
    stats->used_head_buckets = 0;
    stats->entries = 0;
    qdist_init(&stats->chain);
    qdist_init(&stats->occupancy);

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *head = &map->buckets[i];
        struct qht_bucket *b;
        unsigned int version;
        size_t buckets;
        size_t entries;
        int j;

        do {
            version = seqlock_read_begin(&head->sequence);
            buckets = 0;
            entries = 0;
            b = head;
            do {
                for (j = 0; j < QHT_BUCKET_ENTRIES; j++) {
                    if (atomic_read(&b->pointers[j]) == NULL) {
                        break;
                    }
                    entries++;
                }
                buckets++;
                b = atomic_rcu_read(&b->next);
            } while (b);
        } while (seqlock_read_retry(&head->sequence, version));

        if (entries) {
            qdist_inc(&stats->chain, buckets);
            qdist_inc(&stats->occupancy,
                      (double)entries / QHT_BUCKET_ENTRIES / buckets);
            stats->used_head_buckets++;
            stats->entries += entries;
        } else {
            qdist_inc(&stats->occupancy, 0);
        }
    }
}

void qht_statistics_destroy(struct qht_stats *stats)
{
    qdist_destroy(&stats->occupancy);
    qdist_destroy(&stats->chain);
}