/*
 * qht.c - QEMU Hash Table, designed to scale for read-mostly workloads.
 *
 * Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * Assumptions:
 * - NULL cannot be inserted/removed as a pointer value.
 * - Trying to insert an already-existing hash-pointer pair is OK. However,
 *   it is not OK to insert into the same hash table different hash-pointer
 *   pairs that have the same pointer value but different hashes.
 * - Lookups are performed under an RCU read-critical section; removals
 *   must wait for a grace period to elapse before freeing removed objects.
 *
 * Features:
 * - Reads (i.e. lookups and iterators) can be concurrent with other reads.
 *   Lookups that are concurrent with writes to the same bucket will retry
 *   via a seqlock; iterators acquire all bucket locks and therefore can be
 *   concurrent with lookups and are serialized with respect to writers.
 * - Writes (i.e. insertions/removals) can be concurrent with writes to
 *   different buckets; writes to the same bucket are serialized through a lock.
 * - Optional auto-resizing: the hash table resizes up if the load surpasses
 *   a certain threshold. Resizing is done concurrently with readers; writes
 *   are serialized with the resize operation.
 *
 * The key structure is the bucket, which is cacheline-sized. Buckets
 * contain a few hash values and pointers; the u32 hash values are stored in
 * full so that resizing is fast. Having this structure instead of directly
 * chaining items has two advantages:
 * - Failed lookups fail fast, and touch a minimum number of cache lines.
 * - Resizing the hash table with concurrent lookups is easy.
 *
 * There are two types of buckets:
 * 1. "head" buckets are the ones allocated in the array of buckets in qht_map.
 * 2. all "non-head" buckets (i.e. all others) are members of a chain that
 *    starts from a head bucket.
 * Note that the seqlock and spinlock of a head bucket apply to all buckets
 * chained to it; these two fields are unused in non-head buckets.
 *
 * On removals, we move the last valid item in the chain to the position of the
 * just-removed entry. This makes lookups slightly faster, since the moment an
 * invalid entry is found, the (failed) lookup is over.
 *
 * Resizing is done by taking all bucket spinlocks (so that no other writers can
 * race with us) and then copying all entries into a new hash map. Then, the
 * ht->map pointer is set, and the old map is freed once no RCU readers can see
 * it anymore.
 *
 * Writers check for concurrent resizes by comparing ht->map before and after
 * acquiring their bucket lock. If they don't match, a resize has occurred
 * while the bucket spinlock was being acquired.
 *
 * Related Work:
 * - Idea of cacheline-sized buckets with full hashes taken from:
 *   David, Guerraoui & Trigonakis, "Asynchronized Concurrency:
 *   The Secret to Scaling Concurrent Search Data Structures", ASPLOS'15.
 * - Why not RCU-based hash tables? They would allow us to get rid of the
 *   seqlock, but resizing would take forever since RCU read critical
 *   sections in QEMU take quite a long time.
 *   More info on relativistic hash tables:
 *   + Triplett, McKenney & Walpole, "Resizable, Scalable, Concurrent Hash
 *     Tables via Relativistic Programming", USENIX ATC'11.
 *   + Corbet, "Relativistic hash tables, part 1: Algorithms", @ lwn.net, 2014.
 *     https://lwn.net/Articles/612021/
 */
#include "qemu/osdep.h"
#include "qemu/qht.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"

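/*
 * Usage sketch (illustrative only): the calls below are the public API
 * declared in "qemu/qht.h"; the object 'o' and the hash function h() are
 * hypothetical and caller-provided.
 *
 *     struct qht ht;
 *
 *     qht_init(&ht, expected_n_elems, QHT_MODE_AUTO_RESIZE);
 *     qht_insert(&ht, o, h(o));     - true: (h(o), o) inserted
 *     qht_insert(&ht, o, h(o));     - false: pair already present
 *     qht_remove(&ht, o, h(o));     - true: removed; free 'o' only after
 *                                     an RCU grace period elapses
 *     qht_destroy(&ht);             - only once no readers/writers remain
 */
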
//#define QHT_DEBUG

/*
 * We want to avoid false sharing of cache lines. Most systems have 64-byte
 * cache lines so we go with it for simplicity.
 *
 * Note that systems with smaller cache lines will be fine (the struct is
 * almost 64 bytes); systems with larger cache lines might suffer from
 * some false sharing.
 */
#define QHT_BUCKET_ALIGN 64

/* define these to keep sizeof(qht_bucket) within QHT_BUCKET_ALIGN */
#if HOST_LONG_BITS == 32
#define QHT_BUCKET_ENTRIES 6
#else /* 64-bit */
#define QHT_BUCKET_ENTRIES 4
#endif
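
/*
 * Worked example (assuming the common case of a 4-byte QemuSpin and a
 * 4-byte QemuSeqLock): on a 64-bit host a bucket packs 4 (lock) +
 * 4 (sequence) + 4 * 4 (hashes) + 4 * 8 (pointers) + 8 (next) = 64 bytes;
 * on a 32-bit host, 4 + 4 + 6 * 4 + 6 * 4 + 4 = 60 bytes. Either way
 * sizeof(struct qht_bucket) <= QHT_BUCKET_ALIGN, which the
 * QEMU_BUILD_BUG_ON below checks at compile time.
 */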

/*
 * Note: reading partially-updated pointers in @pointers could lead to
 * segfaults. We thus access them with atomic_read/set; this guarantees
 * that the compiler makes all those accesses atomic. We also need the
 * volatile-like behavior in atomic_read, since otherwise the compiler
 * might refetch the pointer.
 * atomic_read's are of course not necessary when the bucket lock is held.
 *
 * If both ht->lock and b->lock are grabbed, ht->lock should always
 * be grabbed first.
 */
struct qht_bucket {
    QemuSpin lock;
    QemuSeqLock sequence;
    uint32_t hashes[QHT_BUCKET_ENTRIES];
    void *pointers[QHT_BUCKET_ENTRIES];
    struct qht_bucket *next;
} QEMU_ALIGNED(QHT_BUCKET_ALIGN);

QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);

/**
 * struct qht_map - structure to track an array of buckets
 * @rcu: used by RCU. Keep it as the top field in the struct to help valgrind
 *       find the whole struct.
 * @buckets: array of head buckets. It is constant once the map is created.
 * @n_buckets: number of head buckets. It is constant once the map is created.
 * @n_added_buckets: number of added (i.e. "non-head") buckets
 * @n_added_buckets_threshold: threshold to trigger an upward resize once the
 *                             number of added buckets surpasses it.
 *
 * Buckets are tracked in what we call a "map", i.e. this structure.
 */
struct qht_map {
    struct rcu_head rcu;
    struct qht_bucket *buckets;
    size_t n_buckets;
    size_t n_added_buckets;
    size_t n_added_buckets_threshold;
};

/* trigger a resize when n_added_buckets > n_buckets / div */
#define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8
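
/*
 * For example: a map with 1024 head buckets triggers an upward resize
 * once more than 1024 / 8 = 128 non-head buckets have been added.
 */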

static void qht_do_resize(struct qht *ht, struct qht_map *new);
static void qht_grow_maybe(struct qht *ht);

#ifdef QHT_DEBUG

#define qht_debug_assert(X) do { assert(X); } while (0)

static void qht_bucket_debug__locked(struct qht_bucket *b)
{
    bool seen_empty = false;
    bool corrupt = false;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                seen_empty = true;
                continue;
            }
            if (seen_empty) {
                fprintf(stderr, "%s: b: %p, pos: %i, hash: 0x%x, p: %p\n",
                        __func__, b, i, b->hashes[i], b->pointers[i]);
                corrupt = true;
            }
        }
        b = b->next;
    } while (b);
    qht_debug_assert(!corrupt);
}

static void qht_map_debug__all_locked(struct qht_map *map)
{
    int i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_debug__locked(&map->buckets[i]);
    }
}
#else

#define qht_debug_assert(X) do { (void)(X); } while (0)

static inline void qht_bucket_debug__locked(struct qht_bucket *b)
{ }

static inline void qht_map_debug__all_locked(struct qht_map *map)
{ }
#endif /* QHT_DEBUG */

static inline size_t qht_elems_to_buckets(size_t n_elems)
{
    return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
}
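
/*
 * For example, on a 64-bit host (QHT_BUCKET_ENTRIES == 4), n_elems == 1000
 * yields pow2ceil(250) == 256 head buckets.
 */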

static inline void qht_head_init(struct qht_bucket *b)
{
    memset(b, 0, sizeof(*b));
    qemu_spin_init(&b->lock);
    seqlock_init(&b->sequence);
}

static inline
struct qht_bucket *qht_map_to_bucket(struct qht_map *map, uint32_t hash)
{
    return &map->buckets[hash & (map->n_buckets - 1)];
}
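
/*
 * The mask above is correct because n_buckets is always a power of two:
 * maps are sized via qht_elems_to_buckets() (i.e. pow2ceil) or by doubling
 * in qht_grow_maybe(), so hash & (n_buckets - 1) == hash % n_buckets.
 */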

/* acquire all bucket locks from a map */
static void qht_map_lock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_lock(&b->lock);
    }
}

static void qht_map_unlock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_unlock(&b->lock);
    }
}

/*
 * Call with at least a bucket lock held.
 * @map should be the value read before acquiring the lock (or locks).
 */
static inline bool qht_map_is_stale__locked(struct qht *ht, struct qht_map *map)
{
    return map != ht->map;
}

/*
 * Grab all bucket locks, and set @pmap after making sure the map isn't stale.
 *
 * Pairs with qht_map_unlock_buckets(), hence the pass-by-reference.
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
{
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return;
    }
    qht_map_unlock_buckets(map);

    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qemu_mutex_lock(&ht->lock);
    map = ht->map;
    qht_map_lock_buckets(map);
    qemu_mutex_unlock(&ht->lock);
    *pmap = map;
}

/*
 * Get a head bucket and lock it, making sure its parent map is not stale.
 * @pmap is filled with a pointer to the bucket's parent map.
 *
 * Unlock with qemu_spin_unlock(&b->lock).
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
                                             struct qht_map **pmap)
{
    struct qht_bucket *b;
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    qemu_spin_lock(&b->lock);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return b;
    }
    qemu_spin_unlock(&b->lock);

    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qemu_mutex_lock(&ht->lock);
    map = ht->map;
    b = qht_map_to_bucket(map, hash);
    qemu_spin_lock(&b->lock);
    qemu_mutex_unlock(&ht->lock);
    *pmap = map;
    return b;
}

static inline bool qht_map_needs_resize(struct qht_map *map)
{
    return atomic_read(&map->n_added_buckets) > map->n_added_buckets_threshold;
}

static inline void qht_chain_destroy(struct qht_bucket *head)
{
    struct qht_bucket *curr = head->next;
    struct qht_bucket *prev;

    while (curr) {
        prev = curr;
        curr = curr->next;
        qemu_vfree(prev);
    }
}

/* pass only an orphan map */
static void qht_map_destroy(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_chain_destroy(&map->buckets[i]);
    }
    qemu_vfree(map->buckets);
    g_free(map);
}

static struct qht_map *qht_map_create(size_t n_buckets)
{
    struct qht_map *map;
    size_t i;

    map = g_malloc(sizeof(*map));
    map->n_buckets = n_buckets;

    map->n_added_buckets = 0;
    map->n_added_buckets_threshold = n_buckets /
        QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV;

    /* let tiny hash tables add at least one non-head bucket */
    if (unlikely(map->n_added_buckets_threshold == 0)) {
        map->n_added_buckets_threshold = 1;
    }

    map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
                                 sizeof(*map->buckets) * n_buckets);
    for (i = 0; i < n_buckets; i++) {
        qht_head_init(&map->buckets[i]);
    }
    return map;
}

void qht_init(struct qht *ht, size_t n_elems, unsigned int mode)
{
    struct qht_map *map;
    size_t n_buckets = qht_elems_to_buckets(n_elems);

    ht->mode = mode;
    qemu_mutex_init(&ht->lock);
    map = qht_map_create(n_buckets);
    atomic_rcu_set(&ht->map, map);
}

/* call only when there are no readers/writers left */
void qht_destroy(struct qht *ht)
{
    qht_map_destroy(ht->map);
    memset(ht, 0, sizeof(*ht));
}

static void qht_bucket_reset__locked(struct qht_bucket *head)
{
    struct qht_bucket *b = head;
    int i;

    seqlock_write_begin(&head->sequence);
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                goto done;
            }
            atomic_set(&b->hashes[i], 0);
            atomic_set(&b->pointers[i], NULL);
        }
        b = b->next;
    } while (b);
 done:
    seqlock_write_end(&head->sequence);
}

/* call with all bucket locks held */
static void qht_map_reset__all_locked(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_reset__locked(&map->buckets[i]);
    }
    qht_map_debug__all_locked(map);
}

void qht_reset(struct qht *ht)
{
    struct qht_map *map;

    qht_map_lock_buckets__no_stale(ht, &map);
    qht_map_reset__all_locked(map);
    qht_map_unlock_buckets(map);
}

bool qht_reset_size(struct qht *ht, size_t n_elems)
{
    struct qht_map *new = NULL;
    struct qht_map *map;
    size_t n_buckets;

    n_buckets = qht_elems_to_buckets(n_elems);

    qemu_mutex_lock(&ht->lock);
    map = ht->map;
    if (n_buckets != map->n_buckets) {
        new = qht_map_create(n_buckets);
    }

    qht_map_lock_buckets(map);
    qht_map_reset__all_locked(map);
    if (new) {
        qht_do_resize(ht, new);
    }
    qht_map_unlock_buckets(map);
    qemu_mutex_unlock(&ht->lock);

    return !!new;
}

static inline
void *qht_do_lookup(struct qht_bucket *head, qht_lookup_func_t func,
                    const void *userp, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (atomic_read(&b->hashes[i]) == hash) {
                /* The pointer is dereferenced before seqlock_read_retry,
                 * so (unlike qht_insert__locked) we need to use
                 * atomic_rcu_read here.
                 */
                void *p = atomic_rcu_read(&b->pointers[i]);

                if (likely(p) && likely(func(p, userp))) {
                    return p;
                }
            }
        }
        b = atomic_rcu_read(&b->next);
    } while (b);

    return NULL;
}

static __attribute__((noinline))
void *qht_lookup__slowpath(struct qht_bucket *b, qht_lookup_func_t func,
                           const void *userp, uint32_t hash)
{
    unsigned int version;
    void *ret;

    do {
        version = seqlock_read_begin(&b->sequence);
        ret = qht_do_lookup(b, func, userp, hash);
    } while (seqlock_read_retry(&b->sequence, version));
    return ret;
}

void *qht_lookup(struct qht *ht, qht_lookup_func_t func, const void *userp,
                 uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    unsigned int version;
    void *ret;

    map = atomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    version = seqlock_read_begin(&b->sequence);
    ret = qht_do_lookup(b, func, userp, hash);
    if (likely(!seqlock_read_retry(&b->sequence, version))) {
        return ret;
    }
    /*
     * Removing the do/while from the fastpath gives a 4% perf. increase when
     * running a 100%-lookup microbenchmark.
     */
    return qht_lookup__slowpath(b, func, userp, hash);
}
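
/*
 * Lookup example (illustrative; 'struct obj', its 'id' member and the
 * hash function h_id() are hypothetical):
 *
 *     static bool obj_cmp(const void *obj, const void *userp)
 *     {
 *         const struct obj *o = obj;
 *         const uint32_t *id = userp;
 *
 *         return o->id == *id;
 *     }
 *
 *     struct obj *o = qht_lookup(&ht, obj_cmp, &id, h_id(id));
 *
 * Note that @func may be called on entries that are being removed
 * concurrently; this is safe because removers wait for an RCU grace
 * period before freeing (see the head comment), and any torn read is
 * discarded by the seqlock retry.
 */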

/* call with head->lock held */
static bool qht_insert__locked(struct qht *ht, struct qht_map *map,
                               struct qht_bucket *head, void *p, uint32_t hash,
                               bool *needs_resize)
{
    struct qht_bucket *b = head;
    struct qht_bucket *prev = NULL;
    struct qht_bucket *new = NULL;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                if (unlikely(b->pointers[i] == p)) {
                    return false;
                }
            } else {
                goto found;
            }
        }
        prev = b;
        b = b->next;
    } while (b);

    b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b));
    memset(b, 0, sizeof(*b));
    new = b;
    i = 0;
    atomic_inc(&map->n_added_buckets);
    if (unlikely(qht_map_needs_resize(map)) && needs_resize) {
        *needs_resize = true;
    }

 found:
    /* found an empty slot: acquire the seqlock and write */
    seqlock_write_begin(&head->sequence);
    if (new) {
        atomic_rcu_set(&prev->next, b);
    }
    /* smp_wmb() implicit in seqlock_write_begin. */
    atomic_set(&b->hashes[i], hash);
    atomic_set(&b->pointers[i], p);
    seqlock_write_end(&head->sequence);
    return true;
}

static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
{
    struct qht_map *map;

    /*
     * If the lock is taken it probably means there's an ongoing resize,
     * so bail out.
     */
    if (qemu_mutex_trylock(&ht->lock)) {
        return;
    }
    map = ht->map;
    /* another thread might have just performed the resize we were after */
    if (qht_map_needs_resize(map)) {
        struct qht_map *new = qht_map_create(map->n_buckets * 2);

        qht_map_lock_buckets(map);
        qht_do_resize(ht, new);
        qht_map_unlock_buckets(map);
    }
    qemu_mutex_unlock(&ht->lock);
}

bool qht_insert(struct qht *ht, void *p, uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool needs_resize = false;
    bool ret;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    ret = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);

    if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
        qht_grow_maybe(ht);
    }
    return ret;
}

static inline bool qht_entry_is_last(struct qht_bucket *b, int pos)
{
    if (pos == QHT_BUCKET_ENTRIES - 1) {
        if (b->next == NULL) {
            return true;
        }
        return b->next->pointers[0] == NULL;
    }
    return b->pointers[pos + 1] == NULL;
}

static void
qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
{
    qht_debug_assert(!(to == from && i == j));
    qht_debug_assert(to->pointers[i]);
    qht_debug_assert(from->pointers[j]);

    atomic_set(&to->hashes[i], from->hashes[j]);
    atomic_set(&to->pointers[i], from->pointers[j]);

    atomic_set(&from->hashes[j], 0);
    atomic_set(&from->pointers[j], NULL);
}

/*
 * Find the last valid entry in @head, and swap it with @orig[pos], which has
 * just been invalidated.
 */
static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
{
    struct qht_bucket *b = orig;
    struct qht_bucket *prev = NULL;
    int i;

    if (qht_entry_is_last(orig, pos)) {
        orig->hashes[pos] = 0;
        atomic_set(&orig->pointers[pos], NULL);
        return;
    }
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                continue;
            }
            if (i > 0) {
                return qht_entry_move(orig, pos, b, i - 1);
            }
            qht_debug_assert(prev);
            return qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
        }
        prev = b;
        b = b->next;
    } while (b);
    /* no free entries other than orig[pos], so swap it with the last one */
    qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
}
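
/*
 * For example: given the chain head:[e0 e1 e2 e3] -> b:[e4 e5 - -],
 * removing e1 moves the last valid entry e5 into e1's slot, yielding
 * head:[e0 e5 e2 e3] -> b:[e4 - - -]. Valid entries thus stay contiguous
 * at the front of the chain, which is what lets failed lookups stop at
 * the first NULL pointer.
 */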

/* call with b->lock held */
static inline
bool qht_remove__locked(struct qht_map *map, struct qht_bucket *head,
                        const void *p, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            void *q = b->pointers[i];

            if (unlikely(q == NULL)) {
                return false;
            }
            if (q == p) {
                qht_debug_assert(b->hashes[i] == hash);
                seqlock_write_begin(&head->sequence);
                qht_bucket_remove_entry(b, i);
                seqlock_write_end(&head->sequence);
                return true;
            }
        }
        b = b->next;
    } while (b);
    return false;
}

bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool ret;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    ret = qht_remove__locked(map, b, p, hash);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);
    return ret;
}

static inline void qht_bucket_iter(struct qht *ht, struct qht_bucket *b,
                                   qht_iter_func_t func, void *userp)
{
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                return;
            }
            func(ht, b->pointers[i], b->hashes[i], userp);
        }
        b = b->next;
    } while (b);
}

/* call with all of the map's locks held */
static inline void qht_map_iter__all_locked(struct qht *ht, struct qht_map *map,
                                            qht_iter_func_t func, void *userp)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_iter(ht, &map->buckets[i], func, userp);
    }
}

void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp)
{
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    /* Note: ht here is merely for carrying ht->mode; ht->map won't be read */
    qht_map_iter__all_locked(ht, map, func, userp);
    qht_map_unlock_buckets(map);
}
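
/*
 * Iteration example (illustrative): counting all entries.
 *
 *     static void count_entry(struct qht *ht, void *p, uint32_t hash,
 *                             void *userp)
 *     {
 *         size_t *count = userp;
 *
 *         (*count)++;
 *     }
 *
 *     size_t count = 0;
 *     qht_iter(&ht, count_entry, &count);
 *
 * Since qht_iter() holds all bucket locks for the duration, the callback
 * must not call back into qht_insert()/qht_remove() on @ht: the bucket
 * spinlocks are not recursive, so doing so would self-deadlock.
 */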

static void qht_map_copy(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    struct qht_map *new = userp;
    struct qht_bucket *b = qht_map_to_bucket(new, hash);

    /* no need to acquire b->lock because no thread has seen this map yet */
    qht_insert__locked(ht, new, b, p, hash, NULL);
}

/*
 * Call with ht->lock and all bucket locks held.
 *
 * Creating the @new map here would add unnecessary delay while all the locks
 * are held--holding up the bucket locks is particularly bad, since no writes
 * can occur while these are held. Thus, we let callers create the new map,
 * hopefully without the bucket locks held.
 */
static void qht_do_resize(struct qht *ht, struct qht_map *new)
{
    struct qht_map *old;

    old = ht->map;
    g_assert_cmpuint(new->n_buckets, !=, old->n_buckets);

    qht_map_iter__all_locked(ht, old, qht_map_copy, new);
    qht_map_debug__all_locked(new);

    atomic_rcu_set(&ht->map, new);
    call_rcu(old, qht_map_destroy, rcu);
}

bool qht_resize(struct qht *ht, size_t n_elems)
{
    size_t n_buckets = qht_elems_to_buckets(n_elems);
    bool ret = false;

    qemu_mutex_lock(&ht->lock);
    if (n_buckets != ht->map->n_buckets) {
        struct qht_map *new;
        struct qht_map *old = ht->map;

        new = qht_map_create(n_buckets);
        qht_map_lock_buckets(old);
        qht_do_resize(ht, new);
        qht_map_unlock_buckets(old);
        ret = true;
    }
    qemu_mutex_unlock(&ht->lock);

    return ret;
}

/* pass @stats to qht_statistics_destroy() when done */
void qht_statistics_init(struct qht *ht, struct qht_stats *stats)
{
    struct qht_map *map;
    int i;

    map = atomic_rcu_read(&ht->map);

    stats->used_head_buckets = 0;
    stats->entries = 0;
    qdist_init(&stats->chain);
    qdist_init(&stats->occupancy);
    /* bail out if the qht has not yet been initialized */
    if (unlikely(map == NULL)) {
        stats->head_buckets = 0;
        return;
    }
    stats->head_buckets = map->n_buckets;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *head = &map->buckets[i];
        struct qht_bucket *b;
        unsigned int version;
        size_t buckets;
        size_t entries;
        int j;

        do {
            version = seqlock_read_begin(&head->sequence);
            buckets = 0;
            entries = 0;
            b = head;
            do {
                for (j = 0; j < QHT_BUCKET_ENTRIES; j++) {
                    if (atomic_read(&b->pointers[j]) == NULL) {
                        break;
                    }
                    entries++;
                }
                buckets++;
                b = atomic_rcu_read(&b->next);
            } while (b);
        } while (seqlock_read_retry(&head->sequence, version));

        if (entries) {
            qdist_inc(&stats->chain, buckets);
            qdist_inc(&stats->occupancy,
                      (double)entries / QHT_BUCKET_ENTRIES / buckets);
            stats->used_head_buckets++;
            stats->entries += entries;
        } else {
            qdist_inc(&stats->occupancy, 0);
        }
    }
}
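
/*
 * Example (illustrative; assumes qdist_avg() from "qemu/qdist.h"):
 *
 *     struct qht_stats stats;
 *
 *     qht_statistics_init(&ht, &stats);
 *     printf("entries: %zu, avg bucket chain length: %.2f\n",
 *            stats.entries, qdist_avg(&stats.chain));
 *     qht_statistics_destroy(&stats);
 */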

void qht_statistics_destroy(struct qht_stats *stats)
{
    qdist_destroy(&stats->occupancy);
    qdist_destroy(&stats->chain);
}