QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);
+/*
+ * Under TSAN, we use striped locks instead of one lock per bucket chain.
+ * This avoids crashing under TSAN, which aborts the program if more than
+ * 64 locks are held at the same time (a hardcoded TSAN limit).
+ * When resizing a QHT we grab all the buckets' locks, which can easily
+ * go over TSAN's limit. By using striped locks, we avoid this problem.
+ *
+ * Note: this number must be a power of two for easy index computation.
+ */
+#define QHT_TSAN_BUCKET_LOCKS_BITS 4
+#define QHT_TSAN_BUCKET_LOCKS (1 << QHT_TSAN_BUCKET_LOCKS_BITS)
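+/*
+ * Example (illustrative): since the lock count is a power of two, the lock
+ * index is computed with a mask instead of a modulo. With 4 bits there are
+ * 16 striped locks, so bucket index 37 maps to lock 37 & (16 - 1) == 5.
+ */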
+
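+/*
+ * Each striped lock is padded to QHT_BUCKET_ALIGN so that the locks sit on
+ * separate cache lines, which should avoid false sharing between stripes.
+ */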
+struct qht_tsan_lock {
+ QemuSpin lock;
+} QEMU_ALIGNED(QHT_BUCKET_ALIGN);
+
/**
* struct qht_map - structure to track an array of buckets
* @rcu: used by RCU. Keep it as the top field in the struct to help valgrind
* @n_added_buckets: number of added (i.e. "non-head") buckets
* @n_added_buckets_threshold: threshold to trigger an upward resize once the
* number of added buckets surpasses it.
+ * @tsan_bucket_locks: array of striped locks, used only under TSAN.
*
* Buckets are tracked in what we call a "map", i.e. this structure.
*/
size_t n_buckets;
size_t n_added_buckets;
size_t n_added_buckets_threshold;
+#ifdef CONFIG_TSAN
+ struct qht_tsan_lock tsan_bucket_locks[QHT_TSAN_BUCKET_LOCKS];
+#endif
};
/* trigger a resize when n_added_buckets > n_buckets / div */
return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
}
-static inline void qht_head_init(struct qht_bucket *b)
+/*
+ * When using striped locks (i.e. under TSAN), we have to be careful not
+ * to operate on the same lock twice (e.g. when iterating through all buckets).
+ * We do this by operating on a stripe's lock only when given the first
+ * bucket in the map that maps to that lock.
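+ * E.g. when locking all buckets of a 64-bucket map with 16 striped locks,
+ * only buckets 0..15 are first in their stripe, so each lock is acquired
+ * exactly once rather than four times.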
+ */
+static inline void qht_do_if_first_in_stripe(struct qht_map *map,
+ struct qht_bucket *b,
+ void (*func)(QemuSpin *spin))
+{
+#ifdef CONFIG_TSAN
+ unsigned long bucket_idx = b - map->buckets;
+ bool is_first_in_stripe = (bucket_idx >> QHT_TSAN_BUCKET_LOCKS_BITS) == 0;
+ if (is_first_in_stripe) {
+        /* bucket_idx < QHT_TSAN_BUCKET_LOCKS, so it is also the lock index */
+        func(&map->tsan_bucket_locks[bucket_idx].lock);
+ }
+#else
+ func(&b->lock);
+#endif
+}
+
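+/*
+ * Map @b to its stripe's lock and apply @func to it. Per-bucket paths
+ * (locking/unlocking a single chain) touch at most one stripe lock, so
+ * unlike qht_do_if_first_in_stripe() no deduplication is needed here.
+ */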
+static inline void qht_bucket_lock_do(struct qht_map *map,
+ struct qht_bucket *b,
+ void (*func)(QemuSpin *lock))
+{
+#ifdef CONFIG_TSAN
+ unsigned long bucket_idx = b - map->buckets;
+ unsigned long lock_idx = bucket_idx & (QHT_TSAN_BUCKET_LOCKS - 1);
+ func(&map->tsan_bucket_locks[lock_idx].lock);
+#else
+ func(&b->lock);
+#endif
+}
+
+static inline void qht_bucket_lock(struct qht_map *map,
+ struct qht_bucket *b)
+{
+ qht_bucket_lock_do(map, b, qemu_spin_lock);
+}
+
+static inline void qht_bucket_unlock(struct qht_map *map,
+ struct qht_bucket *b)
+{
+ qht_bucket_lock_do(map, b, qemu_spin_unlock);
+}
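+
+/*
+ * Typical per-bucket usage (a sketch; see qht_bucket_lock__no_stale() below):
+ *
+ *     b = qht_map_to_bucket(map, hash);
+ *     qht_bucket_lock(map, b);
+ *     ... operate on the chain starting at b ...
+ *     qht_bucket_unlock(map, b);
+ */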
+
+static inline void qht_head_init(struct qht_map *map, struct qht_bucket *b)
{
memset(b, 0, sizeof(*b));
- qemu_spin_init(&b->lock);
+ qht_do_if_first_in_stripe(map, b, qemu_spin_init);
seqlock_init(&b->sequence);
}
for (i = 0; i < map->n_buckets; i++) {
struct qht_bucket *b = &map->buckets[i];
- qemu_spin_lock(&b->lock);
+ qht_do_if_first_in_stripe(map, b, qemu_spin_lock);
}
}
for (i = 0; i < map->n_buckets; i++) {
struct qht_bucket *b = &map->buckets[i];
- qemu_spin_unlock(&b->lock);
+ qht_do_if_first_in_stripe(map, b, qemu_spin_unlock);
}
}
* Get a head bucket and lock it, making sure its parent map is not stale.
* @pmap is filled with a pointer to the bucket's parent map.
*
- * Unlock with qemu_spin_unlock(&b->lock).
+ * Unlock with qht_bucket_unlock().
*
* Note: callers cannot have ht->lock held.
*/
map = qatomic_rcu_read(&ht->map);
b = qht_map_to_bucket(map, hash);
- qemu_spin_lock(&b->lock);
+ qht_bucket_lock(map, b);
if (likely(!qht_map_is_stale__locked(ht, map))) {
*pmap = map;
return b;
}
- qemu_spin_unlock(&b->lock);
+ qht_bucket_unlock(map, b);
/* we raced with a resize; acquire ht->lock to see the updated ht->map */
qht_lock(ht);
map = ht->map;
b = qht_map_to_bucket(map, hash);
- qemu_spin_lock(&b->lock);
+ qht_bucket_lock(map, b);
qht_unlock(ht);
*pmap = map;
return b;
map->n_added_buckets_threshold;
}
-static inline void qht_chain_destroy(const struct qht_bucket *head)
+static inline void qht_chain_destroy(struct qht_map *map,
+ struct qht_bucket *head)
{
struct qht_bucket *curr = head->next;
struct qht_bucket *prev;
- qemu_spin_destroy(&head->lock);
+ qht_do_if_first_in_stripe(map, head, qemu_spin_destroy);
while (curr) {
prev = curr;
curr = curr->next;
size_t i;
for (i = 0; i < map->n_buckets; i++) {
- qht_chain_destroy(&map->buckets[i]);
+ qht_chain_destroy(map, &map->buckets[i]);
}
qemu_vfree(map->buckets);
g_free(map);
map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
sizeof(*map->buckets) * n_buckets);
for (i = 0; i < n_buckets; i++) {
- qht_head_init(&map->buckets[i]);
+ qht_head_init(map, &map->buckets[i]);
}
return map;
}
b = qht_bucket_lock__no_stale(ht, hash, &map);
prev = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
qht_bucket_debug__locked(b);
- qemu_spin_unlock(&b->lock);
+ qht_bucket_unlock(map, b);
if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
qht_grow_maybe(ht);
int i;
if (qht_entry_is_last(orig, pos)) {
- orig->hashes[pos] = 0;
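+        /*
+         * Clear the hash with an atomic store: concurrent lookups may read
+         * it without holding the bucket lock, and TSAN flags a plain store
+         * here as a data race.
+         */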
+ qatomic_set(&orig->hashes[pos], 0);
qatomic_set(&orig->pointers[pos], NULL);
return;
}
b = qht_bucket_lock__no_stale(ht, hash, &map);
ret = qht_remove__locked(b, p, hash);
qht_bucket_debug__locked(b);
- qemu_spin_unlock(&b->lock);
+ qht_bucket_unlock(map, b);
return ret;
}