@@ -151,6 +151,22 @@ struct qht_bucket {
 
 QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);
 
+/*
+ * Under TSAN, we use striped locks instead of one lock per bucket chain.
+ * This avoids crashing under TSAN, since TSAN aborts the program if more than
+ * 64 locks are held (this is a hardcoded limit in TSAN).
+ * When resizing a QHT we grab all the buckets' locks, which can easily
+ * go over TSAN's limit. By using striped locks, we avoid this problem.
+ *
+ * Note: this number must be a power of two for easy index computation.
+ */
+#define QHT_TSAN_BUCKET_LOCKS_BITS 4
+#define QHT_TSAN_BUCKET_LOCKS (1 << QHT_TSAN_BUCKET_LOCKS_BITS)
+
+struct qht_tsan_lock {
+    QemuSpin lock;
+} QEMU_ALIGNED(QHT_BUCKET_ALIGN);
+
 /**
  * struct qht_map - structure to track an array of buckets
  * @rcu: used by RCU. Keep it as the top field in the struct to help valgrind
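
To make the comment above concrete, here is a small standalone sketch of the arithmetic; it is not part of the patch, and the 32768-bucket figure is a made-up example. With per-bucket locks, a resize that locks every bucket chain of such a map would hold 32768 spinlocks at once, far beyond TSAN's 64-lock ceiling; with the 16 striped locks defined here, a map-wide lock never holds more than 16.

/* Standalone sketch, not from qht.c; the bucket count is hypothetical. */
#include <assert.h>

#define SKETCH_STRIPED_LOCKS  16      /* mirrors QHT_TSAN_BUCKET_LOCKS */
#define SKETCH_TSAN_MAX_LOCKS 64      /* TSAN's hardcoded held-locks limit */
#define SKETCH_N_BUCKETS      32768   /* hypothetical map size */

/* Locking one lock per bucket during a resize would exceed TSAN's limit... */
static_assert(SKETCH_N_BUCKETS > SKETCH_TSAN_MAX_LOCKS,
              "per-bucket locking overflows TSAN's held-locks limit");
/* ...whereas locking each stripe once stays comfortably below it. */
static_assert(SKETCH_STRIPED_LOCKS <= SKETCH_TSAN_MAX_LOCKS,
              "striped locking fits within TSAN's limit");
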
@@ -160,6 +176,7 @@ QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);
  * @n_added_buckets: number of added (i.e. "non-head") buckets
  * @n_added_buckets_threshold: threshold to trigger an upward resize once the
  *                             number of added buckets surpasses it.
+ * @tsan_bucket_locks: Array of striped locks to be used only under TSAN.
  *
  * Buckets are tracked in what we call a "map", i.e. this structure.
  */
@@ -169,6 +186,9 @@ struct qht_map {
     size_t n_buckets;
     size_t n_added_buckets;
     size_t n_added_buckets_threshold;
+#ifdef CONFIG_TSAN
+    struct qht_tsan_lock tsan_bucket_locks[QHT_TSAN_BUCKET_LOCKS];
+#endif
 };
 
 /* trigger a resize when n_added_buckets > n_buckets / div */
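
A quick footprint estimate for the TSAN-only field (my own arithmetic, assuming QHT_BUCKET_ALIGN is the usual 64-byte cache-line alignment used for the buckets themselves): each struct qht_tsan_lock is padded out to its own cache line, so the embedded array adds 16 * 64 = 1024 bytes per map and neighbouring striped locks never share a cache line.

/* Standalone sketch, not from qht.c; 64 is an assumed value for QHT_BUCKET_ALIGN. */
#include <assert.h>

struct sketch_tsan_lock {
    int lock;                                  /* stand-in for QemuSpin */
} __attribute__((aligned(64)));

static_assert(sizeof(struct sketch_tsan_lock) == 64,
              "one cache line per striped lock");
static_assert(sizeof(struct sketch_tsan_lock[16]) == 1024,
              "about 1 KiB added per qht_map under TSAN");
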
@@ -229,10 +249,56 @@ static inline size_t qht_elems_to_buckets(size_t n_elems)
     return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
 }
 
-static inline void qht_head_init(struct qht_bucket *b)
+/*
+ * When using striped locks (i.e. under TSAN), we have to be careful not
+ * to operate on the same lock twice (e.g. when iterating through all buckets).
+ * We achieve this by operating only on each stripe's first matching lock.
+ */
+static inline void qht_do_if_first_in_stripe(struct qht_map *map,
+                                             struct qht_bucket *b,
+                                             void (*func)(QemuSpin *spin))
+{
+#ifdef CONFIG_TSAN
+    unsigned long bucket_idx = b - map->buckets;
+    bool is_first_in_stripe = (bucket_idx >> QHT_TSAN_BUCKET_LOCKS_BITS) == 0;
+    if (is_first_in_stripe) {
+        unsigned long lock_idx = bucket_idx & (QHT_TSAN_BUCKET_LOCKS - 1);
+        func(&map->tsan_bucket_locks[lock_idx].lock);
+    }
+#else
+    func(&b->lock);
+#endif
+}
+
+static inline void qht_bucket_lock_do(struct qht_map *map,
+                                      struct qht_bucket *b,
+                                      void (*func)(QemuSpin *lock))
+{
+#ifdef CONFIG_TSAN
+    unsigned long bucket_idx = b - map->buckets;
+    unsigned long lock_idx = bucket_idx & (QHT_TSAN_BUCKET_LOCKS - 1);
+    func(&map->tsan_bucket_locks[lock_idx].lock);
+#else
+    func(&b->lock);
+#endif
+}
+
+static inline void qht_bucket_lock(struct qht_map *map,
+                                   struct qht_bucket *b)
+{
+    qht_bucket_lock_do(map, b, qemu_spin_lock);
+}
+
+static inline void qht_bucket_unlock(struct qht_map *map,
+                                     struct qht_bucket *b)
+{
+    qht_bucket_lock_do(map, b, qemu_spin_unlock);
+}
+
+static inline void qht_head_init(struct qht_map *map, struct qht_bucket *b)
 {
     memset(b, 0, sizeof(*b));
-    qemu_spin_init(&b->lock);
+    qht_do_if_first_in_stripe(map, b, qemu_spin_init);
     seqlock_init(&b->sequence);
 }
 
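
The helpers above can be exercised in isolation; here is a minimal standalone sketch of the stripe selection they implement (not part of the patch, with local copies of the two macros). A bucket picks its lock with the low QHT_TSAN_BUCKET_LOCKS_BITS bits of its index, and only buckets whose index is below QHT_TSAN_BUCKET_LOCKS count as "first in stripe", which is what lets map-wide init/lock/destroy loops visit each striped lock exactly once.

/* Standalone sketch, not from qht.c; mirrors the index math of the helpers above. */
#include <assert.h>

#define SKETCH_LOCKS_BITS 4                     /* mirrors QHT_TSAN_BUCKET_LOCKS_BITS */
#define SKETCH_LOCKS (1 << SKETCH_LOCKS_BITS)   /* mirrors QHT_TSAN_BUCKET_LOCKS */

int main(void)
{
    /* buckets 5 and 21 map to the same striped lock, since 21 & 0xf == 5 ... */
    assert((5UL & (SKETCH_LOCKS - 1)) == (21UL & (SKETCH_LOCKS - 1)));
    /* ... but only bucket 5 is "first in stripe" (its index >> 4 is 0), so a
     * loop over all buckets calls init/lock/destroy once per striped lock. */
    assert((5UL >> SKETCH_LOCKS_BITS) == 0);
    assert((21UL >> SKETCH_LOCKS_BITS) != 0);
    return 0;
}

In the hunks that follow, callers switch from touching b->lock directly to the qht_bucket_lock()/qht_bucket_unlock() pair, so the same call sites work both with and without CONFIG_TSAN.
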
@@ -250,7 +316,7 @@ static void qht_map_lock_buckets(struct qht_map *map)
     for (i = 0; i < map->n_buckets; i++) {
         struct qht_bucket *b = &map->buckets[i];
 
-        qemu_spin_lock(&b->lock);
+        qht_do_if_first_in_stripe(map, b, qemu_spin_lock);
     }
 }
 
@@ -261,7 +327,7 @@ static void qht_map_unlock_buckets(struct qht_map *map)
     for (i = 0; i < map->n_buckets; i++) {
         struct qht_bucket *b = &map->buckets[i];
 
-        qemu_spin_unlock(&b->lock);
+        qht_do_if_first_in_stripe(map, b, qemu_spin_unlock);
     }
 }
 
@@ -308,7 +374,7 @@ void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
  * Get a head bucket and lock it, making sure its parent map is not stale.
  * @pmap is filled with a pointer to the bucket's parent map.
  *
- * Unlock with qemu_spin_unlock(&b->lock).
+ * Unlock with qht_bucket_unlock.
  *
  * Note: callers cannot have ht->lock held.
  */
@@ -322,18 +388,18 @@ struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
     map = qatomic_rcu_read(&ht->map);
     b = qht_map_to_bucket(map, hash);
 
-    qemu_spin_lock(&b->lock);
+    qht_bucket_lock(map, b);
     if (likely(!qht_map_is_stale__locked(ht, map))) {
         *pmap = map;
         return b;
     }
-    qemu_spin_unlock(&b->lock);
+    qht_bucket_unlock(map, b);
 
     /* we raced with a resize; acquire ht->lock to see the updated ht->map */
     qht_lock(ht);
     map = ht->map;
     b = qht_map_to_bucket(map, hash);
-    qemu_spin_lock(&b->lock);
+    qht_bucket_lock(map, b);
     qht_unlock(ht);
     *pmap = map;
     return b;
@@ -345,12 +411,13 @@ static inline bool qht_map_needs_resize(const struct qht_map *map)
            map->n_added_buckets_threshold;
 }
 
-static inline void qht_chain_destroy(const struct qht_bucket *head)
+static inline void qht_chain_destroy(struct qht_map *map,
+                                     struct qht_bucket *head)
 {
     struct qht_bucket *curr = head->next;
     struct qht_bucket *prev;
 
-    qemu_spin_destroy(&head->lock);
+    qht_do_if_first_in_stripe(map, head, qemu_spin_destroy);
     while (curr) {
         prev = curr;
         curr = curr->next;
@@ -364,7 +431,7 @@ static void qht_map_destroy(struct qht_map *map)
     size_t i;
 
     for (i = 0; i < map->n_buckets; i++) {
-        qht_chain_destroy(&map->buckets[i]);
+        qht_chain_destroy(map, &map->buckets[i]);
     }
     qemu_vfree(map->buckets);
     g_free(map);
@@ -390,7 +457,7 @@ static struct qht_map *qht_map_create(size_t n_buckets)
     map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
                                  sizeof(*map->buckets) * n_buckets);
     for (i = 0; i < n_buckets; i++) {
-        qht_head_init(&map->buckets[i]);
+        qht_head_init(map, &map->buckets[i]);
     }
     return map;
 }
@@ -638,7 +705,7 @@ bool qht_insert(struct qht *ht, void *p, uint32_t hash, void **existing)
     b = qht_bucket_lock__no_stale(ht, hash, &map);
     prev = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
     qht_bucket_debug__locked(b);
-    qemu_spin_unlock(&b->lock);
+    qht_bucket_unlock(map, b);
 
     if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
         qht_grow_maybe(ht);
@@ -749,7 +816,7 @@ bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
     b = qht_bucket_lock__no_stale(ht, hash, &map);
     ret = qht_remove__locked(b, p, hash);
     qht_bucket_debug__locked(b);
-    qemu_spin_unlock(&b->lock);
+    qht_bucket_unlock(map, b);
     return ret;
 }