commit a890643958f03aaa344290700093b280cb606c28
Author:    Alex Bennée <alex.bennee@linaro.org>  2016-09-30 22:31:00 +0100
Committer: Paolo Bonzini <pbonzini@redhat.com>   2016-10-04 10:00:26 +0200
Tree:      3c49b66767086eb01021e88e8c4611dabdc8361b /util
Parent:    027d9a7d2911e993cdcbd21c7c35d1dd058f05bb
util/qht: atomically set b->hashes
ThreadSanitizer detects a possible race between reading and writing the
hashes. The ordering semantics are already documented for QHT; however,
for true C11 compliance we should use relaxed atomic primitives for
accesses that are done across threads. On x86 this slightly changes the
code: it can no longer do the load and compare in a single instruction,
leading to a slight performance degradation.

Running 'taskset -c 0 tests/qht-bench -n 1 -d 10' (i.e. all lookups) 10
times, we get:

before the patch:
$ ./mean.pl 34.04 34.24 34.38 34.25 34.18 34.51 34.46 34.44 34.29 34.08
34.287 +- 0.160072900059109

after:
$ ./mean.pl 33.94 34.00 33.52 33.46 33.55 33.71 34.27 34.06 34.28 34.58
33.937 +- 0.374731014640279

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Emilio G. Cota <cota@braap.org>
Message-Id: <20160930213106.20186-10-alex.bennee@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
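As a standalone illustration (not part of the patch), here is a minimal
sketch of the access pattern involved, written against C11 <stdatomic.h>
directly. It assumes QEMU's atomic_read()/atomic_set() macros compile
down to relaxed atomic loads/stores; 'struct bucket' and the helper
names are hypothetical stand-ins, not the real struct qht_bucket:

/* Standalone sketch, not QEMU code. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define QHT_BUCKET_ENTRIES 4

struct bucket {
    _Atomic uint32_t hashes[QHT_BUCKET_ENTRIES];
};

/* After the patch: a relaxed load keeps the cross-thread access
 * data-race-free for C11/ThreadSanitizer.  On x86 the load becomes a
 * separate mov, so the compiler can no longer fuse load and compare
 * into one cmp with a memory operand -- the small slowdown measured
 * above. */
static int lookup_slot(struct bucket *b, uint32_t hash)
{
    for (int i = 0; i < QHT_BUCKET_ENTRIES; i++) {
        if (atomic_load_explicit(&b->hashes[i],
                                 memory_order_relaxed) == hash) {
            return i;
        }
    }
    return -1;
}

/* Writer side: the matching relaxed store.  Ordering against the
 * pointer store is provided by the surrounding seqlock in QHT, not by
 * these accesses themselves. */
static void set_slot(struct bucket *b, int i, uint32_t hash)
{
    atomic_store_explicit(&b->hashes[i], hash, memory_order_relaxed);
}

int main(void)
{
    struct bucket b;

    for (int i = 0; i < QHT_BUCKET_ENTRIES; i++) {
        atomic_init(&b.hashes[i], 0);
    }
    set_slot(&b, 2, 0xdeadbeef);
    printf("found at slot %d\n", lookup_slot(&b, 0xdeadbeef));
    return 0;
}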
Diffstat (limited to 'util')
-rw-r--r--  util/qht.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/util/qht.c b/util/qht.c
index 16a8d79..571639d 100644
--- a/util/qht.c
+++ b/util/qht.c
@@ -379,7 +379,7 @@ static void qht_bucket_reset__locked(struct qht_bucket *head)
             if (b->pointers[i] == NULL) {
                 goto done;
             }
-            b->hashes[i] = 0;
+            atomic_set(&b->hashes[i], 0);
             atomic_set(&b->pointers[i], NULL);
         }
         b = b->next;
@@ -444,7 +444,7 @@ void *qht_do_lookup(struct qht_bucket *head, qht_lookup_func_t func,
 
     do {
         for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
-            if (b->hashes[i] == hash) {
+            if (atomic_read(&b->hashes[i]) == hash) {
                 /* The pointer is dereferenced before seqlock_read_retry,
                  * so (unlike qht_insert__locked) we need to use
                  * atomic_rcu_read here.
@@ -538,8 +538,8 @@ static bool qht_insert__locked(struct qht *ht, struct qht_map *map,
     if (new) {
         atomic_rcu_set(&prev->next, b);
     }
-    b->hashes[i] = hash;
     /* smp_wmb() implicit in seqlock_write_begin. */
+    atomic_set(&b->hashes[i], hash);
     atomic_set(&b->pointers[i], p);
     seqlock_write_end(&head->sequence);
     return true;
@@ -607,10 +607,10 @@ qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
     qht_debug_assert(!(to == from && i == j));
     qht_debug_assert(to->pointers[i]);
     qht_debug_assert(from->pointers[j]);
 
-    to->hashes[i] = from->hashes[j];
+    atomic_set(&to->hashes[i], from->hashes[j]);
     atomic_set(&to->pointers[i], from->pointers[j]);
 
-    from->hashes[j] = 0;
+    atomic_set(&from->hashes[j], 0);
     atomic_set(&from->pointers[j], NULL);
 }
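
For context, a minimal single-entry sketch (hypothetical names, not the
real QHT or QEMU seqlock API) of the seqlock read pattern that the
comment in qht_do_lookup refers to: relaxed atomic reads keep the field
accesses data-race-free, while the sequence re-check detects a
concurrent writer and forces a retry.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct seqlock {
    _Atomic unsigned sequence;   /* odd while a write is in progress */
};

static unsigned seqlock_read_begin(struct seqlock *sl)
{
    unsigned seq;

    /* Wait until no writer holds the lock (sequence is even). */
    do {
        seq = atomic_load_explicit(&sl->sequence, memory_order_acquire);
    } while (seq & 1);
    return seq;
}

static bool seqlock_read_retry(struct seqlock *sl, unsigned seq)
{
    /* Order the data reads above before the sequence re-check. */
    atomic_thread_fence(memory_order_acquire);
    return atomic_load_explicit(&sl->sequence,
                                memory_order_relaxed) != seq;
}

struct entry {
    _Atomic uint32_t hash;
    void *_Atomic pointer;
};

/* Reader: the entry fields may change concurrently, so every access
 * inside the loop is an atomic read; if the sequence changed, the loop
 * retries and the stale result is discarded. */
static void *lookup_one(struct seqlock *sl, struct entry *e, uint32_t hash)
{
    void *p;
    unsigned seq;

    do {
        seq = seqlock_read_begin(sl);
        p = NULL;
        if (atomic_load_explicit(&e->hash, memory_order_relaxed) == hash) {
            p = atomic_load_explicit(&e->pointer, memory_order_relaxed);
        }
    } while (seqlock_read_retry(sl, seq));
    return p;
}

int main(void)
{
    struct seqlock sl = { 0 };
    struct entry e;
    int value = 42;

    atomic_init(&e.hash, 0x1234);
    atomic_init(&e.pointer, (void *)&value);
    printf("looked up: %d\n", *(int *)lookup_one(&sl, &e, 0x1234));
    return 0;
}

Note that in the real qht_do_lookup the candidate pointer is
dereferenced before the retry check, which is why that code needs
atomic_rcu_read rather than a plain relaxed load, as its comment in the
hunk above explains.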