author     Emilio G. Cota <cota@braap.org>        2018-09-10 19:27:44 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>    2018-10-02 18:47:55 +0200
commit     ac8c77486cbdd5d7be17c447ca64cbebd330f25a (patch)
tree       4ee8837662cf3f04d1a09c0811619c0a90ba9ce6 /util
parent     82fdfcbe64ec09048d5f9b430b16231b9285d54b (diff)
qsp: use atomic64 accessors
With the seqlock, we either have to use atomics to remain within defined
behaviour (and note that 64-bit atomics aren't always guaranteed to compile,
irrespective of __nocheck), or drop the atomics and be in undefined behaviour
territory.

Fix it by dropping the seqlock and using atomic64 accessors. This will limit
scalability when !CONFIG_ATOMIC64, but those machines (1) don't have many
users and (2) are unlikely to have many cores.

- With CONFIG_ATOMIC64:
  $ tests/atomic_add-bench -n 1 -m -p
  Throughput: 13.00 Mops/s

- Forcing !CONFIG_ATOMIC64:
  $ tests/atomic_add-bench -n 1 -m -p
  Throughput: 10.89 Mops/s

Signed-off-by: Emilio G. Cota <cota@braap.org>
Message-Id: <20180910232752.31565-5-cota@braap.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
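The atomic_read_u64()/atomic_set_u64() accessors used in the patch compile to
plain 64-bit atomic loads and stores when CONFIG_ATOMIC64 is set; without it
they have to fall back to taking a lock around every access, which is where
the scalability limit mentioned above comes from. The sketch below is
illustrative only: the function names and the single global mutex are
assumptions for the example, not QEMU's actual helpers (QEMU's fallback in
util/atomic64.c uses a set of spinlocks rather than one mutex).

    /*
     * Illustrative lock-based fallback for 64-bit reads/writes on hosts
     * without native 64-bit atomics. Hypothetical names; not QEMU code.
     */
    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t u64_fallback_lock = PTHREAD_MUTEX_INITIALIZER;

    static uint64_t fallback_read_u64(const uint64_t *ptr)
    {
        uint64_t val;

        pthread_mutex_lock(&u64_fallback_lock);
        val = *ptr;   /* cannot be torn: writers take the same lock */
        pthread_mutex_unlock(&u64_fallback_lock);
        return val;
    }

    static void fallback_set_u64(uint64_t *ptr, uint64_t val)
    {
        pthread_mutex_lock(&u64_fallback_lock);
        *ptr = val;
        pthread_mutex_unlock(&u64_fallback_lock);
    }

The extra lock/unlock pair on every counter update accounts for the
single-threaded slowdown in the numbers above (13.00 vs 10.89 Mops/s), and
contention on that lock is what would limit scalability on a many-core
!CONFIG_ATOMIC64 host, a trade-off the commit message argues is acceptable.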
Diffstat (limited to 'util')
-rw-r--r--  util/qsp.c  49
1 file changed, 8 insertions, 41 deletions
diff --git a/util/qsp.c b/util/qsp.c
index 2de3a97..a848b09 100644
--- a/util/qsp.c
+++ b/util/qsp.c
@@ -84,13 +84,6 @@ struct QSPEntry {
     uint64_t n_acqs;
     uint64_t ns;
     unsigned int n_objs; /* count of coalesced objs; only used for reporting */
-#ifndef CONFIG_ATOMIC64
-    /*
-     * If we cannot update the counts atomically, then use a seqlock.
-     * We don't need an associated lock because the updates are thread-local.
-     */
-    QemuSeqLock sequence;
-#endif
 };
 typedef struct QSPEntry QSPEntry;
 
@@ -345,46 +338,15 @@ static QSPEntry *qsp_entry_get(const void *obj, const char *file, int line,
 }
 
 /*
- * @from is in the global hash table; read it atomically if the host
- * supports it, otherwise use the seqlock.
- */
-static void qsp_entry_aggregate(QSPEntry *to, const QSPEntry *from)
-{
-#ifdef CONFIG_ATOMIC64
-    to->ns += atomic_read__nocheck(&from->ns);
-    to->n_acqs += atomic_read__nocheck(&from->n_acqs);
-#else
-    unsigned int version;
-    uint64_t ns, n_acqs;
-
-    do {
-        version = seqlock_read_begin(&from->sequence);
-        ns = atomic_read__nocheck(&from->ns);
-        n_acqs = atomic_read__nocheck(&from->n_acqs);
-    } while (seqlock_read_retry(&from->sequence, version));
-
-    to->ns += ns;
-    to->n_acqs += n_acqs;
-#endif
-}
-
-/*
  * @e is in the global hash table; it is only written to by the current thread,
  * so we write to it atomically (as in "write once") to prevent torn reads.
- * If the host doesn't support u64 atomics, use the seqlock.
  */
 static inline void do_qsp_entry_record(QSPEntry *e, int64_t delta, bool acq)
 {
-#ifndef CONFIG_ATOMIC64
-    seqlock_write_begin(&e->sequence);
-#endif
-    atomic_set__nocheck(&e->ns, e->ns + delta);
+    atomic_set_u64(&e->ns, e->ns + delta);
     if (acq) {
-        atomic_set__nocheck(&e->n_acqs, e->n_acqs + 1);
+        atomic_set_u64(&e->n_acqs, e->n_acqs + 1);
     }
-#ifndef CONFIG_ATOMIC64
-    seqlock_write_end(&e->sequence);
-#endif
 }
 
 static inline void qsp_entry_record(QSPEntry *e, int64_t delta)
@@ -550,7 +512,12 @@ static void qsp_aggregate(void *p, uint32_t h, void *up)
 
     hash = qsp_entry_no_thread_hash(e);
     agg = qsp_entry_find(ht, e, hash);
-    qsp_entry_aggregate(agg, e);
+    /*
+     * The entry is in the global hash table; read from it atomically (as in
+     * "read once").
+     */
+    agg->ns += atomic_read_u64(&e->ns);
+    agg->n_acqs += atomic_read_u64(&e->n_acqs);
 }
 
 static void qsp_iter_diff(void *p, uint32_t hash, void *htp)