From 9586a1329f5dce6c1d7f4de53cf0536644d7e593 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 2 Mar 2023 11:19:52 +0100 Subject: qemu-thread-posix: cleanup, fix, document QemuEvent QemuEvent is currently broken on ARM due to missing memory barriers after qatomic_*(). Apart from adding the memory barrier, a closer look reveals some unpaired memory barriers too. Document more clearly what is going on. Reviewed-by: Richard Henderson Reviewed-by: David Hildenbrand Signed-off-by: Paolo Bonzini --- util/qemu-thread-posix.c | 69 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 49 insertions(+), 20 deletions(-) (limited to 'util') diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c index 93d2505..b2e26e2 100644 --- a/util/qemu-thread-posix.c +++ b/util/qemu-thread-posix.c @@ -384,13 +384,21 @@ void qemu_event_destroy(QemuEvent *ev) void qemu_event_set(QemuEvent *ev) { - /* qemu_event_set has release semantics, but because it *loads* + assert(ev->initialized); + + /* + * Pairs with both qemu_event_reset() and qemu_event_wait(). + * + * qemu_event_set has release semantics, but because it *loads* * ev->value we need a full memory barrier here. */ - assert(ev->initialized); smp_mb(); if (qatomic_read(&ev->value) != EV_SET) { - if (qatomic_xchg(&ev->value, EV_SET) == EV_BUSY) { + int old = qatomic_xchg(&ev->value, EV_SET); + + /* Pairs with memory barrier in kernel futex_wait system call. */ + smp_mb__after_rmw(); + if (old == EV_BUSY) { /* There were waiters, wake them up. */ qemu_futex_wake(ev, INT_MAX); } @@ -399,18 +407,19 @@ void qemu_event_set(QemuEvent *ev) void qemu_event_reset(QemuEvent *ev) { - unsigned value; - assert(ev->initialized); - value = qatomic_read(&ev->value); - smp_mb_acquire(); - if (value == EV_SET) { - /* - * If there was a concurrent reset (or even reset+wait), - * do nothing. Otherwise change EV_SET->EV_FREE. - */ - qatomic_or(&ev->value, EV_FREE); - } + + /* + * If there was a concurrent reset (or even reset+wait), + * do nothing. Otherwise change EV_SET->EV_FREE. + */ + qatomic_or(&ev->value, EV_FREE); + + /* + * Order reset before checking the condition in the caller. + * Pairs with the first memory barrier in qemu_event_set(). + */ + smp_mb__after_rmw(); } void qemu_event_wait(QemuEvent *ev) @@ -418,20 +427,40 @@ void qemu_event_wait(QemuEvent *ev) unsigned value; assert(ev->initialized); - value = qatomic_read(&ev->value); - smp_mb_acquire(); + + /* + * qemu_event_wait must synchronize with qemu_event_set even if it does + * not go down the slow path, so this load-acquire is needed that + * synchronizes with the first memory barrier in qemu_event_set(). + * + * If we do go down the slow path, there is no requirement at all: we + * might miss a qemu_event_set() here but ultimately the memory barrier in + * qemu_futex_wait() will ensure the check is done correctly. + */ + value = qatomic_load_acquire(&ev->value); if (value != EV_SET) { if (value == EV_FREE) { /* - * Leave the event reset and tell qemu_event_set that there - * are waiters. No need to retry, because there cannot be - * a concurrent busy->free transition. After the CAS, the - * event will be either set or busy. + * Leave the event reset and tell qemu_event_set that there are + * waiters. No need to retry, because there cannot be a concurrent + * busy->free transition. After the CAS, the event will be either + * set or busy. 
+ * + * This cmpxchg doesn't have particular ordering requirements if it + * succeeds (moving the store earlier can only cause qemu_event_set() + * to issue _more_ wakeups), the failing case needs acquire semantics + * like the load above. */ if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) { return; } } + + /* + * This is the final check for a concurrent set, so it does need + * a smp_mb() pairing with the second barrier of qemu_event_set(). + * The barrier is inside the FUTEX_WAIT system call. + */ qemu_futex_wait(ev, EV_BUSY); } } -- cgit v1.1 From 6c5df4b48f0c52a61342ecb307a43f4c2a3565c4 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 2 Mar 2023 11:22:50 +0100 Subject: qemu-thread-win32: cleanup, fix, document QemuEvent QemuEvent is currently broken on ARM due to missing memory barriers after qatomic_*(). Apart from adding the memory barrier, a closer look reveals some unpaired memory barriers that are not really needed and complicated the functions unnecessarily. Also, it is relying on a memory barrier in ResetEvent(); the barrier _ought_ to be there but there is really no documentation about it, so make it explicit. Reviewed-by: Richard Henderson Reviewed-by: David Hildenbrand Signed-off-by: Paolo Bonzini --- util/qemu-thread-win32.c | 82 +++++++++++++++++++++++++++++++++--------------- 1 file changed, 56 insertions(+), 26 deletions(-) (limited to 'util') diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c index 69db254..a7fe3cc 100644 --- a/util/qemu-thread-win32.c +++ b/util/qemu-thread-win32.c @@ -272,12 +272,20 @@ void qemu_event_destroy(QemuEvent *ev) void qemu_event_set(QemuEvent *ev) { assert(ev->initialized); - /* qemu_event_set has release semantics, but because it *loads* + + /* + * Pairs with both qemu_event_reset() and qemu_event_wait(). + * + * qemu_event_set has release semantics, but because it *loads* * ev->value we need a full memory barrier here. */ smp_mb(); if (qatomic_read(&ev->value) != EV_SET) { - if (qatomic_xchg(&ev->value, EV_SET) == EV_BUSY) { + int old = qatomic_xchg(&ev->value, EV_SET); + + /* Pairs with memory barrier after ResetEvent. */ + smp_mb__after_rmw(); + if (old == EV_BUSY) { /* There were waiters, wake them up. */ SetEvent(ev->event); } @@ -286,17 +294,19 @@ void qemu_event_set(QemuEvent *ev) void qemu_event_reset(QemuEvent *ev) { - unsigned value; - assert(ev->initialized); - value = qatomic_read(&ev->value); - smp_mb_acquire(); - if (value == EV_SET) { - /* If there was a concurrent reset (or even reset+wait), - * do nothing. Otherwise change EV_SET->EV_FREE. - */ - qatomic_or(&ev->value, EV_FREE); - } + + /* + * If there was a concurrent reset (or even reset+wait), + * do nothing. Otherwise change EV_SET->EV_FREE. + */ + qatomic_or(&ev->value, EV_FREE); + + /* + * Order reset before checking the condition in the caller. + * Pairs with the first memory barrier in qemu_event_set(). + */ + smp_mb__after_rmw(); } void qemu_event_wait(QemuEvent *ev) @@ -304,29 +314,49 @@ void qemu_event_wait(QemuEvent *ev) unsigned value; assert(ev->initialized); - value = qatomic_read(&ev->value); - smp_mb_acquire(); + + /* + * qemu_event_wait must synchronize with qemu_event_set even if it does + * not go down the slow path, so this load-acquire is needed that + * synchronizes with the first memory barrier in qemu_event_set(). 
+ * + * If we do go down the slow path, there is no requirement at all: we + * might miss a qemu_event_set() here but ultimately the memory barrier in + * qemu_futex_wait() will ensure the check is done correctly. + */ + value = qatomic_load_acquire(&ev->value); if (value != EV_SET) { if (value == EV_FREE) { - /* qemu_event_set is not yet going to call SetEvent, but we are - * going to do another check for EV_SET below when setting EV_BUSY. - * At that point it is safe to call WaitForSingleObject. + /* + * Here the underlying kernel event is reset, but qemu_event_set is + * not yet going to call SetEvent. However, there will be another + * check for EV_SET below when setting EV_BUSY. At that point it + * is safe to call WaitForSingleObject. */ ResetEvent(ev->event); - /* Tell qemu_event_set that there are waiters. No need to retry - * because there cannot be a concurrent busy->free transition. - * After the CAS, the event will be either set or busy. + /* + * It is not clear whether ResetEvent provides this barrier; kernel + * APIs (KeResetEvent/KeClearEvent) do not. Better safe than sorry! + */ + smp_mb(); + + /* + * Leave the event reset and tell qemu_event_set that there are + * waiters. No need to retry, because there cannot be a concurrent + * busy->free transition. After the CAS, the event will be either + * set or busy. */ if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) { - value = EV_SET; - } else { - value = EV_BUSY; + return; } } - if (value == EV_BUSY) { - WaitForSingleObject(ev->event, INFINITE); - } + + /* + * ev->value is now EV_BUSY. Since we didn't observe EV_SET, + * qemu_event_set() must observe EV_BUSY and call SetEvent(). + */ + WaitForSingleObject(ev->event, INFINITE); } } -- cgit v1.1 From e3a3b6ec8169eab2feb241b4982585001512cd55 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 3 Mar 2023 10:52:59 +0100 Subject: qemu-coroutine-lock: add smp_mb__after_rmw() mutex->from_push and mutex->handoff in qemu-coroutine-lock implement the familiar pattern: write a write b smp_mb() smp_mb() read b read a The memory barrier is required by the C memory model even after a SEQ_CST read-modify-write operation such as QSLIST_INSERT_HEAD_ATOMIC. Add it and avoid the unclear qatomic_mb_read() operation. Reviewed-by: Richard Henderson Reviewed-by: David Hildenbrand Signed-off-by: Paolo Bonzini --- util/qemu-coroutine-lock.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'util') diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c index 58f3f77..84a50a9 100644 --- a/util/qemu-coroutine-lock.c +++ b/util/qemu-coroutine-lock.c @@ -201,10 +201,16 @@ static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx, trace_qemu_co_mutex_lock_entry(mutex, self); push_waiter(mutex, &w); + /* + * Add waiter before reading mutex->handoff. Pairs with qatomic_mb_set + * in qemu_co_mutex_unlock. + */ + smp_mb__after_rmw(); + /* This is the "Responsibility Hand-Off" protocol; a lock() picks from * a concurrent unlock() the responsibility of waking somebody up. */ - old_handoff = qatomic_mb_read(&mutex->handoff); + old_handoff = qatomic_read(&mutex->handoff); if (old_handoff && has_waiters(mutex) && qatomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) { @@ -303,6 +309,7 @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex) } our_handoff = mutex->sequence; + /* Set handoff before checking for waiters. 
*/ qatomic_mb_set(&mutex->handoff, our_handoff); if (!has_waiters(mutex)) { /* The concurrent lock has not added itself yet, so it -- cgit v1.1 From 8dd48650b43dfde4ebea34191ac267e474bcc29e Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 6 Mar 2023 10:15:06 +0100 Subject: async: update documentation of the memory barriers Ever since commit 8c6b0356b539 ("util/async: make bh_aio_poll() O(1)", 2020-02-22), synchronization between qemu_bh_schedule() and aio_bh_poll() is happening when the bottom half is enqueued in the bh_list; not when the flags are set. Update the documentation to match. Reviewed-by: Stefan Hajnoczi Signed-off-by: Paolo Bonzini --- util/async.c | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) (limited to 'util') diff --git a/util/async.c b/util/async.c index 0657b75..e4b4941 100644 --- a/util/async.c +++ b/util/async.c @@ -74,14 +74,21 @@ static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags) unsigned old_flags; /* - * The memory barrier implicit in qatomic_fetch_or makes sure that: - * 1. idle & any writes needed by the callback are done before the - * locations are read in the aio_bh_poll. - * 2. ctx is loaded before the callback has a chance to execute and bh - * could be freed. + * Synchronizes with atomic_fetch_and() in aio_bh_dequeue(), ensuring that + * insertion starts after BH_PENDING is set. */ old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags); + if (!(old_flags & BH_PENDING)) { + /* + * At this point the bottom half becomes visible to aio_bh_poll(). + * This insertion thus synchronizes with QSLIST_MOVE_ATOMIC in + * aio_bh_poll(), ensuring that: + * 1. any writes needed by the callback are visible from the callback + * after aio_bh_dequeue() returns bh. + * 2. ctx is loaded before the callback has a chance to execute and bh + * could be freed. + */ QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next); } @@ -107,11 +114,8 @@ static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags) QSLIST_REMOVE_HEAD(head, next); /* - * The qatomic_and is paired with aio_bh_enqueue(). The implicit memory - * barrier ensures that the callback sees all writes done by the scheduling - * thread. It also ensures that the scheduling thread sees the cleared - * flag before bh->cb has run, and thus will call aio_notify again if - * necessary. + * Synchronizes with qatomic_fetch_or() in aio_bh_enqueue(), ensuring that + * the removal finishes before BH_PENDING is reset. */ *flags = qatomic_fetch_and(&bh->flags, ~(BH_PENDING | BH_SCHEDULED | BH_IDLE)); @@ -158,6 +162,7 @@ int aio_bh_poll(AioContext *ctx) BHListSlice *s; int ret = 0; + /* Synchronizes with QSLIST_INSERT_HEAD_ATOMIC in aio_bh_enqueue(). */ QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list); QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next); @@ -448,15 +453,15 @@ LuringState *aio_get_linux_io_uring(AioContext *ctx) void aio_notify(AioContext *ctx) { /* - * Write e.g. bh->flags before writing ctx->notified. Pairs with smp_mb in - * aio_notify_accept. + * Write e.g. ctx->bh_list before writing ctx->notified. Pairs with + * smp_mb() in aio_notify_accept(). */ smp_wmb(); qatomic_set(&ctx->notified, true); /* - * Write ctx->notified before reading ctx->notify_me. Pairs - * with smp_mb in aio_ctx_prepare or aio_poll. + * Write ctx->notified (and also ctx->bh_list) before reading ctx->notify_me. + * Pairs with smp_mb() in aio_ctx_prepare or aio_poll. 
*/ smp_mb(); if (qatomic_read(&ctx->notify_me)) { -- cgit v1.1 From 6229438cca037d42f44a96d38feb15cb102a444f Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 6 Mar 2023 10:43:52 +0100 Subject: async: clarify usage of barriers in the polling case Explain that aio_context_notifier_poll() relies on aio_notify_accept() to catch all the memory writes that were done before ctx->notified was set to true. Reviewed-by: Richard Henderson Reviewed-by: Stefan Hajnoczi Signed-off-by: Paolo Bonzini --- util/async.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'util') diff --git a/util/async.c b/util/async.c index e4b4941..21016a1 100644 --- a/util/async.c +++ b/util/async.c @@ -474,8 +474,9 @@ void aio_notify_accept(AioContext *ctx) qatomic_set(&ctx->notified, false); /* - * Write ctx->notified before reading e.g. bh->flags. Pairs with smp_wmb - * in aio_notify. + * Order reads of ctx->notified (in aio_context_notifier_poll()) and the + * above clearing of ctx->notified before reads of e.g. bh->flags. Pairs + * with smp_wmb() in aio_notify. */ smp_mb(); } @@ -498,6 +499,11 @@ static bool aio_context_notifier_poll(void *opaque) EventNotifier *e = opaque; AioContext *ctx = container_of(e, AioContext, notifier); + /* + * No need for load-acquire because we just want to kick the + * event loop. aio_notify_accept() takes care of synchronizing + * the event loop with the producers. + */ return qatomic_read(&ctx->notified); } -- cgit v1.1
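The qemu-coroutine-lock commit above summarizes the synchronization it depends on as the classic paired-barrier pattern (write a / smp_mb() / read b in one thread, write b / smp_mb() / read a in the other); in the CoMutex code the two "variables" are the waiter list filled by push_waiter() (mutex->from_push) and mutex->handoff. For readers who want to see the guarantee concretely, below is a minimal, self-contained sketch of that pattern in plain C11 atomics. It is not QEMU code: atomic_thread_fence() stands in for QEMU's smp_mb(), and the thread and variable names are purely illustrative.

```c
/*
 * Illustrative sketch (not QEMU code) of the pattern cited in the
 * qemu-coroutine-lock commit message:
 *
 *      write a          write b
 *      smp_mb()         smp_mb()
 *      read b           read a
 *
 * The full fences guarantee that at least one of the two threads
 * observes the other's store; both threads missing each other is
 * impossible.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_int a, b;

static void *thread1(void *arg)
{
    atomic_store_explicit(&a, 1, memory_order_relaxed);        /* write a   */
    atomic_thread_fence(memory_order_seq_cst);                  /* smp_mb()  */
    int saw_b = atomic_load_explicit(&b, memory_order_relaxed); /* read b    */
    return (void *)(intptr_t)saw_b;
}

static void *thread2(void *arg)
{
    atomic_store_explicit(&b, 1, memory_order_relaxed);        /* write b   */
    atomic_thread_fence(memory_order_seq_cst);                  /* smp_mb()  */
    int saw_a = atomic_load_explicit(&a, memory_order_relaxed); /* read a    */
    return (void *)(intptr_t)saw_a;
}

int main(void)
{
    pthread_t t1, t2;
    void *r1, *r2;

    pthread_create(&t1, NULL, thread1, NULL);
    pthread_create(&t2, NULL, thread2, NULL);
    pthread_join(t1, &r1);
    pthread_join(t2, &r2);

    /* The fences forbid the outcome r1 == 0 && r2 == 0. */
    printf("thread1 saw b=%d, thread2 saw a=%d\n",
           (int)(intptr_t)r1, (int)(intptr_t)r2);
    return 0;
}
```

The property the fences buy is exactly what the responsibility hand-off protocol needs: the unlocking coroutine and the waiter cannot both miss each other's write, so either unlock() sees the waiter on the list or the waiter sees the non-zero handoff (or both), and a wakeup is never lost.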