From d816614ca4f5af89a2b6d50ac840d7b77973f2fc Mon Sep 17 00:00:00 2001
From: Maxim Levitsky
Date: Tue, 15 Sep 2020 20:12:53 +0800
Subject: rcu: Implement drain_call_rcu

This will allow us to preserve the semantics of hmp_device_del, namely
that the device is deleted immediately, which was changed by the
previous patch that delayed the deletion to an RCU callback.

Signed-off-by: Maxim Levitsky
Suggested-by: Stefan Hajnoczi
Reviewed-by: Stefan Hajnoczi
Message-Id: <20200915121318.247-2-luoyonggang@gmail.com>
Signed-off-by: Thomas Huth
---
 util/rcu.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 55 insertions(+)

diff --git a/util/rcu.c b/util/rcu.c
index 60a37f7..c4fefa9 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -293,6 +293,61 @@ void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node))
     qemu_event_set(&rcu_call_ready_event);
 }
 
+
+struct rcu_drain {
+    struct rcu_head rcu;
+    QemuEvent drain_complete_event;
+};
+
+static void drain_rcu_callback(struct rcu_head *node)
+{
+    struct rcu_drain *event = (struct rcu_drain *)node;
+    qemu_event_set(&event->drain_complete_event);
+}
+
+/*
+ * This function ensures that all pending RCU callbacks
+ * on the current thread are done executing.
+ *
+ * It drops the big QEMU lock during the wait to allow the
+ * RCU thread to process the callbacks.
+ *
+ */
+
+void drain_call_rcu(void)
+{
+    struct rcu_drain rcu_drain;
+    bool locked = qemu_mutex_iothread_locked();
+
+    memset(&rcu_drain, 0, sizeof(struct rcu_drain));
+    qemu_event_init(&rcu_drain.drain_complete_event, false);
+
+    if (locked) {
+        qemu_mutex_unlock_iothread();
+    }
+
+
+    /*
+     * RCU callbacks are invoked in the same order in which they
+     * are registered, thus we can be sure that when 'drain_rcu_callback'
+     * is called, all RCU callbacks that were registered on this thread
+     * prior to calling this function are completed.
+     *
+     * Note that since we have only one global queue of the RCU callbacks,
+     * we also end up waiting for most of the RCU callbacks that were
+     * registered on the other threads, but this is a side effect that
+     * shouldn't be relied upon.
+     */
+
+    call_rcu1(&rcu_drain.rcu, drain_rcu_callback);
+    qemu_event_wait(&rcu_drain.drain_complete_event);
+
+    if (locked) {
+        qemu_mutex_lock_iothread();
+    }
+
+}
+
 void rcu_register_thread(void)
 {
     assert(rcu_reader.ctr == 0);
--
cgit v1.1
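
To illustrate how the new API is meant to be called, here is a minimal
usage sketch; the MyDevice type and the my_device_free()/my_device_del()
helpers are hypothetical stand-ins, not code from this patch:

    #include "qemu/osdep.h"
    #include "qemu/rcu.h"

    struct MyDevice {
        struct rcu_head rcu;
        int id;
    };

    static void my_device_free(struct rcu_head *node)
    {
        /* Runs on the RCU thread once the grace period has elapsed. */
        g_free(container_of(node, struct MyDevice, rcu));
    }

    static void my_device_del(struct MyDevice *dev)
    {
        /* Schedule reclamation after the current grace period... */
        call_rcu1(&dev->rcu, my_device_free);

        /*
         * ...then block until that callback has actually executed, so
         * the caller observes the deletion immediately.  The ordering
         * holds because callbacks run in registration order.
         */
        drain_call_rcu();
    }
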
From da0652c043c8014ed194a99a7989cc4b0093707e Mon Sep 17 00:00:00 2001
From: Yonggang Luo
Date: Wed, 16 Sep 2020 01:12:26 +0800
Subject: tests: fix aio_remove_fd_handler in aio-win32, making it consistent
 with aio-posix.c

This fixes the following test failures:

(C:\work\xemu\qemu\build\tests\test-aio-multithread.exe:19100): GLib-CRITICAL **: 23:03:24.965: g_source_remove_poll: assertion '!SOURCE_DESTROYED (source)' failed
ERROR test-aio-multithread - Bail out! GLib-FATAL-CRITICAL: g_source_remove_poll: assertion '!SOURCE_DESTROYED (source)' failed
(C:\work\xemu\qemu\build\tests\test-bdrv-drain.exe:21036): GLib-CRITICAL **: 23:03:29.861: g_source_remove_poll: assertion '!SOURCE_DESTROYED (source)' failed
ERROR test-bdrv-drain - Bail out! GLib-FATAL-CRITICAL: g_source_remove_poll: assertion '!SOURCE_DESTROYED (source)' failed

The idea comes from https://patchwork.kernel.org/patch/9975239/

Signed-off-by: Yonggang Luo
Message-Id: <20200915171234.236-19-luoyonggang@gmail.com>
Signed-off-by: Thomas Huth
---
 util/aio-win32.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/util/aio-win32.c b/util/aio-win32.c
index 953c56a..49bd90e 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -37,6 +37,16 @@ struct AioHandler {
 
 static void aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
 {
+    /*
+     * If the GSource is in the process of being destroyed then
+     * g_source_remove_poll() causes an assertion failure.  Skip
+     * removal in that case, because glib cleans up its state during
+     * destruction anyway.
+     */
+    if (!g_source_is_destroyed(&ctx->source)) {
+        g_source_remove_poll(&ctx->source, &node->pfd);
+    }
+
     /* If aio_poll is in progress, just mark the node as deleted */
     if (qemu_lockcnt_count(&ctx->list_lock)) {
         node->deleted = 1;
@@ -139,8 +149,6 @@ void aio_set_event_notifier(AioContext *ctx,
     /* Are we deleting the fd handler? */
     if (!io_notify) {
         if (node) {
-            g_source_remove_poll(&ctx->source, &node->pfd);
-
             aio_remove_fd_handler(ctx, node);
         }
     } else {
--
cgit v1.1
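
The guard pattern from the hunk above, shown in isolation as a hedged
sketch against plain glib (remove_poll_safely() and its parameters are
illustrative names, not QEMU code):

    #include <glib.h>

    /*
     * Detach a GPollFD from a GSource without tripping glib's
     * '!SOURCE_DESTROYED' assertion.
     */
    static void remove_poll_safely(GSource *source, GPollFD *pfd)
    {
        /*
         * Once g_source_destroy() has run, g_source_remove_poll()
         * raises a critical warning (fatal in the tests above).  glib
         * frees its poll records during destruction anyway, so
         * skipping the call is safe.
         */
        if (!g_source_is_destroyed(source)) {
            g_source_remove_poll(source, pfd);
        }
    }
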