author     Kevin Wolf <kwolf@redhat.com>   2025-03-11 15:19:12 +0100
committer  Kevin Wolf <kwolf@redhat.com>   2025-03-13 17:57:23 +0100
commit     f76d3bee754a2f8d73373d5959dc983169a93eee
tree       60b9cdc2ec845c74f7cda6d219097bdecd3d8d7a
parent     ee416407b3c0f45253779e98404acb41231a9279
aio-posix: Adjust polling time also for new handlers
aio_dispatch_handler() adds handlers to ctx->poll_aio_handlers if
polling should be enabled. If adjust_polling_time() is called for all
polling handlers before this happens, newly added polling handlers are
still left at poll->ns = 0, and polling only takes effect after the
next event. Therefore, move the adjust_polling_time() call after
aio_dispatch_handler().
This fixes test-nested-aio-poll, which expects that polling becomes
effective the first time around.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250311141912.135657-1-kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
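
The ordering matters because aio_dispatch_handler() is what puts a handler
on ctx->poll_aio_handlers in the first place. Below is a minimal,
self-contained C model of the two orderings; the Handler struct and the
adjust()/dispatch() helpers are invented stand-ins for AioHandler,
adjust_polling_time() and aio_dispatch_handler(), not QEMU code:

#include <stdbool.h>
#include <stdio.h>

typedef struct {
    bool in_poll_list;  /* stands in for QLIST_IS_INSERTED(node, node_poll) */
    long poll_ns;       /* stands in for node->poll.ns */
} Handler;

/* stand-in for adjust_polling_time(): only listed handlers get a budget */
static void adjust(Handler *h, long block_ns)
{
    if (h->in_poll_list && h->poll_ns < block_ns) {
        h->poll_ns = block_ns;
    }
}

/* stand-in for aio_dispatch_handler(): dispatching may enable polling */
static void dispatch(Handler *h)
{
    h->in_poll_list = true;
}

int main(void)
{
    Handler old_order = {0}, new_order = {0};
    long block_ns = 1000;

    adjust(&old_order, block_ns);  /* old code: adjust first ...             */
    dispatch(&old_order);          /* ... handler joins the list too late    */

    dispatch(&new_order);          /* patched code: dispatch first ...       */
    adjust(&new_order, block_ns);  /* ... so the new handler gets a budget   */

    printf("old order: poll_ns = %ld\n", old_order.poll_ns);  /* prints 0    */
    printf("new order: poll_ns = %ld\n", new_order.poll_ns);  /* prints 1000 */
    return 0;
}

With the old ordering, the freshly added handler stays at poll_ns = 0 until
the next event, which is exactly what made test-nested-aio-poll fail.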
 util/aio-posix.c | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/util/aio-posix.c b/util/aio-posix.c
index 80785c2..2e0a5da 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -28,6 +28,9 @@
 /* Stop userspace polling on a handler if it isn't active for some time */
 #define POLL_IDLE_INTERVAL_NS (7 * NANOSECONDS_PER_SECOND)
 
+static void adjust_polling_time(AioContext *ctx, AioPolledEvent *poll,
+                                int64_t block_ns);
+
 bool aio_poll_disabled(AioContext *ctx)
 {
     return qatomic_read(&ctx->poll_disable_cnt);
@@ -392,7 +395,8 @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
  * scanning all handlers with aio_dispatch_handlers().
  */
 static bool aio_dispatch_ready_handlers(AioContext *ctx,
-                                        AioHandlerList *ready_list)
+                                        AioHandlerList *ready_list,
+                                        int64_t block_ns)
 {
     bool progress = false;
     AioHandler *node;
@@ -400,6 +404,14 @@ static bool aio_dispatch_ready_handlers(AioContext *ctx,
     while ((node = QLIST_FIRST(ready_list))) {
         QLIST_REMOVE(node, node_ready);
         progress = aio_dispatch_handler(ctx, node) || progress;
+
+        /*
+         * Adjust polling time only after aio_dispatch_handler(), which can
+         * add the handler to ctx->poll_aio_handlers.
+         */
+        if (ctx->poll_max_ns && QLIST_IS_INSERTED(node, node_poll)) {
+            adjust_polling_time(ctx, &node->poll, block_ns);
+        }
     }
 
     return progress;
@@ -653,6 +665,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
     bool use_notify_me;
     int64_t timeout;
     int64_t start = 0;
+    int64_t block_ns = 0;
 
     /*
      * There cannot be two concurrent aio_poll calls for the same AioContext (or
@@ -725,20 +738,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
 
     aio_notify_accept(ctx);
 
-    /* Adjust polling time */
+    /* Calculate blocked time for adaptive polling */
     if (ctx->poll_max_ns) {
-        AioHandler *node;
-        int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;
-
-        QLIST_FOREACH(node, &ctx->poll_aio_handlers, node_poll) {
-            if (QLIST_IS_INSERTED(node, node_ready)) {
-                adjust_polling_time(ctx, &node->poll, block_ns);
-            }
-        }
+        block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;
     }
 
     progress |= aio_bh_poll(ctx);
-    progress |= aio_dispatch_ready_handlers(ctx, &ready_list);
+    progress |= aio_dispatch_ready_handlers(ctx, &ready_list, block_ns);
 
     aio_free_deleted_handlers(ctx);
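
For context: adjust_polling_time() is only forward-declared in this hunk; its
body lives elsewhere in util/aio-posix.c. A rough sketch of the adaptive idea
it implements follows: grow the per-handler poll window when the loop blocked
longer than the current window, and back off when covering the wait would
exceed ctx->poll_max_ns. The grow/shrink factor of 2 and the 4000 ns seed
below are assumptions for illustration, not QEMU's actual values:

#include <stdint.h>

/* Illustrative only: not the real adjust_polling_time(). The factor of 2
 * and the 4000 ns starting budget are assumed values. */
static void adjust_polling_time_sketch(int64_t *poll_ns, int64_t block_ns,
                                       int64_t poll_max_ns)
{
    if (block_ns <= *poll_ns) {
        /* Polling already covers the observed wait: leave it alone. */
    } else if (block_ns > poll_max_ns) {
        /* Covering this wait would exceed the cap: back off. */
        *poll_ns /= 2;
    } else {
        /* Room to grow: poll longer next time, up to the cap. */
        int64_t grown = *poll_ns ? *poll_ns * 2 : 4000;
        *poll_ns = grown < poll_max_ns ? grown : poll_max_ns;
    }
}

Whatever the exact policy, the point of the patch is independent of it: the
adjustment must run after aio_dispatch_handler(), so that handlers added to
ctx->poll_aio_handlers during dispatch are included the first time around.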