author    Stefan Hajnoczi <stefanha@redhat.com>    2020-02-14 17:17:12 +0000
committer Stefan Hajnoczi <stefanha@redhat.com>    2020-02-22 08:26:47 +0000
commit    7391d34c3cca09c0bb0140275839c6619b86ec0f (patch)
tree      d0ba9fcdcb26b681cb76cfdf31ccdf9a6330d473 /util/aio-posix.c
parent    4749079ce033a94784cbe20a661abeac598ff057 (diff)
aio-posix: make AioHandler dispatch O(1) with epoll
File descriptor monitoring is O(1) with epoll(7), but aio_dispatch_handlers()
still scans all AioHandlers instead of dispatching just those that are ready.
This makes aio_poll() O(n) with respect to the total number of registered
handlers.

Add a local ready_list to aio_poll() so that each nested aio_poll() builds a
list of handlers ready to be dispatched. Since file descriptor polling is
level-triggered, nested aio_poll() calls also see fds that were ready in the
parent but not yet dispatched. This guarantees that nested aio_poll()
invocations will dispatch all fds, even those that became ready before the
nested invocation.

Since only handlers ready to be dispatched are placed onto the ready_list, the
new aio_dispatch_ready_handlers() function provides O(1) dispatch.

Note that AioContext polling is still O(n) and currently cannot be fully
disabled. This still needs to be fixed before aio_poll() is fully O(1).

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Sergio Lopez <slp@redhat.com>
Message-id: 20200214171712.541358-6-stefanha@redhat.com
[Fix compilation error on macOS where there is no epoll(7). The aio_epoll()
prototype was out of date and aio_add_ready_list() needed to be moved outside
the ifdef.
--Stefan]
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
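The core pattern of the change is easier to see in isolation. Below is a
minimal, standalone sketch of the ready-list idea: collect only the handlers
that the poll mechanism reported as ready onto a local intrusive list, then
dispatch just that list instead of scanning every registered handler. It uses
the BSD <sys/queue.h> LIST_* macros rather than QEMU's QLIST_* so it compiles
on its own; the Handler type, the on_ready_list flag (standing in for
QLIST_SAFE_REMOVE(), which tolerates nodes that are not on any list) and the
helper names are illustrative assumptions, not QEMU APIs.

#include <stdbool.h>
#include <stdio.h>
#include <sys/queue.h>

typedef struct Handler Handler;
struct Handler {
    int fd;
    int revents;                    /* events reported by poll/epoll */
    void (*cb)(Handler *h);         /* dispatch callback */
    LIST_ENTRY(Handler) node_ready; /* membership in a local ready list */
    bool on_ready_list;             /* stand-in for QLIST_SAFE_REMOVE() */
};

LIST_HEAD(HandlerList, Handler);

/* Counterpart of add_ready_handler(): move a handler onto the caller's
 * ready list, unlinking it from a nested parent's list first if needed. */
static void add_ready(struct HandlerList *ready_list, Handler *h, int revents)
{
    if (h->on_ready_list) {
        LIST_REMOVE(h, node_ready);
    }
    h->revents = revents;
    LIST_INSERT_HEAD(ready_list, h, node_ready);
    h->on_ready_list = true;
}

/* Counterpart of aio_dispatch_ready_handlers(): cost is proportional to the
 * number of ready handlers, not the number of registered handlers. */
static bool dispatch_ready(struct HandlerList *ready_list)
{
    bool progress = false;
    Handler *h;

    while ((h = LIST_FIRST(ready_list))) {
        LIST_REMOVE(h, node_ready);
        h->on_ready_list = false;
        h->cb(h);
        progress = true;
    }
    return progress;
}

static void say_ready(Handler *h)
{
    printf("fd %d ready (revents 0x%x)\n", h->fd, h->revents);
}

int main(void)
{
    struct HandlerList ready_list = LIST_HEAD_INITIALIZER(ready_list);
    Handler a = { .fd = 3, .cb = say_ready };
    Handler b = { .fd = 4, .cb = say_ready };

    /* Pretend poll/epoll reported both fds readable. */
    add_ready(&ready_list, &a, 1);
    add_ready(&ready_list, &b, 1);

    dispatch_ready(&ready_list);    /* visits only a and b, nothing else */
    return 0;
}

In the patch below the same roles are played by add_ready_handler(),
aio_dispatch_ready_handlers() and the node_ready field added to AioHandler.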
Diffstat (limited to 'util/aio-posix.c')
-rw-r--r--  util/aio-posix.c  110
1 file changed, 78 insertions(+), 32 deletions(-)
diff --git a/util/aio-posix.c b/util/aio-posix.c
index b5cfdbd..9e1befc 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -35,9 +35,20 @@ struct AioHandler
void *opaque;
bool is_external;
QLIST_ENTRY(AioHandler) node;
+ QLIST_ENTRY(AioHandler) node_ready; /* only used during aio_poll() */
QLIST_ENTRY(AioHandler) node_deleted;
};
+/* Add a handler to a ready list */
+static void add_ready_handler(AioHandlerList *ready_list,
+ AioHandler *node,
+ int revents)
+{
+ QLIST_SAFE_REMOVE(node, node_ready); /* remove from nested parent's list */
+ node->pfd.revents = revents;
+ QLIST_INSERT_HEAD(ready_list, node, node_ready);
+}
+
#ifdef CONFIG_EPOLL_CREATE1
/* The fd number threshold to switch to epoll */
@@ -105,7 +116,8 @@ static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
}
}
-static int aio_epoll(AioContext *ctx, int64_t timeout)
+static int aio_epoll(AioContext *ctx, AioHandlerList *ready_list,
+ int64_t timeout)
{
GPollFD pfd = {
.fd = ctx->epollfd,
@@ -130,11 +142,13 @@ static int aio_epoll(AioContext *ctx, int64_t timeout)
}
for (i = 0; i < ret; i++) {
int ev = events[i].events;
+ int revents = (ev & EPOLLIN ? G_IO_IN : 0) |
+ (ev & EPOLLOUT ? G_IO_OUT : 0) |
+ (ev & EPOLLHUP ? G_IO_HUP : 0) |
+ (ev & EPOLLERR ? G_IO_ERR : 0);
+
node = events[i].data.ptr;
- node->pfd.revents = (ev & EPOLLIN ? G_IO_IN : 0) |
- (ev & EPOLLOUT ? G_IO_OUT : 0) |
- (ev & EPOLLHUP ? G_IO_HUP : 0) |
- (ev & EPOLLERR ? G_IO_ERR : 0);
+ add_ready_handler(ready_list, node, revents);
}
}
out:
@@ -172,8 +186,8 @@ static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
}
-static int aio_epoll(AioContext *ctx, GPollFD *pfds,
- unsigned npfd, int64_t timeout)
+static int aio_epoll(AioContext *ctx, AioHandlerList *ready_list,
+ int64_t timeout)
{
assert(false);
}
@@ -438,36 +452,63 @@ static void aio_free_deleted_handlers(AioContext *ctx)
qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
}
-static bool aio_dispatch_handlers(AioContext *ctx)
+static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
{
- AioHandler *node, *tmp;
bool progress = false;
+ int revents;
- QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
- int revents;
+ revents = node->pfd.revents & node->pfd.events;
+ node->pfd.revents = 0;
- revents = node->pfd.revents & node->pfd.events;
- node->pfd.revents = 0;
+ if (!QLIST_IS_INSERTED(node, node_deleted) &&
+ (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
+ aio_node_check(ctx, node->is_external) &&
+ node->io_read) {
+ node->io_read(node->opaque);
- if (!QLIST_IS_INSERTED(node, node_deleted) &&
- (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
- aio_node_check(ctx, node->is_external) &&
- node->io_read) {
- node->io_read(node->opaque);
-
- /* aio_notify() does not count as progress */
- if (node->opaque != &ctx->notifier) {
- progress = true;
- }
- }
- if (!QLIST_IS_INSERTED(node, node_deleted) &&
- (revents & (G_IO_OUT | G_IO_ERR)) &&
- aio_node_check(ctx, node->is_external) &&
- node->io_write) {
- node->io_write(node->opaque);
+ /* aio_notify() does not count as progress */
+ if (node->opaque != &ctx->notifier) {
progress = true;
}
}
+ if (!QLIST_IS_INSERTED(node, node_deleted) &&
+ (revents & (G_IO_OUT | G_IO_ERR)) &&
+ aio_node_check(ctx, node->is_external) &&
+ node->io_write) {
+ node->io_write(node->opaque);
+ progress = true;
+ }
+
+ return progress;
+}
+
+/*
+ * If we have a list of ready handlers then this is more efficient than
+ * scanning all handlers with aio_dispatch_handlers().
+ */
+static bool aio_dispatch_ready_handlers(AioContext *ctx,
+ AioHandlerList *ready_list)
+{
+ bool progress = false;
+ AioHandler *node;
+
+ while ((node = QLIST_FIRST(ready_list))) {
+ QLIST_SAFE_REMOVE(node, node_ready);
+ progress = aio_dispatch_handler(ctx, node) || progress;
+ }
+
+ return progress;
+}
+
+/* Slower than aio_dispatch_ready_handlers() but only used via glib */
+static bool aio_dispatch_handlers(AioContext *ctx)
+{
+ AioHandler *node, *tmp;
+ bool progress = false;
+
+ QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
+ progress = aio_dispatch_handler(ctx, node) || progress;
+ }
return progress;
}
@@ -639,6 +680,7 @@ static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
bool aio_poll(AioContext *ctx, bool blocking)
{
+ AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);
AioHandler *node;
int i;
int ret = 0;
@@ -689,7 +731,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
/* wait until next event */
if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
npfd = 0; /* pollfds[] is not being used */
- ret = aio_epoll(ctx, timeout);
+ ret = aio_epoll(ctx, &ready_list, timeout);
} else {
ret = qemu_poll_ns(pollfds, npfd, timeout);
}
@@ -744,7 +786,11 @@ bool aio_poll(AioContext *ctx, bool blocking)
/* if we have any readable fds, dispatch event */
if (ret > 0) {
for (i = 0; i < npfd; i++) {
- nodes[i]->pfd.revents = pollfds[i].revents;
+ int revents = pollfds[i].revents;
+
+ if (revents) {
+ add_ready_handler(&ready_list, nodes[i], revents);
+ }
}
}
@@ -753,7 +799,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
progress |= aio_bh_poll(ctx);
if (ret > 0) {
- progress |= aio_dispatch_handlers(ctx);
+ progress |= aio_dispatch_ready_handlers(ctx, &ready_list);
}
aio_free_deleted_handlers(ctx);