author | Stefan Hajnoczi <stefanha@redhat.com> | 2013-02-20 11:28:31 +0100 |
---|---|---|
committer | Anthony Liguori <aliguori@us.ibm.com> | 2013-02-21 16:17:31 -0600 |
commit | d0c8d2c05f67a1a007d87fa3b99254abfa42d06d (patch) | |
tree | ada60845139760d25723bb77ba0c105b023dbd04 | |
parent | 9cbaacf999b01b27dc3a22502705178057af66de (diff) | |
aio: extract aio_dispatch() from aio_poll()
We will need to loop over AioHandlers calling ->io_read()/->io_write()
when aio_poll() is converted from select(2) to g_poll(2).
Luckily the code for this already exists; extract it into the new
aio_dispatch() function.
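
To make the motivation concrete: unlike select(2), g_poll() takes an array of GPollFD structures and fills in their revents fields, so a standalone dispatch loop can be reused once readiness has been collected. Below is a minimal sketch of that poll-then-dispatch split; it assumes only GLib, and the Handler type plus the dispatch_ready()/poll_and_dispatch() names are invented for illustration rather than taken from QEMU.

```c
#include <glib.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical handler record; QEMU's AioHandler carries more state. */
typedef struct {
    GPollFD pfd;                    /* fd, requested events, returned revents */
    void (*io_read)(void *opaque);  /* called when the fd becomes readable */
    void *opaque;
} Handler;

/* Dispatch step: invoke callbacks for handlers whose fd reported readiness.
 * Returning "progress" mirrors the contract of aio_dispatch() in the patch. */
static gboolean dispatch_ready(Handler *h, guint n)
{
    gboolean progress = FALSE;
    for (guint i = 0; i < n; i++) {
        if ((h[i].pfd.revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) && h[i].io_read) {
            h[i].io_read(h[i].opaque);
            progress = TRUE;
        }
        h[i].pfd.revents = 0;
    }
    return progress;
}

/* Poll step: g_poll() fills in revents; the dispatch step above can then be
 * reused by any caller that has revents populated, which is the point of the
 * extraction. */
static gboolean poll_and_dispatch(Handler *h, guint n, gint timeout_ms)
{
    GPollFD *fds = g_new0(GPollFD, n);
    for (guint i = 0; i < n; i++) {
        fds[i] = h[i].pfd;
    }
    if (g_poll(fds, n, timeout_ms) > 0) {
        for (guint i = 0; i < n; i++) {
            h[i].pfd.revents = fds[i].revents;
        }
    }
    g_free(fds);
    return dispatch_ready(h, n);
}

static void on_stdin(void *opaque)
{
    char buf[256];
    ssize_t len = read(STDIN_FILENO, buf, sizeof(buf));
    (void)opaque;
    printf("read %zd bytes\n", len);
}

int main(void)
{
    Handler h = { .pfd = { .fd = STDIN_FILENO, .events = G_IO_IN },
                  .io_read = on_stdin };
    poll_and_dispatch(&h, 1, 1000);   /* wait up to one second for stdin */
    return 0;
}
```

Built against GLib (e.g. `gcc sketch.c $(pkg-config --cflags --libs glib-2.0)`), this waits briefly for stdin and then dispatches the read callback: the same two-step shape the patch sets up inside AioContext.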
Two small changes:
* aio_poll() checks !node->deleted to avoid calling handlers that have been deleted (see the sketch after this list).
* Fix typo 'then' -> 'them' in aio_poll() comment.
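
The !node->deleted guard matters because a callback may call qemu_aio_set_fd_handler() and deregister a handler while the list is still being walked (the comment in the diff below warns about exactly this); such nodes are only flagged as deleted and must be skipped until it is safe to free them. The sketch below shows the general deferred-deletion pattern under simplified assumptions: the Node type and function names are invented, and QEMU's real code uses QLIST macros and its own bookkeeping rather than this two-pass version.

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Node {
    struct Node *next;
    bool deleted;                  /* set instead of freeing during a walk */
    void (*cb)(void *opaque);
    void *opaque;
} Node;

/* A callback may "remove" a node while we are iterating, so removal only
 * flags it; the memory stays valid until the walk has finished. */
static void remove_node(Node *n)
{
    n->deleted = true;
}

static void walk_and_dispatch(Node **head)
{
    /* Pass 1: dispatch, skipping anything already flagged deleted
     * (the guard the patch adds in aio_dispatch()'s loop). */
    for (Node *n = *head; n; n = n->next) {
        if (!n->deleted && n->cb) {
            n->cb(n->opaque);
        }
    }

    /* Pass 2: with no callback running, unlink and free flagged nodes. */
    for (Node **p = head; *p; ) {
        Node *n = *p;
        if (n->deleted) {
            *p = n->next;
            free(n);
        } else {
            p = &n->next;
        }
    }
}

static Node *second;

static void first_cb(void *opaque)
{
    (void)opaque;
    remove_node(second);           /* one handler deregisters another */
}

static void second_cb(void *opaque)
{
    (void)opaque;
    puts("never reached: node was deleted before its turn");
}

int main(void)
{
    Node *a = calloc(1, sizeof(*a));
    Node *b = calloc(1, sizeof(*b));
    a->cb = first_cb;
    b->cb = second_cb;
    a->next = b;
    second = b;
    walk_and_dispatch(&a);         /* prints nothing: b is skipped, then freed */
    free(a);
    return 0;
}
```

In the patch the same idea shows up as the two !node->deleted checks added to the read and write branches of the dispatch loop.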
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Laszlo Ersek <lersek@redhat.com>
Message-id: 1361356113-11049-9-git-send-email-stefanha@redhat.com
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
-rw-r--r-- | aio-posix.c | 57
1 file changed, 35 insertions, 22 deletions
diff --git a/aio-posix.c b/aio-posix.c
index fe4dbb4..35131a3 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -129,30 +129,12 @@ bool aio_pending(AioContext *ctx)
     return false;
 }
 
-bool aio_poll(AioContext *ctx, bool blocking)
+static bool aio_dispatch(AioContext *ctx)
 {
-    static struct timeval tv0;
     AioHandler *node;
-    fd_set rdfds, wrfds;
-    int max_fd = -1;
-    int ret;
-    bool busy, progress;
-
-    progress = false;
-
-    /*
-     * If there are callbacks left that have been queued, we need to call then.
-     * Do not call select in this case, because it is possible that the caller
-     * does not need a complete flush (as is the case for qemu_aio_wait loops).
-     */
-    if (aio_bh_poll(ctx)) {
-        blocking = false;
-        progress = true;
-    }
+    bool progress = false;
 
     /*
-     * Then dispatch any pending callbacks from the GSource.
-     *
      * We have to walk very carefully in case qemu_aio_set_fd_handler is
      * called while we're walking.
      */
@@ -167,11 +149,15 @@ bool aio_poll(AioContext *ctx, bool blocking)
         node->pfd.revents = 0;
 
         /* See comment in aio_pending. */
-        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
+        if (!node->deleted &&
+            (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
+            node->io_read) {
             node->io_read(node->opaque);
             progress = true;
         }
-        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) {
+        if (!node->deleted &&
+            (revents & (G_IO_OUT | G_IO_ERR)) &&
+            node->io_write) {
             node->io_write(node->opaque);
             progress = true;
         }
@@ -186,6 +172,33 @@ bool aio_poll(AioContext *ctx, bool blocking)
             g_free(tmp);
         }
     }
+    return progress;
+}
+
+bool aio_poll(AioContext *ctx, bool blocking)
+{
+    static struct timeval tv0;
+    AioHandler *node;
+    fd_set rdfds, wrfds;
+    int max_fd = -1;
+    int ret;
+    bool busy, progress;
+
+    progress = false;
+
+    /*
+     * If there are callbacks left that have been queued, we need to call them.
+     * Do not call select in this case, because it is possible that the caller
+     * does not need a complete flush (as is the case for qemu_aio_wait loops).
+     */
+    if (aio_bh_poll(ctx)) {
+        blocking = false;
+        progress = true;
+    }
+
+    if (aio_dispatch(ctx)) {
+        progress = true;
+    }
 
     if (progress && !blocking) {
         return true;