author     Peter Maydell <peter.maydell@linaro.org>  2015-07-29 17:08:38 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2015-07-29 17:08:38 +0100
commit     46739a2d7ace71b43cacf73ea71c10429db0d5e8 (patch)
tree       bdf284e97b877f7a9941ac6dd8537ba047677b70
parent     b83d017d88b2c4710c7a4614ecf9f845e4db80ba (diff)
parent     ca96ac44dcd290566090b2435bc828fded356ad9 (diff)
Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging
Pull request

These fixes make dataplane work again after the notify_me optimization
was added.  They also solve QEMUBH memory leaks and fix a bug in
dataplane's cleanup code.

# gpg: Signature made Wed Jul 29 14:50:26 2015 BST using RSA key ID 81AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"

* remotes/stefanha/tags/block-pull-request:
  AioContext: force event loop iteration using BH
  AioContext: avoid leaking BHs on cleanup
  virtio-blk-dataplane: delete bottom half before the AioContext is freed

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
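For context, a minimal sketch of the QEMUBH lifecycle these fixes revolve around (my_cb and example_usage are illustrative names, not part of this series): a bottom half is bound to an AioContext with aio_bh_new(), qemu_bh_schedule() makes the event loop run it on its next iteration, and qemu_bh_delete() must be called before the AioContext is finalized, otherwise the BH is leaked (and, after this series, aio_ctx_finalize() asserts).

    #include "block/aio.h"

    /* Illustrative sketch only; my_cb and example_usage are hypothetical names. */
    static void my_cb(void *opaque)
    {
        /* Runs from the AioContext's event loop on its next iteration. */
    }

    static void example_usage(AioContext *ctx)
    {
        QEMUBH *bh = aio_bh_new(ctx, my_cb, NULL);  /* bind a BH to ctx */

        qemu_bh_schedule(bh);   /* wakes the event loop and runs my_cb on the next iteration */

        /* ... once the BH is no longer needed, and before ctx is freed ... */
        qemu_bh_delete(bh);     /* otherwise aio_ctx_finalize() would leak it (now: assert) */
    }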
-rw-r--r--  async.c                           29
-rw-r--r--  hw/block/dataplane/virtio-blk.c    2
-rw-r--r--  include/block/aio.h                3
3 files changed, 31 insertions(+), 3 deletions(-)
diff --git a/async.c b/async.c
index 9a98a74..efce14b 100644
--- a/async.c
+++ b/async.c
@@ -79,8 +79,10 @@ int aio_bh_poll(AioContext *ctx)
          * aio_notify again if necessary.
          */
         if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
-            if (!bh->idle)
+            /* Idle BHs and the notify BH don't count as progress */
+            if (!bh->idle && bh != ctx->notify_dummy_bh) {
                 ret = 1;
+            }
             bh->idle = 0;
             bh->cb(bh->opaque);
         }
@@ -230,7 +232,21 @@ aio_ctx_finalize(GSource *source)
 {
     AioContext *ctx = (AioContext *) source;

+    qemu_bh_delete(ctx->notify_dummy_bh);
     thread_pool_free(ctx->thread_pool);
+
+    qemu_mutex_lock(&ctx->bh_lock);
+    while (ctx->first_bh) {
+        QEMUBH *next = ctx->first_bh->next;
+
+        /* qemu_bh_delete() must have been called on BHs in this AioContext */
+        assert(ctx->first_bh->deleted);
+
+        g_free(ctx->first_bh);
+        ctx->first_bh = next;
+    }
+    qemu_mutex_unlock(&ctx->bh_lock);
+
     aio_set_event_notifier(ctx, &ctx->notifier, NULL);
     event_notifier_cleanup(&ctx->notifier);
     rfifolock_destroy(&ctx->lock);
@@ -285,8 +301,15 @@ static void aio_timerlist_notify(void *opaque)
 static void aio_rfifolock_cb(void *opaque)
 {
+    AioContext *ctx = opaque;
+
     /* Kick owner thread in case they are blocked in aio_poll() */
-    aio_notify(opaque);
+    qemu_bh_schedule(ctx->notify_dummy_bh);
+}
+
+static void notify_dummy_bh(void *opaque)
+{
+    /* Do nothing, we were invoked just to force the event loop to iterate */
 }

 static void event_notifier_dummy_cb(EventNotifier *e)
@@ -313,6 +336,8 @@ AioContext *aio_context_new(Error **errp)
     rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
     timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

+    ctx->notify_dummy_bh = aio_bh_new(ctx, notify_dummy_bh, NULL);
+
     return ctx;
 }
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 3db139b..6106e46 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -223,8 +223,8 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
     virtio_blk_data_plane_stop(s);
     blk_op_unblock_all(s->conf->conf.blk, s->blocker);
     error_free(s->blocker);
-    object_unref(OBJECT(s->iothread));
     qemu_bh_delete(s->bh);
+    object_unref(OBJECT(s->iothread));
     g_free(s);
 }
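The virtio-blk-dataplane change above is purely an ordering fix: the IOThread reference keeps the AioContext that owns s->bh alive, so the bottom half has to be deleted before that reference is dropped. A minimal sketch of the same teardown rule, with hypothetical names (MyDataPlane, my_destroy):

    #include "block/aio.h"
    #include "sysemu/iothread.h"

    /* Hypothetical teardown helper; MyDataPlane and my_destroy are illustrative names. */
    typedef struct {
        QEMUBH *bh;            /* bottom half created on the IOThread's AioContext */
        IOThread *iothread;    /* holds a reference that keeps that AioContext alive */
    } MyDataPlane;

    static void my_destroy(MyDataPlane *s)
    {
        qemu_bh_delete(s->bh);               /* unlink the BH while its AioContext still exists */
        object_unref(OBJECT(s->iothread));   /* may finalize the IOThread and its AioContext */
        g_free(s);
    }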
diff --git a/include/block/aio.h b/include/block/aio.h
index 9dd32e0..400b1b0 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -114,6 +114,9 @@ struct AioContext {
     bool notified;
     EventNotifier notifier;

+    /* Scheduling this BH forces the event loop to iterate */
+    QEMUBH *notify_dummy_bh;
+
     /* Thread pool for performing work and receiving completion callbacks */
     struct ThreadPool *thread_pool;