author     Stefan Hajnoczi <stefanha@redhat.com>      2023-05-16 15:02:35 -0400
committer  Kevin Wolf <kwolf@redhat.com>              2023-05-30 17:32:02 +0200
commit     1665d9326fd2dd97f1f4061decd67702956ec53c
tree       afd5f39ade36feebdc930fc30d6a84dc126e9290   /hw/block
parent     bd58ab40c3fcfdd94f5524626ae13c43818bd23a
virtio-blk: implement BlockDevOps->drained_begin()
Detach ioeventfds during drained sections to stop I/O submission from
the guest. virtio-blk is no longer reliant on aio_disable_external()
after this patch. This will allow us to remove the aio_disable_external()
API once all other code that relies on it is converted.

Take extra care to avoid attaching/detaching ioeventfds if the data
plane is started/stopped during a drained section. This should be rare,
but maybe the mirror block job can trigger it.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20230516190238.8401-18-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
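Context for the change (a sketch, not part of this commit): the BlockDevOps
callbacks added below are invoked by the block layer whenever the device's
BlockBackend enters or leaves a drained section. Assuming the standard
blk_drain()/bdrv_drained_begin() APIs, the caller side looks roughly like this:

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"

/*
 * Sketch only: any code that drains the virtio-blk device's BlockBackend,
 * e.g. the mirror block job mentioned above, reaches the callbacks added
 * by this patch through the block layer's drain machinery.
 */
static void drain_example(BlockBackend *blk)    /* hypothetical helper */
{
    blk_drain(blk);    /* drained_begin() -> wait for in-flight I/O -> drained_end() */
}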
Diffstat (limited to 'hw/block')
-rw-r--r--  hw/block/dataplane/virtio-blk.c  16
-rw-r--r--  hw/block/virtio-blk.c            38
2 files changed, 47 insertions, 7 deletions
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 4f5c7cd..b90456c 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -246,13 +246,15 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
     }
 
     /* Get this show started by hooking up our callbacks */
-    aio_context_acquire(s->ctx);
-    for (i = 0; i < nvqs; i++) {
-        VirtQueue *vq = virtio_get_queue(s->vdev, i);
+    if (!blk_in_drain(s->conf->conf.blk)) {
+        aio_context_acquire(s->ctx);
+        for (i = 0; i < nvqs; i++) {
+            VirtQueue *vq = virtio_get_queue(s->vdev, i);
 
-        virtio_queue_aio_attach_host_notifier(vq, s->ctx);
+            virtio_queue_aio_attach_host_notifier(vq, s->ctx);
+        }
+        aio_context_release(s->ctx);
     }
-    aio_context_release(s->ctx);
     return 0;
 
   fail_aio_context:
@@ -322,7 +324,9 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
     s->stopping = true;
     trace_virtio_blk_data_plane_stop(s);
 
-    aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);
+    if (!blk_in_drain(s->conf->conf.blk)) {
+        aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);
+    }
 
     aio_context_acquire(s->ctx);
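A note on the guard used in both hunks above: blk_in_drain() reports whether
the BlockBackend currently sits inside a drained section, so dataplane
start/stop skips attaching/detaching ioeventfds that the drained_begin()/
drained_end() callbacks already manage. A minimal sketch of that helper,
assuming it is backed by the BlockBackend quiesce counter (the field name is
an assumption):

/* Sketch only; the real helper lives in block/block-backend.c. */
bool blk_in_drain(BlockBackend *blk)
{
    return qatomic_read(&blk->quiesce_counter) > 0;    /* assumed counter name */
}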
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 8f65ea4..4ca66b5 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -1506,8 +1506,44 @@ static void virtio_blk_resize(void *opaque)
     aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev);
 }
 
+/* Suspend virtqueue ioeventfd processing during drain */
+static void virtio_blk_drained_begin(void *opaque)
+{
+    VirtIOBlock *s = opaque;
+    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
+    AioContext *ctx = blk_get_aio_context(s->conf.conf.blk);
+
+    if (!s->dataplane || !s->dataplane_started) {
+        return;
+    }
+
+    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
+        VirtQueue *vq = virtio_get_queue(vdev, i);
+        virtio_queue_aio_detach_host_notifier(vq, ctx);
+    }
+}
+
+/* Resume virtqueue ioeventfd processing after drain */
+static void virtio_blk_drained_end(void *opaque)
+{
+    VirtIOBlock *s = opaque;
+    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
+    AioContext *ctx = blk_get_aio_context(s->conf.conf.blk);
+
+    if (!s->dataplane || !s->dataplane_started) {
+        return;
+    }
+
+    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
+        VirtQueue *vq = virtio_get_queue(vdev, i);
+        virtio_queue_aio_attach_host_notifier(vq, ctx);
+    }
+}
+
 static const BlockDevOps virtio_block_ops = {
-    .resize_cb = virtio_blk_resize,
+    .resize_cb     = virtio_blk_resize,
+    .drained_begin = virtio_blk_drained_begin,
+    .drained_end   = virtio_blk_drained_end,
 };
 
 static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
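For completeness (unchanged by this patch, shown here as a sketch):
virtio_block_ops is registered on the device's BlockBackend during realize,
which is how the block layer finds the new drained hooks. Roughly:

/* Sketch of the existing registration in virtio_blk_device_realize();
 * the surrounding code and the s->blk field usage are assumptions. */
blk_set_dev_ops(s->blk, &virtio_block_ops, s);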