path: root/block/vdi.c
author     Kevin Wolf <kwolf@redhat.com>    2011-06-07 16:12:58 +0200
committer  Kevin Wolf <kwolf@redhat.com>    2011-06-15 14:35:15 +0200
commit     e67a64a869312eccc1487409aaa03177da4d2f26 (patch)
tree       51c534bdb6ef78585b36d292a7803b825b49404d /block/vdi.c
parent     b11a24dee661dd1e1de0dcbc149052ed67b0647a (diff)
vdi: Avoid direct AIO callback
bdrv_aio_* must not call the callback before returning to its caller. In vdi, this could happen in some error cases. This starts the real request processing in a BH to avoid this situation.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
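The deferral relies on QEMU's bottom-half (BH) mechanism: the entry point only creates and schedules a BH, and the BH then runs the actual request processing from the main loop, after the entry point has returned. Below is a minimal sketch of the vdi_schedule_bh() helper referenced in the hunks that follow, assuming the qemu_bh_new()/qemu_bh_schedule() API of this QEMU version; it is illustrative, not the verbatim implementation.

    static int vdi_schedule_bh(QEMUBHFunc *cb, VdiAIOCB *acb)
    {
        if (acb->bh) {
            /* A bottom half is already pending for this request. */
            return -EIO;
        }

        /* Create the BH; 'cb' runs from the main loop, i.e. only after
         * vdi_aio_readv()/vdi_aio_writev() has returned to its caller. */
        acb->bh = qemu_bh_new(cb, acb);
        if (!acb->bh) {
            return -EIO;
        }

        qemu_bh_schedule(acb->bh);
        return 0;
    }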
Diffstat (limited to 'block/vdi.c')
-rw-r--r--   block/vdi.c   41
1 file changed, 36 insertions(+), 5 deletions(-)
diff --git a/block/vdi.c b/block/vdi.c
index 4c9e201..261cf9b 100644
--- a/block/vdi.c
+++ b/block/vdi.c
@@ -152,6 +152,7 @@ typedef struct {
/* Buffer for new allocated block. */
void *block_buffer;
void *orig_buf;
+ bool is_write;
int header_modified;
BlockDriverAIOCB *hd_aiocb;
struct iovec hd_iov;
@@ -504,6 +505,8 @@ static VdiAIOCB *vdi_aio_setup(BlockDriverState *bs, int64_t sector_num,
acb->hd_aiocb = NULL;
acb->sector_num = sector_num;
acb->qiov = qiov;
+ acb->is_write = is_write;
+
if (qiov->niov > 1) {
acb->buf = qemu_blockalign(bs, qiov->size);
acb->orig_buf = acb->buf;
@@ -542,14 +545,20 @@ static int vdi_schedule_bh(QEMUBHFunc *cb, VdiAIOCB *acb)
}
static void vdi_aio_read_cb(void *opaque, int ret);
+static void vdi_aio_write_cb(void *opaque, int ret);
-static void vdi_aio_read_bh(void *opaque)
+static void vdi_aio_rw_bh(void *opaque)
{
VdiAIOCB *acb = opaque;
logout("\n");
qemu_bh_delete(acb->bh);
acb->bh = NULL;
- vdi_aio_read_cb(opaque, 0);
+
+ if (acb->is_write) {
+ vdi_aio_write_cb(opaque, 0);
+ } else {
+ vdi_aio_read_cb(opaque, 0);
+ }
}
static void vdi_aio_read_cb(void *opaque, int ret)
@@ -597,7 +606,7 @@ static void vdi_aio_read_cb(void *opaque, int ret)
if (bmap_entry == VDI_UNALLOCATED) {
/* Block not allocated, return zeros, no need to wait. */
memset(acb->buf, 0, n_sectors * SECTOR_SIZE);
- ret = vdi_schedule_bh(vdi_aio_read_bh, acb);
+ ret = vdi_schedule_bh(vdi_aio_rw_bh, acb);
if (ret < 0) {
goto done;
}
@@ -630,12 +639,23 @@ static BlockDriverAIOCB *vdi_aio_readv(BlockDriverState *bs,
BlockDriverCompletionFunc *cb, void *opaque)
{
VdiAIOCB *acb;
+ int ret;
+
logout("\n");
acb = vdi_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
if (!acb) {
return NULL;
}
- vdi_aio_read_cb(acb, 0);
+
+ ret = vdi_schedule_bh(vdi_aio_rw_bh, acb);
+ if (ret < 0) {
+ if (acb->qiov->niov > 1) {
+ qemu_vfree(acb->orig_buf);
+ }
+ qemu_aio_release(acb);
+ return NULL;
+ }
+
return &acb->common;
}
@@ -789,12 +809,23 @@ static BlockDriverAIOCB *vdi_aio_writev(BlockDriverState *bs,
BlockDriverCompletionFunc *cb, void *opaque)
{
VdiAIOCB *acb;
+ int ret;
+
logout("\n");
acb = vdi_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
if (!acb) {
return NULL;
}
- vdi_aio_write_cb(acb, 0);
+
+ ret = vdi_schedule_bh(vdi_aio_rw_bh, acb);
+ if (ret < 0) {
+ if (acb->qiov->niov > 1) {
+ qemu_vfree(acb->orig_buf);
+ }
+ qemu_aio_release(acb);
+ return NULL;
+ }
+
return &acb->common;
}
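With this change, a failure to schedule the BH is reported by freeing the bounce buffer, releasing the ACB, and returning NULL from vdi_aio_readv()/vdi_aio_writev(), so the completion callback is never invoked before the entry point returns. The following hypothetical caller sketch shows why that contract matters; the request struct and helper names are illustrative and not taken from QEMU.

    typedef struct VdiTestRequest {
        BlockDriverAIOCB *aiocb;   /* recorded after bdrv_aio_readv() returns */
        bool done;
    } VdiTestRequest;

    static void test_read_done(void *opaque, int ret)
    {
        VdiTestRequest *req = opaque;
        /* If the driver invoked the callback before returning, req->aiocb
         * would still be NULL here and the caller could not track or
         * cancel the request. */
        req->done = true;
    }

    static int submit_read(BlockDriverState *bs, int64_t sector_num,
                           QEMUIOVector *qiov, int nb_sectors)
    {
        VdiTestRequest req = { .aiocb = NULL, .done = false };

        req.aiocb = bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                                   test_read_done, &req);
        if (!req.aiocb) {
            return -EIO;   /* synchronous failure, callback never fired */
        }
        /* ... wait for completion via the main loop ... */
        return 0;
    }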