path: root/dma-helpers.c
author     Fam Zheng <famz@redhat.com>            2015-03-16 17:03:37 +0800
committer  Paolo Bonzini <pbonzini@redhat.com>    2015-04-27 18:24:18 +0200
commit     e95205e1f9cd2c4262b7a7b1c992a94512c86d0e (patch)
tree       1c03e53eeb46e2d7c15b9c15a1f1c9414e254e6e /dma-helpers.c
parent     33b6c2edf6214f02b9beaea61b169506c01f90aa (diff)
dma-helpers: Fix race condition of continue_after_map_failure and dma_aio_cancel
If DMA's owning thread cancels the IO while the bounce buffer's owning
thread is notifying the "cpu client list", a use-after-free happens:

     continue_after_map_failure               dma_aio_cancel
     ------------------------------------------------------------------
     aio_bh_new
                                               qemu_bh_delete
     qemu_bh_schedule (use after free)

Also, the old code doesn't run the bh in the right AioContext.

Fix both problems by passing a QEMUBH to cpu_register_map_client.

Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <1426496617-10702-6-git-send-email-famz@redhat.com>
[Remove unnecessary forward declaration. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
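
For context, this patch relies on the map-client API in exec.c being changed
(in a sibling patch of this series) to take a QEMUBH instead of an
(opaque, callback) pair. The sketch below is only an illustration of that
assumed interface, not code from this commit; the MapClient bookkeeping is
simplified and the locking of the client list is omitted:

    #include "qemu-common.h"
    #include "qemu/queue.h"
    #include "block/aio.h"

    /* Sketch of the assumed post-series interface: the caller owns the BH,
     * and the map-client list only stores a pointer to it. */
    typedef struct MapClient {
        QEMUBH *bh;
        QLIST_ENTRY(MapClient) link;
    } MapClient;

    static QLIST_HEAD(, MapClient) map_client_list =
        QLIST_HEAD_INITIALIZER(map_client_list);

    void cpu_register_map_client(QEMUBH *bh)
    {
        MapClient *client = g_new(MapClient, 1);

        client->bh = bh;
        QLIST_INSERT_HEAD(&map_client_list, client, link);
    }

    void cpu_unregister_map_client(QEMUBH *bh)
    {
        MapClient *client;

        QLIST_FOREACH(client, &map_client_list, link) {
            if (client->bh == bh) {
                QLIST_REMOVE(client, link);
                g_free(client);
                break;
            }
        }
    }

Because dma-helpers.c now creates the BH itself (with aio_bh_new) before
registering, dma_aio_cancel() can unregister and delete that same BH. The old
arrangement created the BH only inside continue_after_map_failure, which runs
in the bounce buffer owner's thread and can therefore race with the cancel
path.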
Diffstat (limited to 'dma-helpers.c')
-rw-r--r--  dma-helpers.c | 17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/dma-helpers.c b/dma-helpers.c
index 6918572..1fddf6a 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -92,14 +92,6 @@ static void reschedule_dma(void *opaque)
     dma_blk_cb(dbs, 0);
 }
 
-static void continue_after_map_failure(void *opaque)
-{
-    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
-
-    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
-    qemu_bh_schedule(dbs->bh);
-}
-
 static void dma_blk_unmap(DMAAIOCB *dbs)
 {
     int i;
@@ -161,7 +153,9 @@ static void dma_blk_cb(void *opaque, int ret)
 
     if (dbs->iov.size == 0) {
         trace_dma_map_wait(dbs);
-        cpu_register_map_client(dbs, continue_after_map_failure);
+        dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk),
+                             reschedule_dma, dbs);
+        cpu_register_map_client(dbs->bh);
         return;
     }
 
@@ -183,6 +177,11 @@ static void dma_aio_cancel(BlockAIOCB *acb)
     if (dbs->acb) {
         blk_aio_cancel_async(dbs->acb);
     }
+    if (dbs->bh) {
+        cpu_unregister_map_client(dbs->bh);
+        qemu_bh_delete(dbs->bh);
+        dbs->bh = NULL;
+    }
 }
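
To make the AioContext half of the fix concrete: the BH is created with
aio_bh_new(blk_get_aio_context(dbs->blk), ...), so whichever thread later
notifies the map-client list only schedules the BH, and reschedule_dma runs
in the block device's AioContext. Below is a simplified, assumed sketch of
that notification side (the real function lives in exec.c as
cpu_notify_map_clients; the details here are condensed and reuse the
MapClient type sketched above):

    /* Assumed sketch: when a bounce buffer is freed, wake every waiter by
     * scheduling its BH.  qemu_bh_schedule() is safe to call from another
     * thread, and each BH runs in the AioContext it was created in. */
    static void cpu_notify_map_clients(void)
    {
        while (!QLIST_EMPTY(&map_client_list)) {
            MapClient *client = QLIST_FIRST(&map_client_list);

            qemu_bh_schedule(client->bh);
            QLIST_REMOVE(client, link);
            g_free(client);
        }
    }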