| author | Kevin Wolf <kwolf@redhat.com> | 2014-07-01 16:09:54 +0200 |
|---|---|---|
| committer | Kevin Wolf <kwolf@redhat.com> | 2014-07-14 12:03:20 +0200 |
| commit | 8eb029c26ecf61da6c2b45c3c23472e8c477ba34 (patch) | |
| tree | 33de0b160ce79aac82971c308b748adc96df0d71 | |
| parent | f06ee3d4aa547df8d7d2317b2b6db7a88c1f3744 (diff) | |
block: Assert qiov length matches request length
At least raw-posix relies on this: it can allocate a bounce buffer based on
the request length, but later accesses it through all of the qiov entries.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
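To make the failure mode concrete, here is a minimal, self-contained sketch of the bounce-buffer gather step that handle_aiocb_rw() performs (illustration only, not code from this patch; the function and variable names below are invented): the buffer is sized from the request length but filled by walking every iovec entry, so the copy stays in bounds only if the entries sum to exactly that length.

```c
/* Illustrative sketch only -- a simplified model of the raw-posix
 * bounce-buffer gather path, not the QEMU code itself. */
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

static void gather_into_bounce_buffer(const struct iovec *iov, int niov,
                                      size_t request_len)
{
    /* The buffer is allocated from the request length ... */
    char *buf = malloc(request_len);
    char *p;

    if (!buf) {
        return;
    }
    p = buf;

    /* ... but filled from every iovec entry.  If the entries add up to
     * more than request_len, this writes past the end of buf -- the case
     * that assert(!qiov || bytes == qiov->size) now rules out upstream. */
    for (int i = 0; i < niov; i++) {
        memcpy(p, iov[i].iov_base, iov[i].iov_len);
        p += iov[i].iov_len;
    }
    assert(p == buf + request_len);   /* mirrors the new assert in handle_aiocb_rw() */

    free(buf);
}

int main(void)
{
    char a[512] = {0}, b[1024] = {0};
    struct iovec iov[2] = {
        { .iov_base = a, .iov_len = sizeof(a) },
        { .iov_base = b, .iov_len = sizeof(b) },
    };

    /* Request length matches the total iovec size, so the assert holds. */
    gather_into_bounce_buffer(iov, 2, sizeof(a) + sizeof(b));
    return 0;
}
```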
| -rw-r--r-- | block.c | 2 |
|---|---|---|
| -rw-r--r-- | block/raw-posix.c | 15 |

2 files changed, 13 insertions, 4 deletions
```diff
diff --git a/block.c b/block.c
--- a/block.c
+++ b/block.c
@@ -3010,6 +3010,7 @@ static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
 
     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+    assert(!qiov || bytes == qiov->size);
 
     /* Handle Copy on Read and associated serialisation */
     if (flags & BDRV_REQ_COPY_ON_READ) {
@@ -3279,6 +3280,7 @@ static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
 
     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+    assert(!qiov || bytes == qiov->size);
 
     waited = wait_serialising_requests(req);
     assert(!waited || !req->serialising);
diff --git a/block/raw-posix.c b/block/raw-posix.c
index a857def..2bcc73d 100644
--- a/block/raw-posix.c
+++ b/block/raw-posix.c
@@ -790,6 +790,7 @@ static ssize_t handle_aiocb_rw(RawPosixAIOData *aiocb)
             memcpy(p, aiocb->aio_iov[i].iov_base, aiocb->aio_iov[i].iov_len);
             p += aiocb->aio_iov[i].iov_len;
         }
+        assert(p - buf == aiocb->aio_nbytes);
     }
 
     nbytes = handle_aiocb_rw_linear(aiocb, buf);
@@ -804,9 +805,11 @@ static ssize_t handle_aiocb_rw(RawPosixAIOData *aiocb)
                 copy = aiocb->aio_iov[i].iov_len;
             }
             memcpy(aiocb->aio_iov[i].iov_base, p, copy);
+            assert(count >= copy);
             p     += copy;
             count -= copy;
         }
+        assert(count == 0);
     }
 
     qemu_vfree(buf);
@@ -993,12 +996,14 @@ static int paio_submit_co(BlockDriverState *bs, int fd,
     acb->aio_type = type;
     acb->aio_fildes = fd;
 
+    acb->aio_nbytes = nb_sectors * BDRV_SECTOR_SIZE;
+    acb->aio_offset = sector_num * BDRV_SECTOR_SIZE;
+
     if (qiov) {
         acb->aio_iov = qiov->iov;
         acb->aio_niov = qiov->niov;
+        assert(qiov->size == acb->aio_nbytes);
     }
-    acb->aio_nbytes = nb_sectors * 512;
-    acb->aio_offset = sector_num * 512;
 
     trace_paio_submit_co(sector_num, nb_sectors, type);
     pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
@@ -1016,12 +1021,14 @@ static BlockDriverAIOCB *paio_submit(BlockDriverState *bs, int fd,
     acb->aio_type = type;
     acb->aio_fildes = fd;
 
+    acb->aio_nbytes = nb_sectors * BDRV_SECTOR_SIZE;
+    acb->aio_offset = sector_num * BDRV_SECTOR_SIZE;
+
     if (qiov) {
         acb->aio_iov = qiov->iov;
         acb->aio_niov = qiov->niov;
+        assert(qiov->size == acb->aio_nbytes);
     }
-    acb->aio_nbytes = nb_sectors * 512;
-    acb->aio_offset = sector_num * 512;
 
     trace_paio_submit(acb, opaque, sector_num, nb_sectors, type);
     pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
```
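As a side note on the paio_submit_co()/paio_submit() hunks above: computing the byte length and offset before the qiov branch (and from BDRV_SECTOR_SIZE rather than a literal 512) is what lets the new assertion compare qiov->size against acb->aio_nbytes. Below is a minimal sketch of that ordering, using invented stand-in types rather than the real RawPosixAIOData/QEMUIOVector.

```c
/* Sketch only: hypothetical, simplified stand-ins for the QEMU structures,
 * showing why the byte count is now computed before the qiov check. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define BDRV_SECTOR_SIZE 512ULL   /* same value QEMU uses */

struct sketch_qiov { size_t size; };                 /* stands in for QEMUIOVector   */
struct sketch_acb  { uint64_t nbytes, offset; };     /* stands in for RawPosixAIOData */

static void sketch_submit(struct sketch_acb *acb, int64_t sector_num,
                          int nb_sectors, const struct sketch_qiov *qiov)
{
    /* Length and offset in bytes are filled in first ... */
    acb->nbytes = nb_sectors * BDRV_SECTOR_SIZE;
    acb->offset = sector_num * BDRV_SECTOR_SIZE;

    /* ... so the qiov branch can check the invariant that the
     * bounce-buffer code in handle_aiocb_rw() relies on. */
    if (qiov) {
        assert(qiov->size == acb->nbytes);
    }
}

int main(void)
{
    struct sketch_acb acb;
    struct sketch_qiov qiov = { .size = 8 * BDRV_SECTOR_SIZE };

    sketch_submit(&acb, 0, 8, &qiov);   /* sizes agree: the assertion holds */
    return 0;
}
```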