author     Peter Maydell <peter.maydell@linaro.org>    2016-10-28 12:06:41 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2016-10-28 12:06:41 +0100
commit     9879b75873cacc88cdee490f6ab481e8ce766c69
tree       c5a575eed03a36c2824d8c061132e2adb6690497  /block/block-backend.c
parent     86398328467d78c84d8e3341f098f4697148a665
parent     b74fc7f78e0dd54fbae67d46552cebf81b59ae9f
Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging
Block layer patches
# gpg: Signature made Thu 27 Oct 2016 18:15:47 BST
# gpg: using RSA key 0x7F09B272C88F2FD6
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>"
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74 56FE 7F09 B272 C88F 2FD6
* remotes/kevin/tags/for-upstream: (23 commits)
iotests: Add test for NBD's blockdev-add interface
iotests: Add assert_json_filename_equal() method
socket_scm_helper: Accept fd directly
iotests.py: Allow concurrent qemu instances
iotests.py: Add qemu_nbd function
qapi: Allow blockdev-add for NBD
block/nbd: Use SocketAddress options
block/nbd: Accept SocketAddress
block/nbd: Add nbd_has_filename_options_conflict()
block/nbd: Use qdict_put()
block/nbd: Default port in nbd_refresh_filename()
block/nbd: Reject port parameter without host
block/nbd: Drop trailing "." in error messages
qemu-iotests: Fix typo for NFS with IMGOPTSSYNTAX
block: Remove bdrv_aio_ioctl()
raw: Implement .bdrv_co_ioctl instead of .bdrv_aio_ioctl
block: Introduce .bdrv_co_ioctl() driver callback
block: Remove bdrv_ioctl()
raw-posix: Don't use bdrv_ioctl()
block: Use blk_co_ioctl() for all BB level ioctls
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'block/block-backend.c')
-rw-r--r--  block/block-backend.c  94
1 file changed, 67 insertions(+), 27 deletions(-)
diff --git a/block/block-backend.c b/block/block-backend.c
index 1a724a8..c53ca30 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -1099,26 +1099,36 @@ BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
                         blk_aio_write_entry, flags, cb, opaque);
 }
 
+static void blk_aio_flush_entry(void *opaque)
+{
+    BlkAioEmAIOCB *acb = opaque;
+    BlkRwCo *rwco = &acb->rwco;
+
+    rwco->ret = blk_co_flush(rwco->blk);
+    blk_aio_complete(acb);
+}
+
 BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                           BlockCompletionFunc *cb, void *opaque)
 {
-    if (!blk_is_available(blk)) {
-        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
-    }
+    return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
+}
+
+static void blk_aio_pdiscard_entry(void *opaque)
+{
+    BlkAioEmAIOCB *acb = opaque;
+    BlkRwCo *rwco = &acb->rwco;
 
-    return bdrv_aio_flush(blk_bs(blk), cb, opaque);
+    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, acb->bytes);
+    blk_aio_complete(acb);
 }
 
 BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
                              int64_t offset, int count,
                              BlockCompletionFunc *cb, void *opaque)
 {
-    int ret = blk_check_byte_request(blk, offset, count);
-    if (ret < 0) {
-        return blk_abort_aio_request(blk, cb, opaque, ret);
-    }
-
-    return bdrv_aio_pdiscard(blk_bs(blk), offset, count, cb, opaque);
+    return blk_aio_prwv(blk, offset, count, NULL, blk_aio_pdiscard_entry, 0,
+                        cb, opaque);
 }
 
 void blk_aio_cancel(BlockAIOCB *acb)
@@ -1131,23 +1141,50 @@ void blk_aio_cancel_async(BlockAIOCB *acb)
     bdrv_aio_cancel_async(acb);
 }
 
-int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
+int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
 {
     if (!blk_is_available(blk)) {
         return -ENOMEDIUM;
     }
 
-    return bdrv_ioctl(blk_bs(blk), req, buf);
+    return bdrv_co_ioctl(blk_bs(blk), req, buf);
+}
+
+static void blk_ioctl_entry(void *opaque)
+{
+    BlkRwCo *rwco = opaque;
+    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
+                             rwco->qiov->iov[0].iov_base);
+}
+
+int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
+{
+    return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
+}
+
+static void blk_aio_ioctl_entry(void *opaque)
+{
+    BlkAioEmAIOCB *acb = opaque;
+    BlkRwCo *rwco = &acb->rwco;
+
+    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
+                             rwco->qiov->iov[0].iov_base);
+    blk_aio_complete(acb);
 }
 
 BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                           BlockCompletionFunc *cb, void *opaque)
 {
-    if (!blk_is_available(blk)) {
-        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
-    }
+    QEMUIOVector qiov;
+    struct iovec iov;
 
-    return bdrv_aio_ioctl(blk_bs(blk), req, buf, cb, opaque);
+    iov = (struct iovec) {
+        .iov_base = buf,
+        .iov_len = 0,
+    };
+    qemu_iovec_init_external(&qiov, &iov, 1);
+
+    return blk_aio_prwv(blk, req, 0, &qiov, blk_aio_ioctl_entry, 0, cb, opaque);
 }
 
 int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int count)
@@ -1169,13 +1206,15 @@ int blk_co_flush(BlockBackend *blk)
     return bdrv_co_flush(blk_bs(blk));
 }
 
-int blk_flush(BlockBackend *blk)
+static void blk_flush_entry(void *opaque)
 {
-    if (!blk_is_available(blk)) {
-        return -ENOMEDIUM;
-    }
+    BlkRwCo *rwco = opaque;
+    rwco->ret = blk_co_flush(rwco->blk);
+}
 
-    return bdrv_flush(blk_bs(blk));
+int blk_flush(BlockBackend *blk)
+{
+    return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0);
 }
 
 void blk_drain(BlockBackend *blk)
@@ -1555,14 +1594,15 @@ int blk_truncate(BlockBackend *blk, int64_t offset)
     return bdrv_truncate(blk_bs(blk), offset);
 }
 
-int blk_pdiscard(BlockBackend *blk, int64_t offset, int count)
+static void blk_pdiscard_entry(void *opaque)
 {
-    int ret = blk_check_byte_request(blk, offset, count);
-    if (ret < 0) {
-        return ret;
-    }
+    BlkRwCo *rwco = opaque;
+    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, rwco->qiov->size);
+}
 
-    return bdrv_pdiscard(blk_bs(blk), offset, count);
+int blk_pdiscard(BlockBackend *blk, int64_t offset, int count)
+{
+    return blk_prw(blk, offset, NULL, count, blk_pdiscard_entry, 0);
 }
 
 int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
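
Editor's note: every wrapper converted in this diff follows the same shape. The synchronous paths (blk_flush(), blk_ioctl(), blk_pdiscard()) pack their arguments into a BlkRwCo, hand a small entry function to the shared blk_prw() helper, and read the result back out of rwco->ret; the AIO paths do the same through blk_aio_prwv() and blk_aio_complete(). The sketch below illustrates only that shape and is not QEMU code: RwCo, prw() and flush_entry() are invented names for this example, and the real blk_prw() runs the entry function inside a coroutine and polls the AioContext until it finishes, rather than calling it directly as done here.

#include <stdio.h>

/*
 * Simplified stand-in for QEMU's BlkRwCo: carries the request
 * arguments into the entry function and the completion status out.
 */
typedef struct {
    long offset;
    void *buf;
    int ret;
} RwCo;

typedef void (*EntryFn)(void *opaque);

/*
 * Simplified stand-in for blk_prw(): pack the request, run the entry
 * function, and return whatever status it stored.  (QEMU dispatches
 * the entry function in a coroutine instead of a direct call.)
 */
static int prw(long offset, void *buf, EntryFn entry)
{
    RwCo rwco = { .offset = offset, .buf = buf, .ret = -1 };
    entry(&rwco);
    return rwco.ret;
}

/*
 * Entry function in the style of blk_flush_entry()/blk_ioctl_entry():
 * unpack the RwCo, do the work, store the result in rwco->ret.
 */
static void flush_entry(void *opaque)
{
    RwCo *rwco = opaque;
    rwco->ret = 0;   /* pretend the flush succeeded */
}

int main(void)
{
    printf("flush returned %d\n", prw(0, NULL, flush_entry));
    return 0;
}

The benefit of this structure is that one dispatch helper serves every request type; adding a new operation at the BlockBackend level only needs a small entry function rather than another hand-rolled availability check and bdrv_* forwarding path.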