author     Mateusz Kozlowski <kozlowski.mateuszpl@gmail.com>  2023-03-20 13:40:36 +0100
committer  Klaus Jensen <k.jensen@samsung.com>                2023-03-27 17:48:08 +0200
commit     9b4f01812f69ad6066725338c89945bb61f41823
tree       b9e2cbbb13083683faded2c88d565605c64aafba /hw
parent     e3debd5e7d0ce031356024878a0a18b9d109354a
hw/nvme: Change alignment in dma functions for nvme_blk_*
Since nvme_blk_read/write are used by both the data and metadata
portions of the I/O, they cannot have a 512B alignment requirement.
Without this change, any metadata transfer whose length is larger than
512B but not a multiple of 512B results in only a partial transfer.
Signed-off-by: Mateusz Kozlowski <kozlowski.mateuszpl@gmail.com>
Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
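To make the failure mode concrete: a transfer whose length is not a
multiple of the alignment gets cut short at the last aligned boundary,
exactly as the commit message describes. A minimal standalone sketch of
that arithmetic (hypothetical align_down helper for illustration, not
QEMU code):

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring what an alignment-constrained DMA path
 * does to a transfer length: round it down to the alignment. */
static uint32_t align_down(uint32_t len, uint32_t align)
{
    return len / align * align;
}

int main(void)
{
    uint32_t mlen = 640; /* metadata transfer: > 512B, not a multiple of 512B */

    /* Old behaviour: alignment hard-coded to BDRV_SECTOR_SIZE (512B). */
    printf("align 512: requested %u, transferred %u\n",
           mlen, align_down(mlen, 512)); /* -> 512: partial transfer */

    /* New metadata path: the caller passes an alignment of 1. */
    printf("align   1: requested %u, transferred %u\n",
           mlen, align_down(mlen, 1));   /* -> 640: complete transfer */

    return 0;
}
```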
Diffstat (limited to 'hw')
 hw/nvme/ctrl.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index 49c1210..2910095 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -1434,26 +1434,26 @@ uint16_t nvme_bounce_mdata(NvmeCtrl *n, void *ptr, uint32_t len,
 }
 
 static inline void nvme_blk_read(BlockBackend *blk, int64_t offset,
-                                 BlockCompletionFunc *cb, NvmeRequest *req)
+                                 uint32_t align, BlockCompletionFunc *cb,
+                                 NvmeRequest *req)
 {
     assert(req->sg.flags & NVME_SG_ALLOC);
 
     if (req->sg.flags & NVME_SG_DMA) {
-        req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE,
-                                  cb, req);
+        req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, align, cb, req);
     } else {
         req->aiocb = blk_aio_preadv(blk, offset, &req->sg.iov, 0, cb, req);
     }
 }
 
 static inline void nvme_blk_write(BlockBackend *blk, int64_t offset,
-                                  BlockCompletionFunc *cb, NvmeRequest *req)
+                                  uint32_t align, BlockCompletionFunc *cb,
+                                  NvmeRequest *req)
 {
     assert(req->sg.flags & NVME_SG_ALLOC);
 
     if (req->sg.flags & NVME_SG_DMA) {
-        req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE,
-                                   cb, req);
+        req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, align, cb, req);
     } else {
         req->aiocb = blk_aio_pwritev(blk, offset, &req->sg.iov, 0, cb, req);
     }
@@ -2207,10 +2207,10 @@ static void nvme_rw_cb(void *opaque, int ret)
             }
 
             if (req->cmd.opcode == NVME_CMD_READ) {
-                return nvme_blk_read(blk, offset, nvme_rw_complete_cb, req);
+                return nvme_blk_read(blk, offset, 1, nvme_rw_complete_cb, req);
             }
 
-            return nvme_blk_write(blk, offset, nvme_rw_complete_cb, req);
+            return nvme_blk_write(blk, offset, 1, nvme_rw_complete_cb, req);
         }
     }
 
@@ -3437,7 +3437,7 @@ static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req)
 
     block_acct_start(blk_get_stats(blk), &req->acct, data_size,
                      BLOCK_ACCT_READ);
-    nvme_blk_read(blk, data_offset, nvme_rw_cb, req);
+    nvme_blk_read(blk, data_offset, BDRV_SECTOR_SIZE, nvme_rw_cb, req);
     return NVME_NO_COMPLETE;
 
 invalid:
@@ -3607,7 +3607,7 @@ static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append,
 
         block_acct_start(blk_get_stats(blk), &req->acct, data_size,
                          BLOCK_ACCT_WRITE);
-        nvme_blk_write(blk, data_offset, nvme_rw_cb, req);
+        nvme_blk_write(blk, data_offset, BDRV_SECTOR_SIZE, nvme_rw_cb, req);
     } else {
         req->aiocb = blk_aio_pwrite_zeroes(blk, data_offset, data_size,
                                            BDRV_REQ_MAY_UNMAP, nvme_rw_cb,
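A note on the resulting call sites: after this patch the alignment is the
caller's choice. nvme_read() and nvme_do_write() keep BDRV_SECTOR_SIZE on the
data path, where transfer lengths are always whole logical blocks (never
smaller than 512B in NVMe), while the metadata path in nvme_rw_cb() passes an
alignment of 1 so that odd-sized metadata transfers complete in full.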