author    | Klaus Jensen <k.jensen@samsung.com> | 2020-02-23 08:32:25 -0800
committer | Klaus Jensen <k.jensen@samsung.com> | 2020-09-02 08:48:50 +0200
commit    | 36c100f530b52cc43ad214c48dcfe4c6752859b6 (patch)
tree      | 29292d1e58b35f6fe65ec7e5091b58c5812f7716
parent    | 076c816f4e62824714427a9d193f31ccb693d404 (diff)
hw/block/nvme: refactor request bounds checking
Hoist bounds checking into its own function and check for wrap-around.
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Minwoo Im <minwoo.im.dev@gmail.com>
-rw-r--r-- | hw/block/nvme.c | 26
1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index fc629cd..8dade3e 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -570,6 +570,18 @@ static void nvme_clear_events(NvmeCtrl *n, uint8_t event_type)
     }
 }
 
+static inline uint16_t nvme_check_bounds(NvmeCtrl *n, NvmeNamespace *ns,
+                                         uint64_t slba, uint32_t nlb)
+{
+    uint64_t nsze = le64_to_cpu(ns->id_ns.nsze);
+
+    if (unlikely(UINT64_MAX - slba < nlb || slba + nlb > nsze)) {
+        return NVME_LBA_RANGE | NVME_DNR;
+    }
+
+    return NVME_SUCCESS;
+}
+
 static void nvme_rw_cb(void *opaque, int ret)
 {
     NvmeRequest *req = opaque;
@@ -617,12 +629,14 @@ static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
     uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
     uint64_t offset = slba << data_shift;
     uint32_t count = nlb << data_shift;
+    uint16_t status;
 
     trace_pci_nvme_write_zeroes(nvme_cid(req), slba, nlb);
 
-    if (unlikely(slba + nlb > ns->id_ns.nsze)) {
+    status = nvme_check_bounds(n, ns, slba, nlb);
+    if (status) {
         trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
-        return NVME_LBA_RANGE | NVME_DNR;
+        return status;
     }
 
     block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
@@ -645,13 +659,15 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
     uint64_t data_offset = slba << data_shift;
     int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
     enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
+    uint16_t status;
 
     trace_pci_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba);
 
-    if (unlikely((slba + nlb) > ns->id_ns.nsze)) {
-        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
+    status = nvme_check_bounds(n, ns, slba, nlb);
+    if (status) {
         trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
-        return NVME_LBA_RANGE | NVME_DNR;
+        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
+        return status;
     }
 
     if (nvme_map_dptr(n, cmd, data_size, req)) {
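The wrap-around case the commit message mentions is why the helper tests UINT64_MAX - slba < nlb before the size comparison: slba + nlb is evaluated in 64-bit arithmetic, so a command with a starting LBA close to UINT64_MAX wraps to a small sum and would slip past a naive slba + nlb > nsze check. The stand-alone sketch below illustrates the same guard outside QEMU; the helper name, the namespace size, and the test values are illustrative only and do not appear in the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative restatement of the bounds check the patch introduces:
 * reject a range that would wrap around UINT64_MAX or end past the
 * namespace size. Hypothetical helper, not the QEMU function itself.
 */
static int range_in_bounds(uint64_t slba, uint32_t nlb, uint64_t nsze)
{
    if (UINT64_MAX - slba < nlb) {
        return 0;               /* slba + nlb would wrap around */
    }
    return slba + nlb <= nsze;  /* whole range must fit in the namespace */
}

int main(void)
{
    uint64_t nsze = 1ULL << 20;     /* hypothetical namespace size in blocks */

    assert(range_in_bounds(0, 8, nsze));        /* ordinary request */
    assert(!range_in_bounds(nsze, 1, nsze));    /* one block past the end */
    /* slba + nlb wraps to 6 here; only the explicit guard catches it */
    assert(!range_in_bounds(UINT64_MAX - 1, 8, nsze));

    printf("bounds checks behave as expected\n");
    return 0;
}

In the patch itself this logic lives in nvme_check_bounds(), so both nvme_write_zeros() and nvme_rw() fail such requests with NVME_LBA_RANGE | NVME_DNR instead of relying on the overflow-prone comparison they used before.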