| | | |
|---|---|---|
| author | Klaus Jensen <k.jensen@samsung.com> | 2021-02-07 21:21:45 +0100 |
| committer | Klaus Jensen <k.jensen@samsung.com> | 2021-03-09 11:00:57 +0100 |
| commit | 073d12d99871d0d500f44bd49cb0c45df14cf2c3 | |
| tree | 78757e5630da6ea8ea792223f0ec037d7f9222ad /hw/block | |
| parent | f80a1c331ac2fe7428ddd76b1f059642d6a91338 | |
hw/block/nvme: remove the req dependency in map functions
The PRP and SGL mapping functions do not have any particular need for
the entire NvmeRequest as a parameter. Clean this up by passing only the
NvmeSg and NvmeCmd that they actually use.
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
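
For readers skimming the patch, the shape of the change can be sketched with a toy model. This is illustrative C only, not QEMU code: the struct definitions and the helper body below are simplified stand-ins, and the NvmeCtrl argument is dropped for brevity. The point is the signature change, mirrored from the diff: the mapping helpers receive the NvmeSg and NvmeCmd they operate on instead of the whole NvmeRequest, and call sites pass &req->sg and &req->cmd explicitly.

```c
/*
 * Toy model of the refactor, not QEMU code. All types and the helper body
 * are simplified stand-ins; only the parameter shape follows the patch.
 */
#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>

typedef struct { int nents; } NvmeSg;                  /* stand-in */
typedef struct { uint64_t prp1, prp2; } NvmeCmd;       /* stand-in */
typedef struct { NvmeCmd cmd; NvmeSg sg; } NvmeRequest;

/* Before: uint16_t nvme_map_dptr(NvmeCtrl *n, size_t len, NvmeRequest *req); */
/* After (modelled here without the NvmeCtrl argument):                       */
static uint16_t nvme_map_dptr(NvmeSg *sg, size_t len, NvmeCmd *cmd)
{
    /* Pretend to map cmd->prp1/prp2 into sg; the real code walks PRPs/SGLs. */
    sg->nents = (int)(len / 4096) + 1;
    printf("mapped prp1=0x%" PRIx64 " prp2=0x%" PRIx64 " into %d entries\n",
           cmd->prp1, cmd->prp2, sg->nents);
    return 0;
}

int main(void)
{
    NvmeRequest req = { .cmd = { .prp1 = 0x1000, .prp2 = 0x2000 } };

    /* Call sites now decompose the request themselves, as nvme_dma() and
     * nvme_read() do in the patch. */
    return nvme_map_dptr(&req.sg, 8192, &req.cmd);
}
```

A side effect visible in the diff: since the mapping helper no longer sees the request (and thus the submission queue), the "SGLs shall not be used for Admin commands" check moves out of nvme_map_dptr() and into nvme_admin_cmd().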
Diffstat (limited to 'hw/block')
| | | |
|---|---|---|
| -rw-r--r-- | hw/block/nvme.c | 61 |
| -rw-r--r-- | hw/block/trace-events | 4 |

2 files changed, 33 insertions, 32 deletions
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index a1e28c6..59942f8 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -544,8 +544,8 @@ static inline bool nvme_addr_is_dma(NvmeCtrl *n, hwaddr addr)
     return !(nvme_addr_is_cmb(n, addr) || nvme_addr_is_pmr(n, addr));
 }
 
-static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
-                             uint32_t len, NvmeRequest *req)
+static uint16_t nvme_map_prp(NvmeCtrl *n, NvmeSg *sg, uint64_t prp1,
+                             uint64_t prp2, uint32_t len)
 {
     hwaddr trans_len = n->page_size - (prp1 % n->page_size);
     trans_len = MIN(len, trans_len);
@@ -555,9 +555,9 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
 
     trace_pci_nvme_map_prp(trans_len, len, prp1, prp2, num_prps);
 
-    nvme_sg_init(n, &req->sg, nvme_addr_is_dma(n, prp1));
+    nvme_sg_init(n, sg, nvme_addr_is_dma(n, prp1));
 
-    status = nvme_map_addr(n, &req->sg, prp1, trans_len);
+    status = nvme_map_addr(n, sg, prp1, trans_len);
     if (status) {
         goto unmap;
     }
@@ -607,7 +607,7 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
             }
 
             trans_len = MIN(len, n->page_size);
-            status = nvme_map_addr(n, &req->sg, prp_ent, trans_len);
+            status = nvme_map_addr(n, sg, prp_ent, trans_len);
             if (status) {
                 goto unmap;
             }
@@ -621,7 +621,7 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
             status = NVME_INVALID_PRP_OFFSET | NVME_DNR;
             goto unmap;
         }
-        status = nvme_map_addr(n, &req->sg, prp2, len);
+        status = nvme_map_addr(n, sg, prp2, len);
         if (status) {
             goto unmap;
         }
@@ -631,7 +631,7 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
     return NVME_SUCCESS;
 
 unmap:
-    nvme_sg_unmap(&req->sg);
+    nvme_sg_unmap(sg);
     return status;
 }
 
@@ -641,7 +641,7 @@ unmap:
  */
 static uint16_t nvme_map_sgl_data(NvmeCtrl *n, NvmeSg *sg,
                                   NvmeSglDescriptor *segment, uint64_t nsgld,
-                                  size_t *len, NvmeRequest *req)
+                                  size_t *len, NvmeCmd *cmd)
 {
     dma_addr_t addr, trans_len;
     uint32_t dlen;
@@ -652,7 +652,7 @@ static uint16_t nvme_map_sgl_data(NvmeCtrl *n, NvmeSg *sg,
 
         switch (type) {
         case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
-            if (req->cmd.opcode == NVME_CMD_WRITE) {
+            if (cmd->opcode == NVME_CMD_WRITE) {
                 continue;
             }
         case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
@@ -681,7 +681,7 @@ static uint16_t nvme_map_sgl_data(NvmeCtrl *n, NvmeSg *sg,
                 break;
             }
 
-            trace_pci_nvme_err_invalid_sgl_excess_length(nvme_cid(req));
+            trace_pci_nvme_err_invalid_sgl_excess_length(dlen);
             return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
         }
 
@@ -710,7 +710,7 @@ next:
 }
 
 static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl,
-                             size_t len, NvmeRequest *req)
+                             size_t len, NvmeCmd *cmd)
 {
     /*
      * Read the segment in chunks of 256 descriptors (one 4k page) to avoid
@@ -731,7 +731,7 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl,
     sgld = &sgl;
     addr = le64_to_cpu(sgl.addr);
 
-    trace_pci_nvme_map_sgl(nvme_cid(req), NVME_SGL_TYPE(sgl.type), len);
+    trace_pci_nvme_map_sgl(NVME_SGL_TYPE(sgl.type), len);
 
     nvme_sg_init(n, sg, nvme_addr_is_dma(n, addr));
 
@@ -740,7 +740,7 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl,
      * be mapped directly.
      */
     if (NVME_SGL_TYPE(sgl.type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) {
-        status = nvme_map_sgl_data(n, sg, sgld, 1, &len, req);
+        status = nvme_map_sgl_data(n, sg, sgld, 1, &len, cmd);
         if (status) {
             goto unmap;
         }
@@ -779,7 +779,7 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl,
             }
 
             status = nvme_map_sgl_data(n, sg, segment, SEG_CHUNK_SIZE,
-                                       &len, req);
+                                       &len, cmd);
             if (status) {
                 goto unmap;
             }
@@ -805,7 +805,7 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl,
         switch (NVME_SGL_TYPE(last_sgld->type)) {
         case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
         case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
-            status = nvme_map_sgl_data(n, sg, segment, nsgld, &len, req);
+            status = nvme_map_sgl_data(n, sg, segment, nsgld, &len, cmd);
             if (status) {
                 goto unmap;
             }
@@ -832,7 +832,7 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl,
          * Do not map the last descriptor; it will be a Segment or Last Segment
          * descriptor and is handled by the next iteration.
          */
-        status = nvme_map_sgl_data(n, sg, segment, nsgld - 1, &len, req);
+        status = nvme_map_sgl_data(n, sg, segment, nsgld - 1, &len, cmd);
         if (status) {
             goto unmap;
         }
@@ -852,24 +852,20 @@ unmap:
     return status;
 }
 
-static uint16_t nvme_map_dptr(NvmeCtrl *n, size_t len, NvmeRequest *req)
+static uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len,
+                              NvmeCmd *cmd)
 {
     uint64_t prp1, prp2;
 
-    switch (NVME_CMD_FLAGS_PSDT(req->cmd.flags)) {
+    switch (NVME_CMD_FLAGS_PSDT(cmd->flags)) {
     case NVME_PSDT_PRP:
-        prp1 = le64_to_cpu(req->cmd.dptr.prp1);
-        prp2 = le64_to_cpu(req->cmd.dptr.prp2);
+        prp1 = le64_to_cpu(cmd->dptr.prp1);
+        prp2 = le64_to_cpu(cmd->dptr.prp2);
 
-        return nvme_map_prp(n, prp1, prp2, len, req);
+        return nvme_map_prp(n, sg, prp1, prp2, len);
     case NVME_PSDT_SGL_MPTR_CONTIGUOUS:
     case NVME_PSDT_SGL_MPTR_SGL:
-        /* SGLs shall not be used for Admin commands in NVMe over PCIe */
-        if (!req->sq->sqid) {
-            return NVME_INVALID_FIELD | NVME_DNR;
-        }
-
-        return nvme_map_sgl(n, &req->sg, req->cmd.dptr.sgl, len, req);
+        return nvme_map_sgl(n, sg, cmd->dptr.sgl, len, cmd);
     default:
         return NVME_INVALID_FIELD;
     }
@@ -880,7 +876,7 @@ static uint16_t nvme_dma(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
 {
     uint16_t status = NVME_SUCCESS;
 
-    status = nvme_map_dptr(n, len, req);
+    status = nvme_map_dptr(n, &req->sg, len, &req->cmd);
     if (status) {
         return status;
     }
@@ -2096,7 +2092,7 @@ static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req)
         }
     }
 
-    status = nvme_map_dptr(n, data_size, req);
+    status = nvme_map_dptr(n, &req->sg, data_size, &req->cmd);
     if (status) {
         goto invalid;
     }
@@ -2185,7 +2181,7 @@ static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append,
     data_offset = nvme_l2b(ns, slba);
 
     if (!wrz) {
-        status = nvme_map_dptr(n, data_size, req);
+        status = nvme_map_dptr(n, &req->sg, data_size, &req->cmd);
         if (status) {
             goto invalid;
         }
@@ -3867,6 +3863,11 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
         return NVME_INVALID_OPCODE | NVME_DNR;
     }
 
+    /* SGLs shall not be used for Admin commands in NVMe over PCIe */
+    if (NVME_CMD_FLAGS_PSDT(req->cmd.flags) != NVME_PSDT_PRP) {
+        return NVME_INVALID_FIELD | NVME_DNR;
+    }
+
     switch (req->cmd.opcode) {
     case NVME_ADM_CMD_DELETE_SQ:
         return nvme_del_sq(n, req);
diff --git a/hw/block/trace-events b/hw/block/trace-events
index 8deeacc..60a076c 100644
--- a/hw/block/trace-events
+++ b/hw/block/trace-events
@@ -37,7 +37,7 @@ pci_nvme_dma_read(uint64_t prp1, uint64_t prp2) "DMA read, prp1=0x%"PRIx64" prp2
 pci_nvme_map_addr(uint64_t addr, uint64_t len) "addr 0x%"PRIx64" len %"PRIu64""
 pci_nvme_map_addr_cmb(uint64_t addr, uint64_t len) "addr 0x%"PRIx64" len %"PRIu64""
 pci_nvme_map_prp(uint64_t trans_len, uint32_t len, uint64_t prp1, uint64_t prp2, int num_prps) "trans_len %"PRIu64" len %"PRIu32" prp1 0x%"PRIx64" prp2 0x%"PRIx64" num_prps %d"
-pci_nvme_map_sgl(uint16_t cid, uint8_t typ, uint64_t len) "cid %"PRIu16" type 0x%"PRIx8" len %"PRIu64""
+pci_nvme_map_sgl(uint8_t typ, uint64_t len) "type 0x%"PRIx8" len %"PRIu64""
 pci_nvme_io_cmd(uint16_t cid, uint32_t nsid, uint16_t sqid, uint8_t opcode, const char *opname) "cid %"PRIu16" nsid %"PRIu32" sqid %"PRIu16" opc 0x%"PRIx8" opname '%s'"
 pci_nvme_admin_cmd(uint16_t cid, uint16_t sqid, uint8_t opcode, const char *opname) "cid %"PRIu16" sqid %"PRIu16" opc 0x%"PRIx8" opname '%s'"
 pci_nvme_flush(uint16_t cid, uint32_t nsid) "cid %"PRIu16" nsid %"PRIu32""
@@ -126,7 +126,7 @@ pci_nvme_err_aio(uint16_t cid, const char *errname, uint16_t status) "cid %"PRIu
 pci_nvme_err_copy_invalid_format(uint8_t format) "format 0x%"PRIx8""
 pci_nvme_err_invalid_sgld(uint16_t cid, uint8_t typ) "cid %"PRIu16" type 0x%"PRIx8""
 pci_nvme_err_invalid_num_sgld(uint16_t cid, uint8_t typ) "cid %"PRIu16" type 0x%"PRIx8""
-pci_nvme_err_invalid_sgl_excess_length(uint16_t cid) "cid %"PRIu16""
+pci_nvme_err_invalid_sgl_excess_length(uint32_t residual) "residual %"PRIu32""
 pci_nvme_err_invalid_dma(void) "PRP/SGL is too small for transfer size"
 pci_nvme_err_invalid_prplist_ent(uint64_t prplist) "PRP list entry is not page aligned: 0x%"PRIx64""
 pci_nvme_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PRIx64""