author     Klaus Jensen <k.jensen@samsung.com>   2019-10-11 08:32:00 +0200
committer  Klaus Jensen <k.jensen@samsung.com>   2020-10-27 07:24:46 +0100
commit     94cbcf530f9d9789b75ef1ae227ba4a4f702bf7d
tree       3727b9a0229d96a132ccab13e05644ebe036b5ec
parent     195cc354696d75e9625cf303a0791404b3215501
hw/block/nvme: handle dma errors
Handling DMA errors gracefully is required for the device to pass the
block/011 test ("disable PCI device while doing I/O") in the blktests
suite.

With this patch the device sets the Controller Fatal Status bit in the
CSTS register when it fails to read from a submission queue or write to
a completion queue, expecting the host to reset the controller.

If DMA errors occur at any other point in the execution of the command
(say, while mapping the PRPs), the command is aborted with a Data
Transfer Error status code.

Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
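As a rough, standalone illustration of the two failure classes the
message describes, consider the sketch below. All names in it (Ctrl,
dma_read, CSTS_FAILED, DATA_TRAS_ERROR) are simplified stand-ins
invented for this example, not the QEMU API:

    /*
     * Minimal standalone sketch of the two failure classes described
     * above. All names here are simplified stand-ins for illustration,
     * not the QEMU implementation.
     */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CSTS_FAILED     0x2 /* stand-in for the CFS bit in CSTS */
    #define DATA_TRAS_ERROR 0x4 /* stand-in for the NVMe status code */

    typedef struct {
        uint32_t csts;
    } Ctrl;

    /* Stand-in for pci_dma_read(): 0 on success, nonzero on DMA error. */
    static int dma_read(uint64_t addr, void *buf, size_t len, int fail)
    {
        (void)addr;
        (void)buf;
        (void)len;
        return fail ? -1 : 0;
    }

    /*
     * Failure class 1: a DMA error while mapping the data transfer
     * (e.g. reading a PRP list) only fails the command; the error is
     * reported to the host as a per-command status code.
     */
    static uint16_t map_prp(uint64_t prp, int fail)
    {
        uint64_t prp_list[8];

        if (dma_read(prp, prp_list, sizeof(prp_list), fail)) {
            return DATA_TRAS_ERROR;
        }
        return 0; /* success */
    }

    /*
     * Failure class 2: failing to fetch a submission queue entry (or,
     * symmetrically, to post a completion queue entry) is fatal; set
     * the Controller Fatal Status bit and stop processing, expecting
     * the host to notice and reset the controller.
     */
    static void process_sq(Ctrl *n, uint64_t sq_addr, int fail)
    {
        uint8_t sqe[64];

        if (dma_read(sq_addr, sqe, sizeof(sqe), fail)) {
            n->csts |= CSTS_FAILED;
            return;
        }
        /* ... decode and execute the command ... */
    }

    int main(void)
    {
        Ctrl n = { .csts = 0 };

        printf("prp map status: 0x%x\n", map_prp(0x1000, 1));
        process_sq(&n, 0x2000, 1);
        printf("csts after sq fetch failure: 0x%x\n", n.csts);
        return 0;
    }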
-rw-r--r--   hw/block/nvme.c         41
-rw-r--r--   hw/block/trace-events    3
2 files changed, 34 insertions(+), 10 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 44fa5b9..7d328c0 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -140,14 +140,14 @@ static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
     return &n->cmbuf[addr - n->ctrl_mem.addr];
 }
 
-static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
+static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
 {
     if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr)) {
         memcpy(buf, nvme_addr_to_cmb(n, addr), size);
-        return;
+        return 0;
     }
 
-    pci_dma_read(&n->parent_obj, addr, buf, size);
+    return pci_dma_read(&n->parent_obj, addr, buf, size);
 }
 
 static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
@@ -307,6 +307,7 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
     int num_prps = (len >> n->page_bits) + 1;
     uint16_t status;
     bool prp_list_in_cmb = false;
+    int ret;
 
     QEMUSGList *qsg = &req->qsg;
     QEMUIOVector *iov = &req->iov;
@@ -347,7 +348,11 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
 
             nents = (len + n->page_size - 1) >> n->page_bits;
             prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
-            nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
+            ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
+            if (ret) {
+                trace_pci_nvme_err_addr_read(prp2);
+                return NVME_DATA_TRAS_ERROR;
+            }
             while (len != 0) {
                 uint64_t prp_ent = le64_to_cpu(prp_list[i]);
 
@@ -364,8 +369,12 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
 
                     i = 0;
                     nents = (len + n->page_size - 1) >> n->page_bits;
                     prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
-                    nvme_addr_read(n, prp_ent, (void *)prp_list,
-                                   prp_trans);
+                    ret = nvme_addr_read(n, prp_ent, (void *)prp_list,
+                                         prp_trans);
+                    if (ret) {
+                        trace_pci_nvme_err_addr_read(prp_ent);
+                        return NVME_DATA_TRAS_ERROR;
+                    }
                     prp_ent = le64_to_cpu(prp_list[i]);
                 }
@@ -457,6 +466,7 @@ static void nvme_post_cqes(void *opaque)
     NvmeCQueue *cq = opaque;
     NvmeCtrl *n = cq->ctrl;
     NvmeRequest *req, *next;
+    int ret;
 
     QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
         NvmeSQueue *sq;
@@ -466,15 +476,21 @@ static void nvme_post_cqes(void *opaque)
             break;
         }
 
-        QTAILQ_REMOVE(&cq->req_list, req, entry);
         sq = req->sq;
         req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
         req->cqe.sq_id = cpu_to_le16(sq->sqid);
         req->cqe.sq_head = cpu_to_le16(sq->head);
         addr = cq->dma_addr + cq->tail * n->cqe_size;
+        ret = pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
+                            sizeof(req->cqe));
+        if (ret) {
+            trace_pci_nvme_err_addr_write(addr);
+            trace_pci_nvme_err_cfs();
+            n->bar.csts = NVME_CSTS_FAILED;
+            break;
+        }
+        QTAILQ_REMOVE(&cq->req_list, req, entry);
         nvme_inc_cq_tail(cq);
-        pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
-                      sizeof(req->cqe));
         nvme_req_exit(req);
         QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
     }
@@ -1606,7 +1622,12 @@ static void nvme_process_sq(void *opaque)
 
     while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
         addr = sq->dma_addr + sq->head * n->sqe_size;
-        nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd));
+        if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
+            trace_pci_nvme_err_addr_read(addr);
+            trace_pci_nvme_err_cfs();
+            n->bar.csts = NVME_CSTS_FAILED;
+            break;
+        }
         nvme_inc_sq_head(sq);
 
         req = QTAILQ_FIRST(&sq->req_list);
diff --git a/hw/block/trace-events b/hw/block/trace-events
index 8ff4cbc..5589db4 100644
--- a/hw/block/trace-events
+++ b/hw/block/trace-events
@@ -86,6 +86,9 @@ pci_nvme_mmio_shutdown_cleared(void) "shutdown bit cleared"
 
 # nvme traces for error conditions
 pci_nvme_err_mdts(uint16_t cid, size_t len) "cid %"PRIu16" len %zu"
+pci_nvme_err_addr_read(uint64_t addr) "addr 0x%"PRIx64""
+pci_nvme_err_addr_write(uint64_t addr) "addr 0x%"PRIx64""
+pci_nvme_err_cfs(void) "controller fatal status"
 pci_nvme_err_invalid_dma(void) "PRP/SGL is too small for transfer size"
 pci_nvme_err_invalid_prplist_ent(uint64_t prplist) "PRP list entry is null or not page aligned: 0x%"PRIx64""
 pci_nvme_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PRIx64""
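
The added events follow the existing pci_nvme_err_* naming, so they can
be enabled alongside the other error traces at run time. For example,
with a tracing-enabled build (remaining device and drive options
elided):

    qemu-system-x86_64 -trace "pci_nvme_err_*" ...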