author     Stefan Hajnoczi <stefanha@redhat.com>    2025-05-09 12:04:19 -0400
committer  Stefan Hajnoczi <stefanha@redhat.com>    2025-05-09 12:04:19 -0400
commit     6b18f6e34246ae62aefcaaa56585f14cd1a15295
tree       ab0cbbf043b653856dfd598444fea0afd810aab4
parent     f9bb7e53a341d08fd4ec8d7e810ebfd4f6f936bd
parent     624379be3a8136db420ec3a3fee36fe91e9f789e
Merge tag 'block-pull-request' of https://gitlab.com/stefanha/qemu into staging
Pull request

Farhan Ali's s390x host PCI support for the block/nvme.c driver.

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCgAdFiEEhpWov9P5fNqsNXdanKSrs4Grc8gFAmgcviUACgkQnKSrs4Gr
# c8hRswgAupxH5Zhx50F7GzwZyu9TCF2sphEPd2VuFVxze8Sg6mXnJq5BFTjv9IuC
# 0trPppfDyKFKujDk+FA3pl9bT45btm0xctNbFYNRS3HXrVUyMQLy73MlFF2twa5g
# U3uiX2d7DAYOdi5O1Cn3bhlByDh4qSko7YyUDFKio+WU57cdJxEd+pUqwyVXrU3E
# AMC2ZmJdKFGGC+tWxBIAuWNc5apq9yzbiywR8z62/Z2IC+Bym0RpvCbdklqcZb8O
# tpGxDKN8bY6s+hy1NZmA8eBA/iCiu6SUFmNpoe2vSwCFEk9R3gi+UNcuTVt3FaWO
# lgzoZSOelmI3JkF0UBqvKsPXt3fdJw==
# =KII7
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 08 May 2025 10:22:29 EDT
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [ultimate]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [ultimate]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* tag 'block-pull-request' of https://gitlab.com/stefanha/qemu:
  block/nvme: Use host PCI MMIO API
  include: Add a header to define host PCI MMIO functions
  util: Add functions for s390x mmio read/write

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
-rw-r--r--  block/nvme.c                   |  41
-rw-r--r--  include/qemu/host-pci-mmio.h   | 136
-rw-r--r--  include/qemu/s390x_pci_mmio.h  |  24
-rw-r--r--  util/meson.build               |   2
-rw-r--r--  util/s390x_pci_mmio.c          | 146
5 files changed, 331 insertions(+), 18 deletions(-)
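
Note on the block/nvme.c hunks below: they all follow one mechanical pattern. A volatile dereference of a VFIO-mapped BAR plus an explicit cpu_to_le32()/le32_to_cpu() swap becomes a single host_pci_*_le_p() call, which performs the byte swap itself and, on s390x, routes the access through PCI load/store instructions or the mmio syscalls. A minimal sketch of the pattern, assuming a uint32_t *doorbell pointer into a mapped BAR (the ring_doorbell helper is illustrative, not part of the patch):

    #include "qemu/host-pci-mmio.h"

    /* Old style: *doorbell = cpu_to_le32(tail);   (volatile BAR pointer) */
    /* New style: the accessor handles byte order and host-specific quirks. */
    static void ring_doorbell(uint32_t *doorbell, uint32_t tail)
    {
        host_pci_stl_le_p(doorbell, tail);
    }
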
diff --git a/block/nvme.c b/block/nvme.c
index bbf7c23..8df53ee 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -18,6 +18,7 @@
#include "qobject/qstring.h"
#include "qemu/defer-call.h"
#include "qemu/error-report.h"
+#include "qemu/host-pci-mmio.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
@@ -60,7 +61,7 @@ typedef struct {
uint8_t *queue;
uint64_t iova;
/* Hardware MMIO register */
- volatile uint32_t *doorbell;
+ uint32_t *doorbell;
} NVMeQueue;
typedef struct {
@@ -100,7 +101,7 @@ struct BDRVNVMeState {
QEMUVFIOState *vfio;
void *bar0_wo_map;
/* Memory mapped registers */
- volatile struct {
+ struct {
uint32_t sq_tail;
uint32_t cq_head;
} *doorbells;
@@ -292,7 +293,7 @@ static void nvme_kick(NVMeQueuePair *q)
assert(!(q->sq.tail & 0xFF00));
/* Fence the write to submission queue entry before notifying the device. */
smp_wmb();
- *q->sq.doorbell = cpu_to_le32(q->sq.tail);
+ host_pci_stl_le_p(q->sq.doorbell, q->sq.tail);
q->inflight += q->need_kick;
q->need_kick = 0;
}
@@ -441,7 +442,7 @@ static bool nvme_process_completion(NVMeQueuePair *q)
if (progress) {
/* Notify the device so it can post more completions. */
smp_mb_release();
- *q->cq.doorbell = cpu_to_le32(q->cq.head);
+ host_pci_stl_le_p(q->cq.doorbell, q->cq.head);
nvme_wake_free_req_locked(q);
}
@@ -460,7 +461,7 @@ static void nvme_process_completion_bh(void *opaque)
* so notify the device that it has space to fill in more completions now.
*/
smp_mb_release();
- *q->cq.doorbell = cpu_to_le32(q->cq.head);
+ host_pci_stl_le_p(q->cq.doorbell, q->cq.head);
nvme_wake_free_req_locked(q);
nvme_process_completion(q);
@@ -749,9 +750,10 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
int ret;
uint64_t cap;
uint32_t ver;
+ uint32_t cc;
uint64_t timeout_ms;
uint64_t deadline, now;
- volatile NvmeBar *regs = NULL;
+ NvmeBar *regs = NULL;
qemu_co_mutex_init(&s->dma_map_lock);
qemu_co_queue_init(&s->dma_flush_queue);
@@ -779,7 +781,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
/* Perform initialize sequence as described in NVMe spec "7.6.1
* Initialization". */
- cap = le64_to_cpu(regs->cap);
+ cap = host_pci_ldq_le_p(&regs->cap);
trace_nvme_controller_capability_raw(cap);
trace_nvme_controller_capability("Maximum Queue Entries Supported",
1 + NVME_CAP_MQES(cap));
@@ -805,16 +807,17 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
bs->bl.request_alignment = s->page_size;
timeout_ms = MIN(500 * NVME_CAP_TO(cap), 30000);
- ver = le32_to_cpu(regs->vs);
+ ver = host_pci_ldl_le_p(&regs->vs);
trace_nvme_controller_spec_version(extract32(ver, 16, 16),
extract32(ver, 8, 8),
extract32(ver, 0, 8));
/* Reset device to get a clean state. */
- regs->cc = cpu_to_le32(le32_to_cpu(regs->cc) & 0xFE);
+ cc = host_pci_ldl_le_p(&regs->cc);
+ host_pci_stl_le_p(&regs->cc, cc & 0xFE);
/* Wait for CSTS.RDY = 0. */
deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS;
- while (NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
+ while (NVME_CSTS_RDY(host_pci_ldl_le_p(&regs->csts))) {
if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
error_setg(errp, "Timeout while waiting for device to reset (%"
PRId64 " ms)",
@@ -843,19 +846,21 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
s->queues[INDEX_ADMIN] = q;
s->queue_count = 1;
QEMU_BUILD_BUG_ON((NVME_QUEUE_SIZE - 1) & 0xF000);
- regs->aqa = cpu_to_le32(((NVME_QUEUE_SIZE - 1) << AQA_ACQS_SHIFT) |
- ((NVME_QUEUE_SIZE - 1) << AQA_ASQS_SHIFT));
- regs->asq = cpu_to_le64(q->sq.iova);
- regs->acq = cpu_to_le64(q->cq.iova);
+ host_pci_stl_le_p(&regs->aqa,
+ ((NVME_QUEUE_SIZE - 1) << AQA_ACQS_SHIFT) |
+ ((NVME_QUEUE_SIZE - 1) << AQA_ASQS_SHIFT));
+ host_pci_stq_le_p(&regs->asq, q->sq.iova);
+ host_pci_stq_le_p(&regs->acq, q->cq.iova);
/* After setting up all control registers we can enable device now. */
- regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) |
- (ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) |
- CC_EN_MASK);
+ host_pci_stl_le_p(&regs->cc,
+ (ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) |
+ (ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) |
+ CC_EN_MASK);
/* Wait for CSTS.RDY = 1. */
now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
deadline = now + timeout_ms * SCALE_MS;
- while (!NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
+ while (!NVME_CSTS_RDY(host_pci_ldl_le_p(&regs->csts))) {
if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
error_setg(errp, "Timeout while waiting for device to start (%"
PRId64 " ms)",
diff --git a/include/qemu/host-pci-mmio.h b/include/qemu/host-pci-mmio.h
new file mode 100644
index 0000000..a8ed993
--- /dev/null
+++ b/include/qemu/host-pci-mmio.h
@@ -0,0 +1,136 @@
+/*
+ * API for host PCI MMIO accesses (e.g. Linux VFIO BARs)
+ *
+ * Copyright 2025 IBM Corp.
+ * Author(s): Farhan Ali <alifm@linux.ibm.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HOST_PCI_MMIO_H
+#define HOST_PCI_MMIO_H
+
+#include "qemu/bswap.h"
+#include "qemu/s390x_pci_mmio.h"
+
+static inline uint8_t host_pci_ldub_p(const void *ioaddr)
+{
+ uint8_t ret = 0;
+#ifdef __s390x__
+ ret = s390x_pci_mmio_read_8(ioaddr);
+#else
+ ret = ldub_p(ioaddr);
+#endif
+
+ return ret;
+}
+
+static inline uint16_t host_pci_lduw_le_p(const void *ioaddr)
+{
+ uint16_t ret = 0;
+#ifdef __s390x__
+ ret = le16_to_cpu(s390x_pci_mmio_read_16(ioaddr));
+#else
+ ret = lduw_le_p(ioaddr);
+#endif
+
+ return ret;
+}
+
+static inline uint32_t host_pci_ldl_le_p(const void *ioaddr)
+{
+ uint32_t ret = 0;
+#ifdef __s390x__
+ ret = le32_to_cpu(s390x_pci_mmio_read_32(ioaddr));
+#else
+ ret = ldl_le_p(ioaddr);
+#endif
+
+ return ret;
+}
+
+static inline uint64_t host_pci_ldq_le_p(const void *ioaddr)
+{
+ uint64_t ret = 0;
+#ifdef __s390x__
+ ret = le64_to_cpu(s390x_pci_mmio_read_64(ioaddr));
+#else
+ ret = ldq_le_p(ioaddr);
+#endif
+
+ return ret;
+}
+
+static inline void host_pci_stb_p(void *ioaddr, uint8_t val)
+{
+#ifdef __s390x__
+ s390x_pci_mmio_write_8(ioaddr, val);
+#else
+ stb_p(ioaddr, val);
+#endif
+}
+
+static inline void host_pci_stw_le_p(void *ioaddr, uint16_t val)
+{
+#ifdef __s390x__
+ s390x_pci_mmio_write_16(ioaddr, cpu_to_le16(val));
+#else
+ stw_le_p(ioaddr, val);
+#endif
+}
+
+static inline void host_pci_stl_le_p(void *ioaddr, uint32_t val)
+{
+#ifdef __s390x__
+ s390x_pci_mmio_write_32(ioaddr, cpu_to_le32(val));
+#else
+ stl_le_p(ioaddr, val);
+#endif
+}
+
+static inline void host_pci_stq_le_p(void *ioaddr, uint64_t val)
+{
+#ifdef __s390x__
+ s390x_pci_mmio_write_64(ioaddr, cpu_to_le64(val));
+#else
+ stq_le_p(ioaddr, val);
+#endif
+}
+
+static inline uint64_t host_pci_ldn_le_p(const void *ioaddr, int sz)
+{
+ switch (sz) {
+ case 1:
+ return host_pci_ldub_p(ioaddr);
+ case 2:
+ return host_pci_lduw_le_p(ioaddr);
+ case 4:
+ return host_pci_ldl_le_p(ioaddr);
+ case 8:
+ return host_pci_ldq_le_p(ioaddr);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static inline void host_pci_stn_le_p(void *ioaddr, int sz, uint64_t v)
+{
+ switch (sz) {
+ case 1:
+ host_pci_stb_p(ioaddr, v);
+ break;
+ case 2:
+ host_pci_stw_le_p(ioaddr, v);
+ break;
+ case 4:
+ host_pci_stl_le_p(ioaddr, v);
+ break;
+ case 8:
+ host_pci_stq_le_p(ioaddr, v);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+#endif
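
The size-dispatch helpers at the end of this header mirror ldn_le_p()/stn_le_p() from qemu/bswap.h: the caller passes an access size in bytes and the helper forwards to the fixed-width accessor. A hedged usage sketch, assuming regs points into a little-endian register block in a mapped BAR (reg_read is illustrative only):

    /* Illustrative only: read a 1-, 2-, 4- or 8-byte little-endian register. */
    static uint64_t reg_read(const void *regs, size_t offset, int size)
    {
        return host_pci_ldn_le_p((const char *)regs + offset, size);
    }
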
diff --git a/include/qemu/s390x_pci_mmio.h b/include/qemu/s390x_pci_mmio.h
new file mode 100644
index 0000000..c5f63ec
--- /dev/null
+++ b/include/qemu/s390x_pci_mmio.h
@@ -0,0 +1,24 @@
+/*
+ * s390x PCI MMIO definitions
+ *
+ * Copyright 2025 IBM Corp.
+ * Author(s): Farhan Ali <alifm@linux.ibm.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef S390X_PCI_MMIO_H
+#define S390X_PCI_MMIO_H
+
+#ifdef __s390x__
+uint8_t s390x_pci_mmio_read_8(const void *ioaddr);
+uint16_t s390x_pci_mmio_read_16(const void *ioaddr);
+uint32_t s390x_pci_mmio_read_32(const void *ioaddr);
+uint64_t s390x_pci_mmio_read_64(const void *ioaddr);
+
+void s390x_pci_mmio_write_8(void *ioaddr, uint8_t val);
+void s390x_pci_mmio_write_16(void *ioaddr, uint16_t val);
+void s390x_pci_mmio_write_32(void *ioaddr, uint32_t val);
+void s390x_pci_mmio_write_64(void *ioaddr, uint64_t val);
+#endif /* __s390x__ */
+
+#endif /* S390X_PCI_MMIO_H */
diff --git a/util/meson.build b/util/meson.build
index e5cd327..1adff96 100644
--- a/util/meson.build
+++ b/util/meson.build
@@ -133,4 +133,6 @@ elif cpu in ['ppc', 'ppc64']
util_ss.add(files('cpuinfo-ppc.c'))
elif cpu in ['riscv32', 'riscv64']
util_ss.add(files('cpuinfo-riscv.c'))
+elif cpu == 's390x'
+ util_ss.add(files('s390x_pci_mmio.c'))
endif
diff --git a/util/s390x_pci_mmio.c b/util/s390x_pci_mmio.c
new file mode 100644
index 0000000..5ab24fa
--- /dev/null
+++ b/util/s390x_pci_mmio.c
@@ -0,0 +1,146 @@
+/*
+ * s390x PCI MMIO definitions
+ *
+ * Copyright 2025 IBM Corp.
+ * Author(s): Farhan Ali <alifm@linux.ibm.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include <sys/syscall.h>
+#include "qemu/s390x_pci_mmio.h"
+#include "elf.h"
+
+union register_pair {
+ unsigned __int128 pair;
+ struct {
+ uint64_t even;
+ uint64_t odd;
+ };
+};
+
+static bool is_mio_supported;
+
+static __attribute__((constructor)) void check_is_mio_supported(void)
+{
+ is_mio_supported = !!(qemu_getauxval(AT_HWCAP) & HWCAP_S390_PCI_MIO);
+}
+
+static uint64_t s390x_pcilgi(const void *ioaddr, size_t len)
+{
+ union register_pair ioaddr_len = { .even = (uint64_t)ioaddr,
+ .odd = len };
+ uint64_t val;
+ int cc;
+
+ asm volatile(
+ /* pcilgi */
+ ".insn rre,0xb9d60000,%[val],%[ioaddr_len]\n"
+ "ipm %[cc]\n"
+ "srl %[cc],28\n"
+ : [cc] "=d"(cc), [val] "=d"(val),
+ [ioaddr_len] "+d"(ioaddr_len.pair) :: "cc");
+
+ if (cc) {
+ val = -1ULL;
+ }
+
+ return val;
+}
+
+static void s390x_pcistgi(void *ioaddr, uint64_t val, size_t len)
+{
+ union register_pair ioaddr_len = {.even = (uint64_t)ioaddr, .odd = len};
+
+ asm volatile (
+ /* pcistgi */
+ ".insn rre,0xb9d40000,%[val],%[ioaddr_len]\n"
+ : [ioaddr_len] "+d" (ioaddr_len.pair)
+ : [val] "d" (val)
+ : "cc", "memory");
+}
+
+uint8_t s390x_pci_mmio_read_8(const void *ioaddr)
+{
+ uint8_t val = 0;
+
+ if (is_mio_supported) {
+ val = s390x_pcilgi(ioaddr, sizeof(val));
+ } else {
+ syscall(__NR_s390_pci_mmio_read, ioaddr, &val, sizeof(val));
+ }
+ return val;
+}
+
+uint16_t s390x_pci_mmio_read_16(const void *ioaddr)
+{
+ uint16_t val = 0;
+
+ if (is_mio_supported) {
+ val = s390x_pcilgi(ioaddr, sizeof(val));
+ } else {
+ syscall(__NR_s390_pci_mmio_read, ioaddr, &val, sizeof(val));
+ }
+ return val;
+}
+
+uint32_t s390x_pci_mmio_read_32(const void *ioaddr)
+{
+ uint32_t val = 0;
+
+ if (is_mio_supported) {
+ val = s390x_pcilgi(ioaddr, sizeof(val));
+ } else {
+ syscall(__NR_s390_pci_mmio_read, ioaddr, &val, sizeof(val));
+ }
+ return val;
+}
+
+uint64_t s390x_pci_mmio_read_64(const void *ioaddr)
+{
+ uint64_t val = 0;
+
+ if (is_mio_supported) {
+ val = s390x_pcilgi(ioaddr, sizeof(val));
+ } else {
+ syscall(__NR_s390_pci_mmio_read, ioaddr, &val, sizeof(val));
+ }
+ return val;
+}
+
+void s390x_pci_mmio_write_8(void *ioaddr, uint8_t val)
+{
+ if (is_mio_supported) {
+ s390x_pcistgi(ioaddr, val, sizeof(val));
+ } else {
+ syscall(__NR_s390_pci_mmio_write, ioaddr, &val, sizeof(val));
+ }
+}
+
+void s390x_pci_mmio_write_16(void *ioaddr, uint16_t val)
+{
+ if (is_mio_supported) {
+ s390x_pcistgi(ioaddr, val, sizeof(val));
+ } else {
+ syscall(__NR_s390_pci_mmio_write, ioaddr, &val, sizeof(val));
+ }
+}
+
+void s390x_pci_mmio_write_32(void *ioaddr, uint32_t val)
+{
+ if (is_mio_supported) {
+ s390x_pcistgi(ioaddr, val, sizeof(val));
+ } else {
+ syscall(__NR_s390_pci_mmio_write, ioaddr, &val, sizeof(val));
+ }
+}
+
+void s390x_pci_mmio_write_64(void *ioaddr, uint64_t val)
+{
+ if (is_mio_supported) {
+ s390x_pcistgi(ioaddr, val, sizeof(val));
+ } else {
+ syscall(__NR_s390_pci_mmio_write, ioaddr, &val, sizeof(val));
+ }
+}
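
util/s390x_pci_mmio.c picks between two mechanisms at startup: if AT_HWCAP advertises HWCAP_S390_PCI_MIO, the accessors issue the PCI load/store instructions (pcilgi/pcistgi) directly; otherwise they fall back to the s390_pci_mmio_read/s390_pci_mmio_write syscalls, which copy between a user buffer and the mapped BAR inside the kernel. A standalone sketch of the fallback path only (s390x Linux, error handling omitted; mmio_read32_fallback is illustrative and not part of the patch):

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    /* Read 4 bytes from a VFIO-mapped BAR via the kernel helper syscall. */
    static uint32_t mmio_read32_fallback(const void *ioaddr)
    {
        uint32_t val = 0;

        syscall(__NR_s390_pci_mmio_read, ioaddr, &val, sizeof(val));
        return val;
    }
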