aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--MAINTAINERS8
-rw-r--r--backends/cryptodev-builtin.c9
-rw-r--r--docs/interop/vhost-user.rst60
-rw-r--r--docs/system/devices/virtio/vhost-user.rst3
-rw-r--r--hw/acpi/ghes-stub.c6
-rw-r--r--hw/acpi/ghes.c45
-rw-r--r--hw/block/vhost-user-blk.c27
-rw-r--r--hw/cxl/cxl-mailbox-utils.c11
-rw-r--r--hw/display/virtio-dmabuf.c6
-rw-r--r--hw/display/virtio-gpu-virgl.c8
-rw-r--r--hw/display/virtio-gpu.c4
-rw-r--r--hw/i386/intel_iommu.c204
-rw-r--r--hw/i386/intel_iommu_internal.h14
-rw-r--r--hw/pci-host/articia.c1
-rw-r--r--hw/pci-host/aspeed_pcie.c1
-rw-r--r--hw/pci-host/designware.c3
-rw-r--r--hw/pci-host/gpex.c2
-rw-r--r--hw/pci-host/grackle.c1
-rw-r--r--hw/pci-host/gt64120.c1
-rw-r--r--hw/pci-host/mv64361.c1
-rw-r--r--hw/pci-host/pnv_phb.c1
-rw-r--r--hw/pci-host/ppce500.c1
-rw-r--r--hw/pci-host/q35.c26
-rw-r--r--hw/pci-host/raven.c1
-rw-r--r--hw/pci-host/remote.c1
-rw-r--r--hw/pci-host/sabre.c1
-rw-r--r--hw/pci-host/uninorth.c4
-rw-r--r--hw/pci-host/xilinx-pcie.c2
-rw-r--r--hw/pci/pci.c4
-rw-r--r--hw/pci/pci_host.c1
-rw-r--r--hw/pci/pcie.c21
-rw-r--r--hw/pci/pcie_sriov.c9
-rw-r--r--hw/pci/shpc.c1
-rw-r--r--hw/virtio/Kconfig5
-rw-r--r--hw/virtio/meson.build3
-rw-r--r--hw/virtio/vhost-shadow-virtqueue.c1
-rw-r--r--hw/virtio/vhost-user-spi-pci.c69
-rw-r--r--hw/virtio/vhost-user-spi.c65
-rw-r--r--hw/virtio/vhost-user.c7
-rw-r--r--hw/virtio/vhost-vdpa.c6
-rw-r--r--hw/virtio/vhost.c66
-rw-r--r--hw/virtio/virtio-crypto.c7
-rw-r--r--hw/virtio/virtio-pci.c20
-rw-r--r--hw/virtio/virtio-pmem.c1
-rw-r--r--hw/virtio/virtio.c4
-rw-r--r--include/hw/acpi/ghes.h6
-rw-r--r--include/hw/i386/intel_iommu.h1
-rw-r--r--include/hw/pci/pcie.h1
-rw-r--r--include/hw/virtio/vhost-user-blk.h1
-rw-r--r--include/hw/virtio/vhost-user-spi.h25
-rw-r--r--include/hw/virtio/vhost-user.h2
-rw-r--r--include/hw/virtio/vhost-vdpa.h8
-rw-r--r--include/hw/virtio/vhost.h6
-rw-r--r--include/migration/vmstate.h10
-rw-r--r--include/standard-headers/linux/virtio_spi.h181
-rw-r--r--target/arm/kvm.c10
-rw-r--r--tests/qtest/q35-test.c6
57 files changed, 791 insertions, 208 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 4c35249..65ac60b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2199,7 +2199,7 @@ S: Maintained
F: hw/riscv/virt-acpi-build.c
ACPI/VIOT
-M: Jean-Philippe Brucker <jean-philippe@linaro.org>
+M: Eric Auger <eric.auger@redhat.com>
S: Supported
F: hw/acpi/viot.c
F: hw/acpi/viot.h
@@ -2558,6 +2558,12 @@ F: hw/virtio/vhost-user-scmi*
F: include/hw/virtio/vhost-user-scmi.h
F: tests/qtest/libqos/virtio-scmi.*
+vhost-user-spi
+M: Haixu Cui <quic_haixcui@quicinc.com>
+S: Maintained
+F: include/hw/virtio/vhost-user-spi.h
+F: hw/virtio/vhost-user-spi*
+
virtio-crypto
M: Gonglei <arei.gonglei@huawei.com>
S: Supported
diff --git a/backends/cryptodev-builtin.c b/backends/cryptodev-builtin.c
index 0414c01..55a3fbd 100644
--- a/backends/cryptodev-builtin.c
+++ b/backends/cryptodev-builtin.c
@@ -53,6 +53,8 @@ typedef struct CryptoDevBackendBuiltinSession {
#define CRYPTODEV_BUITLIN_MAX_AUTH_KEY_LEN 512
#define CRYPTODEV_BUITLIN_MAX_CIPHER_KEY_LEN 64
+/* demonstration purposes only, use a limited size to avoid QEMU OOM */
+#define CRYPTODEV_BUITLIN_MAX_REQUEST_SIZE (1024 * 1024)
struct CryptoDevBackendBuiltin {
CryptoDevBackend parent_obj;
@@ -98,12 +100,7 @@ static void cryptodev_builtin_init(
1u << QCRYPTODEV_BACKEND_SERVICE_TYPE_MAC;
backend->conf.cipher_algo_l = 1u << VIRTIO_CRYPTO_CIPHER_AES_CBC;
backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1;
- /*
- * Set the Maximum length of crypto request.
- * Why this value? Just avoid to overflow when
- * memory allocation for each crypto request.
- */
- backend->conf.max_size = LONG_MAX - sizeof(CryptoDevBackendOpInfo);
+ backend->conf.max_size = CRYPTODEV_BUITLIN_MAX_REQUEST_SIZE;
backend->conf.max_cipher_key_len = CRYPTODEV_BUITLIN_MAX_CIPHER_KEY_LEN;
backend->conf.max_auth_key_len = CRYPTODEV_BUITLIN_MAX_AUTH_KEY_LEN;
cryptodev_builtin_init_akcipher(backend);
diff --git a/docs/interop/vhost-user.rst b/docs/interop/vhost-user.rst
index 93a9c8d..137c9f3 100644
--- a/docs/interop/vhost-user.rst
+++ b/docs/interop/vhost-user.rst
@@ -411,8 +411,8 @@ in the ancillary data:
* ``VHOST_USER_SET_INFLIGHT_FD`` (if ``VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD``)
* ``VHOST_USER_SET_DEVICE_STATE_FD``
-When sending file descriptors in ancilliary data, *front-end* should
-associate the ancilliary data with a ``sendmsg`` operation (or
+When sending file descriptors in ancillary data, *front-end* should
+associate the ancillary data with a ``sendmsg`` operation (or
equivalent) that sends bytes starting with the first byte of the
message header. *back-end* can therefore expect that file descriptors
will only be received in the first ``recvmsg`` operation for a message
@@ -743,6 +743,8 @@ negotiated, back-end can send file descriptors (at most 8 descriptors in
each message) to front-end via ancillary data using this fd communication
channel.
+.. _inflight_io_tracking:
+
Inflight I/O tracking
---------------------
@@ -1040,26 +1042,27 @@ Protocol features
.. code:: c
- #define VHOST_USER_PROTOCOL_F_MQ 0
- #define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
- #define VHOST_USER_PROTOCOL_F_RARP 2
- #define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
- #define VHOST_USER_PROTOCOL_F_MTU 4
- #define VHOST_USER_PROTOCOL_F_BACKEND_REQ 5
- #define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN 6
- #define VHOST_USER_PROTOCOL_F_CRYPTO_SESSION 7
- #define VHOST_USER_PROTOCOL_F_PAGEFAULT 8
- #define VHOST_USER_PROTOCOL_F_CONFIG 9
- #define VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD 10
- #define VHOST_USER_PROTOCOL_F_HOST_NOTIFIER 11
- #define VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD 12
- #define VHOST_USER_PROTOCOL_F_RESET_DEVICE 13
- #define VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS 14
- #define VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS 15
- #define VHOST_USER_PROTOCOL_F_STATUS 16
- #define VHOST_USER_PROTOCOL_F_XEN_MMAP 17
- #define VHOST_USER_PROTOCOL_F_SHARED_OBJECT 18
- #define VHOST_USER_PROTOCOL_F_DEVICE_STATE 19
+ #define VHOST_USER_PROTOCOL_F_MQ 0
+ #define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
+ #define VHOST_USER_PROTOCOL_F_RARP 2
+ #define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
+ #define VHOST_USER_PROTOCOL_F_MTU 4
+ #define VHOST_USER_PROTOCOL_F_BACKEND_REQ 5
+ #define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN 6
+ #define VHOST_USER_PROTOCOL_F_CRYPTO_SESSION 7
+ #define VHOST_USER_PROTOCOL_F_PAGEFAULT 8
+ #define VHOST_USER_PROTOCOL_F_CONFIG 9
+ #define VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD 10
+ #define VHOST_USER_PROTOCOL_F_HOST_NOTIFIER 11
+ #define VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD 12
+ #define VHOST_USER_PROTOCOL_F_RESET_DEVICE 13
+ #define VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS 14
+ #define VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS 15
+ #define VHOST_USER_PROTOCOL_F_STATUS 16
+ #define VHOST_USER_PROTOCOL_F_XEN_MMAP 17
+ #define VHOST_USER_PROTOCOL_F_SHARED_OBJECT 18
+ #define VHOST_USER_PROTOCOL_F_DEVICE_STATE 19
+ #define VHOST_USER_PROTOCOL_F_GET_VRING_BASE_INFLIGHT 20
Front-end message types
-----------------------
@@ -1255,6 +1258,19 @@ Front-end message types
The request payload's *num* field is currently reserved and must be
set to 0.
+ By default, the back-end must complete all inflight I/O requests for the
+ specified vring before stopping it.
+
+ If the ``VHOST_USER_PROTOCOL_F_GET_VRING_BASE_INFLIGHT`` protocol
+ feature has been negotiated, the back-end may suspend in-flight I/O
+ requests and record them as described in :ref:`Inflight I/O tracking
+ <inflight_io_tracking>` instead of completing them before stopping the vring.
+ How to suspend an in-flight request depends on the implementation of the back-end
+ but it typically can be done by aborting or cancelling the underlying I/O
+ request. The ``VHOST_USER_PROTOCOL_F_GET_VRING_BASE_INFLIGHT``
+   protocol feature must only be negotiated if
+ ``VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD`` is also negotiated.
+
``VHOST_USER_SET_VRING_KICK``
:id: 12
:equivalent ioctl: ``VHOST_SET_VRING_KICK``
diff --git a/docs/system/devices/virtio/vhost-user.rst b/docs/system/devices/virtio/vhost-user.rst
index f556a84..2806d81 100644
--- a/docs/system/devices/virtio/vhost-user.rst
+++ b/docs/system/devices/virtio/vhost-user.rst
@@ -58,6 +58,9 @@ platform details for what sort of virtio bus to use.
* - vhost-user-vsock
- Socket based communication
- `vhost-device-vsock <https://github.com/rust-vmm/vhost-device/tree/main/vhost-device-vsock>`_
+ * - vhost-user-spi
+     - Proxy SPI devices to host
+ - `vhost-device-spi <https://github.com/rust-vmm/vhost-device/tree/main/vhost-device-spi>`_
The referenced *daemons* are not exhaustive, any conforming backend
implementing the device and using the vhost-user protocol should work.
diff --git a/hw/acpi/ghes-stub.c b/hw/acpi/ghes-stub.c
index 40f660c..5f9313c 100644
--- a/hw/acpi/ghes-stub.c
+++ b/hw/acpi/ghes-stub.c
@@ -11,10 +11,10 @@
#include "qemu/osdep.h"
#include "hw/acpi/ghes.h"
-int acpi_ghes_memory_errors(AcpiGhesState *ags, uint16_t source_id,
- uint64_t physical_address)
+bool acpi_ghes_memory_errors(AcpiGhesState *ags, uint16_t source_id,
+ uint64_t physical_address, Error **errp)
{
- return -1;
+ g_assert_not_reached();
}
AcpiGhesState *acpi_ghes_get_state(void)
diff --git a/hw/acpi/ghes.c b/hw/acpi/ghes.c
index 5445dc1..c42f172 100644
--- a/hw/acpi/ghes.c
+++ b/hw/acpi/ghes.c
@@ -444,7 +444,7 @@ static void get_hw_error_offsets(uint64_t ghes_addr,
*read_ack_register_addr = ghes_addr + sizeof(uint64_t);
}
-static void get_ghes_source_offsets(uint16_t source_id,
+static bool get_ghes_source_offsets(uint16_t source_id,
uint64_t hest_addr,
uint64_t *cper_addr,
uint64_t *read_ack_start_addr,
@@ -475,7 +475,7 @@ static void get_ghes_source_offsets(uint16_t source_id,
/* For now, we only know the size of GHESv2 table */
if (type != ACPI_GHES_SOURCE_GENERIC_ERROR_V2) {
error_setg(errp, "HEST: type %d not supported.", type);
- return;
+ return false;
}
/* Compare CPER source ID at the GHESv2 structure */
@@ -489,7 +489,7 @@ static void get_ghes_source_offsets(uint16_t source_id,
}
if (i == num_sources) {
error_setg(errp, "HEST: Source %d not found.", source_id);
- return;
+ return false;
}
/* Navigate through table address pointers */
@@ -509,27 +509,30 @@ static void get_ghes_source_offsets(uint16_t source_id,
cpu_physical_memory_read(hest_read_ack_addr, read_ack_start_addr,
sizeof(*read_ack_start_addr));
*read_ack_start_addr = le64_to_cpu(*read_ack_start_addr);
+
+ return true;
}
NotifierList acpi_generic_error_notifiers =
NOTIFIER_LIST_INITIALIZER(acpi_generic_error_notifiers);
-void ghes_record_cper_errors(AcpiGhesState *ags, const void *cper, size_t len,
+bool ghes_record_cper_errors(AcpiGhesState *ags, const void *cper, size_t len,
uint16_t source_id, Error **errp)
{
uint64_t cper_addr = 0, read_ack_register_addr = 0, read_ack_register;
if (len > ACPI_GHES_MAX_RAW_DATA_LENGTH) {
error_setg(errp, "GHES CPER record is too big: %zd", len);
- return;
+ return false;
}
if (!ags->use_hest_addr) {
get_hw_error_offsets(le64_to_cpu(ags->hw_error_le),
&cper_addr, &read_ack_register_addr);
- } else {
- get_ghes_source_offsets(source_id, le64_to_cpu(ags->hest_addr_le),
- &cper_addr, &read_ack_register_addr, errp);
+ } else if (!get_ghes_source_offsets(source_id,
+ le64_to_cpu(ags->hest_addr_le),
+ &cper_addr, &read_ack_register_addr, errp)) {
+ return false;
}
cpu_physical_memory_read(read_ack_register_addr,
@@ -540,7 +543,7 @@ void ghes_record_cper_errors(AcpiGhesState *ags, const void *cper, size_t len,
error_setg(errp,
"OSPM does not acknowledge previous error,"
" so can not record CPER for current error anymore");
- return;
+ return false;
}
read_ack_register = cpu_to_le64(0);
@@ -555,20 +558,19 @@ void ghes_record_cper_errors(AcpiGhesState *ags, const void *cper, size_t len,
cpu_physical_memory_write(cper_addr, cper, len);
notifier_list_notify(&acpi_generic_error_notifiers, &source_id);
+
+ return true;
}
-int acpi_ghes_memory_errors(AcpiGhesState *ags, uint16_t source_id,
- uint64_t physical_address)
+bool acpi_ghes_memory_errors(AcpiGhesState *ags, uint16_t source_id,
+ uint64_t physical_address, Error **errp)
{
/* Memory Error Section Type */
const uint8_t guid[] =
UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \
0xED, 0x7C, 0x83, 0xB1);
- Error *err = NULL;
int data_length;
- GArray *block;
-
- block = g_array_new(false, true /* clear */, 1);
+ g_autoptr(GArray) block = g_array_new(false, true /* clear */, 1);
data_length = ACPI_GHES_DATA_LENGTH + ACPI_GHES_MEM_CPER_LENGTH;
/*
@@ -583,17 +585,8 @@ int acpi_ghes_memory_errors(AcpiGhesState *ags, uint16_t source_id,
/* Build the memory section CPER for above new generic error data entry */
acpi_ghes_build_append_mem_cper(block, physical_address);
- /* Report the error */
- ghes_record_cper_errors(ags, block->data, block->len, source_id, &err);
-
- g_array_free(block, true);
-
- if (err) {
- error_report_err(err);
- return -1;
- }
-
- return 0;
+ return ghes_record_cper_errors(ags, block->data, block->len,
+ source_id, errp);
}
AcpiGhesState *acpi_ghes_get_state(void)
diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
index 4d81d2d..c151e83 100644
--- a/hw/block/vhost-user-blk.c
+++ b/hw/block/vhost-user-blk.c
@@ -353,6 +353,7 @@ static int vhost_user_blk_connect(DeviceState *dev, Error **errp)
vhost_dev_set_config_notifier(&s->dev, &blk_ops);
s->vhost_user.supports_config = true;
+ s->vhost_user.supports_inflight_migration = s->inflight_migration;
ret = vhost_dev_init(&s->dev, &s->vhost_user, VHOST_BACKEND_TYPE_USER, 0,
errp);
if (ret < 0) {
@@ -568,6 +569,26 @@ static struct vhost_dev *vhost_user_blk_get_vhost(VirtIODevice *vdev)
return &s->dev;
}
+static bool vhost_user_blk_inflight_needed(void *opaque)
+{
+ struct VHostUserBlk *s = opaque;
+
+ bool inflight_migration = virtio_has_feature(s->dev.protocol_features,
+ VHOST_USER_PROTOCOL_F_GET_VRING_BASE_INFLIGHT);
+
+ return inflight_migration;
+}
+
+static const VMStateDescription vmstate_vhost_user_blk_inflight = {
+ .name = "vhost-user-blk/inflight",
+ .version_id = 1,
+ .needed = vhost_user_blk_inflight_needed,
+ .fields = (const VMStateField[]) {
+ VMSTATE_VHOST_INFLIGHT_REGION(inflight, VHostUserBlk),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
static const VMStateDescription vmstate_vhost_user_blk = {
.name = "vhost-user-blk",
.minimum_version_id = 1,
@@ -576,6 +597,10 @@ static const VMStateDescription vmstate_vhost_user_blk = {
VMSTATE_VIRTIO_DEVICE,
VMSTATE_END_OF_LIST()
},
+ .subsections = (const VMStateDescription * const []) {
+ &vmstate_vhost_user_blk_inflight,
+ NULL
+ }
};
static const Property vhost_user_blk_properties[] = {
@@ -591,6 +616,8 @@ static const Property vhost_user_blk_properties[] = {
VIRTIO_BLK_F_WRITE_ZEROES, true),
DEFINE_PROP_BOOL("skip-get-vring-base-on-force-shutdown", VHostUserBlk,
skip_get_vring_base_on_force_shutdown, false),
+ DEFINE_PROP_BOOL("inflight-migration", VHostUserBlk,
+ inflight_migration, false),
};
static void vhost_user_blk_class_init(ObjectClass *klass, const void *data)
diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c
index 6cfdd98..9b99d44 100644
--- a/hw/cxl/cxl-mailbox-utils.c
+++ b/hw/cxl/cxl-mailbox-utils.c
@@ -1875,7 +1875,7 @@ static uint64_t get_dc_size(CXLType3Dev *ct3d, MemoryRegion **dc_mr)
static int validate_dpa_addr(CXLType3Dev *ct3d, uint64_t dpa_addr,
size_t length)
{
- uint64_t vmr_size, pmr_size, dc_size;
+ uint64_t vmr_size, pmr_size, dc_size, dpa_end;
if ((dpa_addr % CXL_CACHE_LINE_SIZE) ||
(length % CXL_CACHE_LINE_SIZE) ||
@@ -1887,7 +1887,12 @@ static int validate_dpa_addr(CXLType3Dev *ct3d, uint64_t dpa_addr,
pmr_size = get_pmr_size(ct3d, NULL);
dc_size = get_dc_size(ct3d, NULL);
- if (dpa_addr + length > vmr_size + pmr_size + dc_size) {
+ /* sanitize 64 bit values coming from guest */
+ if (uadd64_overflow(dpa_addr, length, &dpa_end)) {
+ return -EINVAL;
+ }
+
+ if (dpa_end > vmr_size + pmr_size + dc_size) {
return -EINVAL;
}
@@ -2006,7 +2011,7 @@ static CXLRetCode media_operations_discovery(uint8_t *payload_in,
* sub class command.
*/
if (media_op_in_disc_pl->dpa_range_count ||
- start_index > ARRAY_SIZE(media_op_matrix)) {
+ start_index + num_ops > ARRAY_SIZE(media_op_matrix)) {
return CXL_MBOX_INVALID_INPUT;
}
diff --git a/hw/display/virtio-dmabuf.c b/hw/display/virtio-dmabuf.c
index 3dba457..5e0395b 100644
--- a/hw/display/virtio-dmabuf.c
+++ b/hw/display/virtio-dmabuf.c
@@ -35,11 +35,13 @@ static bool virtio_add_resource(QemuUUID *uuid, VirtioSharedObject *value)
if (resource_uuids == NULL) {
resource_uuids = g_hash_table_new_full(qemu_uuid_hash,
uuid_equal_func,
- NULL,
+ g_free,
g_free);
}
if (g_hash_table_lookup(resource_uuids, uuid) == NULL) {
- g_hash_table_insert(resource_uuids, uuid, value);
+ g_hash_table_insert(resource_uuids,
+ g_memdup2(uuid, sizeof(*uuid)),
+ value);
} else {
result = false;
}
diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
index 07f6355..ecf8494 100644
--- a/hw/display/virtio-gpu-virgl.c
+++ b/hw/display/virtio-gpu-virgl.c
@@ -120,7 +120,7 @@ virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g,
vmr->g = g;
mr = &vmr->mr;
- memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data);
+ memory_region_init_ram_ptr(mr, OBJECT(mr), NULL, size, data);
memory_region_add_subregion(&b->hostmem, offset, mr);
memory_region_set_enabled(mr, true);
@@ -186,7 +186,7 @@ virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g,
/* memory region owns self res->mr object and frees it by itself */
memory_region_set_enabled(mr, false);
memory_region_del_subregion(&b->hostmem, mr);
- object_unparent(OBJECT(mr));
+ object_unref(OBJECT(mr));
}
return 0;
@@ -561,7 +561,7 @@ static void virgl_resource_attach_backing(VirtIOGPU *g,
ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb),
cmd, NULL, &res_iovs, &res_niov);
- if (ret != 0) {
+ if (ret < 0) {
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
return;
}
@@ -705,7 +705,7 @@ static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
cmd, &res->base.addrs,
&res->base.iov, &res->base.iov_cnt);
- if (!ret) {
+ if (ret < 0) {
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
return;
}
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index f23eec6..643e91c 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -354,7 +354,7 @@ static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
cmd, &res->addrs, &res->iov,
&res->iov_cnt);
- if (ret != 0) {
+ if (ret < 0) {
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
g_free(res);
return;
@@ -933,7 +933,7 @@ virtio_gpu_resource_attach_backing(VirtIOGPU *g,
ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
&res->addrs, &res->iov, &res->iov_cnt);
- if (ret != 0) {
+ if (ret < 0) {
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
return;
}
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index e8a6f50..dd00079 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -1998,7 +1998,7 @@ static int vtd_iova_to_fspte(IntelIOMMUState *s, VTDContextEntry *ce,
uint64_t iova, bool is_write,
uint64_t *fsptep, uint32_t *fspte_level,
bool *reads, bool *writes, uint8_t aw_bits,
- uint32_t pasid)
+ uint32_t pasid, int iommu_idx)
{
dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce, pasid);
uint32_t offset;
@@ -2039,7 +2039,8 @@ static int vtd_iova_to_fspte(IntelIOMMUState *s, VTDContextEntry *ce,
*reads = true;
*writes = (*writes) && (fspte & VTD_FS_RW);
- if (is_write && !(fspte & VTD_FS_RW)) {
+ /* ATS should not fail when the write permission is not set */
+ if (is_write && !(fspte & VTD_FS_RW) && iommu_idx != VTD_IDX_ATS) {
return -VTD_FR_SM_WRITE;
}
if (vtd_fspte_nonzero_rsvd(fspte, *fspte_level)) {
@@ -2098,7 +2099,7 @@ static void vtd_report_fault(IntelIOMMUState *s,
*/
static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
uint8_t devfn, hwaddr addr, bool is_write,
- IOMMUTLBEntry *entry)
+ IOMMUTLBEntry *entry, int iommu_idx)
{
IntelIOMMUState *s = vtd_as->iommu_state;
VTDContextEntry ce;
@@ -2204,7 +2205,8 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
if (s->fsts && s->root_scalable) {
ret_fr = vtd_iova_to_fspte(s, &ce, addr, is_write, &pte, &level,
- &reads, &writes, s->aw_bits, pasid);
+ &reads, &writes, s->aw_bits, pasid,
+ iommu_idx);
pgtt = VTD_SM_PASID_ENTRY_FST;
} else {
ret_fr = vtd_iova_to_sspte(s, &ce, addr, is_write, &pte, &level,
@@ -2860,8 +2862,10 @@ static bool vtd_inv_desc_reserved_check(IntelIOMMUState *s,
static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
- uint64_t mask[4] = {VTD_INV_DESC_WAIT_RSVD_LO, VTD_INV_DESC_WAIT_RSVD_HI,
- VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE};
+ uint64_t mask[4] = {
+ VTD_INV_DESC_WAIT_RSVD_LO(s->ecap), VTD_INV_DESC_WAIT_RSVD_HI,
+ VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE
+ };
bool ret = true;
if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false,
@@ -3985,6 +3989,25 @@ static void vtd_mem_write(void *opaque, hwaddr addr,
}
}
+static void vtd_prepare_identity_entry(hwaddr addr, IOMMUAccessFlags perm,
+ uint32_t pasid, IOMMUTLBEntry *iotlb)
+{
+ iotlb->iova = addr & VTD_PAGE_MASK_4K;
+ iotlb->translated_addr = addr & VTD_PAGE_MASK_4K;
+ iotlb->addr_mask = ~VTD_PAGE_MASK_4K;
+ iotlb->perm = perm;
+ iotlb->pasid = pasid;
+}
+
+static inline void vtd_prepare_error_entry(IOMMUTLBEntry *entry)
+{
+ entry->iova = 0;
+ entry->translated_addr = 0;
+ entry->addr_mask = ~VTD_PAGE_MASK_4K;
+ entry->perm = IOMMU_NONE;
+ entry->pasid = PCI_NO_PASID;
+}
+
static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
IOMMUAccessFlags flag, int iommu_idx)
{
@@ -3996,16 +4019,29 @@ static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
.pasid = vtd_as->pasid,
};
bool success;
+ bool is_write = flag & IOMMU_WO;
if (likely(s->dmar_enabled)) {
- success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn,
- addr, flag & IOMMU_WO, &iotlb);
+ /* Only support translated requests in scalable mode */
+ if (iommu_idx == VTD_IDX_TRANSLATED && s->root_scalable) {
+ if (vtd_as->pasid == PCI_NO_PASID) {
+ vtd_prepare_identity_entry(addr, IOMMU_RW, PCI_NO_PASID,
+ &iotlb);
+ success = true;
+ } else {
+ vtd_prepare_error_entry(&iotlb);
+ error_report_once("%s: translated request with PASID not "
+ "allowed (pasid=0x%" PRIx32 ")", __func__,
+ vtd_as->pasid);
+ success = false;
+ }
+ } else {
+ success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn,
+ addr, is_write, &iotlb, iommu_idx);
+ }
} else {
/* DMAR disabled, passthrough, use 4k-page*/
- iotlb.iova = addr & VTD_PAGE_MASK_4K;
- iotlb.translated_addr = addr & VTD_PAGE_MASK_4K;
- iotlb.addr_mask = ~VTD_PAGE_MASK_4K;
- iotlb.perm = IOMMU_RW;
+ vtd_prepare_identity_entry(addr, IOMMU_RW, vtd_as->pasid, &iotlb);
success = true;
}
@@ -4152,6 +4188,7 @@ static const Property vtd_properties[] = {
DEFINE_PROP_BOOL("x-flts", IntelIOMMUState, fsts, FALSE),
DEFINE_PROP_BOOL("snoop-control", IntelIOMMUState, snoop_control, false),
DEFINE_PROP_BOOL("x-pasid-mode", IntelIOMMUState, pasid, false),
+ DEFINE_PROP_BOOL("svm", IntelIOMMUState, svm, false),
DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true),
DEFINE_PROP_BOOL("stale-tm", IntelIOMMUState, stale_tm, false),
DEFINE_PROP_BOOL("fs1gp", IntelIOMMUState, fs1gp, true),
@@ -4414,6 +4451,37 @@ static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src,
src, dst, sid, false);
}
+static void vtd_report_sid_ir_illegal_access(IntelIOMMUState *s, uint16_t sid,
+ uint32_t pasid, hwaddr addr,
+ bool is_write)
+{
+ uint8_t bus_n = VTD_SID_TO_BUS(sid);
+ uint8_t devfn = VTD_SID_TO_DEVFN(sid);
+ bool is_fpd_set = false;
+ VTDContextEntry ce;
+
+    /* Try our best to fetch FPD, we can't do anything more */
+ if (vtd_dev_to_context_entry(s, bus_n, devfn, &ce) == 0) {
+ is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
+ if (!is_fpd_set && s->root_scalable) {
+ vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set, pasid);
+ }
+ }
+
+ vtd_report_fault(s, VTD_FR_SM_INTERRUPT_ADDR, is_fpd_set, sid, addr,
+ is_write, pasid != PCI_NO_PASID, pasid);
+}
+
+static void vtd_report_ir_illegal_access(VTDAddressSpace *vtd_as,
+ hwaddr addr, bool is_write)
+{
+ uint8_t bus_n = pci_bus_num(vtd_as->bus);
+ uint16_t sid = PCI_BUILD_BDF(bus_n, vtd_as->devfn);
+
+ vtd_report_sid_ir_illegal_access(vtd_as->iommu_state, sid, vtd_as->pasid,
+ addr, is_write);
+}
+
static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr,
uint64_t *data, unsigned size,
MemTxAttrs attrs)
@@ -4425,9 +4493,11 @@ static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
uint64_t value, unsigned size,
MemTxAttrs attrs)
{
+ IntelIOMMUState *s = opaque;
int ret = 0;
MSIMessage from = {}, to = {};
uint16_t sid = X86_IOMMU_SID_INVALID;
+ uint32_t pasid;
from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST;
from.data = (uint32_t) value;
@@ -4435,9 +4505,16 @@ static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
if (!attrs.unspecified) {
/* We have explicit Source ID */
sid = attrs.requester_id;
+ pasid = attrs.pid != 0 ? attrs.pid : PCI_NO_PASID;
+
+ if (attrs.address_type == PCI_AT_TRANSLATED &&
+ sid != X86_IOMMU_SID_INVALID) {
+ vtd_report_sid_ir_illegal_access(s, sid, pasid, from.address, true);
+ return MEMTX_ERROR;
+ }
}
- ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid, true);
+ ret = vtd_interrupt_remap_msi(s, &from, &to, sid, true);
if (ret) {
/* Drop this interrupt */
return MEMTX_ERROR;
@@ -4462,30 +4539,6 @@ static const MemoryRegionOps vtd_mem_ir_ops = {
},
};
-static void vtd_report_ir_illegal_access(VTDAddressSpace *vtd_as,
- hwaddr addr, bool is_write)
-{
- IntelIOMMUState *s = vtd_as->iommu_state;
- uint8_t bus_n = pci_bus_num(vtd_as->bus);
- uint16_t sid = PCI_BUILD_BDF(bus_n, vtd_as->devfn);
- bool is_fpd_set = false;
- VTDContextEntry ce;
-
- assert(vtd_as->pasid != PCI_NO_PASID);
-
- /* Try out best to fetch FPD, we can't do anything more */
- if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
- is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
- if (!is_fpd_set && s->root_scalable) {
- vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set, vtd_as->pasid);
- }
- }
-
- vtd_report_fault(s, VTD_FR_SM_INTERRUPT_ADDR,
- is_fpd_set, sid, addr, is_write,
- true, vtd_as->pasid);
-}
-
static MemTxResult vtd_mem_ir_fault_read(void *opaque, hwaddr addr,
uint64_t *data, unsigned size,
MemTxAttrs attrs)
@@ -5046,6 +5099,10 @@ static void vtd_init(IntelIOMMUState *s)
vtd_spte_rsvd_large[3] &= ~VTD_SPTE_SNP;
}
+ if (s->svm) {
+ s->ecap |= VTD_ECAP_PRS | VTD_ECAP_PDS | VTD_ECAP_NWFS;
+ }
+
vtd_reset_caches(s);
/* Define registers with default values and bit semantics */
@@ -5140,19 +5197,29 @@ static IOMMUTLBEntry vtd_iommu_ats_do_translate(IOMMUMemoryRegion *iommu,
hwaddr addr,
IOMMUAccessFlags flags)
{
- IOMMUTLBEntry entry;
+ IOMMUTLBEntry entry = { .target_as = &address_space_memory };
VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
+ IntelIOMMUState *s = vtd_as->iommu_state;
+
+ /* Guard that makes sure we avoid weird behaviors */
+ if ((flags & IOMMU_PRIV) && (s->ecap & VTD_ECAP_SRS)) {
+ error_report_once("Privileged ATS not supported");
+ abort();
+ }
if (vtd_is_interrupt_addr(addr)) {
+ vtd_prepare_error_entry(&entry);
vtd_report_ir_illegal_access(vtd_as, addr, flags & IOMMU_WO);
- entry.target_as = &address_space_memory;
- entry.iova = 0;
- entry.translated_addr = 0;
- entry.addr_mask = ~VTD_PAGE_MASK_4K;
- entry.perm = IOMMU_NONE;
- entry.pasid = PCI_NO_PASID;
+ } else if ((flags & IOMMU_PRIV) && !(s->ecap & VTD_ECAP_SRS)) {
+ /*
+ * For translation-request-with-PASID with PR=1, remapping hardware
+ * not supporting supervisor requests (SRS=0 in the Extended
+ * Capability Register) forces R=W=E=0 in addition to setting PRIV=1.
+ */
+ vtd_prepare_error_entry(&entry);
+ entry.perm = IOMMU_PRIV;
} else {
- entry = vtd_iommu_translate(iommu, addr, flags, 0);
+ entry = vtd_iommu_translate(iommu, addr, flags, VTD_IDX_ATS);
}
return entry;
@@ -5513,6 +5580,29 @@ static bool vtd_decide_config(IntelIOMMUState *s, Error **errp)
return false;
}
+ if (s->svm) {
+ if (!x86_iommu->dt_supported) {
+ error_setg(errp, "Need to set device IOTLB for svm");
+ return false;
+ }
+
+ if (!s->fsts) {
+ error_setg(errp, "Need to set flts for svm");
+ return false;
+ }
+
+ if (!x86_iommu->dma_translation) {
+ error_setg(errp, "Need to set dma-translation for svm");
+ return false;
+ }
+
+ if (!s->pasid) {
+ error_setg(errp, "Need to set PASID support for svm");
+ return false;
+ }
+ }
+
+
return true;
}
@@ -5523,17 +5613,6 @@ static void vtd_realize(DeviceState *dev, Error **errp)
X86MachineState *x86ms = X86_MACHINE(ms);
PCIBus *bus = pcms->pcibus;
IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
- X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
-
- if (s->pasid && x86_iommu->dt_supported) {
- /*
- * PASID-based-Device-TLB Invalidate Descriptor is not
- * implemented and it requires support from vhost layer which
- * needs to be implemented in the future.
- */
- error_setg(errp, "PASID based device IOTLB is not supported");
- return;
- }
if (!vtd_decide_config(s, errp)) {
return;
@@ -5601,6 +5680,17 @@ static const TypeInfo vtd_info = {
.class_init = vtd_class_init,
};
+static int vtd_attrs_to_index(IOMMUMemoryRegion *iommu_mr, MemTxAttrs attrs)
+{
+ return attrs.address_type == PCI_AT_TRANSLATED ?
+ VTD_IDX_TRANSLATED : VTD_IDX_UNTRANSLATED;
+}
+
+static int vtd_num_indexes(IOMMUMemoryRegion *iommu)
+{
+ return VTD_IDX_COUNT;
+}
+
static void vtd_iommu_memory_region_class_init(ObjectClass *klass,
const void *data)
{
@@ -5609,6 +5699,8 @@ static void vtd_iommu_memory_region_class_init(ObjectClass *klass,
imrc->translate = vtd_iommu_translate;
imrc->notify_flag_changed = vtd_iommu_notify_flag_changed;
imrc->replay = vtd_iommu_replay;
+ imrc->attrs_to_index = vtd_attrs_to_index;
+ imrc->num_indexes = vtd_num_indexes;
}
static const TypeInfo vtd_iommu_memory_region_info = {
diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
index a2ca79f..11a53aa 100644
--- a/hw/i386/intel_iommu_internal.h
+++ b/hw/i386/intel_iommu_internal.h
@@ -194,8 +194,10 @@
#define VTD_ECAP_PRS (1ULL << 29)
#define VTD_ECAP_MHMV (15ULL << 20)
#define VTD_ECAP_SRS (1ULL << 31)
+#define VTD_ECAP_NWFS (1ULL << 33)
#define VTD_ECAP_PSS (7ULL << 35) /* limit: MemTxAttrs::pid */
#define VTD_ECAP_PASID (1ULL << 40)
+#define VTD_ECAP_PDS (1ULL << 42)
#define VTD_ECAP_SMTS (1ULL << 43)
#define VTD_ECAP_SSTS (1ULL << 46)
#define VTD_ECAP_FSTS (1ULL << 47)
@@ -417,7 +419,9 @@ typedef union VTDPRDesc VTDPRDesc;
#define VTD_INV_DESC_WAIT_IF (1ULL << 4)
#define VTD_INV_DESC_WAIT_FN (1ULL << 6)
#define VTD_INV_DESC_WAIT_DATA_SHIFT 32
-#define VTD_INV_DESC_WAIT_RSVD_LO 0Xfffff180ULL
+#define VTD_INV_DESC_WAIT_RSVD_LO(ecap) (0Xfffff100ULL | \
+ (((ecap) & VTD_ECAP_PDS) \
+ ? 0 : (1 << 7)))
#define VTD_INV_DESC_WAIT_RSVD_HI 3ULL
/* Masks for Context-cache Invalidation Descriptor */
@@ -688,6 +692,14 @@ typedef struct VTDPIOTLBInvInfo {
/* Bits to decide the offset for each level */
#define VTD_LEVEL_BITS 9
+/* IOMMU Index */
+typedef enum VTDIOMMUIndex {
+ VTD_IDX_UNTRANSLATED = 0, /* Default */
+ VTD_IDX_TRANSLATED = 1,
+ VTD_IDX_ATS = 2,
+ VTD_IDX_COUNT = 3, /* Number of supported indexes */
+} VTDIOMMUIndex;
+
typedef struct VTDHostIOMMUDevice {
IntelIOMMUState *iommu_state;
PCIBus *bus;
diff --git a/hw/pci-host/articia.c b/hw/pci-host/articia.c
index 1881e03..04623df 100644
--- a/hw/pci-host/articia.c
+++ b/hw/pci-host/articia.c
@@ -200,7 +200,6 @@ static void articia_class_init(ObjectClass *klass, const void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = articia_realize;
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}
/* TYPE_ARTICIA_PCI_HOST */
diff --git a/hw/pci-host/aspeed_pcie.c b/hw/pci-host/aspeed_pcie.c
index 4f896f8..c150496 100644
--- a/hw/pci-host/aspeed_pcie.c
+++ b/hw/pci-host/aspeed_pcie.c
@@ -298,7 +298,6 @@ static void aspeed_pcie_rc_class_init(ObjectClass *klass, const void *data)
dc->desc = "ASPEED PCIe RC";
dc->realize = aspeed_pcie_rc_realize;
dc->fw_name = "pci";
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
hc->root_bus_path = aspeed_pcie_rc_root_bus_path;
device_class_set_props(dc, aspeed_pcie_rc_props);
diff --git a/hw/pci-host/designware.c b/hw/pci-host/designware.c
index 019e025..00c9449 100644
--- a/hw/pci-host/designware.c
+++ b/hw/pci-host/designware.c
@@ -593,8 +593,6 @@ static void designware_pcie_root_class_init(ObjectClass *klass,
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
-
k->vendor_id = PCI_VENDOR_ID_SYNOPSYS;
k->device_id = 0xABCD;
k->revision = 0;
@@ -736,7 +734,6 @@ static void designware_pcie_host_class_init(ObjectClass *klass,
hc->root_bus_path = designware_pcie_host_root_bus_path;
dc->realize = designware_pcie_host_realize;
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "pci";
dc->vmsd = &vmstate_designware_pcie_host;
}
diff --git a/hw/pci-host/gpex.c b/hw/pci-host/gpex.c
index b5074c0..e66784c 100644
--- a/hw/pci-host/gpex.c
+++ b/hw/pci-host/gpex.c
@@ -200,7 +200,6 @@ static void gpex_host_class_init(ObjectClass *klass, const void *data)
hc->root_bus_path = gpex_host_root_bus_path;
dc->realize = gpex_host_realize;
dc->unrealize = gpex_host_unrealize;
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "pci";
device_class_set_props(dc, gpex_host_properties);
}
@@ -242,7 +241,6 @@ static void gpex_root_class_init(ObjectClass *klass, const void *data)
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->desc = "QEMU generic PCIe host bridge";
dc->vmsd = &vmstate_gpex_root;
k->vendor_id = PCI_VENDOR_ID_REDHAT;
diff --git a/hw/pci-host/grackle.c b/hw/pci-host/grackle.c
index 9a58f0e..b0db787 100644
--- a/hw/pci-host/grackle.c
+++ b/hw/pci-host/grackle.c
@@ -140,7 +140,6 @@ static void grackle_class_init(ObjectClass *klass, const void *data)
dc->realize = grackle_realize;
device_class_set_props(dc, grackle_properties);
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "pci";
sbc->explicit_ofw_unit_address = grackle_ofw_unit_address;
}
diff --git a/hw/pci-host/gt64120.c b/hw/pci-host/gt64120.c
index d361c45..566f928 100644
--- a/hw/pci-host/gt64120.c
+++ b/hw/pci-host/gt64120.c
@@ -1298,7 +1298,6 @@ static void gt64120_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
device_class_set_props(dc, gt64120_properties);
dc->realize = gt64120_realize;
device_class_set_legacy_reset(dc, gt64120_reset);
diff --git a/hw/pci-host/mv64361.c b/hw/pci-host/mv64361.c
index ef1c775..495a82b 100644
--- a/hw/pci-host/mv64361.c
+++ b/hw/pci-host/mv64361.c
@@ -108,7 +108,6 @@ static void mv64361_pcihost_class_init(ObjectClass *klass, const void *data)
dc->realize = mv64361_pcihost_realize;
device_class_set_props(dc, mv64361_pcihost_props);
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}
static const TypeInfo mv64361_pcihost_info = {
diff --git a/hw/pci-host/pnv_phb.c b/hw/pci-host/pnv_phb.c
index 85fcc3b..0b556d1 100644
--- a/hw/pci-host/pnv_phb.c
+++ b/hw/pci-host/pnv_phb.c
@@ -202,7 +202,6 @@ static void pnv_phb_class_init(ObjectClass *klass, const void *data)
hc->root_bus_path = pnv_phb_root_bus_path;
dc->realize = pnv_phb_realize;
device_class_set_props(dc, pnv_phb_properties);
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->user_creatable = true;
}
diff --git a/hw/pci-host/ppce500.c b/hw/pci-host/ppce500.c
index 76623f7..67180ba 100644
--- a/hw/pci-host/ppce500.c
+++ b/hw/pci-host/ppce500.c
@@ -516,7 +516,6 @@ static void e500_pcihost_class_init(ObjectClass *klass, const void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = e500_pcihost_realize;
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
device_class_set_props(dc, pcihost_properties);
dc->vmsd = &vmstate_ppce500_pci;
}
diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c
index bf56229..e85e422 100644
--- a/hw/pci-host/q35.c
+++ b/hw/pci-host/q35.c
@@ -194,7 +194,6 @@ static void q35_host_class_init(ObjectClass *klass, const void *data)
device_class_set_props(dc, q35_host_props);
/* Reason: needs to be wired up by pc_q35_init */
dc->user_creatable = false;
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "pci";
}
@@ -432,30 +431,27 @@ static void mch_update_smbase_smram(MCHPCIState *mch)
}
if (*reg == MCH_HOST_BRIDGE_F_SMBASE_QUERY) {
- pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] =
- MCH_HOST_BRIDGE_F_SMBASE_LCK;
+ pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] = MCH_HOST_BRIDGE_F_SMBASE_LCK;
*reg = MCH_HOST_BRIDGE_F_SMBASE_IN_RAM;
return;
}
/*
- * default/reset state, discard written value
- * which will disable SMRAM balackhole at SMBASE
+ * reg value can come from register write/reset/migration source,
+ * update wmask to be in sync with it regardless of source
*/
- if (pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] == 0xff) {
- *reg = 0x00;
+ if (*reg == MCH_HOST_BRIDGE_F_SMBASE_IN_RAM) {
+ pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] = MCH_HOST_BRIDGE_F_SMBASE_LCK;
+ return;
}
-
- memory_region_transaction_begin();
if (*reg & MCH_HOST_BRIDGE_F_SMBASE_LCK) {
- /* disable all writes */
- pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] &=
- ~MCH_HOST_BRIDGE_F_SMBASE_LCK;
+ /* lock register at 0x2 and disable all writes */
+ pd->wmask[MCH_HOST_BRIDGE_F_SMBASE] = 0;
*reg = MCH_HOST_BRIDGE_F_SMBASE_LCK;
- lck = true;
- } else {
- lck = false;
}
+
+ lck = *reg & MCH_HOST_BRIDGE_F_SMBASE_LCK;
+ memory_region_transaction_begin();
memory_region_set_enabled(&mch->smbase_blackhole, lck);
memory_region_set_enabled(&mch->smbase_window, lck);
memory_region_transaction_commit();
diff --git a/hw/pci-host/raven.c b/hw/pci-host/raven.c
index c500619..b3c2678 100644
--- a/hw/pci-host/raven.c
+++ b/hw/pci-host/raven.c
@@ -296,7 +296,6 @@ static void raven_pcihost_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->realize = raven_pcihost_realizefn;
dc->fw_name = "pci";
}
diff --git a/hw/pci-host/remote.c b/hw/pci-host/remote.c
index feaaa9a..9ea95fa 100644
--- a/hw/pci-host/remote.c
+++ b/hw/pci-host/remote.c
@@ -55,7 +55,6 @@ static void remote_pcihost_class_init(ObjectClass *klass, const void *data)
dc->realize = remote_pcihost_realize;
dc->user_creatable = false;
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "pci";
}
diff --git a/hw/pci-host/sabre.c b/hw/pci-host/sabre.c
index b3f57dc..cd2328a 100644
--- a/hw/pci-host/sabre.c
+++ b/hw/pci-host/sabre.c
@@ -505,7 +505,6 @@ static void sabre_class_init(ObjectClass *klass, const void *data)
dc->realize = sabre_realize;
device_class_set_legacy_reset(dc, sabre_reset);
device_class_set_props(dc, sabre_properties);
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "pci";
sbc->explicit_ofw_unit_address = sabre_ofw_unit_address;
}
diff --git a/hw/pci-host/uninorth.c b/hw/pci-host/uninorth.c
index d39546b..10972de 100644
--- a/hw/pci-host/uninorth.c
+++ b/hw/pci-host/uninorth.c
@@ -435,7 +435,6 @@ static void pci_unin_main_class_init(ObjectClass *klass, const void *data)
dc->realize = pci_unin_main_realize;
device_class_set_props(dc, pci_unin_main_pci_host_props);
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "pci";
sbc->explicit_ofw_unit_address = pci_unin_main_ofw_unit_address;
}
@@ -453,7 +452,6 @@ static void pci_u3_agp_class_init(ObjectClass *klass, const void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = pci_u3_agp_realize;
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}
static const TypeInfo pci_u3_agp_info = {
@@ -469,7 +467,6 @@ static void pci_unin_agp_class_init(ObjectClass *klass, const void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = pci_unin_agp_realize;
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}
static const TypeInfo pci_unin_agp_info = {
@@ -485,7 +482,6 @@ static void pci_unin_internal_class_init(ObjectClass *klass, const void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = pci_unin_internal_realize;
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}
static const TypeInfo pci_unin_internal_info = {
diff --git a/hw/pci-host/xilinx-pcie.c b/hw/pci-host/xilinx-pcie.c
index 86c2037..40f625b 100644
--- a/hw/pci-host/xilinx-pcie.c
+++ b/hw/pci-host/xilinx-pcie.c
@@ -172,7 +172,6 @@ static void xilinx_pcie_host_class_init(ObjectClass *klass, const void *data)
hc->root_bus_path = xilinx_pcie_host_root_bus_path;
dc->realize = xilinx_pcie_host_realize;
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "pci";
device_class_set_props(dc, xilinx_pcie_host_props);
}
@@ -291,7 +290,6 @@ static void xilinx_pcie_root_class_init(ObjectClass *klass, const void *data)
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
- set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->desc = "Xilinx AXI-PCIe Host Bridge";
k->vendor_id = PCI_VENDOR_ID_XILINX;
k->device_id = 0x7021;
diff --git a/hw/pci/pci.c b/hw/pci/pci.c
index 9035cac..90d6d71 100644
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
@@ -3171,6 +3171,10 @@ ssize_t pci_ats_request_translation(PCIDevice *dev, uint32_t pasid,
return -EPERM;
}
+ if (priv_req && !pcie_pasid_priv_enabled(dev)) {
+ return -EPERM;
+ }
+
pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn);
if (iommu_bus && iommu_bus->iommu_ops->ats_request_translation) {
return iommu_bus->iommu_ops->ats_request_translation(bus,
diff --git a/hw/pci/pci_host.c b/hw/pci/pci_host.c
index 05f1475..91e3885 100644
--- a/hw/pci/pci_host.c
+++ b/hw/pci/pci_host.c
@@ -245,6 +245,7 @@ static void pci_host_class_init(ObjectClass *klass, const void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_props(dc, pci_host_properties_common);
dc->vmsd = &vmstate_pcihost;
+ set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}
static const TypeInfo pci_host_type_info = {
diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c
index c481c16..50fc4aa 100644
--- a/hw/pci/pcie.c
+++ b/hw/pci/pcie.c
@@ -1340,6 +1340,16 @@ void pcie_pri_init(PCIDevice *dev, uint16_t offset, uint32_t outstanding_pr_cap,
dev->exp.pri_cap = offset;
}
+static inline bool pcie_pasid_check_ctrl_bit_enabled(const PCIDevice *dev,
+ uint16_t mask)
+{
+ if (!pci_is_express(dev) || !dev->exp.pasid_cap) {
+ return false;
+ }
+ return (pci_get_word(dev->config + dev->exp.pasid_cap + PCI_PASID_CTRL) &
+ mask) != 0;
+}
+
uint32_t pcie_pri_get_req_alloc(const PCIDevice *dev)
{
if (!pcie_pri_enabled(dev)) {
@@ -1359,11 +1369,12 @@ bool pcie_pri_enabled(const PCIDevice *dev)
bool pcie_pasid_enabled(const PCIDevice *dev)
{
- if (!pci_is_express(dev) || !dev->exp.pasid_cap) {
- return false;
- }
- return (pci_get_word(dev->config + dev->exp.pasid_cap + PCI_PASID_CTRL) &
- PCI_PASID_CTRL_ENABLE) != 0;
+ return pcie_pasid_check_ctrl_bit_enabled(dev, PCI_PASID_CTRL_ENABLE);
+}
+
+bool pcie_pasid_priv_enabled(PCIDevice *dev)
+{
+ return pcie_pasid_check_ctrl_bit_enabled(dev, PCI_PASID_CTRL_PRIV);
}
bool pcie_ats_enabled(const PCIDevice *dev)
diff --git a/hw/pci/pcie_sriov.c b/hw/pci/pcie_sriov.c
index 34e0875..c41ac95 100644
--- a/hw/pci/pcie_sriov.c
+++ b/hw/pci/pcie_sriov.c
@@ -195,14 +195,17 @@ bool pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset,
void pcie_sriov_pf_exit(PCIDevice *dev)
{
+ uint8_t *cfg;
+
if (dev->exp.sriov_cap == 0) {
return;
}
+ cfg = dev->config + dev->exp.sriov_cap;
if (dev->exp.sriov_pf.vf_user_created) {
uint16_t ven_id = pci_get_word(dev->config + PCI_VENDOR_ID);
- uint16_t total_vfs = pci_get_word(dev->config + PCI_SRIOV_TOTAL_VF);
- uint16_t vf_dev_id = pci_get_word(dev->config + PCI_SRIOV_VF_DID);
+ uint16_t total_vfs = pci_get_word(cfg + PCI_SRIOV_TOTAL_VF);
+ uint16_t vf_dev_id = pci_get_word(cfg + PCI_SRIOV_VF_DID);
unregister_vfs(dev);
@@ -213,8 +216,6 @@ void pcie_sriov_pf_exit(PCIDevice *dev)
pci_config_set_device_id(dev->exp.sriov_pf.vf[i]->config, vf_dev_id);
}
} else {
- uint8_t *cfg = dev->config + dev->exp.sriov_cap;
-
unparent_vfs(dev, pci_get_word(cfg + PCI_SRIOV_TOTAL_VF));
}
}
diff --git a/hw/pci/shpc.c b/hw/pci/shpc.c
index aac6f2d..9386028 100644
--- a/hw/pci/shpc.c
+++ b/hw/pci/shpc.c
@@ -735,7 +735,6 @@ void shpc_free(PCIDevice *d)
if (!shpc) {
return;
}
- object_unparent(OBJECT(&shpc->mmio));
g_free(shpc->config);
g_free(shpc->cmask);
g_free(shpc->wmask);
diff --git a/hw/virtio/Kconfig b/hw/virtio/Kconfig
index 10f5c53..8895682 100644
--- a/hw/virtio/Kconfig
+++ b/hw/virtio/Kconfig
@@ -127,6 +127,11 @@ config VHOST_USER_SCMI
default y
depends on VIRTIO && VHOST_USER && ARM
+config VHOST_USER_SPI
+ bool
+ default y
+ depends on VIRTIO && VHOST_USER
+
config VHOST_USER_TEST
bool
default y
diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build
index affd668..6675b63 100644
--- a/hw/virtio/meson.build
+++ b/hw/virtio/meson.build
@@ -28,6 +28,7 @@ if have_vhost
system_virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c'))
system_virtio_ss.add(when: 'CONFIG_VHOST_USER_SND', if_true: files('vhost-user-snd.c'))
system_virtio_ss.add(when: 'CONFIG_VHOST_USER_INPUT', if_true: files('vhost-user-input.c'))
+ system_virtio_ss.add(when: 'CONFIG_VHOST_USER_SPI', if_true: files('vhost-user-spi.c'))
# PCI Stubs
system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_TEST'],
@@ -42,6 +43,8 @@ if have_vhost
if_true: files('vhost-user-snd-pci.c'))
system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_INPUT'],
if_true: files('vhost-user-input-pci.c'))
+ system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_SPI'],
+ if_true: files('vhost-user-spi-pci.c'))
endif
if have_vhost_vdpa
system_virtio_ss.add(files('vhost-vdpa.c'))
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index 2481d49..6242aeb 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -34,6 +34,7 @@ bool vhost_svq_valid_features(uint64_t features, Error **errp)
switch (b) {
case VIRTIO_F_ANY_LAYOUT:
case VIRTIO_RING_F_EVENT_IDX:
+ case VIRTIO_RING_F_INDIRECT_DESC:
continue;
case VIRTIO_F_ACCESS_PLATFORM:
diff --git a/hw/virtio/vhost-user-spi-pci.c b/hw/virtio/vhost-user-spi-pci.c
new file mode 100644
index 0000000..f813b82
--- /dev/null
+++ b/hw/virtio/vhost-user-spi-pci.c
@@ -0,0 +1,69 @@
+/*
+ * Vhost-user spi virtio device PCI glue
+ *
+ * Copyright (C) 2025 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "hw/core/qdev-properties.h"
+#include "hw/virtio/vhost-user-spi.h"
+#include "hw/virtio/virtio-pci.h"
+
+struct VHostUserSPIPCI {
+ VirtIOPCIProxy parent_obj;
+ VHostUserSPI vdev;
+};
+
+typedef struct VHostUserSPIPCI VHostUserSPIPCI;
+
+#define TYPE_VHOST_USER_SPI_PCI "vhost-user-spi-pci-base"
+
+DECLARE_INSTANCE_CHECKER(VHostUserSPIPCI, VHOST_USER_SPI_PCI,
+ TYPE_VHOST_USER_SPI_PCI)
+
+static void vhost_user_spi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VHostUserSPIPCI *dev = VHOST_USER_SPI_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+
+ vpci_dev->nvectors = 1;
+ qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
+}
+
+static void vhost_user_spi_pci_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+ k->realize = vhost_user_spi_pci_realize;
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+ pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
+ pcidev_k->device_id = 0; /* Set by virtio-pci based on virtio id */
+ pcidev_k->revision = 0x00;
+ pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
+}
+
+static void vhost_user_spi_pci_instance_init(Object *obj)
+{
+ VHostUserSPIPCI *dev = VHOST_USER_SPI_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VHOST_USER_SPI);
+}
+
+static const VirtioPCIDeviceTypeInfo vhost_user_spi_pci_info = {
+ .base_name = TYPE_VHOST_USER_SPI_PCI,
+ .non_transitional_name = "vhost-user-spi-pci",
+ .instance_size = sizeof(VHostUserSPIPCI),
+ .instance_init = vhost_user_spi_pci_instance_init,
+ .class_init = vhost_user_spi_pci_class_init,
+};
+
+static void vhost_user_spi_pci_register(void)
+{
+ virtio_pci_types_register(&vhost_user_spi_pci_info);
+}
+
+type_init(vhost_user_spi_pci_register)
diff --git a/hw/virtio/vhost-user-spi.c b/hw/virtio/vhost-user-spi.c
new file mode 100644
index 0000000..707f96c
--- /dev/null
+++ b/hw/virtio/vhost-user-spi.c
@@ -0,0 +1,65 @@
+/*
+ * Vhost-user spi virtio device
+ *
+ * Copyright (C) 2025 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/core/qdev-properties.h"
+#include "hw/virtio/virtio-bus.h"
+#include "hw/virtio/vhost-user-spi.h"
+#include "qemu/error-report.h"
+#include "standard-headers/linux/virtio_ids.h"
+#include "standard-headers/linux/virtio_spi.h"
+
+static const Property vspi_properties[] = {
+ DEFINE_PROP_CHR("chardev", VHostUserBase, chardev),
+};
+
+static void vspi_realize(DeviceState *dev, Error **errp)
+{
+ VHostUserBase *vub = VHOST_USER_BASE(dev);
+ VHostUserBaseClass *vubc = VHOST_USER_BASE_GET_CLASS(dev);
+
+ /* Fixed for SPI */
+ vub->virtio_id = VIRTIO_ID_SPI;
+ vub->num_vqs = 1;
+ vub->vq_size = 4;
+ vub->config_size = sizeof(struct virtio_spi_config);
+
+ vubc->parent_realize(dev, errp);
+}
+
+static const VMStateDescription vu_spi_vmstate = {
+ .name = "vhost-user-spi",
+ .unmigratable = 1,
+};
+
+static void vu_spi_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass);
+
+ dc->vmsd = &vu_spi_vmstate;
+ device_class_set_props(dc, vspi_properties);
+ device_class_set_parent_realize(dc, vspi_realize,
+ &vubc->parent_realize);
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+}
+
+static const TypeInfo vu_spi_info = {
+ .name = TYPE_VHOST_USER_SPI,
+ .parent = TYPE_VHOST_USER_BASE,
+ .instance_size = sizeof(VHostUserSPI),
+ .class_init = vu_spi_class_init,
+};
+
+static void vu_spi_register_types(void)
+{
+ type_register_static(&vu_spi_info);
+}
+
+type_init(vu_spi_register_types)
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 63fa9a1..bb8f8ea 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -2225,6 +2225,13 @@ static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque,
}
}
+ if (!u->user->supports_inflight_migration ||
+ !virtio_has_feature(protocol_features,
+ VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
+ protocol_features &= ~(1ULL <<
+ VHOST_USER_PROTOCOL_F_GET_VRING_BASE_INFLIGHT);
+ }
+
/* final set of protocol features */
dev->protocol_features = protocol_features;
err = vhost_user_set_protocol_features(dev, dev->protocol_features);
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 7061b6e..2f8f11d 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -905,7 +905,7 @@ static int vhost_vdpa_reset_device(struct vhost_dev *dev)
memory_listener_unregister(&v->shared->listener);
v->shared->listener_registered = false;
- v->suspended = false;
+ v->shared->suspended = false;
return 0;
}
@@ -1354,7 +1354,7 @@ static void vhost_vdpa_suspend(struct vhost_dev *dev)
if (unlikely(r)) {
error_report("Cannot suspend: %s(%d)", g_strerror(errno), errno);
} else {
- v->suspended = true;
+ v->shared->suspended = true;
return;
}
}
@@ -1481,7 +1481,7 @@ static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
return 0;
}
- if (!v->suspended) {
+ if (!v->shared->suspended) {
/*
* Cannot trust in value returned by device, let vhost recover used
* idx from guest.
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 31e9704..52801c1 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -592,11 +592,13 @@ static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section)
/*
* Some backends (like vhost-user) can only handle memory regions
* that have an fd (can be mapped into a different process). Filter
- * the ones without an fd out, if requested.
- *
- * TODO: we might have to limit to MAP_SHARED as well.
+ * the ones without an fd out, if requested. Also make sure that
+ * this region is mapped as shared so that the vhost backend can
+ * observe modifications to this region, otherwise we consider it
+ * private.
*/
- if (memory_region_get_fd(section->mr) < 0 &&
+ if ((memory_region_get_fd(section->mr) < 0 ||
+ !qemu_ram_is_shared(section->mr->ram_block)) &&
dev->vhost_ops->vhost_backend_no_private_memslots &&
dev->vhost_ops->vhost_backend_no_private_memslots(dev)) {
trace_vhost_reject_section(mr->name, 2);
@@ -1916,6 +1918,62 @@ void vhost_get_features_ex(struct vhost_dev *hdev,
}
}
+static bool vhost_inflight_buffer_pre_load(void *opaque, Error **errp)
+{
+ struct vhost_inflight *inflight = opaque;
+
+ int fd = -1;
+ void *addr = qemu_memfd_alloc("vhost-inflight", inflight->size,
+ F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
+ &fd, errp);
+    if (!addr) {
+        return false;
+    }
+
+ inflight->offset = 0;
+ inflight->addr = addr;
+ inflight->fd = fd;
+
+ return true;
+}
+
+const VMStateDescription vmstate_vhost_inflight_region_buffer = {
+ .name = "vhost-inflight-region/buffer",
+ .pre_load_errp = vhost_inflight_buffer_pre_load,
+ .fields = (const VMStateField[]) {
+ VMSTATE_VBUFFER_UINT64(addr, struct vhost_inflight, 0, NULL, size),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool vhost_inflight_region_post_load(void *opaque,
+ int version_id,
+ Error **errp)
+{
+ struct vhost_inflight *inflight = opaque;
+
+ if (inflight->addr == NULL) {
+ error_setg(errp, "inflight buffer subsection has not been loaded");
+ return false;
+ }
+
+ return true;
+}
+
+const VMStateDescription vmstate_vhost_inflight_region = {
+ .name = "vhost-inflight-region",
+ .post_load_errp = vhost_inflight_region_post_load,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT64(size, struct vhost_inflight),
+ VMSTATE_UINT16(queue_size, struct vhost_inflight),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * const []) {
+ &vmstate_vhost_inflight_region_buffer,
+ NULL
+ }
+};
+
void vhost_ack_features_ex(struct vhost_dev *hdev, const int *feature_bits,
const uint64_t *features)
{
diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
index cbd1810..6fceb39 100644
--- a/hw/virtio/virtio-crypto.c
+++ b/hw/virtio/virtio-crypto.c
@@ -767,11 +767,18 @@ virtio_crypto_handle_asym_req(VirtIOCrypto *vcrypto,
uint32_t len;
uint8_t *src = NULL;
uint8_t *dst = NULL;
+ uint64_t max_len;
asym_op_info = g_new0(CryptoDevBackendAsymOpInfo, 1);
src_len = ldl_le_p(&req->para.src_data_len);
dst_len = ldl_le_p(&req->para.dst_data_len);
+ max_len = (uint64_t)src_len + dst_len;
+ if (unlikely(max_len > vcrypto->conf.max_size)) {
+ virtio_error(vdev, "virtio-crypto asym request is too large");
+ goto err;
+ }
+
if (src_len > 0) {
src = g_malloc0(src_len);
len = iov_to_buf(iov, out_num, 0, src, src_len);
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index b273eb2..1e8f90d 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -2183,15 +2183,17 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
}
- if (pci_is_vf(&proxy->pci_dev)) {
- pcie_ari_init(&proxy->pci_dev, proxy->last_pcie_cap_offset);
- proxy->last_pcie_cap_offset += PCI_ARI_SIZEOF;
- } else {
- res = pcie_sriov_pf_init_from_user_created_vfs(
- &proxy->pci_dev, proxy->last_pcie_cap_offset, errp);
- if (res > 0) {
- proxy->last_pcie_cap_offset += res;
- virtio_add_feature(&vdev->host_features, VIRTIO_F_SR_IOV);
+ if (pci_is_express(&proxy->pci_dev)) {
+ if (pci_is_vf(&proxy->pci_dev)) {
+ pcie_ari_init(&proxy->pci_dev, proxy->last_pcie_cap_offset);
+ proxy->last_pcie_cap_offset += PCI_ARI_SIZEOF;
+ } else {
+ res = pcie_sriov_pf_init_from_user_created_vfs(
+ &proxy->pci_dev, proxy->last_pcie_cap_offset, errp);
+ if (res > 0) {
+ proxy->last_pcie_cap_offset += res;
+ virtio_add_feature(&vdev->host_features, VIRTIO_F_SR_IOV);
+ }
}
}
}
diff --git a/hw/virtio/virtio-pmem.c b/hw/virtio/virtio-pmem.c
index 5381d59..c3b3299 100644
--- a/hw/virtio/virtio-pmem.c
+++ b/hw/virtio/virtio-pmem.c
@@ -73,7 +73,6 @@ static void virtio_pmem_flush(VirtIODevice *vdev, VirtQueue *vq)
trace_virtio_pmem_flush_request();
req_data = virtqueue_pop(vq, sizeof(VirtIODeviceRequest));
if (!req_data) {
- virtio_error(vdev, "virtio-pmem missing request data");
return;
}
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 3dc9423..77ca54e 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -48,6 +48,7 @@
#include "standard-headers/linux/virtio_iommu.h"
#include "standard-headers/linux/virtio_mem.h"
#include "standard-headers/linux/virtio_vsock.h"
+#include "standard-headers/linux/virtio_spi.h"
/*
* Maximum size of virtio device config space
@@ -196,7 +197,8 @@ const char *virtio_device_names[] = {
[VIRTIO_ID_PARAM_SERV] = "virtio-param-serv",
[VIRTIO_ID_AUDIO_POLICY] = "virtio-audio-pol",
[VIRTIO_ID_BT] = "virtio-bluetooth",
- [VIRTIO_ID_GPIO] = "virtio-gpio"
+ [VIRTIO_ID_GPIO] = "virtio-gpio",
+ [VIRTIO_ID_SPI] = "virtio-spi"
};
static const char *virtio_id_to_name(uint16_t device_id)
diff --git a/include/hw/acpi/ghes.h b/include/hw/acpi/ghes.h
index df2ecbf..5b29aae 100644
--- a/include/hw/acpi/ghes.h
+++ b/include/hw/acpi/ghes.h
@@ -98,9 +98,9 @@ void acpi_build_hest(AcpiGhesState *ags, GArray *table_data,
const char *oem_id, const char *oem_table_id);
void acpi_ghes_add_fw_cfg(AcpiGhesState *vms, FWCfgState *s,
GArray *hardware_errors);
-int acpi_ghes_memory_errors(AcpiGhesState *ags, uint16_t source_id,
- uint64_t error_physical_addr);
-void ghes_record_cper_errors(AcpiGhesState *ags, const void *cper, size_t len,
+bool acpi_ghes_memory_errors(AcpiGhesState *ags, uint16_t source_id,
+ uint64_t error_physical_addr, Error **errp);
+bool ghes_record_cper_errors(AcpiGhesState *ags, const void *cper, size_t len,
uint16_t source_id, Error **errp);
/**
diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h
index 6c61fd3..d600e54 100644
--- a/include/hw/i386/intel_iommu.h
+++ b/include/hw/i386/intel_iommu.h
@@ -275,6 +275,7 @@ struct IntelIOMMUState {
bool scalable_mode; /* RO - is Scalable Mode supported? */
bool fsts; /* RO - is first stage translation supported? */
bool snoop_control; /* RO - is SNP filed supported? */
+ bool svm; /* RO - is SVA/SVM supported? */
dma_addr_t root; /* Current root table pointer */
bool root_scalable; /* Type of root table (scalable or not) */
diff --git a/include/hw/pci/pcie.h b/include/hw/pci/pcie.h
index fc02aeb..d4e065d 100644
--- a/include/hw/pci/pcie.h
+++ b/include/hw/pci/pcie.h
@@ -165,5 +165,6 @@ void pcie_pri_init(PCIDevice *dev, uint16_t offset, uint32_t outstanding_pr_cap,
uint32_t pcie_pri_get_req_alloc(const PCIDevice *dev);
bool pcie_pri_enabled(const PCIDevice *dev);
bool pcie_pasid_enabled(const PCIDevice *dev);
+bool pcie_pasid_priv_enabled(PCIDevice *dev);
bool pcie_ats_enabled(const PCIDevice *dev);
#endif /* QEMU_PCIE_H */
diff --git a/include/hw/virtio/vhost-user-blk.h b/include/hw/virtio/vhost-user-blk.h
index 8158d46..1e41a2b 100644
--- a/include/hw/virtio/vhost-user-blk.h
+++ b/include/hw/virtio/vhost-user-blk.h
@@ -52,6 +52,7 @@ struct VHostUserBlk {
bool started_vu;
bool skip_get_vring_base_on_force_shutdown;
+ bool inflight_migration;
};
#endif
diff --git a/include/hw/virtio/vhost-user-spi.h b/include/hw/virtio/vhost-user-spi.h
new file mode 100644
index 0000000..a1a6582
--- /dev/null
+++ b/include/hw/virtio/vhost-user-spi.h
@@ -0,0 +1,25 @@
+/*
+ * Vhost-user spi virtio device
+ *
+ * Copyright (C) 2025 Qualcomm Innovation Center, Inc. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef QEMU_VHOST_USER_SPI_H
+#define QEMU_VHOST_USER_SPI_H
+
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+#include "hw/virtio/vhost-user-base.h"
+
+#define TYPE_VHOST_USER_SPI "vhost-user-spi-device"
+
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserSPI, VHOST_USER_SPI)
+
+struct VHostUserSPI {
+ VHostUserBase parent_obj;
+};
+
+#endif /* QEMU_VHOST_USER_SPI_H */
diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
index 55d5725..53fe996 100644
--- a/include/hw/virtio/vhost-user.h
+++ b/include/hw/virtio/vhost-user.h
@@ -32,6 +32,7 @@ enum VhostUserProtocolFeature {
/* Feature 17 reserved for VHOST_USER_PROTOCOL_F_XEN_MMAP. */
VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 18,
VHOST_USER_PROTOCOL_F_DEVICE_STATE = 19,
+ VHOST_USER_PROTOCOL_F_GET_VRING_BASE_INFLIGHT = 20,
VHOST_USER_PROTOCOL_F_MAX
};
@@ -68,6 +69,7 @@ typedef struct VhostUserState {
GPtrArray *notifiers;
int memory_slots;
bool supports_config;
+ bool supports_inflight_migration;
} VhostUserState;
/**
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 449bf5c..80ff670 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -76,6 +76,12 @@ typedef struct vhost_vdpa_shared {
/* SVQ switching is in progress, or already completed? */
SVQTransitionState svq_switching;
+
+ /*
+ * Device suspended successfully.
+ * The vhost_vdpa devices cannot have different suspended states.
+ */
+ bool suspended;
} VhostVDPAShared;
typedef struct vhost_vdpa {
@@ -83,8 +89,6 @@ typedef struct vhost_vdpa {
uint32_t address_space_id;
uint64_t acked_features;
bool shadow_vqs_enabled;
- /* Device suspended successfully */
- bool suspended;
VhostVDPAShared *shared;
GPtrArray *shadow_vqs;
const VhostShadowVirtqueueOps *shadow_vq_ops;
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 08bbb4d..89817bd 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -554,4 +554,10 @@ static inline int vhost_load_backend_state(struct vhost_dev *dev, QEMUFile *f,
}
#endif
+extern const VMStateDescription vmstate_vhost_inflight_region;
+#define VMSTATE_VHOST_INFLIGHT_REGION(_field, _state) \
+ VMSTATE_STRUCT_POINTER(_field, _state, \
+ vmstate_vhost_inflight_region, \
+ struct vhost_inflight)
+
#endif
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index ed9095a..89f9f49 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -706,6 +706,16 @@ extern const VMStateInfo vmstate_info_qlist;
.offset = offsetof(_state, _field), \
}
+#define VMSTATE_VBUFFER_UINT64(_field, _state, _version, _test, _field_size) { \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .field_exists = (_test), \
+ .size_offset = vmstate_offset_value(_state, _field_size, uint64_t),\
+ .info = &vmstate_info_buffer, \
+ .flags = VMS_VBUFFER | VMS_POINTER, \
+ .offset = offsetof(_state, _field), \
+}
+
#define VMSTATE_VBUFFER_ALLOC_UINT32(_field, _state, _version, \
_test, _field_size) { \
.name = (stringify(_field)), \
diff --git a/include/standard-headers/linux/virtio_spi.h b/include/standard-headers/linux/virtio_spi.h
new file mode 100644
index 0000000..54e570f
--- /dev/null
+++ b/include/standard-headers/linux/virtio_spi.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (C) 2023 OpenSynergy GmbH
+ * Copyright (C) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef _LINUX_VIRTIO_VIRTIO_SPI_H
+#define _LINUX_VIRTIO_VIRTIO_SPI_H
+
+#include "standard-headers/linux/types.h"
+#include "standard-headers/linux/virtio_config.h"
+#include "standard-headers/linux/virtio_ids.h"
+#include "standard-headers/linux/virtio_types.h"
+
+/* Sample data on trailing clock edge */
+#define VIRTIO_SPI_CPHA _BITUL(0)
+/* Clock is high when IDLE */
+#define VIRTIO_SPI_CPOL _BITUL(1)
+/* Chip Select is active high */
+#define VIRTIO_SPI_CS_HIGH _BITUL(2)
+/* Transmit LSB first */
+#define VIRTIO_SPI_MODE_LSB_FIRST _BITUL(3)
+/* Loopback mode */
+#define VIRTIO_SPI_MODE_LOOP _BITUL(4)
+
+/**
+ * struct virtio_spi_config - All config fields are read-only for the
+ * Virtio SPI driver
+ * @cs_max_number: maximum number of chipselect the host SPI controller
+ * supports.
+ * @cs_change_supported: indicates if the host SPI controller supports toggling
+ * chipselect after each transfer in one message:
+ * 0: unsupported, chipselect will be kept in active state throughout the
+ * message transaction;
+ * 1: supported.
+ * Note: Message here contains a sequence of SPI transfers.
+ * @tx_nbits_supported: indicates the supported number of bits for writing:
+ * bit 0: DUAL (2-bit transfer), 1 for supported
+ * bit 1: QUAD (4-bit transfer), 1 for supported
+ * bit 2: OCTAL (8-bit transfer), 1 for supported
+ * other bits are reserved as 0, 1-bit transfer is always supported.
+ * @rx_nbits_supported: indicates the supported number of bits for reading:
+ * bit 0: DUAL (2-bit transfer), 1 for supported
+ * bit 1: QUAD (4-bit transfer), 1 for supported
+ * bit 2: OCTAL (8-bit transfer), 1 for supported
+ * other bits are reserved as 0, 1-bit transfer is always supported.
+ * @bits_per_word_mask: mask indicating which values of bits_per_word are
+ * supported. If not set, no limitation for bits_per_word.
+ * @mode_func_supported: indicates the following features are supported or not:
+ * bit 0-1: CPHA feature
+ * 0b00: invalid, should support at least one CPHA setting
+ * 0b01: supports CPHA=0 only
+ * 0b10: supports CPHA=1 only
+ * 0b11: supports CPHA=0 and CPHA=1.
+ * bit 2-3: CPOL feature
+ * 0b00: invalid, should support at least one CPOL setting
+ * 0b01: supports CPOL=0 only
+ * 0b10: supports CPOL=1 only
+ * 0b11: supports CPOL=0 and CPOL=1.
+ * bit 4: chipselect active high feature, 0 for unsupported and 1 for
+ * supported, chipselect active low is supported by default.
+ * bit 5: LSB first feature, 0 for unsupported and 1 for supported,
+ * MSB first is supported by default.
+ * bit 6: loopback mode feature, 0 for unsupported and 1 for supported,
+ * normal mode is supported by default.
+ * @max_freq_hz: the maximum clock rate supported in Hz unit, 0 means no
+ * limitation for transfer speed.
+ * @max_word_delay_ns: the maximum word delay supported, in nanoseconds.
+ * A value of 0 indicates that word delay is unsupported.
+ * Each transfer may consist of a sequence of words.
+ * @max_cs_setup_ns: the maximum delay supported after chipselect is asserted,
+ * in ns unit, 0 means delay is not supported to introduce after chipselect is
+ * asserted.
+ * @max_cs_hold_ns: the maximum delay supported before chipselect is deasserted,
+ * in ns unit, 0 means delay is not supported to introduce before chipselect
+ * is deasserted.
+ * @max_cs_inactive_ns: maximum delay supported after chipselect is deasserted,
+ * in ns unit, 0 means delay is not supported to introduce after chipselect is
+ * deasserted.
+ */
+struct virtio_spi_config {
+ uint8_t cs_max_number;
+ uint8_t cs_change_supported;
+#define VIRTIO_SPI_RX_TX_SUPPORT_DUAL _BITUL(0)
+#define VIRTIO_SPI_RX_TX_SUPPORT_QUAD _BITUL(1)
+#define VIRTIO_SPI_RX_TX_SUPPORT_OCTAL _BITUL(2)
+ uint8_t tx_nbits_supported;
+ uint8_t rx_nbits_supported;
+ uint32_t bits_per_word_mask;
+#define VIRTIO_SPI_MF_SUPPORT_CPHA_0 _BITUL(0)
+#define VIRTIO_SPI_MF_SUPPORT_CPHA_1 _BITUL(1)
+#define VIRTIO_SPI_MF_SUPPORT_CPOL_0 _BITUL(2)
+#define VIRTIO_SPI_MF_SUPPORT_CPOL_1 _BITUL(3)
+#define VIRTIO_SPI_MF_SUPPORT_CS_HIGH _BITUL(4)
+#define VIRTIO_SPI_MF_SUPPORT_LSB_FIRST _BITUL(5)
+#define VIRTIO_SPI_MF_SUPPORT_LOOPBACK _BITUL(6)
+ uint32_t mode_func_supported;
+ uint32_t max_freq_hz;
+ uint32_t max_word_delay_ns;
+ uint32_t max_cs_setup_ns;
+ uint32_t max_cs_hold_ns;
+ uint32_t max_cs_inactive_ns;
+};
+
+/**
+ * struct spi_transfer_head - virtio SPI transfer descriptor
+ * @chip_select_id: chipselect index the SPI transfer used.
+ * @bits_per_word: the number of bits in each SPI transfer word.
+ * @cs_change: whether to deselect device after finishing this transfer
+ * before starting the next transfer, 0 means cs keep asserted and
+ * 1 means cs deasserted then asserted again.
+ * @tx_nbits: bus width for write transfer.
+ * 0,1: bus width is 1, also known as SINGLE
+ * 2 : bus width is 2, also known as DUAL
+ * 4 : bus width is 4, also known as QUAD
+ * 8 : bus width is 8, also known as OCTAL
+ * other values are invalid.
+ * @rx_nbits: bus width for read transfer.
+ * 0,1: bus width is 1, also known as SINGLE
+ * 2 : bus width is 2, also known as DUAL
+ * 4 : bus width is 4, also known as QUAD
+ * 8 : bus width is 8, also known as OCTAL
+ * other values are invalid.
+ * @reserved: for future use.
+ * @mode: SPI transfer mode.
+ * bit 0: CPHA, determines the timing (i.e. phase) of the data
+ * bits relative to the clock pulses. For CPHA=0, the
+ * "out" side changes the data on the trailing edge of the
+ * preceding clock cycle, while the "in" side captures the data
+ * on (or shortly after) the leading edge of the clock cycle.
+ * For CPHA=1, the "out" side changes the data on the leading
+ * edge of the current clock cycle, while the "in" side
+ * captures the data on (or shortly after) the trailing edge of
+ * the clock cycle.
+ * bit 1: CPOL, determines the polarity of the clock. CPOL=0 is a
+ * clock which idles at 0, and each cycle consists of a pulse
+ * of 1. CPOL=1 is a clock which idles at 1, and each cycle
+ * consists of a pulse of 0.
+ * bit 2: CS_HIGH, if 1, chip select active high, else active low.
+ * bit 3: LSB_FIRST, determines per-word bits-on-wire, if 0, MSB
+ * first, else LSB first.
+ * bit 4: LOOP, loopback mode.
+ * @freq: the transfer speed in Hz.
+ * @word_delay_ns: delay to be inserted between consecutive words of a
+ * transfer, in ns unit.
+ * @cs_setup_ns: delay to be introduced after CS is asserted, in ns
+ * unit.
+ * @cs_delay_hold_ns: delay to be introduced before CS is deasserted
+ * for each transfer, in ns unit.
+ * @cs_change_delay_inactive_ns: delay to be introduced after CS is
+ * deasserted and before next asserted, in ns unit.
+ */
+struct spi_transfer_head {
+ uint8_t chip_select_id;
+ uint8_t bits_per_word;
+ uint8_t cs_change;
+ uint8_t tx_nbits;
+ uint8_t rx_nbits;
+ uint8_t reserved[3];
+ uint32_t mode;
+ uint32_t freq;
+ uint32_t word_delay_ns;
+ uint32_t cs_setup_ns;
+ uint32_t cs_delay_hold_ns;
+ uint32_t cs_change_delay_inactive_ns;
+};
+
+/**
+ * struct spi_transfer_result - virtio SPI transfer result
+ * @result: Transfer result code.
+ * VIRTIO_SPI_TRANS_OK: Transfer successful.
+ * VIRTIO_SPI_PARAM_ERR: Parameter error.
+ * VIRTIO_SPI_TRANS_ERR: Transfer error.
+ */
+struct spi_transfer_result {
+#define VIRTIO_SPI_TRANS_OK 0
+#define VIRTIO_SPI_PARAM_ERR 1
+#define VIRTIO_SPI_TRANS_ERR 2
+ uint8_t result;
+};
+
+#endif /* #ifndef _LINUX_VIRTIO_VIRTIO_SPI_H */
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index 0828e8b..3e35570 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -2473,13 +2473,9 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
*/
if (code == BUS_MCEERR_AR) {
kvm_cpu_synchronize_state(c);
- if (!acpi_ghes_memory_errors(ags, ACPI_HEST_SRC_ID_SYNC,
- paddr)) {
- kvm_inject_arm_sea(c);
- } else {
- error_report("failed to record the error");
- abort();
- }
+ acpi_ghes_memory_errors(ags, ACPI_HEST_SRC_ID_SYNC,
+ paddr, &error_fatal);
+ kvm_inject_arm_sea(c);
}
return;
}
diff --git a/tests/qtest/q35-test.c b/tests/qtest/q35-test.c
index 62fff49..4e3a445 100644
--- a/tests/qtest/q35-test.c
+++ b/tests/qtest/q35-test.c
@@ -206,12 +206,6 @@ static void test_smram_smbase_lock(void)
qtest_writeb(qts, SMBASE, SMRAM_TEST_PATTERN);
g_assert_cmpint(qtest_readb(qts, SMBASE), ==, SMRAM_TEST_PATTERN);
- /* check that writing junk to 0x9c before before negotiating is ignored */
- for (i = 0; i < 0xff; i++) {
- qpci_config_writeb(pcidev, MCH_HOST_BRIDGE_F_SMBASE, i);
- g_assert(qpci_config_readb(pcidev, MCH_HOST_BRIDGE_F_SMBASE) == 0);
- }
-
/* enable SMRAM at SMBASE */
qpci_config_writeb(pcidev, MCH_HOST_BRIDGE_F_SMBASE, 0xff);
g_assert(qpci_config_readb(pcidev, MCH_HOST_BRIDGE_F_SMBASE) == 0x01);