-rw-r--r--  .gitlab-ci.d/edk2/Dockerfile             |   4
-rw-r--r--  MAINTAINERS                              |   2
-rw-r--r--  hw/intc/xive2.c                          |   1
-rw-r--r--  hw/net/virtio-net.c                      |   1
-rw-r--r--  hw/pci-host/pnv_phb3.c                   |  33
-rw-r--r--  hw/pci-host/pnv_phb4.c                   |  69
-rw-r--r--  hw/pci-host/pnv_phb4_pec.c               |  16
-rw-r--r--  hw/ppc/pnv.c                             |  29
-rw-r--r--  hw/virtio/meson.build                    |   2
-rw-r--r--  hw/virtio/vhost-iova-tree.c              | 110
-rw-r--r--  hw/virtio/vhost-iova-tree.h              |  27
-rw-r--r--  hw/virtio/vhost-shadow-virtqueue.c       | 636
-rw-r--r--  hw/virtio/vhost-shadow-virtqueue.h       |  87
-rw-r--r--  hw/virtio/vhost-vdpa.c                   | 522
-rw-r--r--  include/hw/pci-host/pnv_phb4.h           |   5
-rw-r--r--  include/hw/ppc/pnv.h                     |   1
-rw-r--r--  include/hw/virtio/vhost-vdpa.h           |   8
-rw-r--r--  include/qemu/iova-tree.h                 |  38
-rw-r--r--  pc-bios/edk2-aarch64-code.fd.bz2         | bin 1507722 -> 1161290 bytes
-rw-r--r--  pc-bios/edk2-arm-code.fd.bz2             | bin 1503187 -> 1161845 bytes
-rw-r--r--  pc-bios/edk2-i386-code.fd.bz2            | bin 1646741 -> 1282120 bytes
-rw-r--r--  pc-bios/edk2-i386-secure-code.fd.bz2     | bin 1860546 -> 1473677 bytes
-rw-r--r--  pc-bios/edk2-x86_64-code.fd.bz2          | bin 1680164 -> 1327522 bytes
-rw-r--r--  pc-bios/edk2-x86_64-microvm.fd.bz2       | bin 0 -> 1309407 bytes
-rw-r--r--  pc-bios/edk2-x86_64-secure-code.fd.bz2   | bin 1912112 -> 1513711 bytes
-rw-r--r--  roms/Makefile.edk2                       |  30
m---------  roms/edk2                                |   0
-rwxr-xr-x  roms/edk2-build.sh                       |   2
-rw-r--r--  target/ppc/mmu-radix64.c                 |   3
-rw-r--r--  target/ppc/translate/vsx-impl.c.inc      |   2
-rw-r--r--  tests/avocado/boot_linux_console.py      |  17
-rw-r--r--  tests/avocado/ppc_405.py                 |  10
-rw-r--r--  tests/avocado/ppc_74xx.py                |  13
-rw-r--r--  tests/avocado/ppc_bamboo.py              |   2
-rw-r--r--  tests/avocado/ppc_mpc8544ds.py           |   2
-rw-r--r--  tests/avocado/ppc_prep_40p.py            |   6
-rw-r--r--  tests/avocado/ppc_virtex_ml507.py        |   2
-rw-r--r--  tests/avocado/replay_kernel.py           |   4
-rw-r--r--  tests/data/acpi/virt/SSDT.memhp          | bin 736 -> 736 bytes
-rw-r--r--  tests/qtest/meson.build                  |   4
-rw-r--r--  tests/tcg/ppc64/Makefile.target          |   1
-rw-r--r--  tests/tcg/ppc64le/Makefile.target        |   1
-rw-r--r--  tests/tcg/ppc64le/xxspltw.c              |  46
-rw-r--r--  util/iova-tree.c                         | 170
44 files changed, 1740 insertions(+), 166 deletions(-)
diff --git a/.gitlab-ci.d/edk2/Dockerfile b/.gitlab-ci.d/edk2/Dockerfile
index 1302931..bbe50ff 100644
--- a/.gitlab-ci.d/edk2/Dockerfile
+++ b/.gitlab-ci.d/edk2/Dockerfile
@@ -1,7 +1,7 @@
#
# Docker image to cross-compile EDK2 firmware binaries
#
-FROM ubuntu:16.04
+FROM ubuntu:18.04
MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>
@@ -20,7 +20,7 @@ RUN apt update \
iasl \
make \
nasm \
- python \
+ python3 \
uuid-dev \
&& \
\
diff --git a/MAINTAINERS b/MAINTAINERS
index f2e9ce1..b976a94 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3159,7 +3159,7 @@ F: docs/interop/firmware.json
EDK2 Firmware
M: Philippe Mathieu-Daudé <f4bug@amsat.org>
-R: Gerd Hoffmann <kraxel@redhat.com>
+M: Gerd Hoffmann <kraxel@redhat.com>
S: Supported
F: hw/i386/*ovmf*
F: pc-bios/descriptors/??-edk2-*.json
diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c
index b6452f1..3aff42a 100644
--- a/hw/intc/xive2.c
+++ b/hw/intc/xive2.c
@@ -1000,6 +1000,7 @@ static void xive2_end_source_class_init(ObjectClass *klass, void *data)
dc->desc = "XIVE END Source";
device_class_set_props(dc, xive2_end_source_properties);
dc->realize = xive2_end_source_realize;
+ dc->user_creatable = false;
}
static const TypeInfo xive2_end_source_info = {
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index b02a063..2087516 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1870,6 +1870,7 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
err:
for (j = 0; j < i; j++) {
+ virtqueue_detach_element(q->rx_vq, elems[j], lens[j]);
g_free(elems[j]);
}
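
The fix above plugs a leak on virtio-net's receive error path: elements popped from the rx virtqueue were freed without first being detached, leaving their mappings live. A minimal sketch of the detach-before-free unwind pattern; only virtqueue_detach_element() and g_free() come from the diff, the helper name and its parameters are illustrative:

/*
 * Hedged sketch: unwinding popped virtqueue elements on error.
 * 'elems', 'lens' and 'count' stand in for the state a receive
 * path accumulates before failing.
 */
static void rx_unwind(VirtQueue *vq, VirtQueueElement **elems,
                      size_t *lens, unsigned count)
{
    for (unsigned j = 0; j < count; j++) {
        /* Return the element to the queue before releasing its memory */
        virtqueue_detach_element(vq, elems[j], lens[j]);
        g_free(elems[j]);
    }
}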
diff --git a/hw/pci-host/pnv_phb3.c b/hw/pci-host/pnv_phb3.c
index aafd46b..ac801ac 100644
--- a/hw/pci-host/pnv_phb3.c
+++ b/hw/pci-host/pnv_phb3.c
@@ -994,30 +994,6 @@ static void pnv_phb3_realize(DeviceState *dev, Error **errp)
PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
int i;
- /* User created devices */
- if (!phb->chip) {
- Error *local_err = NULL;
- BusState *s;
-
- phb->chip = pnv_get_chip(pnv, phb->chip_id);
- if (!phb->chip) {
- error_setg(errp, "invalid chip id: %d", phb->chip_id);
- return;
- }
-
- /*
- * Reparent user created devices to the chip to build
- * correctly the device tree.
- */
- pnv_chip_parent_fixup(phb->chip, OBJECT(phb), phb->phb_id);
-
- s = qdev_get_parent_bus(DEVICE(phb->chip));
- if (!qdev_set_parent_bus(DEVICE(phb), s, &local_err)) {
- error_propagate(errp, local_err);
- return;
- }
- }
-
if (phb->phb_id >= PNV_CHIP_GET_CLASS(phb->chip)->num_phbs) {
error_setg(errp, "invalid PHB index: %d", phb->phb_id);
return;
@@ -1077,10 +1053,7 @@ static void pnv_phb3_realize(DeviceState *dev, Error **errp)
pci_setup_iommu(pci->bus, pnv_phb3_dma_iommu, phb);
- if (defaults_enabled()) {
- pnv_phb_attach_root_port(PCI_HOST_BRIDGE(phb),
- TYPE_PNV_PHB3_ROOT_PORT);
- }
+ pnv_phb_attach_root_port(PCI_HOST_BRIDGE(phb), TYPE_PNV_PHB3_ROOT_PORT);
}
void pnv_phb3_update_regions(PnvPHB3 *phb)
@@ -1131,7 +1104,7 @@ static void pnv_phb3_class_init(ObjectClass *klass, void *data)
dc->realize = pnv_phb3_realize;
device_class_set_props(dc, pnv_phb3_properties);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
- dc->user_creatable = true;
+ dc->user_creatable = false;
}
static const TypeInfo pnv_phb3_type_info = {
@@ -1201,7 +1174,7 @@ static void pnv_phb3_root_port_class_init(ObjectClass *klass, void *data)
device_class_set_parent_realize(dc, pnv_phb3_root_port_realize,
&rpc->parent_realize);
- dc->user_creatable = true;
+ dc->user_creatable = false;
k->vendor_id = PCI_VENDOR_ID_IBM;
k->device_id = 0x03dc;
diff --git a/hw/pci-host/pnv_phb4.c b/hw/pci-host/pnv_phb4.c
index b5b384e..b301762 100644
--- a/hw/pci-host/pnv_phb4.c
+++ b/hw/pci-host/pnv_phb4.c
@@ -1545,70 +1545,14 @@ static void pnv_phb4_instance_init(Object *obj)
object_initialize_child(obj, "source", &phb->xsrc, TYPE_XIVE_SOURCE);
}
-static PnvPhb4PecState *pnv_phb4_get_pec(PnvChip *chip, PnvPHB4 *phb,
- Error **errp)
-{
- Pnv9Chip *chip9 = PNV9_CHIP(chip);
- int chip_id = phb->chip_id;
- int index = phb->phb_id;
- int i, j;
-
- for (i = 0; i < chip->num_pecs; i++) {
- /*
- * For each PEC, check the amount of phbs it supports
- * and see if the given phb4 index matches an index.
- */
- PnvPhb4PecState *pec = &chip9->pecs[i];
-
- for (j = 0; j < pec->num_phbs; j++) {
- if (index == pnv_phb4_pec_get_phb_id(pec, j)) {
- return pec;
- }
- }
- }
-
- error_setg(errp,
- "pnv-phb4 chip-id %d index %d didn't match any existing PEC",
- chip_id, index);
-
- return NULL;
-}
-
static void pnv_phb4_realize(DeviceState *dev, Error **errp)
{
PnvPHB4 *phb = PNV_PHB4(dev);
- PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
- PnvChip *chip = pnv_get_chip(pnv, phb->chip_id);
PCIHostState *pci = PCI_HOST_BRIDGE(dev);
XiveSource *xsrc = &phb->xsrc;
- BusState *s;
- Error *local_err = NULL;
int nr_irqs;
char name[32];
- if (!chip) {
- error_setg(errp, "invalid chip id: %d", phb->chip_id);
- return;
- }
-
- /* User created PHBs need to be assigned to a PEC */
- if (!phb->pec) {
- phb->pec = pnv_phb4_get_pec(chip, phb, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
- }
-
- /* Reparent the PHB to the chip to build the device tree */
- pnv_chip_parent_fixup(chip, OBJECT(phb), phb->phb_id);
-
- s = qdev_get_parent_bus(DEVICE(chip));
- if (!qdev_set_parent_bus(DEVICE(phb), s, &local_err)) {
- error_propagate(errp, local_err);
- return;
- }
-
/* Set the "big_phb" flag */
phb->big_phb = phb->phb_id == 0 || phb->phb_id == 3;
@@ -1766,7 +1710,7 @@ static void pnv_phb4_class_init(ObjectClass *klass, void *data)
dc->realize = pnv_phb4_realize;
device_class_set_props(dc, pnv_phb4_properties);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
- dc->user_creatable = true;
+ dc->user_creatable = false;
xfc->notify = pnv_phb4_xive_notify;
}
@@ -1783,6 +1727,12 @@ static const TypeInfo pnv_phb4_type_info = {
}
};
+static const TypeInfo pnv_phb5_type_info = {
+ .name = TYPE_PNV_PHB5,
+ .parent = TYPE_PNV_PHB4,
+ .instance_size = sizeof(PnvPHB4),
+};
+
static void pnv_phb4_root_bus_class_init(ObjectClass *klass, void *data)
{
BusClass *k = BUS_CLASS(klass);
@@ -1858,7 +1808,7 @@ static void pnv_phb4_root_port_class_init(ObjectClass *klass, void *data)
PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(klass);
dc->desc = "IBM PHB4 PCIE Root Port";
- dc->user_creatable = true;
+ dc->user_creatable = false;
device_class_set_parent_realize(dc, pnv_phb4_root_port_realize,
&rpc->parent_realize);
@@ -1888,7 +1838,7 @@ static void pnv_phb5_root_port_class_init(ObjectClass *klass, void *data)
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
dc->desc = "IBM PHB5 PCIE Root Port";
- dc->user_creatable = true;
+ dc->user_creatable = false;
k->vendor_id = PCI_VENDOR_ID_IBM;
k->device_id = PNV_PHB5_DEVICE_ID;
@@ -1907,6 +1857,7 @@ static void pnv_phb4_register_types(void)
type_register_static(&pnv_phb5_root_port_info);
type_register_static(&pnv_phb4_root_port_info);
type_register_static(&pnv_phb4_type_info);
+ type_register_static(&pnv_phb5_type_info);
type_register_static(&pnv_phb4_iommu_memory_region_info);
}
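
The new pnv_phb5_type_info above registers TYPE_PNV_PHB5 as a thin QOM subclass of TYPE_PNV_PHB4: it reuses the parent's instance size and behavior, and exists mainly so a PEC can instantiate the right model by type name alone. A hedged sketch of the same registration pattern, with hypothetical type names and state struct:

/*
 * Sketch of a QOM "alias" subtype: the child shares the parent's
 * state struct and class methods; only the type name differs.
 * "my-phb" / "my-phb-v2" and MyPHBState are illustrative.
 */
static const TypeInfo my_phb_v2_type_info = {
    .name          = "my-phb-v2",
    .parent        = "my-phb",
    .instance_size = sizeof(MyPHBState),  /* parent's instance struct */
};

static void my_phb_register_types(void)
{
    type_register_static(&my_phb_v2_type_info);
}

type_init(my_phb_register_types)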
diff --git a/hw/pci-host/pnv_phb4_pec.c b/hw/pci-host/pnv_phb4_pec.c
index 0ab36e9..6f1121a 100644
--- a/hw/pci-host/pnv_phb4_pec.c
+++ b/hw/pci-host/pnv_phb4_pec.c
@@ -116,9 +116,11 @@ static void pnv_pec_default_phb_realize(PnvPhb4PecState *pec,
int stack_no,
Error **errp)
{
- PnvPHB4 *phb = PNV_PHB4(qdev_new(TYPE_PNV_PHB4));
+ PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec);
+ PnvPHB4 *phb = PNV_PHB4(qdev_new(pecc->phb_type));
int phb_id = pnv_phb4_pec_get_phb_id(pec, stack_no);
+ object_property_add_child(OBJECT(pec), "phb[*]", OBJECT(phb));
object_property_set_link(OBJECT(phb), "pec", OBJECT(pec),
&error_abort);
object_property_set_int(OBJECT(phb), "chip-id", pec->chip_id,
@@ -131,9 +133,7 @@ static void pnv_pec_default_phb_realize(PnvPhb4PecState *pec,
}
/* Add a single Root port if running with defaults */
- pnv_phb_attach_root_port(PCI_HOST_BRIDGE(phb),
- PNV_PHB4_PEC_GET_CLASS(pec)->rp_model);
-
+ pnv_phb_attach_root_port(PCI_HOST_BRIDGE(phb), pecc->rp_model);
}
static void pnv_pec_realize(DeviceState *dev, Error **errp)
@@ -151,10 +151,8 @@ static void pnv_pec_realize(DeviceState *dev, Error **errp)
pec->num_phbs = pecc->num_phbs[pec->index];
/* Create PHBs if running with defaults */
- if (defaults_enabled()) {
- for (i = 0; i < pec->num_phbs; i++) {
- pnv_pec_default_phb_realize(pec, i, errp);
- }
+ for (i = 0; i < pec->num_phbs; i++) {
+ pnv_pec_default_phb_realize(pec, i, errp);
}
/* Initialize the XSCOM regions for the PEC registers */
@@ -265,6 +263,7 @@ static void pnv_pec_class_init(ObjectClass *klass, void *data)
pecc->stk_compat = stk_compat;
pecc->stk_compat_size = sizeof(stk_compat);
pecc->version = PNV_PHB4_VERSION;
+ pecc->phb_type = TYPE_PNV_PHB4;
pecc->num_phbs = pnv_pec_num_phbs;
pecc->rp_model = TYPE_PNV_PHB4_ROOT_PORT;
}
@@ -317,6 +316,7 @@ static void pnv_phb5_pec_class_init(ObjectClass *klass, void *data)
pecc->stk_compat = stk_compat;
pecc->stk_compat_size = sizeof(stk_compat);
pecc->version = PNV_PHB5_VERSION;
+ pecc->phb_type = TYPE_PNV_PHB5;
pecc->num_phbs = pnv_phb5_pec_num_stacks;
pecc->rp_model = TYPE_PNV_PHB5_ROOT_PORT;
}
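
Moving the PHB device type into PnvPhb4PecClass means pnv_pec_default_phb_realize() no longer hardcodes TYPE_PNV_PHB4; each PEC class supplies the PHB model its default PHBs should use. A condensed sketch of that class-data-driven instantiation; the helper name is illustrative, the QOM calls are the ones used above:

/* Class data selects which QOM type the PEC instantiates (sketch). */
static PnvPHB4 *pec_new_default_phb(PnvPhb4PecState *pec)
{
    PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec);

    /* pecc->phb_type is TYPE_PNV_PHB4 or TYPE_PNV_PHB5, per class_init */
    return PNV_PHB4(qdev_new(pecc->phb_type));
}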
diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c
index 0ac86e1..00f57c9 100644
--- a/hw/ppc/pnv.c
+++ b/hw/ppc/pnv.c
@@ -1141,9 +1141,7 @@ static void pnv_chip_power8_instance_init(Object *obj)
object_initialize_child(obj, "homer", &chip8->homer, TYPE_PNV8_HOMER);
- if (defaults_enabled()) {
- chip8->num_phbs = pcc->num_phbs;
- }
+ chip8->num_phbs = pcc->num_phbs;
for (i = 0; i < chip8->num_phbs; i++) {
object_initialize_child(obj, "phb[*]", &chip8->phbs[i], TYPE_PNV_PHB3);
@@ -1600,9 +1598,7 @@ static void pnv_chip_power10_instance_init(Object *obj)
object_initialize_child(obj, "occ", &chip10->occ, TYPE_PNV10_OCC);
object_initialize_child(obj, "homer", &chip10->homer, TYPE_PNV10_HOMER);
- if (defaults_enabled()) {
- chip->num_pecs = pcc->num_pecs;
- }
+ chip->num_pecs = pcc->num_pecs;
for (i = 0; i < chip->num_pecs; i++) {
object_initialize_child(obj, "pec[*]", &chip10->pecs[i],
@@ -1976,23 +1972,6 @@ static ICSState *pnv_ics_get(XICSFabric *xi, int irq)
return NULL;
}
-void pnv_chip_parent_fixup(PnvChip *chip, Object *obj, int index)
-{
- Object *parent = OBJECT(chip);
- g_autofree char *default_id =
- g_strdup_printf("%s[%d]", object_get_typename(obj), index);
-
- if (obj->parent == parent) {
- return;
- }
-
- object_ref(obj);
- object_unparent(obj);
- object_property_add_child(
- parent, DEVICE(obj)->id ? DEVICE(obj)->id : default_id, obj);
- object_unref(obj);
-}
-
PnvChip *pnv_get_chip(PnvMachineState *pnv, uint32_t chip_id)
{
int i;
@@ -2132,8 +2111,6 @@ static void pnv_machine_power8_class_init(ObjectClass *oc, void *data)
pmc->compat = compat;
pmc->compat_size = sizeof(compat);
-
- machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB3);
}
static void pnv_machine_power9_class_init(ObjectClass *oc, void *data)
@@ -2152,8 +2129,6 @@ static void pnv_machine_power9_class_init(ObjectClass *oc, void *data)
pmc->compat = compat;
pmc->compat_size = sizeof(compat);
pmc->dt_power_mgt = pnv_dt_power_mgt;
-
- machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB4);
}
static void pnv_machine_power10_class_init(ObjectClass *oc, void *data)
diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build
index 521f7d6..6047670 100644
--- a/hw/virtio/meson.build
+++ b/hw/virtio/meson.build
@@ -11,7 +11,7 @@ softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-stub.c'))
virtio_ss = ss.source_set()
virtio_ss.add(files('virtio.c'))
-virtio_ss.add(when: 'CONFIG_VHOST', if_true: files('vhost.c', 'vhost-backend.c'))
+virtio_ss.add(when: 'CONFIG_VHOST', if_true: files('vhost.c', 'vhost-backend.c', 'vhost-shadow-virtqueue.c', 'vhost-iova-tree.c'))
virtio_ss.add(when: 'CONFIG_VHOST_USER', if_true: files('vhost-user.c'))
virtio_ss.add(when: 'CONFIG_VHOST_VDPA', if_true: files('vhost-vdpa.c'))
virtio_ss.add(when: 'CONFIG_VIRTIO_BALLOON', if_true: files('virtio-balloon.c'))
diff --git a/hw/virtio/vhost-iova-tree.c b/hw/virtio/vhost-iova-tree.c
new file mode 100644
index 0000000..55fed1f
--- /dev/null
+++ b/hw/virtio/vhost-iova-tree.c
@@ -0,0 +1,110 @@
+/*
+ * vhost software live migration iova tree
+ *
+ * SPDX-FileCopyrightText: Red Hat, Inc. 2021
+ * SPDX-FileContributor: Author: Eugenio Pérez <eperezma@redhat.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/iova-tree.h"
+#include "vhost-iova-tree.h"
+
+#define iova_min_addr qemu_real_host_page_size
+
+/**
+ * VhostIOVATree, able to:
+ * - Translate iova address
+ * - Reverse translate iova address (from translated to iova)
+ * - Allocate IOVA regions for translated range (linear operation)
+ */
+struct VhostIOVATree {
+ /* First addressable iova address in the device */
+ uint64_t iova_first;
+
+ /* Last addressable iova address in the device */
+ uint64_t iova_last;
+
+ /* IOVA address to qemu memory maps. */
+ IOVATree *iova_taddr_map;
+};
+
+/**
+ * Create a new IOVA tree
+ *
+ * Returns the new IOVA tree
+ */
+VhostIOVATree *vhost_iova_tree_new(hwaddr iova_first, hwaddr iova_last)
+{
+ VhostIOVATree *tree = g_new(VhostIOVATree, 1);
+
+ /* Some devices do not like 0 addresses */
+ tree->iova_first = MAX(iova_first, iova_min_addr);
+ tree->iova_last = iova_last;
+
+ tree->iova_taddr_map = iova_tree_new();
+ return tree;
+}
+
+/**
+ * Delete an iova tree
+ */
+void vhost_iova_tree_delete(VhostIOVATree *iova_tree)
+{
+ iova_tree_destroy(iova_tree->iova_taddr_map);
+ g_free(iova_tree);
+}
+
+/**
+ * Find the IOVA address stored from a memory address
+ *
+ * @tree: The iova tree
+ * @map: The map with the memory address
+ *
+ * Return the stored mapping, or NULL if not found.
+ */
+const DMAMap *vhost_iova_tree_find_iova(const VhostIOVATree *tree,
+ const DMAMap *map)
+{
+ return iova_tree_find_iova(tree->iova_taddr_map, map);
+}
+
+/**
+ * Allocate a new mapping
+ *
+ * @tree: The iova tree
+ * @map: The iova map
+ *
+ * Returns:
+ * - IOVA_OK if the map fits in the container
+ * - IOVA_ERR_INVALID if the map does not make sense (like size overflow)
+ * - IOVA_ERR_NOMEM if tree cannot allocate more space.
+ *
+ * It returns the assigned iova in map->iova if the return value is IOVA_OK.
+ */
+int vhost_iova_tree_map_alloc(VhostIOVATree *tree, DMAMap *map)
+{
+ /* Some vhost devices do not like addr 0. Skip first page */
+ hwaddr iova_first = tree->iova_first ?: qemu_real_host_page_size;
+
+ if (map->translated_addr + map->size < map->translated_addr ||
+ map->perm == IOMMU_NONE) {
+ return IOVA_ERR_INVALID;
+ }
+
+ /* Allocate a node in IOVA address */
+ return iova_tree_alloc_map(tree->iova_taddr_map, map, iova_first,
+ tree->iova_last);
+}
+
+/**
+ * Remove existing mappings from iova tree
+ *
+ * @iova_tree: The vhost iova tree
+ * @map: The map to remove
+ */
+void vhost_iova_tree_remove(VhostIOVATree *iova_tree, const DMAMap *map)
+{
+ iova_tree_remove(iova_tree->iova_taddr_map, map);
+}
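
vhost_iova_tree_map_alloc() takes a DMAMap with translated_addr, size and perm filled in, and writes the assigned IOVA back into map->iova on success. A hedged usage sketch; the buffer, its size and the helper name are illustrative. Note that DMAMap sizes are inclusive, hence the size - 1, matching the callers elsewhere in this series:

/* Allocate a device-visible IOVA for a host buffer (sketch). */
static int map_buffer_example(VhostIOVATree *tree, void *buf, size_t size)
{
    DMAMap map = {
        .translated_addr = (hwaddr)(uintptr_t)buf,
        .size = size - 1,               /* inclusive size */
        .perm = IOMMU_RW,
    };
    int r = vhost_iova_tree_map_alloc(tree, &map);

    if (r != IOVA_OK) {
        return r;  /* IOVA_ERR_INVALID or IOVA_ERR_NOMEM */
    }

    /* map.iova now holds the allocated IOVA; undo with: */
    /* vhost_iova_tree_remove(tree, &map); */
    return IOVA_OK;
}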
diff --git a/hw/virtio/vhost-iova-tree.h b/hw/virtio/vhost-iova-tree.h
new file mode 100644
index 0000000..6a4f24e
--- /dev/null
+++ b/hw/virtio/vhost-iova-tree.h
@@ -0,0 +1,27 @@
+/*
+ * vhost software live migration iova tree
+ *
+ * SPDX-FileCopyrightText: Red Hat, Inc. 2021
+ * SPDX-FileContributor: Author: Eugenio Pérez <eperezma@redhat.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_VIRTIO_VHOST_IOVA_TREE_H
+#define HW_VIRTIO_VHOST_IOVA_TREE_H
+
+#include "qemu/iova-tree.h"
+#include "exec/memory.h"
+
+typedef struct VhostIOVATree VhostIOVATree;
+
+VhostIOVATree *vhost_iova_tree_new(uint64_t iova_first, uint64_t iova_last);
+void vhost_iova_tree_delete(VhostIOVATree *iova_tree);
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostIOVATree, vhost_iova_tree_delete);
+
+const DMAMap *vhost_iova_tree_find_iova(const VhostIOVATree *iova_tree,
+ const DMAMap *map);
+int vhost_iova_tree_map_alloc(VhostIOVATree *iova_tree, DMAMap *map);
+void vhost_iova_tree_remove(VhostIOVATree *iova_tree, const DMAMap *map);
+
+#endif
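
The G_DEFINE_AUTOPTR_CLEANUP_FUNC declaration above wires vhost_iova_tree_delete() into GLib's g_autoptr machinery, so a tree can be scope-bound. A small sketch, with an illustrative IOVA range:

/* Scope-based cleanup: the tree is deleted when 'tree' goes out of scope. */
static void tree_scope_example(void)
{
    g_autoptr(VhostIOVATree) tree =
        vhost_iova_tree_new(0, (1ULL << 36) - 1);  /* range is illustrative */

    /* ... use the tree ... vhost_iova_tree_delete() runs on return */
}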
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
new file mode 100644
index 0000000..b232803
--- /dev/null
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -0,0 +1,636 @@
+/*
+ * vhost shadow virtqueue
+ *
+ * SPDX-FileCopyrightText: Red Hat, Inc. 2021
+ * SPDX-FileContributor: Author: Eugenio Pérez <eperezma@redhat.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "hw/virtio/vhost-shadow-virtqueue.h"
+
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "qemu/main-loop.h"
+#include "qemu/log.h"
+#include "qemu/memalign.h"
+#include "linux-headers/linux/vhost.h"
+
+/**
+ * Validate the transport device features that both the guest can use with
+ * the SVQ and the SVQ can use with the device.
+ *
+ * @features: The features
+ * @errp: Error pointer
+ */
+bool vhost_svq_valid_features(uint64_t features, Error **errp)
+{
+ bool ok = true;
+ uint64_t svq_features = features;
+
+ for (uint64_t b = VIRTIO_TRANSPORT_F_START; b <= VIRTIO_TRANSPORT_F_END;
+ ++b) {
+ switch (b) {
+ case VIRTIO_F_ANY_LAYOUT:
+ continue;
+
+ case VIRTIO_F_ACCESS_PLATFORM:
+ /* SVQ trusts the host's IOMMU to translate addresses */
+ case VIRTIO_F_VERSION_1:
+ /* SVQ trusts that the guest vring is little endian */
+ if (!(svq_features & BIT_ULL(b))) {
+ svq_features |= BIT_ULL(b);
+ ok = false;
+ }
+ continue;
+
+ default:
+ if (svq_features & BIT_ULL(b)) {
+ svq_features &= ~BIT_ULL(b);
+ ok = false;
+ }
+ }
+ }
+
+ if (!ok) {
+ error_setg(errp, "SVQ Invalid device feature flags, offer: 0x%"PRIx64
+ ", ok: 0x%"PRIx64, features, svq_features);
+ }
+ return ok;
+}
+
+/**
+ * Number of descriptors that the SVQ can make available from the guest.
+ *
+ * @svq: The svq
+ */
+static uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
+{
+ return svq->vring.num - (svq->shadow_avail_idx - svq->shadow_used_idx);
+}
+
+/**
+ * Translate addresses between qemu's virtual addresses and the SVQ IOVA
+ *
+ * @svq: Shadow VirtQueue
+ * @addrs: Destination array for the translated SVQ IOVA addresses
+ * @iovec: Source array of qemu's VA addresses
+ * @num: Length of iovec and minimum length of addrs
+ */
+static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
+ hwaddr *addrs, const struct iovec *iovec,
+ size_t num)
+{
+ if (num == 0) {
+ return true;
+ }
+
+ for (size_t i = 0; i < num; ++i) {
+ DMAMap needle = {
+ .translated_addr = (hwaddr)(uintptr_t)iovec[i].iov_base,
+ .size = iovec[i].iov_len,
+ };
+ Int128 needle_last, map_last;
+ size_t off;
+
+ const DMAMap *map = vhost_iova_tree_find_iova(svq->iova_tree, &needle);
+ /*
+ * Map cannot be NULL since iova map contains all guest space and
+ * qemu already has a physical address mapped
+ */
+ if (unlikely(!map)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Invalid address 0x%"HWADDR_PRIx" given by guest",
+ needle.translated_addr);
+ return false;
+ }
+
+ off = needle.translated_addr - map->translated_addr;
+ addrs[i] = map->iova + off;
+
+ needle_last = int128_add(int128_make64(needle.translated_addr),
+ int128_make64(iovec[i].iov_len));
+ map_last = int128_make64(map->translated_addr + map->size);
+ if (unlikely(int128_gt(needle_last, map_last))) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Guest buffer expands over iova range");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void vhost_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
+ const struct iovec *iovec, size_t num,
+ bool more_descs, bool write)
+{
+ uint16_t i = svq->free_head, last = svq->free_head;
+ unsigned n;
+ uint16_t flags = write ? cpu_to_le16(VRING_DESC_F_WRITE) : 0;
+ vring_desc_t *descs = svq->vring.desc;
+
+ if (num == 0) {
+ return;
+ }
+
+ for (n = 0; n < num; n++) {
+ if (more_descs || (n + 1 < num)) {
+ descs[i].flags = flags | cpu_to_le16(VRING_DESC_F_NEXT);
+ } else {
+ descs[i].flags = flags;
+ }
+ descs[i].addr = cpu_to_le64(sg[n]);
+ descs[i].len = cpu_to_le32(iovec[n].iov_len);
+
+ last = i;
+ i = cpu_to_le16(descs[i].next);
+ }
+
+ svq->free_head = le16_to_cpu(descs[last].next);
+}
+
+static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
+ VirtQueueElement *elem, unsigned *head)
+{
+ unsigned avail_idx;
+ vring_avail_t *avail = svq->vring.avail;
+ bool ok;
+ g_autofree hwaddr *sgs = g_new(hwaddr, MAX(elem->out_num, elem->in_num));
+
+ *head = svq->free_head;
+
+ /* We need some descriptors here */
+ if (unlikely(!elem->out_num && !elem->in_num)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Guest provided element with no descriptors");
+ return false;
+ }
+
+ ok = vhost_svq_translate_addr(svq, sgs, elem->out_sg, elem->out_num);
+ if (unlikely(!ok)) {
+ return false;
+ }
+ vhost_vring_write_descs(svq, sgs, elem->out_sg, elem->out_num,
+ elem->in_num > 0, false);
+
+
+ ok = vhost_svq_translate_addr(svq, sgs, elem->in_sg, elem->in_num);
+ if (unlikely(!ok)) {
+ return false;
+ }
+
+ vhost_vring_write_descs(svq, sgs, elem->in_sg, elem->in_num, false, true);
+
+ /*
+ * Put the entry in the available array (but don't update avail->idx
+ * until the write barrier below publishes it).
+ */
+ avail_idx = svq->shadow_avail_idx & (svq->vring.num - 1);
+ avail->ring[avail_idx] = cpu_to_le16(*head);
+ svq->shadow_avail_idx++;
+
+ /* Update the avail index after writing the descriptor */
+ smp_wmb();
+ avail->idx = cpu_to_le16(svq->shadow_avail_idx);
+
+ return true;
+}
+
+static bool vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem)
+{
+ unsigned qemu_head;
+ bool ok = vhost_svq_add_split(svq, elem, &qemu_head);
+ if (unlikely(!ok)) {
+ return false;
+ }
+
+ svq->ring_id_maps[qemu_head] = elem;
+ return true;
+}
+
+static void vhost_svq_kick(VhostShadowVirtqueue *svq)
+{
+ /*
+ * We need to expose the available array entries before checking the used
+ * flags
+ */
+ smp_mb();
+ if (svq->vring.used->flags & VRING_USED_F_NO_NOTIFY) {
+ return;
+ }
+
+ event_notifier_set(&svq->hdev_kick);
+}
+
+/**
+ * Forward available buffers.
+ *
+ * @svq: Shadow VirtQueue
+ *
+ * Note that this function does not guarantee that all of the guest's
+ * available buffers are made available to the device in the SVQ avail ring.
+ * The guest may have exposed a GPA / GIOVA contiguous buffer, but it may not
+ * be contiguous in qemu's vaddr space.
+ *
+ * If that happens, guest's kick notifications will be disabled until the
+ * device uses some buffers.
+ */
+static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
+{
+ /* Clear event notifier */
+ event_notifier_test_and_clear(&svq->svq_kick);
+
+ /* Forward to the device as many available buffers as possible */
+ do {
+ virtio_queue_set_notification(svq->vq, false);
+
+ while (true) {
+ VirtQueueElement *elem;
+ bool ok;
+
+ if (svq->next_guest_avail_elem) {
+ elem = g_steal_pointer(&svq->next_guest_avail_elem);
+ } else {
+ elem = virtqueue_pop(svq->vq, sizeof(*elem));
+ }
+
+ if (!elem) {
+ break;
+ }
+
+ if (elem->out_num + elem->in_num > vhost_svq_available_slots(svq)) {
+ /*
+ * This condition is possible since a contiguous buffer in GPA does
+ * not imply a contiguous buffer in qemu's VA, so it may take more
+ * scatter-gather segments. If that happens, the buffer exposed to
+ * the device needs to be a chain of descriptors at this
+ * moment.
+ *
+ * SVQ cannot hold more available buffers if we are here:
+ * queue the current guest descriptor and ignore further kicks
+ * until some elements are used.
+ */
+ svq->next_guest_avail_elem = elem;
+ return;
+ }
+
+ ok = vhost_svq_add(svq, elem);
+ if (unlikely(!ok)) {
+ /* VQ is broken, just return and ignore any other kicks */
+ return;
+ }
+ vhost_svq_kick(svq);
+ }
+
+ virtio_queue_set_notification(svq->vq, true);
+ } while (!virtio_queue_empty(svq->vq));
+}
+
+/**
+ * Handle guest's kick.
+ *
+ * @n: guest kick event notifier, the one that the guest set to notify the SVQ.
+ */
+static void vhost_handle_guest_kick_notifier(EventNotifier *n)
+{
+ VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue, svq_kick);
+ event_notifier_test_and_clear(n);
+ vhost_handle_guest_kick(svq);
+}
+
+static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
+{
+ if (svq->last_used_idx != svq->shadow_used_idx) {
+ return true;
+ }
+
+ svq->shadow_used_idx = cpu_to_le16(svq->vring.used->idx);
+
+ return svq->last_used_idx != svq->shadow_used_idx;
+}
+
+/**
+ * Enable vhost device calls after disabling them.
+ *
+ * @svq: The svq
+ *
+ * It returns false if there are pending used buffers from the vhost device,
+ * avoiding possible races between the SVQ checking for more work and enabling
+ * callbacks. It returns true if the SVQ used vring has no more pending buffers.
+ */
+static bool vhost_svq_enable_notification(VhostShadowVirtqueue *svq)
+{
+ svq->vring.avail->flags &= ~cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
+ /* Make sure the flag is written before the read of used_idx */
+ smp_mb();
+ return !vhost_svq_more_used(svq);
+}
+
+static void vhost_svq_disable_notification(VhostShadowVirtqueue *svq)
+{
+ svq->vring.avail->flags |= cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
+}
+
+static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
+ uint32_t *len)
+{
+ vring_desc_t *descs = svq->vring.desc;
+ const vring_used_t *used = svq->vring.used;
+ vring_used_elem_t used_elem;
+ uint16_t last_used;
+
+ if (!vhost_svq_more_used(svq)) {
+ return NULL;
+ }
+
+ /* Only get used array entries after they have been exposed by dev */
+ smp_rmb();
+ last_used = svq->last_used_idx & (svq->vring.num - 1);
+ used_elem.id = le32_to_cpu(used->ring[last_used].id);
+ used_elem.len = le32_to_cpu(used->ring[last_used].len);
+
+ svq->last_used_idx++;
+ if (unlikely(used_elem.id >= svq->vring.num)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Device %s says index %u is used",
+ svq->vdev->name, used_elem.id);
+ return NULL;
+ }
+
+ if (unlikely(!svq->ring_id_maps[used_elem.id])) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Device %s says index %u is used, but it was not available",
+ svq->vdev->name, used_elem.id);
+ return NULL;
+ }
+
+ descs[used_elem.id].next = svq->free_head;
+ svq->free_head = used_elem.id;
+
+ *len = used_elem.len;
+ return g_steal_pointer(&svq->ring_id_maps[used_elem.id]);
+}
+
+static void vhost_svq_flush(VhostShadowVirtqueue *svq,
+ bool check_for_avail_queue)
+{
+ VirtQueue *vq = svq->vq;
+
+ /* Forward as many used buffers as possible. */
+ do {
+ unsigned i = 0;
+
+ vhost_svq_disable_notification(svq);
+ while (true) {
+ uint32_t len;
+ g_autofree VirtQueueElement *elem = vhost_svq_get_buf(svq, &len);
+ if (!elem) {
+ break;
+ }
+
+ if (unlikely(i >= svq->vring.num)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "More than %u used buffers obtained in a %u size SVQ",
+ i, svq->vring.num);
+ virtqueue_fill(vq, elem, len, i);
+ virtqueue_flush(vq, i);
+ return;
+ }
+ virtqueue_fill(vq, elem, len, i++);
+ }
+
+ virtqueue_flush(vq, i);
+ event_notifier_set(&svq->svq_call);
+
+ if (check_for_avail_queue && svq->next_guest_avail_elem) {
+ /*
+ * Avail ring was full when vhost_svq_flush was called, so it's a
+ * good moment to make more descriptors available if possible.
+ */
+ vhost_handle_guest_kick(svq);
+ }
+ } while (!vhost_svq_enable_notification(svq));
+}
+
+/**
+ * Forward used buffers.
+ *
+ * @n: hdev call event notifier, the one that the device set to notify the SVQ.
+ *
+ * Note that we do not make any buffers available in this loop, so there is no
+ * way it can run more than virtqueue-size times.
+ */
+static void vhost_svq_handle_call(EventNotifier *n)
+{
+ VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue,
+ hdev_call);
+ event_notifier_test_and_clear(n);
+ vhost_svq_flush(svq, true);
+}
+
+/**
+ * Set the call notifier for the SVQ to call the guest
+ *
+ * @svq: Shadow virtqueue
+ * @call_fd: call notifier
+ *
+ * Called on BQL context.
+ */
+void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd)
+{
+ if (call_fd == VHOST_FILE_UNBIND) {
+ /*
+ * Make event_notifier_set fail if called while handling a device call.
+ *
+ * SVQ still needs device notifications, since it needs to keep
+ * forwarding used buffers even with the unbind.
+ */
+ memset(&svq->svq_call, 0, sizeof(svq->svq_call));
+ } else {
+ event_notifier_init_fd(&svq->svq_call, call_fd);
+ }
+}
+
+/**
+ * Get the shadow vq vring address.
+ * @svq: Shadow virtqueue
+ * @addr: Destination to store address
+ */
+void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
+ struct vhost_vring_addr *addr)
+{
+ addr->desc_user_addr = (uint64_t)(uintptr_t)svq->vring.desc;
+ addr->avail_user_addr = (uint64_t)(uintptr_t)svq->vring.avail;
+ addr->used_user_addr = (uint64_t)(uintptr_t)svq->vring.used;
+}
+
+size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq)
+{
+ size_t desc_size = sizeof(vring_desc_t) * svq->vring.num;
+ size_t avail_size = offsetof(vring_avail_t, ring) +
+ sizeof(uint16_t) * svq->vring.num;
+
+ return ROUND_UP(desc_size + avail_size, qemu_real_host_page_size);
+}
+
+size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq)
+{
+ size_t used_size = offsetof(vring_used_t, ring) +
+ sizeof(vring_used_elem_t) * svq->vring.num;
+ return ROUND_UP(used_size, qemu_real_host_page_size);
+}
+
+/**
+ * Set a new file descriptor for the guest to kick the SVQ and notify for avail
+ *
+ * @svq: The svq
+ * @svq_kick_fd: The svq kick fd
+ *
+ * Note that the SVQ will never close the old file descriptor.
+ */
+void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd)
+{
+ EventNotifier *svq_kick = &svq->svq_kick;
+ bool poll_stop = VHOST_FILE_UNBIND != event_notifier_get_fd(svq_kick);
+ bool poll_start = svq_kick_fd != VHOST_FILE_UNBIND;
+
+ if (poll_stop) {
+ event_notifier_set_handler(svq_kick, NULL);
+ }
+
+ /*
+ * event_notifier_set_handler already checks for guest notifications that
+ * arrived at the new file descriptor during the switch, so there is no
+ * need to check for them explicitly.
+ */
+ if (poll_start) {
+ event_notifier_init_fd(svq_kick, svq_kick_fd);
+ event_notifier_set(svq_kick);
+ event_notifier_set_handler(svq_kick, vhost_handle_guest_kick_notifier);
+ }
+}
+
+/**
+ * Start the shadow virtqueue operation.
+ *
+ * @svq: Shadow Virtqueue
+ * @vdev: VirtIO device
+ * @vq: Virtqueue to shadow
+ */
+void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
+ VirtQueue *vq)
+{
+ size_t desc_size, driver_size, device_size;
+
+ svq->next_guest_avail_elem = NULL;
+ svq->shadow_avail_idx = 0;
+ svq->shadow_used_idx = 0;
+ svq->last_used_idx = 0;
+ svq->vdev = vdev;
+ svq->vq = vq;
+
+ svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
+ driver_size = vhost_svq_driver_area_size(svq);
+ device_size = vhost_svq_device_area_size(svq);
+ svq->vring.desc = qemu_memalign(qemu_real_host_page_size, driver_size);
+ desc_size = sizeof(vring_desc_t) * svq->vring.num;
+ svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
+ memset(svq->vring.desc, 0, driver_size);
+ svq->vring.used = qemu_memalign(qemu_real_host_page_size, device_size);
+ memset(svq->vring.used, 0, device_size);
+ svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num);
+ for (unsigned i = 0; i < svq->vring.num - 1; i++) {
+ svq->vring.desc[i].next = cpu_to_le16(i + 1);
+ }
+}
+
+/**
+ * Stop the shadow virtqueue operation.
+ * @svq: Shadow Virtqueue
+ */
+void vhost_svq_stop(VhostShadowVirtqueue *svq)
+{
+ event_notifier_set_handler(&svq->svq_kick, NULL);
+ g_autofree VirtQueueElement *next_avail_elem = NULL;
+
+ if (!svq->vq) {
+ return;
+ }
+
+ /* Send all pending used descriptors to guest */
+ vhost_svq_flush(svq, false);
+
+ for (unsigned i = 0; i < svq->vring.num; ++i) {
+ g_autofree VirtQueueElement *elem = NULL;
+ elem = g_steal_pointer(&svq->ring_id_maps[i]);
+ if (elem) {
+ virtqueue_detach_element(svq->vq, elem, 0);
+ }
+ }
+
+ next_avail_elem = g_steal_pointer(&svq->next_guest_avail_elem);
+ if (next_avail_elem) {
+ virtqueue_detach_element(svq->vq, next_avail_elem, 0);
+ }
+ svq->vq = NULL;
+ g_free(svq->ring_id_maps);
+ qemu_vfree(svq->vring.desc);
+ qemu_vfree(svq->vring.used);
+}
+
+/**
+ * Creates vhost shadow virtqueue, and instructs the vhost device to use the
+ * shadow methods and file descriptors.
+ *
+ * @iova_tree: Tree to perform descriptors translations
+ *
+ * Returns the new virtqueue or NULL.
+ *
+ * In case of error, the reason is reported through error_report.
+ */
+VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree)
+{
+ g_autofree VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);
+ int r;
+
+ r = event_notifier_init(&svq->hdev_kick, 0);
+ if (r != 0) {
+ error_report("Couldn't create kick event notifier: %s (%d)",
+ g_strerror(errno), errno);
+ goto err_init_hdev_kick;
+ }
+
+ r = event_notifier_init(&svq->hdev_call, 0);
+ if (r != 0) {
+ error_report("Couldn't create call event notifier: %s (%d)",
+ g_strerror(errno), errno);
+ goto err_init_hdev_call;
+ }
+
+ event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND);
+ event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
+ svq->iova_tree = iova_tree;
+ return g_steal_pointer(&svq);
+
+err_init_hdev_call:
+ event_notifier_cleanup(&svq->hdev_kick);
+
+err_init_hdev_kick:
+ return NULL;
+}
+
+/**
+ * Free the resources of the shadow virtqueue.
+ *
+ * @pvq: gpointer to SVQ so it can be used by autofree functions.
+ */
+void vhost_svq_free(gpointer pvq)
+{
+ VhostShadowVirtqueue *vq = pvq;
+ vhost_svq_stop(vq);
+ event_notifier_cleanup(&vq->hdev_kick);
+ event_notifier_set_handler(&vq->hdev_call, NULL);
+ event_notifier_cleanup(&vq->hdev_call);
+ g_free(vq);
+}
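
The correctness of the shadow ring hinges on barrier pairing: vhost_svq_add_split() writes the descriptors and the avail ring entry, issues smp_wmb(), and only then publishes avail->idx; vhost_svq_get_buf() reads used->idx (in vhost_svq_more_used()), issues smp_rmb(), and only then reads the used ring entry. A condensed, hedged sketch of that producer/consumer contract; the function names are illustrative, the fields follow the split vring layout used above:

/* Producer side, condensed from vhost_svq_add_split(). */
static void publish(VhostShadowVirtqueue *svq, uint16_t head)
{
    vring_avail_t *avail = svq->vring.avail;

    avail->ring[svq->shadow_avail_idx & (svq->vring.num - 1)] =
        cpu_to_le16(head);
    svq->shadow_avail_idx++;

    smp_wmb();  /* descriptors and ring entry before the index */
    avail->idx = cpu_to_le16(svq->shadow_avail_idx);
}

/* Consumer side, condensed from vhost_svq_get_buf(). */
static uint32_t consume_id(VhostShadowVirtqueue *svq)
{
    uint16_t last = svq->last_used_idx & (svq->vring.num - 1);

    smp_rmb();  /* used->idx (read earlier) before the ring entry */
    return le32_to_cpu(svq->vring.used->ring[last].id);
}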
diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
new file mode 100644
index 0000000..e5e24c5
--- /dev/null
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -0,0 +1,87 @@
+/*
+ * vhost shadow virtqueue
+ *
+ * SPDX-FileCopyrightText: Red Hat, Inc. 2021
+ * SPDX-FileContributor: Author: Eugenio Pérez <eperezma@redhat.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef VHOST_SHADOW_VIRTQUEUE_H
+#define VHOST_SHADOW_VIRTQUEUE_H
+
+#include "qemu/event_notifier.h"
+#include "hw/virtio/virtio.h"
+#include "standard-headers/linux/vhost_types.h"
+#include "hw/virtio/vhost-iova-tree.h"
+
+/* Shadow virtqueue to relay notifications */
+typedef struct VhostShadowVirtqueue {
+ /* Shadow vring */
+ struct vring vring;
+
+ /* Shadow kick notifier, sent to vhost */
+ EventNotifier hdev_kick;
+ /* Shadow call notifier, sent to vhost */
+ EventNotifier hdev_call;
+
+ /*
+ * Borrowed virtqueue's guest to host notifier. Borrowing it in this event
+ * notifier makes it easy to recover the VhostShadowVirtqueue from the event
+ * loop. If we used the VirtQueue's own notifier, we would have no easy way
+ * to retrieve the VhostShadowVirtqueue.
+ *
+ * So the shadow virtqueue must not clean it up, or we would lose the
+ * VirtQueue's one.
+ */
+ EventNotifier svq_kick;
+
+ /* Guest's call notifier, where the SVQ calls guest. */
+ EventNotifier svq_call;
+
+ /* Virtio queue shadowing */
+ VirtQueue *vq;
+
+ /* Virtio device */
+ VirtIODevice *vdev;
+
+ /* IOVA mapping */
+ VhostIOVATree *iova_tree;
+
+ /* Map to recover the guest's VirtQueueElements from SVQ descriptor ids */
+ VirtQueueElement **ring_id_maps;
+
+ /* Next VirtQueue element that guest made available */
+ VirtQueueElement *next_guest_avail_elem;
+
+ /* Next head to expose to the device */
+ uint16_t shadow_avail_idx;
+
+ /* Next free descriptor */
+ uint16_t free_head;
+
+ /* Last seen used idx */
+ uint16_t shadow_used_idx;
+
+ /* Next head to consume from the device */
+ uint16_t last_used_idx;
+} VhostShadowVirtqueue;
+
+bool vhost_svq_valid_features(uint64_t features, Error **errp);
+
+void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
+void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd);
+void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
+ struct vhost_vring_addr *addr);
+size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq);
+size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq);
+
+void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
+ VirtQueue *vq);
+void vhost_svq_stop(VhostShadowVirtqueue *svq);
+
+VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree);
+
+void vhost_svq_free(gpointer vq);
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostShadowVirtqueue, vhost_svq_free);
+
+#endif
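
Taken together, the API above implies a simple lifecycle: create the SVQ with the IOVA tree, start it against a VirtQueue, hand it the guest's kick/call file descriptors, then stop and free. A hedged sketch of that sequence; the helper and its fd parameters are illustrative:

/* Bring up one shadow virtqueue (sketch). */
static VhostShadowVirtqueue *svq_bringup(VhostIOVATree *iova_tree,
                                         VirtIODevice *vdev, VirtQueue *vq,
                                         int kick_fd, int call_fd)
{
    VhostShadowVirtqueue *svq = vhost_svq_new(iova_tree);

    if (!svq) {
        return NULL;  /* reason already printed via error_report */
    }
    vhost_svq_start(svq, vdev, vq);
    vhost_svq_set_svq_kick_fd(svq, kick_fd);
    vhost_svq_set_svq_call_fd(svq, call_fd);
    return svq;
}

/* Teardown mirrors it: vhost_svq_stop(svq); vhost_svq_free(svq); */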
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 6c67d5f..c5ed7a3 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -17,12 +17,14 @@
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
+#include "hw/virtio/vhost-shadow-virtqueue.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
#include "qemu-common.h"
+#include "qapi/error.h"
/*
* Return one past the end of the section. Be careful with uint64_t
@@ -207,6 +209,21 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
vaddr, section->readonly);
llsize = int128_sub(llend, int128_make64(iova));
+ if (v->shadow_vqs_enabled) {
+ DMAMap mem_region = {
+ .translated_addr = (hwaddr)(uintptr_t)vaddr,
+ .size = int128_get64(llsize) - 1,
+ .perm = IOMMU_ACCESS_FLAG(true, section->readonly),
+ };
+
+ int r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region);
+ if (unlikely(r != IOVA_OK)) {
+ error_report("Can't allocate a mapping (%d)", r);
+ goto fail;
+ }
+
+ iova = mem_region.iova;
+ }
vhost_vdpa_iotlb_batch_begin_once(v);
ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
@@ -259,6 +276,20 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
llsize = int128_sub(llend, int128_make64(iova));
+ if (v->shadow_vqs_enabled) {
+ const DMAMap *result;
+ const void *vaddr = memory_region_get_ram_ptr(section->mr) +
+ section->offset_within_region +
+ (iova - section->offset_within_address_space);
+ DMAMap mem_region = {
+ .translated_addr = (hwaddr)(uintptr_t)vaddr,
+ .size = int128_get64(llsize) - 1,
+ };
+
+ result = vhost_iova_tree_find_iova(v->iova_tree, &mem_region);
+ iova = result->iova;
+ vhost_iova_tree_remove(v->iova_tree, &mem_region);
+ }
vhost_vdpa_iotlb_batch_begin_once(v);
ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
if (ret) {
@@ -342,6 +373,55 @@ static bool vhost_vdpa_one_time_request(struct vhost_dev *dev)
return v->index != 0;
}
+static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
+ uint64_t *features)
+{
+ int ret;
+
+ ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
+ trace_vhost_vdpa_get_features(dev, *features);
+ return ret;
+}
+
+static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
+ Error **errp)
+{
+ g_autoptr(GPtrArray) shadow_vqs = NULL;
+ uint64_t dev_features, svq_features;
+ int r;
+ bool ok;
+
+ if (!v->shadow_vqs_enabled) {
+ return 0;
+ }
+
+ r = vhost_vdpa_get_dev_features(hdev, &dev_features);
+ if (r != 0) {
+ error_setg_errno(errp, -r, "Can't get vdpa device features");
+ return r;
+ }
+
+ svq_features = dev_features;
+ ok = vhost_svq_valid_features(svq_features, errp);
+ if (unlikely(!ok)) {
+ return -1;
+ }
+
+ shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
+ for (unsigned n = 0; n < hdev->nvqs; ++n) {
+ g_autoptr(VhostShadowVirtqueue) svq = vhost_svq_new(v->iova_tree);
+
+ if (unlikely(!svq)) {
+ error_setg(errp, "Cannot create svq %u", n);
+ return -1;
+ }
+ g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
+ }
+
+ v->shadow_vqs = g_steal_pointer(&shadow_vqs);
+ return 0;
+}
+
static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
struct vhost_vdpa *v;
@@ -364,6 +444,10 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
dev->opaque = opaque ;
v->listener = vhost_vdpa_memory_listener;
v->msg_type = VHOST_IOTLB_MSG_V2;
+ ret = vhost_vdpa_init_svq(dev, v, errp);
+ if (ret) {
+ goto err;
+ }
vhost_vdpa_get_iova_range(v);
@@ -375,6 +459,10 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
VIRTIO_CONFIG_S_DRIVER);
return 0;
+
+err:
+ ram_block_discard_disable(false);
+ return ret;
}
static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
@@ -445,8 +533,14 @@ static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
+ struct vhost_vdpa *v = dev->opaque;
int i;
+ if (v->shadow_vqs_enabled) {
+ /* FIXME SVQ is not compatible with host notifiers mr */
+ return;
+ }
+
for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
if (vhost_vdpa_host_notifier_init(dev, i)) {
goto err;
@@ -460,6 +554,21 @@ err:
return;
}
+static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
+{
+ struct vhost_vdpa *v = dev->opaque;
+ size_t idx;
+
+ if (!v->shadow_vqs) {
+ return;
+ }
+
+ for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
+ vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
+ }
+ g_ptr_array_free(v->shadow_vqs, true);
+}
+
static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
struct vhost_vdpa *v;
@@ -468,6 +577,7 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
trace_vhost_vdpa_cleanup(dev, v);
vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
memory_listener_unregister(&v->listener);
+ vhost_vdpa_svq_cleanup(dev);
dev->opaque = NULL;
ram_block_discard_disable(false);
@@ -510,12 +620,29 @@ static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
static int vhost_vdpa_set_features(struct vhost_dev *dev,
uint64_t features)
{
+ struct vhost_vdpa *v = dev->opaque;
int ret;
if (vhost_vdpa_one_time_request(dev)) {
return 0;
}
+ if (v->shadow_vqs_enabled) {
+ if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) {
+ /*
+ * QEMU is just trying to enable or disable logging. SVQ handles
+ * this separately, so there is no need to forward it.
+ */
+ v->acked_features = features;
+ return 0;
+ }
+
+ v->acked_features = features;
+
+ /* We must not ack _F_LOG if SVQ is enabled */
+ features &= ~BIT_ULL(VHOST_F_LOG_ALL);
+ }
+
trace_vhost_vdpa_set_features(dev, features);
ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
if (ret) {
@@ -559,11 +686,26 @@ static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
return ret;
}
+static void vhost_vdpa_reset_svq(struct vhost_vdpa *v)
+{
+ if (!v->shadow_vqs_enabled) {
+ return;
+ }
+
+ for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
+ VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
+ vhost_svq_stop(svq);
+ }
+}
+
static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
+ struct vhost_vdpa *v = dev->opaque;
int ret;
uint8_t status = 0;
+ vhost_vdpa_reset_svq(v);
+
ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
trace_vhost_vdpa_reset_device(dev, status);
return ret;
@@ -647,15 +789,311 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
return ret;
}
+static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
+ struct vhost_vring_state *ring)
+{
+ trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
+ return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
+}
+
+static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
+ struct vhost_vring_file *file)
+{
+ trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
+ return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
+}
+
+static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
+ struct vhost_vring_file *file)
+{
+ trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
+ return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
+}
+
+static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
+ struct vhost_vring_addr *addr)
+{
+ trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
+ addr->desc_user_addr, addr->used_user_addr,
+ addr->avail_user_addr,
+ addr->log_guest_addr);
+
+ return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
+
+}
+
+/**
+ * Set the shadow virtqueue descriptors to the device
+ *
+ * @dev: The vhost device model
+ * @svq: The shadow virtqueue
+ * @idx: The index of the virtqueue in the vhost device
+ * @errp: Error
+ *
+ * Note that this function does not rewind the kick file descriptor if it
+ * cannot set the call one.
+ */
+static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
+ VhostShadowVirtqueue *svq, unsigned idx,
+ Error **errp)
+{
+ struct vhost_vring_file file = {
+ .index = dev->vq_index + idx,
+ };
+ const EventNotifier *event_notifier = &svq->hdev_kick;
+ int r;
+
+ file.fd = event_notifier_get_fd(event_notifier);
+ r = vhost_vdpa_set_vring_dev_kick(dev, &file);
+ if (unlikely(r != 0)) {
+ error_setg_errno(errp, -r, "Can't set device kick fd");
+ return r;
+ }
+
+ event_notifier = &svq->hdev_call;
+ file.fd = event_notifier_get_fd(event_notifier);
+ r = vhost_vdpa_set_vring_dev_call(dev, &file);
+ if (unlikely(r != 0)) {
+ error_setg_errno(errp, -r, "Can't set device call fd");
+ }
+
+ return r;
+}
+
+/**
+ * Unmap a SVQ area in the device
+ */
+static bool vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v,
+ const DMAMap *needle)
+{
+ const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, needle);
+ hwaddr size;
+ int r;
+
+ if (unlikely(!result)) {
+ error_report("Unable to find SVQ address to unmap");
+ return false;
+ }
+
+ size = ROUND_UP(result->size, qemu_real_host_page_size);
+ r = vhost_vdpa_dma_unmap(v, result->iova, size);
+ return r == 0;
+}
+
+static bool vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
+ const VhostShadowVirtqueue *svq)
+{
+ DMAMap needle = {};
+ struct vhost_vdpa *v = dev->opaque;
+ struct vhost_vring_addr svq_addr;
+ bool ok;
+
+ vhost_svq_get_vring_addr(svq, &svq_addr);
+
+ needle.translated_addr = svq_addr.desc_user_addr;
+ ok = vhost_vdpa_svq_unmap_ring(v, &needle);
+ if (unlikely(!ok)) {
+ return false;
+ }
+
+ needle.translated_addr = svq_addr.used_user_addr;
+ return vhost_vdpa_svq_unmap_ring(v, &needle);
+}
+
+/**
+ * Map the SVQ area in the device
+ *
+ * @v: Vhost-vdpa device
+ * @needle: The area for which to search an iova
+ * @errp: Error pointer
+ */
+static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
+ Error **errp)
+{
+ int r;
+
+ r = vhost_iova_tree_map_alloc(v->iova_tree, needle);
+ if (unlikely(r != IOVA_OK)) {
+ error_setg(errp, "Cannot allocate iova (%d)", r);
+ return false;
+ }
+
+ r = vhost_vdpa_dma_map(v, needle->iova, needle->size + 1,
+ (void *)(uintptr_t)needle->translated_addr,
+ needle->perm == IOMMU_RO);
+ if (unlikely(r != 0)) {
+ error_setg_errno(errp, -r, "Cannot map region to device");
+ vhost_iova_tree_remove(v->iova_tree, needle);
+ }
+
+ return r == 0;
+}
+
+/**
+ * Map the shadow virtqueue rings in the device
+ *
+ * @dev: The vhost device
+ * @svq: The shadow virtqueue
+ * @addr: Assigned IOVA addresses
+ * @errp: Error pointer
+ */
+static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
+ const VhostShadowVirtqueue *svq,
+ struct vhost_vring_addr *addr,
+ Error **errp)
+{
+ DMAMap device_region, driver_region;
+ struct vhost_vring_addr svq_addr;
+ struct vhost_vdpa *v = dev->opaque;
+ size_t device_size = vhost_svq_device_area_size(svq);
+ size_t driver_size = vhost_svq_driver_area_size(svq);
+ size_t avail_offset;
+ bool ok;
+
+ ERRP_GUARD();
+ vhost_svq_get_vring_addr(svq, &svq_addr);
+
+ driver_region = (DMAMap) {
+ .translated_addr = svq_addr.desc_user_addr,
+ .size = driver_size - 1,
+ .perm = IOMMU_RO,
+ };
+ ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp);
+ if (unlikely(!ok)) {
+ error_prepend(errp, "Cannot create vq driver region: ");
+ return false;
+ }
+ addr->desc_user_addr = driver_region.iova;
+ avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
+ addr->avail_user_addr = driver_region.iova + avail_offset;
+
+ device_region = (DMAMap) {
+ .translated_addr = svq_addr.used_user_addr,
+ .size = device_size - 1,
+ .perm = IOMMU_RW,
+ };
+ ok = vhost_vdpa_svq_map_ring(v, &device_region, errp);
+ if (unlikely(!ok)) {
+ error_prepend(errp, "Cannot create vq device region: ");
+ vhost_vdpa_svq_unmap_ring(v, &driver_region);
+ }
+ addr->used_user_addr = device_region.iova;
+
+ return ok;
+}
+
+static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
+ VhostShadowVirtqueue *svq, unsigned idx,
+ Error **errp)
+{
+ uint16_t vq_index = dev->vq_index + idx;
+ struct vhost_vring_state s = {
+ .index = vq_index,
+ };
+ int r;
+
+ r = vhost_vdpa_set_dev_vring_base(dev, &s);
+ if (unlikely(r)) {
+ error_setg_errno(errp, -r, "Cannot set vring base");
+ return false;
+ }
+
+ r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp);
+ return r == 0;
+}
+
+static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
+{
+ struct vhost_vdpa *v = dev->opaque;
+ Error *err = NULL;
+ unsigned i;
+
+ if (!v->shadow_vqs) {
+ return true;
+ }
+
+ for (i = 0; i < v->shadow_vqs->len; ++i) {
+ VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
+ VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
+ struct vhost_vring_addr addr = {
+ .index = i,
+ };
+ int r;
+ bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
+ if (unlikely(!ok)) {
+ goto err;
+ }
+
+ vhost_svq_start(svq, dev->vdev, vq);
+ ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
+ if (unlikely(!ok)) {
+ goto err_map;
+ }
+
+ /* Override vring GPA set by vhost subsystem */
+ r = vhost_vdpa_set_vring_dev_addr(dev, &addr);
+ if (unlikely(r != 0)) {
+ error_setg_errno(&err, -r, "Cannot set device address");
+ goto err_set_addr;
+ }
+ }
+
+ return true;
+
+err_set_addr:
+ vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i));
+
+err_map:
+ vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i));
+
+err:
+ error_reportf_err(err, "Cannot setup SVQ %u: ", i);
+ for (unsigned j = 0; j < i; ++j) {
+ VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j);
+ vhost_vdpa_svq_unmap_rings(dev, svq);
+ vhost_svq_stop(svq);
+ }
+
+ return false;
+}
+
+static bool vhost_vdpa_svqs_stop(struct vhost_dev *dev)
+{
+ struct vhost_vdpa *v = dev->opaque;
+
+ if (!v->shadow_vqs) {
+ return true;
+ }
+
+ for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
+ VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
+ bool ok = vhost_vdpa_svq_unmap_rings(dev, svq);
+ if (unlikely(!ok)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
struct vhost_vdpa *v = dev->opaque;
+ bool ok;
trace_vhost_vdpa_dev_start(dev, started);
if (started) {
vhost_vdpa_host_notifiers_init(dev);
+ ok = vhost_vdpa_svqs_start(dev);
+ if (unlikely(!ok)) {
+ return -1;
+ }
vhost_vdpa_set_vring_ready(dev);
} else {
+ ok = vhost_vdpa_svqs_stop(dev);
+ if (unlikely(!ok)) {
+ return -1;
+ }
vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
}
@@ -679,7 +1117,8 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
struct vhost_log *log)
{
- if (vhost_vdpa_one_time_request(dev)) {
+ struct vhost_vdpa *v = dev->opaque;
+ if (v->shadow_vqs_enabled || vhost_vdpa_one_time_request(dev)) {
return 0;
}
@@ -691,11 +1130,17 @@ static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
struct vhost_vring_addr *addr)
{
- trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
- addr->desc_user_addr, addr->used_user_addr,
- addr->avail_user_addr,
- addr->log_guest_addr);
- return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
+ struct vhost_vdpa *v = dev->opaque;
+
+ if (v->shadow_vqs_enabled) {
+ /*
+ * Device vring addr was set at device start. SVQ base is handled by
+ * VirtQueue code.
+ */
+ return 0;
+ }
+
+ return vhost_vdpa_set_vring_dev_addr(dev, addr);
}
static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
@@ -708,15 +1153,41 @@ static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
struct vhost_vring_state *ring)
{
- trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
- return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
+ struct vhost_vdpa *v = dev->opaque;
+
+ if (v->shadow_vqs_enabled) {
+ /*
+ * Device vring base was set at device start. SVQ base is handled by
+ * VirtQueue code.
+ */
+ return 0;
+ }
+
+ return vhost_vdpa_set_dev_vring_base(dev, ring);
}
static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
struct vhost_vring_state *ring)
{
+ struct vhost_vdpa *v = dev->opaque;
int ret;
+ if (v->shadow_vqs_enabled) {
+ VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs,
+ ring->index);
+
+ /*
+ * Set the base as the last used idx, so the destination will see as
+ * available all the entries that the device did not use, including the
+ * in-flight ones still being processed.
+ *
+ * TODO: This is ok for networking, but other kinds of devices might
+ * have problems with these retransmissions.
+ */
+ ring->num = svq->last_used_idx;
+ return 0;
+ }
+
ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
return ret;
@@ -725,24 +1196,45 @@ static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
struct vhost_vring_file *file)
{
- trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
- return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
+ struct vhost_vdpa *v = dev->opaque;
+ int vdpa_idx = file->index - dev->vq_index;
+
+ if (v->shadow_vqs_enabled) {
+ VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
+ vhost_svq_set_svq_kick_fd(svq, file->fd);
+ return 0;
+ } else {
+ return vhost_vdpa_set_vring_dev_kick(dev, file);
+ }
}
static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
struct vhost_vring_file *file)
{
- trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
- return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
+ struct vhost_vdpa *v = dev->opaque;
+
+ if (v->shadow_vqs_enabled) {
+ int vdpa_idx = file->index - dev->vq_index;
+ VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
+
+ vhost_svq_set_svq_call_fd(svq, file->fd);
+ return 0;
+ } else {
+ return vhost_vdpa_set_vring_dev_call(dev, file);
+ }
}
static int vhost_vdpa_get_features(struct vhost_dev *dev,
uint64_t *features)
{
- int ret;
+ struct vhost_vdpa *v = dev->opaque;
+ int ret = vhost_vdpa_get_dev_features(dev, features);
+
+ if (ret == 0 && v->shadow_vqs_enabled) {
+ /* Add SVQ logging capabilities */
+ *features |= BIT_ULL(VHOST_F_LOG_ALL);
+ }
- ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
- trace_vhost_vdpa_get_features(dev, *features);
return ret;
}
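
The VHOST_F_LOG_ALL handling above is the crux of migration support with SVQ: the guest-facing feature set gains the bit (vhost_vdpa_get_features), while the device-facing set always drops it (vhost_vdpa_set_features), since dirty tracking is done by the shadow ring rather than the device. A minimal sketch of the two masks; the helper names are illustrative:

/* Guest-facing features: advertise logging on top of the device's set. */
static uint64_t svq_guest_features(uint64_t dev_features)
{
    return dev_features | BIT_ULL(VHOST_F_LOG_ALL);
}

/* Device-facing features: never ack logging to the device. */
static uint64_t svq_device_features(uint64_t acked_features)
{
    return acked_features & ~BIT_ULL(VHOST_F_LOG_ALL);
}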
diff --git a/include/hw/pci-host/pnv_phb4.h b/include/hw/pci-host/pnv_phb4.h
index fbcf5bf..b02ecdc 100644
--- a/include/hw/pci-host/pnv_phb4.h
+++ b/include/hw/pci-host/pnv_phb4.h
@@ -203,6 +203,7 @@ struct PnvPhb4PecClass {
const char *stk_compat;
int stk_compat_size;
uint64_t version;
+ const char *phb_type;
const uint32_t *num_phbs;
const char *rp_model;
};
@@ -211,6 +212,10 @@ struct PnvPhb4PecClass {
* POWER10 definitions
*/
+#define TYPE_PNV_PHB5 "pnv-phb5"
+#define PNV_PHB5(obj) \
+ OBJECT_CHECK(PnvPhb4, (obj), TYPE_PNV_PHB5)
+
#define PNV_PHB5_VERSION 0x000000a500000001ull
#define PNV_PHB5_DEVICE_ID 0x0652
diff --git a/include/hw/ppc/pnv.h b/include/hw/ppc/pnv.h
index 1e34ddd..86cb7d7 100644
--- a/include/hw/ppc/pnv.h
+++ b/include/hw/ppc/pnv.h
@@ -190,7 +190,6 @@ DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER10,
PowerPCCPU *pnv_chip_find_cpu(PnvChip *chip, uint32_t pir);
void pnv_phb_attach_root_port(PCIHostState *pci, const char *name);
-void pnv_chip_parent_fixup(PnvChip *chip, Object *obj, int index);
#define TYPE_PNV_MACHINE MACHINE_TYPE_NAME("powernv")
typedef struct PnvMachineClass PnvMachineClass;
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 3ce79a6..a29dbb3 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -12,6 +12,9 @@
#ifndef HW_VIRTIO_VHOST_VDPA_H
#define HW_VIRTIO_VHOST_VDPA_H
+#include <gmodule.h>
+
+#include "hw/virtio/vhost-iova-tree.h"
#include "hw/virtio/virtio.h"
#include "standard-headers/linux/vhost_types.h"
@@ -27,6 +30,11 @@ typedef struct vhost_vdpa {
bool iotlb_batch_begin_sent;
MemoryListener listener;
struct vhost_vdpa_iova_range iova_range;
+ uint64_t acked_features;
+ bool shadow_vqs_enabled;
+ /* IOVA mapping used by the Shadow Virtqueue */
+ VhostIOVATree *iova_tree;
+ GPtrArray *shadow_vqs;
struct vhost_dev *dev;
VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
} VhostVDPA;
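shadow_vqs above is a GPtrArray holding one VhostShadowVirtqueue per device queue, owned by the vhost_vdpa instance. A minimal GLib sketch of that ownership pattern, compilable against pkg-config glib-2.0 (VhostShadowVirtqueueSketch and svq_free are illustrative placeholders, not the real types):

#include <glib.h>

typedef struct VhostShadowVirtqueueSketch {
    int kick_fd;
    int call_fd;
} VhostShadowVirtqueueSketch;

static void svq_free(gpointer data)
{
    g_free(data);
}

int main(void)
{
    guint n_vqs = 2;
    /* One slot per queue; the free func runs on g_ptr_array_unref() */
    GPtrArray *shadow_vqs = g_ptr_array_new_full(n_vqs, svq_free);

    for (guint i = 0; i < n_vqs; i++) {
        g_ptr_array_add(shadow_vqs, g_new0(VhostShadowVirtqueueSketch, 1));
    }

    VhostShadowVirtqueueSketch *svq = g_ptr_array_index(shadow_vqs, 0);
    svq->kick_fd = -1;              /* e.g. not yet wired to the guest */

    g_ptr_array_unref(shadow_vqs);  /* frees every element via svq_free */
    return 0;
}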
diff --git a/include/qemu/iova-tree.h b/include/qemu/iova-tree.h
index 8249edd..c938fb0 100644
--- a/include/qemu/iova-tree.h
+++ b/include/qemu/iova-tree.h
@@ -29,6 +29,7 @@
#define IOVA_OK (0)
#define IOVA_ERR_INVALID (-1) /* Invalid parameters */
#define IOVA_ERR_OVERLAP (-2) /* IOVA range overlapped */
+#define IOVA_ERR_NOMEM (-3) /* Cannot allocate */
typedef struct IOVATree IOVATree;
typedef struct DMAMap {
@@ -82,7 +83,7 @@ int iova_tree_remove(IOVATree *tree, const DMAMap *map);
* @tree: the iova tree to search from
* @map: the mapping to search
*
- * Search for a mapping in the iova tree that overlaps with the
+ * Search for a mapping in the iova tree whose iova overlaps with the
* mapping range specified. Only the first found mapping will be
* returned.
*
@@ -95,6 +96,24 @@ int iova_tree_remove(IOVATree *tree, const DMAMap *map);
const DMAMap *iova_tree_find(const IOVATree *tree, const DMAMap *map);
/**
+ * iova_tree_find_iova:
+ *
+ * @tree: the iova tree to search from
+ * @map: the mapping to search
+ *
+ * Search for a mapping in the iova tree whose translated_addr overlaps with the
+ * mapping range specified. Only the first found mapping will be
+ * returned.
+ *
+ * Return: DMAMap pointer if found, or NULL if not found. Note that
+ * the returned DMAMap pointer is maintained internally. User should
+ * only read the content but never modify or free the content. Also,
+ * user is responsible to make sure the pointer is valid (say, no
+ * concurrent deletion in progress).
+ */
+const DMAMap *iova_tree_find_iova(const IOVATree *tree, const DMAMap *map);
+
+/**
* iova_tree_find_address:
*
* @tree: the iova tree to search from
@@ -120,6 +139,23 @@ const DMAMap *iova_tree_find_address(const IOVATree *tree, hwaddr iova);
void iova_tree_foreach(IOVATree *tree, iova_tree_iterator iterator);
/**
+ * iova_tree_alloc_map:
+ *
+ * @tree: the iova tree to allocate from
+ * @map: the new map (as translated addr & size) to allocate in the iova region
+ * @iova_begin: the minimum address of the allocation
+ * @iova_end: the maximum address allowed for the allocation
+ *
+ * Allocates a new region of a given size, between iova_begin and iova_end.
+ *
+ * Return: Same as iova_tree_insert, except that the new mapping cannot
+ * overlap and IOVA_ERR_NOMEM is returned if the tree has no free contiguous
+ * range large enough. On success the caller gets the assigned iova in
+ * map->iova.
+ */
+int iova_tree_alloc_map(IOVATree *tree, DMAMap *map, hwaddr iova_begin,
+ hwaddr iova_end);
+
+/**
* iova_tree_destroy:
*
* @tree: the iova tree to destroy
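Putting the two new entry points together, the intended SVQ flow is: allocate an iova for a host buffer with iova_tree_alloc_map, then translate back from the host address with iova_tree_find_iova when forwarding descriptors. A hedged usage sketch against the interface above, as it would look inside QEMU (the addresses and the 1 GiB limit are illustrative):

#include "qemu/osdep.h"
#include "qemu/iova-tree.h"

/* DMAMap.size follows the tree's inclusive convention (length - 1) */
static void svq_translation_sketch(void)
{
    IOVATree *tree = iova_tree_new();
    DMAMap map = {
        .translated_addr = 0x100000,    /* e.g. a host virtual address */
        .size = 4096 - 1,
        .perm = IOMMU_RW,
    };

    /* Let the tree pick a free iova below 1 GiB for this buffer */
    if (iova_tree_alloc_map(tree, &map, 0, (1ULL << 30) - 1) == IOVA_OK) {
        /* Later, recover the mapping from its translated address */
        const DMAMap needle = { .translated_addr = 0x100000, .size = 0 };
        const DMAMap *found = iova_tree_find_iova(tree, &needle);

        g_assert(found && found->iova == map.iova);
    }

    iova_tree_destroy(tree);
}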
diff --git a/pc-bios/edk2-aarch64-code.fd.bz2 b/pc-bios/edk2-aarch64-code.fd.bz2
index 5bf3114..0262f5b 100644
--- a/pc-bios/edk2-aarch64-code.fd.bz2
+++ b/pc-bios/edk2-aarch64-code.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-arm-code.fd.bz2 b/pc-bios/edk2-arm-code.fd.bz2
index 7a98069..4ca97b4 100644
--- a/pc-bios/edk2-arm-code.fd.bz2
+++ b/pc-bios/edk2-arm-code.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-i386-code.fd.bz2 b/pc-bios/edk2-i386-code.fd.bz2
index e7b1bef..6e02c9b 100644
--- a/pc-bios/edk2-i386-code.fd.bz2
+++ b/pc-bios/edk2-i386-code.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-i386-secure-code.fd.bz2 b/pc-bios/edk2-i386-secure-code.fd.bz2
index b5df5be..a4b1cc9 100644
--- a/pc-bios/edk2-i386-secure-code.fd.bz2
+++ b/pc-bios/edk2-i386-secure-code.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-x86_64-code.fd.bz2 b/pc-bios/edk2-x86_64-code.fd.bz2
index e1654d4..37bfb0d 100644
--- a/pc-bios/edk2-x86_64-code.fd.bz2
+++ b/pc-bios/edk2-x86_64-code.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-x86_64-microvm.fd.bz2 b/pc-bios/edk2-x86_64-microvm.fd.bz2
new file mode 100644
index 0000000..1d65c61
--- /dev/null
+++ b/pc-bios/edk2-x86_64-microvm.fd.bz2
Binary files differ
diff --git a/pc-bios/edk2-x86_64-secure-code.fd.bz2 b/pc-bios/edk2-x86_64-secure-code.fd.bz2
index 767274c..76dc6d5 100644
--- a/pc-bios/edk2-x86_64-secure-code.fd.bz2
+++ b/pc-bios/edk2-x86_64-secure-code.fd.bz2
Binary files differ
diff --git a/roms/Makefile.edk2 b/roms/Makefile.edk2
index fdae0b5..485f224 100644
--- a/roms/Makefile.edk2
+++ b/roms/Makefile.edk2
@@ -13,6 +13,7 @@
SHELL = /bin/bash
+target = RELEASE
toolchain = $(shell source ./edk2-funcs.sh && qemu_edk2_get_toolchain $(1))
licenses := \
@@ -32,6 +33,7 @@ flashdevs := \
i386-secure-code \
x86_64-code \
x86_64-secure-code \
+ x86_64-microvm \
\
arm-vars \
i386-vars
@@ -50,7 +52,7 @@ all: $(foreach flashdev,$(flashdevs),../pc-bios/edk2-$(flashdev).fd.bz2) \
# we're building from a tarball and that they've already been fetched by
# make-release/tarball scripts.
submodules:
- if test -d edk2/.git; then \
+ if test -e edk2/.git; then \
cd edk2 && git submodule update --init --force -- \
ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3 \
BaseTools/Source/C/BrotliCompress/brotli \
@@ -73,7 +75,7 @@ submodules:
-D NETWORK_TLS_ENABLE \
-D TPM2_ENABLE \
-D TPM2_CONFIG_ENABLE
- cp edk2/Build/ArmVirtQemu-AARCH64/DEBUG_$(call toolchain,aarch64)/FV/QEMU_EFI.fd \
+ cp edk2/Build/ArmVirtQemu-AARCH64/$(target)_$(call toolchain,aarch64)/FV/QEMU_EFI.fd \
$@
truncate --size=64M $@
@@ -87,7 +89,7 @@ submodules:
-D NETWORK_TLS_ENABLE \
-D TPM2_ENABLE \
-D TPM2_CONFIG_ENABLE
- cp edk2/Build/ArmVirtQemu-ARM/DEBUG_$(call toolchain,arm)/FV/QEMU_EFI.fd \
+ cp edk2/Build/ArmVirtQemu-ARM/$(target)_$(call toolchain,arm)/FV/QEMU_EFI.fd \
$@
truncate --size=64M $@
@@ -101,7 +103,7 @@ submodules:
-D NETWORK_TLS_ENABLE \
-D TPM_ENABLE \
-D TPM_CONFIG_ENABLE
- cp edk2/Build/OvmfIa32/DEBUG_$(call toolchain,i386)/FV/OVMF_CODE.fd $@
+ cp edk2/Build/OvmfIa32/$(target)_$(call toolchain,i386)/FV/OVMF_CODE.fd $@
../pc-bios/edk2-i386-secure-code.fd: submodules
+./edk2-build.sh \
@@ -115,7 +117,7 @@ submodules:
-D TPM_CONFIG_ENABLE \
-D SECURE_BOOT_ENABLE \
-D SMM_REQUIRE
- cp edk2/Build/OvmfIa32/DEBUG_$(call toolchain,i386)/FV/OVMF_CODE.fd $@
+ cp edk2/Build/OvmfIa32/$(target)_$(call toolchain,i386)/FV/OVMF_CODE.fd $@
../pc-bios/edk2-x86_64-code.fd: submodules
+./edk2-build.sh \
@@ -127,7 +129,7 @@ submodules:
-D NETWORK_TLS_ENABLE \
-D TPM_ENABLE \
-D TPM_CONFIG_ENABLE
- cp edk2/Build/OvmfX64/DEBUG_$(call toolchain,x86_64)/FV/OVMF_CODE.fd $@
+ cp edk2/Build/OvmfX64/$(target)_$(call toolchain,x86_64)/FV/OVMF_CODE.fd $@
../pc-bios/edk2-x86_64-secure-code.fd: submodules
+./edk2-build.sh \
@@ -142,15 +144,25 @@ submodules:
-D TPM_CONFIG_ENABLE \
-D SECURE_BOOT_ENABLE \
-D SMM_REQUIRE
- cp edk2/Build/Ovmf3264/DEBUG_$(call toolchain,x86_64)/FV/OVMF_CODE.fd $@
+ cp edk2/Build/Ovmf3264/$(target)_$(call toolchain,x86_64)/FV/OVMF_CODE.fd $@
+
+../pc-bios/edk2-x86_64-microvm.fd: submodules
+ +./edk2-build.sh \
+ x86_64 \
+ --arch=X64 \
+ --platform=OvmfPkg/Microvm/MicrovmX64.dsc \
+ -D NETWORK_IP6_ENABLE \
+ -D NETWORK_HTTP_BOOT_ENABLE \
+ -D NETWORK_TLS_ENABLE
+ cp edk2/Build/MicrovmX64/$(target)_$(call toolchain,x86_64)/FV/MICROVM.fd $@
../pc-bios/edk2-arm-vars.fd: ../pc-bios/edk2-arm-code.fd
- cp edk2/Build/ArmVirtQemu-ARM/DEBUG_$(call toolchain,arm)/FV/QEMU_VARS.fd \
+ cp edk2/Build/ArmVirtQemu-ARM/$(target)_$(call toolchain,arm)/FV/QEMU_VARS.fd \
$@
truncate --size=64M $@
../pc-bios/edk2-i386-vars.fd: ../pc-bios/edk2-i386-code.fd
- cp edk2/Build/OvmfIa32/DEBUG_$(call toolchain,i386)/FV/OVMF_VARS.fd $@
+ cp edk2/Build/OvmfIa32/$(target)_$(call toolchain,i386)/FV/OVMF_VARS.fd $@
# The license file accumulates several individual licenses from under edk2,
# prefixing each individual license with a header (generated by "tail") that
diff --git a/roms/edk2 b/roms/edk2
-Subproject 06dc822d045c2bb42e497487935485302486e15
+Subproject b24306f15daa2ff8510b06702114724b33895d3
diff --git a/roms/edk2-build.sh b/roms/edk2-build.sh
index d5391c7..ea79dc2 100755
--- a/roms/edk2-build.sh
+++ b/roms/edk2-build.sh
@@ -50,6 +50,6 @@ qemu_edk2_set_cross_env "$emulation_target"
build \
--cmd-len=65536 \
-n "$edk2_thread_count" \
- --buildtarget=DEBUG \
+ --buildtarget=RELEASE \
--tagname="$edk2_toolchain" \
"${args[@]}"
diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
index 67c38f0..5414fd6 100644
--- a/target/ppc/mmu-radix64.c
+++ b/target/ppc/mmu-radix64.c
@@ -204,7 +204,8 @@ static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
/* Check if requested access type is allowed */
need_prot = prot_for_access_type(access_type);
if (need_prot & ~*prot) { /* Page Protected for that Access */
- *fault_cause |= DSISR_PROTFAULT;
+ *fault_cause |= access_type == MMU_INST_FETCH ? SRR1_NOEXEC_GUARD :
+ DSISR_PROTFAULT;
return true;
}
diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
index 48a97b2..e67fbf2 100644
--- a/target/ppc/translate/vsx-impl.c.inc
+++ b/target/ppc/translate/vsx-impl.c.inc
@@ -1552,7 +1552,7 @@ static bool trans_XXSPLTW(DisasContext *ctx, arg_XX2_uim2 *a)
tofs = vsr_full_offset(a->xt);
bofs = vsr_full_offset(a->xb);
bofs += a->uim << MO_32;
-#ifndef HOST_WORDS_BIG_ENDIAN
+#ifndef HOST_WORDS_BIGENDIAN
bofs ^= 8 | 4;
#endif
diff --git a/tests/avocado/boot_linux_console.py b/tests/avocado/boot_linux_console.py
index 9c618d4..b40a3ab 100644
--- a/tests/avocado/boot_linux_console.py
+++ b/tests/avocado/boot_linux_console.py
@@ -1165,11 +1165,14 @@ class BootLinuxConsole(LinuxKernelTest):
:avocado: tags=arch:ppc64
:avocado: tags=machine:ppce500
:avocado: tags=cpu:e5500
+ :avocado: tags=accel:tcg
"""
+ self.require_accelerator("tcg")
tar_hash = '6951d86d644b302898da2fd701739c9406527fe1'
self.do_test_advcal_2018('19', tar_hash, 'uImage')
def do_test_ppc64_powernv(self, proc):
+ self.require_accelerator("tcg")
images_url = ('https://github.com/open-power/op-build/releases/download/v2.7/')
kernel_url = images_url + 'zImage.epapr'
@@ -1194,6 +1197,7 @@ class BootLinuxConsole(LinuxKernelTest):
"""
:avocado: tags=arch:ppc64
:avocado: tags=machine:powernv8
+ :avocado: tags=accel:tcg
"""
self.do_test_ppc64_powernv('P8')
@@ -1201,6 +1205,7 @@ class BootLinuxConsole(LinuxKernelTest):
"""
:avocado: tags=arch:ppc64
:avocado: tags=machine:powernv9
+ :avocado: tags=accel:tcg
"""
self.do_test_ppc64_powernv('P9')
@@ -1208,7 +1213,13 @@ class BootLinuxConsole(LinuxKernelTest):
"""
:avocado: tags=arch:ppc
:avocado: tags=machine:g3beige
+ :avocado: tags=accel:tcg
"""
+ # TODO: g3beige works with kvm_pr but we don't have a
+ # reliable way ATM (e.g. looking at /proc/modules) to detect
+ # whether we're running kvm_hv or kvm_pr. For now let's
+ # disable this test if we don't have TCG support.
+ self.require_accelerator("tcg")
tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc'
self.vm.add_args('-M', 'graphics=off')
self.do_test_advcal_2018('15', tar_hash, 'invaders.elf')
@@ -1217,7 +1228,13 @@ class BootLinuxConsole(LinuxKernelTest):
"""
:avocado: tags=arch:ppc
:avocado: tags=machine:mac99
+ :avocado: tags=accel:tcg
"""
+ # TODO: mac99 works with kvm_pr but we don't have a
+ # reliable way ATM (e.g. looking at /proc/modules) to detect
+ # whether we're running kvm_hv or kvm_pr. For now let's
+ # disable this test if we don't have TCG support.
+ self.require_accelerator("tcg")
tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc'
self.vm.add_args('-M', 'graphics=off')
self.do_test_advcal_2018('15', tar_hash, 'invaders.elf')
diff --git a/tests/avocado/ppc_405.py b/tests/avocado/ppc_405.py
index a47f89b..4e7e01a 100644
--- a/tests/avocado/ppc_405.py
+++ b/tests/avocado/ppc_405.py
@@ -25,18 +25,12 @@ class Ppc405Machine(QemuSystemTest):
wait_for_console_pattern(self, 'AMCC PPC405EP Evaluation Board')
exec_command_and_wait_for_pattern(self, 'reset', 'AMCC PowerPC 405EP')
- def test_ppc_taihu(self):
- """
- :avocado: tags=arch:ppc
- :avocado: tags=machine:taihu
- :avocado: tags=cpu:405ep
- """
- self.do_test_ppc405()
-
def test_ppc_ref405ep(self):
"""
:avocado: tags=arch:ppc
:avocado: tags=machine:ref405ep
:avocado: tags=cpu:405ep
+ :avocado: tags=accel:tcg
"""
+ self.require_accelerator("tcg")
self.do_test_ppc405()
diff --git a/tests/avocado/ppc_74xx.py b/tests/avocado/ppc_74xx.py
index 556a9a7..f54757c 100644
--- a/tests/avocado/ppc_74xx.py
+++ b/tests/avocado/ppc_74xx.py
@@ -11,6 +11,7 @@ from avocado_qemu import wait_for_console_pattern
class ppc74xxCpu(QemuSystemTest):
"""
:avocado: tags=arch:ppc
+ :avocado: tags=accel:tcg
"""
timeout = 5
@@ -18,6 +19,7 @@ class ppc74xxCpu(QemuSystemTest):
"""
:avocado: tags=cpu:7400
"""
+ self.require_accelerator("tcg")
self.vm.set_console()
self.vm.launch()
wait_for_console_pattern(self, '>> OpenBIOS')
@@ -27,6 +29,7 @@ class ppc74xxCpu(QemuSystemTest):
"""
:avocado: tags=cpu:7410
"""
+ self.require_accelerator("tcg")
self.vm.set_console()
self.vm.launch()
wait_for_console_pattern(self, '>> OpenBIOS')
@@ -36,6 +39,7 @@ class ppc74xxCpu(QemuSystemTest):
"""
:avocado: tags=cpu:7441
"""
+ self.require_accelerator("tcg")
self.vm.set_console()
self.vm.launch()
wait_for_console_pattern(self, '>> OpenBIOS')
@@ -45,6 +49,7 @@ class ppc74xxCpu(QemuSystemTest):
"""
:avocado: tags=cpu:7445
"""
+ self.require_accelerator("tcg")
self.vm.set_console()
self.vm.launch()
wait_for_console_pattern(self, '>> OpenBIOS')
@@ -54,6 +59,7 @@ class ppc74xxCpu(QemuSystemTest):
"""
:avocado: tags=cpu:7447
"""
+ self.require_accelerator("tcg")
self.vm.set_console()
self.vm.launch()
wait_for_console_pattern(self, '>> OpenBIOS')
@@ -63,6 +69,7 @@ class ppc74xxCpu(QemuSystemTest):
"""
:avocado: tags=cpu:7447a
"""
+ self.require_accelerator("tcg")
self.vm.set_console()
self.vm.launch()
wait_for_console_pattern(self, '>> OpenBIOS')
@@ -72,6 +79,7 @@ class ppc74xxCpu(QemuSystemTest):
"""
:avocado: tags=cpu:7448
"""
+ self.require_accelerator("tcg")
self.vm.set_console()
self.vm.launch()
wait_for_console_pattern(self, '>> OpenBIOS')
@@ -81,6 +89,7 @@ class ppc74xxCpu(QemuSystemTest):
"""
:avocado: tags=cpu:7450
"""
+ self.require_accelerator("tcg")
self.vm.set_console()
self.vm.launch()
wait_for_console_pattern(self, '>> OpenBIOS')
@@ -90,6 +99,7 @@ class ppc74xxCpu(QemuSystemTest):
"""
:avocado: tags=cpu:7451
"""
+ self.require_accelerator("tcg")
self.vm.set_console()
self.vm.launch()
wait_for_console_pattern(self, '>> OpenBIOS')
@@ -99,6 +109,7 @@ class ppc74xxCpu(QemuSystemTest):
"""
:avocado: tags=cpu:7455
"""
+ self.require_accelerator("tcg")
self.vm.set_console()
self.vm.launch()
wait_for_console_pattern(self, '>> OpenBIOS')
@@ -108,6 +119,7 @@ class ppc74xxCpu(QemuSystemTest):
"""
:avocado: tags=cpu:7457
"""
+ self.require_accelerator("tcg")
self.vm.set_console()
self.vm.launch()
wait_for_console_pattern(self, '>> OpenBIOS')
@@ -117,6 +129,7 @@ class ppc74xxCpu(QemuSystemTest):
"""
:avocado: tags=cpu:7457a
"""
+ self.require_accelerator("tcg")
self.vm.set_console()
self.vm.launch()
wait_for_console_pattern(self, '>> OpenBIOS')
diff --git a/tests/avocado/ppc_bamboo.py b/tests/avocado/ppc_bamboo.py
index 40629e3..102ff25 100644
--- a/tests/avocado/ppc_bamboo.py
+++ b/tests/avocado/ppc_bamboo.py
@@ -20,7 +20,9 @@ class BambooMachine(QemuSystemTest):
:avocado: tags=machine:bamboo
:avocado: tags=cpu:440epb
:avocado: tags=device:rtl8139
+ :avocado: tags=accel:tcg
"""
+ self.require_accelerator("tcg")
tar_url = ('http://landley.net/aboriginal/downloads/binaries/'
'system-image-powerpc-440fp.tar.gz')
tar_hash = '53e5f16414b195b82d2c70272f81c2eedb39bad9'
diff --git a/tests/avocado/ppc_mpc8544ds.py b/tests/avocado/ppc_mpc8544ds.py
index 886f967..8d6a749 100644
--- a/tests/avocado/ppc_mpc8544ds.py
+++ b/tests/avocado/ppc_mpc8544ds.py
@@ -19,7 +19,9 @@ class Mpc8544dsMachine(QemuSystemTest):
"""
:avocado: tags=arch:ppc
:avocado: tags=machine:mpc8544ds
+ :avocado: tags=accel:tcg
"""
+ self.require_accelerator("tcg")
tar_url = ('https://www.qemu-advent-calendar.org'
'/2020/download/day17.tar.gz')
tar_hash = '7a5239542a7c4257aa4d3b7f6ddf08fb6775c494'
diff --git a/tests/avocado/ppc_prep_40p.py b/tests/avocado/ppc_prep_40p.py
index 4bd9565..d4f1eb7 100644
--- a/tests/avocado/ppc_prep_40p.py
+++ b/tests/avocado/ppc_prep_40p.py
@@ -28,7 +28,9 @@ class IbmPrep40pMachine(QemuSystemTest):
:avocado: tags=machine:40p
:avocado: tags=os:netbsd
:avocado: tags=slowness:high
+ :avocado: tags=accel:tcg
"""
+ self.require_accelerator("tcg")
bios_url = ('http://ftpmirror.your.org/pub/misc/'
'ftp.software.ibm.com/rs6000/firmware/'
'7020-40p/P12H0456.IMG')
@@ -51,7 +53,9 @@ class IbmPrep40pMachine(QemuSystemTest):
"""
:avocado: tags=arch:ppc
:avocado: tags=machine:40p
+ :avocado: tags=accel:tcg
"""
+ self.require_accelerator("tcg")
self.vm.set_console()
self.vm.add_args('-m', '192') # test fw_cfg
@@ -65,7 +69,9 @@ class IbmPrep40pMachine(QemuSystemTest):
:avocado: tags=arch:ppc
:avocado: tags=machine:40p
:avocado: tags=os:netbsd
+ :avocado: tags=accel:tcg
"""
+ self.require_accelerator("tcg")
drive_url = ('https://archive.netbsd.org/pub/NetBSD-archive/'
'NetBSD-7.1.2/iso/NetBSD-7.1.2-prep.iso')
drive_hash = 'ac6fa2707d888b36d6fa64de6e7fe48e'
diff --git a/tests/avocado/ppc_virtex_ml507.py b/tests/avocado/ppc_virtex_ml507.py
index a6912ee..6b07686 100644
--- a/tests/avocado/ppc_virtex_ml507.py
+++ b/tests/avocado/ppc_virtex_ml507.py
@@ -19,7 +19,9 @@ class VirtexMl507Machine(QemuSystemTest):
"""
:avocado: tags=arch:ppc
:avocado: tags=machine:virtex-ml507
+ :avocado: tags=accel:tcg
"""
+ self.require_accelerator("tcg")
tar_url = ('https://www.qemu-advent-calendar.org'
'/2020/download/hippo.tar.gz')
tar_hash = '306b95bfe7d147f125aa176a877e266db8ef914a'
diff --git a/tests/avocado/replay_kernel.py b/tests/avocado/replay_kernel.py
index c68a953..0b2b0dc 100644
--- a/tests/avocado/replay_kernel.py
+++ b/tests/avocado/replay_kernel.py
@@ -36,6 +36,9 @@ class ReplayKernelBase(LinuxKernelTest):
def run_vm(self, kernel_path, kernel_command_line, console_pattern,
record, shift, args, replay_path):
+ # icount requires TCG to be available
+ self.require_accelerator('tcg')
+
logger = logging.getLogger('replay')
start_time = time.time()
vm = self.get_vm()
@@ -243,6 +246,7 @@ class ReplayKernelNormal(ReplayKernelBase):
"""
:avocado: tags=arch:ppc64
:avocado: tags=machine:pseries
+ :avocado: tags=accel:tcg
"""
kernel_url = ('https://archives.fedoraproject.org/pub/archive'
'/fedora-secondary/releases/29/Everything/ppc64le/os'
diff --git a/tests/data/acpi/virt/SSDT.memhp b/tests/data/acpi/virt/SSDT.memhp
index 375d7b6..4c363a6 100644
--- a/tests/data/acpi/virt/SSDT.memhp
+++ b/tests/data/acpi/virt/SSDT.memhp
Binary files differ
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
index 7d8c74f..d25f82b 100644
--- a/tests/qtest/meson.build
+++ b/tests/qtest/meson.build
@@ -160,7 +160,9 @@ qtests_ppc = \
(slirp.found() ? ['test-netfilter'] : []) + \
(config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) + \
(config_all_devices.has_key('CONFIG_M48T59') ? ['m48t59-test'] : []) + \
- ['boot-order-test', 'prom-env-test', 'boot-serial-test'] \
+ (config_all_devices.has_key('CONFIG_TCG') ? ['prom-env-test'] : []) + \
+ (config_all_devices.has_key('CONFIG_TCG') ? ['boot-serial-test'] : []) + \
+ ['boot-order-test']
qtests_ppc64 = \
qtests_ppc + \
diff --git a/tests/tcg/ppc64/Makefile.target b/tests/tcg/ppc64/Makefile.target
index c949805..8197c28 100644
--- a/tests/tcg/ppc64/Makefile.target
+++ b/tests/tcg/ppc64/Makefile.target
@@ -27,5 +27,6 @@ run-sha512-vector: QEMU_OPTS+=-cpu POWER10
run-plugin-sha512-vector-with-%: QEMU_OPTS+=-cpu POWER10
PPC64_TESTS += signal_save_restore_xer
+PPC64_TESTS += xxspltw
TESTS += $(PPC64_TESTS)
diff --git a/tests/tcg/ppc64le/Makefile.target b/tests/tcg/ppc64le/Makefile.target
index 12d85e9..9624bb1 100644
--- a/tests/tcg/ppc64le/Makefile.target
+++ b/tests/tcg/ppc64le/Makefile.target
@@ -25,5 +25,6 @@ run-plugin-sha512-vector-with-%: QEMU_OPTS+=-cpu POWER10
PPC64LE_TESTS += mtfsf
PPC64LE_TESTS += signal_save_restore_xer
+PPC64LE_TESTS += xxspltw
TESTS += $(PPC64LE_TESTS)
diff --git a/tests/tcg/ppc64le/xxspltw.c b/tests/tcg/ppc64le/xxspltw.c
new file mode 100644
index 0000000..4cff78b
--- /dev/null
+++ b/tests/tcg/ppc64le/xxspltw.c
@@ -0,0 +1,46 @@
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <assert.h>
+
+#define WORD_A 0xAAAAAAAAUL
+#define WORD_B 0xBBBBBBBBUL
+#define WORD_C 0xCCCCCCCCUL
+#define WORD_D 0xDDDDDDDDUL
+
+#define DWORD_HI (WORD_A << 32 | WORD_B)
+#define DWORD_LO (WORD_C << 32 | WORD_D)
+
+#define TEST(HI, LO, UIM, RES) \
+ do { \
+ union { \
+ uint64_t u; \
+ double f; \
+ } h = { .u = HI }, l = { .u = LO }; \
+ /* \
+ * Use a pair of FPRs to load the VSR avoiding insns \
+ * newer than xxswapd. \
+ */ \
+ asm("xxmrghd 32, %0, %1\n\t" \
+ "xxspltw 32, 32, %2\n\t" \
+ "xxmrghd %0, 32, %0\n\t" \
+ "xxswapd 32, 32\n\t" \
+ "xxmrghd %1, 32, %1\n\t" \
+ : "+f" (h.f), "+f" (l.f) \
+ : "i" (UIM) \
+ : "v0"); \
+ printf("xxspltw(0x%016" PRIx64 "%016" PRIx64 ", %d) =" \
+ " %016" PRIx64 "%016" PRIx64 "\n", HI, LO, UIM, \
+ h.u, l.u); \
+ assert(h.u == (RES)); \
+ assert(l.u == (RES)); \
+ } while (0)
+
+int main(void)
+{
+ TEST(DWORD_HI, DWORD_LO, 0, WORD_A << 32 | WORD_A);
+ TEST(DWORD_HI, DWORD_LO, 1, WORD_B << 32 | WORD_B);
+ TEST(DWORD_HI, DWORD_LO, 2, WORD_C << 32 | WORD_C);
+ TEST(DWORD_HI, DWORD_LO, 3, WORD_D << 32 | WORD_D);
+ return 0;
+}
diff --git a/util/iova-tree.c b/util/iova-tree.c
index 23ea35b..6dff29c 100644
--- a/util/iova-tree.c
+++ b/util/iova-tree.c
@@ -16,6 +16,45 @@ struct IOVATree {
GTree *tree;
};
+/* Args to pass to the iova_tree_alloc_map foreach function. */
+struct IOVATreeAllocArgs {
+ /* Size of the desired allocation */
+ size_t new_size;
+
+ /* The minimum address allowed in the allocation */
+ hwaddr iova_begin;
+
+ /* Map at the left of the hole, can be NULL if "this" is the first one */
+ const DMAMap *prev;
+
+ /* Map at the right of the hole, can be NULL if "prev" is the last one */
+ const DMAMap *this;
+
+ /* If found, we fill in the IOVA here */
+ hwaddr iova_result;
+
+ /* Whether we have found a valid IOVA */
+ bool iova_found;
+};
+
+typedef struct IOVATreeFindIOVAArgs {
+ const DMAMap *needle;
+ const DMAMap *result;
+} IOVATreeFindIOVAArgs;
+
+/**
+ * Iterate args to the next hole
+ *
+ * @args: The alloc arguments
+ * @next: The next mapping in the tree. Can be NULL to signal the last one
+ */
+static void iova_tree_alloc_args_iterate(struct IOVATreeAllocArgs *args,
+ const DMAMap *next)
+{
+ args->prev = args->this;
+ args->this = next;
+}
+
static int iova_tree_compare(gconstpointer a, gconstpointer b, gpointer data)
{
const DMAMap *m1 = a, *m2 = b;
@@ -47,6 +86,35 @@ const DMAMap *iova_tree_find(const IOVATree *tree, const DMAMap *map)
return g_tree_lookup(tree->tree, map);
}
+static gboolean iova_tree_find_address_iterator(gpointer key, gpointer value,
+ gpointer data)
+{
+ const DMAMap *map = key;
+ IOVATreeFindIOVAArgs *args = data;
+ const DMAMap *needle;
+
+ g_assert(key == value);
+
+ needle = args->needle;
+ if (map->translated_addr + map->size < needle->translated_addr ||
+ needle->translated_addr + needle->size < map->translated_addr) {
+ return false;
+ }
+
+ args->result = map;
+ return true;
+}
+
+const DMAMap *iova_tree_find_iova(const IOVATree *tree, const DMAMap *map)
+{
+ IOVATreeFindIOVAArgs args = {
+ .needle = map,
+ };
+
+ g_tree_foreach(tree->tree, iova_tree_find_address_iterator, &args);
+ return args.result;
+}
+
const DMAMap *iova_tree_find_address(const IOVATree *tree, hwaddr iova)
{
const DMAMap map = { .iova = iova, .size = 0 };
@@ -107,6 +175,108 @@ int iova_tree_remove(IOVATree *tree, const DMAMap *map)
return IOVA_OK;
}
+/**
+ * Try to find an unallocated IOVA range between prev and this elements.
+ *
+ * @args: Arguments to allocation
+ *
+ * Cases:
+ *
+ * (1) !prev, !this: No entries allocated, always succeed
+ *
+ * (2) !prev, this: We're iterating at the 1st element.
+ *
+ * (3) prev, !this: We're iterating at the last element.
+ *
+ * (4) prev, this: The most common case, we'll try to find a hole between
+ * the "prev" and "this" mappings.
+ *
+ * Note that this function assumes the last valid iova is HWADDR_MAX, but it
+ * searches linearly, so the caller can easily discard the result if that is
+ * not the case.
+ */
+static void iova_tree_alloc_map_in_hole(struct IOVATreeAllocArgs *args)
+{
+ const DMAMap *prev = args->prev, *this = args->this;
+ uint64_t hole_start, hole_last;
+
+ if (this && this->iova + this->size < args->iova_begin) {
+ return;
+ }
+
+ hole_start = MAX(prev ? prev->iova + prev->size + 1 : 0, args->iova_begin);
+ hole_last = this ? this->iova : HWADDR_MAX;
+
+ if (hole_last - hole_start > args->new_size) {
+ args->iova_result = hole_start;
+ args->iova_found = true;
+ }
+}
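To make the arithmetic above concrete: with the tree's inclusive size convention, the hole must hold new_size + 1 bytes, which is exactly what the strict '>' against hole_last - hole_start guarantees. A standalone worked example (hole_fits is a hypothetical distillation; prev_last stands for prev->iova + prev->size):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool hole_fits(uint64_t prev_last, uint64_t this_first,
                      uint64_t iova_begin, uint64_t new_size)
{
    /* Same computation as above: first candidate address in the hole */
    uint64_t hole_start = prev_last + 1 > iova_begin ? prev_last + 1
                                                     : iova_begin;
    return this_first - hole_start > new_size;
}

int main(void)
{
    /* Exact fit: hole [0x1000, 0x1fff] holds a 0x1000-byte (size 0xfff) map */
    assert(hole_fits(0xfff, 0x2000, 0, 0xfff));
    /* One byte short: hole [0x1000, 0x1ffe] cannot */
    assert(!hole_fits(0xfff, 0x1fff, 0, 0xfff));
    /* iova_begin pushes the hole start up and can disqualify it */
    assert(!hole_fits(0xfff, 0x2000, 0x1800, 0xfff));
    return 0;
}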
+
+/**
+ * For each DMA node in the tree, check whether there is a hole between the
+ * node and its previous one (or the minimum allowed iova address).
+ *
+ * @key: Node iterating
+ * @value: Node iterating
+ * @pargs: Struct to communicate with the outside world
+ *
+ * Return: false to keep iterating, true to stop the traversal.
+ */
+static gboolean iova_tree_alloc_traverse(gpointer key, gpointer value,
+ gpointer pargs)
+{
+ struct IOVATreeAllocArgs *args = pargs;
+ DMAMap *node = value;
+
+ assert(key == value);
+
+ iova_tree_alloc_args_iterate(args, node);
+ iova_tree_alloc_map_in_hole(args);
+ return args->iova_found;
+}
+
+int iova_tree_alloc_map(IOVATree *tree, DMAMap *map, hwaddr iova_begin,
+ hwaddr iova_last)
+{
+ struct IOVATreeAllocArgs args = {
+ .new_size = map->size,
+ .iova_begin = iova_begin,
+ };
+
+ if (unlikely(iova_last < iova_begin)) {
+ return IOVA_ERR_INVALID;
+ }
+
+ /*
+ * Find a valid hole for the mapping
+ *
+ * Assuming a low iova_begin, so there is no need to do a binary search
+ * to locate the first node.
+ *
+ * TODO: Replace all this with g_tree_node_first/next/last when available
+ * (in glib since 2.68). Doing it with g_tree_foreach complicates the
+ * code a lot.
+ */
+ g_tree_foreach(tree->tree, iova_tree_alloc_traverse, &args);
+ if (!args.iova_found) {
+ /*
+ * Either the tree is empty or the last hole has not been checked yet.
+ * g_tree_foreach does not cover the (last, iova_last] range, so we
+ * check it here.
+ */
+ iova_tree_alloc_args_iterate(&args, NULL);
+ iova_tree_alloc_map_in_hole(&args);
+ }
+
+ if (!args.iova_found || args.iova_result + map->size > iova_last) {
+ return IOVA_ERR_NOMEM;
+ }
+
+ map->iova = args.iova_result;
+ return iova_tree_insert(tree, map);
+}
+
void iova_tree_destroy(IOVATree *tree)
{
g_tree_destroy(tree->tree);