author    Peter Maydell <peter.maydell@linaro.org>    2024-02-14 15:45:52 +0000
committer Peter Maydell <peter.maydell@linaro.org>    2024-02-14 15:45:52 +0000
commit    5767815218efd3cbfd409505ed824d5f356044ae
tree      3451522a0e9f8fc03fa5d0af14cdf1ff259a2ab0 /hw
parent    708322660e15e83a37fb6deb8470209307ef43a2
parent    1dd6954c3f5c5c610cf94b6f740118e565957293
Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging
virtio,pc,pci: features, cleanups, fixes

vhost-user-snd support
x2APIC mode with TCG support
CXL update to r3.1

fixes, cleanups all over the place.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmXMoXUPHG1zdEByZWRo
# YXQuY29tAAoJECgfDbjSjVRpFtMIAKUKD0hzJrwOyPo4xsRUMbsB3ehIsJsMKfOK
# w+JWzTaojAG8ENPelWBdL2sEIs5U73VOchjLqHbH2m5sz6GJ13214amvdU/fYc8+
# /dU2ZKoAmaR5L1ovKO/fq07y/J6DrITZ5tosy2i84Xa8EnsL4j3wEPNVWsDi7dna
# mvXUICSOOoJQ4O2YhSruKCQ8qIgF1/0Oi3u/rcrW3alSs8VQlrtQXxl6k+LbYqek
# +Fytco3jMRHPvQ+GYUIwGuHjN15ghArcvbsV0GIa+24BPY5h7YbDYGbfasePT5OK
# zDz51jitkoyDrQr+OzwOEe/X5+dVGhayRXfMtU5Qm53IE3y61qc=
# =K4b1
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 14 Feb 2024 11:18:13 GMT
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17 0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA 8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu: (60 commits)
  MAINTAINERS: Switch to my Enfabrica email
  virtio-gpu-rutabaga.c: override resource_destroy method
  virtio-gpu.c: add resource_destroy class method
  hw/display/virtio-gpu.c: use reset_bh class method
  hw/smbios: Fix port connector option validation
  hw/smbios: Fix OEM strings table option validation
  virtio-gpu: Correct virgl_renderer_resource_get_info() error check
  hw/cxl: Standardize all references on CXL r3.1 and minor updates
  hw/cxl: Update mailbox status registers.
  hw/cxl: Update RAS Capability Definitions for version 3.
  hw/cxl: Update link register definitions.
  hw/cxl: Update HDM Decoder capability to version 3
  tests/acpi: Update DSDT.cxl to reflect change _STA return value.
  hw/i386: Fix _STA return value for ACPI0017
  tests/acpi: Allow update of DSDT.cxl
  hw/mem/cxl_type3: Fix potential divide by zero reported by coverity
  hw/cxl: Pass NULL for a NULL MemoryRegionOps
  hw/cxl: Pass CXLComponentState to cache_mem_ops
  hw/cxl/device: read from register values in mdev_reg_read()
  hw/cxl/mbox: Remove dead code
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'hw')
-rw-r--r--  hw/arm/smmu-common.c | 2
-rw-r--r--  hw/block/fdc-internal.h | 4
-rw-r--r--  hw/block/fdc-isa.c | 18
-rw-r--r--  hw/block/fdc-sysbus.c | 6
-rw-r--r--  hw/char/parallel-isa.c | 14
-rw-r--r--  hw/char/parallel.c | 2
-rw-r--r--  hw/char/serial-isa.c | 14
-rw-r--r--  hw/cxl/cxl-cdat.c | 11
-rw-r--r--  hw/cxl/cxl-component-utils.c | 33
-rw-r--r--  hw/cxl/cxl-device-utils.c | 31
-rw-r--r--  hw/cxl/cxl-events.c | 2
-rw-r--r--  hw/cxl/cxl-mailbox-utils.c | 92
-rw-r--r--  hw/display/virtio-gpu-rutabaga.c | 47
-rw-r--r--  hw/display/virtio-gpu-virgl.c | 2
-rw-r--r--  hw/display/virtio-gpu.c | 27
-rw-r--r--  hw/i386/acpi-build.c | 107
-rw-r--r--  hw/i386/amd_iommu-stub.c | 26
-rw-r--r--  hw/i386/amd_iommu.c | 29
-rw-r--r--  hw/i386/amd_iommu.h | 16
-rw-r--r--  hw/i386/intel_iommu.c | 6
-rw-r--r--  hw/i386/kvm/apic.c | 3
-rw-r--r--  hw/i386/meson.build | 3
-rw-r--r--  hw/i386/x86.c | 14
-rw-r--r--  hw/i386/xen/xen_apic.c | 3
-rw-r--r--  hw/input/meson.build | 1
-rw-r--r--  hw/input/vhost-user-input.c | 136
-rw-r--r--  hw/intc/apic.c | 471
-rw-r--r--  hw/intc/apic_common.c | 35
-rw-r--r--  hw/intc/trace-events | 4
-rw-r--r--  hw/isa/vt82c686.c | 65
-rw-r--r--  hw/mem/cxl_type3.c | 67
-rw-r--r--  hw/pci-bridge/cxl_downstream.c | 4
-rw-r--r--  hw/pci-bridge/cxl_root_port.c | 4
-rw-r--r--  hw/pci-bridge/cxl_upstream.c | 10
-rw-r--r--  hw/ppc/pegasos2.c | 12
-rw-r--r--  hw/virtio/Kconfig | 5
-rw-r--r--  hw/virtio/meson.build | 27
-rw-r--r--  hw/virtio/vhost-user-base.c | 371
-rw-r--r--  hw/virtio/vhost-user-device-pci.c | 13
-rw-r--r--  hw/virtio/vhost-user-device.c | 338
-rw-r--r--  hw/virtio/vhost-user-gpio.c | 407
-rw-r--r--  hw/virtio/vhost-user-i2c.c | 272
-rw-r--r--  hw/virtio/vhost-user-input-pci.c | 3
-rw-r--r--  hw/virtio/vhost-user-input.c | 58
-rw-r--r--  hw/virtio/vhost-user-rng.c | 294
-rw-r--r--  hw/virtio/vhost-user-snd-pci.c | 75
-rw-r--r--  hw/virtio/vhost-user-snd.c | 67
-rw-r--r--  hw/virtio/virtio-iommu.c | 4
48 files changed, 1515 insertions, 1740 deletions
diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
index 9a8ac45..f58261b 100644
--- a/hw/arm/smmu-common.c
+++ b/hw/arm/smmu-common.c
@@ -675,6 +675,8 @@ static void smmu_base_reset_hold(Object *obj)
{
SMMUState *s = ARM_SMMU(obj);
+ memset(s->smmu_pcibus_by_bus_num, 0, sizeof(s->smmu_pcibus_by_bus_num));
+
g_hash_table_remove_all(s->configs);
g_hash_table_remove_all(s->iotlb);
}
diff --git a/hw/block/fdc-internal.h b/hw/block/fdc-internal.h
index 036392e..e219623 100644
--- a/hw/block/fdc-internal.h
+++ b/hw/block/fdc-internal.h
@@ -25,8 +25,6 @@
#ifndef HW_BLOCK_FDC_INTERNAL_H
#define HW_BLOCK_FDC_INTERNAL_H
-#include "exec/memory.h"
-#include "exec/ioport.h"
#include "hw/block/block.h"
#include "hw/block/fdc.h"
#include "qapi/qapi-types-block.h"
@@ -92,7 +90,6 @@ typedef struct FDrive {
} FDrive;
struct FDCtrl {
- MemoryRegion iomem;
qemu_irq irq;
/* Controller state */
QEMUTimer *result_timer;
@@ -140,7 +137,6 @@ struct FDCtrl {
/* Timers state */
uint8_t timer0;
uint8_t timer1;
- PortioList portio_list;
};
extern const FDFormat fd_formats[];
diff --git a/hw/block/fdc-isa.c b/hw/block/fdc-isa.c
index ad0921c..e43dc53 100644
--- a/hw/block/fdc-isa.c
+++ b/hw/block/fdc-isa.c
@@ -42,6 +42,7 @@
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
+#include "exec/ioport.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
@@ -60,6 +61,7 @@ struct FDCtrlISABus {
uint32_t irq;
uint32_t dma;
struct FDCtrl state;
+ PortioList portio_list;
int32_t bootindexA;
int32_t bootindexB;
};
@@ -91,7 +93,7 @@ static void isabus_fdc_realize(DeviceState *dev, Error **errp)
FDCtrl *fdctrl = &isa->state;
Error *err = NULL;
- isa_register_portio_list(isadev, &fdctrl->portio_list,
+ isa_register_portio_list(isadev, &isa->portio_list,
isa->iobase, fdc_portio_list, fdctrl,
"fdc");
@@ -190,6 +192,20 @@ static Aml *build_fdinfo_aml(int idx, FloppyDriveType type)
return dev;
}
+void isa_fdc_set_iobase(ISADevice *fdc, hwaddr iobase)
+{
+ FDCtrlISABus *isa = ISA_FDC(fdc);
+
+ fdc->ioport_id = iobase;
+ isa->iobase = iobase;
+ portio_list_set_address(&isa->portio_list, isa->iobase);
+}
+
+void isa_fdc_set_enabled(ISADevice *fdc, bool enabled)
+{
+ portio_list_set_enabled(&ISA_FDC(fdc)->portio_list, enabled);
+}
+
int cmos_get_fd_drive_type(FloppyDriveType fd0)
{
int val;
diff --git a/hw/block/fdc-sysbus.c b/hw/block/fdc-sysbus.c
index 266bc4d..035bc08 100644
--- a/hw/block/fdc-sysbus.c
+++ b/hw/block/fdc-sysbus.c
@@ -26,6 +26,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qom/object.h"
+#include "exec/memory.h"
#include "hw/sysbus.h"
#include "hw/block/fdc.h"
#include "migration/vmstate.h"
@@ -52,6 +53,7 @@ struct FDCtrlSysBus {
/*< public >*/
struct FDCtrl state;
+ MemoryRegion iomem;
};
static uint64_t fdctrl_read_mem(void *opaque, hwaddr reg, unsigned ize)
@@ -146,11 +148,11 @@ static void sysbus_fdc_common_instance_init(Object *obj)
qdev_set_legacy_instance_id(dev, 0 /* io */, 2); /* FIXME */
- memory_region_init_io(&fdctrl->iomem, obj,
+ memory_region_init_io(&sys->iomem, obj,
sbdc->use_strict_io ? &fdctrl_mem_strict_ops
: &fdctrl_mem_ops,
fdctrl, "fdc", 0x08);
- sysbus_init_mmio(sbd, &fdctrl->iomem);
+ sysbus_init_mmio(sbd, &sys->iomem);
sysbus_init_irq(sbd, &fdctrl->irq);
qdev_init_gpio_in(dev, fdctrl_handle_tc, 1);
diff --git a/hw/char/parallel-isa.c b/hw/char/parallel-isa.c
index ab0f879..a5ce6ee 100644
--- a/hw/char/parallel-isa.c
+++ b/hw/char/parallel-isa.c
@@ -41,3 +41,17 @@ void parallel_hds_isa_init(ISABus *bus, int n)
}
}
}
+
+void isa_parallel_set_iobase(ISADevice *parallel, hwaddr iobase)
+{
+ ISAParallelState *s = ISA_PARALLEL(parallel);
+
+ parallel->ioport_id = iobase;
+ s->iobase = iobase;
+ portio_list_set_address(&s->portio_list, s->iobase);
+}
+
+void isa_parallel_set_enabled(ISADevice *parallel, bool enabled)
+{
+ portio_list_set_enabled(&ISA_PARALLEL(parallel)->portio_list, enabled);
+}
diff --git a/hw/char/parallel.c b/hw/char/parallel.c
index bd488cd..c394635 100644
--- a/hw/char/parallel.c
+++ b/hw/char/parallel.c
@@ -532,7 +532,7 @@ static void parallel_isa_realizefn(DeviceState *dev, Error **errp)
s->status = dummy;
}
- isa_register_portio_list(isadev, &s->portio_list, base,
+ isa_register_portio_list(isadev, &isa->portio_list, base,
(s->hw_driver
? &isa_parallel_portio_hw_list[0]
: &isa_parallel_portio_sw_list[0]),
diff --git a/hw/char/serial-isa.c b/hw/char/serial-isa.c
index 1c793b2..329b352 100644
--- a/hw/char/serial-isa.c
+++ b/hw/char/serial-isa.c
@@ -184,3 +184,17 @@ void serial_hds_isa_init(ISABus *bus, int from, int to)
}
}
}
+
+void isa_serial_set_iobase(ISADevice *serial, hwaddr iobase)
+{
+ ISASerialState *s = ISA_SERIAL(serial);
+
+ serial->ioport_id = iobase;
+ s->iobase = iobase;
+ memory_region_set_address(&s->state.io, s->iobase);
+}
+
+void isa_serial_set_enabled(ISADevice *serial, bool enabled)
+{
+ memory_region_set_enabled(&ISA_SERIAL(serial)->state.io, enabled);
+}
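
The hunks above add the same pattern for the floppy controller, parallel port and serial port: one helper to move the device's I/O window and one to gate it. The consumer in this series is the VIA Super I/O model (hw/isa/vt82c686.c in the diffstat). Below is a minimal sketch of how a Super I/O configuration write might drive the serial helpers; the register encoding is purely hypothetical and is not the actual vt82c686 logic.

/*
 * Illustrative sketch only, not part of the patch: reprogram a serial port
 * from two imaginary Super I/O configuration registers.  The helper names
 * and signatures come from the hunk above; the register layout (the base
 * register holds io_base >> 2, bit 0 of the enable register gates the port)
 * is invented for the example.
 */
static void superio_cfg_update_serial(ISADevice *serial,
                                      uint8_t base_reg, uint8_t enable_reg)
{
    isa_serial_set_iobase(serial, (hwaddr)base_reg << 2);  /* 0xfe -> 0x3f8 */
    isa_serial_set_enabled(serial, enable_reg & 0x01);
}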
diff --git a/hw/cxl/cxl-cdat.c b/hw/cxl/cxl-cdat.c
index 639a2db..2fea975 100644
--- a/hw/cxl/cxl-cdat.c
+++ b/hw/cxl/cxl-cdat.c
@@ -49,6 +49,7 @@ static void ct3_build_cdat(CDATObject *cdat, Error **errp)
g_autofree CDATTableHeader *cdat_header = NULL;
g_autofree CDATEntry *cdat_st = NULL;
uint8_t sum = 0;
+ uint8_t *hdr_buf;
int ent, i;
/* Use default table if fopen == NULL */
@@ -63,7 +64,7 @@ static void ct3_build_cdat(CDATObject *cdat, Error **errp)
cdat->built_buf_len = cdat->build_cdat_table(&cdat->built_buf,
cdat->private);
- if (!cdat->built_buf_len) {
+ if (cdat->built_buf_len <= 0) {
/* Build later as not all data available yet */
cdat->to_update = true;
return;
@@ -95,8 +96,12 @@ static void ct3_build_cdat(CDATObject *cdat, Error **errp)
/* For now, no runtime updates */
cdat_header->sequence = 0;
cdat_header->length += sizeof(CDATTableHeader);
- sum += cdat_header->revision + cdat_header->sequence +
- cdat_header->length;
+
+ hdr_buf = (uint8_t *)cdat_header;
+ for (i = 0; i < sizeof(*cdat_header); i++) {
+ sum += hdr_buf[i];
+ }
+
/* Sum of all bytes including checksum must be 0 */
cdat_header->checksum = ~sum + 1;
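
The checksum convention used here is the usual ACPI-style rule restated in the comment: the byte sum of the whole header, checksum field included, must wrap to zero. A small self-check sketch (illustrative only, not part of the patch):

/*
 * Illustrative only: verify the rule "sum of all bytes including checksum
 * must be 0" that the code above establishes by storing ~sum + 1.
 */
static bool cdat_header_checksum_ok(const CDATTableHeader *h)
{
    const uint8_t *p = (const uint8_t *)h;
    uint8_t sum = 0;
    size_t i;

    for (i = 0; i < sizeof(*h); i++) {
        sum += p[i];
    }
    return sum == 0;
}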
diff --git a/hw/cxl/cxl-component-utils.c b/hw/cxl/cxl-component-utils.c
index 29d4774..84ab503 100644
--- a/hw/cxl/cxl-component-utils.c
+++ b/hw/cxl/cxl-component-utils.c
@@ -13,7 +13,7 @@
#include "hw/pci/pci.h"
#include "hw/cxl/cxl.h"
-/* CXL r3.0 Section 8.2.4.19.1 CXL HDM Decoder Capability Register */
+/* CXL r3.1 Section 8.2.4.20.1 CXL HDM Decoder Capability Register */
int cxl_decoder_count_enc(int count)
{
switch (count) {
@@ -160,11 +160,11 @@ static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset, uint64_t value,
}
/*
- * 8.2.3
+ * CXL r3.1 Section 8.2.3: Component Register Layout and Definition
* The access restrictions specified in Section 8.2.2 also apply to CXL 2.0
* Component Registers.
*
- * 8.2.2
+ * CXL r3.1 Section 8.2.2: Accessing Component Registers
* • A 32 bit register shall be accessed as a 4 Bytes quantity. Partial
* reads are not permitted.
* • A 64 bit register shall be accessed as a 8 Bytes quantity. Partial
@@ -197,9 +197,9 @@ void cxl_component_register_block_init(Object *obj,
CXL2_COMPONENT_BLOCK_SIZE);
/* io registers controls link which we don't care about in QEMU */
- memory_region_init_io(&cregs->io, obj, NULL, cregs, ".io",
+ memory_region_init_io(&cregs->io, obj, NULL, NULL, ".io",
CXL2_COMPONENT_IO_REGION_SIZE);
- memory_region_init_io(&cregs->cache_mem, obj, &cache_mem_ops, cregs,
+ memory_region_init_io(&cregs->cache_mem, obj, &cache_mem_ops, cxl_cstate,
".cache_mem", CXL2_COMPONENT_CM_REGION_SIZE);
memory_region_add_subregion(&cregs->component_registers, 0, &cregs->io);
@@ -243,6 +243,14 @@ static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk,
ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 1);
ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY,
POISON_ON_ERR_CAP, 0);
+ ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 3_6_12_WAY, 0);
+ ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 16_WAY, 0);
+ ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, UIO, 0);
+ ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY,
+ UIO_DECODER_COUNT, 0);
+ ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, MEMDATA_NXM_CAP, 0);
+ ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY,
+ SUPPORTED_COHERENCY_MODEL, 0); /* Unknown */
ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_GLOBAL_CONTROL,
HDM_DECODER_ENABLE, 0);
write_msk[R_CXL_HDM_DECODER_GLOBAL_CONTROL] = 0x3;
@@ -300,7 +308,8 @@ void cxl_component_register_init_common(uint32_t *reg_state,
/* CXL Capability Header Register */
ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ID, 1);
- ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, VERSION, 1);
+ ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, VERSION,
+ CXL_CAPABILITY_VERSION);
ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, CACHE_MEM_VERSION, 1);
ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ARRAY_SIZE, caps);
@@ -317,24 +326,24 @@ void cxl_component_register_init_common(uint32_t *reg_state,
CXL_##reg##_REGISTERS_OFFSET); \
} while (0)
- init_cap_reg(RAS, 2, 2);
+ init_cap_reg(RAS, 2, CXL_RAS_CAPABILITY_VERSION);
ras_init_common(reg_state, write_msk);
- init_cap_reg(LINK, 4, 2);
+ init_cap_reg(LINK, 4, CXL_LINK_CAPABILITY_VERSION);
if (caps < 3) {
return;
}
- init_cap_reg(HDM, 5, 1);
+ init_cap_reg(HDM, 5, CXL_HDM_CAPABILITY_VERSION);
hdm_init_common(reg_state, write_msk, type);
if (caps < 5) {
return;
}
- init_cap_reg(EXTSEC, 6, 1);
- init_cap_reg(SNOOP, 8, 1);
+ init_cap_reg(EXTSEC, 6, CXL_EXTSEC_CAP_VERSION);
+ init_cap_reg(SNOOP, 8, CXL_SNOOP_CAP_VERSION);
#undef init_cap_reg
}
@@ -459,7 +468,7 @@ void cxl_component_create_dvsec(CXLComponentState *cxl,
cxl->dvsec_offset += length;
}
-/* CXL r3.0 Section 8.2.4.19.7 CXL HDM Decoder n Control Register */
+/* CXL r3.1 Section 8.2.4.20.7 CXL HDM Decoder n Control Register */
uint8_t cxl_interleave_ways_enc(int iw, Error **errp)
{
switch (iw) {
diff --git a/hw/cxl/cxl-device-utils.c b/hw/cxl/cxl-device-utils.c
index 61a3c4d..035d034 100644
--- a/hw/cxl/cxl-device-utils.c
+++ b/hw/cxl/cxl-device-utils.c
@@ -13,7 +13,7 @@
/*
* Device registers have no restrictions per the spec, and so fall back to the
- * default memory mapped register rules in 8.2:
+ * default memory mapped register rules in CXL r3.1 Section 8.2:
* Software shall use CXL.io Memory Read and Write to access memory mapped
* register defined in this section. Unless otherwise specified, software
* shall restrict the accesses width based on the following:
@@ -229,12 +229,9 @@ static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
static uint64_t mdev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
- uint64_t retval = 0;
-
- retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MEDIA_STATUS, 1);
- retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MBOX_READY, 1);
+ CXLDeviceState *cxl_dstate = opaque;
- return retval;
+ return cxl_dstate->memdev_status;
}
static void ro_reg_write(void *opaque, hwaddr offset, uint64_t value,
@@ -369,9 +366,21 @@ static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate)
ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
MSI_N, msi_n);
cxl_dstate->mbox_msi_n = msi_n;
+ ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
+ MBOX_READY_TIME, 0); /* Not reported */
+ ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
+ TYPE, 0); /* Inferred from class code */
}
-static void memdev_reg_init_common(CXLDeviceState *cxl_dstate) { }
+static void memdev_reg_init_common(CXLDeviceState *cxl_dstate)
+{
+ uint64_t memdev_status_reg;
+
+ memdev_status_reg = FIELD_DP64(0, CXL_MEM_DEV_STS, MEDIA_STATUS, 1);
+ memdev_status_reg = FIELD_DP64(memdev_status_reg, CXL_MEM_DEV_STS,
+ MBOX_READY, 1);
+ cxl_dstate->memdev_status = memdev_status_reg;
+}
void cxl_device_register_init_t3(CXLType3Dev *ct3d)
{
@@ -384,13 +393,15 @@ void cxl_device_register_init_t3(CXLType3Dev *ct3d)
ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);
- cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
+ cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1,
+ CXL_DEVICE_STATUS_VERSION);
device_reg_init_common(cxl_dstate);
- cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
+ cxl_device_cap_init(cxl_dstate, MAILBOX, 2, CXL_DEV_MAILBOX_VERSION);
mailbox_reg_init_common(cxl_dstate);
- cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
+ cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000,
+ CXL_MEM_DEV_STATUS_VERSION);
memdev_reg_init_common(cxl_dstate);
cxl_initialize_mailbox_t3(&ct3d->cci, DEVICE(ct3d),
diff --git a/hw/cxl/cxl-events.c b/hw/cxl/cxl-events.c
index affcf8a..d397718 100644
--- a/hw/cxl/cxl-events.c
+++ b/hw/cxl/cxl-events.c
@@ -204,7 +204,7 @@ CXLRetCode cxl_event_clear_records(CXLDeviceState *cxlds,
* record that will not be cleared when Clear Event Records is executed,
* the device shall return the Invalid Handle return code and shall not
* clear any of the specified event records."
- * -- CXL 3.0 8.2.9.2.3
+ * -- CXL r3.1 Section 8.2.9.2.3: Clear Event Records (0101h)
*/
entry = cxl_event_get_head(log);
for (nr = 0; entry && nr < pl->nr_recs; nr++) {
diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c
index 6eff56f..e5eb97c 100644
--- a/hw/cxl/cxl-mailbox-utils.c
+++ b/hw/cxl/cxl-mailbox-utils.c
@@ -86,7 +86,7 @@ enum {
#define MANAGEMENT_COMMAND 0x0
};
-/* CCI Message Format CXL r3.0 Figure 7-19 */
+/* CCI Message Format CXL r3.1 Figure 7-19 */
typedef struct CXLCCIMessage {
uint8_t category;
#define CXL_CCI_CAT_REQ 0
@@ -342,7 +342,7 @@ static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
-/* CXL r3.0 section 8.2.9.1.1: Identify (Opcode 0001h) */
+/* CXL r3.1 section 8.2.9.1.1: Identify (Opcode 0001h) */
static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
uint8_t *payload_in,
size_t len_in,
@@ -403,7 +403,7 @@ static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d,
}
}
-/* CXL r3 8.2.9.1.1 */
+/* CXL r3.1 Section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */
static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
uint8_t *payload_in,
size_t len_in,
@@ -455,7 +455,7 @@ static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
-/* CXL r3.0 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */
+/* CXL r3.1 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */
static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
uint8_t *payload_in,
size_t len_in,
@@ -463,14 +463,14 @@ static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
size_t *len_out,
CXLCCI *cci)
{
- /* CXL r3.0 Table 7-18: Get Physical Port State Request Payload */
+ /* CXL r3.1 Table 7-17: Get Physical Port State Request Payload */
struct cxl_fmapi_get_phys_port_state_req_pl {
uint8_t num_ports;
uint8_t ports[];
} QEMU_PACKED *in;
/*
- * CXL r3.0 Table 7-20: Get Physical Port State Port Information Block
+ * CXL r3.1 Table 7-19: Get Physical Port State Port Information Block
* Format
*/
struct cxl_fmapi_port_state_info_block {
@@ -491,7 +491,7 @@ static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
uint8_t supported_ld_count;
} QEMU_PACKED;
- /* CXL r3.0 Table 7-19: Get Physical Port State Response Payload */
+ /* CXL r3.1 Table 7-18: Get Physical Port State Response Payload */
struct cxl_fmapi_get_phys_port_state_resp_pl {
uint8_t num_ports;
uint8_t rsv1[3];
@@ -579,7 +579,7 @@ static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
-/* CXL r3.0 8.2.9.1.2 */
+/* CXL r3.1 Section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */
static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
uint8_t *payload_in,
size_t len_in,
@@ -609,7 +609,7 @@ static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
-/* 8.2.9.2.1 */
+/* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */
static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
uint8_t *payload_in,
size_t len,
@@ -647,7 +647,7 @@ static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
-/* 8.2.9.3.1 */
+/* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */
static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
uint8_t *payload_in,
size_t len_in,
@@ -664,7 +664,7 @@ static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
-/* 8.2.9.3.2 */
+/* CXL r3.1 Section 8.2.9.4.2: Set Timestamp (Opcode 0301h) */
static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd,
uint8_t *payload_in,
size_t len_in,
@@ -683,13 +683,13 @@ static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
-/* CXL 3.0 8.2.9.5.2.1 Command Effects Log (CEL) */
+/* CXL r3.1 Section 8.2.9.5.2.1: Command Effects Log (CEL) */
static const QemuUUID cel_uuid = {
.data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
};
-/* 8.2.9.4.1 */
+/* CXL r3.1 Section 8.2.9.5.1: Get Supported Logs (Opcode 0400h) */
static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd,
uint8_t *payload_in,
size_t len_in,
@@ -715,7 +715,7 @@ static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
-/* 8.2.9.4.2 */
+/* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) */
static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
uint8_t *payload_in,
size_t len_in,
@@ -732,14 +732,11 @@ static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
get_log = (void *)payload_in;
/*
- * 8.2.9.4.2
- * The device shall return Invalid Parameter if the Offset or Length
+ * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h)
+ * The device shall return Invalid Input if the Offset or Length
* fields attempt to access beyond the size of the log as reported by Get
* Supported Logs.
*
- * XXX: Spec is wrong, "Invalid Parameter" isn't a thing.
- * XXX: Spec doesn't address incorrect UUID incorrectness.
- *
* The CEL buffer is large enough to fit all commands in the emulation, so
* the only possible failure would be if the mailbox itself isn't big
* enough.
@@ -749,7 +746,7 @@ static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
}
if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
- return CXL_MBOX_UNSUPPORTED;
+ return CXL_MBOX_INVALID_LOG;
}
/* Store off everything to local variables so we can wipe out the payload */
@@ -760,7 +757,7 @@ static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
-/* 8.2.9.5.1.1 */
+/* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */
static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
uint8_t *payload_in,
size_t len_in,
@@ -815,6 +812,7 @@ static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
+/* CXL r3.1 Section 8.2.9.9.2.1: Get Partition Info (Opcode 4100h) */
static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd,
uint8_t *payload_in,
size_t len_in,
@@ -851,6 +849,7 @@ static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
+/* CXL r3.1 Section 8.2.9.9.2.3: Get LSA (Opcode 4102h) */
static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
uint8_t *payload_in,
size_t len_in,
@@ -879,6 +878,7 @@ static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
+/* CXL r3.1 Section 8.2.9.9.2.4: Set LSA (Opcode 4103h) */
static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
uint8_t *payload_in,
size_t len_in,
@@ -940,7 +940,7 @@ static void __do_sanitization(CXLType3Dev *ct3d)
}
/*
- * CXL 3.0 spec section 8.2.9.8.5.1 - Sanitize.
+ * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h)
*
* Once the Sanitize command has started successfully, the device shall be
* placed in the media disabled state. If the command fails or is interrupted
@@ -1001,15 +1001,8 @@ static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
cxl_dev_disable_media(&ct3d->cxl_dstate);
- if (secs > 2) {
- /* sanitize when done */
- return CXL_MBOX_BG_STARTED;
- } else {
- __do_sanitization(ct3d);
- cxl_dev_enable_media(&ct3d->cxl_dstate);
-
- return CXL_MBOX_SUCCESS;
- }
+ /* sanitize when done */
+ return CXL_MBOX_BG_STARTED;
}
static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd,
@@ -1025,7 +1018,10 @@ static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd,
*len_out = 4;
return CXL_MBOX_SUCCESS;
}
+
/*
+ * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h)
+ *
* This is very inefficient, but good enough for now!
* Also the payload will always fit, so no need to handle the MORE flag and
* make this stateful. We may want to allow longer poison lists to aid
@@ -1110,6 +1106,7 @@ static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
+/* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */
static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd,
uint8_t *payload_in,
size_t len_in,
@@ -1153,6 +1150,7 @@ static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd,
return CXL_MBOX_SUCCESS;
}
+/* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h */
static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
uint8_t *payload_in,
size_t len_in,
@@ -1387,27 +1385,21 @@ static void bg_timercb(void *opaque)
cci->bg.complete_pct = 100;
cci->bg.ret_code = ret;
- if (ret == CXL_MBOX_SUCCESS) {
- switch (cci->bg.opcode) {
- case 0x4400: /* sanitize */
- {
- CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
-
- __do_sanitization(ct3d);
- cxl_dev_enable_media(&ct3d->cxl_dstate);
- }
+ switch (cci->bg.opcode) {
+ case 0x4400: /* sanitize */
+ {
+ CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
+
+ __do_sanitization(ct3d);
+ cxl_dev_enable_media(&ct3d->cxl_dstate);
+ }
+ break;
+ case 0x4304: /* TODO: scan media */
+ break;
+ default:
+ __builtin_unreachable();
break;
- case 0x4304: /* TODO: scan media */
- break;
- default:
- __builtin_unreachable();
- break;
- }
}
-
- qemu_log("Background command %04xh finished: %s\n",
- cci->bg.opcode,
- ret == CXL_MBOX_SUCCESS ? "success" : "aborted");
} else {
/* estimate only */
cci->bg.complete_pct = 100 * now / total_time;
diff --git a/hw/display/virtio-gpu-rutabaga.c b/hw/display/virtio-gpu-rutabaga.c
index 9e67f9b..17bf701 100644
--- a/hw/display/virtio-gpu-rutabaga.c
+++ b/hw/display/virtio-gpu-rutabaga.c
@@ -148,14 +148,38 @@ rutabaga_cmd_create_resource_3d(VirtIOGPU *g,
}
static void
+virtio_gpu_rutabaga_resource_unref(VirtIOGPU *g,
+ struct virtio_gpu_simple_resource *res,
+ Error **errp)
+{
+ int32_t result;
+ VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
+
+ result = rutabaga_resource_unref(vr->rutabaga, res->resource_id);
+ if (result) {
+ error_setg_errno(errp,
+ (int)result,
+ "%s: rutabaga_resource_unref returned %"PRIi32
+ " for resource_id = %"PRIu32, __func__, result,
+ res->resource_id);
+ }
+
+ if (res->image) {
+ pixman_image_unref(res->image);
+ }
+
+ QTAILQ_REMOVE(&g->reslist, res, next);
+ g_free(res);
+}
+
+static void
rutabaga_cmd_resource_unref(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
- int32_t result;
+ int32_t result = 0;
struct virtio_gpu_simple_resource *res;
struct virtio_gpu_resource_unref unref;
-
- VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
+ Error *local_err = NULL;
VIRTIO_GPU_FILL_CMD(unref);
@@ -164,15 +188,14 @@ rutabaga_cmd_resource_unref(VirtIOGPU *g,
res = virtio_gpu_find_resource(g, unref.resource_id);
CHECK(res, cmd);
- result = rutabaga_resource_unref(vr->rutabaga, unref.resource_id);
- CHECK(!result, cmd);
-
- if (res->image) {
- pixman_image_unref(res->image);
+ virtio_gpu_rutabaga_resource_unref(g, res, &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ /* local_err was freed, do not reuse it. */
+ local_err = NULL;
+ result = 1;
}
-
- QTAILQ_REMOVE(&g->reslist, res, next);
- g_free(res);
+ CHECK(!result, cmd);
}
static void
@@ -1099,7 +1122,7 @@ static void virtio_gpu_rutabaga_class_init(ObjectClass *klass, void *data)
vgc->handle_ctrl = virtio_gpu_rutabaga_handle_ctrl;
vgc->process_cmd = virtio_gpu_rutabaga_process_cmd;
vgc->update_cursor_data = virtio_gpu_rutabaga_update_cursor;
-
+ vgc->resource_destroy = virtio_gpu_rutabaga_resource_unref;
vdc->realize = virtio_gpu_rutabaga_realize;
device_class_set_props(dc, virtio_gpu_rutabaga_properties);
}
diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
index 8bb7a2c..9f34d0e 100644
--- a/hw/display/virtio-gpu-virgl.c
+++ b/hw/display/virtio-gpu-virgl.c
@@ -181,7 +181,7 @@ static void virgl_cmd_set_scanout(VirtIOGPU *g,
memset(&info, 0, sizeof(info));
ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
#endif
- if (ret == -1) {
+ if (ret) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: illegal resource specified %d\n",
__func__, ss.resource_id);
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index f8a675e..1c1ee23 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -402,7 +402,8 @@ static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
}
static void virtio_gpu_resource_destroy(VirtIOGPU *g,
- struct virtio_gpu_simple_resource *res)
+ struct virtio_gpu_simple_resource *res,
+ Error **errp)
{
int i;
@@ -438,7 +439,11 @@ static void virtio_gpu_resource_unref(VirtIOGPU *g,
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
return;
}
- virtio_gpu_resource_destroy(g, res);
+ /*
+ * virtio_gpu_resource_destroy does not set any errors, so pass a NULL errp
+ * to ignore them.
+ */
+ virtio_gpu_resource_destroy(g, res, NULL);
}
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
@@ -1488,11 +1493,24 @@ static void virtio_gpu_device_unrealize(DeviceState *qdev)
static void virtio_gpu_reset_bh(void *opaque)
{
VirtIOGPU *g = VIRTIO_GPU(opaque);
+ VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
struct virtio_gpu_simple_resource *res, *tmp;
+ uint32_t resource_id;
+ Error *local_err = NULL;
int i = 0;
QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
- virtio_gpu_resource_destroy(g, res);
+ resource_id = res->resource_id;
+ vgc->resource_destroy(g, res, &local_err);
+ if (local_err) {
+ error_append_hint(&local_err, "%s: %s resource_destroy"
+ "for resource_id = %"PRIu32" failed.\n",
+ __func__, object_get_typename(OBJECT(g)),
+ resource_id);
+ /* error_report_err frees the error object for us */
+ error_report_err(local_err);
+ local_err = NULL;
+ }
}
for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
@@ -1515,7 +1533,7 @@ void virtio_gpu_reset(VirtIODevice *vdev)
qemu_cond_wait_bql(&g->reset_cond);
}
} else {
- virtio_gpu_reset_bh(g);
+ aio_bh_call(g->reset_bh);
}
while (!QTAILQ_EMPTY(&g->cmdq)) {
@@ -1632,6 +1650,7 @@ static void virtio_gpu_class_init(ObjectClass *klass, void *data)
vgc->handle_ctrl = virtio_gpu_handle_ctrl;
vgc->process_cmd = virtio_gpu_simple_process_cmd;
vgc->update_cursor_data = virtio_gpu_update_cursor_data;
+ vgc->resource_destroy = virtio_gpu_resource_destroy;
vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;
vdc->realize = virtio_gpu_device_realize;
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index e990b0a..d3ce96d 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -1415,7 +1415,7 @@ static void build_acpi0017(Aml *table)
aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0017")));
method = aml_method("_STA", 0, AML_NOTSERIALIZED);
- aml_append(method, aml_return(aml_int(0x01)));
+ aml_append(method, aml_return(aml_int(0x0B)));
aml_append(dev, method);
build_cxl_dsm_method(dev);
@@ -2333,30 +2333,23 @@ static void
build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id,
const char *oem_table_id)
{
- int ivhd_table_len = 24;
AMDVIState *s = AMD_IOMMU_DEVICE(x86_iommu_get_default());
GArray *ivhd_blob = g_array_new(false, true, 1);
AcpiTable table = { .sig = "IVRS", .rev = 1, .oem_id = oem_id,
.oem_table_id = oem_table_id };
+ uint64_t feature_report;
acpi_table_begin(&table, table_data);
/* IVinfo - IO virtualization information common to all
* IOMMU units in a system
*/
- build_append_int_noprefix(table_data, 40UL << 8/* PASize */, 4);
+ build_append_int_noprefix(table_data,
+ (1UL << 0) | /* EFRSup */
+ (40UL << 8), /* PASize */
+ 4);
/* reserved */
build_append_int_noprefix(table_data, 0, 8);
- /* IVHD definition - type 10h */
- build_append_int_noprefix(table_data, 0x10, 1);
- /* virtualization flags */
- build_append_int_noprefix(table_data,
- (1UL << 0) | /* HtTunEn */
- (1UL << 4) | /* iotblSup */
- (1UL << 6) | /* PrefSup */
- (1UL << 7), /* PPRSup */
- 1);
-
/*
* A PCI bus walk, for each PCI host bridge, is necessary to create a
* complete set of IVHD entries. Do this into a separate blob so that we
@@ -2376,18 +2369,34 @@ build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id,
build_append_int_noprefix(ivhd_blob, 0x0000001, 4);
}
- ivhd_table_len += ivhd_blob->len;
-
/*
* When interrupt remapping is supported, we add a special IVHD device
- * for type IO-APIC.
+ * for type IO-APIC
+ * Refer to spec - Table 95: IVHD device entry type codes
+ *
+ * Linux IOMMU driver checks for the special IVHD device (type IO-APIC).
+ * See Linux kernel commit 'c2ff5cf5294bcbd7fa50f7d860e90a66db7e5059'
*/
if (x86_iommu_ir_supported(x86_iommu_get_default())) {
- ivhd_table_len += 8;
+ build_append_int_noprefix(ivhd_blob,
+ (0x1ull << 56) | /* type IOAPIC */
+ (IOAPIC_SB_DEVID << 40) | /* IOAPIC devid */
+ 0x48, /* special device */
+ 8);
}
+ /* IVHD definition - type 10h */
+ build_append_int_noprefix(table_data, 0x10, 1);
+ /* virtualization flags */
+ build_append_int_noprefix(table_data,
+ (1UL << 0) | /* HtTunEn */
+ (1UL << 4) | /* iotblSup */
+ (1UL << 6) | /* PrefSup */
+ (1UL << 7), /* PPRSup */
+ 1);
+
/* IVHD length */
- build_append_int_noprefix(table_data, ivhd_table_len, 2);
+ build_append_int_noprefix(table_data, ivhd_blob->len + 24, 2);
/* DeviceID */
build_append_int_noprefix(table_data,
object_property_get_int(OBJECT(&s->pci), "addr",
@@ -2401,31 +2410,53 @@ build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id,
/* IOMMU info */
build_append_int_noprefix(table_data, 0, 2);
/* IOMMU Feature Reporting */
+ feature_report = (48UL << 30) | /* HATS */
+ (48UL << 28) | /* GATS */
+ (1UL << 2) | /* GTSup */
+ (1UL << 6); /* GASup */
+ if (s->xtsup) {
+ feature_report |= (1UL << 0); /* XTSup */
+ }
+ build_append_int_noprefix(table_data, feature_report, 4);
+
+ /* IVHD entries as found above */
+ g_array_append_vals(table_data, ivhd_blob->data, ivhd_blob->len);
+
+ /* IVHD definition - type 11h */
+ build_append_int_noprefix(table_data, 0x11, 1);
+ /* virtualization flags */
build_append_int_noprefix(table_data,
- (48UL << 30) | /* HATS */
- (48UL << 28) | /* GATS */
- (1UL << 2) | /* GTSup */
- (1UL << 6), /* GASup */
- 4);
+ (1UL << 0) | /* HtTunEn */
+ (1UL << 4), /* iotblSup */
+ 1);
+
+ /* IVHD length */
+ build_append_int_noprefix(table_data, ivhd_blob->len + 40, 2);
+ /* DeviceID */
+ build_append_int_noprefix(table_data,
+ object_property_get_int(OBJECT(&s->pci), "addr",
+ &error_abort), 2);
+ /* Capability offset */
+ build_append_int_noprefix(table_data, s->pci.capab_offset, 2);
+ /* IOMMU base address */
+ build_append_int_noprefix(table_data, s->mmio.addr, 8);
+ /* PCI Segment Group */
+ build_append_int_noprefix(table_data, 0, 2);
+ /* IOMMU info */
+ build_append_int_noprefix(table_data, 0, 2);
+ /* IOMMU Attributes */
+ build_append_int_noprefix(table_data, 0, 4);
+ /* EFR Register Image */
+ build_append_int_noprefix(table_data,
+ amdvi_extended_feature_register(s),
+ 8);
+ /* EFR Register Image 2 */
+ build_append_int_noprefix(table_data, 0, 8);
/* IVHD entries as found above */
g_array_append_vals(table_data, ivhd_blob->data, ivhd_blob->len);
- g_array_free(ivhd_blob, TRUE);
- /*
- * Add a special IVHD device type.
- * Refer to spec - Table 95: IVHD device entry type codes
- *
- * Linux IOMMU driver checks for the special IVHD device (type IO-APIC).
- * See Linux kernel commit 'c2ff5cf5294bcbd7fa50f7d860e90a66db7e5059'
- */
- if (x86_iommu_ir_supported(x86_iommu_get_default())) {
- build_append_int_noprefix(table_data,
- (0x1ull << 56) | /* type IOAPIC */
- (IOAPIC_SB_DEVID << 40) | /* IOAPIC devid */
- 0x48, /* special device */
- 8);
- }
+ g_array_free(ivhd_blob, TRUE);
acpi_table_end(linker, &table);
}
diff --git a/hw/i386/amd_iommu-stub.c b/hw/i386/amd_iommu-stub.c
new file mode 100644
index 0000000..d62a373
--- /dev/null
+++ b/hw/i386/amd_iommu-stub.c
@@ -0,0 +1,26 @@
+/*
+ * Stubs for AMD IOMMU emulation
+ *
+ * Copyright (C) 2023 Bui Quang Minh <minhquangbui99@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "amd_iommu.h"
+
+uint64_t amdvi_extended_feature_register(AMDVIState *s)
+{
+ return AMDVI_DEFAULT_EXT_FEATURES;
+}
diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
index 4203144..7329553 100644
--- a/hw/i386/amd_iommu.c
+++ b/hw/i386/amd_iommu.c
@@ -31,6 +31,7 @@
#include "hw/i386/apic_internal.h"
#include "trace.h"
#include "hw/i386/apic-msidef.h"
+#include "hw/qdev-properties.h"
/* used AMD-Vi MMIO registers */
const char *amdvi_mmio_low[] = {
@@ -74,6 +75,16 @@ typedef struct AMDVIIOTLBEntry {
uint64_t page_mask; /* physical page size */
} AMDVIIOTLBEntry;
+uint64_t amdvi_extended_feature_register(AMDVIState *s)
+{
+ uint64_t feature = AMDVI_DEFAULT_EXT_FEATURES;
+ if (s->xtsup) {
+ feature |= AMDVI_FEATURE_XT;
+ }
+
+ return feature;
+}
+
/* configure MMIO registers at startup/reset */
static void amdvi_set_quad(AMDVIState *s, hwaddr addr, uint64_t val,
uint64_t romask, uint64_t w1cmask)
@@ -1155,7 +1166,12 @@ static int amdvi_int_remap_ga(AMDVIState *iommu,
irq->vector = irte.hi.fields.vector;
irq->dest_mode = irte.lo.fields_remap.dm;
irq->redir_hint = irte.lo.fields_remap.rq_eoi;
- irq->dest = irte.lo.fields_remap.destination;
+ if (iommu->xtsup) {
+ irq->dest = irte.lo.fields_remap.destination |
+ (irte.hi.fields.destination_hi << 24);
+ } else {
+ irq->dest = irte.lo.fields_remap.destination & 0xff;
+ }
return 0;
}
@@ -1505,8 +1521,9 @@ static void amdvi_init(AMDVIState *s)
/* reset MMIO */
memset(s->mmior, 0, AMDVI_MMIO_SIZE);
- amdvi_set_quad(s, AMDVI_MMIO_EXT_FEATURES, AMDVI_EXT_FEATURES,
- 0xffffffffffffffef, 0);
+ amdvi_set_quad(s, AMDVI_MMIO_EXT_FEATURES,
+ amdvi_extended_feature_register(s),
+ 0xffffffffffffffef, 0);
amdvi_set_quad(s, AMDVI_MMIO_STATUS, 0, 0x98, 0x67);
}
@@ -1589,6 +1606,11 @@ static void amdvi_sysbus_realize(DeviceState *dev, Error **errp)
amdvi_init(s);
}
+static Property amdvi_properties[] = {
+ DEFINE_PROP_BOOL("xtsup", AMDVIState, xtsup, false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
static const VMStateDescription vmstate_amdvi_sysbus = {
.name = "amd-iommu",
.unmigratable = 1
@@ -1615,6 +1637,7 @@ static void amdvi_sysbus_class_init(ObjectClass *klass, void *data)
dc->user_creatable = true;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->desc = "AMD IOMMU (AMD-Vi) DMA Remapping device";
+ device_class_set_props(dc, amdvi_properties);
}
static const TypeInfo amdvi_sysbus = {
diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h
index c5065a3..73619fe 100644
--- a/hw/i386/amd_iommu.h
+++ b/hw/i386/amd_iommu.h
@@ -154,6 +154,7 @@
#define AMDVI_FEATURE_PREFETCH (1ULL << 0) /* page prefetch */
#define AMDVI_FEATURE_PPR (1ULL << 1) /* PPR Support */
+#define AMDVI_FEATURE_XT (1ULL << 2) /* x2APIC Support */
#define AMDVI_FEATURE_GT (1ULL << 4) /* Guest Translation */
#define AMDVI_FEATURE_IA (1ULL << 6) /* inval all support */
#define AMDVI_FEATURE_GA (1ULL << 7) /* guest VAPIC support */
@@ -173,8 +174,9 @@
#define AMDVI_IOTLB_MAX_SIZE 1024
#define AMDVI_DEVID_SHIFT 36
-/* extended feature support */
-#define AMDVI_EXT_FEATURES (AMDVI_FEATURE_PREFETCH | AMDVI_FEATURE_PPR | \
+/* default extended feature */
+#define AMDVI_DEFAULT_EXT_FEATURES \
+ (AMDVI_FEATURE_PREFETCH | AMDVI_FEATURE_PPR | \
AMDVI_FEATURE_IA | AMDVI_FEATURE_GT | AMDVI_FEATURE_HE | \
AMDVI_GATS_MODE | AMDVI_HATS_MODE | AMDVI_FEATURE_GA)
@@ -276,8 +278,8 @@ union irte_ga_lo {
dm:1,
/* ------ */
guest_mode:1,
- destination:8,
- rsvd_1:48;
+ destination:24,
+ rsvd_1:32;
} fields_remap;
};
@@ -285,7 +287,8 @@ union irte_ga_hi {
uint64_t val;
struct {
uint64_t vector:8,
- rsvd_2:56;
+ rsvd_2:48,
+ destination_hi:8;
} fields;
};
@@ -364,6 +367,9 @@ struct AMDVIState {
/* Interrupt remapping */
bool ga_enabled;
+ bool xtsup;
};
+uint64_t amdvi_extended_feature_register(AMDVIState *s);
+
#endif
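
The widened destination field above splits a 32-bit x2APIC destination across the low and high IRTE words; amdvi_int_remap_ga() in amd_iommu.c reassembles it only when the new xtsup property is set. A sketch of that reassembly with a worked value (illustrative only, not part of the patch):

/*
 * Illustrative only: destination 0x12000034 is stored as
 * lo.fields_remap.destination = 0x000034 (24 bits) and
 * hi.fields.destination_hi = 0x12, and put back together like this.
 */
static uint32_t irte_ga_destination(uint32_t lo_destination,
                                    uint8_t destination_hi, bool xtsup)
{
    if (xtsup) {
        return lo_destination | ((uint32_t)destination_hi << 24);
    }
    return lo_destination & 0xff;   /* legacy 8-bit xAPIC destination */
}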
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 1a07fad..cf93318 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -4124,11 +4124,7 @@ static bool vtd_decide_config(IntelIOMMUState *s, Error **errp)
ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
}
if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) {
- if (!kvm_irqchip_is_split()) {
- error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split");
- return false;
- }
- if (kvm_enabled() && !kvm_enable_x2apic()) {
+ if (kvm_irqchip_is_split() && !kvm_enable_x2apic()) {
error_setg(errp, "eim=on requires support on the KVM side"
"(X2APIC_API, first shipped in v4.7)");
return false;
diff --git a/hw/i386/kvm/apic.c b/hw/i386/kvm/apic.c
index 1e89ca0..a72c28e 100644
--- a/hw/i386/kvm/apic.c
+++ b/hw/i386/kvm/apic.c
@@ -95,9 +95,10 @@ void kvm_get_apic_state(DeviceState *dev, struct kvm_lapic_state *kapic)
apic_next_timer(s, s->initial_count_load_time);
}
-static void kvm_apic_set_base(APICCommonState *s, uint64_t val)
+static int kvm_apic_set_base(APICCommonState *s, uint64_t val)
{
s->apicbase = val;
+ return 0;
}
static void kvm_apic_set_tpr(APICCommonState *s, uint8_t val)
diff --git a/hw/i386/meson.build b/hw/i386/meson.build
index 369c6bf..b9c1ca3 100644
--- a/hw/i386/meson.build
+++ b/hw/i386/meson.build
@@ -9,7 +9,8 @@ i386_ss.add(files(
i386_ss.add(when: 'CONFIG_X86_IOMMU', if_true: files('x86-iommu.c'),
if_false: files('x86-iommu-stub.c'))
-i386_ss.add(when: 'CONFIG_AMD_IOMMU', if_true: files('amd_iommu.c'))
+i386_ss.add(when: 'CONFIG_AMD_IOMMU', if_true: files('amd_iommu.c'),
+ if_false: files('amd_iommu-stub.c'))
i386_ss.add(when: 'CONFIG_I440FX', if_true: files('pc_piix.c'))
i386_ss.add(when: 'CONFIG_MICROVM', if_true: files('microvm.c', 'acpi-microvm.c', 'microvm-dt.c'))
i386_ss.add(when: 'CONFIG_Q35', if_true: files('pc_q35.c'))
diff --git a/hw/i386/x86.c b/hw/i386/x86.c
index 2b6291a..684dce9 100644
--- a/hw/i386/x86.c
+++ b/hw/i386/x86.c
@@ -137,7 +137,7 @@ void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version)
* a literal `0` in configurations where kvm_* aren't defined)
*/
if (kvm_enabled() && x86ms->apic_id_limit > 255 &&
- (!kvm_irqchip_in_kernel() || !kvm_enable_x2apic())) {
+ kvm_irqchip_in_kernel() && !kvm_enable_x2apic()) {
error_report("current -smp configuration requires kernel "
"irqchip and X2APIC API support.");
exit(EXIT_FAILURE);
@@ -147,6 +147,10 @@ void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version)
kvm_set_max_apic_id(x86ms->apic_id_limit);
}
+ if (!kvm_irqchip_in_kernel()) {
+ apic_set_max_apic_id(x86ms->apic_id_limit);
+ }
+
possible_cpus = mc->possible_cpu_arch_ids(ms);
for (i = 0; i < ms->smp.cpus; i++) {
x86_cpu_new(x86ms, possible_cpus->cpus[i].arch_id, &error_fatal);
@@ -516,10 +520,10 @@ static void x86_nmi(NMIState *n, int cpu_index, Error **errp)
CPU_FOREACH(cs) {
X86CPU *cpu = X86_CPU(cs);
- if (!cpu->apic_state) {
- cpu_interrupt(cs, CPU_INTERRUPT_NMI);
- } else {
+ if (cpu_is_apic_enabled(cpu->apic_state)) {
apic_deliver_nmi(cpu->apic_state);
+ } else {
+ cpu_interrupt(cs, CPU_INTERRUPT_NMI);
}
}
}
@@ -551,7 +555,7 @@ static void pic_irq_request(void *opaque, int irq, int level)
X86CPU *cpu = X86_CPU(cs);
trace_x86_pic_interrupt(irq, level);
- if (cpu->apic_state && !kvm_irqchip_in_kernel() &&
+ if (cpu_is_apic_enabled(cpu->apic_state) && !kvm_irqchip_in_kernel() &&
!whpx_apic_in_platform()) {
CPU_FOREACH(cs) {
cpu = X86_CPU(cs);
diff --git a/hw/i386/xen/xen_apic.c b/hw/i386/xen/xen_apic.c
index 7c7a60b..101e16a 100644
--- a/hw/i386/xen/xen_apic.c
+++ b/hw/i386/xen/xen_apic.c
@@ -49,8 +49,9 @@ static void xen_apic_realize(DeviceState *dev, Error **errp)
msi_nonbroken = true;
}
-static void xen_apic_set_base(APICCommonState *s, uint64_t val)
+static int xen_apic_set_base(APICCommonState *s, uint64_t val)
{
+ return 0;
}
static void xen_apic_set_tpr(APICCommonState *s, uint8_t val)
diff --git a/hw/input/meson.build b/hw/input/meson.build
index 640556b..3cc8ab8 100644
--- a/hw/input/meson.build
+++ b/hw/input/meson.build
@@ -11,7 +11,6 @@ system_ss.add(when: 'CONFIG_TSC2005', if_true: files('tsc2005.c'))
system_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-input.c'))
system_ss.add(when: 'CONFIG_VIRTIO_INPUT', if_true: files('virtio-input-hid.c'))
system_ss.add(when: 'CONFIG_VIRTIO_INPUT_HOST', if_true: files('virtio-input-host.c'))
-system_ss.add(when: 'CONFIG_VHOST_USER_INPUT', if_true: files('vhost-user-input.c'))
system_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_keypad.c'))
system_ss.add(when: 'CONFIG_TSC210X', if_true: files('tsc210x.c'))
diff --git a/hw/input/vhost-user-input.c b/hw/input/vhost-user-input.c
deleted file mode 100644
index 4ee3542..0000000
--- a/hw/input/vhost-user-input.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * This work is licensed under the terms of the GNU GPL, version 2 or
- * (at your option) any later version. See the COPYING file in the
- * top-level directory.
- */
-
-#include "qemu/osdep.h"
-#include "qemu/error-report.h"
-#include "qapi/error.h"
-
-#include "hw/virtio/virtio-input.h"
-
-static int vhost_input_config_change(struct vhost_dev *dev)
-{
- error_report("vhost-user-input: unhandled backend config change");
- return -1;
-}
-
-static const VhostDevConfigOps config_ops = {
- .vhost_dev_config_notifier = vhost_input_config_change,
-};
-
-static void vhost_input_realize(DeviceState *dev, Error **errp)
-{
- VHostUserInput *vhi = VHOST_USER_INPUT(dev);
- VirtIOInput *vinput = VIRTIO_INPUT(dev);
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
-
- vhost_dev_set_config_notifier(&vhi->vhost->dev, &config_ops);
- vinput->cfg_size = sizeof_field(virtio_input_config, u);
- if (vhost_user_backend_dev_init(vhi->vhost, vdev, 2, errp) == -1) {
- return;
- }
-}
-
-static void vhost_input_change_active(VirtIOInput *vinput)
-{
- VHostUserInput *vhi = VHOST_USER_INPUT(vinput);
-
- if (vinput->active) {
- vhost_user_backend_start(vhi->vhost);
- } else {
- vhost_user_backend_stop(vhi->vhost);
- }
-}
-
-static void vhost_input_get_config(VirtIODevice *vdev, uint8_t *config_data)
-{
- VirtIOInput *vinput = VIRTIO_INPUT(vdev);
- VHostUserInput *vhi = VHOST_USER_INPUT(vdev);
- Error *local_err = NULL;
- int ret;
-
- memset(config_data, 0, vinput->cfg_size);
-
- ret = vhost_dev_get_config(&vhi->vhost->dev, config_data, vinput->cfg_size,
- &local_err);
- if (ret) {
- error_report_err(local_err);
- return;
- }
-}
-
-static void vhost_input_set_config(VirtIODevice *vdev,
- const uint8_t *config_data)
-{
- VHostUserInput *vhi = VHOST_USER_INPUT(vdev);
- int ret;
-
- ret = vhost_dev_set_config(&vhi->vhost->dev, config_data,
- 0, sizeof(virtio_input_config),
- VHOST_SET_CONFIG_TYPE_FRONTEND);
- if (ret) {
- error_report("vhost-user-input: set device config space failed");
- return;
- }
-
- virtio_notify_config(vdev);
-}
-
-static struct vhost_dev *vhost_input_get_vhost(VirtIODevice *vdev)
-{
- VHostUserInput *vhi = VHOST_USER_INPUT(vdev);
- return &vhi->vhost->dev;
-}
-
-static const VMStateDescription vmstate_vhost_input = {
- .name = "vhost-user-input",
- .unmigratable = 1,
-};
-
-static void vhost_input_class_init(ObjectClass *klass, void *data)
-{
- VirtIOInputClass *vic = VIRTIO_INPUT_CLASS(klass);
- VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->vmsd = &vmstate_vhost_input;
- vdc->get_config = vhost_input_get_config;
- vdc->set_config = vhost_input_set_config;
- vdc->get_vhost = vhost_input_get_vhost;
- vic->realize = vhost_input_realize;
- vic->change_active = vhost_input_change_active;
-}
-
-static void vhost_input_init(Object *obj)
-{
- VHostUserInput *vhi = VHOST_USER_INPUT(obj);
-
- vhi->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND));
- object_property_add_alias(obj, "chardev",
- OBJECT(vhi->vhost), "chardev");
-}
-
-static void vhost_input_finalize(Object *obj)
-{
- VHostUserInput *vhi = VHOST_USER_INPUT(obj);
-
- object_unref(OBJECT(vhi->vhost));
-}
-
-static const TypeInfo vhost_input_info = {
- .name = TYPE_VHOST_USER_INPUT,
- .parent = TYPE_VIRTIO_INPUT,
- .instance_size = sizeof(VHostUserInput),
- .instance_init = vhost_input_init,
- .instance_finalize = vhost_input_finalize,
- .class_init = vhost_input_class_init,
-};
-
-static void vhost_input_register_types(void)
-{
- type_register_static(&vhost_input_info);
-}
-
-type_init(vhost_input_register_types)
diff --git a/hw/intc/apic.c b/hw/intc/apic.c
index ac3d47d..1d887d6 100644
--- a/hw/intc/apic.c
+++ b/hw/intc/apic.c
@@ -32,14 +32,13 @@
#include "qapi/error.h"
#include "qom/object.h"
-#define MAX_APICS 255
-#define MAX_APIC_WORDS 8
-
#define SYNC_FROM_VAPIC 0x1
#define SYNC_TO_VAPIC 0x2
#define SYNC_ISR_IRR_TO_VAPIC 0x4
-static APICCommonState *local_apics[MAX_APICS + 1];
+static APICCommonState **local_apics;
+static uint32_t max_apics;
+static uint32_t max_apic_words;
#define TYPE_APIC "apic"
/*This is reusing the APICCommonState typedef from APIC_COMMON */
@@ -49,7 +48,19 @@ DECLARE_INSTANCE_CHECKER(APICCommonState, APIC,
static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode);
static void apic_update_irq(APICCommonState *s);
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
- uint8_t dest, uint8_t dest_mode);
+ uint32_t dest, uint8_t dest_mode);
+
+void apic_set_max_apic_id(uint32_t max_apic_id)
+{
+ int word_size = 32;
+
+ /* round up the max apic id to next multiple of words */
+ max_apics = (max_apic_id + word_size - 1) & ~(word_size - 1);
+
+ local_apics = g_malloc0(sizeof(*local_apics) * max_apics);
+ max_apic_words = max_apics >> 5;
+}
+
/* Find first bit starting from msb */
static int apic_fls_bit(uint32_t value)
@@ -199,10 +210,10 @@ static void apic_external_nmi(APICCommonState *s)
#define foreach_apic(apic, deliver_bitmask, code) \
{\
int __i, __j;\
- for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\
+ for (__i = 0; __i < max_apic_words; __i++) {\
uint32_t __mask = deliver_bitmask[__i];\
if (__mask) {\
- for(__j = 0; __j < 32; __j++) {\
+ for (__j = 0; __j < 32; __j++) {\
if (__mask & (1U << __j)) {\
apic = local_apics[__i * 32 + __j];\
if (apic) {\
@@ -226,7 +237,7 @@ static void apic_bus_deliver(const uint32_t *deliver_bitmask,
{
int i, d;
d = -1;
- for(i = 0; i < MAX_APIC_WORDS; i++) {
+ for (i = 0; i < max_apic_words; i++) {
if (deliver_bitmask[i]) {
d = i * 32 + apic_ffs_bit(deliver_bitmask[i]);
break;
@@ -276,20 +287,70 @@ static void apic_bus_deliver(const uint32_t *deliver_bitmask,
apic_set_irq(apic_iter, vector_num, trigger_mode) );
}
-void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode,
- uint8_t vector_num, uint8_t trigger_mode)
+static void apic_deliver_irq(uint32_t dest, uint8_t dest_mode,
+ uint8_t delivery_mode, uint8_t vector_num,
+ uint8_t trigger_mode)
{
- uint32_t deliver_bitmask[MAX_APIC_WORDS];
+ uint32_t *deliver_bitmask = g_malloc(max_apic_words * sizeof(uint32_t));
trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num,
trigger_mode);
apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
+ g_free(deliver_bitmask);
+}
+
+bool is_x2apic_mode(DeviceState *dev)
+{
+ APICCommonState *s = APIC(dev);
+
+ return s->apicbase & MSR_IA32_APICBASE_EXTD;
}
-static void apic_set_base(APICCommonState *s, uint64_t val)
+static int apic_set_base_check(APICCommonState *s, uint64_t val)
{
+ /* Enable x2apic when x2apic is not supported by CPU */
+ if (!cpu_has_x2apic_feature(&s->cpu->env) &&
+ val & MSR_IA32_APICBASE_EXTD) {
+ return -1;
+ }
+
+ /*
+ * Transition into invalid state
+ * (s->apicbase & MSR_IA32_APICBASE_ENABLE == 0) &&
+ * (s->apicbase & MSR_IA32_APICBASE_EXTD) == 1
+ */
+ if (!(val & MSR_IA32_APICBASE_ENABLE) &&
+ (val & MSR_IA32_APICBASE_EXTD)) {
+ return -1;
+ }
+
+ /* Invalid transition from disabled mode to x2APIC */
+ if (!(s->apicbase & MSR_IA32_APICBASE_ENABLE) &&
+ !(s->apicbase & MSR_IA32_APICBASE_EXTD) &&
+ (val & MSR_IA32_APICBASE_ENABLE) &&
+ (val & MSR_IA32_APICBASE_EXTD)) {
+ return -1;
+ }
+
+ /* Invalid transition from x2APIC to xAPIC */
+ if ((s->apicbase & MSR_IA32_APICBASE_ENABLE) &&
+ (s->apicbase & MSR_IA32_APICBASE_EXTD) &&
+ (val & MSR_IA32_APICBASE_ENABLE) &&
+ !(val & MSR_IA32_APICBASE_EXTD)) {
+ return -1;
+ }
+
+ return 0;
+}
+
+static int apic_set_base(APICCommonState *s, uint64_t val)
+{
+ if (apic_set_base_check(s, val) < 0) {
+ return -1;
+ }
+
s->apicbase = (val & 0xfffff000) |
(s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE));
/* if disabled, cannot be enabled again */
@@ -298,6 +359,25 @@ static void apic_set_base(APICCommonState *s, uint64_t val)
cpu_clear_apic_feature(&s->cpu->env);
s->spurious_vec &= ~APIC_SV_ENABLE;
}
+
+ /* Transition from disabled mode to xAPIC */
+ if (!(s->apicbase & MSR_IA32_APICBASE_ENABLE) &&
+ (val & MSR_IA32_APICBASE_ENABLE)) {
+ s->apicbase |= MSR_IA32_APICBASE_ENABLE;
+ cpu_set_apic_feature(&s->cpu->env);
+ }
+
+ /* Transition from xAPIC to x2APIC */
+ if (cpu_has_x2apic_feature(&s->cpu->env) &&
+ !(s->apicbase & MSR_IA32_APICBASE_EXTD) &&
+ (val & MSR_IA32_APICBASE_EXTD)) {
+ s->apicbase |= MSR_IA32_APICBASE_EXTD;
+
+ s->log_dest = ((s->initial_apic_id & 0xffff0) << 16) |
+ (1 << (s->initial_apic_id & 0xf));
+ }
+
+ return 0;
}
static void apic_set_tpr(APICCommonState *s, uint8_t val)
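
apic_set_base_check() above encodes the legal IA32_APIC_BASE mode transitions: EXTD may only be set if the CPU advertises x2APIC, EXTD=1 with EN=0 is an invalid state, a disabled APIC cannot jump straight to x2APIC, and x2APIC can only be left by disabling the APIC first. The same rules as a standalone predicate, for illustration only (not part of the patch):

/* Illustrative only: the transition rules enforced by apic_set_base_check(). */
static bool apicbase_transition_is_valid(uint64_t old_val, uint64_t new_val,
                                         bool cpu_has_x2apic)
{
    bool old_en   = old_val & MSR_IA32_APICBASE_ENABLE;
    bool old_extd = old_val & MSR_IA32_APICBASE_EXTD;
    bool new_en   = new_val & MSR_IA32_APICBASE_ENABLE;
    bool new_extd = new_val & MSR_IA32_APICBASE_EXTD;

    if (new_extd && !cpu_has_x2apic) {
        return false;   /* x2APIC not advertised by the CPU */
    }
    if (new_extd && !new_en) {
        return false;   /* EN=0, EXTD=1 is an invalid state */
    }
    if (!old_en && !old_extd && new_en && new_extd) {
        return false;   /* disabled -> x2APIC must go through xAPIC */
    }
    if (old_en && old_extd && new_en && !new_extd) {
        return false;   /* x2APIC -> xAPIC must go through disabled */
    }
    return true;
}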
@@ -435,57 +515,123 @@ static void apic_eoi(APICCommonState *s)
apic_update_irq(s);
}
-static int apic_find_dest(uint8_t dest)
+static bool apic_match_dest(APICCommonState *apic, uint32_t dest)
{
- APICCommonState *apic = local_apics[dest];
- int i;
+ if (is_x2apic_mode(&apic->parent_obj)) {
+ return apic->initial_apic_id == dest;
+ } else {
+ return apic->id == (uint8_t)dest;
+ }
+}
- if (apic && apic->id == dest)
- return dest; /* shortcut in case apic->id == local_apics[dest]->id */
+static void apic_find_dest(uint32_t *deliver_bitmask, uint32_t dest)
+{
+ APICCommonState *apic = NULL;
+ int i;
- for (i = 0; i < MAX_APICS; i++) {
+ for (i = 0; i < max_apics; i++) {
apic = local_apics[i];
- if (apic && apic->id == dest)
- return i;
- if (!apic)
- break;
+ if (apic && apic_match_dest(apic, dest)) {
+ apic_set_bit(deliver_bitmask, i);
+ }
}
+}
- return -1;
+/*
+ * Deliver interrupt to x2APIC CPUs if it is x2APIC broadcast.
+ * Otherwise, deliver interrupt to xAPIC CPUs if it is xAPIC
+ * broadcast.
+ */
+static void apic_get_broadcast_bitmask(uint32_t *deliver_bitmask,
+ bool is_x2apic_broadcast)
+{
+ int i;
+ APICCommonState *apic_iter;
+
+ for (i = 0; i < max_apics; i++) {
+ apic_iter = local_apics[i];
+ if (apic_iter) {
+ bool apic_in_x2apic = is_x2apic_mode(&apic_iter->parent_obj);
+
+ if (is_x2apic_broadcast && apic_in_x2apic) {
+ apic_set_bit(deliver_bitmask, i);
+ } else if (!is_x2apic_broadcast && !apic_in_x2apic) {
+ apic_set_bit(deliver_bitmask, i);
+ }
+ }
+ }
}
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
- uint8_t dest, uint8_t dest_mode)
+ uint32_t dest, uint8_t dest_mode)
{
- APICCommonState *apic_iter;
+ APICCommonState *apic;
int i;
- if (dest_mode == 0) {
- if (dest == 0xff) {
- memset(deliver_bitmask, 0xff, MAX_APIC_WORDS * sizeof(uint32_t));
+ memset(deliver_bitmask, 0x00, max_apic_words * sizeof(uint32_t));
+
+ /*
+ * An x2APIC broadcast is delivered to all x2APIC CPUs regardless of
+ * destination mode. If the destination mode is physical, it is also
+ * broadcast to all xAPIC CPUs. Otherwise, if the destination mode is
+ * logical, we still need to check whether any xAPIC CPU accepts the
+ * interrupt.
+ */
+ if (dest == 0xffffffff) {
+ if (dest_mode == APIC_DESTMODE_PHYSICAL) {
+ memset(deliver_bitmask, 0xff, max_apic_words * sizeof(uint32_t));
+ return;
} else {
- int idx = apic_find_dest(dest);
- memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
- if (idx >= 0)
- apic_set_bit(deliver_bitmask, idx);
+ apic_get_broadcast_bitmask(deliver_bitmask, true);
+ }
+ }
+
+ if (dest_mode == APIC_DESTMODE_PHYSICAL) {
+ apic_find_dest(deliver_bitmask, dest);
+ /* Any APIC in xAPIC mode will interpret 0xFF as broadcast */
+ if (dest == 0xff) {
+ apic_get_broadcast_bitmask(deliver_bitmask, false);
}
} else {
- /* XXX: cluster mode */
- memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
- for(i = 0; i < MAX_APICS; i++) {
- apic_iter = local_apics[i];
- if (apic_iter) {
- if (apic_iter->dest_mode == 0xf) {
- if (dest & apic_iter->log_dest)
- apic_set_bit(deliver_bitmask, i);
- } else if (apic_iter->dest_mode == 0x0) {
- if ((dest & 0xf0) == (apic_iter->log_dest & 0xf0) &&
- (dest & apic_iter->log_dest & 0x0f)) {
+ /* XXX: logical mode */
+ for (i = 0; i < max_apics; i++) {
+ apic = local_apics[i];
+ if (apic) {
+ /* x2APIC logical mode */
+ if (apic->apicbase & MSR_IA32_APICBASE_EXTD) {
+ if ((dest >> 16) == (apic->extended_log_dest >> 16) &&
+ (dest & apic->extended_log_dest & 0xffff)) {
apic_set_bit(deliver_bitmask, i);
}
+ continue;
}
- } else {
- break;
+
+ /* xAPIC logical mode */
+ dest = (uint8_t)dest;
+ if (apic->dest_mode == APIC_DESTMODE_LOGICAL_FLAT) {
+ if (dest & apic->log_dest) {
+ apic_set_bit(deliver_bitmask, i);
+ }
+ } else if (apic->dest_mode == APIC_DESTMODE_LOGICAL_CLUSTER) {
+ /*
+ * In the cluster model of xAPIC logical mode, the 4 high
+ * bits of the destination are the cluster address and the
+ * 4 low bits are a bitmask of the local APICs within that
+ * cluster. The IPI is delivered to an APIC if the cluster
+ * address matches and the APIC's bit within the cluster is
+ * set in the destination bitmask of the IPI.
+ *
+ * Cluster addresses range from 0 to 14; cluster address
+ * 15 (0xf) is the broadcast address for all clusters.
+ */
+ if ((dest & 0xf0) == 0xf0 ||
+ (dest & 0xf0) == (apic->log_dest & 0xf0)) {
+ if (dest & apic->log_dest & 0x0f) {
+ apic_set_bit(deliver_bitmask, i);
+ }
+ }
+ }
}
}
}
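
As a worked example of the cluster-mode match described above (illustrative only, not part of the patch):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /*
     * xAPIC logical cluster mode: high nibble is the cluster address
     * (0xf broadcasts to every cluster), low nibble is a bitmask of
     * APICs within the cluster.
     */
    static bool xapic_cluster_match(uint8_t dest, uint8_t ldr)
    {
        if ((dest & 0xf0) != 0xf0 && (dest & 0xf0) != (ldr & 0xf0)) {
            return false;                    /* different cluster, no broadcast */
        }
        return (dest & ldr & 0x0f) != 0;     /* this APIC's bit is selected */
    }

    int main(void)
    {
        assert(xapic_cluster_match(0x31, 0x31));   /* cluster 3, bit 0 matches */
        assert(!xapic_cluster_match(0x31, 0x32));  /* right cluster, wrong bit */
        assert(!xapic_cluster_match(0x21, 0x31));  /* wrong cluster            */
        assert(xapic_cluster_match(0xf1, 0x31));   /* all-clusters broadcast   */
        return 0;
    }
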
@@ -509,29 +655,36 @@ void apic_sipi(DeviceState *dev)
s->wait_for_sipi = 0;
}
-static void apic_deliver(DeviceState *dev, uint8_t dest, uint8_t dest_mode,
+static void apic_deliver(DeviceState *dev, uint32_t dest, uint8_t dest_mode,
uint8_t delivery_mode, uint8_t vector_num,
- uint8_t trigger_mode)
+ uint8_t trigger_mode, uint8_t dest_shorthand)
{
APICCommonState *s = APIC(dev);
- uint32_t deliver_bitmask[MAX_APIC_WORDS];
- int dest_shorthand = (s->icr[0] >> 18) & 3;
APICCommonState *apic_iter;
+ uint32_t deliver_bitmask_size = max_apic_words * sizeof(uint32_t);
+ uint32_t *deliver_bitmask = g_malloc(deliver_bitmask_size);
+ uint32_t current_apic_id;
+
+ if (is_x2apic_mode(dev)) {
+ current_apic_id = s->initial_apic_id;
+ } else {
+ current_apic_id = s->id;
+ }
switch (dest_shorthand) {
case 0:
apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
break;
case 1:
- memset(deliver_bitmask, 0x00, sizeof(deliver_bitmask));
- apic_set_bit(deliver_bitmask, s->id);
+ memset(deliver_bitmask, 0x00, deliver_bitmask_size);
+ apic_set_bit(deliver_bitmask, current_apic_id);
break;
case 2:
- memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
+ memset(deliver_bitmask, 0xff, deliver_bitmask_size);
break;
case 3:
- memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask));
- apic_reset_bit(deliver_bitmask, s->id);
+ memset(deliver_bitmask, 0xff, deliver_bitmask_size);
+ apic_reset_bit(deliver_bitmask, current_apic_id);
break;
}
@@ -555,6 +708,7 @@ static void apic_deliver(DeviceState *dev, uint8_t dest, uint8_t dest_mode,
}
apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
+ g_free(deliver_bitmask);
}
static bool apic_check_pic(APICCommonState *s)
@@ -636,27 +790,26 @@ static void apic_timer(void *opaque)
apic_timer_update(s, s->next_time);
}
-static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size)
+static int apic_register_read(int index, uint64_t *value)
{
DeviceState *dev;
APICCommonState *s;
uint32_t val;
- int index;
-
- if (size < 4) {
- return 0;
- }
+ int ret = 0;
dev = cpu_get_current_apic();
if (!dev) {
- return 0;
+ return -1;
}
s = APIC(dev);
- index = (addr >> 4) & 0xff;
switch(index) {
case 0x02: /* id */
- val = s->id << 24;
+ if (is_x2apic_mode(dev)) {
+ val = s->initial_apic_id;
+ } else {
+ val = s->id << 24;
+ }
break;
case 0x03: /* version */
val = s->version | ((APIC_LVT_NB - 1) << 16);
@@ -679,10 +832,19 @@ static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size)
val = 0;
break;
case 0x0d:
- val = s->log_dest << 24;
+ if (is_x2apic_mode(dev)) {
+ val = s->extended_log_dest;
+ } else {
+ val = s->log_dest << 24;
+ }
break;
case 0x0e:
- val = (s->dest_mode << 28) | 0xfffffff;
+ if (is_x2apic_mode(dev)) {
+ val = 0;
+ ret = -1;
+ } else {
+ val = (s->dest_mode << 28) | 0xfffffff;
+ }
break;
case 0x0f:
val = s->spurious_vec;
@@ -718,17 +880,56 @@ static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size)
default:
s->esr |= APIC_ESR_ILLEGAL_ADDRESS;
val = 0;
+ ret = -1;
break;
}
- trace_apic_mem_readl(addr, val);
+
+ trace_apic_register_read(index, val);
+ *value = val;
+ return ret;
+}
+
+static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size)
+{
+ uint64_t val;
+ int index;
+
+ if (size < 4) {
+ return 0;
+ }
+
+ index = (addr >> 4) & 0xff;
+ apic_register_read(index, &val);
+
return val;
}
+int apic_msr_read(int index, uint64_t *val)
+{
+ DeviceState *dev;
+
+ dev = cpu_get_current_apic();
+ if (!dev) {
+ return -1;
+ }
+
+ if (!is_x2apic_mode(dev)) {
+ return -1;
+ }
+
+ return apic_register_read(index, val);
+}
+
static void apic_send_msi(MSIMessage *msi)
{
uint64_t addr = msi->address;
uint32_t data = msi->data;
- uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+ uint32_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+ /*
+ * The upper 3 bytes of the destination id are stored in the upper
+ * word of the MSI address. See x86_iommu_irq_to_msi_message().
+ */
+ dest = dest | (addr >> 32);
uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
@@ -737,38 +938,25 @@ static void apic_send_msi(MSIMessage *msi)
apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode);
}
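
The destination reassembly in apic_send_msi() above can be pictured with a small standalone sketch (illustrative only; the constants below are the architectural bit positions, not QEMU's macros): the classic 8-bit destination sits in address bits 19:12 and, per the comment referencing x86_iommu_irq_to_msi_message(), the upper destination bytes ride in the high 32 bits of the address.

    #include <assert.h>
    #include <stdint.h>

    #define DEST_ID_SHIFT 12
    #define DEST_ID_MASK  0x000ff000u        /* address bits 19:12 */

    /* Reassemble the 32-bit destination the same way apic_send_msi() does */
    static uint32_t msi_addr_to_dest(uint64_t addr)
    {
        uint32_t dest = (addr & DEST_ID_MASK) >> DEST_ID_SHIFT;
        return dest | (uint32_t)(addr >> 32);    /* upper bytes from high dword */
    }

    int main(void)
    {
        /* plain xAPIC destination 0x12 encoded in bits 19:12 */
        assert(msi_addr_to_dest(0xfee12000u) == 0x12);
        /* x2APIC destination 0x12345: low byte in bits 19:12, rest up high */
        uint64_t addr = 0xfee45000u | ((uint64_t)(0x12345u & 0xffffff00u) << 32);
        assert(msi_addr_to_dest(addr) == 0x12345);
        return 0;
    }
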
-static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val,
- unsigned size)
+static int apic_register_write(int index, uint64_t val)
{
DeviceState *dev;
APICCommonState *s;
- int index = (addr >> 4) & 0xff;
-
- if (size < 4) {
- return;
- }
-
- if (addr > 0xfff || !index) {
- /* MSI and MMIO APIC are at the same memory location,
- * but actually not on the global bus: MSI is on PCI bus
- * APIC is connected directly to the CPU.
- * Mapping them on the global bus happens to work because
- * MSI registers are reserved in APIC MMIO and vice versa. */
- MSIMessage msi = { .address = addr, .data = val };
- apic_send_msi(&msi);
- return;
- }
dev = cpu_get_current_apic();
if (!dev) {
- return;
+ return -1;
}
s = APIC(dev);
- trace_apic_mem_writel(addr, val);
+ trace_apic_register_write(index, val);
switch(index) {
case 0x02:
+ if (is_x2apic_mode(dev)) {
+ return -1;
+ }
+
s->id = (val >> 24);
break;
case 0x03:
@@ -788,9 +976,17 @@ static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val,
apic_eoi(s);
break;
case 0x0d:
+ if (is_x2apic_mode(dev)) {
+ return -1;
+ }
+
s->log_dest = val >> 24;
break;
case 0x0e:
+ if (is_x2apic_mode(dev)) {
+ return -1;
+ }
+
s->dest_mode = val >> 28;
break;
case 0x0f:
@@ -802,13 +998,27 @@ static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val,
case 0x20 ... 0x27:
case 0x28:
break;
- case 0x30:
+ case 0x30: {
+ uint32_t dest;
+
s->icr[0] = val;
- apic_deliver(dev, (s->icr[1] >> 24) & 0xff, (s->icr[0] >> 11) & 1,
+ if (is_x2apic_mode(dev)) {
+ s->icr[1] = val >> 32;
+ dest = s->icr[1];
+ } else {
+ dest = (s->icr[1] >> 24) & 0xff;
+ }
+
+ apic_deliver(dev, dest, (s->icr[0] >> 11) & 1,
(s->icr[0] >> 8) & 7, (s->icr[0] & 0xff),
- (s->icr[0] >> 15) & 1);
+ (s->icr[0] >> 15) & 1, (s->icr[0] >> 18) & 3);
break;
+ }
case 0x31:
+ if (is_x2apic_mode(dev)) {
+ return -1;
+ }
+
s->icr[1] = val;
break;
case 0x32 ... 0x37:
@@ -837,10 +1047,70 @@ static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val,
s->count_shift = (v + 1) & 7;
}
break;
+ case 0x3f: {
+ int vector = val & 0xff;
+
+ if (!is_x2apic_mode(dev)) {
+ return -1;
+ }
+
+ /*
+ * Self IPI is identical to IPI with
+ * - Destination shorthand: 1 (Self)
+ * - Trigger mode: 0 (Edge)
+ * - Delivery mode: 0 (Fixed)
+ */
+ apic_deliver(dev, 0, 0, APIC_DM_FIXED, vector, 0, 1);
+
+ break;
+ }
default:
s->esr |= APIC_ESR_ILLEGAL_ADDRESS;
- break;
+ return -1;
+ }
+
+ return 0;
+}
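
In x2APIC mode the ICR is written as a single 64-bit value; the case 0x30 above stores the high half in icr[1] and takes the full 32-bit destination from it, while register 0x3f (SELF_IPI) carries only a vector. A small decoding sketch using the same bit positions as the apic_deliver() call above (illustrative only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    struct icr_fields {
        uint32_t dest;              /* x2APIC: full 32-bit destination */
        uint8_t vector;             /* bits 7:0   */
        uint8_t delivery_mode;      /* bits 10:8  */
        uint8_t dest_mode;          /* bit 11     */
        uint8_t trigger_mode;       /* bit 15     */
        uint8_t dest_shorthand;     /* bits 19:18 */
    };

    static struct icr_fields decode_x2apic_icr(uint64_t icr)
    {
        struct icr_fields f = {
            .dest           = (uint32_t)(icr >> 32),
            .vector         = icr & 0xff,
            .delivery_mode  = (icr >> 8) & 7,
            .dest_mode      = (icr >> 11) & 1,
            .trigger_mode   = (icr >> 15) & 1,
            .dest_shorthand = (icr >> 18) & 3,
        };
        return f;
    }

    int main(void)
    {
        /* fixed, edge-triggered vector 0x40 to destination 0x00010002 */
        struct icr_fields f = decode_x2apic_icr(0x0001000200000040ull);
        assert(f.dest == 0x00010002 && f.vector == 0x40 && f.dest_shorthand == 0);
        return 0;
    }
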
+
+static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ int index = (addr >> 4) & 0xff;
+
+ if (size < 4) {
+ return;
+ }
+
+ if (addr > 0xfff || !index) {
+ /*
+ * MSI and MMIO APIC are at the same memory location,
+ * but actually not on the global bus: MSI is on PCI bus
+ * APIC is connected directly to the CPU.
+ * Mapping them on the global bus happens to work because
+ * MSI registers are reserved in APIC MMIO and vice versa.
+ */
+ MSIMessage msi = { .address = addr, .data = val };
+ apic_send_msi(&msi);
+ return;
+ }
+
+ apic_register_write(index, val);
+}
+
+int apic_msr_write(int index, uint64_t val)
+{
+ DeviceState *dev;
+
+ dev = cpu_get_current_apic();
+ if (!dev) {
+ return -1;
}
+
+ if (!is_x2apic_mode(dev)) {
+ return -1;
+ }
+
+ return apic_register_write(index, val);
}
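
apic_msr_read() and apic_msr_write() expose the same register file through MSRs when the APIC is in x2APIC mode. Architecturally the x2APIC MSRs occupy 0x800 through 0x8ff and map one-to-one onto the register index used above, so a caller outside this hunk would presumably translate the MSR number first. A sketch of that translation (the helper name is hypothetical, not from the patch):

    #include <stdint.h>

    /* Hypothetical caller-side translation: x2APIC MSR = 0x800 + register index */
    static int x2apic_msr_to_index(uint32_t msr)
    {
        if (msr < 0x800 || msr > 0x8ff) {
            return -1;               /* not an x2APIC MSR */
        }
        return msr - 0x800;          /* e.g. 0x830 (ICR) -> 0x30, 0x83f -> SELF_IPI */
    }
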
static void apic_pre_save(APICCommonState *s)
@@ -871,12 +1141,6 @@ static void apic_realize(DeviceState *dev, Error **errp)
{
APICCommonState *s = APIC(dev);
- if (s->id >= MAX_APICS) {
- error_setg(errp, "%s initialization failed. APIC ID %d is invalid",
- object_get_typename(OBJECT(dev)), s->id);
- return;
- }
-
if (kvm_enabled()) {
warn_report("Userspace local APIC is deprecated for KVM.");
warn_report("Do not use kernel-irqchip except for the -M isapc machine type.");
@@ -893,7 +1157,16 @@ static void apic_realize(DeviceState *dev, Error **errp)
s->io_memory.disable_reentrancy_guard = true;
s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s);
- local_apics[s->id] = s;
+
+ /*
+ * The 'none' machine does not call apic_set_max_apic_id() before
+ * creating the APIC, so call it here with 1, the maximum number of
+ * CPUs on that machine.
+ */
+ if (!local_apics) {
+ apic_set_max_apic_id(1);
+ }
+ local_apics[s->initial_apic_id] = s;
msi_nonbroken = true;
}
@@ -903,7 +1176,7 @@ static void apic_unrealize(DeviceState *dev)
APICCommonState *s = APIC(dev);
timer_free(s->timer);
- local_apics[s->id] = NULL;
+ local_apics[s->initial_apic_id] = NULL;
}
static void apic_class_init(ObjectClass *klass, void *data)
diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c
index 6c100b4..d8fc1e2 100644
--- a/hw/intc/apic_common.c
+++ b/hw/intc/apic_common.c
@@ -35,20 +35,19 @@
bool apic_report_tpr_access;
-void cpu_set_apic_base(DeviceState *dev, uint64_t val)
+int cpu_set_apic_base(DeviceState *dev, uint64_t val)
{
trace_cpu_set_apic_base(val);
if (dev) {
APICCommonState *s = APIC_COMMON(dev);
APICCommonClass *info = APIC_COMMON_GET_CLASS(s);
- /* switching to x2APIC, reset possibly modified xAPIC ID */
- if (!(s->apicbase & MSR_IA32_APICBASE_EXTD) &&
- (val & MSR_IA32_APICBASE_EXTD)) {
- s->id = s->initial_apic_id;
- }
- info->set_base(s, val);
+ /* Reset possibly modified xAPIC ID */
+ s->id = s->initial_apic_id;
+ return info->set_base(s, val);
}
+
+ return 0;
}
uint64_t cpu_get_apic_base(DeviceState *dev)
@@ -63,6 +62,19 @@ uint64_t cpu_get_apic_base(DeviceState *dev)
}
}
+bool cpu_is_apic_enabled(DeviceState *dev)
+{
+ APICCommonState *s;
+
+ if (!dev) {
+ return false;
+ }
+
+ s = APIC_COMMON(dev);
+
+ return s->apicbase & MSR_IA32_APICBASE_ENABLE;
+}
+
void cpu_set_apic_tpr(DeviceState *dev, uint8_t val)
{
APICCommonState *s;
@@ -287,6 +299,10 @@ static void apic_common_realize(DeviceState *dev, Error **errp)
}
vmstate_register_with_alias_id(NULL, instance_id, &vmstate_apic_common,
s, -1, 0, NULL);
+
+ /* APIC LDR in x2APIC mode */
+ s->extended_log_dest = ((s->initial_apic_id >> 4) << 16) |
+ (1 << (s->initial_apic_id & 0xf));
}
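
The LDR value computed here follows the x2APIC layout: the cluster ID (APIC ID / 16) in the upper 16 bits and a one-hot bit for the APIC's position within its 16-CPU cluster in the lower 16 bits. A quick worked example (illustrative only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* x2APIC logical destination register, as computed in apic_common_realize() */
    static uint32_t x2apic_ldr(uint32_t apic_id)
    {
        return ((apic_id >> 4) << 16) | (1u << (apic_id & 0xf));
    }

    int main(void)
    {
        assert(x2apic_ldr(0x00) == 0x00000001u);   /* cluster 0, bit 0 */
        assert(x2apic_ldr(0x13) == 0x00010008u);   /* cluster 1, bit 3 */
        assert(x2apic_ldr(0x25) == 0x00020020u);   /* cluster 2, bit 5 */
        return 0;
    }
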
static void apic_common_unrealize(DeviceState *dev)
@@ -427,6 +443,11 @@ static void apic_common_set_id(Object *obj, Visitor *v, const char *name,
return;
}
+ if (value >= 255 && !cpu_has_x2apic_feature(&s->cpu->env)) {
+ error_setg(errp, "APIC ID %d requires x2APIC feature in CPU", value);
+ return;
+ }
+
s->initial_apic_id = value;
s->id = (uint8_t)value;
}
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
index 36ff71f..1ef29d0 100644
--- a/hw/intc/trace-events
+++ b/hw/intc/trace-events
@@ -14,8 +14,8 @@ cpu_get_apic_base(uint64_t val) "0x%016"PRIx64
# apic.c
apic_local_deliver(int vector, uint32_t lvt) "vector %d delivery mode %d"
apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode, uint8_t vector_num, uint8_t trigger_mode) "dest %d dest_mode %d delivery_mode %d vector %d trigger_mode %d"
-apic_mem_readl(uint64_t addr, uint32_t val) "0x%"PRIx64" = 0x%08x"
-apic_mem_writel(uint64_t addr, uint32_t val) "0x%"PRIx64" = 0x%08x"
+apic_register_read(uint8_t reg, uint64_t val) "register 0x%02x = 0x%"PRIx64
+apic_register_write(uint8_t reg, uint64_t val) "register 0x%02x = 0x%"PRIx64
# ioapic.c
ioapic_set_remote_irr(int n) "set remote irr for pin %d"
diff --git a/hw/isa/vt82c686.c b/hw/isa/vt82c686.c
index a99eae4..0c504de 100644
--- a/hw/isa/vt82c686.c
+++ b/hw/isa/vt82c686.c
@@ -15,6 +15,9 @@
#include "qemu/osdep.h"
#include "hw/isa/vt82c686.h"
+#include "hw/block/fdc.h"
+#include "hw/char/parallel-isa.h"
+#include "hw/char/serial.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "hw/ide/pci.h"
@@ -323,6 +326,17 @@ static uint64_t via_superio_cfg_read(void *opaque, hwaddr addr, unsigned size)
return val;
}
+static void via_superio_devices_enable(ViaSuperIOState *s, uint8_t data)
+{
+ ISASuperIOClass *ic = ISA_SUPERIO_GET_CLASS(s);
+
+ isa_parallel_set_enabled(s->superio.parallel[0], (data & 0x3) != 3);
+ for (int i = 0; i < ic->serial.count; i++) {
+ isa_serial_set_enabled(s->superio.serial[i], data & BIT(i + 2));
+ }
+ isa_fdc_set_enabled(s->superio.floppy, data & BIT(4));
+}
+
static void via_superio_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -368,7 +382,25 @@ static void vt82c686b_superio_cfg_write(void *opaque, hwaddr addr,
case 0xfd ... 0xff:
/* ignore write to read only registers */
return;
- /* case 0xe6 ... 0xe8: Should set base port of parallel and serial */
+ case 0xe2:
+ data &= 0x1f;
+ via_superio_devices_enable(sc, data);
+ break;
+ case 0xe3:
+ data &= 0xfc;
+ isa_fdc_set_iobase(sc->superio.floppy, data << 2);
+ break;
+ case 0xe6:
+ isa_parallel_set_iobase(sc->superio.parallel[0], data << 2);
+ break;
+ case 0xe7:
+ data &= 0xfe;
+ isa_serial_set_iobase(sc->superio.serial[0], data << 2);
+ break;
+ case 0xe8:
+ data &= 0xfe;
+ isa_serial_set_iobase(sc->superio.serial[1], data << 2);
+ break;
default:
qemu_log_mask(LOG_UNIMP,
"via_superio_cfg: unimplemented register 0x%x\n", idx);
@@ -395,9 +427,14 @@ static void vt82c686b_superio_reset(DeviceState *dev)
/* Device ID */
vt82c686b_superio_cfg_write(s, 0, 0xe0, 1);
vt82c686b_superio_cfg_write(s, 1, 0x3c, 1);
- /* Function select - all disabled */
+ /*
+ * Function select - only serial enabled
+ * Fuloong 2e's rescue-yl prints to the serial console w/o enabling it. This
+ * suggests that the serial ports are enabled by default, so override the
+ * datasheet.
+ */
vt82c686b_superio_cfg_write(s, 0, 0xe2, 1);
- vt82c686b_superio_cfg_write(s, 1, 0x03, 1);
+ vt82c686b_superio_cfg_write(s, 1, 0x0f, 1);
/* Floppy ctrl base addr 0x3f0-7 */
vt82c686b_superio_cfg_write(s, 0, 0xe3, 1);
vt82c686b_superio_cfg_write(s, 1, 0xfc, 1);
@@ -465,6 +502,21 @@ static void vt8231_superio_cfg_write(void *opaque, hwaddr addr,
case 0xfd:
/* ignore write to read only registers */
return;
+ case 0xf2:
+ data &= 0x17;
+ via_superio_devices_enable(sc, data);
+ break;
+ case 0xf4:
+ data &= 0xfe;
+ isa_serial_set_iobase(sc->superio.serial[0], data << 2);
+ break;
+ case 0xf6:
+ isa_parallel_set_iobase(sc->superio.parallel[0], data << 2);
+ break;
+ case 0xf7:
+ data &= 0xfc;
+ isa_fdc_set_iobase(sc->superio.floppy, data << 2);
+ break;
default:
qemu_log_mask(LOG_UNIMP,
"via_superio_cfg: unimplemented register 0x%x\n", idx);
@@ -513,12 +565,6 @@ static void vt8231_superio_init(Object *obj)
VIA_SUPERIO(obj)->io_ops = &vt8231_superio_cfg_ops;
}
-static uint16_t vt8231_superio_serial_iobase(ISASuperIODevice *sio,
- uint8_t index)
-{
- return 0x2f8; /* FIXME: This should be settable via registers f2-f4 */
-}
-
static void vt8231_superio_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -526,7 +572,6 @@ static void vt8231_superio_class_init(ObjectClass *klass, void *data)
dc->reset = vt8231_superio_reset;
sc->serial.count = 1;
- sc->serial.get_iobase = vt8231_superio_serial_iobase;
sc->parallel.count = 1;
sc->ide.count = 0; /* emulated by via-ide */
sc->floppy.count = 1;
diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c
index 52647b4..e880180 100644
--- a/hw/mem/cxl_type3.c
+++ b/hw/mem/cxl_type3.c
@@ -42,9 +42,9 @@ enum {
CT3_CDAT_NUM_ENTRIES
};
-static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
- int dsmad_handle, MemoryRegion *mr,
- bool is_pmem, uint64_t dpa_base)
+static void ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
+ int dsmad_handle, MemoryRegion *mr,
+ bool is_pmem, uint64_t dpa_base)
{
g_autofree CDATDsmas *dsmas = NULL;
g_autofree CDATDslbis *dslbis0 = NULL;
@@ -54,9 +54,6 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
g_autofree CDATDsemts *dsemts = NULL;
dsmas = g_malloc(sizeof(*dsmas));
- if (!dsmas) {
- return -ENOMEM;
- }
*dsmas = (CDATDsmas) {
.header = {
.type = CDAT_TYPE_DSMAS,
@@ -70,9 +67,6 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
/* For now, no memory side cache, plausiblish numbers */
dslbis0 = g_malloc(sizeof(*dslbis0));
- if (!dslbis0) {
- return -ENOMEM;
- }
*dslbis0 = (CDATDslbis) {
.header = {
.type = CDAT_TYPE_DSLBIS,
@@ -86,9 +80,6 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
};
dslbis1 = g_malloc(sizeof(*dslbis1));
- if (!dslbis1) {
- return -ENOMEM;
- }
*dslbis1 = (CDATDslbis) {
.header = {
.type = CDAT_TYPE_DSLBIS,
@@ -102,9 +93,6 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
};
dslbis2 = g_malloc(sizeof(*dslbis2));
- if (!dslbis2) {
- return -ENOMEM;
- }
*dslbis2 = (CDATDslbis) {
.header = {
.type = CDAT_TYPE_DSLBIS,
@@ -118,9 +106,6 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
};
dslbis3 = g_malloc(sizeof(*dslbis3));
- if (!dslbis3) {
- return -ENOMEM;
- }
*dslbis3 = (CDATDslbis) {
.header = {
.type = CDAT_TYPE_DSLBIS,
@@ -134,9 +119,6 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
};
dsemts = g_malloc(sizeof(*dsemts));
- if (!dsemts) {
- return -ENOMEM;
- }
*dsemts = (CDATDsemts) {
.header = {
.type = CDAT_TYPE_DSEMTS,
@@ -159,8 +141,6 @@ static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
cdat_table[CT3_CDAT_DSLBIS2] = g_steal_pointer(&dslbis2);
cdat_table[CT3_CDAT_DSLBIS3] = g_steal_pointer(&dslbis3);
cdat_table[CT3_CDAT_DSEMTS] = g_steal_pointer(&dsemts);
-
- return 0;
}
static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
@@ -171,7 +151,6 @@ static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
int dsmad_handle = 0;
int cur_ent = 0;
int len = 0;
- int rc, i;
if (!ct3d->hostpmem && !ct3d->hostvmem) {
return 0;
@@ -194,27 +173,18 @@ static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
}
table = g_malloc0(len * sizeof(*table));
- if (!table) {
- return -ENOMEM;
- }
/* Now fill them in */
if (volatile_mr) {
- rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr,
- false, 0);
- if (rc < 0) {
- return rc;
- }
+ ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr,
+ false, 0);
cur_ent = CT3_CDAT_NUM_ENTRIES;
}
if (nonvolatile_mr) {
uint64_t base = volatile_mr ? memory_region_size(volatile_mr) : 0;
- rc = ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++,
- nonvolatile_mr, true, base);
- if (rc < 0) {
- goto error_cleanup;
- }
+ ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++,
+ nonvolatile_mr, true, base);
cur_ent += CT3_CDAT_NUM_ENTRIES;
}
assert(len == cur_ent);
@@ -222,11 +192,6 @@ static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
*cdat_table = g_steal_pointer(&table);
return len;
-error_cleanup:
- for (i = 0; i < cur_ent; i++) {
- g_free(table[i]);
- }
- return rc;
}
static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv)
@@ -354,7 +319,7 @@ static void build_dvsecs(CXLType3Dev *ct3d)
cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
PCIE_CXL_DEVICE_DVSEC_LENGTH,
PCIE_CXL_DEVICE_DVSEC,
- PCIE_CXL2_DEVICE_DVSEC_REVID, dvsec);
+ PCIE_CXL31_DEVICE_DVSEC_REVID, dvsec);
dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
.rsvd = 0,
@@ -381,9 +346,9 @@ static void build_dvsecs(CXLType3Dev *ct3d)
.rcvd_mod_ts_data_phase1 = 0xef, /* WTF? */
};
cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
- PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
+ PCIE_CXL3_FLEXBUS_PORT_DVSEC_LENGTH,
PCIE_FLEXBUS_PORT_DVSEC,
- PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);
+ PCIE_CXL3_FLEXBUS_PORT_DVSEC_REVID, dvsec);
}
static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
@@ -829,8 +794,13 @@ static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
}
if (((uint64_t)host_addr < decoder_base) ||
(hpa_offset >= decoder_size)) {
- dpa_base += decoder_size /
- cxl_interleave_ways_dec(iw, &error_fatal);
+ int decoded_iw = cxl_interleave_ways_dec(iw, &error_fatal);
+
+ if (decoded_iw == 0) {
+ return false;
+ }
+
+ dpa_base += decoder_size / decoded_iw;
continue;
}
@@ -1168,9 +1138,6 @@ void qmp_cxl_inject_uncorrectable_errors(const char *path,
}
cxl_err = g_malloc0(sizeof(*cxl_err));
- if (!cxl_err) {
- return;
- }
cxl_err->type = cxl_err_code;
while (header && header_count < 32) {
diff --git a/hw/pci-bridge/cxl_downstream.c b/hw/pci-bridge/cxl_downstream.c
index 405a133..742da07 100644
--- a/hw/pci-bridge/cxl_downstream.c
+++ b/hw/pci-bridge/cxl_downstream.c
@@ -109,9 +109,9 @@ static void build_dvsecs(CXLComponentState *cxl)
.rcvd_mod_ts_data_phase1 = 0xef, /* WTF? */
};
cxl_component_create_dvsec(cxl, CXL2_DOWNSTREAM_PORT,
- PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
+ PCIE_CXL3_FLEXBUS_PORT_DVSEC_LENGTH,
PCIE_FLEXBUS_PORT_DVSEC,
- PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);
+ PCIE_CXL3_FLEXBUS_PORT_DVSEC_REVID, dvsec);
dvsec = (uint8_t *)&(CXLDVSECPortGPF){
.rsvd = 0,
diff --git a/hw/pci-bridge/cxl_root_port.c b/hw/pci-bridge/cxl_root_port.c
index 8f97697..62f9699 100644
--- a/hw/pci-bridge/cxl_root_port.c
+++ b/hw/pci-bridge/cxl_root_port.c
@@ -129,9 +129,9 @@ static void build_dvsecs(CXLComponentState *cxl)
.rcvd_mod_ts_data_phase1 = 0xef,
};
cxl_component_create_dvsec(cxl, CXL2_ROOT_PORT,
- PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
+ PCIE_CXL3_FLEXBUS_PORT_DVSEC_LENGTH,
PCIE_FLEXBUS_PORT_DVSEC,
- PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);
+ PCIE_CXL3_FLEXBUS_PORT_DVSEC_REVID, dvsec);
dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
.rsvd = 0,
diff --git a/hw/pci-bridge/cxl_upstream.c b/hw/pci-bridge/cxl_upstream.c
index 3673718..e87eb40 100644
--- a/hw/pci-bridge/cxl_upstream.c
+++ b/hw/pci-bridge/cxl_upstream.c
@@ -121,9 +121,9 @@ static void build_dvsecs(CXLComponentState *cxl)
.rcvd_mod_ts_data_phase1 = 0xef, /* WTF? */
};
cxl_component_create_dvsec(cxl, CXL2_UPSTREAM_PORT,
- PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
+ PCIE_CXL3_FLEXBUS_PORT_DVSEC_LENGTH,
PCIE_FLEXBUS_PORT_DVSEC,
- PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);
+ PCIE_CXL3_FLEXBUS_PORT_DVSEC_REVID, dvsec);
dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
.rsvd = 0,
@@ -228,9 +228,6 @@ static int build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
sslbis_size = sizeof(CDATSslbis) + sizeof(*sslbis_latency->sslbe) * count;
sslbis_latency = g_malloc(sslbis_size);
- if (!sslbis_latency) {
- return -ENOMEM;
- }
*sslbis_latency = (CDATSslbis) {
.sslbis_header = {
.header = {
@@ -251,9 +248,6 @@ static int build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
}
sslbis_bandwidth = g_malloc(sslbis_size);
- if (!sslbis_bandwidth) {
- return 0;
- }
*sslbis_bandwidth = (CDATSslbis) {
.sslbis_header = {
.header = {
diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c
index d84f3f9..04d6dec 100644
--- a/hw/ppc/pegasos2.c
+++ b/hw/ppc/pegasos2.c
@@ -285,6 +285,12 @@ static void pegasos2_pci_config_write(Pegasos2MachineState *pm, int bus,
pegasos2_mv_reg_write(pm, pcicfg + 4, len, val);
}
+static void pegasos2_superio_write(uint8_t addr, uint8_t val)
+{
+ cpu_physical_memory_write(PCI1_IO_BASE + 0x3f0, &addr, 1);
+ cpu_physical_memory_write(PCI1_IO_BASE + 0x3f1, &val, 1);
+}
+
static void pegasos2_machine_reset(MachineState *machine, ShutdownCause reason)
{
Pegasos2MachineState *pm = PEGASOS2_MACHINE(machine);
@@ -311,6 +317,12 @@ static void pegasos2_machine_reset(MachineState *machine, ShutdownCause reason)
pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) |
PCI_INTERRUPT_LINE, 2, 0x9);
pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) |
+ 0x50, 1, 0x6);
+ pegasos2_superio_write(0xf4, 0xbe);
+ pegasos2_superio_write(0xf6, 0xef);
+ pegasos2_superio_write(0xf7, 0xfc);
+ pegasos2_superio_write(0xf2, 0x14);
+ pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) |
0x50, 1, 0x2);
pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) |
0x55, 1, 0x90);
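
pegasos2_superio_write() pokes the VT8231 super I/O configuration space through the index/data pair at PCI1_IO_BASE + 0x3f0/0x3f1, so these values land in the register handlers added to vt82c686.c above: 0xf2 = 0x14 enables serial 0 (bit 2) and the floppy controller (bit 4), and the base-address registers hold the legacy I/O port shifted right by two. A quick check of the values used here (illustrative only):

    #include <assert.h>

    int main(void)
    {
        /* VT8231 base-address registers store the port address >> 2 */
        assert((0xbe << 2) == 0x2f8);   /* 0xf4: serial 0          */
        assert((0xef << 2) == 0x3bc);   /* 0xf6: parallel port     */
        assert((0xfc << 2) == 0x3f0);   /* 0xf7: floppy controller */
        return 0;
    }
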
diff --git a/hw/virtio/Kconfig b/hw/virtio/Kconfig
index 92c9cf6..aa63ff7 100644
--- a/hw/virtio/Kconfig
+++ b/hw/virtio/Kconfig
@@ -101,6 +101,11 @@ config VHOST_VDPA_DEV
default y
depends on VIRTIO && VHOST_VDPA && LINUX
+config VHOST_USER_SND
+ bool
+ default y
+ depends on VIRTIO && VHOST_USER
+
config VHOST_USER_SCMI
bool
default y
diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build
index 47baf00..d7f18c9 100644
--- a/hw/virtio/meson.build
+++ b/hw/virtio/meson.build
@@ -17,8 +17,28 @@ if have_vhost
if have_vhost_user
# fixme - this really should be generic
specific_virtio_ss.add(files('vhost-user.c'))
+ system_virtio_ss.add(files('vhost-user-base.c'))
+
+ # MMIO Stubs
system_virtio_ss.add(files('vhost-user-device.c'))
+ system_virtio_ss.add(when: 'CONFIG_VHOST_USER_GPIO', if_true: files('vhost-user-gpio.c'))
+ system_virtio_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c.c'))
+ system_virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c'))
+ system_virtio_ss.add(when: 'CONFIG_VHOST_USER_SND', if_true: files('vhost-user-snd.c'))
+ system_virtio_ss.add(when: 'CONFIG_VHOST_USER_INPUT', if_true: files('vhost-user-input.c'))
+
+ # PCI Stubs
system_virtio_ss.add(when: 'CONFIG_VIRTIO_PCI', if_true: files('vhost-user-device-pci.c'))
+ system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_GPIO'],
+ if_true: files('vhost-user-gpio-pci.c'))
+ system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_I2C'],
+ if_true: files('vhost-user-i2c-pci.c'))
+ system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_RNG'],
+ if_true: files('vhost-user-rng-pci.c'))
+ system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_SND'],
+ if_true: files('vhost-user-snd-pci.c'))
+ system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_INPUT'],
+ if_true: files('vhost-user-input-pci.c'))
endif
if have_vhost_vdpa
system_virtio_ss.add(files('vhost-vdpa.c'))
@@ -35,10 +55,6 @@ specific_virtio_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock.c
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_VSOCK', if_true: files('vhost-user-vsock.c'))
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_RNG', if_true: files('virtio-rng.c'))
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem.c'))
-specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c.c'))
-specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c'))
-specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_GPIO', if_true: files('vhost-user-gpio.c'))
-specific_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_GPIO'], if_true: files('vhost-user-gpio-pci.c'))
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_SCMI', if_true: files('vhost-user-scmi.c'))
specific_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_SCMI'], if_true: files('vhost-user-scmi-pci.c'))
@@ -46,9 +62,6 @@ virtio_pci_ss = ss.source_set()
virtio_pci_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_VSOCK', if_true: files('vhost-user-vsock-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk-pci.c'))
-virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c-pci.c'))
-virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_INPUT', if_true: files('vhost-user-input-pci.c'))
-virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_SCSI', if_true: files('vhost-user-scsi-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VHOST_SCSI', if_true: files('vhost-scsi-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs-pci.c'))
diff --git a/hw/virtio/vhost-user-base.c b/hw/virtio/vhost-user-base.c
new file mode 100644
index 0000000..a831671
--- /dev/null
+++ b/hw/virtio/vhost-user-base.c
@@ -0,0 +1,371 @@
+/*
+ * Base vhost-user-base implementation. This can be used to derive a
+ * more fully specified vhost-user backend either generically (see
+ * vhost-user-device) or via a specific stub for a device which
+ * encapsulates some fixed parameters.
+ *
+ * Copyright (c) 2023 Linaro Ltd
+ * Author: Alex Bennée <alex.bennee@linaro.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/qdev-properties.h"
+#include "hw/virtio/virtio-bus.h"
+#include "hw/virtio/vhost-user-base.h"
+#include "qemu/error-report.h"
+
+static void vub_start(VirtIODevice *vdev)
+{
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+ VHostUserBase *vub = VHOST_USER_BASE(vdev);
+ int ret, i;
+
+ if (!k->set_guest_notifiers) {
+ error_report("binding does not support guest notifiers");
+ return;
+ }
+
+ ret = vhost_dev_enable_notifiers(&vub->vhost_dev, vdev);
+ if (ret < 0) {
+ error_report("Error enabling host notifiers: %d", -ret);
+ return;
+ }
+
+ ret = k->set_guest_notifiers(qbus->parent, vub->vhost_dev.nvqs, true);
+ if (ret < 0) {
+ error_report("Error binding guest notifier: %d", -ret);
+ goto err_host_notifiers;
+ }
+
+ vub->vhost_dev.acked_features = vdev->guest_features;
+
+ ret = vhost_dev_start(&vub->vhost_dev, vdev, true);
+ if (ret < 0) {
+ error_report("Error starting vhost-user-base: %d", -ret);
+ goto err_guest_notifiers;
+ }
+
+ /*
+ * guest_notifier_mask/pending not used yet, so just unmask
+ * everything here. virtio-pci will do the right thing by
+ * enabling/disabling irqfd.
+ */
+ for (i = 0; i < vub->vhost_dev.nvqs; i++) {
+ vhost_virtqueue_mask(&vub->vhost_dev, vdev, i, false);
+ }
+
+ return;
+
+err_guest_notifiers:
+ k->set_guest_notifiers(qbus->parent, vub->vhost_dev.nvqs, false);
+err_host_notifiers:
+ vhost_dev_disable_notifiers(&vub->vhost_dev, vdev);
+}
+
+static void vub_stop(VirtIODevice *vdev)
+{
+ VHostUserBase *vub = VHOST_USER_BASE(vdev);
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+ int ret;
+
+ if (!k->set_guest_notifiers) {
+ return;
+ }
+
+ vhost_dev_stop(&vub->vhost_dev, vdev, true);
+
+ ret = k->set_guest_notifiers(qbus->parent, vub->vhost_dev.nvqs, false);
+ if (ret < 0) {
+ error_report("vhost guest notifier cleanup failed: %d", ret);
+ return;
+ }
+
+ vhost_dev_disable_notifiers(&vub->vhost_dev, vdev);
+}
+
+static void vub_set_status(VirtIODevice *vdev, uint8_t status)
+{
+ VHostUserBase *vub = VHOST_USER_BASE(vdev);
+ bool should_start = virtio_device_should_start(vdev, status);
+
+ if (vhost_dev_is_started(&vub->vhost_dev) == should_start) {
+ return;
+ }
+
+ if (should_start) {
+ vub_start(vdev);
+ } else {
+ vub_stop(vdev);
+ }
+}
+
+/*
+ * For an implementation where everything is delegated to the backend
+ * we don't do anything other than return the full feature set offered
+ * by the daemon (modulo the reserved feature bit).
+ */
+static uint64_t vub_get_features(VirtIODevice *vdev,
+ uint64_t requested_features, Error **errp)
+{
+ VHostUserBase *vub = VHOST_USER_BASE(vdev);
+ /* This should be set when the vhost connection initialises */
+ g_assert(vub->vhost_dev.features);
+ return vub->vhost_dev.features & ~(1ULL << VHOST_USER_F_PROTOCOL_FEATURES);
+}
+
+/*
+ * To handle VirtIO config we need to know the size of the config
+ * space. We don't cache the config but re-fetch it from the backend
+ * every time in case something has changed.
+ */
+static void vub_get_config(VirtIODevice *vdev, uint8_t *config)
+{
+ VHostUserBase *vub = VHOST_USER_BASE(vdev);
+ Error *local_err = NULL;
+
+ /*
+ * There will have been a warning during vhost_dev_init, but let's
+ * assert here as nothing will work correctly from this point.
+ */
+ g_assert(vub->config_size && vub->vhost_user.supports_config == true);
+
+ if (vhost_dev_get_config(&vub->vhost_dev, config,
+ vub->config_size, &local_err)) {
+ error_report_err(local_err);
+ }
+}
+
+static void vub_set_config(VirtIODevice *vdev, const uint8_t *config_data)
+{
+ VHostUserBase *vub = VHOST_USER_BASE(vdev);
+ int ret;
+
+ g_assert(vub->config_size && vub->vhost_user.supports_config == true);
+
+ ret = vhost_dev_set_config(&vub->vhost_dev, config_data,
+ 0, vub->config_size,
+ VHOST_SET_CONFIG_TYPE_FRONTEND);
+ if (ret) {
+ error_report("vhost guest set device config space failed: %d", ret);
+ return;
+ }
+}
+
+/*
+ * When the daemon signals an update to the config we just need to
+ * signal the guest as we re-read the config on demand above.
+ */
+static int vub_config_notifier(struct vhost_dev *dev)
+{
+ virtio_notify_config(dev->vdev);
+ return 0;
+}
+
+const VhostDevConfigOps vub_config_ops = {
+ .vhost_dev_config_notifier = vub_config_notifier,
+};
+
+static void vub_handle_output(VirtIODevice *vdev, VirtQueue *vq)
+{
+ /*
+ * Not normally called; it's the daemon that handles the queue;
+ * however virtio's cleanup path can call this.
+ */
+}
+
+static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserBase *vub)
+{
+ vhost_user_cleanup(&vub->vhost_user);
+
+ for (int i = 0; i < vub->num_vqs; i++) {
+ VirtQueue *vq = g_ptr_array_index(vub->vqs, i);
+ virtio_delete_queue(vq);
+ }
+
+ virtio_cleanup(vdev);
+}
+
+static int vub_connect(DeviceState *dev)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserBase *vub = VHOST_USER_BASE(vdev);
+ struct vhost_dev *vhost_dev = &vub->vhost_dev;
+
+ if (vub->connected) {
+ return 0;
+ }
+ vub->connected = true;
+
+ /*
+ * If we support VHOST_USER_GET_CONFIG we must enable the notifier
+ * so we can ping the guest when it updates.
+ */
+ if (vub->vhost_user.supports_config) {
+ vhost_dev_set_config_notifier(vhost_dev, &vub_config_ops);
+ }
+
+ /* restore vhost state */
+ if (virtio_device_started(vdev, vdev->status)) {
+ vub_start(vdev);
+ }
+
+ return 0;
+}
+
+static void vub_event(void *opaque, QEMUChrEvent event);
+
+static void vub_disconnect(DeviceState *dev)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserBase *vub = VHOST_USER_BASE(vdev);
+
+ if (!vub->connected) {
+ return;
+ }
+ vub->connected = false;
+
+ vub_stop(vdev);
+ vhost_dev_cleanup(&vub->vhost_dev);
+
+ /* Re-instate the event handler for new connections */
+ qemu_chr_fe_set_handlers(&vub->chardev,
+ NULL, NULL, vub_event,
+ NULL, dev, NULL, true);
+}
+
+static void vub_event(void *opaque, QEMUChrEvent event)
+{
+ DeviceState *dev = opaque;
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserBase *vub = VHOST_USER_BASE(vdev);
+
+ switch (event) {
+ case CHR_EVENT_OPENED:
+ if (vub_connect(dev) < 0) {
+ qemu_chr_fe_disconnect(&vub->chardev);
+ return;
+ }
+ break;
+ case CHR_EVENT_CLOSED:
+ /* defer close until later to avoid circular close */
+ vhost_user_async_close(dev, &vub->chardev, &vub->vhost_dev,
+ vub_disconnect, vub_event);
+ break;
+ case CHR_EVENT_BREAK:
+ case CHR_EVENT_MUX_IN:
+ case CHR_EVENT_MUX_OUT:
+ /* Ignore */
+ break;
+ }
+}
+
+static void vub_device_realize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserBase *vub = VHOST_USER_BASE(dev);
+ int ret;
+
+ if (!vub->chardev.chr) {
+ error_setg(errp, "vhost-user-base: missing chardev");
+ return;
+ }
+
+ if (!vub->virtio_id) {
+ error_setg(errp, "vhost-user-base: need to define device id");
+ return;
+ }
+
+ if (!vub->num_vqs) {
+ vub->num_vqs = 1; /* reasonable default? */
+ }
+
+ if (!vub->vq_size) {
+ vub->vq_size = 64;
+ }
+
+ /*
+ * We can't handle config requests unless we know the size of the
+ * config region; specialisations of vhost-user-base can set this.
+ */
+ if (vub->config_size) {
+ vub->vhost_user.supports_config = true;
+ }
+
+ if (!vhost_user_init(&vub->vhost_user, &vub->chardev, errp)) {
+ return;
+ }
+
+ virtio_init(vdev, vub->virtio_id, vub->config_size);
+
+ /*
+ * Disable guest notifiers, by default all notifications will be via the
+ * asynchronous vhost-user socket.
+ */
+ vdev->use_guest_notifier_mask = false;
+
+ /* Allocate queues */
+ vub->vqs = g_ptr_array_sized_new(vub->num_vqs);
+ for (int i = 0; i < vub->num_vqs; i++) {
+ g_ptr_array_add(vub->vqs,
+ virtio_add_queue(vdev, vub->vq_size,
+ vub_handle_output));
+ }
+
+ vub->vhost_dev.nvqs = vub->num_vqs;
+ vub->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vub->vhost_dev.nvqs);
+
+ /* connect to backend */
+ ret = vhost_dev_init(&vub->vhost_dev, &vub->vhost_user,
+ VHOST_BACKEND_TYPE_USER, 0, errp);
+
+ if (ret < 0) {
+ do_vhost_user_cleanup(vdev, vub);
+ }
+
+ qemu_chr_fe_set_handlers(&vub->chardev, NULL, NULL, vub_event, NULL,
+ dev, NULL, true);
+}
+
+static void vub_device_unrealize(DeviceState *dev)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserBase *vub = VHOST_USER_BASE(dev);
+ struct vhost_virtqueue *vhost_vqs = vub->vhost_dev.vqs;
+
+ /* This will stop vhost backend if appropriate. */
+ vub_set_status(vdev, 0);
+ vhost_dev_cleanup(&vub->vhost_dev);
+ g_free(vhost_vqs);
+ do_vhost_user_cleanup(vdev, vub);
+}
+
+static void vub_class_init(ObjectClass *klass, void *data)
+{
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+
+ vdc->realize = vub_device_realize;
+ vdc->unrealize = vub_device_unrealize;
+ vdc->get_features = vub_get_features;
+ vdc->get_config = vub_get_config;
+ vdc->set_config = vub_set_config;
+ vdc->set_status = vub_set_status;
+}
+
+static const TypeInfo vub_types[] = {
+ {
+ .name = TYPE_VHOST_USER_BASE,
+ .parent = TYPE_VIRTIO_DEVICE,
+ .instance_size = sizeof(VHostUserBase),
+ .class_init = vub_class_init,
+ .class_size = sizeof(VHostUserBaseClass),
+ .abstract = true
+ }
+};
+
+DEFINE_TYPES(vub_types)
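
The header comment of this new file describes deriving per-device stubs from VHostUserBase by fixing a few parameters; the vhost-user-gpio conversion later in this patch is one such stub. A hypothetical stub for another device would follow the same shape (a sketch under that assumption; the vfoo names are illustrative and not from the patch):

    /* Hypothetical device stub deriving from vhost-user-base */
    static void vfoo_realize(DeviceState *dev, Error **errp)
    {
        VHostUserBase *vub = VHOST_USER_BASE(dev);
        VHostUserBaseClass *vubc = VHOST_USER_BASE_GET_CLASS(dev);

        /* Fix the parameters the generic vhost-user-device leaves configurable */
        vub->virtio_id = VIRTIO_ID_RNG;   /* device type from virtio_ids.h */
        vub->num_vqs = 1;                 /* queues the device spec defines */
        vub->config_size = 0;             /* no config space in this example */

        vubc->parent_realize(dev, errp);
    }

    static Property vfoo_properties[] = {
        DEFINE_PROP_CHR("chardev", VHostUserBase, chardev),
        DEFINE_PROP_END_OF_LIST(),
    };

The class_init for such a stub would presumably hook vfoo_realize in via device_class_set_parent_realize() and register the properties with device_class_set_props().
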
diff --git a/hw/virtio/vhost-user-device-pci.c b/hw/virtio/vhost-user-device-pci.c
index 41f9b79..efaf55d 100644
--- a/hw/virtio/vhost-user-device-pci.c
+++ b/hw/virtio/vhost-user-device-pci.c
@@ -9,21 +9,18 @@
#include "qemu/osdep.h"
#include "hw/qdev-properties.h"
-#include "hw/virtio/vhost-user-device.h"
+#include "hw/virtio/vhost-user-base.h"
#include "hw/virtio/virtio-pci.h"
struct VHostUserDevicePCI {
VirtIOPCIProxy parent_obj;
+
VHostUserBase vub;
};
-typedef struct VHostUserDevicePCI VHostUserDevicePCI;
-
#define TYPE_VHOST_USER_DEVICE_PCI "vhost-user-device-pci-base"
-DECLARE_INSTANCE_CHECKER(VHostUserDevicePCI,
- VHOST_USER_DEVICE_PCI,
- TYPE_VHOST_USER_DEVICE_PCI)
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserDevicePCI, VHOST_USER_DEVICE_PCI)
static void vhost_user_device_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
@@ -39,6 +36,10 @@ static void vhost_user_device_pci_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+
+ /* Reason: stop users confusing themselves */
+ dc->user_creatable = false;
+
k->realize = vhost_user_device_pci_realize;
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
diff --git a/hw/virtio/vhost-user-device.c b/hw/virtio/vhost-user-device.c
index 2b028ca..67aa934 100644
--- a/hw/virtio/vhost-user-device.c
+++ b/hw/virtio/vhost-user-device.c
@@ -1,7 +1,10 @@
/*
- * Generic vhost-user stub. This can be used to connect to any
- * vhost-user backend. All configuration details must be handled by
- * the vhost-user daemon itself
+ * Generic vhost-user-device implementation for any vhost-user backend
+ *
+ * This is a concrete implementation of vhost-user-base which can be
+ * configured via properties. It is useful for development and
+ * prototyping. It expects configuration details (if any) to be
+ * handled by the vhost-user daemon itself.
*
* Copyright (c) 2023 Linaro Ltd
* Author: Alex Bennée <alex.bennee@linaro.org>
@@ -13,329 +16,9 @@
#include "qapi/error.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-bus.h"
-#include "hw/virtio/vhost-user-device.h"
+#include "hw/virtio/vhost-user-base.h"
#include "qemu/error-report.h"
-static void vub_start(VirtIODevice *vdev)
-{
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- VHostUserBase *vub = VHOST_USER_BASE(vdev);
- int ret, i;
-
- if (!k->set_guest_notifiers) {
- error_report("binding does not support guest notifiers");
- return;
- }
-
- ret = vhost_dev_enable_notifiers(&vub->vhost_dev, vdev);
- if (ret < 0) {
- error_report("Error enabling host notifiers: %d", -ret);
- return;
- }
-
- ret = k->set_guest_notifiers(qbus->parent, vub->vhost_dev.nvqs, true);
- if (ret < 0) {
- error_report("Error binding guest notifier: %d", -ret);
- goto err_host_notifiers;
- }
-
- vub->vhost_dev.acked_features = vdev->guest_features;
-
- ret = vhost_dev_start(&vub->vhost_dev, vdev, true);
- if (ret < 0) {
- error_report("Error starting vhost-user-device: %d", -ret);
- goto err_guest_notifiers;
- }
-
- /*
- * guest_notifier_mask/pending not used yet, so just unmask
- * everything here. virtio-pci will do the right thing by
- * enabling/disabling irqfd.
- */
- for (i = 0; i < vub->vhost_dev.nvqs; i++) {
- vhost_virtqueue_mask(&vub->vhost_dev, vdev, i, false);
- }
-
- return;
-
-err_guest_notifiers:
- k->set_guest_notifiers(qbus->parent, vub->vhost_dev.nvqs, false);
-err_host_notifiers:
- vhost_dev_disable_notifiers(&vub->vhost_dev, vdev);
-}
-
-static void vub_stop(VirtIODevice *vdev)
-{
- VHostUserBase *vub = VHOST_USER_BASE(vdev);
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- int ret;
-
- if (!k->set_guest_notifiers) {
- return;
- }
-
- vhost_dev_stop(&vub->vhost_dev, vdev, true);
-
- ret = k->set_guest_notifiers(qbus->parent, vub->vhost_dev.nvqs, false);
- if (ret < 0) {
- error_report("vhost guest notifier cleanup failed: %d", ret);
- return;
- }
-
- vhost_dev_disable_notifiers(&vub->vhost_dev, vdev);
-}
-
-static void vub_set_status(VirtIODevice *vdev, uint8_t status)
-{
- VHostUserBase *vub = VHOST_USER_BASE(vdev);
- bool should_start = virtio_device_should_start(vdev, status);
-
- if (vhost_dev_is_started(&vub->vhost_dev) == should_start) {
- return;
- }
-
- if (should_start) {
- vub_start(vdev);
- } else {
- vub_stop(vdev);
- }
-}
-
-/*
- * For an implementation where everything is delegated to the backend
- * we don't do anything other than return the full feature set offered
- * by the daemon (module the reserved feature bit).
- */
-static uint64_t vub_get_features(VirtIODevice *vdev,
- uint64_t requested_features, Error **errp)
-{
- VHostUserBase *vub = VHOST_USER_BASE(vdev);
- /* This should be set when the vhost connection initialises */
- g_assert(vub->vhost_dev.features);
- return vub->vhost_dev.features & ~(1ULL << VHOST_USER_F_PROTOCOL_FEATURES);
-}
-
-/*
- * To handle VirtIO config we need to know the size of the config
- * space. We don't cache the config but re-fetch it from the guest
- * every time in case something has changed.
- */
-static void vub_get_config(VirtIODevice *vdev, uint8_t *config)
-{
- VHostUserBase *vub = VHOST_USER_BASE(vdev);
- Error *local_err = NULL;
-
- /*
- * There will have been a warning during vhost_dev_init, but lets
- * assert here as nothing will go right now.
- */
- g_assert(vub->config_size && vub->vhost_user.supports_config == true);
-
- if (vhost_dev_get_config(&vub->vhost_dev, config,
- vub->config_size, &local_err)) {
- error_report_err(local_err);
- }
-}
-
-/*
- * When the daemon signals an update to the config we just need to
- * signal the guest as we re-read the config on demand above.
- */
-static int vub_config_notifier(struct vhost_dev *dev)
-{
- virtio_notify_config(dev->vdev);
- return 0;
-}
-
-const VhostDevConfigOps vub_config_ops = {
- .vhost_dev_config_notifier = vub_config_notifier,
-};
-
-static void vub_handle_output(VirtIODevice *vdev, VirtQueue *vq)
-{
- /*
- * Not normally called; it's the daemon that handles the queue;
- * however virtio's cleanup path can call this.
- */
-}
-
-static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserBase *vub)
-{
- vhost_user_cleanup(&vub->vhost_user);
-
- for (int i = 0; i < vub->num_vqs; i++) {
- VirtQueue *vq = g_ptr_array_index(vub->vqs, i);
- virtio_delete_queue(vq);
- }
-
- virtio_cleanup(vdev);
-}
-
-static int vub_connect(DeviceState *dev)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserBase *vub = VHOST_USER_BASE(vdev);
- struct vhost_dev *vhost_dev = &vub->vhost_dev;
-
- if (vub->connected) {
- return 0;
- }
- vub->connected = true;
-
- /*
- * If we support VHOST_USER_GET_CONFIG we must enable the notifier
- * so we can ping the guest when it updates.
- */
- if (vub->vhost_user.supports_config) {
- vhost_dev_set_config_notifier(vhost_dev, &vub_config_ops);
- }
-
- /* restore vhost state */
- if (virtio_device_started(vdev, vdev->status)) {
- vub_start(vdev);
- }
-
- return 0;
-}
-
-static void vub_disconnect(DeviceState *dev)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserBase *vub = VHOST_USER_BASE(vdev);
-
- if (!vub->connected) {
- return;
- }
- vub->connected = false;
-
- if (vhost_dev_is_started(&vub->vhost_dev)) {
- vub_stop(vdev);
- }
-}
-
-static void vub_event(void *opaque, QEMUChrEvent event)
-{
- DeviceState *dev = opaque;
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserBase *vub = VHOST_USER_BASE(vdev);
-
- switch (event) {
- case CHR_EVENT_OPENED:
- if (vub_connect(dev) < 0) {
- qemu_chr_fe_disconnect(&vub->chardev);
- return;
- }
- break;
- case CHR_EVENT_CLOSED:
- vub_disconnect(dev);
- break;
- case CHR_EVENT_BREAK:
- case CHR_EVENT_MUX_IN:
- case CHR_EVENT_MUX_OUT:
- /* Ignore */
- break;
- }
-}
-
-static void vub_device_realize(DeviceState *dev, Error **errp)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserBase *vub = VHOST_USER_BASE(dev);
- int ret;
-
- if (!vub->chardev.chr) {
- error_setg(errp, "vhost-user-device: missing chardev");
- return;
- }
-
- if (!vub->virtio_id) {
- error_setg(errp, "vhost-user-device: need to define device id");
- return;
- }
-
- if (!vub->num_vqs) {
- vub->num_vqs = 1; /* reasonable default? */
- }
-
- /*
- * We can't handle config requests unless we know the size of the
- * config region, specialisations of the vhost-user-device will be
- * able to set this.
- */
- if (vub->config_size) {
- vub->vhost_user.supports_config = true;
- }
-
- if (!vhost_user_init(&vub->vhost_user, &vub->chardev, errp)) {
- return;
- }
-
- virtio_init(vdev, vub->virtio_id, vub->config_size);
-
- /*
- * Disable guest notifiers, by default all notifications will be via the
- * asynchronous vhost-user socket.
- */
- vdev->use_guest_notifier_mask = false;
-
- /* Allocate queues */
- vub->vqs = g_ptr_array_sized_new(vub->num_vqs);
- for (int i = 0; i < vub->num_vqs; i++) {
- g_ptr_array_add(vub->vqs,
- virtio_add_queue(vdev, 4, vub_handle_output));
- }
-
- vub->vhost_dev.nvqs = vub->num_vqs;
- vub->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vub->vhost_dev.nvqs);
-
- /* connect to backend */
- ret = vhost_dev_init(&vub->vhost_dev, &vub->vhost_user,
- VHOST_BACKEND_TYPE_USER, 0, errp);
-
- if (ret < 0) {
- do_vhost_user_cleanup(vdev, vub);
- }
-
- qemu_chr_fe_set_handlers(&vub->chardev, NULL, NULL, vub_event, NULL,
- dev, NULL, true);
-}
-
-static void vub_device_unrealize(DeviceState *dev)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserBase *vub = VHOST_USER_BASE(dev);
- struct vhost_virtqueue *vhost_vqs = vub->vhost_dev.vqs;
-
- /* This will stop vhost backend if appropriate. */
- vub_set_status(vdev, 0);
- vhost_dev_cleanup(&vub->vhost_dev);
- g_free(vhost_vqs);
- do_vhost_user_cleanup(vdev, vub);
-}
-
-static void vub_class_init(ObjectClass *klass, void *data)
-{
- VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
-
- vdc->realize = vub_device_realize;
- vdc->unrealize = vub_device_unrealize;
- vdc->get_features = vub_get_features;
- vdc->get_config = vub_get_config;
- vdc->set_status = vub_set_status;
-}
-
-static const TypeInfo vub_info = {
- .name = TYPE_VHOST_USER_BASE,
- .parent = TYPE_VIRTIO_DEVICE,
- .instance_size = sizeof(VHostUserBase),
- .class_init = vub_class_init,
- .class_size = sizeof(VHostUserBaseClass),
- .abstract = true
-};
-
-
/*
* The following is a concrete implementation of the base class which
* allows the user to define the key parameters via the command line.
@@ -349,6 +32,7 @@ static const VMStateDescription vud_vmstate = {
static Property vud_properties[] = {
DEFINE_PROP_CHR("chardev", VHostUserBase, chardev),
DEFINE_PROP_UINT16("virtio-id", VHostUserBase, virtio_id, 0),
+ DEFINE_PROP_UINT32("vq_size", VHostUserBase, vq_size, 64),
DEFINE_PROP_UINT32("num_vqs", VHostUserBase, num_vqs, 1),
DEFINE_PROP_UINT32("config_size", VHostUserBase, config_size, 0),
DEFINE_PROP_END_OF_LIST(),
@@ -358,6 +42,9 @@ static void vud_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ /* Reason: stop inexperienced users confusing themselves */
+ dc->user_creatable = false;
+
device_class_set_props(dc, vud_properties);
dc->vmsd = &vud_vmstate;
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
@@ -366,14 +53,11 @@ static void vud_class_init(ObjectClass *klass, void *data)
static const TypeInfo vud_info = {
.name = TYPE_VHOST_USER_DEVICE,
.parent = TYPE_VHOST_USER_BASE,
- .instance_size = sizeof(VHostUserBase),
.class_init = vud_class_init,
- .class_size = sizeof(VHostUserBaseClass),
};
static void vu_register_types(void)
{
- type_register_static(&vub_info);
type_register_static(&vud_info);
}
diff --git a/hw/virtio/vhost-user-gpio.c b/hw/virtio/vhost-user-gpio.c
index a83437a..9f37c25 100644
--- a/hw/virtio/vhost-user-gpio.c
+++ b/hw/virtio/vhost-user-gpio.c
@@ -11,388 +11,25 @@
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/vhost-user-gpio.h"
-#include "qemu/error-report.h"
#include "standard-headers/linux/virtio_ids.h"
-#include "trace.h"
+#include "standard-headers/linux/virtio_gpio.h"
-#define VHOST_NVQS 2
-
-/* Features required from VirtIO */
-static const int feature_bits[] = {
- VIRTIO_F_VERSION_1,
- VIRTIO_F_NOTIFY_ON_EMPTY,
- VIRTIO_RING_F_INDIRECT_DESC,
- VIRTIO_RING_F_EVENT_IDX,
- VIRTIO_GPIO_F_IRQ,
- VIRTIO_F_RING_RESET,
- VHOST_INVALID_FEATURE_BIT
+static Property vgpio_properties[] = {
+ DEFINE_PROP_CHR("chardev", VHostUserBase, chardev),
+ DEFINE_PROP_END_OF_LIST(),
};
-static void vu_gpio_get_config(VirtIODevice *vdev, uint8_t *config)
+static void vgpio_realize(DeviceState *dev, Error **errp)
{
- VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
+ VHostUserBase *vub = VHOST_USER_BASE(dev);
+ VHostUserBaseClass *vubc = VHOST_USER_BASE_GET_CLASS(dev);
- memcpy(config, &gpio->config, sizeof(gpio->config));
-}
+ /* Fixed for GPIO */
+ vub->virtio_id = VIRTIO_ID_GPIO;
+ vub->num_vqs = 2;
+ vub->config_size = sizeof(struct virtio_gpio_config);
-static int vu_gpio_config_notifier(struct vhost_dev *dev)
-{
- VHostUserGPIO *gpio = VHOST_USER_GPIO(dev->vdev);
-
- memcpy(dev->vdev->config, &gpio->config, sizeof(gpio->config));
- virtio_notify_config(dev->vdev);
-
- return 0;
-}
-
-const VhostDevConfigOps gpio_ops = {
- .vhost_dev_config_notifier = vu_gpio_config_notifier,
-};
-
-static int vu_gpio_start(VirtIODevice *vdev)
-{
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
- struct vhost_dev *vhost_dev = &gpio->vhost_dev;
- int ret, i;
-
- if (!k->set_guest_notifiers) {
- error_report("binding does not support guest notifiers");
- return -ENOSYS;
- }
-
- ret = vhost_dev_enable_notifiers(vhost_dev, vdev);
- if (ret < 0) {
- error_report("Error enabling host notifiers: %d", ret);
- return ret;
- }
-
- ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, true);
- if (ret < 0) {
- error_report("Error binding guest notifier: %d", ret);
- goto err_host_notifiers;
- }
-
- /*
- * Before we start up we need to ensure we have the final feature
- * set needed for the vhost configuration. The backend may also
- * apply backend_features when the feature set is sent.
- */
- vhost_ack_features(&gpio->vhost_dev, feature_bits, vdev->guest_features);
-
- ret = vhost_dev_start(&gpio->vhost_dev, vdev, false);
- if (ret < 0) {
- error_report("Error starting vhost-user-gpio: %d", ret);
- goto err_guest_notifiers;
- }
- gpio->started_vu = true;
-
- /*
- * guest_notifier_mask/pending not used yet, so just unmask
- * everything here. virtio-pci will do the right thing by
- * enabling/disabling irqfd.
- */
- for (i = 0; i < gpio->vhost_dev.nvqs; i++) {
- vhost_virtqueue_mask(&gpio->vhost_dev, vdev, i, false);
- }
-
- /*
- * As we must have VHOST_USER_F_PROTOCOL_FEATURES (because
- * VHOST_USER_GET_CONFIG requires it) we need to explicitly enable
- * the vrings.
- */
- g_assert(vhost_dev->vhost_ops &&
- vhost_dev->vhost_ops->vhost_set_vring_enable);
- ret = vhost_dev->vhost_ops->vhost_set_vring_enable(vhost_dev, true);
- if (ret == 0) {
- return 0;
- }
-
- error_report("Failed to start vrings for vhost-user-gpio: %d", ret);
-
-err_guest_notifiers:
- k->set_guest_notifiers(qbus->parent, gpio->vhost_dev.nvqs, false);
-err_host_notifiers:
- vhost_dev_disable_notifiers(&gpio->vhost_dev, vdev);
-
- return ret;
-}
-
-static void vu_gpio_stop(VirtIODevice *vdev)
-{
- VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- struct vhost_dev *vhost_dev = &gpio->vhost_dev;
- int ret;
-
- if (!gpio->started_vu) {
- return;
- }
- gpio->started_vu = false;
-
- if (!k->set_guest_notifiers) {
- return;
- }
-
- vhost_dev_stop(vhost_dev, vdev, false);
-
- ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, false);
- if (ret < 0) {
- error_report("vhost guest notifier cleanup failed: %d", ret);
- return;
- }
-
- vhost_dev_disable_notifiers(vhost_dev, vdev);
-}
-
-static void vu_gpio_set_status(VirtIODevice *vdev, uint8_t status)
-{
- VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
- bool should_start = virtio_device_should_start(vdev, status);
-
- trace_virtio_gpio_set_status(status);
-
- if (!gpio->connected) {
- return;
- }
-
- if (vhost_dev_is_started(&gpio->vhost_dev) == should_start) {
- return;
- }
-
- if (should_start) {
- if (vu_gpio_start(vdev)) {
- qemu_chr_fe_disconnect(&gpio->chardev);
- }
- } else {
- vu_gpio_stop(vdev);
- }
-}
-
-static uint64_t vu_gpio_get_features(VirtIODevice *vdev, uint64_t features,
- Error **errp)
-{
- VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
-
- return vhost_get_features(&gpio->vhost_dev, feature_bits, features);
-}
-
-static void vu_gpio_handle_output(VirtIODevice *vdev, VirtQueue *vq)
-{
- /*
- * Not normally called; it's the daemon that handles the queue;
- * however virtio's cleanup path can call this.
- */
-}
-
-static void vu_gpio_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
-{
- VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
-
- /*
- * The configure interrupt uses VIRTIO_CONFIG_IRQ_IDX (-1) as its index;
- * this device does not support it, so return early when that index is
- * passed in.
- */
-
- if (idx == VIRTIO_CONFIG_IRQ_IDX) {
- return;
- }
-
- vhost_virtqueue_mask(&gpio->vhost_dev, vdev, idx, mask);
-}
-
-static struct vhost_dev *vu_gpio_get_vhost(VirtIODevice *vdev)
-{
- VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
- return &gpio->vhost_dev;
-}
-
-static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserGPIO *gpio)
-{
- virtio_delete_queue(gpio->command_vq);
- virtio_delete_queue(gpio->interrupt_vq);
- g_free(gpio->vhost_vqs);
- virtio_cleanup(vdev);
- vhost_user_cleanup(&gpio->vhost_user);
-}
-
-static int vu_gpio_connect(DeviceState *dev, Error **errp)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
- struct vhost_dev *vhost_dev = &gpio->vhost_dev;
- int ret;
-
- if (gpio->connected) {
- return 0;
- }
-
- vhost_dev_set_config_notifier(vhost_dev, &gpio_ops);
- gpio->vhost_user.supports_config = true;
-
- gpio->vhost_dev.nvqs = VHOST_NVQS;
- gpio->vhost_dev.vqs = gpio->vhost_vqs;
-
- ret = vhost_dev_init(vhost_dev, &gpio->vhost_user,
- VHOST_BACKEND_TYPE_USER, 0, errp);
- if (ret < 0) {
- return ret;
- }
-
- gpio->connected = true;
-
- /* restore vhost state */
- if (virtio_device_started(vdev, vdev->status)) {
- vu_gpio_start(vdev);
- }
-
- return 0;
-}
-
-static void vu_gpio_event(void *opaque, QEMUChrEvent event);
-
-static void vu_gpio_disconnect(DeviceState *dev)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
-
- if (!gpio->connected) {
- return;
- }
- gpio->connected = false;
-
- vu_gpio_stop(vdev);
- vhost_dev_cleanup(&gpio->vhost_dev);
-
- /* Re-instate the event handler for new connections */
- qemu_chr_fe_set_handlers(&gpio->chardev,
- NULL, NULL, vu_gpio_event,
- NULL, dev, NULL, true);
-}
-
-static void vu_gpio_event(void *opaque, QEMUChrEvent event)
-{
- DeviceState *dev = opaque;
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
- Error *local_err = NULL;
-
- switch (event) {
- case CHR_EVENT_OPENED:
- if (vu_gpio_connect(dev, &local_err) < 0) {
- qemu_chr_fe_disconnect(&gpio->chardev);
- return;
- }
- break;
- case CHR_EVENT_CLOSED:
- /* defer close until later to avoid circular close */
- vhost_user_async_close(dev, &gpio->chardev, &gpio->vhost_dev,
- vu_gpio_disconnect, vu_gpio_event);
- break;
- case CHR_EVENT_BREAK:
- case CHR_EVENT_MUX_IN:
- case CHR_EVENT_MUX_OUT:
- /* Ignore */
- break;
- }
-}
-
-static int vu_gpio_realize_connect(VHostUserGPIO *gpio, Error **errp)
-{
- VirtIODevice *vdev = &gpio->parent_obj;
- DeviceState *dev = &vdev->parent_obj;
- struct vhost_dev *vhost_dev = &gpio->vhost_dev;
- int ret;
-
- ret = qemu_chr_fe_wait_connected(&gpio->chardev, errp);
- if (ret < 0) {
- return ret;
- }
-
- /*
- * vu_gpio_connect() may have already connected (via the event
- * callback) in which case it will just report success.
- */
- ret = vu_gpio_connect(dev, errp);
- if (ret < 0) {
- qemu_chr_fe_disconnect(&gpio->chardev);
- return ret;
- }
- g_assert(gpio->connected);
-
- ret = vhost_dev_get_config(vhost_dev, (uint8_t *)&gpio->config,
- sizeof(gpio->config), errp);
-
- if (ret < 0) {
- error_report("vhost-user-gpio: get config failed");
-
- qemu_chr_fe_disconnect(&gpio->chardev);
- vhost_dev_cleanup(vhost_dev);
- return ret;
- }
-
- return 0;
-}
-
-static void vu_gpio_device_realize(DeviceState *dev, Error **errp)
-{
- ERRP_GUARD();
-
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserGPIO *gpio = VHOST_USER_GPIO(dev);
- int retries, ret;
-
- if (!gpio->chardev.chr) {
- error_setg(errp, "vhost-user-gpio: chardev is mandatory");
- return;
- }
-
- if (!vhost_user_init(&gpio->vhost_user, &gpio->chardev, errp)) {
- return;
- }
-
- virtio_init(vdev, VIRTIO_ID_GPIO, sizeof(gpio->config));
-
- gpio->command_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output);
- gpio->interrupt_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output);
- gpio->vhost_vqs = g_new0(struct vhost_virtqueue, VHOST_NVQS);
-
- gpio->connected = false;
-
- qemu_chr_fe_set_handlers(&gpio->chardev, NULL, NULL, vu_gpio_event, NULL,
- dev, NULL, true);
-
- retries = VU_REALIZE_CONN_RETRIES;
- g_assert(!*errp);
- do {
- if (*errp) {
- error_prepend(errp, "Reconnecting after error: ");
- error_report_err(*errp);
- *errp = NULL;
- }
- ret = vu_gpio_realize_connect(gpio, errp);
- } while (ret < 0 && retries--);
-
- if (ret < 0) {
- do_vhost_user_cleanup(vdev, gpio);
- }
-
- return;
-}
-
-static void vu_gpio_device_unrealize(DeviceState *dev)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserGPIO *gpio = VHOST_USER_GPIO(dev);
-
- vu_gpio_set_status(vdev, 0);
- qemu_chr_fe_set_handlers(&gpio->chardev, NULL, NULL, NULL, NULL, NULL, NULL,
- false);
- vhost_dev_cleanup(&gpio->vhost_dev);
- do_vhost_user_cleanup(vdev, gpio);
+ vubc->parent_realize(dev, errp);
}
static const VMStateDescription vu_gpio_vmstate = {
@@ -400,31 +37,21 @@ static const VMStateDescription vu_gpio_vmstate = {
.unmigratable = 1,
};
-static Property vu_gpio_properties[] = {
- DEFINE_PROP_CHR("chardev", VHostUserGPIO, chardev),
- DEFINE_PROP_END_OF_LIST(),
-};
-
static void vu_gpio_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+ VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass);
- device_class_set_props(dc, vu_gpio_properties);
dc->vmsd = &vu_gpio_vmstate;
+ device_class_set_props(dc, vgpio_properties);
+ device_class_set_parent_realize(dc, vgpio_realize,
+ &vubc->parent_realize);
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
- vdc->realize = vu_gpio_device_realize;
- vdc->unrealize = vu_gpio_device_unrealize;
- vdc->get_features = vu_gpio_get_features;
- vdc->get_config = vu_gpio_get_config;
- vdc->set_status = vu_gpio_set_status;
- vdc->guest_notifier_mask = vu_gpio_guest_notifier_mask;
- vdc->get_vhost = vu_gpio_get_vhost;
}
static const TypeInfo vu_gpio_info = {
.name = TYPE_VHOST_USER_GPIO,
- .parent = TYPE_VIRTIO_DEVICE,
+ .parent = TYPE_VHOST_USER_BASE,
.instance_size = sizeof(VHostUserGPIO),
.class_init = vu_gpio_class_init,
};
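The hunk above only shows the tail of the gpio conversion, but it follows the same shape as the other devices in this series: the per-device file shrinks to a realize wrapper that fills in the fixed virtio parameters and then chains to the VHostUserBase realize. A minimal sketch of what the resulting vgpio_realize looks like, assembled from the fragments visible here rather than quoted from the final file:

    static void vgpio_realize(DeviceState *dev, Error **errp)
    {
        VHostUserBase *vub = VHOST_USER_BASE(dev);
        VHostUserBaseClass *vubc = VHOST_USER_BASE_GET_CLASS(dev);

        /* Fixed for GPIO: two queues plus the virtio_gpio_config space */
        vub->virtio_id = VIRTIO_ID_GPIO;
        vub->num_vqs = 2;
        vub->config_size = sizeof(struct virtio_gpio_config);

        vubc->parent_realize(dev, errp);
    }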
diff --git a/hw/virtio/vhost-user-i2c.c b/hw/virtio/vhost-user-i2c.c
index 4eef3f0..a464f5e 100644
--- a/hw/virtio/vhost-user-i2c.c
+++ b/hw/virtio/vhost-user-i2c.c
@@ -14,253 +14,22 @@
#include "qemu/error-report.h"
#include "standard-headers/linux/virtio_ids.h"
-static const int feature_bits[] = {
- VIRTIO_I2C_F_ZERO_LENGTH_REQUEST,
- VIRTIO_F_RING_RESET,
- VHOST_INVALID_FEATURE_BIT
+static Property vi2c_properties[] = {
+ DEFINE_PROP_CHR("chardev", VHostUserBase, chardev),
+ DEFINE_PROP_END_OF_LIST(),
};
-static void vu_i2c_start(VirtIODevice *vdev)
-{
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- VHostUserI2C *i2c = VHOST_USER_I2C(vdev);
- int ret, i;
-
- if (!k->set_guest_notifiers) {
- error_report("binding does not support guest notifiers");
- return;
- }
-
- ret = vhost_dev_enable_notifiers(&i2c->vhost_dev, vdev);
- if (ret < 0) {
- error_report("Error enabling host notifiers: %d", -ret);
- return;
- }
-
- ret = k->set_guest_notifiers(qbus->parent, i2c->vhost_dev.nvqs, true);
- if (ret < 0) {
- error_report("Error binding guest notifier: %d", -ret);
- goto err_host_notifiers;
- }
-
- i2c->vhost_dev.acked_features = vdev->guest_features;
-
- ret = vhost_dev_start(&i2c->vhost_dev, vdev, true);
- if (ret < 0) {
- error_report("Error starting vhost-user-i2c: %d", -ret);
- goto err_guest_notifiers;
- }
-
- /*
- * guest_notifier_mask/pending not used yet, so just unmask
- * everything here. virtio-pci will do the right thing by
- * enabling/disabling irqfd.
- */
- for (i = 0; i < i2c->vhost_dev.nvqs; i++) {
- vhost_virtqueue_mask(&i2c->vhost_dev, vdev, i, false);
- }
-
- return;
-
-err_guest_notifiers:
- k->set_guest_notifiers(qbus->parent, i2c->vhost_dev.nvqs, false);
-err_host_notifiers:
- vhost_dev_disable_notifiers(&i2c->vhost_dev, vdev);
-}
-
-static void vu_i2c_stop(VirtIODevice *vdev)
-{
- VHostUserI2C *i2c = VHOST_USER_I2C(vdev);
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- int ret;
-
- if (!k->set_guest_notifiers) {
- return;
- }
-
- vhost_dev_stop(&i2c->vhost_dev, vdev, true);
-
- ret = k->set_guest_notifiers(qbus->parent, i2c->vhost_dev.nvqs, false);
- if (ret < 0) {
- error_report("vhost guest notifier cleanup failed: %d", ret);
- return;
- }
-
- vhost_dev_disable_notifiers(&i2c->vhost_dev, vdev);
-}
-
-static void vu_i2c_set_status(VirtIODevice *vdev, uint8_t status)
-{
- VHostUserI2C *i2c = VHOST_USER_I2C(vdev);
- bool should_start = virtio_device_should_start(vdev, status);
-
- if (vhost_dev_is_started(&i2c->vhost_dev) == should_start) {
- return;
- }
-
- if (should_start) {
- vu_i2c_start(vdev);
- } else {
- vu_i2c_stop(vdev);
- }
-}
-
-static uint64_t vu_i2c_get_features(VirtIODevice *vdev,
- uint64_t requested_features, Error **errp)
-{
- VHostUserI2C *i2c = VHOST_USER_I2C(vdev);
-
- virtio_add_feature(&requested_features, VIRTIO_I2C_F_ZERO_LENGTH_REQUEST);
- return vhost_get_features(&i2c->vhost_dev, feature_bits, requested_features);
-}
-
-static void vu_i2c_handle_output(VirtIODevice *vdev, VirtQueue *vq)
-{
- /*
- * Not normally called; it's the daemon that handles the queue;
- * however virtio's cleanup path can call this.
- */
-}
-
-static void vu_i2c_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
-{
- VHostUserI2C *i2c = VHOST_USER_I2C(vdev);
-
- /*
- * We don't support interrupts, return early if index is set to
- * VIRTIO_CONFIG_IRQ_IDX.
- */
- if (idx == VIRTIO_CONFIG_IRQ_IDX) {
- return;
- }
-
- vhost_virtqueue_mask(&i2c->vhost_dev, vdev, idx, mask);
-}
-
-static bool vu_i2c_guest_notifier_pending(VirtIODevice *vdev, int idx)
-{
- VHostUserI2C *i2c = VHOST_USER_I2C(vdev);
-
- /*
- * We don't support interrupts, return early if index is set to
- * VIRTIO_CONFIG_IRQ_IDX.
- */
- if (idx == VIRTIO_CONFIG_IRQ_IDX) {
- return false;
- }
-
- return vhost_virtqueue_pending(&i2c->vhost_dev, idx);
-}
-
-static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserI2C *i2c)
-{
- vhost_user_cleanup(&i2c->vhost_user);
- virtio_delete_queue(i2c->vq);
- virtio_cleanup(vdev);
-}
-
-static int vu_i2c_connect(DeviceState *dev)
+static void vi2c_realize(DeviceState *dev, Error **errp)
{
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserI2C *i2c = VHOST_USER_I2C(vdev);
+ VHostUserBase *vub = VHOST_USER_BASE(dev);
+ VHostUserBaseClass *vubc = VHOST_USER_BASE_GET_CLASS(dev);
- if (i2c->connected) {
- return 0;
- }
- i2c->connected = true;
+ /* Fixed for I2C */
+ vub->virtio_id = VIRTIO_ID_I2C_ADAPTER;
+ vub->num_vqs = 1;
+ vub->vq_size = 4;
- /* restore vhost state */
- if (virtio_device_started(vdev, vdev->status)) {
- vu_i2c_start(vdev);
- }
-
- return 0;
-}
-
-static void vu_i2c_disconnect(DeviceState *dev)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserI2C *i2c = VHOST_USER_I2C(vdev);
-
- if (!i2c->connected) {
- return;
- }
- i2c->connected = false;
-
- if (vhost_dev_is_started(&i2c->vhost_dev)) {
- vu_i2c_stop(vdev);
- }
-}
-
-static void vu_i2c_event(void *opaque, QEMUChrEvent event)
-{
- DeviceState *dev = opaque;
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserI2C *i2c = VHOST_USER_I2C(vdev);
-
- switch (event) {
- case CHR_EVENT_OPENED:
- if (vu_i2c_connect(dev) < 0) {
- qemu_chr_fe_disconnect(&i2c->chardev);
- return;
- }
- break;
- case CHR_EVENT_CLOSED:
- vu_i2c_disconnect(dev);
- break;
- case CHR_EVENT_BREAK:
- case CHR_EVENT_MUX_IN:
- case CHR_EVENT_MUX_OUT:
- /* Ignore */
- break;
- }
-}
-
-static void vu_i2c_device_realize(DeviceState *dev, Error **errp)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserI2C *i2c = VHOST_USER_I2C(dev);
- int ret;
-
- if (!i2c->chardev.chr) {
- error_setg(errp, "vhost-user-i2c: missing chardev");
- return;
- }
-
- if (!vhost_user_init(&i2c->vhost_user, &i2c->chardev, errp)) {
- return;
- }
-
- virtio_init(vdev, VIRTIO_ID_I2C_ADAPTER, 0);
-
- i2c->vhost_dev.nvqs = 1;
- i2c->vq = virtio_add_queue(vdev, 4, vu_i2c_handle_output);
- i2c->vhost_dev.vqs = g_new0(struct vhost_virtqueue, i2c->vhost_dev.nvqs);
-
- ret = vhost_dev_init(&i2c->vhost_dev, &i2c->vhost_user,
- VHOST_BACKEND_TYPE_USER, 0, errp);
- if (ret < 0) {
- g_free(i2c->vhost_dev.vqs);
- do_vhost_user_cleanup(vdev, i2c);
- }
-
- qemu_chr_fe_set_handlers(&i2c->chardev, NULL, NULL, vu_i2c_event, NULL,
- dev, NULL, true);
-}
-
-static void vu_i2c_device_unrealize(DeviceState *dev)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserI2C *i2c = VHOST_USER_I2C(dev);
- struct vhost_virtqueue *vhost_vqs = i2c->vhost_dev.vqs;
-
- /* This will stop vhost backend if appropriate. */
- vu_i2c_set_status(vdev, 0);
- vhost_dev_cleanup(&i2c->vhost_dev);
- g_free(vhost_vqs);
- do_vhost_user_cleanup(vdev, i2c);
+ vubc->parent_realize(dev, errp);
}
static const VMStateDescription vu_i2c_vmstate = {
@@ -268,30 +37,21 @@ static const VMStateDescription vu_i2c_vmstate = {
.unmigratable = 1,
};
-static Property vu_i2c_properties[] = {
- DEFINE_PROP_CHR("chardev", VHostUserI2C, chardev),
- DEFINE_PROP_END_OF_LIST(),
-};
-
static void vu_i2c_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+ VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass);
- device_class_set_props(dc, vu_i2c_properties);
dc->vmsd = &vu_i2c_vmstate;
+ device_class_set_props(dc, vi2c_properties);
+ device_class_set_parent_realize(dc, vi2c_realize,
+ &vubc->parent_realize);
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
- vdc->realize = vu_i2c_device_realize;
- vdc->unrealize = vu_i2c_device_unrealize;
- vdc->get_features = vu_i2c_get_features;
- vdc->set_status = vu_i2c_set_status;
- vdc->guest_notifier_mask = vu_i2c_guest_notifier_mask;
- vdc->guest_notifier_pending = vu_i2c_guest_notifier_pending;
}
static const TypeInfo vu_i2c_info = {
.name = TYPE_VHOST_USER_I2C,
- .parent = TYPE_VIRTIO_DEVICE,
+ .parent = TYPE_VHOST_USER_BASE,
.instance_size = sizeof(VHostUserI2C),
.class_init = vu_i2c_class_init,
};
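Each converted device hands its realize wrapper to device_class_set_parent_realize(), so VHostUserBase's own realize still runs and provides the chardev/vhost plumbing that the deleted per-device code used to duplicate. A rough sketch of the helper's behaviour (not a quote of hw/core/qdev.c):

    void device_class_set_parent_realize(DeviceClass *dc,
                                         DeviceRealize dev_realize,
                                         DeviceRealize *parent_realize)
    {
        *parent_realize = dc->realize;  /* remember the base class realize */
        dc->realize = dev_realize;      /* install the subclass wrapper */
    }

At realize time the wrapper (vi2c_realize above) therefore runs first, sets virtio_id/num_vqs/vq_size, and finishes by calling vubc->parent_realize(dev, errp).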
diff --git a/hw/virtio/vhost-user-input-pci.c b/hw/virtio/vhost-user-input-pci.c
index b858898..3f4761c 100644
--- a/hw/virtio/vhost-user-input-pci.c
+++ b/hw/virtio/vhost-user-input-pci.c
@@ -30,9 +30,6 @@ static void vhost_user_input_pci_instance_init(Object *obj)
virtio_instance_init_common(obj, &dev->vhi, sizeof(dev->vhi),
TYPE_VHOST_USER_INPUT);
-
- object_property_add_alias(obj, "chardev",
- OBJECT(&dev->vhi), "chardev");
}
static const VirtioPCIDeviceTypeInfo vhost_user_input_pci_info = {
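Dropping the explicit "chardev" alias works because virtio_instance_init_common() already aliases every qdev property of the embedded child onto the PCI proxy, and the property itself now comes from VHostUserBase (the DEFINE_PROP_CHR entries in the files below). A sketch of what that helper does, from memory of hw/virtio/virtio.c rather than from this patch, so the exact body may differ:

    void virtio_instance_init_common(Object *proxy_obj, void *data,
                                     size_t vdev_size, const char *vdev_name)
    {
        DeviceState *vdev = data;

        object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev,
                                           vdev_size, vdev_name, &error_abort,
                                           NULL);
        qdev_alias_all_properties(vdev, proxy_obj);
    }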
diff --git a/hw/virtio/vhost-user-input.c b/hw/virtio/vhost-user-input.c
new file mode 100644
index 0000000..bedec04
--- /dev/null
+++ b/hw/virtio/vhost-user-input.c
@@ -0,0 +1,58 @@
+/*
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/virtio/virtio-input.h"
+
+static Property vinput_properties[] = {
+ DEFINE_PROP_CHR("chardev", VHostUserBase, chardev),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vinput_realize(DeviceState *dev, Error **errp)
+{
+ VHostUserBase *vub = VHOST_USER_BASE(dev);
+ VHostUserBaseClass *vubc = VHOST_USER_BASE_GET_CLASS(dev);
+
+ /* Fixed for input device */
+ vub->virtio_id = VIRTIO_ID_INPUT;
+ vub->num_vqs = 2;
+ vub->vq_size = 4;
+ vub->config_size = sizeof(virtio_input_config);
+
+ vubc->parent_realize(dev, errp);
+}
+
+static const VMStateDescription vmstate_vhost_input = {
+ .name = "vhost-user-input",
+ .unmigratable = 1,
+};
+
+static void vhost_input_class_init(ObjectClass *klass, void *data)
+{
+ VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->vmsd = &vmstate_vhost_input;
+ device_class_set_props(dc, vinput_properties);
+ device_class_set_parent_realize(dc, vinput_realize,
+ &vubc->parent_realize);
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+}
+
+static const TypeInfo vhost_input_info = {
+ .name = TYPE_VHOST_USER_INPUT,
+ .parent = TYPE_VHOST_USER_BASE,
+ .instance_size = sizeof(VHostUserInput),
+ .class_init = vhost_input_class_init,
+};
+
+static void vhost_input_register_types(void)
+{
+ type_register_static(&vhost_input_info);
+}
+
+type_init(vhost_input_register_types)
diff --git a/hw/virtio/vhost-user-rng.c b/hw/virtio/vhost-user-rng.c
index 24ac1a2..01879c8 100644
--- a/hw/virtio/vhost-user-rng.c
+++ b/hw/virtio/vhost-user-rng.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2021 Mathieu Poirier <mathieu.poirier@linaro.org>
*
- * Implementation seriously tailored on vhost-user-i2c.c
+ * Simple wrapper of the generic vhost-user-device.
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
@@ -13,297 +13,47 @@
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/vhost-user-rng.h"
-#include "qemu/error-report.h"
#include "standard-headers/linux/virtio_ids.h"
-static const int feature_bits[] = {
- VIRTIO_F_RING_RESET,
- VHOST_INVALID_FEATURE_BIT
-};
-
-static void vu_rng_start(VirtIODevice *vdev)
-{
- VHostUserRNG *rng = VHOST_USER_RNG(vdev);
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- int ret;
- int i;
-
- if (!k->set_guest_notifiers) {
- error_report("binding does not support guest notifiers");
- return;
- }
-
- ret = vhost_dev_enable_notifiers(&rng->vhost_dev, vdev);
- if (ret < 0) {
- error_report("Error enabling host notifiers: %d", -ret);
- return;
- }
-
- ret = k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, true);
- if (ret < 0) {
- error_report("Error binding guest notifier: %d", -ret);
- goto err_host_notifiers;
- }
-
- rng->vhost_dev.acked_features = vdev->guest_features;
- ret = vhost_dev_start(&rng->vhost_dev, vdev, true);
- if (ret < 0) {
- error_report("Error starting vhost-user-rng: %d", -ret);
- goto err_guest_notifiers;
- }
-
- /*
- * guest_notifier_mask/pending not used yet, so just unmask
- * everything here. virtio-pci will do the right thing by
- * enabling/disabling irqfd.
- */
- for (i = 0; i < rng->vhost_dev.nvqs; i++) {
- vhost_virtqueue_mask(&rng->vhost_dev, vdev, i, false);
- }
-
- return;
-
-err_guest_notifiers:
- k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, false);
-err_host_notifiers:
- vhost_dev_disable_notifiers(&rng->vhost_dev, vdev);
-}
-
-static void vu_rng_stop(VirtIODevice *vdev)
-{
- VHostUserRNG *rng = VHOST_USER_RNG(vdev);
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- int ret;
-
- if (!k->set_guest_notifiers) {
- return;
- }
-
- vhost_dev_stop(&rng->vhost_dev, vdev, true);
-
- ret = k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, false);
- if (ret < 0) {
- error_report("vhost guest notifier cleanup failed: %d", ret);
- return;
- }
-
- vhost_dev_disable_notifiers(&rng->vhost_dev, vdev);
-}
-
-static void vu_rng_set_status(VirtIODevice *vdev, uint8_t status)
-{
- VHostUserRNG *rng = VHOST_USER_RNG(vdev);
- bool should_start = virtio_device_should_start(vdev, status);
-
- if (vhost_dev_is_started(&rng->vhost_dev) == should_start) {
- return;
- }
-
- if (should_start) {
- vu_rng_start(vdev);
- } else {
- vu_rng_stop(vdev);
- }
-}
-
-static uint64_t vu_rng_get_features(VirtIODevice *vdev,
- uint64_t requested_features, Error **errp)
-{
- VHostUserRNG *rng = VHOST_USER_RNG(vdev);
-
- return vhost_get_features(&rng->vhost_dev, feature_bits,
- requested_features);
-}
-
-static void vu_rng_handle_output(VirtIODevice *vdev, VirtQueue *vq)
-{
- /*
- * Not normally called; it's the daemon that handles the queue;
- * however virtio's cleanup path can call this.
- */
-}
-
-static void vu_rng_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
-{
- VHostUserRNG *rng = VHOST_USER_RNG(vdev);
-
- /*
- * We don't support interrupts, return early if index is set to
- * VIRTIO_CONFIG_IRQ_IDX.
- */
- if (idx == VIRTIO_CONFIG_IRQ_IDX) {
- return;
- }
-
- vhost_virtqueue_mask(&rng->vhost_dev, vdev, idx, mask);
-}
-
-static bool vu_rng_guest_notifier_pending(VirtIODevice *vdev, int idx)
-{
- VHostUserRNG *rng = VHOST_USER_RNG(vdev);
-
- /*
- * We don't support interrupts, return early if index is set to
- * VIRTIO_CONFIG_IRQ_IDX.
- */
- if (idx == VIRTIO_CONFIG_IRQ_IDX) {
- return false;
- }
-
- return vhost_virtqueue_pending(&rng->vhost_dev, idx);
-}
-
-static void vu_rng_connect(DeviceState *dev)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserRNG *rng = VHOST_USER_RNG(vdev);
-
- if (rng->connected) {
- return;
- }
-
- rng->connected = true;
-
- /* restore vhost state */
- if (virtio_device_started(vdev, vdev->status)) {
- vu_rng_start(vdev);
- }
-}
-
-static void vu_rng_disconnect(DeviceState *dev)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserRNG *rng = VHOST_USER_RNG(vdev);
-
- if (!rng->connected) {
- return;
- }
-
- rng->connected = false;
-
- if (vhost_dev_is_started(&rng->vhost_dev)) {
- vu_rng_stop(vdev);
- }
-}
-
-static void vu_rng_event(void *opaque, QEMUChrEvent event)
-{
- DeviceState *dev = opaque;
-
- switch (event) {
- case CHR_EVENT_OPENED:
- vu_rng_connect(dev);
- break;
- case CHR_EVENT_CLOSED:
- vu_rng_disconnect(dev);
- break;
- case CHR_EVENT_BREAK:
- case CHR_EVENT_MUX_IN:
- case CHR_EVENT_MUX_OUT:
- /* Ignore */
- break;
- }
-}
-
-static void vu_rng_device_realize(DeviceState *dev, Error **errp)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserRNG *rng = VHOST_USER_RNG(dev);
- int ret;
-
- if (!rng->chardev.chr) {
- error_setg(errp, "missing chardev");
- return;
- }
-
- if (!vhost_user_init(&rng->vhost_user, &rng->chardev, errp)) {
- return;
- }
-
- virtio_init(vdev, VIRTIO_ID_RNG, 0);
-
- rng->req_vq = virtio_add_queue(vdev, 4, vu_rng_handle_output);
- if (!rng->req_vq) {
- error_setg_errno(errp, -1, "virtio_add_queue() failed");
- goto virtio_add_queue_failed;
- }
-
- rng->vhost_dev.nvqs = 1;
- rng->vhost_dev.vqs = g_new0(struct vhost_virtqueue, rng->vhost_dev.nvqs);
- ret = vhost_dev_init(&rng->vhost_dev, &rng->vhost_user,
- VHOST_BACKEND_TYPE_USER, 0, errp);
- if (ret < 0) {
- error_setg_errno(errp, -ret, "vhost_dev_init() failed");
- goto vhost_dev_init_failed;
- }
-
- qemu_chr_fe_set_handlers(&rng->chardev, NULL, NULL, vu_rng_event, NULL,
- dev, NULL, true);
-
- return;
-
-vhost_dev_init_failed:
- g_free(rng->vhost_dev.vqs);
- virtio_delete_queue(rng->req_vq);
-virtio_add_queue_failed:
- virtio_cleanup(vdev);
- vhost_user_cleanup(&rng->vhost_user);
-}
-
-static void vu_rng_device_unrealize(DeviceState *dev)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- VHostUserRNG *rng = VHOST_USER_RNG(dev);
- struct vhost_virtqueue *vhost_vqs = rng->vhost_dev.vqs;
-
- vu_rng_set_status(vdev, 0);
-
- vhost_dev_cleanup(&rng->vhost_dev);
- g_free(vhost_vqs);
- virtio_delete_queue(rng->req_vq);
- virtio_cleanup(vdev);
- vhost_user_cleanup(&rng->vhost_user);
-}
-
-static struct vhost_dev *vu_rng_get_vhost(VirtIODevice *vdev)
-{
- VHostUserRNG *rng = VHOST_USER_RNG(vdev);
- return &rng->vhost_dev;
-}
-
static const VMStateDescription vu_rng_vmstate = {
.name = "vhost-user-rng",
.unmigratable = 1,
};
-static Property vu_rng_properties[] = {
- DEFINE_PROP_CHR("chardev", VHostUserRNG, chardev),
+static Property vrng_properties[] = {
+ DEFINE_PROP_CHR("chardev", VHostUserBase, chardev),
DEFINE_PROP_END_OF_LIST(),
};
+static void vu_rng_base_realize(DeviceState *dev, Error **errp)
+{
+ VHostUserBase *vub = VHOST_USER_BASE(dev);
+ VHostUserBaseClass *vubs = VHOST_USER_BASE_GET_CLASS(dev);
+
+ /* Fixed for RNG */
+ vub->virtio_id = VIRTIO_ID_RNG;
+ vub->num_vqs = 1;
+ vub->vq_size = 4;
+
+ vubs->parent_realize(dev, errp);
+}
+
static void vu_rng_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+ VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass);
- device_class_set_props(dc, vu_rng_properties);
dc->vmsd = &vu_rng_vmstate;
- set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+ device_class_set_props(dc, vrng_properties);
+ device_class_set_parent_realize(dc, vu_rng_base_realize,
+ &vubc->parent_realize);
- vdc->realize = vu_rng_device_realize;
- vdc->unrealize = vu_rng_device_unrealize;
- vdc->get_features = vu_rng_get_features;
- vdc->set_status = vu_rng_set_status;
- vdc->guest_notifier_mask = vu_rng_guest_notifier_mask;
- vdc->guest_notifier_pending = vu_rng_guest_notifier_pending;
- vdc->get_vhost = vu_rng_get_vhost;
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
}
static const TypeInfo vu_rng_info = {
.name = TYPE_VHOST_USER_RNG,
- .parent = TYPE_VIRTIO_DEVICE,
+ .parent = TYPE_VHOST_USER_BASE,
.instance_size = sizeof(VHostUserRNG),
.class_init = vu_rng_class_init,
};
diff --git a/hw/virtio/vhost-user-snd-pci.c b/hw/virtio/vhost-user-snd-pci.c
new file mode 100644
index 0000000..d61cfda
--- /dev/null
+++ b/hw/virtio/vhost-user-snd-pci.c
@@ -0,0 +1,75 @@
+/*
+ * Vhost-user Sound virtio device PCI glue
+ *
+ * Copyright (c) 2023 Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "hw/qdev-properties.h"
+#include "hw/virtio/vhost-user-snd.h"
+#include "hw/virtio/virtio-pci.h"
+
+struct VHostUserSoundPCI {
+ VirtIOPCIProxy parent_obj;
+ VHostUserSound vdev;
+};
+
+typedef struct VHostUserSoundPCI VHostUserSoundPCI;
+
+#define TYPE_VHOST_USER_SND_PCI "vhost-user-snd-pci-base"
+
+DECLARE_INSTANCE_CHECKER(VHostUserSoundPCI, VHOST_USER_SND_PCI,
+ TYPE_VHOST_USER_SND_PCI)
+
+static Property vhost_user_snd_pci_properties[] = {
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vhost_user_snd_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VHostUserSoundPCI *dev = VHOST_USER_SND_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+
+ vpci_dev->nvectors = 1;
+
+ qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
+}
+
+static void vhost_user_snd_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+ k->realize = vhost_user_snd_pci_realize;
+ set_bit(DEVICE_CATEGORY_SOUND, dc->categories);
+ device_class_set_props(dc, vhost_user_snd_pci_properties);
+ pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
+ pcidev_k->device_id = 0; /* Set by virtio-pci based on virtio id */
+ pcidev_k->revision = 0x00;
+ pcidev_k->class_id = PCI_CLASS_MULTIMEDIA_AUDIO;
+}
+
+static void vhost_user_snd_pci_instance_init(Object *obj)
+{
+ VHostUserSoundPCI *dev = VHOST_USER_SND_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VHOST_USER_SND);
+}
+
+static const VirtioPCIDeviceTypeInfo vhost_user_snd_pci_info = {
+ .base_name = TYPE_VHOST_USER_SND_PCI,
+ .non_transitional_name = "vhost-user-snd-pci",
+ .instance_size = sizeof(VHostUserSoundPCI),
+ .instance_init = vhost_user_snd_pci_instance_init,
+ .class_init = vhost_user_snd_pci_class_init,
+};
+
+static void vhost_user_snd_pci_register(void)
+{
+ virtio_pci_types_register(&vhost_user_snd_pci_info);
+}
+
+type_init(vhost_user_snd_pci_register);
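With the PCI glue in place, the device is wired up like any other vhost-user device. An illustrative invocation (the socket path and id are placeholders; a vhost-user sound daemon must already be listening on the socket):

    qemu-system-x86_64 ... \
        -chardev socket,id=vsnd0,path=/tmp/vhost-snd.sock \
        -device vhost-user-snd-pci,chardev=vsnd0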
diff --git a/hw/virtio/vhost-user-snd.c b/hw/virtio/vhost-user-snd.c
new file mode 100644
index 0000000..9a21754
--- /dev/null
+++ b/hw/virtio/vhost-user-snd.c
@@ -0,0 +1,67 @@
+/*
+ * Vhost-user snd virtio device
+ *
+ * Copyright (c) 2023 Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
+ *
+ * Simple wrapper of the generic vhost-user-device.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/qdev-properties.h"
+#include "hw/virtio/virtio-bus.h"
+#include "hw/virtio/vhost-user-snd.h"
+#include "standard-headers/linux/virtio_ids.h"
+#include "standard-headers/linux/virtio_snd.h"
+
+static const VMStateDescription vu_snd_vmstate = {
+ .name = "vhost-user-snd",
+ .unmigratable = 1,
+};
+
+static Property vsnd_properties[] = {
+ DEFINE_PROP_CHR("chardev", VHostUserBase, chardev),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vu_snd_base_realize(DeviceState *dev, Error **errp)
+{
+ VHostUserBase *vub = VHOST_USER_BASE(dev);
+ VHostUserBaseClass *vubs = VHOST_USER_BASE_GET_CLASS(dev);
+
+ vub->virtio_id = VIRTIO_ID_SOUND;
+ vub->num_vqs = 4;
+ vub->config_size = sizeof(struct virtio_snd_config);
+ vub->vq_size = 64;
+
+ vubs->parent_realize(dev, errp);
+}
+
+static void vu_snd_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VHostUserBaseClass *vubc = VHOST_USER_BASE_CLASS(klass);
+
+ dc->vmsd = &vu_snd_vmstate;
+ device_class_set_props(dc, vsnd_properties);
+ device_class_set_parent_realize(dc, vu_snd_base_realize,
+ &vubc->parent_realize);
+
+ set_bit(DEVICE_CATEGORY_SOUND, dc->categories);
+}
+
+static const TypeInfo vu_snd_info = {
+ .name = TYPE_VHOST_USER_SND,
+ .parent = TYPE_VHOST_USER_BASE,
+ .instance_size = sizeof(VHostUserSound),
+ .class_init = vu_snd_class_init,
+};
+
+static void vu_snd_register_types(void)
+{
+ type_register_static(&vu_snd_info);
+}
+
+type_init(vu_snd_register_types)
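The four queues map onto the control, event, tx and rx virtqueues defined by the virtio-sound specification; the constants below mirror the VIRTIO_SND_VQ_* names from the standard header and are shown only for orientation; they are not part of this patch:

    enum {
        VIRTIO_SND_VQ_CONTROL = 0,
        VIRTIO_SND_VQ_EVENT,
        VIRTIO_SND_VQ_TX,
        VIRTIO_SND_VQ_RX,
    };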
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index 8a4bd93..86623d5 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -1264,6 +1264,8 @@ static void virtio_iommu_system_reset(void *opaque)
trace_virtio_iommu_system_reset();
+ memset(s->iommu_pcibus_by_bus_num, 0, sizeof(s->iommu_pcibus_by_bus_num));
+
/*
* config.bypass is sticky across device reset, but should be restored on
* system reset
@@ -1302,8 +1304,6 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
virtio_init(vdev, VIRTIO_ID_IOMMU, sizeof(struct virtio_iommu_config));
- memset(s->iommu_pcibus_by_bus_num, 0, sizeof(s->iommu_pcibus_by_bus_num));
-
s->req_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE,
virtio_iommu_handle_command);
s->event_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE, NULL);
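Moving the memset from realize to system reset matters because iommu_pcibus_by_bus_num is only a cache from guest-visible PCI bus numbers to IOMMUPciBus entries: firmware reassigns bus numbers after every reset, so entries cached before a reboot can point at the wrong bus. Clearing the array on reset lets the lazy lookup repopulate it; a rough sketch of that lookup pattern, not a quote of the upstream helper:

    /* Sketch only: resolve a guest bus number to its IOMMUPciBus, caching it. */
    static IOMMUPciBus *find_iommu_pcibus(VirtIOIOMMU *s, uint8_t bus_num)
    {
        IOMMUPciBus *ipbus = s->iommu_pcibus_by_bus_num[bus_num];

        if (!ipbus) {
            GHashTableIter iter;

            g_hash_table_iter_init(&iter, s->as_by_busptr);
            while (g_hash_table_iter_next(&iter, NULL, (void **)&ipbus)) {
                if (pci_bus_num(ipbus->bus) == bus_num) {
                    s->iommu_pcibus_by_bus_num[bus_num] = ipbus;
                    return ipbus;
                }
            }
            return NULL;
        }
        return ipbus;
    }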