Diffstat (limited to 'hw')
-rw-r--r--  hw/acpi/aml-build.c          2
-rw-r--r--  hw/acpi/cpu.c               53
-rw-r--r--  hw/cxl/cxl-mailbox-utils.c   4
-rw-r--r--  hw/net/vhost_net.c          35
-rw-r--r--  hw/virtio/vhost.c           13
5 files changed, 46 insertions(+), 61 deletions(-)
diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index 6a76626..72282b1 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -1960,7 +1960,7 @@ static void build_append_srat_acpi_device_handle(GArray *table_data,
{
assert(strlen(hid) == 8);
/* Device Handle - ACPI */
- for (int i = 0; i < sizeof(hid); i++) {
+ for (int i = 0; i < 8; i++) {
build_append_int_noprefix(table_data, hid[i], 1);
}
build_append_int_noprefix(table_data, uid, 4);
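
For context on the one-line fix above: hid is a pointer parameter (the assert checks strlen(hid) == 8), so sizeof(hid) is the host pointer width. That only coincidentally equals 8 on 64-bit builds and would be 4 on a 32-bit host, hence the literal 8. A minimal standalone sketch of that pitfall (illustrative code, not part of the patch):

#include <stdio.h>

/* Standalone sketch (not QEMU code): sizeof() on a pointer parameter is the
 * pointer width, not the length of the string it points to. */
static void show_size(const char *hid)
{
    printf("sizeof(hid) = %zu\n", sizeof(hid));  /* 8 on 64-bit, 4 on 32-bit */
}

int main(void)
{
    show_size("ACPI0007");   /* illustrative 8-character _HID string */
    return 0;
}
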
diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c
index 23443f0..5cb60ca 100644
--- a/hw/acpi/cpu.c
+++ b/hw/acpi/cpu.c
@@ -50,18 +50,6 @@ void acpi_cpu_ospm_status(CPUHotplugState *cpu_st, ACPIOSTInfoList ***list)
}
}
-static bool check_cpu_enabled_status(DeviceState *dev)
-{
- CPUClass *k = dev ? CPU_GET_CLASS(dev) : NULL;
- CPUState *cpu = CPU(dev);
-
- if (cpu && (!k->cpu_enabled_status || k->cpu_enabled_status(cpu))) {
- return true;
- }
-
- return false;
-}
-
static uint64_t cpu_hotplug_rd(void *opaque, hwaddr addr, unsigned size)
{
uint64_t val = 0;
@@ -75,11 +63,10 @@ static uint64_t cpu_hotplug_rd(void *opaque, hwaddr addr, unsigned size)
cdev = &cpu_st->devs[cpu_st->selector];
switch (addr) {
case ACPI_CPU_FLAGS_OFFSET_RW: /* pack and return is_* fields */
- val |= check_cpu_enabled_status(DEVICE(cdev->cpu)) ? 1 : 0;
+ val |= cdev->cpu ? 1 : 0;
val |= cdev->is_inserting ? 2 : 0;
val |= cdev->is_removing ? 4 : 0;
val |= cdev->fw_remove ? 16 : 0;
- val |= cdev->cpu ? 32 : 0;
trace_cpuhp_acpi_read_flags(cpu_st->selector, val);
break;
case ACPI_CPU_CMD_DATA_OFFSET_RW:
@@ -246,17 +233,6 @@ void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
memory_region_add_subregion(as, base_addr, &state->ctrl_reg);
}
-static bool should_remain_acpi_present(DeviceState *dev)
-{
- CPUClass *k = CPU_GET_CLASS(dev);
- /*
- * A system may contain CPUs that are always present on one die, NUMA node,
- * or socket, yet may be non-present on another simultaneously. Check from
- * architecture specific code.
- */
- return k->cpu_persistent_status && k->cpu_persistent_status(CPU(dev));
-}
-
static AcpiCpuStatus *get_cpu_status(CPUHotplugState *cpu_st, DeviceState *dev)
{
CPUClass *k = CPU_GET_CLASS(dev);
@@ -313,9 +289,7 @@ void acpi_cpu_unplug_cb(CPUHotplugState *cpu_st,
return;
}
- if (!should_remain_acpi_present(dev)) {
- cdev->cpu = NULL;
- }
+ cdev->cpu = NULL;
}
static const VMStateDescription vmstate_cpuhp_sts = {
@@ -362,7 +336,6 @@ const VMStateDescription vmstate_cpu_hotplug = {
#define CPU_REMOVE_EVENT "CRMV"
#define CPU_EJECT_EVENT "CEJ0"
#define CPU_FW_EJECT_EVENT "CEJF"
-#define CPU_PRESENT "CPRS"
void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
build_madt_cpu_fn build_madt_cpu, hwaddr base_addr,
@@ -423,9 +396,7 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
aml_append(field, aml_named_field(CPU_EJECT_EVENT, 1));
/* tell firmware to do device eject, write only */
aml_append(field, aml_named_field(CPU_FW_EJECT_EVENT, 1));
- /* 1 if present, read only */
- aml_append(field, aml_named_field(CPU_PRESENT, 1));
- aml_append(field, aml_reserved_field(2));
+ aml_append(field, aml_reserved_field(3));
aml_append(field, aml_named_field(CPU_COMMAND, 8));
aml_append(cpu_ctrl_dev, field);
@@ -455,7 +426,6 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
Aml *ctrl_lock = aml_name("%s.%s", cphp_res_path, CPU_LOCK);
Aml *cpu_selector = aml_name("%s.%s", cphp_res_path, CPU_SELECTOR);
Aml *is_enabled = aml_name("%s.%s", cphp_res_path, CPU_ENABLED);
- Aml *is_present = aml_name("%s.%s", cphp_res_path, CPU_PRESENT);
Aml *cpu_cmd = aml_name("%s.%s", cphp_res_path, CPU_COMMAND);
Aml *cpu_data = aml_name("%s.%s", cphp_res_path, CPU_DATA);
Aml *ins_evt = aml_name("%s.%s", cphp_res_path, CPU_INSERT_EVENT);
@@ -484,26 +454,13 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
{
Aml *idx = aml_arg(0);
Aml *sta = aml_local(0);
- Aml *ifctx2;
- Aml *else_ctx;
aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
aml_append(method, aml_store(idx, cpu_selector));
aml_append(method, aml_store(zero, sta));
- ifctx = aml_if(aml_equal(is_present, one));
+ ifctx = aml_if(aml_equal(is_enabled, one));
{
- ifctx2 = aml_if(aml_equal(is_enabled, one));
- {
- /* cpu is present and enabled */
- aml_append(ifctx2, aml_store(aml_int(0xF), sta));
- }
- aml_append(ifctx, ifctx2);
- else_ctx = aml_else();
- {
- /* cpu is present but disabled */
- aml_append(else_ctx, aml_store(aml_int(0xD), sta));
- }
- aml_append(ifctx, else_ctx);
+ aml_append(ifctx, aml_store(aml_int(0xF), sta));
}
aml_append(method, ifctx);
aml_append(method, aml_release(ctrl_lock));
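
To summarize the register change in hw/acpi/cpu.c (a reading aid, not part of the patch): the CPRS/present bit (bit 5) and the two-level present/enabled _STA logic are dropped, so the flags byte now reflects only the fields below, and the status method stores 0xF when the CPU is enabled and leaves 0 otherwise. A sketch of the resulting bit packing, with names chosen here for illustration:

/* Sketch of the flag layout the guest now reads at ACPI_CPU_FLAGS_OFFSET_RW
 * after this patch; the comments mirror the fields in hw/acpi/cpu.c. */
enum {
    CPUHP_FLAG_ENABLED   = 1 << 0,  /* cdev->cpu != NULL  */
    CPUHP_FLAG_INSERTING = 1 << 1,  /* cdev->is_inserting */
    CPUHP_FLAG_REMOVING  = 1 << 2,  /* cdev->is_removing  */
    CPUHP_FLAG_FW_REMOVE = 1 << 4,  /* cdev->fw_remove    */
    /* bit 5 (the old CPU_PRESENT / "CPRS" field) is no longer exposed */
};
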
diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c
index 2d4d62c..ce9aa18 100644
--- a/hw/cxl/cxl-mailbox-utils.c
+++ b/hw/cxl/cxl-mailbox-utils.c
@@ -1288,6 +1288,10 @@ static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd,
set_feat_info->data_offset = hdr->offset;
bytes_to_copy = len_in - sizeof(CXLSetFeatureInHeader);
+ if (bytes_to_copy == 0) {
+ return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
+ }
+
if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
if (hdr->version != CXL_MEMDEV_PS_SET_FEATURE_VERSION) {
return CXL_MBOX_UNSUPPORTED;
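
The CXL hunk above adds a guard rejecting a Set Feature payload that carries an input header but no feature data. A minimal sketch of that guard under assumed, simplified types (the error value here is illustrative, not the spec-defined code):

#include <stddef.h>

#define MBOX_OK                      0
#define MBOX_INVALID_PAYLOAD_LENGTH  1   /* illustrative value only */

/* Simplified stand-in for the added check: a Set Feature request whose
 * payload is only the input header (zero data bytes to copy) is rejected. */
static int set_feature_len_check(size_t len_in, size_t header_len)
{
    if (len_in <= header_len) {   /* bytes_to_copy would be 0 (or underflow) */
        return MBOX_INVALID_PAYLOAD_LENGTH;
    }
    return MBOX_OK;
}
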
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 997aab0..891f235 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -229,10 +229,25 @@ static int vhost_net_enable_notifiers(VirtIODevice *dev,
int nvhosts = data_queue_pairs + cvq;
struct vhost_net *net;
struct vhost_dev *hdev;
- int r, i, j;
+ int r, i, j, k;
NetClientState *peer;
/*
+ * We will pass the notifiers to the kernel, make sure that QEMU
+ * doesn't interfere.
+ */
+ for (i = 0; i < nvhosts; i++) {
+ r = virtio_device_grab_ioeventfd(dev);
+ if (r < 0) {
+ error_report("vhost %d binding does not support host notifiers", i);
+ for (k = 0; k < i; k++) {
+ virtio_device_release_ioeventfd(dev);
+ }
+ return r;
+ }
+ }
+
+ /*
* Batch all the host notifiers in a single transaction to avoid
* quadratic time complexity in address_space_update_ioeventfds().
*/
@@ -247,16 +262,6 @@ static int vhost_net_enable_notifiers(VirtIODevice *dev,
net = get_vhost_net(peer);
hdev = &net->dev;
- /*
- * We will pass the notifiers to the kernel, make sure that QEMU
- * doesn't interfere.
- */
- r = virtio_device_grab_ioeventfd(dev);
- if (r < 0) {
- error_report("binding does not support host notifiers");
- memory_region_transaction_commit();
- goto fail_nvhosts;
- }
for (j = 0; j < hdev->nvqs; j++) {
r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus),
@@ -277,6 +282,14 @@ static int vhost_net_enable_notifiers(VirtIODevice *dev,
return 0;
fail_nvhosts:
vhost_net_disable_notifiers_nvhosts(dev, ncs, data_queue_pairs, i);
+ /*
+ * This for loop starts from i+1, not i, because the i-th ioeventfd
+ * has already been released in vhost_dev_disable_notifiers_nvqs().
+ */
+ for (k = i + 1; k < nvhosts; k++) {
+ virtio_device_release_ioeventfd(dev);
+ }
+
return r;
}
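
The vhost_net.c change pulls the ioeventfd grab out of the per-queue notifier loop: all nvhosts are grabbed before the memory-region transaction starts, a failure while grabbing releases only what was grabbed so far, and the fail_nvhosts path releases only the ioeventfds past index i, since the i-th has already been released by the partial-disable helper (per the added comment). A hedged sketch of the grab/rollback pairing, with placeholder functions rather than QEMU's APIs:

/* Placeholder stubs standing in for virtio_device_grab_ioeventfd() and
 * virtio_device_release_ioeventfd(); not QEMU code. */
static int  grab_one(int idx)    { (void)idx; return 0; }
static void release_one(int idx) { (void)idx; }

static int grab_all(int nvhosts)
{
    int i, k, r;

    for (i = 0; i < nvhosts; i++) {
        r = grab_one(i);
        if (r < 0) {
            /* release only what was successfully grabbed before the failure */
            for (k = 0; k < i; k++) {
                release_one(k);
            }
            return r;
        }
    }
    return 0;
}
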
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 76f9b2a..c40f48a 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -2095,11 +2095,22 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
* vhost-kernel code requires for this.*/
for (i = 0; i < hdev->nvqs; ++i) {
struct vhost_virtqueue *vq = hdev->vqs + i;
- vhost_device_iotlb_miss(hdev, vq->used_phys, true);
+ r = vhost_device_iotlb_miss(hdev, vq->used_phys, true);
+ if (r) {
+ goto fail_iotlb;
+ }
}
}
vhost_start_config_intr(hdev);
return 0;
+fail_iotlb:
+ if (vhost_dev_has_iommu(hdev) &&
+ hdev->vhost_ops->vhost_set_iotlb_callback) {
+ hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
+ }
+ if (hdev->vhost_ops->vhost_dev_start) {
+ hdev->vhost_ops->vhost_dev_start(hdev, false);
+ }
fail_start:
if (vrings) {
vhost_dev_set_vring_enable(hdev, false);
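
Finally, vhost.c now checks the return value of vhost_device_iotlb_miss() and unwinds through a new fail_iotlb label that undoes the IOTLB callback and backend start before falling into the existing fail_start path. A brief sketch of that cascading-goto cleanup style (all function names below are placeholders, not vhost ops):

static int  setup_backend(void)            { return 0; }
static int  prime_iotlb(void)              { return 0; }
static void teardown_iotlb_callback(void)  { }
static void stop_backend(void)             { }
static void teardown_vrings(void)          { }

static int start_device(void)
{
    int r;

    r = setup_backend();      /* roughly: vhost_ops->vhost_dev_start(hdev, true) */
    if (r) {
        goto fail_start;
    }
    r = prime_iotlb();        /* roughly: vhost_device_iotlb_miss() per vq */
    if (r) {
        goto fail_iotlb;
    }
    return 0;

fail_iotlb:
    teardown_iotlb_callback();  /* undo the IOTLB callback registration */
    stop_backend();             /* roughly: vhost_ops->vhost_dev_start(hdev, false) */
fail_start:
    teardown_vrings();          /* undo the vring enable done before start */
    return r;
}
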