Diffstat (limited to 'hw')
-rw-r--r--  hw/acpi/Makefile.objs               |   2
-rw-r--r--  hw/acpi/aml-build.c                 |  53
-rw-r--r--  hw/acpi/nvdimm.c                    | 231
-rw-r--r--  hw/block/fdc.c                      |  23
-rw-r--r--  hw/i386/acpi-build.c                | 301
-rw-r--r--  hw/i386/kvm/apic.c                  |   2
-rw-r--r--  hw/i386/pc.c                        |  97
-rw-r--r--  hw/i386/pc_piix.c                   |   5
-rw-r--r--  hw/i386/pc_q35.c                    |   8
-rw-r--r--  hw/i386/xen/xen_apic.c              |   2
-rw-r--r--  hw/intc/apic.c                      |   2
-rw-r--r--  hw/intc/arm_gicv2m.c                |   2
-rw-r--r--  hw/intc/openpic.c                   |   2
-rw-r--r--  hw/intc/openpic_kvm.c               |   2
-rw-r--r--  hw/ipmi/ipmi_bmc_sim.c              | 729
-rw-r--r--  hw/isa/lpc_ich9.c                   |  35
-rw-r--r--  hw/mem/pc-dimm.c                    |  13
-rw-r--r--  hw/pci-bridge/pci_bridge_dev.c      |   2
-rw-r--r--  hw/pci-bridge/pci_expander_bridge.c |   2
-rw-r--r--  hw/pci/msi.c                        |  19
-rw-r--r--  hw/pci/msix.c                       |   2
-rw-r--r--  hw/ppc/spapr.c                      |   4
-rw-r--r--  hw/ppc/spapr_pci.c                  |   2
-rw-r--r--  hw/s390x/s390-pci-bus.c             |   2
-rw-r--r--  hw/virtio/virtio-balloon.c          |  25
-rw-r--r--  hw/virtio/virtio-pci.c              |  11
-rw-r--r--  hw/virtio/virtio-pci.h              |  17
27 files changed, 997 insertions, 598 deletions
diff --git a/hw/acpi/Makefile.objs b/hw/acpi/Makefile.objs
index f3ade9a..faee86c 100644
--- a/hw/acpi/Makefile.objs
+++ b/hw/acpi/Makefile.objs
@@ -2,7 +2,7 @@ common-obj-$(CONFIG_ACPI_X86) += core.o piix4.o pcihp.o
common-obj-$(CONFIG_ACPI_X86_ICH) += ich9.o tco.o
common-obj-$(CONFIG_ACPI_CPU_HOTPLUG) += cpu_hotplug.o cpu_hotplug_acpi_table.o
common-obj-$(CONFIG_ACPI_MEMORY_HOTPLUG) += memory_hotplug.o memory_hotplug_acpi_table.o
-common-obj-$(CONFIG_ACPI_NVDIMM) += nvdimm.o
+obj-$(CONFIG_ACPI_NVDIMM) += nvdimm.o
common-obj-$(CONFIG_ACPI) += acpi_interface.o
common-obj-$(CONFIG_ACPI) += bios-linker-loader.o
common-obj-$(CONFIG_ACPI) += aml-build.o
diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index 6675535..ab89ca6 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -258,6 +258,34 @@ static void build_append_int(GArray *table, uint64_t value)
}
}
+/*
+ * Build NAME(XXXX, 0x00000000) where 0x00000000 is encoded as a dword,
+ * and return the offset to 0x00000000 for runtime patching.
+ *
+ * Warning: runtime patching is best avoided. Only use this as
+ * a replacement for DataTableRegion (for guests that don't
+ * support it).
+ */
+int
+build_append_named_dword(GArray *array, const char *name_format, ...)
+{
+ int offset;
+ va_list ap;
+
+ build_append_byte(array, 0x08); /* NameOp */
+ va_start(ap, name_format);
+ build_append_namestringv(array, name_format, ap);
+ va_end(ap);
+
+ build_append_byte(array, 0x0C); /* DWordPrefix */
+
+ offset = array->len;
+ build_append_int_noprefix(array, 0x00000000, 4);
+ assert(array->len == offset + 4);
+
+ return offset;
+}
+
static GPtrArray *alloc_list;
static Aml *aml_alloc(void)
@@ -942,14 +970,14 @@ Aml *aml_package(uint8_t num_elements)
/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefOpRegion */
Aml *aml_operation_region(const char *name, AmlRegionSpace rs,
- uint32_t offset, uint32_t len)
+ Aml *offset, uint32_t len)
{
Aml *var = aml_alloc();
build_append_byte(var->buf, 0x5B); /* ExtOpPrefix */
build_append_byte(var->buf, 0x80); /* OpRegionOp */
build_append_namestring(var->buf, "%s", name);
build_append_byte(var->buf, rs);
- build_append_int(var->buf, offset);
+ aml_append(var, offset);
build_append_int(var->buf, len);
return var;
}
@@ -997,6 +1025,20 @@ Aml *create_field_common(int opcode, Aml *srcbuf, Aml *index, const char *name)
return var;
}
+/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefCreateField */
+Aml *aml_create_field(Aml *srcbuf, Aml *bit_index, Aml *num_bits,
+ const char *name)
+{
+ Aml *var = aml_alloc();
+ build_append_byte(var->buf, 0x5B); /* ExtOpPrefix */
+ build_append_byte(var->buf, 0x13); /* CreateFieldOp */
+ aml_append(var, srcbuf);
+ aml_append(var, bit_index);
+ aml_append(var, num_bits);
+ build_append_namestring(var->buf, "%s", name);
+ return var;
+}
+
/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefCreateDWordField */
Aml *aml_create_dword_field(Aml *srcbuf, Aml *index, const char *name)
{
@@ -1423,6 +1465,13 @@ Aml *aml_alias(const char *source_object, const char *alias_object)
return var;
}
+/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefConcat */
+Aml *aml_concatenate(Aml *source1, Aml *source2, Aml *target)
+{
+ return build_opcode_2arg_dst(0x73 /* ConcatOp */, source1, source2,
+ target);
+}
+
void
build_header(GArray *linker, GArray *table_data,
AcpiTableHeader *h, const char *sig, int len, uint8_t rev,
diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c
index 49ee68e..9531340 100644
--- a/hw/acpi/nvdimm.c
+++ b/hw/acpi/nvdimm.c
@@ -29,6 +29,8 @@
#include "qemu/osdep.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/aml-build.h"
+#include "hw/acpi/bios-linker-loader.h"
+#include "hw/nvram/fw_cfg.h"
#include "hw/mem/nvdimm.h"
static int nvdimm_plugged_device_list(Object *obj, void *opaque)
@@ -370,15 +372,131 @@ static void nvdimm_build_nfit(GSList *device_list, GArray *table_offsets,
g_array_free(structures, true);
}
+struct NvdimmDsmIn {
+ uint32_t handle;
+ uint32_t revision;
+ uint32_t function;
+ /* the remaining size in the page is used by arg3. */
+ union {
+ uint8_t arg3[0];
+ };
+} QEMU_PACKED;
+typedef struct NvdimmDsmIn NvdimmDsmIn;
+
+struct NvdimmDsmOut {
+ /* the size of buffer filled by QEMU. */
+ uint32_t len;
+ uint8_t data[0];
+} QEMU_PACKED;
+typedef struct NvdimmDsmOut NvdimmDsmOut;
+
+struct NvdimmDsmFunc0Out {
+ /* the size of buffer filled by QEMU. */
+ uint32_t len;
+ uint32_t supported_func;
+} QEMU_PACKED;
+typedef struct NvdimmDsmFunc0Out NvdimmDsmFunc0Out;
+
+struct NvdimmDsmFuncNoPayloadOut {
+ /* the size of buffer filled by QEMU. */
+ uint32_t len;
+ uint32_t func_ret_status;
+} QEMU_PACKED;
+typedef struct NvdimmDsmFuncNoPayloadOut NvdimmDsmFuncNoPayloadOut;
+
+static uint64_t
+nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size)
+{
+ nvdimm_debug("BUG: we never read _DSM IO Port.\n");
+ return 0;
+}
+
+static void
+nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
+{
+ NvdimmDsmIn *in;
+ hwaddr dsm_mem_addr = val;
+
+ nvdimm_debug("dsm memory address %#" HWADDR_PRIx ".\n", dsm_mem_addr);
+
+ /*
+ * The DSM memory is mapped to guest address space so an evil guest
+ * can change its content while we are doing DSM emulation. Avoid
+ * this by copying DSM memory to QEMU local memory.
+ */
+ in = g_malloc(TARGET_PAGE_SIZE);
+ cpu_physical_memory_read(dsm_mem_addr, in, TARGET_PAGE_SIZE);
+
+ le32_to_cpus(&in->revision);
+ le32_to_cpus(&in->function);
+ le32_to_cpus(&in->handle);
+
+ nvdimm_debug("Revision %#x Handler %#x Function %#x.\n", in->revision,
+ in->handle, in->function);
+
+ /*
+ * function 0 is called to inquire which functions are supported by
+ * OSPM
+ */
+ if (in->function == 0) {
+ NvdimmDsmFunc0Out func0 = {
+ .len = cpu_to_le32(sizeof(func0)),
+ /* No function supported other than function 0 */
+ .supported_func = cpu_to_le32(0),
+ };
+ cpu_physical_memory_write(dsm_mem_addr, &func0, sizeof func0);
+ } else {
+ /* No function except function 0 is supported yet. */
+ NvdimmDsmFuncNoPayloadOut out = {
+ .len = cpu_to_le32(sizeof(out)),
+ .func_ret_status = cpu_to_le32(1) /* Not Supported */,
+ };
+ cpu_physical_memory_write(dsm_mem_addr, &out, sizeof(out));
+ }
+
+ g_free(in);
+}
+
+static const MemoryRegionOps nvdimm_dsm_ops = {
+ .read = nvdimm_dsm_read,
+ .write = nvdimm_dsm_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
+ FWCfgState *fw_cfg, Object *owner)
+{
+ memory_region_init_io(&state->io_mr, owner, &nvdimm_dsm_ops, state,
+ "nvdimm-acpi-io", NVDIMM_ACPI_IO_LEN);
+ memory_region_add_subregion(io, NVDIMM_ACPI_IO_BASE, &state->io_mr);
+
+ state->dsm_mem = g_array_new(false, true /* clear */, 1);
+ acpi_data_push(state->dsm_mem, TARGET_PAGE_SIZE);
+ fw_cfg_add_file(fw_cfg, NVDIMM_DSM_MEM_FILE, state->dsm_mem->data,
+ state->dsm_mem->len);
+}
+
#define NVDIMM_COMMON_DSM "NCAL"
+#define NVDIMM_ACPI_MEM_ADDR "MEMA"
static void nvdimm_build_common_dsm(Aml *dev)
{
- Aml *method, *ifctx, *function;
+ Aml *method, *ifctx, *function, *dsm_mem, *unpatched, *result_size;
uint8_t byte_list[1];
- method = aml_method(NVDIMM_COMMON_DSM, 4, AML_NOTSERIALIZED);
+ method = aml_method(NVDIMM_COMMON_DSM, 4, AML_SERIALIZED);
function = aml_arg(2);
+ dsm_mem = aml_name(NVDIMM_ACPI_MEM_ADDR);
+
+ /*
+ * do not support any method if the DSM memory address has not
+ * been patched.
+ */
+ unpatched = aml_if(aml_equal(dsm_mem, aml_int(0x0)));
/*
* function 0 is called to inquire what functions are supported by
@@ -387,12 +505,38 @@ static void nvdimm_build_common_dsm(Aml *dev)
ifctx = aml_if(aml_equal(function, aml_int(0)));
byte_list[0] = 0 /* No function Supported */;
aml_append(ifctx, aml_return(aml_buffer(1, byte_list)));
- aml_append(method, ifctx);
+ aml_append(unpatched, ifctx);
/* No function is supported yet. */
byte_list[0] = 1 /* Not Supported */;
- aml_append(method, aml_return(aml_buffer(1, byte_list)));
+ aml_append(unpatched, aml_return(aml_buffer(1, byte_list)));
+ aml_append(method, unpatched);
+
+ /*
+ * HDLE indicates which device the _DSM call was issued from. It is
+ * not used at this time as no function is supported yet; we keep it
+ * 0 for all devices and will set the appropriate value once a real
+ * function is implemented.
+ */
+ aml_append(method, aml_store(aml_int(0x0), aml_name("HDLE")));
+ aml_append(method, aml_store(aml_arg(1), aml_name("REVS")));
+ aml_append(method, aml_store(aml_arg(2), aml_name("FUNC")));
+ /*
+ * tell QEMU the real address of the DSM memory; QEMU then takes
+ * control and fills the result into the DSM memory.
+ */
+ aml_append(method, aml_store(dsm_mem, aml_name("NTFI")));
+
+ result_size = aml_local(1);
+ aml_append(method, aml_store(aml_name("RLEN"), result_size));
+ aml_append(method, aml_store(aml_shiftleft(result_size, aml_int(3)),
+ result_size));
+ aml_append(method, aml_create_field(aml_name("ODAT"), aml_int(0),
+ result_size, "OBUF"));
+ aml_append(method, aml_concatenate(aml_buffer(0, NULL), aml_name("OBUF"),
+ aml_arg(6)));
+ aml_append(method, aml_return(aml_arg(6)));
aml_append(dev, method);
}
@@ -435,7 +579,8 @@ static void nvdimm_build_nvdimm_devices(GSList *device_list, Aml *root_dev)
static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
GArray *table_data, GArray *linker)
{
- Aml *ssdt, *sb_scope, *dev;
+ Aml *ssdt, *sb_scope, *dev, *field;
+ int mem_addr_offset, nvdimm_ssdt;
acpi_add_table(table_offsets, table_data);
@@ -459,19 +604,89 @@ static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
*/
aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012")));
+ /* map DSM memory and IO into ACPI namespace. */
+ aml_append(dev, aml_operation_region("NPIO", AML_SYSTEM_IO,
+ aml_int(NVDIMM_ACPI_IO_BASE), NVDIMM_ACPI_IO_LEN));
+ aml_append(dev, aml_operation_region("NRAM", AML_SYSTEM_MEMORY,
+ aml_name(NVDIMM_ACPI_MEM_ADDR), TARGET_PAGE_SIZE));
+
+ /*
+ * DSM notifier:
+ * NTFI: write the address of DSM memory and notify QEMU to emulate
+ * the access.
+ *
+ * It is an IO port, so accessing it causes a VM-exit and control is
+ * transferred to QEMU.
+ */
+ field = aml_field("NPIO", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
+ aml_append(field, aml_named_field("NTFI",
+ sizeof(uint32_t) * BITS_PER_BYTE));
+ aml_append(dev, field);
+
+ /*
+ * DSM input:
+ * HDLE: store device's handle, it's zero if the _DSM call happens
+ * on NVDIMM Root Device.
+ * REVS: store the Arg1 of _DSM call.
+ * FUNC: store the Arg2 of _DSM call.
+ * ARG3: store the Arg3 of _DSM call.
+ *
+ * They are mapped as RAM on the host so that these accesses never
+ * cause a VM-exit.
+ */
+ field = aml_field("NRAM", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
+ aml_append(field, aml_named_field("HDLE",
+ sizeof(typeof_field(NvdimmDsmIn, handle)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field("REVS",
+ sizeof(typeof_field(NvdimmDsmIn, revision)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field("FUNC",
+ sizeof(typeof_field(NvdimmDsmIn, function)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field("ARG3",
+ (TARGET_PAGE_SIZE - offsetof(NvdimmDsmIn, arg3)) *
+ BITS_PER_BYTE));
+ aml_append(dev, field);
+
+ /*
+ * DSM output:
+ * RLEN: the size of the buffer filled by QEMU.
+ * ODAT: the buffer QEMU uses to store the result.
+ *
+ * Since the page is reused for both input and output, the input data
+ * will be lost once the new result is stored into ODAT, so all the
+ * input data should be fetched before writing the result.
+ */
+ field = aml_field("NRAM", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
+ aml_append(field, aml_named_field("RLEN",
+ sizeof(typeof_field(NvdimmDsmOut, len)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field("ODAT",
+ (TARGET_PAGE_SIZE - offsetof(NvdimmDsmOut, data)) *
+ BITS_PER_BYTE));
+ aml_append(dev, field);
+
nvdimm_build_common_dsm(dev);
nvdimm_build_device_dsm(dev);
nvdimm_build_nvdimm_devices(device_list, dev);
aml_append(sb_scope, dev);
-
aml_append(ssdt, sb_scope);
+
+ nvdimm_ssdt = table_data->len;
+
/* copy AML table into ACPI tables blob and patch header there */
g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len);
+ mem_addr_offset = build_append_named_dword(table_data,
+ NVDIMM_ACPI_MEM_ADDR);
+
+ bios_linker_loader_alloc(linker, NVDIMM_DSM_MEM_FILE, TARGET_PAGE_SIZE,
+ false /* high memory */);
+ bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
+ NVDIMM_DSM_MEM_FILE, table_data,
+ table_data->data + mem_addr_offset,
+ sizeof(uint32_t));
build_header(linker, table_data,
- (void *)(table_data->data + table_data->len - ssdt->buf->len),
- "SSDT", ssdt->buf->len, 1, NULL, "NVDIMM");
+ (void *)(table_data->data + nvdimm_ssdt),
+ "SSDT", table_data->len - nvdimm_ssdt, 1, NULL, "NVDIMM");
free_aml_allocator();
}
diff --git a/hw/block/fdc.c b/hw/block/fdc.c
index 9838d21..fc3aef9 100644
--- a/hw/block/fdc.c
+++ b/hw/block/fdc.c
@@ -2557,6 +2557,29 @@ FloppyDriveType isa_fdc_get_drive_type(ISADevice *fdc, int i)
return isa->state.drives[i].drive;
}
+void isa_fdc_get_drive_max_chs(FloppyDriveType type,
+ uint8_t *maxc, uint8_t *maxh, uint8_t *maxs)
+{
+ const FDFormat *fdf;
+
+ *maxc = *maxh = *maxs = 0;
+ for (fdf = fd_formats; fdf->drive != FLOPPY_DRIVE_TYPE_NONE; fdf++) {
+ if (fdf->drive != type) {
+ continue;
+ }
+ if (*maxc < fdf->max_track) {
+ *maxc = fdf->max_track;
+ }
+ if (*maxh < fdf->max_head) {
+ *maxh = fdf->max_head;
+ }
+ if (*maxs < fdf->last_sect) {
+ *maxs = fdf->last_sect;
+ }
+ }
+ (*maxc)--;
+}
+
static const VMStateDescription vmstate_isa_fdc ={
.name = "fdc",
.version_id = 2,
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index b888008..325d8ce 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -37,8 +37,8 @@
#include "hw/acpi/bios-linker-loader.h"
#include "hw/loader.h"
#include "hw/isa/isa.h"
+#include "hw/block/fdc.h"
#include "hw/acpi/memory_hotplug.h"
-#include "hw/mem/nvdimm.h"
#include "sysemu/tpm.h"
#include "hw/acpi/tpm.h"
#include "sysemu/tpm_backend.h"
@@ -76,10 +76,6 @@
#define ACPI_BUILD_DPRINTF(fmt, ...)
#endif
-typedef struct AcpiCpuInfo {
- DECLARE_BITMAP(found_cpus, ACPI_CPU_HOTPLUG_ID_LIMIT);
-} AcpiCpuInfo;
-
typedef struct AcpiMcfgInfo {
uint64_t mcfg_base;
uint32_t mcfg_size;
@@ -121,31 +117,6 @@ typedef struct AcpiBuildPciBusHotplugState {
bool pcihp_bridge_en;
} AcpiBuildPciBusHotplugState;
-static
-int acpi_add_cpu_info(Object *o, void *opaque)
-{
- AcpiCpuInfo *cpu = opaque;
- uint64_t apic_id;
-
- if (object_dynamic_cast(o, TYPE_CPU)) {
- apic_id = object_property_get_int(o, "apic-id", NULL);
- assert(apic_id < ACPI_CPU_HOTPLUG_ID_LIMIT);
-
- set_bit(apic_id, cpu->found_cpus);
- }
-
- object_child_foreach(o, acpi_add_cpu_info, opaque);
- return 0;
-}
-
-static void acpi_get_cpu_info(AcpiCpuInfo *cpu)
-{
- Object *root = object_get_root();
-
- memset(cpu->found_cpus, 0, sizeof cpu->found_cpus);
- object_child_foreach(root, acpi_add_cpu_info, cpu);
-}
-
static void acpi_get_pm_info(AcpiPmInfo *pm)
{
Object *piix = piix4_pm_find();
@@ -362,9 +333,10 @@ build_fadt(GArray *table_data, GArray *linker, AcpiPmInfo *pm,
}
static void
-build_madt(GArray *table_data, GArray *linker, AcpiCpuInfo *cpu)
+build_madt(GArray *table_data, GArray *linker, PCMachineState *pcms)
{
- PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
+ MachineClass *mc = MACHINE_GET_CLASS(pcms);
+ CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(MACHINE(pcms));
int madt_start = table_data->len;
AcpiMultipleApicTable *madt;
@@ -377,18 +349,28 @@ build_madt(GArray *table_data, GArray *linker, AcpiCpuInfo *cpu)
madt->local_apic_address = cpu_to_le32(APIC_DEFAULT_ADDRESS);
madt->flags = cpu_to_le32(1);
- for (i = 0; i < pcms->apic_id_limit; i++) {
+ for (i = 0; i < apic_ids->len; i++) {
AcpiMadtProcessorApic *apic = acpi_data_push(table_data, sizeof *apic);
+ int apic_id = apic_ids->cpus[i].arch_id;
+
apic->type = ACPI_APIC_PROCESSOR;
apic->length = sizeof(*apic);
- apic->processor_id = i;
- apic->local_apic_id = i;
- if (test_bit(i, cpu->found_cpus)) {
+ apic->processor_id = apic_id;
+ apic->local_apic_id = apic_id;
+ if (apic_ids->cpus[i].cpu != NULL) {
apic->flags = cpu_to_le32(1);
} else {
+ /* The ACPI spec says that an LAPIC entry for a non-present
+ * CPU may either be omitted from the MADT or be marked
+ * as disabled. However, omitting non-present CPUs from the
+ * MADT breaks hotplug on Linux, so possible CPUs
+ * should be put in the MADT but kept disabled.
+ */
apic->flags = cpu_to_le32(0);
}
}
+ g_free(apic_ids);
+
io_apic = acpi_data_push(table_data, sizeof *io_apic);
io_apic->type = ACPI_APIC_IO;
io_apic->length = sizeof(*io_apic);
@@ -960,21 +942,24 @@ static Aml *build_crs(PCIHostState *host,
return crs;
}
-static void build_processor_devices(Aml *sb_scope, unsigned acpi_cpus,
- AcpiCpuInfo *cpu, AcpiPmInfo *pm)
+static void build_processor_devices(Aml *sb_scope, MachineState *machine,
+ AcpiPmInfo *pm)
{
- int i;
+ int i, apic_idx;
Aml *dev;
Aml *crs;
Aml *pkg;
Aml *field;
Aml *ifctx;
Aml *method;
+ MachineClass *mc = MACHINE_GET_CLASS(machine);
+ CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine);
+ PCMachineState *pcms = PC_MACHINE(machine);
/* The current AML generator can cover the APIC ID range [0..255],
* inclusive, for VCPU hotplug. */
QEMU_BUILD_BUG_ON(ACPI_CPU_HOTPLUG_ID_LIMIT > 256);
- g_assert(acpi_cpus <= ACPI_CPU_HOTPLUG_ID_LIMIT);
+ g_assert(pcms->apic_id_limit <= ACPI_CPU_HOTPLUG_ID_LIMIT);
/* create PCI0.PRES device and its _CRS to reserve CPU hotplug MMIO */
dev = aml_device("PCI0." stringify(CPU_HOTPLUG_RESOURCE_DEVICE));
@@ -993,28 +978,33 @@ static void build_processor_devices(Aml *sb_scope, unsigned acpi_cpus,
aml_append(sb_scope, dev);
/* declare CPU hotplug MMIO region and PRS field to access it */
aml_append(sb_scope, aml_operation_region(
- "PRST", AML_SYSTEM_IO, pm->cpu_hp_io_base, pm->cpu_hp_io_len));
+ "PRST", AML_SYSTEM_IO, aml_int(pm->cpu_hp_io_base), pm->cpu_hp_io_len));
field = aml_field("PRST", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("PRS", 256));
aml_append(sb_scope, field);
/* build Processor object for each processor */
- for (i = 0; i < acpi_cpus; i++) {
- dev = aml_processor(i, 0, 0, "CP%.02X", i);
+ for (i = 0; i < apic_ids->len; i++) {
+ int apic_id = apic_ids->cpus[i].arch_id;
+
+ assert(apic_id < ACPI_CPU_HOTPLUG_ID_LIMIT);
+
+ dev = aml_processor(apic_id, 0, 0, "CP%.02X", apic_id);
method = aml_method("_MAT", 0, AML_NOTSERIALIZED);
aml_append(method,
- aml_return(aml_call1(CPU_MAT_METHOD, aml_int(i))));
+ aml_return(aml_call1(CPU_MAT_METHOD, aml_int(apic_id))));
aml_append(dev, method);
method = aml_method("_STA", 0, AML_NOTSERIALIZED);
aml_append(method,
- aml_return(aml_call1(CPU_STATUS_METHOD, aml_int(i))));
+ aml_return(aml_call1(CPU_STATUS_METHOD, aml_int(apic_id))));
aml_append(dev, method);
method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
aml_append(method,
- aml_return(aml_call2(CPU_EJECT_METHOD, aml_int(i), aml_arg(0)))
+ aml_return(aml_call2(CPU_EJECT_METHOD, aml_int(apic_id),
+ aml_arg(0)))
);
aml_append(dev, method);
@@ -1026,10 +1016,12 @@ static void build_processor_devices(Aml *sb_scope, unsigned acpi_cpus,
*/
/* Arg0 = Processor ID = APIC ID */
method = aml_method(AML_NOTIFY_METHOD, 2, AML_NOTSERIALIZED);
- for (i = 0; i < acpi_cpus; i++) {
- ifctx = aml_if(aml_equal(aml_arg(0), aml_int(i)));
+ for (i = 0; i < apic_ids->len; i++) {
+ int apic_id = apic_ids->cpus[i].arch_id;
+
+ ifctx = aml_if(aml_equal(aml_arg(0), aml_int(apic_id)));
aml_append(ifctx,
- aml_notify(aml_name("CP%.02X", i), aml_arg(1))
+ aml_notify(aml_name("CP%.02X", apic_id), aml_arg(1))
);
aml_append(method, ifctx);
}
@@ -1042,14 +1034,20 @@ static void build_processor_devices(Aml *sb_scope, unsigned acpi_cpus,
* ith up to 255 elements. Windows guests up to win2k8 fail when
* VarPackageOp is used.
*/
- pkg = acpi_cpus <= 255 ? aml_package(acpi_cpus) :
- aml_varpackage(acpi_cpus);
+ pkg = pcms->apic_id_limit <= 255 ? aml_package(pcms->apic_id_limit) :
+ aml_varpackage(pcms->apic_id_limit);
+
+ for (i = 0, apic_idx = 0; i < apic_ids->len; i++) {
+ int apic_id = apic_ids->cpus[i].arch_id;
- for (i = 0; i < acpi_cpus; i++) {
- uint8_t b = test_bit(i, cpu->found_cpus) ? 0x01 : 0x00;
- aml_append(pkg, aml_int(b));
+ for (; apic_idx < apic_id; apic_idx++) {
+ aml_append(pkg, aml_int(0));
+ }
+ aml_append(pkg, aml_int(apic_ids->cpus[i].cpu ? 1 : 0));
+ apic_idx = apic_id + 1;
}
aml_append(sb_scope, aml_name_decl(CPU_ON_BITMAP, pkg));
+ g_free(apic_ids);
}
static void build_memory_devices(Aml *sb_scope, int nr_mem,
@@ -1078,7 +1076,7 @@ static void build_memory_devices(Aml *sb_scope, int nr_mem,
aml_append(scope, aml_operation_region(
MEMORY_HOTPLUG_IO_REGION, AML_SYSTEM_IO,
- io_base, io_len)
+ aml_int(io_base), io_len)
);
field = aml_field(MEMORY_HOTPLUG_IO_REGION, AML_DWORD_ACC,
@@ -1192,7 +1190,8 @@ static void build_hpet_aml(Aml *table)
aml_append(dev, aml_name_decl("_UID", zero));
aml_append(dev,
- aml_operation_region("HPTM", AML_SYSTEM_MEMORY, HPET_BASE, HPET_LEN));
+ aml_operation_region("HPTM", AML_SYSTEM_MEMORY, aml_int(HPET_BASE),
+ HPET_LEN));
field = aml_field("HPTM", AML_DWORD_ACC, AML_LOCK, AML_PRESERVE);
aml_append(field, aml_named_field("VEND", 32));
aml_append(field, aml_named_field("PRD", 32));
@@ -1227,33 +1226,63 @@ static void build_hpet_aml(Aml *table)
aml_append(table, scope);
}
-static Aml *build_fdc_device_aml(void)
+static Aml *build_fdinfo_aml(int idx, FloppyDriveType type)
{
+ Aml *dev, *fdi;
+ uint8_t maxc, maxh, maxs;
+
+ isa_fdc_get_drive_max_chs(type, &maxc, &maxh, &maxs);
+
+ dev = aml_device("FLP%c", 'A' + idx);
+
+ aml_append(dev, aml_name_decl("_ADR", aml_int(idx)));
+
+ fdi = aml_package(16);
+ aml_append(fdi, aml_int(idx)); /* Drive Number */
+ aml_append(fdi,
+ aml_int(cmos_get_fd_drive_type(type))); /* Device Type */
+ /*
+ * the values below are the limits of the drive, and are thus independent
+ * of the inserted media
+ */
+ aml_append(fdi, aml_int(maxc)); /* Maximum Cylinder Number */
+ aml_append(fdi, aml_int(maxs)); /* Maximum Sector Number */
+ aml_append(fdi, aml_int(maxh)); /* Maximum Head Number */
+ /*
+ * SeaBIOS returns the below values for int 0x13 func 0x08 regardless of
+ * the drive type, so shall we
+ */
+ aml_append(fdi, aml_int(0xAF)); /* disk_specify_1 */
+ aml_append(fdi, aml_int(0x02)); /* disk_specify_2 */
+ aml_append(fdi, aml_int(0x25)); /* disk_motor_wait */
+ aml_append(fdi, aml_int(0x02)); /* disk_sector_siz */
+ aml_append(fdi, aml_int(0x12)); /* disk_eot */
+ aml_append(fdi, aml_int(0x1B)); /* disk_rw_gap */
+ aml_append(fdi, aml_int(0xFF)); /* disk_dtl */
+ aml_append(fdi, aml_int(0x6C)); /* disk_formt_gap */
+ aml_append(fdi, aml_int(0xF6)); /* disk_fill */
+ aml_append(fdi, aml_int(0x0F)); /* disk_head_sttl */
+ aml_append(fdi, aml_int(0x08)); /* disk_motor_strt */
+
+ aml_append(dev, aml_name_decl("_FDI", fdi));
+ return dev;
+}
+
+static Aml *build_fdc_device_aml(ISADevice *fdc)
+{
+ int i;
Aml *dev;
Aml *crs;
- Aml *method;
- Aml *if_ctx;
- Aml *else_ctx;
- Aml *zero = aml_int(0);
- Aml *is_present = aml_local(0);
+
+#define ACPI_FDE_MAX_FD 4
+ uint32_t fde_buf[5] = {
+ 0, 0, 0, 0, /* presence of floppy drives #0 - #3 */
+ cpu_to_le32(2) /* tape presence (2 == never present) */
+ };
dev = aml_device("FDC0");
aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0700")));
- method = aml_method("_STA", 0, AML_NOTSERIALIZED);
- aml_append(method, aml_store(aml_name("FDEN"), is_present));
- if_ctx = aml_if(aml_equal(is_present, zero));
- {
- aml_append(if_ctx, aml_return(aml_int(0x00)));
- }
- aml_append(method, if_ctx);
- else_ctx = aml_else();
- {
- aml_append(else_ctx, aml_return(aml_int(0x0f)));
- }
- aml_append(method, else_ctx);
- aml_append(dev, method);
-
crs = aml_resource_template();
aml_append(crs, aml_io(AML_DECODE16, 0x03F2, 0x03F2, 0x00, 0x04));
aml_append(crs, aml_io(AML_DECODE16, 0x03F7, 0x03F7, 0x00, 0x01));
@@ -1262,6 +1291,17 @@ static Aml *build_fdc_device_aml(void)
aml_dma(AML_COMPATIBILITY, AML_NOTBUSMASTER, AML_TRANSFER8, 2));
aml_append(dev, aml_name_decl("_CRS", crs));
+ for (i = 0; i < MIN(MAX_FD, ACPI_FDE_MAX_FD); i++) {
+ FloppyDriveType type = isa_fdc_get_drive_type(fdc, i);
+
+ if (type < FLOPPY_DRIVE_TYPE_NONE) {
+ fde_buf[i] = cpu_to_le32(1); /* drive present */
+ aml_append(dev, build_fdinfo_aml(i, type));
+ }
+ }
+ aml_append(dev, aml_name_decl("_FDE",
+ aml_buffer(sizeof(fde_buf), (uint8_t *)fde_buf)));
+
return dev;
}
@@ -1406,12 +1446,16 @@ static Aml *build_com_device_aml(uint8_t uid)
static void build_isa_devices_aml(Aml *table)
{
+ ISADevice *fdc = pc_find_fdc0();
+
Aml *scope = aml_scope("_SB.PCI0.ISA");
aml_append(scope, build_rtc_device_aml());
aml_append(scope, build_kbd_device_aml());
aml_append(scope, build_mouse_device_aml());
- aml_append(scope, build_fdc_device_aml());
+ if (fdc) {
+ aml_append(scope, build_fdc_device_aml(fdc));
+ }
aml_append(scope, build_lpt_device_aml());
aml_append(scope, build_com_device_aml(1));
aml_append(scope, build_com_device_aml(2));
@@ -1430,7 +1474,7 @@ static void build_dbg_aml(Aml *table)
Aml *idx = aml_local(2);
aml_append(scope,
- aml_operation_region("DBG", AML_SYSTEM_IO, 0x0402, 0x01));
+ aml_operation_region("DBG", AML_SYSTEM_IO, aml_int(0x0402), 0x01));
field = aml_field("DBG", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("DBGB", 8));
aml_append(scope, field);
@@ -1509,6 +1553,12 @@ static Aml *build_gsi_link_dev(const char *name, uint8_t uid, uint8_t gsi)
aml_append(dev, aml_name_decl("_CRS", crs));
+ /*
+ * _DIS can be a no-op because the interrupt cannot be disabled.
+ */
+ method = aml_method("_DIS", 0, AML_NOTSERIALIZED);
+ aml_append(dev, method);
+
method = aml_method("_SRS", 1, AML_NOTSERIALIZED);
aml_append(dev, method);
@@ -1742,18 +1792,14 @@ static void build_q35_pci0_int(Aml *table)
aml_append(sb_scope, build_link_dev("LNKG", 6, aml_name("PRQG")));
aml_append(sb_scope, build_link_dev("LNKH", 7, aml_name("PRQH")));
- /*
- * TODO: UID probably shouldn't be the same for GSIx devices
- * but that's how it was in original ASL so keep it for now
- */
- aml_append(sb_scope, build_gsi_link_dev("GSIA", 0, 0x10));
- aml_append(sb_scope, build_gsi_link_dev("GSIB", 0, 0x11));
- aml_append(sb_scope, build_gsi_link_dev("GSIC", 0, 0x12));
- aml_append(sb_scope, build_gsi_link_dev("GSID", 0, 0x13));
- aml_append(sb_scope, build_gsi_link_dev("GSIE", 0, 0x14));
- aml_append(sb_scope, build_gsi_link_dev("GSIF", 0, 0x15));
- aml_append(sb_scope, build_gsi_link_dev("GSIG", 0, 0x16));
- aml_append(sb_scope, build_gsi_link_dev("GSIH", 0, 0x17));
+ aml_append(sb_scope, build_gsi_link_dev("GSIA", 0x10, 0x10));
+ aml_append(sb_scope, build_gsi_link_dev("GSIB", 0x11, 0x11));
+ aml_append(sb_scope, build_gsi_link_dev("GSIC", 0x12, 0x12));
+ aml_append(sb_scope, build_gsi_link_dev("GSID", 0x13, 0x13));
+ aml_append(sb_scope, build_gsi_link_dev("GSIE", 0x14, 0x14));
+ aml_append(sb_scope, build_gsi_link_dev("GSIF", 0x15, 0x15));
+ aml_append(sb_scope, build_gsi_link_dev("GSIG", 0x16, 0x16));
+ aml_append(sb_scope, build_gsi_link_dev("GSIH", 0x17, 0x17));
aml_append(table, sb_scope);
}
@@ -1770,28 +1816,25 @@ static void build_q35_isa_bridge(Aml *table)
/* ICH9 PCI to ISA irq remapping */
aml_append(dev, aml_operation_region("PIRQ", AML_PCI_CONFIG,
- 0x60, 0x0C));
+ aml_int(0x60), 0x0C));
aml_append(dev, aml_operation_region("LPCD", AML_PCI_CONFIG,
- 0x80, 0x02));
+ aml_int(0x80), 0x02));
field = aml_field("LPCD", AML_ANY_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("COMA", 3));
aml_append(field, aml_reserved_field(1));
aml_append(field, aml_named_field("COMB", 3));
aml_append(field, aml_reserved_field(1));
aml_append(field, aml_named_field("LPTD", 2));
- aml_append(field, aml_reserved_field(2));
- aml_append(field, aml_named_field("FDCD", 2));
aml_append(dev, field);
aml_append(dev, aml_operation_region("LPCE", AML_PCI_CONFIG,
- 0x82, 0x02));
+ aml_int(0x82), 0x02));
/* enable bits */
field = aml_field("LPCE", AML_ANY_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("CAEN", 1));
aml_append(field, aml_named_field("CBEN", 1));
aml_append(field, aml_named_field("LPEN", 1));
- aml_append(field, aml_named_field("FDEN", 1));
aml_append(dev, field);
aml_append(scope, dev);
@@ -1808,7 +1851,7 @@ static void build_piix4_pm(Aml *table)
aml_append(dev, aml_name_decl("_ADR", aml_int(0x00010003)));
aml_append(dev, aml_operation_region("P13C", AML_PCI_CONFIG,
- 0x00, 0xff));
+ aml_int(0x00), 0xff));
aml_append(scope, dev);
aml_append(table, scope);
}
@@ -1825,7 +1868,7 @@ static void build_piix4_isa_bridge(Aml *table)
/* PIIX PCI to ISA irq remapping */
aml_append(dev, aml_operation_region("P40C", AML_PCI_CONFIG,
- 0x60, 0x04));
+ aml_int(0x60), 0x04));
/* enable bits */
field = aml_field("^PX13.P13C", AML_ANY_ACC, AML_NOLOCK, AML_PRESERVE);
/* Offset(0x5f),, 7, */
@@ -1839,7 +1882,6 @@ static void build_piix4_isa_bridge(Aml *table)
aml_append(field, aml_reserved_field(3));
aml_append(field, aml_named_field("CBEN", 1));
aml_append(dev, field);
- aml_append(dev, aml_name_decl("FDEN", aml_int(1)));
aml_append(scope, dev);
aml_append(table, scope);
@@ -1854,20 +1896,20 @@ static void build_piix4_pci_hotplug(Aml *table)
scope = aml_scope("_SB.PCI0");
aml_append(scope,
- aml_operation_region("PCST", AML_SYSTEM_IO, 0xae00, 0x08));
+ aml_operation_region("PCST", AML_SYSTEM_IO, aml_int(0xae00), 0x08));
field = aml_field("PCST", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS);
aml_append(field, aml_named_field("PCIU", 32));
aml_append(field, aml_named_field("PCID", 32));
aml_append(scope, field);
aml_append(scope,
- aml_operation_region("SEJ", AML_SYSTEM_IO, 0xae08, 0x04));
+ aml_operation_region("SEJ", AML_SYSTEM_IO, aml_int(0xae08), 0x04));
field = aml_field("SEJ", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS);
aml_append(field, aml_named_field("B0EJ", 32));
aml_append(scope, field);
aml_append(scope,
- aml_operation_region("BNMR", AML_SYSTEM_IO, 0xae10, 0x04));
+ aml_operation_region("BNMR", AML_SYSTEM_IO, aml_int(0xae10), 0x04));
field = aml_field("BNMR", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS);
aml_append(field, aml_named_field("BNUM", 32));
aml_append(scope, field);
@@ -1937,14 +1979,13 @@ static Aml *build_q35_osc_method(void)
static void
build_dsdt(GArray *table_data, GArray *linker,
- AcpiCpuInfo *cpu, AcpiPmInfo *pm, AcpiMiscInfo *misc,
- PcPciInfo *pci)
+ AcpiPmInfo *pm, AcpiMiscInfo *misc,
+ PcPciInfo *pci, MachineState *machine)
{
CrsRangeEntry *entry;
Aml *dsdt, *sb_scope, *scope, *dev, *method, *field, *pkg, *crs;
GPtrArray *mem_ranges = g_ptr_array_new_with_free_func(crs_range_free);
GPtrArray *io_ranges = g_ptr_array_new_with_free_func(crs_range_free);
- MachineState *machine = MACHINE(qdev_get_machine());
PCMachineState *pcms = PC_MACHINE(machine);
uint32_t nr_mem = machine->ram_slots;
int root_bus_limit = 0xFF;
@@ -1975,9 +2016,9 @@ build_dsdt(GArray *table_data, GArray *linker,
} else {
sb_scope = aml_scope("_SB");
aml_append(sb_scope,
- aml_operation_region("PCST", AML_SYSTEM_IO, 0xae00, 0x0c));
+ aml_operation_region("PCST", AML_SYSTEM_IO, aml_int(0xae00), 0x0c));
aml_append(sb_scope,
- aml_operation_region("PCSB", AML_SYSTEM_IO, 0xae0c, 0x01));
+ aml_operation_region("PCSB", AML_SYSTEM_IO, aml_int(0xae0c), 0x01));
field = aml_field("PCSB", AML_ANY_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS);
aml_append(field, aml_named_field("PCIB", 8));
aml_append(sb_scope, field);
@@ -2252,7 +2293,7 @@ build_dsdt(GArray *table_data, GArray *linker,
aml_append(dev, aml_name_decl("_CRS", crs));
aml_append(dev, aml_operation_region("PEOR", AML_SYSTEM_IO,
- misc->pvpanic_port, 1));
+ aml_int(misc->pvpanic_port), 1));
field = aml_field("PEOR", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("PEPT", 8));
aml_append(dev, field);
@@ -2275,7 +2316,7 @@ build_dsdt(GArray *table_data, GArray *linker,
sb_scope = aml_scope("\\_SB");
{
- build_processor_devices(sb_scope, pcms->apic_id_limit, cpu, pm);
+ build_processor_devices(sb_scope, machine, pm);
build_memory_devices(sb_scope, nr_mem, pm->mem_hp_io_base,
pm->mem_hp_io_len);
@@ -2396,7 +2437,7 @@ acpi_build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base,
}
static void
-build_srat(GArray *table_data, GArray *linker)
+build_srat(GArray *table_data, GArray *linker, MachineState *machine)
{
AcpiSystemResourceAffinityTable *srat;
AcpiSratProcessorAffinity *core;
@@ -2406,7 +2447,9 @@ build_srat(GArray *table_data, GArray *linker)
uint64_t curnode;
int srat_start, numa_start, slots;
uint64_t mem_len, mem_base, next_base;
- PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
+ MachineClass *mc = MACHINE_GET_CLASS(machine);
+ CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine);
+ PCMachineState *pcms = PC_MACHINE(machine);
ram_addr_t hotplugabble_address_space_size =
object_property_get_int(OBJECT(pcms), PC_MACHINE_MEMHP_REGION_SIZE,
NULL);
@@ -2415,14 +2458,15 @@ build_srat(GArray *table_data, GArray *linker)
srat = acpi_data_push(table_data, sizeof *srat);
srat->reserved1 = cpu_to_le32(1);
- core = (void *)(srat + 1);
- for (i = 0; i < pcms->apic_id_limit; ++i) {
+ for (i = 0; i < apic_ids->len; i++) {
+ int apic_id = apic_ids->cpus[i].arch_id;
+
core = acpi_data_push(table_data, sizeof *core);
core->type = ACPI_SRAT_PROCESSOR;
core->length = sizeof(*core);
- core->local_apic_id = i;
- curnode = pcms->node_cpu[i];
+ core->local_apic_id = apic_id;
+ curnode = pcms->node_cpu[apic_id];
core->proximity_lo = curnode;
memset(core->proximity_hi, 0, 3);
core->local_sapic_eid = 0;
@@ -2487,6 +2531,7 @@ build_srat(GArray *table_data, GArray *linker)
(void *)(table_data->data + srat_start),
"SRAT",
table_data->len - srat_start, 1, NULL, NULL);
+ g_free(apic_ids);
}
static void
@@ -2610,21 +2655,13 @@ static bool acpi_has_iommu(void)
return intel_iommu && !ambiguous;
}
-static bool acpi_has_nvdimm(void)
-{
- PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
-
- return pcms->nvdimm;
-}
-
static
-void acpi_build(AcpiBuildTables *tables)
+void acpi_build(AcpiBuildTables *tables, MachineState *machine)
{
- PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
+ PCMachineState *pcms = PC_MACHINE(machine);
PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
GArray *table_offsets;
unsigned facs, dsdt, rsdt, fadt;
- AcpiCpuInfo cpu;
AcpiPmInfo pm;
AcpiMiscInfo misc;
AcpiMcfgInfo mcfg;
@@ -2634,7 +2671,6 @@ void acpi_build(AcpiBuildTables *tables)
GArray *tables_blob = tables->table_data;
AcpiSlicOem slic_oem = { .id = NULL, .table_id = NULL };
- acpi_get_cpu_info(&cpu);
acpi_get_pm_info(&pm);
acpi_get_misc_info(&misc);
acpi_get_pci_info(&pci);
@@ -2658,7 +2694,7 @@ void acpi_build(AcpiBuildTables *tables)
/* DSDT is pointed to by FADT */
dsdt = tables_blob->len;
- build_dsdt(tables_blob, tables->linker, &cpu, &pm, &misc, &pci);
+ build_dsdt(tables_blob, tables->linker, &pm, &misc, &pci, machine);
/* Count the size of the DSDT and SSDT, we will need it for legacy
* sizing of ACPI tables.
@@ -2673,7 +2709,7 @@ void acpi_build(AcpiBuildTables *tables)
aml_len += tables_blob->len - fadt;
acpi_add_table(table_offsets, tables_blob);
- build_madt(tables_blob, tables->linker, &cpu);
+ build_madt(tables_blob, tables->linker, pcms);
if (misc.has_hpet) {
acpi_add_table(table_offsets, tables_blob);
@@ -2690,7 +2726,7 @@ void acpi_build(AcpiBuildTables *tables)
}
if (pcms->numa_nodes) {
acpi_add_table(table_offsets, tables_blob);
- build_srat(tables_blob, tables->linker);
+ build_srat(tables_blob, tables->linker, machine);
}
if (acpi_get_mcfg(&mcfg)) {
acpi_add_table(table_offsets, tables_blob);
@@ -2700,8 +2736,7 @@ void acpi_build(AcpiBuildTables *tables)
acpi_add_table(table_offsets, tables_blob);
build_dmar_q35(tables_blob, tables->linker);
}
-
- if (acpi_has_nvdimm()) {
+ if (pcms->acpi_nvdimm_state.is_enabled) {
nvdimm_build_acpi(table_offsets, tables_blob, tables->linker);
}
@@ -2795,7 +2830,7 @@ static void acpi_build_update(void *build_opaque)
acpi_build_tables_init(&tables);
- acpi_build(&tables);
+ acpi_build(&tables, MACHINE(qdev_get_machine()));
acpi_ram_update(build_state->table_mr, tables.table_data);
@@ -2860,7 +2895,7 @@ void acpi_setup(void)
acpi_set_pci_info();
acpi_build_tables_init(&tables);
- acpi_build(&tables);
+ acpi_build(&tables, MACHINE(pcms));
/* Now expose it all to Guest */
build_state->table_mr = acpi_add_rom_blob(build_state, tables.table_data,
diff --git a/hw/i386/kvm/apic.c b/hw/i386/kvm/apic.c
index 694d398..3c7c8fa 100644
--- a/hw/i386/kvm/apic.c
+++ b/hw/i386/kvm/apic.c
@@ -186,7 +186,7 @@ static void kvm_apic_realize(DeviceState *dev, Error **errp)
APIC_SPACE_SIZE);
if (kvm_has_gsi_routing()) {
- msi_supported = true;
+ msi_nonbroken = true;
}
}
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 56ec6cd..2ac97c4 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -199,7 +199,7 @@ static void pic_irq_request(void *opaque, int irq, int level)
#define REG_EQUIPMENT_BYTE 0x14
-static int cmos_get_fd_drive_type(FloppyDriveType fd0)
+int cmos_get_fd_drive_type(FloppyDriveType fd0)
{
int val;
@@ -699,18 +699,6 @@ static uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index)
}
}
-/* Calculates the limit to CPU APIC ID values
- *
- * This function returns the limit for the APIC ID value, so that all
- * CPU APIC IDs are < pc_apic_id_limit().
- *
- * This is used for FW_CFG_MAX_CPUS. See comments on bochs_bios_init().
- */
-static unsigned int pc_apic_id_limit(unsigned int max_cpus)
-{
- return x86_cpu_apic_id_from_index(max_cpus - 1) + 1;
-}
-
static void pc_build_smbios(FWCfgState *fw_cfg)
{
uint8_t *smbios_tables, *smbios_anchor;
@@ -748,12 +736,11 @@ static void pc_build_smbios(FWCfgState *fw_cfg)
}
}
-static FWCfgState *bochs_bios_init(AddressSpace *as)
+static FWCfgState *bochs_bios_init(AddressSpace *as, PCMachineState *pcms)
{
FWCfgState *fw_cfg;
uint64_t *numa_fw_cfg;
int i, j;
- unsigned int apic_id_limit = pc_apic_id_limit(max_cpus);
fw_cfg = fw_cfg_init_io_dma(FW_CFG_IO_BASE, FW_CFG_IO_BASE + 4, as);
@@ -771,7 +758,7 @@ static FWCfgState *bochs_bios_init(AddressSpace *as)
* [1] The only kind of "CPU identifier" used between SeaBIOS and QEMU is
* the APIC ID, not the "CPU index"
*/
- fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)apic_id_limit);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)pcms->apic_id_limit);
fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
fw_cfg_add_bytes(fw_cfg, FW_CFG_ACPI_TABLES,
acpi_tables, acpi_tables_len);
@@ -789,11 +776,11 @@ static FWCfgState *bochs_bios_init(AddressSpace *as)
* of nodes, one word for each VCPU->node and one word for each node to
* hold the amount of memory.
*/
- numa_fw_cfg = g_new0(uint64_t, 1 + apic_id_limit + nb_numa_nodes);
+ numa_fw_cfg = g_new0(uint64_t, 1 + pcms->apic_id_limit + nb_numa_nodes);
numa_fw_cfg[0] = cpu_to_le64(nb_numa_nodes);
for (i = 0; i < max_cpus; i++) {
unsigned int apic_id = x86_cpu_apic_id_from_index(i);
- assert(apic_id < apic_id_limit);
+ assert(apic_id < pcms->apic_id_limit);
for (j = 0; j < nb_numa_nodes; j++) {
if (test_bit(i, numa_info[j].node_cpu)) {
numa_fw_cfg[apic_id + 1] = cpu_to_le64(j);
@@ -802,10 +789,11 @@ static FWCfgState *bochs_bios_init(AddressSpace *as)
}
}
for (i = 0; i < nb_numa_nodes; i++) {
- numa_fw_cfg[apic_id_limit + 1 + i] = cpu_to_le64(numa_info[i].node_mem);
+ numa_fw_cfg[pcms->apic_id_limit + 1 + i] =
+ cpu_to_le64(numa_info[i].node_mem);
}
fw_cfg_add_bytes(fw_cfg, FW_CFG_NUMA, numa_fw_cfg,
- (1 + apic_id_limit + nb_numa_nodes) *
+ (1 + pcms->apic_id_limit + nb_numa_nodes) *
sizeof(*numa_fw_cfg));
return fw_cfg;
@@ -1119,7 +1107,6 @@ void pc_cpus_init(PCMachineState *pcms)
int i;
X86CPU *cpu = NULL;
MachineState *machine = MACHINE(pcms);
- unsigned long apic_id_limit;
/* init CPUs */
if (machine->cpu_model == NULL) {
@@ -1130,17 +1117,31 @@ void pc_cpus_init(PCMachineState *pcms)
#endif
}
- apic_id_limit = pc_apic_id_limit(max_cpus);
- if (apic_id_limit > ACPI_CPU_HOTPLUG_ID_LIMIT) {
- error_report("max_cpus is too large. APIC ID of last CPU is %lu",
- apic_id_limit - 1);
+ /* Calculate the limit for CPU APIC ID values
+ *
+ * The limit is chosen so that all
+ * CPU APIC IDs are < pcms->apic_id_limit.
+ *
+ * This is used for FW_CFG_MAX_CPUS. See comments on bochs_bios_init().
+ */
+ pcms->apic_id_limit = x86_cpu_apic_id_from_index(max_cpus - 1) + 1;
+ if (pcms->apic_id_limit > ACPI_CPU_HOTPLUG_ID_LIMIT) {
+ error_report("max_cpus is too large. APIC ID of last CPU is %u",
+ pcms->apic_id_limit - 1);
exit(1);
}
- for (i = 0; i < smp_cpus; i++) {
- cpu = pc_new_cpu(machine->cpu_model, x86_cpu_apic_id_from_index(i),
- &error_fatal);
- object_unref(OBJECT(cpu));
+ pcms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
+ sizeof(CPUArchId) * max_cpus);
+ for (i = 0; i < max_cpus; i++) {
+ pcms->possible_cpus->cpus[i].arch_id = x86_cpu_apic_id_from_index(i);
+ pcms->possible_cpus->len++;
+ if (i < smp_cpus) {
+ cpu = pc_new_cpu(machine->cpu_model, x86_cpu_apic_id_from_index(i),
+ &error_fatal);
+ pcms->possible_cpus->cpus[i].cpu = CPU(cpu);
+ object_unref(OBJECT(cpu));
+ }
}
/* tell smbios about cpuid version and features */
@@ -1186,7 +1187,6 @@ void pc_guest_info_init(PCMachineState *pcms)
{
int i, j;
- pcms->apic_id_limit = pc_apic_id_limit(max_cpus);
pcms->apic_xrupt_override = kvm_allows_irq0_override();
pcms->numa_nodes = nb_numa_nodes;
pcms->node_mem = g_malloc0(pcms->numa_nodes *
@@ -1371,7 +1371,7 @@ void pc_memory_init(PCMachineState *pcms,
option_rom_mr,
1);
- fw_cfg = bochs_bios_init(&address_space_memory);
+ fw_cfg = bochs_bios_init(&address_space_memory, pcms);
rom_set_fw(fw_cfg);
@@ -1664,9 +1664,19 @@ static void pc_dimm_unplug(HotplugHandler *hotplug_dev,
error_propagate(errp, local_err);
}
+static int pc_apic_cmp(const void *a, const void *b)
+{
+ CPUArchId *apic_a = (CPUArchId *)a;
+ CPUArchId *apic_b = (CPUArchId *)b;
+
+ return apic_a->arch_id - apic_b->arch_id;
+}
+
static void pc_cpu_plug(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
+ CPUClass *cc = CPU_GET_CLASS(dev);
+ CPUArchId apic_id, *found_cpu;
HotplugHandlerClass *hhc;
Error *local_err = NULL;
PCMachineState *pcms = PC_MACHINE(hotplug_dev);
@@ -1689,6 +1699,13 @@ static void pc_cpu_plug(HotplugHandler *hotplug_dev,
/* increment the number of CPUs */
rtc_set_memory(pcms->rtc, 0x5f, rtc_get_memory(pcms->rtc, 0x5f) + 1);
+
+ apic_id.arch_id = cc->get_arch_id(CPU(dev));
+ found_cpu = bsearch(&apic_id, pcms->possible_cpus->cpus,
+ pcms->possible_cpus->len, sizeof(*pcms->possible_cpus->cpus),
+ pc_apic_cmp);
+ assert(found_cpu);
+ found_cpu->cpu = CPU(dev);
out:
error_propagate(errp, local_err);
}
@@ -1853,14 +1870,14 @@ static bool pc_machine_get_nvdimm(Object *obj, Error **errp)
{
PCMachineState *pcms = PC_MACHINE(obj);
- return pcms->nvdimm;
+ return pcms->acpi_nvdimm_state.is_enabled;
}
static void pc_machine_set_nvdimm(Object *obj, bool value, Error **errp)
{
PCMachineState *pcms = PC_MACHINE(obj);
- pcms->nvdimm = value;
+ pcms->acpi_nvdimm_state.is_enabled = value;
}
static void pc_machine_initfn(Object *obj)
@@ -1899,7 +1916,7 @@ static void pc_machine_initfn(Object *obj)
&error_abort);
/* nvdimm is disabled on default. */
- pcms->nvdimm = false;
+ pcms->acpi_nvdimm_state.is_enabled = false;
object_property_add_bool(obj, PC_MACHINE_NVDIMM, pc_machine_get_nvdimm,
pc_machine_set_nvdimm, &error_abort);
}
@@ -1931,6 +1948,17 @@ static unsigned pc_cpu_index_to_socket_id(unsigned cpu_index)
return topo.pkg_id;
}
+static CPUArchIdList *pc_possible_cpu_arch_ids(MachineState *machine)
+{
+ PCMachineState *pcms = PC_MACHINE(machine);
+ int len = sizeof(CPUArchIdList) +
+ sizeof(CPUArchId) * (pcms->possible_cpus->len);
+ CPUArchIdList *list = g_malloc(len);
+
+ memcpy(list, pcms->possible_cpus, len);
+ return list;
+}
+
static void pc_machine_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -1953,6 +1981,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
pcmc->save_tsc_khz = true;
mc->get_hotplug_handler = pc_get_hotpug_handler;
mc->cpu_index_to_socket_id = pc_cpu_index_to_socket_id;
+ mc->possible_cpu_arch_ids = pc_possible_cpu_arch_ids;
mc->default_boot_order = "cad";
mc->hot_add_cpu = pc_hot_add_cpu;
mc->max_cpus = 255;
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 6f8c2cd..6a69b23 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -274,6 +274,11 @@ static void pc_init1(MachineState *machine,
if (pcmc->pci_enabled) {
pc_pci_device_init(pci_bus);
}
+
+ if (pcms->acpi_nvdimm_state.is_enabled) {
+ nvdimm_init_acpi_state(&pcms->acpi_nvdimm_state, system_io,
+ pcms->fw_cfg, OBJECT(pcms));
+ }
}
/* Looking for a pc_compat_2_4() function? It doesn't exist.
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index 46522c9..17915b0 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -61,6 +61,7 @@ static void pc_q35_init(MachineState *machine)
PCIDevice *lpc;
BusState *idebus[MAX_SATA_PORTS];
ISADevice *rtc_state;
+ MemoryRegion *system_io = get_system_io();
MemoryRegion *pci_memory;
MemoryRegion *rom_memory;
MemoryRegion *ram_memory;
@@ -160,7 +161,7 @@ static void pc_q35_init(MachineState *machine)
q35_host->mch.ram_memory = ram_memory;
q35_host->mch.pci_address_space = pci_memory;
q35_host->mch.system_memory = get_system_memory();
- q35_host->mch.address_space_io = get_system_io();
+ q35_host->mch.address_space_io = system_io;
q35_host->mch.below_4g_mem_size = pcms->below_4g_mem_size;
q35_host->mch.above_4g_mem_size = pcms->above_4g_mem_size;
/* pci */
@@ -251,6 +252,11 @@ static void pc_q35_init(MachineState *machine)
if (pcmc->pci_enabled) {
pc_pci_device_init(host_bus);
}
+
+ if (pcms->acpi_nvdimm_state.is_enabled) {
+ nvdimm_init_acpi_state(&pcms->acpi_nvdimm_state, system_io,
+ pcms->fw_cfg, OBJECT(pcms));
+ }
}
#define DEFINE_Q35_MACHINE(suffix, name, compatfn, optionfn) \
diff --git a/hw/i386/xen/xen_apic.c b/hw/i386/xen/xen_apic.c
index 2b8d709..21d68ee 100644
--- a/hw/i386/xen/xen_apic.c
+++ b/hw/i386/xen/xen_apic.c
@@ -44,7 +44,7 @@ static void xen_apic_realize(DeviceState *dev, Error **errp)
s->vapic_control = 0;
memory_region_init_io(&s->io_memory, OBJECT(s), &xen_apic_io_ops, s,
"xen-apic-msi", APIC_SPACE_SIZE);
- msi_supported = true;
+ msi_nonbroken = true;
}
static void xen_apic_set_base(APICCommonState *s, uint64_t val)
diff --git a/hw/intc/apic.c b/hw/intc/apic.c
index a299462..28c2ea5 100644
--- a/hw/intc/apic.c
+++ b/hw/intc/apic.c
@@ -874,7 +874,7 @@ static void apic_realize(DeviceState *dev, Error **errp)
s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s);
local_apics[s->idx] = s;
- msi_supported = true;
+ msi_nonbroken = true;
}
static void apic_class_init(ObjectClass *klass, void *data)
diff --git a/hw/intc/arm_gicv2m.c b/hw/intc/arm_gicv2m.c
index 70c0b97..ebd368b 100644
--- a/hw/intc/arm_gicv2m.c
+++ b/hw/intc/arm_gicv2m.c
@@ -148,7 +148,7 @@ static void gicv2m_realize(DeviceState *dev, Error **errp)
sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->spi[i]);
}
- msi_supported = true;
+ msi_nonbroken = true;
kvm_gsi_direct_mapping = true;
kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled();
}
diff --git a/hw/intc/openpic.c b/hw/intc/openpic.c
index 903888c..7685250 100644
--- a/hw/intc/openpic.c
+++ b/hw/intc/openpic.c
@@ -1375,7 +1375,7 @@ static void fsl_common_init(OpenPICState *opp)
opp->irq_msi = 224;
- msi_supported = true;
+ msi_nonbroken = true;
for (i = 0; i < opp->fsl->max_ext; i++) {
opp->src[i].level = false;
}
diff --git a/hw/intc/openpic_kvm.c b/hw/intc/openpic_kvm.c
index 4dcdb61..778af4a 100644
--- a/hw/intc/openpic_kvm.c
+++ b/hw/intc/openpic_kvm.c
@@ -239,7 +239,7 @@ static void kvm_openpic_realize(DeviceState *dev, Error **errp)
memory_listener_register(&opp->mem_listener, &address_space_memory);
/* indicate pic capabilities */
- msi_supported = true;
+ msi_nonbroken = true;
kvm_kernel_irqchip = true;
kvm_async_interrupts_allowed = true;
diff --git a/hw/ipmi/ipmi_bmc_sim.c b/hw/ipmi/ipmi_bmc_sim.c
index 51d234a..dc9c14c 100644
--- a/hw/ipmi/ipmi_bmc_sim.c
+++ b/hw/ipmi/ipmi_bmc_sim.c
@@ -153,12 +153,17 @@ typedef struct IPMISensor {
#define IPMI_WATCHDOG_SENSOR 0
typedef struct IPMIBmcSim IPMIBmcSim;
+typedef struct RspBuffer RspBuffer;
#define MAX_NETFNS 64
-typedef void (*IPMICmdHandler)(IPMIBmcSim *s,
- uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len);
+
+typedef struct IPMICmdHandler {
+ void (*cmd_handler)(IPMIBmcSim *s,
+ uint8_t *cmd, unsigned int cmd_len,
+ RspBuffer *rsp);
+ unsigned int cmd_len_min;
+} IPMICmdHandler;
+
typedef struct IPMINetfn {
unsigned int cmd_nums;
const IPMICmdHandler *cmd_handlers;
@@ -258,33 +263,39 @@ struct IPMIBmcSim {
#define IPMI_BMC_WATCHDOG_ACTION_POWER_DOWN 2
#define IPMI_BMC_WATCHDOG_ACTION_POWER_CYCLE 3
+struct RspBuffer {
+ uint8_t buffer[MAX_IPMI_MSG_SIZE];
+ unsigned int len;
+};
+
+#define RSP_BUFFER_INITIALIZER { }
+
+static inline void rsp_buffer_set_error(RspBuffer *rsp, uint8_t byte)
+{
+ rsp->buffer[2] = byte;
+}
/* Add a byte to the response. */
-#define IPMI_ADD_RSP_DATA(b) \
- do { \
- if (*rsp_len >= max_rsp_len) { \
- rsp[2] = IPMI_CC_REQUEST_DATA_TRUNCATED; \
- return; \
- } \
- rsp[(*rsp_len)++] = (b); \
- } while (0)
-
-/* Verify that the received command is a certain length. */
-#define IPMI_CHECK_CMD_LEN(l) \
- if (cmd_len < l) { \
- rsp[2] = IPMI_CC_REQUEST_DATA_LENGTH_INVALID; \
- return; \
- }
-
-/* Check that the reservation in the command is valid. */
-#define IPMI_CHECK_RESERVATION(off, r) \
- do { \
- if ((cmd[off] | (cmd[off + 1] << 8)) != r) { \
- rsp[2] = IPMI_CC_INVALID_RESERVATION; \
- return; \
- } \
- } while (0)
+static inline void rsp_buffer_push(RspBuffer *rsp, uint8_t byte)
+{
+ if (rsp->len >= sizeof(rsp->buffer)) {
+ rsp_buffer_set_error(rsp, IPMI_CC_REQUEST_DATA_TRUNCATED);
+ return;
+ }
+ rsp->buffer[rsp->len++] = byte;
+}
+static inline void rsp_buffer_pushmore(RspBuffer *rsp, uint8_t *bytes,
+ unsigned int n)
+{
+ if (rsp->len + n >= sizeof(rsp->buffer)) {
+ rsp_buffer_set_error(rsp, IPMI_CC_REQUEST_DATA_TRUNCATED);
+ return;
+ }
+
+ memcpy(&rsp->buffer[rsp->len], bytes, n);
+ rsp->len += n;
+}
static void ipmi_sim_handle_timeout(IPMIBmcSim *ibs);
@@ -566,6 +577,28 @@ static int ipmi_register_netfn(IPMIBmcSim *s, unsigned int netfn,
return 0;
}
+static const IPMICmdHandler *ipmi_get_handler(IPMIBmcSim *ibs,
+ unsigned int netfn,
+ unsigned int cmd)
+{
+ const IPMICmdHandler *hdl;
+
+ if (netfn & 1 || netfn >= MAX_NETFNS || !ibs->netfns[netfn / 2]) {
+ return NULL;
+ }
+
+ if (cmd >= ibs->netfns[netfn / 2]->cmd_nums) {
+ return NULL;
+ }
+
+ hdl = &ibs->netfns[netfn / 2]->cmd_handlers[cmd];
+ if (!hdl->cmd_handler) {
+ return NULL;
+ }
+
+ return hdl;
+}
+
static void next_timeout(IPMIBmcSim *ibs)
{
int64_t next;
@@ -586,54 +619,51 @@ static void ipmi_sim_handle_command(IPMIBmc *b,
IPMIBmcSim *ibs = IPMI_BMC_SIMULATOR(b);
IPMIInterface *s = ibs->parent.intf;
IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
- unsigned int netfn;
- uint8_t rsp[MAX_IPMI_MSG_SIZE];
- unsigned int rsp_len_holder = 0;
- unsigned int *rsp_len = &rsp_len_holder;
- unsigned int max_rsp_len = sizeof(rsp);
+ const IPMICmdHandler *hdl;
+ RspBuffer rsp = RSP_BUFFER_INITIALIZER;
/* Set up the response, set the low bit of NETFN. */
/* Note that max_rsp_len must be at least 3 */
- if (max_rsp_len < 3) {
- rsp[2] = IPMI_CC_REQUEST_DATA_TRUNCATED;
+ if (sizeof(rsp.buffer) < 3) {
+ rsp_buffer_set_error(&rsp, IPMI_CC_REQUEST_DATA_TRUNCATED);
goto out;
}
- IPMI_ADD_RSP_DATA(cmd[0] | 0x04);
- IPMI_ADD_RSP_DATA(cmd[1]);
- IPMI_ADD_RSP_DATA(0); /* Assume success */
+ rsp_buffer_push(&rsp, cmd[0] | 0x04);
+ rsp_buffer_push(&rsp, cmd[1]);
+ rsp_buffer_push(&rsp, 0); /* Assume success */
/* If it's too short or it was truncated, return an error. */
if (cmd_len < 2) {
- rsp[2] = IPMI_CC_REQUEST_DATA_LENGTH_INVALID;
+ rsp_buffer_set_error(&rsp, IPMI_CC_REQUEST_DATA_LENGTH_INVALID);
goto out;
}
if (cmd_len > max_cmd_len) {
- rsp[2] = IPMI_CC_REQUEST_DATA_TRUNCATED;
+ rsp_buffer_set_error(&rsp, IPMI_CC_REQUEST_DATA_TRUNCATED);
goto out;
}
if ((cmd[0] & 0x03) != 0) {
/* Only have stuff on LUN 0 */
- rsp[2] = IPMI_CC_COMMAND_INVALID_FOR_LUN;
+ rsp_buffer_set_error(&rsp, IPMI_CC_COMMAND_INVALID_FOR_LUN);
goto out;
}
- netfn = cmd[0] >> 2;
+ hdl = ipmi_get_handler(ibs, cmd[0] >> 2, cmd[1]);
+ if (!hdl) {
+ rsp_buffer_set_error(&rsp, IPMI_CC_INVALID_CMD);
+ goto out;
+ }
- /* Odd netfns are not valid, make sure the command is registered */
- if ((netfn & 1) || !ibs->netfns[netfn / 2] ||
- (cmd[1] >= ibs->netfns[netfn / 2]->cmd_nums) ||
- (!ibs->netfns[netfn / 2]->cmd_handlers[cmd[1]])) {
- rsp[2] = IPMI_CC_INVALID_CMD;
+ if (cmd_len < hdl->cmd_len_min) {
+ rsp_buffer_set_error(&rsp, IPMI_CC_REQUEST_DATA_LENGTH_INVALID);
goto out;
}
- ibs->netfns[netfn / 2]->cmd_handlers[cmd[1]](ibs, cmd, cmd_len, rsp, rsp_len,
- max_rsp_len);
+ hdl->cmd_handler(ibs, cmd, cmd_len, &rsp);
out:
- k->handle_rsp(s, msg_id, rsp, *rsp_len);
+ k->handle_rsp(s, msg_id, rsp.buffer, rsp.len);
next_timeout(ibs);
}
@@ -708,87 +738,82 @@ static void ipmi_sim_handle_timeout(IPMIBmcSim *ibs)
static void chassis_capabilities(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
- IPMI_ADD_RSP_DATA(0);
- IPMI_ADD_RSP_DATA(ibs->parent.slave_addr);
- IPMI_ADD_RSP_DATA(ibs->parent.slave_addr);
- IPMI_ADD_RSP_DATA(ibs->parent.slave_addr);
- IPMI_ADD_RSP_DATA(ibs->parent.slave_addr);
+ rsp_buffer_push(rsp, 0);
+ rsp_buffer_push(rsp, ibs->parent.slave_addr);
+ rsp_buffer_push(rsp, ibs->parent.slave_addr);
+ rsp_buffer_push(rsp, ibs->parent.slave_addr);
+ rsp_buffer_push(rsp, ibs->parent.slave_addr);
}
static void chassis_status(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
- IPMI_ADD_RSP_DATA(0x61); /* Unknown power restore, power is on */
- IPMI_ADD_RSP_DATA(0);
- IPMI_ADD_RSP_DATA(0);
- IPMI_ADD_RSP_DATA(0);
+ rsp_buffer_push(rsp, 0x61); /* Unknown power restore, power is on */
+ rsp_buffer_push(rsp, 0);
+ rsp_buffer_push(rsp, 0);
+ rsp_buffer_push(rsp, 0);
}
static void chassis_control(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
IPMIInterface *s = ibs->parent.intf;
IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
- IPMI_CHECK_CMD_LEN(3);
switch (cmd[2] & 0xf) {
case 0: /* power down */
- rsp[2] = k->do_hw_op(s, IPMI_POWEROFF_CHASSIS, 0);
+ rsp_buffer_set_error(rsp, k->do_hw_op(s, IPMI_POWEROFF_CHASSIS, 0));
break;
case 1: /* power up */
- rsp[2] = k->do_hw_op(s, IPMI_POWERON_CHASSIS, 0);
+ rsp_buffer_set_error(rsp, k->do_hw_op(s, IPMI_POWERON_CHASSIS, 0));
break;
case 2: /* power cycle */
- rsp[2] = k->do_hw_op(s, IPMI_POWERCYCLE_CHASSIS, 0);
+ rsp_buffer_set_error(rsp, k->do_hw_op(s, IPMI_POWERCYCLE_CHASSIS, 0));
break;
case 3: /* hard reset */
- rsp[2] = k->do_hw_op(s, IPMI_RESET_CHASSIS, 0);
+ rsp_buffer_set_error(rsp, k->do_hw_op(s, IPMI_RESET_CHASSIS, 0));
break;
case 4: /* pulse diagnostic interrupt */
- rsp[2] = k->do_hw_op(s, IPMI_PULSE_DIAG_IRQ, 0);
+ rsp_buffer_set_error(rsp, k->do_hw_op(s, IPMI_PULSE_DIAG_IRQ, 0));
break;
case 5: /* soft shutdown via ACPI by overtemp emulation */
- rsp[2] = k->do_hw_op(s,
- IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP, 0);
+ rsp_buffer_set_error(rsp, k->do_hw_op(s,
+ IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP, 0));
break;
default:
- rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD);
return;
}
}
static void chassis_get_sys_restart_cause(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
+
{
- IPMI_ADD_RSP_DATA(ibs->restart_cause & 0xf); /* Restart Cause */
- IPMI_ADD_RSP_DATA(0); /* Channel 0 */
+ rsp_buffer_push(rsp, ibs->restart_cause & 0xf); /* Restart Cause */
+ rsp_buffer_push(rsp, 0); /* Channel 0 */
}
static void get_device_id(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
-{
- IPMI_ADD_RSP_DATA(ibs->device_id);
- IPMI_ADD_RSP_DATA(ibs->device_rev & 0xf);
- IPMI_ADD_RSP_DATA(ibs->fwrev1 & 0x7f);
- IPMI_ADD_RSP_DATA(ibs->fwrev2);
- IPMI_ADD_RSP_DATA(ibs->ipmi_version);
- IPMI_ADD_RSP_DATA(0x07); /* sensor, SDR, and SEL. */
- IPMI_ADD_RSP_DATA(ibs->mfg_id[0]);
- IPMI_ADD_RSP_DATA(ibs->mfg_id[1]);
- IPMI_ADD_RSP_DATA(ibs->mfg_id[2]);
- IPMI_ADD_RSP_DATA(ibs->product_id[0]);
- IPMI_ADD_RSP_DATA(ibs->product_id[1]);
+ RspBuffer *rsp)
+{
+ rsp_buffer_push(rsp, ibs->device_id);
+ rsp_buffer_push(rsp, ibs->device_rev & 0xf);
+ rsp_buffer_push(rsp, ibs->fwrev1 & 0x7f);
+ rsp_buffer_push(rsp, ibs->fwrev2);
+ rsp_buffer_push(rsp, ibs->ipmi_version);
+ rsp_buffer_push(rsp, 0x07); /* sensor, SDR, and SEL. */
+ rsp_buffer_push(rsp, ibs->mfg_id[0]);
+ rsp_buffer_push(rsp, ibs->mfg_id[1]);
+ rsp_buffer_push(rsp, ibs->mfg_id[2]);
+ rsp_buffer_push(rsp, ibs->product_id[0]);
+ rsp_buffer_push(rsp, ibs->product_id[1]);
}
static void set_global_enables(IPMIBmcSim *ibs, uint8_t val)
@@ -807,8 +832,7 @@ static void set_global_enables(IPMIBmcSim *ibs, uint8_t val)
static void cold_reset(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
IPMIInterface *s = ibs->parent.intf;
IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
@@ -823,8 +847,7 @@ static void cold_reset(IPMIBmcSim *ibs,
static void warm_reset(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
IPMIInterface *s = ibs->parent.intf;
IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
@@ -834,89 +857,78 @@ static void warm_reset(IPMIBmcSim *ibs,
}
}
static void set_acpi_power_state(IPMIBmcSim *ibs,
- uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ uint8_t *cmd, unsigned int cmd_len,
+ RspBuffer *rsp)
{
- IPMI_CHECK_CMD_LEN(4);
ibs->acpi_power_state[0] = cmd[2];
ibs->acpi_power_state[1] = cmd[3];
}
static void get_acpi_power_state(IPMIBmcSim *ibs,
- uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ uint8_t *cmd, unsigned int cmd_len,
+ RspBuffer *rsp)
{
- IPMI_ADD_RSP_DATA(ibs->acpi_power_state[0]);
- IPMI_ADD_RSP_DATA(ibs->acpi_power_state[1]);
+ rsp_buffer_push(rsp, ibs->acpi_power_state[0]);
+ rsp_buffer_push(rsp, ibs->acpi_power_state[1]);
}
static void get_device_guid(IPMIBmcSim *ibs,
- uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ uint8_t *cmd, unsigned int cmd_len,
+ RspBuffer *rsp)
{
unsigned int i;
for (i = 0; i < 16; i++) {
- IPMI_ADD_RSP_DATA(ibs->uuid[i]);
+ rsp_buffer_push(rsp, ibs->uuid[i]);
}
}
static void set_bmc_global_enables(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
- IPMI_CHECK_CMD_LEN(3);
set_global_enables(ibs, cmd[2]);
}
static void get_bmc_global_enables(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
- IPMI_ADD_RSP_DATA(ibs->bmc_global_enables);
+ rsp_buffer_push(rsp, ibs->bmc_global_enables);
}
static void clr_msg_flags(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
IPMIInterface *s = ibs->parent.intf;
IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
- IPMI_CHECK_CMD_LEN(3);
ibs->msg_flags &= ~cmd[2];
k->set_atn(s, attn_set(ibs), attn_irq_enabled(ibs));
}
static void get_msg_flags(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
- IPMI_ADD_RSP_DATA(ibs->msg_flags);
+ rsp_buffer_push(rsp, ibs->msg_flags);
}
static void read_evt_msg_buf(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
IPMIInterface *s = ibs->parent.intf;
IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
unsigned int i;
if (!(ibs->msg_flags & IPMI_BMC_MSG_FLAG_EVT_BUF_FULL)) {
- rsp[2] = 0x80;
+ rsp_buffer_set_error(rsp, 0x80);
return;
}
for (i = 0; i < 16; i++) {
- IPMI_ADD_RSP_DATA(ibs->evtbuf[i]);
+ rsp_buffer_push(rsp, ibs->evtbuf[i]);
}
ibs->msg_flags &= ~IPMI_BMC_MSG_FLAG_EVT_BUF_FULL;
k->set_atn(s, attn_set(ibs), attn_irq_enabled(ibs));
@@ -924,21 +936,18 @@ static void read_evt_msg_buf(IPMIBmcSim *ibs,
static void get_msg(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
IPMIRcvBufEntry *msg;
qemu_mutex_lock(&ibs->lock);
if (QTAILQ_EMPTY(&ibs->rcvbufs)) {
- rsp[2] = 0x80; /* Queue empty */
+ rsp_buffer_set_error(rsp, 0x80); /* Queue empty */
goto out;
}
- rsp[3] = 0; /* Channel 0 */
- *rsp_len += 1;
+ rsp_buffer_push(rsp, 0); /* Channel 0 */
msg = QTAILQ_FIRST(&ibs->rcvbufs);
- memcpy(rsp + 4, msg->buf, msg->len);
- *rsp_len += msg->len;
+ rsp_buffer_pushmore(rsp, msg->buf, msg->len);
QTAILQ_REMOVE(&ibs->rcvbufs, msg, entry);
g_free(msg);
@@ -967,8 +976,7 @@ ipmb_checksum(unsigned char *data, int size, unsigned char csum)
static void send_msg(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
IPMIInterface *s = ibs->parent.intf;
IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
@@ -976,18 +984,20 @@ static void send_msg(IPMIBmcSim *ibs,
uint8_t *buf;
uint8_t netfn, rqLun, rsLun, rqSeq;
- IPMI_CHECK_CMD_LEN(3);
-
if (cmd[2] != 0) {
/* We only handle channel 0 with no options */
- rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD);
+ return;
+ }
+
+ if (cmd_len < 10) {
+ rsp_buffer_set_error(rsp, IPMI_CC_REQUEST_DATA_LENGTH_INVALID);
return;
}
- IPMI_CHECK_CMD_LEN(10);
if (cmd[3] != 0x40) {
        /* We only emulate an MC at address 0x40. */
- rsp[2] = 0x83; /* NAK on write */
+ rsp_buffer_set_error(rsp, 0x83); /* NAK on write */
return;
}
@@ -1073,11 +1083,10 @@ static void do_watchdog_reset(IPMIBmcSim *ibs)
static void reset_watchdog_timer(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
if (!ibs->watchdog_initialized) {
- rsp[2] = 0x80;
+ rsp_buffer_set_error(rsp, 0x80);
return;
}
do_watchdog_reset(ibs);
@@ -1085,17 +1094,15 @@ static void reset_watchdog_timer(IPMIBmcSim *ibs,
static void set_watchdog_timer(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
IPMIInterface *s = ibs->parent.intf;
IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
unsigned int val;
- IPMI_CHECK_CMD_LEN(8);
val = cmd[2] & 0x7; /* Validate use */
if (val == 0 || val > 5) {
- rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD);
return;
}
val = cmd[3] & 0x7; /* Validate action */
@@ -1104,22 +1111,22 @@ static void set_watchdog_timer(IPMIBmcSim *ibs,
break;
case IPMI_BMC_WATCHDOG_ACTION_RESET:
- rsp[2] = k->do_hw_op(s, IPMI_RESET_CHASSIS, 1);
+ rsp_buffer_set_error(rsp, k->do_hw_op(s, IPMI_RESET_CHASSIS, 1));
break;
case IPMI_BMC_WATCHDOG_ACTION_POWER_DOWN:
- rsp[2] = k->do_hw_op(s, IPMI_POWEROFF_CHASSIS, 1);
+ rsp_buffer_set_error(rsp, k->do_hw_op(s, IPMI_POWEROFF_CHASSIS, 1));
break;
case IPMI_BMC_WATCHDOG_ACTION_POWER_CYCLE:
- rsp[2] = k->do_hw_op(s, IPMI_POWERCYCLE_CHASSIS, 1);
+ rsp_buffer_set_error(rsp, k->do_hw_op(s, IPMI_POWERCYCLE_CHASSIS, 1));
break;
default:
- rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD);
}
- if (rsp[2]) {
- rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ if (rsp->buffer[2]) {
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD);
return;
}
@@ -1132,14 +1139,14 @@ static void set_watchdog_timer(IPMIBmcSim *ibs,
case IPMI_BMC_WATCHDOG_PRE_NMI:
if (!k->do_hw_op(s, IPMI_SEND_NMI, 1)) {
/* NMI not supported. */
- rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD);
return;
}
break;
default:
/* We don't support PRE_SMI */
- rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD);
return;
}
@@ -1158,194 +1165,193 @@ static void set_watchdog_timer(IPMIBmcSim *ibs,
static void get_watchdog_timer(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
- IPMI_ADD_RSP_DATA(ibs->watchdog_use);
- IPMI_ADD_RSP_DATA(ibs->watchdog_action);
- IPMI_ADD_RSP_DATA(ibs->watchdog_pretimeout);
- IPMI_ADD_RSP_DATA(ibs->watchdog_expired);
+ rsp_buffer_push(rsp, ibs->watchdog_use);
+ rsp_buffer_push(rsp, ibs->watchdog_action);
+ rsp_buffer_push(rsp, ibs->watchdog_pretimeout);
+ rsp_buffer_push(rsp, ibs->watchdog_expired);
if (ibs->watchdog_running) {
long timeout;
timeout = ((ibs->watchdog_expiry - ipmi_getmonotime() + 50000000)
/ 100000000);
- IPMI_ADD_RSP_DATA(timeout & 0xff);
- IPMI_ADD_RSP_DATA((timeout >> 8) & 0xff);
+ rsp_buffer_push(rsp, timeout & 0xff);
+ rsp_buffer_push(rsp, (timeout >> 8) & 0xff);
} else {
- IPMI_ADD_RSP_DATA(0);
- IPMI_ADD_RSP_DATA(0);
+ rsp_buffer_push(rsp, 0);
+ rsp_buffer_push(rsp, 0);
}
}
static void get_sdr_rep_info(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
unsigned int i;
- IPMI_ADD_RSP_DATA(0x51); /* Conform to IPMI 1.5 spec */
- IPMI_ADD_RSP_DATA(ibs->sdr.next_rec_id & 0xff);
- IPMI_ADD_RSP_DATA((ibs->sdr.next_rec_id >> 8) & 0xff);
- IPMI_ADD_RSP_DATA((MAX_SDR_SIZE - ibs->sdr.next_free) & 0xff);
- IPMI_ADD_RSP_DATA(((MAX_SDR_SIZE - ibs->sdr.next_free) >> 8) & 0xff);
+ rsp_buffer_push(rsp, 0x51); /* Conform to IPMI 1.5 spec */
+ rsp_buffer_push(rsp, ibs->sdr.next_rec_id & 0xff);
+ rsp_buffer_push(rsp, (ibs->sdr.next_rec_id >> 8) & 0xff);
+ rsp_buffer_push(rsp, (MAX_SDR_SIZE - ibs->sdr.next_free) & 0xff);
+ rsp_buffer_push(rsp, ((MAX_SDR_SIZE - ibs->sdr.next_free) >> 8) & 0xff);
for (i = 0; i < 4; i++) {
- IPMI_ADD_RSP_DATA(ibs->sdr.last_addition[i]);
+ rsp_buffer_push(rsp, ibs->sdr.last_addition[i]);
}
for (i = 0; i < 4; i++) {
- IPMI_ADD_RSP_DATA(ibs->sdr.last_clear[i]);
+ rsp_buffer_push(rsp, ibs->sdr.last_clear[i]);
}
/* Only modal support, reserve supported */
- IPMI_ADD_RSP_DATA((ibs->sdr.overflow << 7) | 0x22);
+ rsp_buffer_push(rsp, (ibs->sdr.overflow << 7) | 0x22);
}
static void reserve_sdr_rep(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
- IPMI_ADD_RSP_DATA(ibs->sdr.reservation & 0xff);
- IPMI_ADD_RSP_DATA((ibs->sdr.reservation >> 8) & 0xff);
+ rsp_buffer_push(rsp, ibs->sdr.reservation & 0xff);
+ rsp_buffer_push(rsp, (ibs->sdr.reservation >> 8) & 0xff);
}
static void get_sdr(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
unsigned int pos;
uint16_t nextrec;
struct ipmi_sdr_header *sdrh;
- IPMI_CHECK_CMD_LEN(8);
if (cmd[6]) {
- IPMI_CHECK_RESERVATION(2, ibs->sdr.reservation);
+ if ((cmd[2] | (cmd[3] << 8)) != ibs->sdr.reservation) {
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_RESERVATION);
+ return;
+ }
}
+
pos = 0;
if (sdr_find_entry(&ibs->sdr, cmd[4] | (cmd[5] << 8),
&pos, &nextrec)) {
- rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
+ rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT);
return;
}
sdrh = (struct ipmi_sdr_header *) &ibs->sdr.sdr[pos];
if (cmd[6] > ipmi_sdr_length(sdrh)) {
- rsp[2] = IPMI_CC_PARM_OUT_OF_RANGE;
+ rsp_buffer_set_error(rsp, IPMI_CC_PARM_OUT_OF_RANGE);
return;
}
- IPMI_ADD_RSP_DATA(nextrec & 0xff);
- IPMI_ADD_RSP_DATA((nextrec >> 8) & 0xff);
+ rsp_buffer_push(rsp, nextrec & 0xff);
+ rsp_buffer_push(rsp, (nextrec >> 8) & 0xff);
if (cmd[7] == 0xff) {
cmd[7] = ipmi_sdr_length(sdrh) - cmd[6];
}
- if ((cmd[7] + *rsp_len) > max_rsp_len) {
- rsp[2] = IPMI_CC_CANNOT_RETURN_REQ_NUM_BYTES;
+ if ((cmd[7] + rsp->len) > sizeof(rsp->buffer)) {
+ rsp_buffer_set_error(rsp, IPMI_CC_CANNOT_RETURN_REQ_NUM_BYTES);
return;
}
- memcpy(rsp + *rsp_len, ibs->sdr.sdr + pos + cmd[6], cmd[7]);
- *rsp_len += cmd[7];
+
+ rsp_buffer_pushmore(rsp, ibs->sdr.sdr + pos + cmd[6], cmd[7]);
}
static void add_sdr(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
uint16_t recid;
struct ipmi_sdr_header *sdrh = (struct ipmi_sdr_header *) cmd + 2;
if (sdr_add_entry(ibs, sdrh, cmd_len - 2, &recid)) {
- rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD);
return;
}
- IPMI_ADD_RSP_DATA(recid & 0xff);
- IPMI_ADD_RSP_DATA((recid >> 8) & 0xff);
+ rsp_buffer_push(rsp, recid & 0xff);
+ rsp_buffer_push(rsp, (recid >> 8) & 0xff);
}
static void clear_sdr_rep(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
- IPMI_CHECK_CMD_LEN(8);
- IPMI_CHECK_RESERVATION(2, ibs->sdr.reservation);
+ if ((cmd[2] | (cmd[3] << 8)) != ibs->sdr.reservation) {
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_RESERVATION);
+ return;
+ }
+
if (cmd[4] != 'C' || cmd[5] != 'L' || cmd[6] != 'R') {
- rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD);
return;
}
if (cmd[7] == 0xaa) {
ibs->sdr.next_free = 0;
ibs->sdr.overflow = 0;
set_timestamp(ibs, ibs->sdr.last_clear);
- IPMI_ADD_RSP_DATA(1); /* Erasure complete */
+ rsp_buffer_push(rsp, 1); /* Erasure complete */
sdr_inc_reservation(&ibs->sdr);
} else if (cmd[7] == 0) {
- IPMI_ADD_RSP_DATA(1); /* Erasure complete */
+ rsp_buffer_push(rsp, 1); /* Erasure complete */
} else {
- rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD);
return;
}
}
static void get_sel_info(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
unsigned int i, val;
- IPMI_ADD_RSP_DATA(0x51); /* Conform to IPMI 1.5 */
- IPMI_ADD_RSP_DATA(ibs->sel.next_free & 0xff);
- IPMI_ADD_RSP_DATA((ibs->sel.next_free >> 8) & 0xff);
+ rsp_buffer_push(rsp, 0x51); /* Conform to IPMI 1.5 */
+ rsp_buffer_push(rsp, ibs->sel.next_free & 0xff);
+ rsp_buffer_push(rsp, (ibs->sel.next_free >> 8) & 0xff);
val = (MAX_SEL_SIZE - ibs->sel.next_free) * 16;
- IPMI_ADD_RSP_DATA(val & 0xff);
- IPMI_ADD_RSP_DATA((val >> 8) & 0xff);
+ rsp_buffer_push(rsp, val & 0xff);
+ rsp_buffer_push(rsp, (val >> 8) & 0xff);
for (i = 0; i < 4; i++) {
- IPMI_ADD_RSP_DATA(ibs->sel.last_addition[i]);
+ rsp_buffer_push(rsp, ibs->sel.last_addition[i]);
}
for (i = 0; i < 4; i++) {
- IPMI_ADD_RSP_DATA(ibs->sel.last_clear[i]);
+ rsp_buffer_push(rsp, ibs->sel.last_clear[i]);
}
/* Only support Reserve SEL */
- IPMI_ADD_RSP_DATA((ibs->sel.overflow << 7) | 0x02);
+ rsp_buffer_push(rsp, (ibs->sel.overflow << 7) | 0x02);
}
static void reserve_sel(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
- IPMI_ADD_RSP_DATA(ibs->sel.reservation & 0xff);
- IPMI_ADD_RSP_DATA((ibs->sel.reservation >> 8) & 0xff);
+ rsp_buffer_push(rsp, ibs->sel.reservation & 0xff);
+ rsp_buffer_push(rsp, (ibs->sel.reservation >> 8) & 0xff);
}
static void get_sel_entry(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
unsigned int val;
- IPMI_CHECK_CMD_LEN(8);
if (cmd[6]) {
- IPMI_CHECK_RESERVATION(2, ibs->sel.reservation);
+ if ((cmd[2] | (cmd[3] << 8)) != ibs->sel.reservation) {
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_RESERVATION);
+ return;
+ }
}
if (ibs->sel.next_free == 0) {
- rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
+ rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT);
return;
}
if (cmd[6] > 15) {
- rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD);
return;
}
if (cmd[7] == 0xff) {
cmd[7] = 16;
} else if ((cmd[7] + cmd[6]) > 16) {
- rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD);
return;
} else {
cmd[7] += cmd[6];
@@ -1355,86 +1361,83 @@ static void get_sel_entry(IPMIBmcSim *ibs,
if (val == 0xffff) {
val = ibs->sel.next_free - 1;
} else if (val >= ibs->sel.next_free) {
- rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
+ rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT);
return;
}
if ((val + 1) == ibs->sel.next_free) {
- IPMI_ADD_RSP_DATA(0xff);
- IPMI_ADD_RSP_DATA(0xff);
+ rsp_buffer_push(rsp, 0xff);
+ rsp_buffer_push(rsp, 0xff);
} else {
- IPMI_ADD_RSP_DATA((val + 1) & 0xff);
- IPMI_ADD_RSP_DATA(((val + 1) >> 8) & 0xff);
+ rsp_buffer_push(rsp, (val + 1) & 0xff);
+ rsp_buffer_push(rsp, ((val + 1) >> 8) & 0xff);
}
for (; cmd[6] < cmd[7]; cmd[6]++) {
- IPMI_ADD_RSP_DATA(ibs->sel.sel[val][cmd[6]]);
+ rsp_buffer_push(rsp, ibs->sel.sel[val][cmd[6]]);
}
}
static void add_sel_entry(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
- IPMI_CHECK_CMD_LEN(18);
if (sel_add_event(ibs, cmd + 2)) {
- rsp[2] = IPMI_CC_OUT_OF_SPACE;
+ rsp_buffer_set_error(rsp, IPMI_CC_OUT_OF_SPACE);
return;
}
/* sel_add_event fills in the record number. */
- IPMI_ADD_RSP_DATA(cmd[2]);
- IPMI_ADD_RSP_DATA(cmd[3]);
+ rsp_buffer_push(rsp, cmd[2]);
+ rsp_buffer_push(rsp, cmd[3]);
}
static void clear_sel(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
- IPMI_CHECK_CMD_LEN(8);
- IPMI_CHECK_RESERVATION(2, ibs->sel.reservation);
+ if ((cmd[2] | (cmd[3] << 8)) != ibs->sel.reservation) {
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_RESERVATION);
+ return;
+ }
+
if (cmd[4] != 'C' || cmd[5] != 'L' || cmd[6] != 'R') {
- rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD);
return;
}
if (cmd[7] == 0xaa) {
ibs->sel.next_free = 0;
ibs->sel.overflow = 0;
set_timestamp(ibs, ibs->sdr.last_clear);
- IPMI_ADD_RSP_DATA(1); /* Erasure complete */
+ rsp_buffer_push(rsp, 1); /* Erasure complete */
sel_inc_reservation(&ibs->sel);
} else if (cmd[7] == 0) {
- IPMI_ADD_RSP_DATA(1); /* Erasure complete */
+ rsp_buffer_push(rsp, 1); /* Erasure complete */
} else {
- rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD);
return;
}
}
static void get_sel_time(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
uint32_t val;
struct ipmi_time now;
ipmi_gettime(&now);
val = now.tv_sec + ibs->sel.time_offset;
- IPMI_ADD_RSP_DATA(val & 0xff);
- IPMI_ADD_RSP_DATA((val >> 8) & 0xff);
- IPMI_ADD_RSP_DATA((val >> 16) & 0xff);
- IPMI_ADD_RSP_DATA((val >> 24) & 0xff);
+ rsp_buffer_push(rsp, val & 0xff);
+ rsp_buffer_push(rsp, (val >> 8) & 0xff);
+ rsp_buffer_push(rsp, (val >> 16) & 0xff);
+ rsp_buffer_push(rsp, (val >> 24) & 0xff);
}
static void set_sel_time(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
uint32_t val;
struct ipmi_time now;
- IPMI_CHECK_CMD_LEN(6);
val = cmd[2] | (cmd[3] << 8) | (cmd[4] << 16) | (cmd[5] << 24);
ipmi_gettime(&now);
ibs->sel.time_offset = now.tv_sec - ((long) val);
@@ -1442,15 +1445,13 @@ static void set_sel_time(IPMIBmcSim *ibs,
static void set_sensor_evt_enable(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
IPMISensor *sens;
- IPMI_CHECK_CMD_LEN(4);
if ((cmd[2] >= MAX_SENSORS) ||
!IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
- rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
+ rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT);
return;
}
sens = ibs->sensors + cmd[2];
@@ -1486,7 +1487,7 @@ static void set_sensor_evt_enable(IPMIBmcSim *ibs,
}
break;
case 3:
- rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ rsp_buffer_set_error(rsp, IPMI_CC_INVALID_DATA_FIELD);
return;
}
IPMI_SENSOR_SET_RET_STATUS(sens, cmd[3]);
@@ -1494,36 +1495,32 @@ static void set_sensor_evt_enable(IPMIBmcSim *ibs,
static void get_sensor_evt_enable(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
IPMISensor *sens;
- IPMI_CHECK_CMD_LEN(3);
if ((cmd[2] >= MAX_SENSORS) ||
!IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
- rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
+ rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT);
return;
}
sens = ibs->sensors + cmd[2];
- IPMI_ADD_RSP_DATA(IPMI_SENSOR_GET_RET_STATUS(sens));
- IPMI_ADD_RSP_DATA(sens->assert_enable & 0xff);
- IPMI_ADD_RSP_DATA((sens->assert_enable >> 8) & 0xff);
- IPMI_ADD_RSP_DATA(sens->deassert_enable & 0xff);
- IPMI_ADD_RSP_DATA((sens->deassert_enable >> 8) & 0xff);
+ rsp_buffer_push(rsp, IPMI_SENSOR_GET_RET_STATUS(sens));
+ rsp_buffer_push(rsp, sens->assert_enable & 0xff);
+ rsp_buffer_push(rsp, (sens->assert_enable >> 8) & 0xff);
+ rsp_buffer_push(rsp, sens->deassert_enable & 0xff);
+ rsp_buffer_push(rsp, (sens->deassert_enable >> 8) & 0xff);
}
static void rearm_sensor_evts(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
IPMISensor *sens;
- IPMI_CHECK_CMD_LEN(4);
if ((cmd[2] >= MAX_SENSORS) ||
!IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
- rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
+ rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT);
return;
}
sens = ibs->sensors + cmd[2];
@@ -1537,60 +1534,54 @@ static void rearm_sensor_evts(IPMIBmcSim *ibs,
static void get_sensor_evt_status(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
IPMISensor *sens;
- IPMI_CHECK_CMD_LEN(3);
if ((cmd[2] >= MAX_SENSORS) ||
!IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
- rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
+ rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT);
return;
}
sens = ibs->sensors + cmd[2];
- IPMI_ADD_RSP_DATA(sens->reading);
- IPMI_ADD_RSP_DATA(IPMI_SENSOR_GET_RET_STATUS(sens));
- IPMI_ADD_RSP_DATA(sens->assert_states & 0xff);
- IPMI_ADD_RSP_DATA((sens->assert_states >> 8) & 0xff);
- IPMI_ADD_RSP_DATA(sens->deassert_states & 0xff);
- IPMI_ADD_RSP_DATA((sens->deassert_states >> 8) & 0xff);
+ rsp_buffer_push(rsp, sens->reading);
+ rsp_buffer_push(rsp, IPMI_SENSOR_GET_RET_STATUS(sens));
+ rsp_buffer_push(rsp, sens->assert_states & 0xff);
+ rsp_buffer_push(rsp, (sens->assert_states >> 8) & 0xff);
+ rsp_buffer_push(rsp, sens->deassert_states & 0xff);
+ rsp_buffer_push(rsp, (sens->deassert_states >> 8) & 0xff);
}
static void get_sensor_reading(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ RspBuffer *rsp)
{
IPMISensor *sens;
- IPMI_CHECK_CMD_LEN(3);
if ((cmd[2] >= MAX_SENSORS) ||
!IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
- rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
+ rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT);
return;
}
sens = ibs->sensors + cmd[2];
- IPMI_ADD_RSP_DATA(sens->reading);
- IPMI_ADD_RSP_DATA(IPMI_SENSOR_GET_RET_STATUS(sens));
- IPMI_ADD_RSP_DATA(sens->states & 0xff);
+ rsp_buffer_push(rsp, sens->reading);
+ rsp_buffer_push(rsp, IPMI_SENSOR_GET_RET_STATUS(sens));
+ rsp_buffer_push(rsp, sens->states & 0xff);
if (IPMI_SENSOR_IS_DISCRETE(sens)) {
- IPMI_ADD_RSP_DATA((sens->states >> 8) & 0xff);
+ rsp_buffer_push(rsp, (sens->states >> 8) & 0xff);
}
}
static void set_sensor_type(IPMIBmcSim *ibs,
- uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ uint8_t *cmd, unsigned int cmd_len,
+ RspBuffer *rsp)
{
IPMISensor *sens;
- IPMI_CHECK_CMD_LEN(5);
if ((cmd[2] >= MAX_SENSORS) ||
!IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
- rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
+ rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT);
return;
}
sens = ibs->sensors + cmd[2];
@@ -1599,30 +1590,28 @@ static void set_sensor_type(IPMIBmcSim *ibs,
}
static void get_sensor_type(IPMIBmcSim *ibs,
- uint8_t *cmd, unsigned int cmd_len,
- uint8_t *rsp, unsigned int *rsp_len,
- unsigned int max_rsp_len)
+ uint8_t *cmd, unsigned int cmd_len,
+ RspBuffer *rsp)
{
IPMISensor *sens;
- IPMI_CHECK_CMD_LEN(3);
if ((cmd[2] >= MAX_SENSORS) ||
!IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
- rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
+ rsp_buffer_set_error(rsp, IPMI_CC_REQ_ENTRY_NOT_PRESENT);
return;
}
sens = ibs->sensors + cmd[2];
- IPMI_ADD_RSP_DATA(sens->sensor_type);
- IPMI_ADD_RSP_DATA(sens->evt_reading_type_code);
+ rsp_buffer_push(rsp, sens->sensor_type);
+ rsp_buffer_push(rsp, sens->evt_reading_type_code);
}
static const IPMICmdHandler chassis_cmds[] = {
- [IPMI_CMD_GET_CHASSIS_CAPABILITIES] = chassis_capabilities,
- [IPMI_CMD_GET_CHASSIS_STATUS] = chassis_status,
- [IPMI_CMD_CHASSIS_CONTROL] = chassis_control,
- [IPMI_CMD_GET_SYS_RESTART_CAUSE] = chassis_get_sys_restart_cause
+ [IPMI_CMD_GET_CHASSIS_CAPABILITIES] = { chassis_capabilities },
+ [IPMI_CMD_GET_CHASSIS_STATUS] = { chassis_status },
+ [IPMI_CMD_CHASSIS_CONTROL] = { chassis_control, 3 },
+ [IPMI_CMD_GET_SYS_RESTART_CAUSE] = { chassis_get_sys_restart_cause }
};
static const IPMINetfn chassis_netfn = {
.cmd_nums = ARRAY_SIZE(chassis_cmds),
@@ -1630,13 +1619,13 @@ static const IPMINetfn chassis_netfn = {
};
static const IPMICmdHandler sensor_event_cmds[] = {
- [IPMI_CMD_SET_SENSOR_EVT_ENABLE] = set_sensor_evt_enable,
- [IPMI_CMD_GET_SENSOR_EVT_ENABLE] = get_sensor_evt_enable,
- [IPMI_CMD_REARM_SENSOR_EVTS] = rearm_sensor_evts,
- [IPMI_CMD_GET_SENSOR_EVT_STATUS] = get_sensor_evt_status,
- [IPMI_CMD_GET_SENSOR_READING] = get_sensor_reading,
- [IPMI_CMD_SET_SENSOR_TYPE] = set_sensor_type,
- [IPMI_CMD_GET_SENSOR_TYPE] = get_sensor_type,
+ [IPMI_CMD_SET_SENSOR_EVT_ENABLE] = { set_sensor_evt_enable, 4 },
+ [IPMI_CMD_GET_SENSOR_EVT_ENABLE] = { get_sensor_evt_enable, 3 },
+ [IPMI_CMD_REARM_SENSOR_EVTS] = { rearm_sensor_evts, 4 },
+ [IPMI_CMD_GET_SENSOR_EVT_STATUS] = { get_sensor_evt_status, 3 },
+ [IPMI_CMD_GET_SENSOR_READING] = { get_sensor_reading, 3 },
+ [IPMI_CMD_SET_SENSOR_TYPE] = { set_sensor_type, 5 },
+ [IPMI_CMD_GET_SENSOR_TYPE] = { get_sensor_type, 3 },
};
static const IPMINetfn sensor_event_netfn = {
.cmd_nums = ARRAY_SIZE(sensor_event_cmds),
@@ -1644,22 +1633,22 @@ static const IPMINetfn sensor_event_netfn = {
};
static const IPMICmdHandler app_cmds[] = {
- [IPMI_CMD_GET_DEVICE_ID] = get_device_id,
- [IPMI_CMD_COLD_RESET] = cold_reset,
- [IPMI_CMD_WARM_RESET] = warm_reset,
- [IPMI_CMD_SET_ACPI_POWER_STATE] = set_acpi_power_state,
- [IPMI_CMD_GET_ACPI_POWER_STATE] = get_acpi_power_state,
- [IPMI_CMD_GET_DEVICE_GUID] = get_device_guid,
- [IPMI_CMD_SET_BMC_GLOBAL_ENABLES] = set_bmc_global_enables,
- [IPMI_CMD_GET_BMC_GLOBAL_ENABLES] = get_bmc_global_enables,
- [IPMI_CMD_CLR_MSG_FLAGS] = clr_msg_flags,
- [IPMI_CMD_GET_MSG_FLAGS] = get_msg_flags,
- [IPMI_CMD_GET_MSG] = get_msg,
- [IPMI_CMD_SEND_MSG] = send_msg,
- [IPMI_CMD_READ_EVT_MSG_BUF] = read_evt_msg_buf,
- [IPMI_CMD_RESET_WATCHDOG_TIMER] = reset_watchdog_timer,
- [IPMI_CMD_SET_WATCHDOG_TIMER] = set_watchdog_timer,
- [IPMI_CMD_GET_WATCHDOG_TIMER] = get_watchdog_timer,
+ [IPMI_CMD_GET_DEVICE_ID] = { get_device_id },
+ [IPMI_CMD_COLD_RESET] = { cold_reset },
+ [IPMI_CMD_WARM_RESET] = { warm_reset },
+ [IPMI_CMD_SET_ACPI_POWER_STATE] = { set_acpi_power_state, 4 },
+ [IPMI_CMD_GET_ACPI_POWER_STATE] = { get_acpi_power_state },
+ [IPMI_CMD_GET_DEVICE_GUID] = { get_device_guid },
+ [IPMI_CMD_SET_BMC_GLOBAL_ENABLES] = { set_bmc_global_enables, 3 },
+ [IPMI_CMD_GET_BMC_GLOBAL_ENABLES] = { get_bmc_global_enables },
+ [IPMI_CMD_CLR_MSG_FLAGS] = { clr_msg_flags, 3 },
+ [IPMI_CMD_GET_MSG_FLAGS] = { get_msg_flags },
+ [IPMI_CMD_GET_MSG] = { get_msg },
+ [IPMI_CMD_SEND_MSG] = { send_msg, 3 },
+ [IPMI_CMD_READ_EVT_MSG_BUF] = { read_evt_msg_buf },
+ [IPMI_CMD_RESET_WATCHDOG_TIMER] = { reset_watchdog_timer },
+ [IPMI_CMD_SET_WATCHDOG_TIMER] = { set_watchdog_timer, 8 },
+ [IPMI_CMD_GET_WATCHDOG_TIMER] = { get_watchdog_timer },
};
static const IPMINetfn app_netfn = {
.cmd_nums = ARRAY_SIZE(app_cmds),
@@ -1667,18 +1656,18 @@ static const IPMINetfn app_netfn = {
};
static const IPMICmdHandler storage_cmds[] = {
- [IPMI_CMD_GET_SDR_REP_INFO] = get_sdr_rep_info,
- [IPMI_CMD_RESERVE_SDR_REP] = reserve_sdr_rep,
- [IPMI_CMD_GET_SDR] = get_sdr,
- [IPMI_CMD_ADD_SDR] = add_sdr,
- [IPMI_CMD_CLEAR_SDR_REP] = clear_sdr_rep,
- [IPMI_CMD_GET_SEL_INFO] = get_sel_info,
- [IPMI_CMD_RESERVE_SEL] = reserve_sel,
- [IPMI_CMD_GET_SEL_ENTRY] = get_sel_entry,
- [IPMI_CMD_ADD_SEL_ENTRY] = add_sel_entry,
- [IPMI_CMD_CLEAR_SEL] = clear_sel,
- [IPMI_CMD_GET_SEL_TIME] = get_sel_time,
- [IPMI_CMD_SET_SEL_TIME] = set_sel_time,
+ [IPMI_CMD_GET_SDR_REP_INFO] = { get_sdr_rep_info },
+ [IPMI_CMD_RESERVE_SDR_REP] = { reserve_sdr_rep },
+ [IPMI_CMD_GET_SDR] = { get_sdr, 8 },
+ [IPMI_CMD_ADD_SDR] = { add_sdr },
+ [IPMI_CMD_CLEAR_SDR_REP] = { clear_sdr_rep, 8 },
+ [IPMI_CMD_GET_SEL_INFO] = { get_sel_info },
+ [IPMI_CMD_RESERVE_SEL] = { reserve_sel },
+ [IPMI_CMD_GET_SEL_ENTRY] = { get_sel_entry, 8 },
+ [IPMI_CMD_ADD_SEL_ENTRY] = { add_sel_entry, 18 },
+ [IPMI_CMD_CLEAR_SEL] = { clear_sel, 8 },
+ [IPMI_CMD_GET_SEL_TIME] = { get_sel_time, 6 },
+ [IPMI_CMD_SET_SEL_TIME] = { set_sel_time },
};
static const IPMINetfn storage_netfn = {
@@ -1694,17 +1683,42 @@ static void register_cmds(IPMIBmcSim *s)
ipmi_register_netfn(s, IPMI_NETFN_STORAGE, &storage_netfn);
}
-static const uint8_t init_sdrs[] = {
+static uint8_t init_sdrs[] = {
/* Watchdog device */
0x00, 0x00, 0x51, 0x02, 35, 0x20, 0x00, 0x00,
0x23, 0x01, 0x63, 0x00, 0x23, 0x6f, 0x0f, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8,
'W', 'a', 't', 'c', 'h', 'd', 'o', 'g',
- /* End */
- 0xff, 0xff, 0x00, 0x00, 0x00
};
+static void ipmi_sdr_init(IPMIBmcSim *ibs)
+{
+ unsigned int i;
+ int len;
+ size_t sdrs_size;
+ uint8_t *sdrs;
+
+ sdrs_size = sizeof(init_sdrs);
+ sdrs = init_sdrs;
+
+ for (i = 0; i < sdrs_size; i += len) {
+ struct ipmi_sdr_header *sdrh;
+
+ if (i + IPMI_SDR_HEADER_SIZE > sdrs_size) {
+ error_report("Problem with recid 0x%4.4x", i);
+ return;
+ }
+ sdrh = (struct ipmi_sdr_header *) &sdrs[i];
+ len = ipmi_sdr_length(sdrh);
+ if (i + len > sdrs_size) {
+ error_report("Problem with recid 0x%4.4x", i);
+ return;
+ }
+ sdr_add_entry(ibs, sdrh, len, NULL);
+ }
+}
+
static const VMStateDescription vmstate_ipmi_sim = {
.name = TYPE_IPMI_BMC_SIMULATOR,
.version_id = 1,
@@ -1733,11 +1747,10 @@ static const VMStateDescription vmstate_ipmi_sim = {
}
};
-static void ipmi_sim_init(Object *obj)
+static void ipmi_sim_realize(DeviceState *dev, Error **errp)
{
- IPMIBmc *b = IPMI_BMC(obj);
+ IPMIBmc *b = IPMI_BMC(dev);
unsigned int i;
- unsigned int recid;
IPMIBmcSim *ibs = IPMI_BMC_SIMULATOR(b);
qemu_mutex_init(&ibs->lock);
@@ -1754,26 +1767,7 @@ static void ipmi_sim_init(Object *obj)
ibs->sdr.last_clear[i] = 0xff;
}
- for (i = 0;;) {
- struct ipmi_sdr_header *sdrh;
- int len;
- if ((i + IPMI_SDR_HEADER_SIZE) > sizeof(init_sdrs)) {
- error_report("Problem with recid 0x%4.4x", i);
- return;
- }
- sdrh = (struct ipmi_sdr_header *) &init_sdrs[i];
- len = ipmi_sdr_length(sdrh);
- recid = ipmi_sdr_recid(sdrh);
- if (recid == 0xffff) {
- break;
- }
- if ((i + len) > sizeof(init_sdrs)) {
- error_report("Problem with recid 0x%4.4x", i);
- return;
- }
- sdr_add_entry(ibs, sdrh, len, NULL);
- i += len;
- }
+ ipmi_sdr_init(ibs);
ibs->acpi_power_state[0] = 0;
ibs->acpi_power_state[1] = 0;
@@ -1794,8 +1788,10 @@ static void ipmi_sim_init(Object *obj)
static void ipmi_sim_class_init(ObjectClass *oc, void *data)
{
+ DeviceClass *dc = DEVICE_CLASS(oc);
IPMIBmcClass *bk = IPMI_BMC_CLASS(oc);
+ dc->realize = ipmi_sim_realize;
bk->handle_command = ipmi_sim_handle_command;
}
@@ -1803,7 +1799,6 @@ static const TypeInfo ipmi_sim_type = {
.name = TYPE_IPMI_BMC_SIMULATOR,
.parent = TYPE_IPMI_BMC,
.instance_size = sizeof(IPMIBmcSim),
- .instance_init = ipmi_sim_init,
.class_init = ipmi_sim_class_init,
};
diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c
index 4e896b2..0ee29c0 100644
--- a/hw/isa/lpc_ich9.c
+++ b/hw/isa/lpc_ich9.c
@@ -409,18 +409,18 @@ ich9_lpc_pmbase_update(ICH9LPCState *lpc)
ich9_pm_iospace_update(&lpc->pm, pm_io_base);
}
-/* config:RBCA */
-static void ich9_lpc_rcba_update(ICH9LPCState *lpc, uint32_t rbca_old)
+/* config:RCBA */
+static void ich9_lpc_rcba_update(ICH9LPCState *lpc, uint32_t rcba_old)
{
- uint32_t rbca = pci_get_long(lpc->d.config + ICH9_LPC_RCBA);
+ uint32_t rcba = pci_get_long(lpc->d.config + ICH9_LPC_RCBA);
- if (rbca_old & ICH9_LPC_RCBA_EN) {
- memory_region_del_subregion(get_system_memory(), &lpc->rbca_mem);
+ if (rcba_old & ICH9_LPC_RCBA_EN) {
+ memory_region_del_subregion(get_system_memory(), &lpc->rcrb_mem);
}
- if (rbca & ICH9_LPC_RCBA_EN) {
- memory_region_add_subregion_overlap(get_system_memory(),
- rbca & ICH9_LPC_RCBA_BA_MASK,
- &lpc->rbca_mem, 1);
+ if (rcba & ICH9_LPC_RCBA_EN) {
+ memory_region_add_subregion_overlap(get_system_memory(),
+ rcba & ICH9_LPC_RCBA_BA_MASK,
+ &lpc->rcrb_mem, 1);
}
}
@@ -444,7 +444,7 @@ static int ich9_lpc_post_load(void *opaque, int version_id)
ICH9LPCState *lpc = opaque;
ich9_lpc_pmbase_update(lpc);
- ich9_lpc_rcba_update(lpc, 0 /* disabled ICH9_LPC_RBCA_EN */);
+ ich9_lpc_rcba_update(lpc, 0 /* disabled ICH9_LPC_RCBA_EN */);
ich9_lpc_pmcon_update(lpc);
return 0;
}
@@ -453,14 +453,14 @@ static void ich9_lpc_config_write(PCIDevice *d,
uint32_t addr, uint32_t val, int len)
{
ICH9LPCState *lpc = ICH9_LPC_DEVICE(d);
- uint32_t rbca_old = pci_get_long(d->config + ICH9_LPC_RCBA);
+ uint32_t rcba_old = pci_get_long(d->config + ICH9_LPC_RCBA);
pci_default_write_config(d, addr, val, len);
if (ranges_overlap(addr, len, ICH9_LPC_PMBASE, 4)) {
ich9_lpc_pmbase_update(lpc);
}
if (ranges_overlap(addr, len, ICH9_LPC_RCBA, 4)) {
- ich9_lpc_rcba_update(lpc, rbca_old);
+ ich9_lpc_rcba_update(lpc, rcba_old);
}
if (ranges_overlap(addr, len, ICH9_LPC_PIRQA_ROUT, 4)) {
pci_bus_fire_intx_routing_notifier(lpc->d.bus);
@@ -477,7 +477,7 @@ static void ich9_lpc_reset(DeviceState *qdev)
{
PCIDevice *d = PCI_DEVICE(qdev);
ICH9LPCState *lpc = ICH9_LPC_DEVICE(d);
- uint32_t rbca_old = pci_get_long(d->config + ICH9_LPC_RCBA);
+ uint32_t rcba_old = pci_get_long(d->config + ICH9_LPC_RCBA);
int i;
for (i = 0; i < 4; i++) {
@@ -496,13 +496,14 @@ static void ich9_lpc_reset(DeviceState *qdev)
ich9_cc_reset(lpc);
ich9_lpc_pmbase_update(lpc);
- ich9_lpc_rcba_update(lpc, rbca_old);
+ ich9_lpc_rcba_update(lpc, rcba_old);
lpc->sci_level = 0;
lpc->rst_cnt = 0;
}
-static const MemoryRegionOps rbca_mmio_ops = {
+/* root complex register block is mapped into memory space */
+static const MemoryRegionOps rcrb_mmio_ops = {
.read = ich9_cc_read,
.write = ich9_cc_write,
.endianness = DEVICE_LITTLE_ENDIAN,
@@ -616,8 +617,8 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp)
pci_set_long(d->wmask + ICH9_LPC_PMBASE,
ICH9_LPC_PMBASE_BASE_ADDRESS_MASK);
- memory_region_init_io(&lpc->rbca_mem, OBJECT(d), &rbca_mmio_ops, lpc,
- "lpc-rbca-mmio", ICH9_CC_SIZE);
+ memory_region_init_io(&lpc->rcrb_mem, OBJECT(d), &rcrb_mmio_ops, lpc,
+ "lpc-rcrb-mmio", ICH9_CC_SIZE);
lpc->isa_bus = isa_bus;
diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c
index 650f0f8..973bf20 100644
--- a/hw/mem/pc-dimm.c
+++ b/hw/mem/pc-dimm.c
@@ -364,15 +364,22 @@ static void pc_dimm_check_memdev_is_busy(Object *obj, const char *name,
Object *val, Error **errp)
{
MemoryRegion *mr;
+ Error *local_err = NULL;
- mr = host_memory_backend_get_memory(MEMORY_BACKEND(val), errp);
+ mr = host_memory_backend_get_memory(MEMORY_BACKEND(val), &local_err);
+ if (local_err) {
+ goto out;
+ }
if (memory_region_is_mapped(mr)) {
char *path = object_get_canonical_path_component(val);
- error_setg(errp, "can't use already busy memdev: %s", path);
+ error_setg(&local_err, "can't use already busy memdev: %s", path);
g_free(path);
} else {
- qdev_prop_allow_set_link_before_realize(obj, name, val, errp);
+ qdev_prop_allow_set_link_before_realize(obj, name, val, &local_err);
}
+
+out:
+ error_propagate(errp, local_err);
}
static void pc_dimm_init(Object *obj)
diff --git a/hw/pci-bridge/pci_bridge_dev.c b/hw/pci-bridge/pci_bridge_dev.c
index 100bb5e..862a236 100644
--- a/hw/pci-bridge/pci_bridge_dev.c
+++ b/hw/pci-bridge/pci_bridge_dev.c
@@ -72,7 +72,7 @@ static int pci_bridge_dev_initfn(PCIDevice *dev)
goto slotid_error;
}
if ((bridge_dev->flags & (1 << PCI_BRIDGE_DEV_F_MSI_REQ)) &&
- msi_supported) {
+ msi_nonbroken) {
err = msi_init(dev, 0, 1, true, true);
if (err < 0) {
goto msi_error;
diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c
index d23b8da..5e7e546 100644
--- a/hw/pci-bridge/pci_expander_bridge.c
+++ b/hw/pci-bridge/pci_expander_bridge.c
@@ -283,7 +283,7 @@ static void pxb_dev_exitfn(PCIDevice *pci_dev)
}
static Property pxb_dev_properties[] = {
- /* Note: 0 is not a legal a PXB bus number. */
+ /* Note: 0 is not a legal PXB bus number. */
DEFINE_PROP_UINT8("bus_nr", PXBDev, bus_nr, 0),
DEFINE_PROP_UINT16("numa_node", PXBDev, numa_node, NUMA_NODE_UNASSIGNED),
DEFINE_PROP_END_OF_LIST(),
diff --git a/hw/pci/msi.c b/hw/pci/msi.c
index 85f21b8..e0e64c2 100644
--- a/hw/pci/msi.c
+++ b/hw/pci/msi.c
@@ -34,8 +34,21 @@
#define PCI_MSI_VECTORS_MAX 32
-/* Flag for interrupt controller to declare MSI/MSI-X support */
-bool msi_supported;
+/*
+ * Flag for interrupt controllers to declare broken MSI/MSI-X support.
+ * values: false - broken; true - non-broken.
+ *
+ * Setting this flag to false will remove MSI/MSI-X capability from all devices.
+ *
+ * It is preferable for controllers to set this to true (non-broken) even if
+ * they do not actually support MSI/MSI-X: guests normally probe the controller
+ * type and do not attempt to enable MSI/MSI-X with interrupt controllers not
+ * supporting such, so removing the capability is not required, and
+ * it seems cleaner to have a given device look the same for all boards.
+ *
+ * TODO: some existing controllers violate the above rule. Identify and fix them.
+ */
+bool msi_nonbroken;
/* If we get rid of cap allocator, we won't need this. */
static inline uint8_t msi_cap_sizeof(uint16_t flags)
@@ -160,7 +173,7 @@ int msi_init(struct PCIDevice *dev, uint8_t offset,
uint8_t cap_size;
int config_offset;
- if (!msi_supported) {
+ if (!msi_nonbroken) {
return -ENOTSUP;
}
diff --git a/hw/pci/msix.c b/hw/pci/msix.c
index 537fdba..b75f0e9 100644
--- a/hw/pci/msix.c
+++ b/hw/pci/msix.c
@@ -249,7 +249,7 @@ int msix_init(struct PCIDevice *dev, unsigned short nentries,
uint8_t *config;
/* Nothing to do if MSI is not supported by interrupt controller */
- if (!msi_supported) {
+ if (!msi_nonbroken) {
return -ENOTSUP;
}
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 64c4acc..298171a 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -439,7 +439,7 @@ static void *spapr_create_fdt_skel(hwaddr initrd_base,
_FDT((fdt_property_cell(fdt, "rtas-event-scan-rate",
RTAS_EVENT_SCAN_RATE)));
- if (msi_supported) {
+ if (msi_nonbroken) {
_FDT((fdt_property(fdt, "ibm,change-msix-capable", NULL, 0)));
}
@@ -1743,7 +1743,7 @@ static void ppc_spapr_init(MachineState *machine)
bool kernel_le = false;
char *filename;
- msi_supported = true;
+ msi_nonbroken = true;
QLIST_INIT(&spapr->phbs);
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index e8edad3..3fc7895 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -1790,7 +1790,7 @@ void spapr_pci_rtas_init(void)
rtas_ibm_read_pci_config);
spapr_rtas_register(RTAS_IBM_WRITE_PCI_CONFIG, "ibm,write-pci-config",
rtas_ibm_write_pci_config);
- if (msi_supported) {
+ if (msi_nonbroken) {
spapr_rtas_register(RTAS_IBM_QUERY_INTERRUPT_SOURCE_NUMBER,
"ibm,query-interrupt-source-number",
rtas_ibm_query_interrupt_source_number);
diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c
index dba0202..f5f679f 100644
--- a/hw/s390x/s390-pci-bus.c
+++ b/hw/s390x/s390-pci-bus.c
@@ -597,7 +597,7 @@ static void s390_pcihost_class_init(ObjectClass *klass, void *data)
k->init = s390_pcihost_init;
hc->plug = s390_pcihost_hot_plug;
hc->unplug = s390_pcihost_hot_unplug;
- msi_supported = true;
+ msi_nonbroken = true;
}
static const TypeInfo s390_pcihost_info = {
diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c
index e9c30e9..22ad25c 100644
--- a/hw/virtio/virtio-balloon.c
+++ b/hw/virtio/virtio-balloon.c
@@ -53,6 +53,7 @@ static const char *balloon_stat_names[] = {
[VIRTIO_BALLOON_S_MINFLT] = "stat-minor-faults",
[VIRTIO_BALLOON_S_MEMFREE] = "stat-free-memory",
[VIRTIO_BALLOON_S_MEMTOT] = "stat-total-memory",
+ [VIRTIO_BALLOON_S_AVAIL] = "stat-available-memory",
[VIRTIO_BALLOON_S_NR] = NULL
};
@@ -101,7 +102,7 @@ static void balloon_stats_poll_cb(void *opaque)
VirtIOBalloon *s = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(s);
- if (!balloon_stats_supported(s)) {
+ if (s->stats_vq_elem == NULL || !balloon_stats_supported(s)) {
/* re-schedule */
balloon_stats_change_timer(s, s->stats_poll_interval);
return;
@@ -258,11 +259,20 @@ static void virtio_balloon_receive_stats(VirtIODevice *vdev, VirtQueue *vq)
size_t offset = 0;
qemu_timeval tv;
- s->stats_vq_elem = elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+ elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) {
goto out;
}
+ if (s->stats_vq_elem != NULL) {
+ /* This should never happen if the driver follows the spec. */
+ virtqueue_push(vq, s->stats_vq_elem, 0);
+ virtio_notify(vdev, vq);
+ g_free(s->stats_vq_elem);
+ }
+
+ s->stats_vq_elem = elem;
+
/* Initialize the stats to get rid of any stale values. This is only
* needed to handle the case where a guest supports fewer stats than it
* used to (ie. it has booted into an old kernel).
@@ -458,6 +468,16 @@ static void virtio_balloon_device_unrealize(DeviceState *dev, Error **errp)
virtio_cleanup(vdev);
}
+static void virtio_balloon_device_reset(VirtIODevice *vdev)
+{
+ VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
+
+ if (s->stats_vq_elem != NULL) {
+ g_free(s->stats_vq_elem);
+ s->stats_vq_elem = NULL;
+ }
+}
+
static void virtio_balloon_instance_init(Object *obj)
{
VirtIOBalloon *s = VIRTIO_BALLOON(obj);
@@ -486,6 +506,7 @@ static void virtio_balloon_class_init(ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
vdc->realize = virtio_balloon_device_realize;
vdc->unrealize = virtio_balloon_device_unrealize;
+ vdc->reset = virtio_balloon_device_reset;
vdc->get_config = virtio_balloon_get_config;
vdc->set_config = virtio_balloon_set_config;
vdc->get_features = virtio_balloon_get_features;
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 440776c..0dadb66 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -47,6 +47,7 @@
static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
VirtIOPCIProxy *dev);
+static void virtio_pci_reset(DeviceState *qdev);
/* virtio device */
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
@@ -404,9 +405,7 @@ static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
case VIRTIO_PCI_QUEUE_PFN:
pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
if (pa == 0) {
- virtio_pci_stop_ioeventfd(proxy);
- virtio_reset(vdev);
- msix_unuse_all_vectors(&proxy->pci_dev);
+ virtio_pci_reset(DEVICE(proxy));
}
else
virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
@@ -432,8 +431,7 @@ static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
}
if (vdev->status == 0) {
- virtio_reset(vdev);
- msix_unuse_all_vectors(&proxy->pci_dev);
+ virtio_pci_reset(DEVICE(proxy));
}
/* Linux before 2.6.34 drives the device without enabling
@@ -1353,8 +1351,7 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr,
}
if (vdev->status == 0) {
- virtio_reset(vdev);
- msix_unuse_all_vectors(&proxy->pci_dev);
+ virtio_pci_reset(DEVICE(proxy));
}
break;
diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h
index e096e98..e4548c2 100644
--- a/hw/virtio/virtio-pci.h
+++ b/hw/virtio/virtio-pci.h
@@ -58,30 +58,33 @@ typedef struct VirtioBusClass VirtioPCIBusClass;
#define VIRTIO_PCI_BUS_CLASS(klass) \
OBJECT_CLASS_CHECK(VirtioPCIBusClass, klass, TYPE_VIRTIO_PCI_BUS)
+enum {
+ VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT,
+ VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT,
+ VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT,
+ VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT,
+ VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT,
+ VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT,
+ VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT,
+};
+
/* Need to activate work-arounds for buggy guests at vmstate load. */
-#define VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT 0
#define VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION \
(1 << VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT)
/* Performance improves when virtqueue kick processing is decoupled from the
* vcpu thread using ioeventfd for some devices. */
-#define VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT 1
#define VIRTIO_PCI_FLAG_USE_IOEVENTFD (1 << VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT)
/* virtio version flags */
-#define VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT 2
-#define VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT 3
-#define VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT 4
#define VIRTIO_PCI_FLAG_DISABLE_LEGACY (1 << VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT)
#define VIRTIO_PCI_FLAG_DISABLE_MODERN (1 << VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT)
#define VIRTIO_PCI_FLAG_DISABLE_PCIE (1 << VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT)
/* migrate extra state */
-#define VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT 4
#define VIRTIO_PCI_FLAG_MIGRATE_EXTRA (1 << VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT)
/* have pio notification for modern device ? */
-#define VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT 5
#define VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY \
(1 << VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT)