author     Peter Maydell <peter.maydell@linaro.org>  2015-12-23 12:27:51 +0000
committer  Peter Maydell <peter.maydell@linaro.org>  2015-12-23 12:27:51 +0000
commit     5fbba56073828759b87d36cff3d31922a0e6f17e (patch)
tree       5128df72067614610d350b8a0f3d0e35f8d7788d /hw
parent     05bec7eb0e6df6c166caea761b311d5c8c2b4cb8 (diff)
parent     5530427f0ca240b972f25ef0413fb966f96dfb05 (diff)
Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging
acpi, pc features

pxb support for q35
nvdimm support
most of ipmi support
part of DSDT rewrite

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Tue 22 Dec 2015 16:47:18 GMT using RSA key ID D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"

* remotes/mst/tags/for_upstream: (55 commits)
  acpi: extend aml_and() to accept target argument
  acpi: extend aml_or() to accept target argument
  acpi add aml_dma()
  acpi: add aml_to_buffer()
  acpi: add aml_to_hexstring()
  acpi: extend aml_field() to support LockRule
  acpi: add aml_lgreater()
  acpi: add aml_lor()
  acpi: add aml_sleep()
  acpi: add aml_alias()
  acpi: extend aml_shiftright() to accept target argument
  acpi: add aml_to_integer()
  acpi: add aml_call0() helper
  acpi: add aml_decrement() and aml_subtract()
  acpi: extend aml_add() to accept target argument
  acpi: aml: add helper for Opcode Arg2 Arg2 [Dst] AML pattern
  acpi: add aml_create_qword_field()
  acpi: add aml_mutex(), aml_acquire(), aml_release()
  acpi: add aml_lgreater_equal()
  acpi: add aml_sizeof
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
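The headline API change in this pull is that several two-operand AML constructors (aml_and(), aml_or(), aml_add(), aml_shiftright()) gain an optional target argument. A minimal, illustrative sketch of the new calling convention; the method name and operands are hypothetical, only the helper signatures are taken from the diff below:

    /* Illustrative only: emit "And (CTRL, 0x1D, CTRL)" via the new target argument. */
    Aml *method = aml_method("_OSC", 4, AML_NOTSERIALIZED);
    aml_append(method,
               aml_and(aml_name("CTRL"), aml_int(0x1D), aml_name("CTRL")));
    /* Passing NULL keeps the old two-operand behaviour; the result can still
     * be placed explicitly with aml_store(). */
    aml_append(method,
               aml_store(aml_add(aml_arg(0), aml_int(1), NULL), aml_local(0)));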
Diffstat (limited to 'hw')
-rw-r--r--  hw/Makefile.objs                         1
-rw-r--r--  hw/acpi/Makefile.objs                    1
-rw-r--r--  hw/acpi/aml-build.c                    270
-rw-r--r--  hw/acpi/memory_hotplug.c                 5
-rw-r--r--  hw/acpi/nvdimm.c                       488
-rw-r--r--  hw/arm/virt-acpi-build.c                21
-rw-r--r--  hw/core/loader.c                        10
-rw-r--r--  hw/core/machine.c                        1
-rw-r--r--  hw/i386/acpi-build.c                   181
-rw-r--r--  hw/i386/pc.c                            53
-rw-r--r--  hw/i386/pc_piix.c                      157
-rw-r--r--  hw/i386/pc_q35.c                        85
-rw-r--r--  hw/ipmi/Makefile.objs                    5
-rw-r--r--  hw/ipmi/ipmi.c                         152
-rw-r--r--  hw/ipmi/ipmi_bmc_extern.c              518
-rw-r--r--  hw/ipmi/ipmi_bmc_sim.c                1756
-rw-r--r--  hw/ipmi/isa_ipmi_bt.c                  528
-rw-r--r--  hw/ipmi/isa_ipmi_kcs.c                 493
-rw-r--r--  hw/mem/Makefile.objs                     1
-rw-r--r--  hw/mem/nvdimm.c                         46
-rw-r--r--  hw/pci-bridge/pci_expander_bridge.c     98
21 files changed, 4592 insertions, 278 deletions
diff --git a/hw/Makefile.objs b/hw/Makefile.objs
index 7e7c241..4a07ed4 100644
--- a/hw/Makefile.objs
+++ b/hw/Makefile.objs
@@ -13,6 +13,7 @@ devices-dirs-$(CONFIG_SOFTMMU) += ide/
devices-dirs-$(CONFIG_SOFTMMU) += input/
devices-dirs-$(CONFIG_SOFTMMU) += intc/
devices-dirs-$(CONFIG_IPACK) += ipack/
+devices-dirs-$(CONFIG_IPMI) += ipmi/
devices-dirs-$(CONFIG_SOFTMMU) += isa/
devices-dirs-$(CONFIG_SOFTMMU) += misc/
devices-dirs-$(CONFIG_SOFTMMU) += net/
diff --git a/hw/acpi/Makefile.objs b/hw/acpi/Makefile.objs
index 7d3230c..095597f 100644
--- a/hw/acpi/Makefile.objs
+++ b/hw/acpi/Makefile.objs
@@ -2,6 +2,7 @@ common-obj-$(CONFIG_ACPI_X86) += core.o piix4.o pcihp.o
common-obj-$(CONFIG_ACPI_X86_ICH) += ich9.o tco.o
common-obj-$(CONFIG_ACPI_CPU_HOTPLUG) += cpu_hotplug.o
common-obj-$(CONFIG_ACPI_MEMORY_HOTPLUG) += memory_hotplug.o
+common-obj-$(CONFIG_ACPI_NVDIMM) += nvdimm.o
common-obj-$(CONFIG_ACPI) += acpi_interface.o
common-obj-$(CONFIG_ACPI) += bios-linker-loader.o
common-obj-$(CONFIG_ACPI) += aml-build.o
diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index 302cf01..78e1290 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -427,6 +427,41 @@ Aml *aml_arg(int pos)
return var;
}
+/* ACPI 2.0a: 17.2.4.4 Type 2 Opcodes Encoding: DefToInteger */
+Aml *aml_to_integer(Aml *arg)
+{
+ Aml *var = aml_opcode(0x99 /* ToIntegerOp */);
+ aml_append(var, arg);
+ build_append_byte(var->buf, 0x00 /* NullNameOp */);
+ return var;
+}
+
+/* ACPI 2.0a: 17.2.4.4 Type 2 Opcodes Encoding: DefToHexString */
+Aml *aml_to_hexstring(Aml *src, Aml *dst)
+{
+ Aml *var = aml_opcode(0x98 /* ToHexStringOp */);
+ aml_append(var, src);
+ if (dst) {
+ aml_append(var, dst);
+ } else {
+ build_append_byte(var->buf, 0x00 /* NullNameOp */);
+ }
+ return var;
+}
+
+/* ACPI 2.0a: 17.2.4.4 Type 2 Opcodes Encoding: DefToBuffer */
+Aml *aml_to_buffer(Aml *src, Aml *dst)
+{
+ Aml *var = aml_opcode(0x96 /* ToBufferOp */);
+ aml_append(var, src);
+ if (dst) {
+ aml_append(var, dst);
+ } else {
+ build_append_byte(var->buf, 0x00 /* NullNameOp */);
+ }
+ return var;
+}
+
/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefStore */
Aml *aml_store(Aml *val, Aml *target)
{
@@ -436,44 +471,64 @@ Aml *aml_store(Aml *val, Aml *target)
return var;
}
-/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefAnd */
-Aml *aml_and(Aml *arg1, Aml *arg2)
+/**
+ * build_opcode_2arg_dst:
+ * @op: 1-byte opcode
+ * @arg1: 1st operand
+ * @arg2: 2nd operand
+ * @dst: optional target to store to, set to NULL if it's not required
+ *
+ * An internal helper to compose AML terms that have
+ * "Op Operand Operand Target"
+ * pattern.
+ *
+ * Returns: The newly allocated and composed according to pattern Aml object.
+ */
+static Aml *
+build_opcode_2arg_dst(uint8_t op, Aml *arg1, Aml *arg2, Aml *dst)
{
- Aml *var = aml_opcode(0x7B /* AndOp */);
+ Aml *var = aml_opcode(op);
aml_append(var, arg1);
aml_append(var, arg2);
- build_append_byte(var->buf, 0x00 /* NullNameOp */);
+ if (dst) {
+ aml_append(var, dst);
+ } else {
+ build_append_byte(var->buf, 0x00 /* NullNameOp */);
+ }
return var;
}
+/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefAnd */
+Aml *aml_and(Aml *arg1, Aml *arg2, Aml *dst)
+{
+ return build_opcode_2arg_dst(0x7B /* AndOp */, arg1, arg2, dst);
+}
+
/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefOr */
-Aml *aml_or(Aml *arg1, Aml *arg2)
+Aml *aml_or(Aml *arg1, Aml *arg2, Aml *dst)
+{
+ return build_opcode_2arg_dst(0x7D /* OrOp */, arg1, arg2, dst);
+}
+
+/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefLOr */
+Aml *aml_lor(Aml *arg1, Aml *arg2)
{
- Aml *var = aml_opcode(0x7D /* OrOp */);
+ Aml *var = aml_opcode(0x91 /* LOrOp */);
aml_append(var, arg1);
aml_append(var, arg2);
- build_append_byte(var->buf, 0x00 /* NullNameOp */);
return var;
}
/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefShiftLeft */
Aml *aml_shiftleft(Aml *arg1, Aml *count)
{
- Aml *var = aml_opcode(0x79 /* ShiftLeftOp */);
- aml_append(var, arg1);
- aml_append(var, count);
- build_append_byte(var->buf, 0x00); /* NullNameOp */
- return var;
+ return build_opcode_2arg_dst(0x79 /* ShiftLeftOp */, arg1, count, NULL);
}
/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefShiftRight */
-Aml *aml_shiftright(Aml *arg1, Aml *count)
+Aml *aml_shiftright(Aml *arg1, Aml *count, Aml *dst)
{
- Aml *var = aml_opcode(0x7A /* ShiftRightOp */);
- aml_append(var, arg1);
- aml_append(var, count);
- build_append_byte(var->buf, 0x00); /* NullNameOp */
- return var;
+ return build_opcode_2arg_dst(0x7A /* ShiftRightOp */, arg1, count, dst);
}
/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefLLess */
@@ -486,13 +541,15 @@ Aml *aml_lless(Aml *arg1, Aml *arg2)
}
/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefAdd */
-Aml *aml_add(Aml *arg1, Aml *arg2)
+Aml *aml_add(Aml *arg1, Aml *arg2, Aml *dst)
{
- Aml *var = aml_opcode(0x72 /* AddOp */);
- aml_append(var, arg1);
- aml_append(var, arg2);
- build_append_byte(var->buf, 0x00 /* NullNameOp */);
- return var;
+ return build_opcode_2arg_dst(0x72 /* AddOp */, arg1, arg2, dst);
+}
+
+/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefSubtract */
+Aml *aml_subtract(Aml *arg1, Aml *arg2, Aml *dst)
+{
+ return build_opcode_2arg_dst(0x74 /* SubtractOp */, arg1, arg2, dst);
}
/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefIncrement */
@@ -503,14 +560,18 @@ Aml *aml_increment(Aml *arg)
return var;
}
+/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefDecrement */
+Aml *aml_decrement(Aml *arg)
+{
+ Aml *var = aml_opcode(0x76 /* DecrementOp */);
+ aml_append(var, arg);
+ return var;
+}
+
/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefIndex */
Aml *aml_index(Aml *arg1, Aml *idx)
{
- Aml *var = aml_opcode(0x88 /* IndexOp */);
- aml_append(var, arg1);
- aml_append(var, idx);
- build_append_byte(var->buf, 0x00 /* NullNameOp */);
- return var;
+ return build_opcode_2arg_dst(0x88 /* IndexOp */, arg1, idx, NULL);
}
/* ACPI 1.0b: 16.2.5.3 Type 1 Opcodes Encoding: DefNotify */
@@ -523,6 +584,14 @@ Aml *aml_notify(Aml *arg1, Aml *arg2)
}
/* helper to call method with 1 argument */
+Aml *aml_call0(const char *method)
+{
+ Aml *var = aml_alloc();
+ build_append_namestring(var->buf, "%s", method);
+ return var;
+}
+
+/* helper to call method with 1 argument */
Aml *aml_call1(const char *method, Aml *arg1)
{
Aml *var = aml_alloc();
@@ -764,6 +833,26 @@ Aml *aml_equal(Aml *arg1, Aml *arg2)
return var;
}
+/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefLGreater */
+Aml *aml_lgreater(Aml *arg1, Aml *arg2)
+{
+ Aml *var = aml_opcode(0x94 /* LGreaterOp */);
+ aml_append(var, arg1);
+ aml_append(var, arg2);
+ return var;
+}
+
+/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefLGreaterEqual */
+Aml *aml_lgreater_equal(Aml *arg1, Aml *arg2)
+{
+ /* LGreaterEqualOp := LNotOp LLessOp */
+ Aml *var = aml_opcode(0x92 /* LNotOp */);
+ build_append_byte(var->buf, 0x95 /* LLessOp */);
+ aml_append(var, arg1);
+ aml_append(var, arg2);
+ return var;
+}
+
/* ACPI 1.0b: 16.2.5.3 Type 1 Opcodes Encoding: DefIfElse */
Aml *aml_if(Aml *predicate)
{
@@ -889,27 +978,43 @@ Aml *aml_reserved_field(unsigned length)
}
/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefField */
-Aml *aml_field(const char *name, AmlAccessType type, AmlUpdateRule rule)
+Aml *aml_field(const char *name, AmlAccessType type, AmlLockRule lock,
+ AmlUpdateRule rule)
{
Aml *var = aml_bundle(0x81 /* FieldOp */, AML_EXT_PACKAGE);
uint8_t flags = rule << 5 | type;
+ flags |= lock << 4; /* LockRule at 4 bit offset */
+
build_append_namestring(var->buf, "%s", name);
build_append_byte(var->buf, flags);
return var;
}
-/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefCreateDWordField */
-Aml *aml_create_dword_field(Aml *srcbuf, Aml *index, const char *name)
+static
+Aml *create_field_common(int opcode, Aml *srcbuf, Aml *index, const char *name)
{
- Aml *var = aml_alloc();
- build_append_byte(var->buf, 0x8A); /* CreateDWordFieldOp */
+ Aml *var = aml_opcode(opcode);
aml_append(var, srcbuf);
aml_append(var, index);
build_append_namestring(var->buf, "%s", name);
return var;
}
+/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefCreateDWordField */
+Aml *aml_create_dword_field(Aml *srcbuf, Aml *index, const char *name)
+{
+ return create_field_common(0x8A /* CreateDWordFieldOp */,
+ srcbuf, index, name);
+}
+
+/* ACPI 2.0a: 17.2.4.2 Named Objects Encoding: DefCreateQWordField */
+Aml *aml_create_qword_field(Aml *srcbuf, Aml *index, const char *name)
+{
+ return create_field_common(0x8F /* CreateQWordFieldOp */,
+ srcbuf, index, name);
+}
+
/* ACPI 1.0b: 16.2.3 Data Objects Encoding: String */
Aml *aml_string(const char *name_format, ...)
{
@@ -1170,6 +1275,30 @@ Aml *aml_qword_memory(AmlDecode dec, AmlMinFixed min_fixed,
addr_trans, len, flags);
}
+/* ACPI 1.0b: 6.4.2.2 DMA Format/6.4.2.2.1 ASL Macro for DMA Descriptor */
+Aml *aml_dma(AmlDmaType typ, AmlDmaBusMaster bm, AmlTransferSize sz,
+ uint8_t channel)
+{
+ Aml *var = aml_alloc();
+ uint8_t flags = sz | bm << 2 | typ << 5;
+
+ assert(channel < 8);
+ build_append_byte(var->buf, 0x2A); /* Byte 0: DMA Descriptor */
+ build_append_byte(var->buf, 1U << channel); /* Byte 1: _DMA - DmaChannel */
+ build_append_byte(var->buf, flags); /* Byte 2 */
+ return var;
+}
+
+/* ACPI 1.0b: 16.2.5.3 Type 1 Opcodes Encoding: DefSleep */
+Aml *aml_sleep(uint64_t msec)
+{
+ Aml *var = aml_alloc();
+ build_append_byte(var->buf, 0x5B); /* ExtOpPrefix */
+ build_append_byte(var->buf, 0x22); /* SleepOp */
+ aml_append(var, aml_int(msec));
+ return var;
+}
+
static uint8_t Hex2Byte(const char *src)
{
int hi, lo;
@@ -1240,16 +1369,81 @@ Aml *aml_unicode(const char *str)
return var;
}
+/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefDerefOf */
+Aml *aml_derefof(Aml *arg)
+{
+ Aml *var = aml_opcode(0x83 /* DerefOfOp */);
+ aml_append(var, arg);
+ return var;
+}
+
+/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefSizeOf */
+Aml *aml_sizeof(Aml *arg)
+{
+ Aml *var = aml_opcode(0x87 /* SizeOfOp */);
+ aml_append(var, arg);
+ return var;
+}
+
+/* ACPI 1.0b: 16.2.5.2 Named Objects Encoding: DefMutex */
+Aml *aml_mutex(const char *name, uint8_t sync_level)
+{
+ Aml *var = aml_alloc();
+ build_append_byte(var->buf, 0x5B); /* ExtOpPrefix */
+ build_append_byte(var->buf, 0x01); /* MutexOp */
+ build_append_namestring(var->buf, "%s", name);
+ assert(!(sync_level & 0xF0));
+ build_append_byte(var->buf, sync_level);
+ return var;
+}
+
+/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefAcquire */
+Aml *aml_acquire(Aml *mutex, uint16_t timeout)
+{
+ Aml *var = aml_alloc();
+ build_append_byte(var->buf, 0x5B); /* ExtOpPrefix */
+ build_append_byte(var->buf, 0x23); /* AcquireOp */
+ aml_append(var, mutex);
+ build_append_int_noprefix(var->buf, timeout, sizeof(timeout));
+ return var;
+}
+
+/* ACPI 1.0b: 16.2.5.3 Type 1 Opcodes Encoding: DefRelease */
+Aml *aml_release(Aml *mutex)
+{
+ Aml *var = aml_alloc();
+ build_append_byte(var->buf, 0x5B); /* ExtOpPrefix */
+ build_append_byte(var->buf, 0x27); /* ReleaseOp */
+ aml_append(var, mutex);
+ return var;
+}
+
+/* ACPI 1.0b: 16.2.5.1 Name Space Modifier Objects Encoding: DefAlias */
+Aml *aml_alias(const char *source_object, const char *alias_object)
+{
+ Aml *var = aml_opcode(0x06 /* AliasOp */);
+ aml_append(var, aml_name("%s", source_object));
+ aml_append(var, aml_name("%s", alias_object));
+ return var;
+}
+
void
build_header(GArray *linker, GArray *table_data,
- AcpiTableHeader *h, const char *sig, int len, uint8_t rev)
+ AcpiTableHeader *h, const char *sig, int len, uint8_t rev,
+ const char *oem_table_id)
{
memcpy(&h->signature, sig, 4);
h->length = cpu_to_le32(len);
h->revision = rev;
memcpy(h->oem_id, ACPI_BUILD_APPNAME6, 6);
- memcpy(h->oem_table_id, ACPI_BUILD_APPNAME4, 4);
- memcpy(h->oem_table_id + 4, sig, 4);
+
+ if (oem_table_id) {
+ strncpy((char *)h->oem_table_id, oem_table_id, sizeof(h->oem_table_id));
+ } else {
+ memcpy(h->oem_table_id, ACPI_BUILD_APPNAME4, 4);
+ memcpy(h->oem_table_id + 4, sig, 4);
+ }
+
h->oem_revision = cpu_to_le32(1);
memcpy(h->asl_compiler_id, ACPI_BUILD_APPNAME4, 4);
h->asl_compiler_revision = cpu_to_le32(1);
@@ -1316,5 +1510,5 @@ build_rsdt(GArray *table_data, GArray *linker, GArray *table_offsets)
sizeof(uint32_t));
}
build_header(linker, table_data,
- (void *)rsdt, "RSDT", rsdt_len, 1);
+ (void *)rsdt, "RSDT", rsdt_len, 1, NULL);
}
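Taken together, the new primitives above (aml_mutex(), aml_acquire(), aml_release(), aml_sleep(), and friends) let serialized, delay-containing methods be expressed directly from C. A hedged sketch of how they might compose, with hypothetical object names that are not part of this commit:

    /* Illustrative sketch: guard a method body with a mutex and a 100 ms delay. */
    Aml *dev = aml_device("DEV0");
    Aml *method = aml_method("_TST", 0, AML_NOTSERIALIZED);

    aml_append(dev, aml_mutex("MLCK", 0));                      /* Mutex (MLCK, 0) */
    aml_append(method, aml_acquire(aml_name("MLCK"), 0xFFFF));  /* wait forever    */
    aml_append(method, aml_sleep(100));                         /* Sleep (100)     */
    aml_append(method, aml_release(aml_name("MLCK")));
    aml_append(dev, method);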
diff --git a/hw/acpi/memory_hotplug.c b/hw/acpi/memory_hotplug.c
index e4b9a01..298e868 100644
--- a/hw/acpi/memory_hotplug.c
+++ b/hw/acpi/memory_hotplug.c
@@ -231,6 +231,11 @@ void acpi_memory_plug_cb(ACPIREGS *ar, qemu_irq irq, MemHotplugState *mem_st,
DeviceState *dev, Error **errp)
{
MemStatus *mdev;
+ DeviceClass *dc = DEVICE_GET_CLASS(dev);
+
+ if (!dc->hotpluggable) {
+ return;
+ }
mdev = acpi_memory_slot_status(mem_st, dev, errp);
if (!mdev) {
diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c
new file mode 100644
index 0000000..9534418
--- /dev/null
+++ b/hw/acpi/nvdimm.c
@@ -0,0 +1,488 @@
+/*
+ * NVDIMM ACPI Implementation
+ *
+ * Copyright(C) 2015 Intel Corporation.
+ *
+ * Author:
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ *
+ * NFIT is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT)
+ * and the DSM specification can be found at:
+ * http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf
+ *
+ * Currently, it only supports PMEM Virtualization.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ */
+
+#include "hw/acpi/acpi.h"
+#include "hw/acpi/aml-build.h"
+#include "hw/mem/nvdimm.h"
+
+static int nvdimm_plugged_device_list(Object *obj, void *opaque)
+{
+ GSList **list = opaque;
+
+ if (object_dynamic_cast(obj, TYPE_NVDIMM)) {
+ DeviceState *dev = DEVICE(obj);
+
+ if (dev->realized) { /* only realized NVDIMMs matter */
+ *list = g_slist_append(*list, DEVICE(obj));
+ }
+ }
+
+ object_child_foreach(obj, nvdimm_plugged_device_list, opaque);
+ return 0;
+}
+
+/*
+ * inquire plugged NVDIMM devices and link them into the list which is
+ * returned to the caller.
+ *
+ * Note: it is the caller's responsibility to free the list to avoid
+ * memory leak.
+ */
+static GSList *nvdimm_get_plugged_device_list(void)
+{
+ GSList *list = NULL;
+
+ object_child_foreach(qdev_get_machine(), nvdimm_plugged_device_list,
+ &list);
+ return list;
+}
+
+#define NVDIMM_UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
+ { (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
+ (b) & 0xff, ((b) >> 8) & 0xff, (c) & 0xff, ((c) >> 8) & 0xff, \
+ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }
+
+/*
+ * define Byte Addressable Persistent Memory (PM) Region according to
+ * ACPI 6.0: 5.2.25.1 System Physical Address Range Structure.
+ */
+static const uint8_t nvdimm_nfit_spa_uuid[] =
+ NVDIMM_UUID_LE(0x66f0d379, 0xb4f3, 0x4074, 0xac, 0x43, 0x0d, 0x33,
+ 0x18, 0xb7, 0x8c, 0xdb);
+
+/*
+ * NVDIMM Firmware Interface Table
+ * @signature: "NFIT"
+ *
+ * It provides information that allows OSPM to enumerate NVDIMM present in
+ * the platform and associate system physical address ranges created by the
+ * NVDIMMs.
+ *
+ * It is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT)
+ */
+struct NvdimmNfitHeader {
+ ACPI_TABLE_HEADER_DEF
+ uint32_t reserved;
+} QEMU_PACKED;
+typedef struct NvdimmNfitHeader NvdimmNfitHeader;
+
+/*
+ * define NFIT structures according to ACPI 6.0: 5.2.25 NVDIMM Firmware
+ * Interface Table (NFIT).
+ */
+
+/*
+ * System Physical Address Range Structure
+ *
+ * It describes the system physical address ranges occupied by NVDIMMs and
+ * the types of the regions.
+ */
+struct NvdimmNfitSpa {
+ uint16_t type;
+ uint16_t length;
+ uint16_t spa_index;
+ uint16_t flags;
+ uint32_t reserved;
+ uint32_t proximity_domain;
+ uint8_t type_guid[16];
+ uint64_t spa_base;
+ uint64_t spa_length;
+ uint64_t mem_attr;
+} QEMU_PACKED;
+typedef struct NvdimmNfitSpa NvdimmNfitSpa;
+
+/*
+ * Memory Device to System Physical Address Range Mapping Structure
+ *
+ * It enables identifying each NVDIMM region and the corresponding SPA
+ * describing the memory interleave
+ */
+struct NvdimmNfitMemDev {
+ uint16_t type;
+ uint16_t length;
+ uint32_t nfit_handle;
+ uint16_t phys_id;
+ uint16_t region_id;
+ uint16_t spa_index;
+ uint16_t dcr_index;
+ uint64_t region_len;
+ uint64_t region_offset;
+ uint64_t region_dpa;
+ uint16_t interleave_index;
+ uint16_t interleave_ways;
+ uint16_t flags;
+ uint16_t reserved;
+} QEMU_PACKED;
+typedef struct NvdimmNfitMemDev NvdimmNfitMemDev;
+
+/*
+ * NVDIMM Control Region Structure
+ *
+ * It describes the NVDIMM and if applicable, Block Control Window.
+ */
+struct NvdimmNfitControlRegion {
+ uint16_t type;
+ uint16_t length;
+ uint16_t dcr_index;
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint16_t revision_id;
+ uint16_t sub_vendor_id;
+ uint16_t sub_device_id;
+ uint16_t sub_revision_id;
+ uint8_t reserved[6];
+ uint32_t serial_number;
+ uint16_t fic;
+ uint16_t num_bcw;
+ uint64_t bcw_size;
+ uint64_t cmd_offset;
+ uint64_t cmd_size;
+ uint64_t status_offset;
+ uint64_t status_size;
+ uint16_t flags;
+ uint8_t reserved2[6];
+} QEMU_PACKED;
+typedef struct NvdimmNfitControlRegion NvdimmNfitControlRegion;
+
+/*
+ * Module serial number is a unique number for each device. We use the
+ * slot id of NVDIMM device to generate this number so that each device
+ * associates with a different number.
+ *
+ * 0x123456 is a magic number we arbitrarily chose.
+ */
+static uint32_t nvdimm_slot_to_sn(int slot)
+{
+ return 0x123456 + slot;
+}
+
+/*
+ * handle is used to uniquely associate nfit_memdev structure with NVDIMM
+ * ACPI device - nfit_memdev.nfit_handle matches with the value returned
+ * by ACPI device _ADR method.
+ *
+ * We generate the handle with the slot id of NVDIMM device and reserve
+ * 0 for NVDIMM root device.
+ */
+static uint32_t nvdimm_slot_to_handle(int slot)
+{
+ return slot + 1;
+}
+
+/*
+ * index uniquely identifies the structure, 0 is reserved which indicates
+ * that the structure is not valid or the associated structure is not
+ * present.
+ *
+ * Each NVDIMM device needs two indexes, one for nfit_spa and another for
+ * nfit_dc which are generated by the slot id of NVDIMM device.
+ */
+static uint16_t nvdimm_slot_to_spa_index(int slot)
+{
+ return (slot + 1) << 1;
+}
+
+/* See the comments of nvdimm_slot_to_spa_index(). */
+static uint32_t nvdimm_slot_to_dcr_index(int slot)
+{
+ return nvdimm_slot_to_spa_index(slot) + 1;
+}
+
+/* ACPI 6.0: 5.2.25.1 System Physical Address Range Structure */
+static void
+nvdimm_build_structure_spa(GArray *structures, DeviceState *dev)
+{
+ NvdimmNfitSpa *nfit_spa;
+ uint64_t addr = object_property_get_int(OBJECT(dev), PC_DIMM_ADDR_PROP,
+ NULL);
+ uint64_t size = object_property_get_int(OBJECT(dev), PC_DIMM_SIZE_PROP,
+ NULL);
+ uint32_t node = object_property_get_int(OBJECT(dev), PC_DIMM_NODE_PROP,
+ NULL);
+ int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
+ NULL);
+
+ nfit_spa = acpi_data_push(structures, sizeof(*nfit_spa));
+
+ nfit_spa->type = cpu_to_le16(0 /* System Physical Address Range
+ Structure */);
+ nfit_spa->length = cpu_to_le16(sizeof(*nfit_spa));
+ nfit_spa->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot));
+
+ /*
+ * Control region is strict as all the device info, such as SN, index,
+ * is associated with slot id.
+ */
+ nfit_spa->flags = cpu_to_le16(1 /* Control region is strictly for
+ management during hot add/online
+ operation */ |
+ 2 /* Data in Proximity Domain field is
+ valid*/);
+
+ /* NUMA node. */
+ nfit_spa->proximity_domain = cpu_to_le32(node);
+ /* the region reported as PMEM. */
+ memcpy(nfit_spa->type_guid, nvdimm_nfit_spa_uuid,
+ sizeof(nvdimm_nfit_spa_uuid));
+
+ nfit_spa->spa_base = cpu_to_le64(addr);
+ nfit_spa->spa_length = cpu_to_le64(size);
+
+ /* It is the PMEM and can be cached as writeback. */
+ nfit_spa->mem_attr = cpu_to_le64(0x8ULL /* EFI_MEMORY_WB */ |
+ 0x8000ULL /* EFI_MEMORY_NV */);
+}
+
+/*
+ * ACPI 6.0: 5.2.25.2 Memory Device to System Physical Address Range Mapping
+ * Structure
+ */
+static void
+nvdimm_build_structure_memdev(GArray *structures, DeviceState *dev)
+{
+ NvdimmNfitMemDev *nfit_memdev;
+ uint64_t addr = object_property_get_int(OBJECT(dev), PC_DIMM_ADDR_PROP,
+ NULL);
+ uint64_t size = object_property_get_int(OBJECT(dev), PC_DIMM_SIZE_PROP,
+ NULL);
+ int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
+ NULL);
+ uint32_t handle = nvdimm_slot_to_handle(slot);
+
+ nfit_memdev = acpi_data_push(structures, sizeof(*nfit_memdev));
+
+ nfit_memdev->type = cpu_to_le16(1 /* Memory Device to System Address
+ Range Map Structure*/);
+ nfit_memdev->length = cpu_to_le16(sizeof(*nfit_memdev));
+ nfit_memdev->nfit_handle = cpu_to_le32(handle);
+
+ /*
+ * associate memory device with System Physical Address Range
+ * Structure.
+ */
+ nfit_memdev->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot));
+ /* associate memory device with Control Region Structure. */
+ nfit_memdev->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot));
+
+ /* The memory region on the device. */
+ nfit_memdev->region_len = cpu_to_le64(size);
+ nfit_memdev->region_dpa = cpu_to_le64(addr);
+
+ /* Only one interleave for PMEM. */
+ nfit_memdev->interleave_ways = cpu_to_le16(1);
+}
+
+/*
+ * ACPI 6.0: 5.2.25.5 NVDIMM Control Region Structure.
+ */
+static void nvdimm_build_structure_dcr(GArray *structures, DeviceState *dev)
+{
+ NvdimmNfitControlRegion *nfit_dcr;
+ int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
+ NULL);
+ uint32_t sn = nvdimm_slot_to_sn(slot);
+
+ nfit_dcr = acpi_data_push(structures, sizeof(*nfit_dcr));
+
+ nfit_dcr->type = cpu_to_le16(4 /* NVDIMM Control Region Structure */);
+ nfit_dcr->length = cpu_to_le16(sizeof(*nfit_dcr));
+ nfit_dcr->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot));
+
+ /* vendor: Intel. */
+ nfit_dcr->vendor_id = cpu_to_le16(0x8086);
+ nfit_dcr->device_id = cpu_to_le16(1);
+
+ /* The _DSM method is following Intel's DSM specification. */
+ nfit_dcr->revision_id = cpu_to_le16(1 /* Current Revision supported
+ in ACPI 6.0 is 1. */);
+ nfit_dcr->serial_number = cpu_to_le32(sn);
+ nfit_dcr->fic = cpu_to_le16(0x201 /* Format Interface Code. See Chapter
+ 2: NVDIMM Device Specific Method
+ (DSM) in DSM Spec Rev1.*/);
+}
+
+static GArray *nvdimm_build_device_structure(GSList *device_list)
+{
+ GArray *structures = g_array_new(false, true /* clear */, 1);
+
+ for (; device_list; device_list = device_list->next) {
+ DeviceState *dev = device_list->data;
+
+ /* build System Physical Address Range Structure. */
+ nvdimm_build_structure_spa(structures, dev);
+
+ /*
+ * build Memory Device to System Physical Address Range Mapping
+ * Structure.
+ */
+ nvdimm_build_structure_memdev(structures, dev);
+
+ /* build NVDIMM Control Region Structure. */
+ nvdimm_build_structure_dcr(structures, dev);
+ }
+
+ return structures;
+}
+
+static void nvdimm_build_nfit(GSList *device_list, GArray *table_offsets,
+ GArray *table_data, GArray *linker)
+{
+ GArray *structures = nvdimm_build_device_structure(device_list);
+ void *header;
+
+ acpi_add_table(table_offsets, table_data);
+
+ /* NFIT header. */
+ header = acpi_data_push(table_data, sizeof(NvdimmNfitHeader));
+ /* NVDIMM device structures. */
+ g_array_append_vals(table_data, structures->data, structures->len);
+
+ build_header(linker, table_data, header, "NFIT",
+ sizeof(NvdimmNfitHeader) + structures->len, 1, NULL);
+ g_array_free(structures, true);
+}
+
+#define NVDIMM_COMMON_DSM "NCAL"
+
+static void nvdimm_build_common_dsm(Aml *dev)
+{
+ Aml *method, *ifctx, *function;
+ uint8_t byte_list[1];
+
+ method = aml_method(NVDIMM_COMMON_DSM, 4, AML_NOTSERIALIZED);
+ function = aml_arg(2);
+
+ /*
+ * function 0 is called to inquire what functions are supported by
+ * OSPM
+ */
+ ifctx = aml_if(aml_equal(function, aml_int(0)));
+ byte_list[0] = 0 /* No function Supported */;
+ aml_append(ifctx, aml_return(aml_buffer(1, byte_list)));
+ aml_append(method, ifctx);
+
+ /* No function is supported yet. */
+ byte_list[0] = 1 /* Not Supported */;
+ aml_append(method, aml_return(aml_buffer(1, byte_list)));
+
+ aml_append(dev, method);
+}
+
+static void nvdimm_build_device_dsm(Aml *dev)
+{
+ Aml *method;
+
+ method = aml_method("_DSM", 4, AML_NOTSERIALIZED);
+ aml_append(method, aml_return(aml_call4(NVDIMM_COMMON_DSM, aml_arg(0),
+ aml_arg(1), aml_arg(2), aml_arg(3))));
+ aml_append(dev, method);
+}
+
+static void nvdimm_build_nvdimm_devices(GSList *device_list, Aml *root_dev)
+{
+ for (; device_list; device_list = device_list->next) {
+ DeviceState *dev = device_list->data;
+ int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
+ NULL);
+ uint32_t handle = nvdimm_slot_to_handle(slot);
+ Aml *nvdimm_dev;
+
+ nvdimm_dev = aml_device("NV%02X", slot);
+
+ /*
+ * ACPI 6.0: 9.20 NVDIMM Devices:
+ *
+ * _ADR object that is used to supply OSPM with unique address
+ * of the NVDIMM device. This is done by returning the NFIT Device
+ * handle that is used to identify the associated entries in ACPI
+ * table NFIT or _FIT.
+ */
+ aml_append(nvdimm_dev, aml_name_decl("_ADR", aml_int(handle)));
+
+ nvdimm_build_device_dsm(nvdimm_dev);
+ aml_append(root_dev, nvdimm_dev);
+ }
+}
+
+static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
+ GArray *table_data, GArray *linker)
+{
+ Aml *ssdt, *sb_scope, *dev;
+
+ acpi_add_table(table_offsets, table_data);
+
+ ssdt = init_aml_allocator();
+ acpi_data_push(ssdt->buf, sizeof(AcpiTableHeader));
+
+ sb_scope = aml_scope("\\_SB");
+
+ dev = aml_device("NVDR");
+
+ /*
+ * ACPI 6.0: 9.20 NVDIMM Devices:
+ *
+ * The ACPI Name Space device uses _HID of ACPI0012 to identify the root
+ * NVDIMM interface device. Platform firmware is required to contain one
+ * such device in _SB scope if NVDIMMs support is exposed by platform to
+ * OSPM.
+ * For each NVDIMM present or intended to be supported by platform,
+ * platform firmware also exposes an ACPI Namespace Device under the
+ * root device.
+ */
+ aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012")));
+
+ nvdimm_build_common_dsm(dev);
+ nvdimm_build_device_dsm(dev);
+
+ nvdimm_build_nvdimm_devices(device_list, dev);
+
+ aml_append(sb_scope, dev);
+
+ aml_append(ssdt, sb_scope);
+ /* copy AML table into ACPI tables blob and patch header there */
+ g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len);
+ build_header(linker, table_data,
+ (void *)(table_data->data + table_data->len - ssdt->buf->len),
+ "SSDT", ssdt->buf->len, 1, "NVDIMM");
+ free_aml_allocator();
+}
+
+void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
+ GArray *linker)
+{
+ GSList *device_list;
+
+ /* no NVDIMM device is plugged. */
+ device_list = nvdimm_get_plugged_device_list();
+ if (!device_list) {
+ return;
+ }
+ nvdimm_build_nfit(device_list, table_offsets, table_data, linker);
+ nvdimm_build_ssdt(device_list, table_offsets, table_data, linker);
+ g_slist_free(device_list);
+}
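The slot-based numbering used by hw/acpi/nvdimm.c above is easiest to see with concrete values; the following worked example (not in the patch itself) assumes an NVDIMM plugged into slot 2:

    /*
     * nvdimm_slot_to_handle(2)    == 3         (0 is reserved for the root device)
     * nvdimm_slot_to_spa_index(2) == 6         ((slot + 1) << 1)
     * nvdimm_slot_to_dcr_index(2) == 7         (spa_index + 1)
     * nvdimm_slot_to_sn(2)        == 0x123458  (0x123456 + slot)
     */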
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 32ec19e..0caf5ce 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -274,16 +274,16 @@ static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3"));
aml_append(ifctx, aml_store(aml_name("CDW2"), aml_name("SUPP")));
aml_append(ifctx, aml_store(aml_name("CDW3"), aml_name("CTRL")));
- aml_append(ifctx, aml_store(aml_and(aml_name("CTRL"), aml_int(0x1D)),
+ aml_append(ifctx, aml_store(aml_and(aml_name("CTRL"), aml_int(0x1D), NULL),
aml_name("CTRL")));
ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(0x1))));
- aml_append(ifctx1, aml_store(aml_or(aml_name("CDW1"), aml_int(0x08)),
+ aml_append(ifctx1, aml_store(aml_or(aml_name("CDW1"), aml_int(0x08), NULL),
aml_name("CDW1")));
aml_append(ifctx, ifctx1);
ifctx1 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), aml_name("CTRL"))));
- aml_append(ifctx1, aml_store(aml_or(aml_name("CDW1"), aml_int(0x10)),
+ aml_append(ifctx1, aml_store(aml_or(aml_name("CDW1"), aml_int(0x10), NULL),
aml_name("CDW1")));
aml_append(ifctx, ifctx1);
@@ -292,7 +292,7 @@ static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
aml_append(method, ifctx);
elsectx = aml_else();
- aml_append(elsectx, aml_store(aml_or(aml_name("CDW1"), aml_int(4)),
+ aml_append(elsectx, aml_store(aml_or(aml_name("CDW1"), aml_int(4), NULL),
aml_name("CDW1")));
aml_append(elsectx, aml_return(aml_arg(3)));
aml_append(method, elsectx);
@@ -423,7 +423,8 @@ build_spcr(GArray *table_data, GArray *linker, VirtGuestInfo *guest_info)
spcr->pci_device_id = 0xffff; /* PCI Device ID: not a PCI device */
spcr->pci_vendor_id = 0xffff; /* PCI Vendor ID: not a PCI device */
- build_header(linker, table_data, (void *)spcr, "SPCR", sizeof(*spcr), 2);
+ build_header(linker, table_data, (void *)spcr, "SPCR", sizeof(*spcr), 2,
+ NULL);
}
static void
@@ -442,7 +443,7 @@ build_mcfg(GArray *table_data, GArray *linker, VirtGuestInfo *guest_info)
mcfg->allocation[0].end_bus_number = (memmap[VIRT_PCIE_ECAM].size
/ PCIE_MMCFG_SIZE_MIN) - 1;
- build_header(linker, table_data, (void *)mcfg, "MCFG", len, 1);
+ build_header(linker, table_data, (void *)mcfg, "MCFG", len, 1, NULL);
}
/* GTDT */
@@ -468,7 +469,7 @@ build_gtdt(GArray *table_data, GArray *linker)
build_header(linker, table_data,
(void *)(table_data->data + gtdt_start), "GTDT",
- table_data->len - gtdt_start, 2);
+ table_data->len - gtdt_start, 2, NULL);
}
/* MADT */
@@ -530,7 +531,7 @@ build_madt(GArray *table_data, GArray *linker, VirtGuestInfo *guest_info,
build_header(linker, table_data,
(void *)(table_data->data + madt_start), "APIC",
- table_data->len - madt_start, 3);
+ table_data->len - madt_start, 3, NULL);
}
/* FADT */
@@ -555,7 +556,7 @@ build_fadt(GArray *table_data, GArray *linker, unsigned dsdt)
sizeof fadt->dsdt);
build_header(linker, table_data,
- (void *)fadt, "FACP", sizeof(*fadt), 5);
+ (void *)fadt, "FACP", sizeof(*fadt), 5, NULL);
}
/* DSDT */
@@ -591,7 +592,7 @@ build_dsdt(GArray *table_data, GArray *linker, VirtGuestInfo *guest_info)
g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
build_header(linker, table_data,
(void *)(table_data->data + table_data->len - dsdt->buf->len),
- "DSDT", dsdt->buf->len, 2);
+ "DSDT", dsdt->buf->len, 2, NULL);
free_aml_allocator();
}
diff --git a/hw/core/loader.c b/hw/core/loader.c
index eb67f05..6b69852 100644
--- a/hw/core/loader.c
+++ b/hw/core/loader.c
@@ -51,12 +51,10 @@
#include "hw/nvram/fw_cfg.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
+#include "hw/boards.h"
#include <zlib.h>
-bool option_rom_has_mr = false;
-bool rom_file_has_mr = true;
-
static int roms_loaded;
/* return the size or -1 if error */
@@ -754,6 +752,7 @@ int rom_add_file(const char *file, const char *fw_dir,
hwaddr addr, int32_t bootindex,
bool option_rom)
{
+ MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
Rom *rom;
int rc, fd = -1;
char devpath[100];
@@ -810,7 +809,7 @@ int rom_add_file(const char *file, const char *fw_dir,
basename);
snprintf(devpath, sizeof(devpath), "/rom@%s", fw_file_name);
- if ((!option_rom || option_rom_has_mr) && rom_file_has_mr) {
+ if ((!option_rom || mc->option_rom_has_mr) && mc->rom_file_has_mr) {
data = rom_set_mr(rom, OBJECT(fw_cfg), devpath);
} else {
data = rom->data;
@@ -838,6 +837,7 @@ MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len,
size_t max_len, hwaddr addr, const char *fw_file_name,
FWCfgReadCallback fw_callback, void *callback_opaque)
{
+ MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
Rom *rom;
MemoryRegion *mr = NULL;
@@ -855,7 +855,7 @@ MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len,
snprintf(devpath, sizeof(devpath), "/rom@%s", fw_file_name);
- if (rom_file_has_mr) {
+ if (mc->rom_file_has_mr) {
data = rom_set_mr(rom, OBJECT(fw_cfg), devpath);
mr = rom->mr;
} else {
diff --git a/hw/core/machine.c b/hw/core/machine.c
index 82be54e..c46ddc7 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -342,6 +342,7 @@ static void machine_class_init(ObjectClass *oc, void *data)
/* Default 128 MB as guest ram size */
mc->default_ram_size = 128 * M_BYTE;
+ mc->rom_file_has_mr = true;
}
static void machine_class_base_init(ObjectClass *oc, void *data)
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index fa866f6..4cc1440 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -39,6 +39,7 @@
#include "hw/loader.h"
#include "hw/isa/isa.h"
#include "hw/acpi/memory_hotplug.h"
+#include "hw/mem/nvdimm.h"
#include "sysemu/tpm.h"
#include "hw/acpi/tpm.h"
#include "sysemu/tpm_backend.h"
@@ -361,7 +362,7 @@ build_fadt(GArray *table_data, GArray *linker, AcpiPmInfo *pm,
fadt_setup(fadt, pm);
build_header(linker, table_data,
- (void *)fadt, "FACP", sizeof(*fadt), 1);
+ (void *)fadt, "FACP", sizeof(*fadt), 1, NULL);
}
static void
@@ -431,7 +432,7 @@ build_madt(GArray *table_data, GArray *linker, AcpiCpuInfo *cpu,
build_header(linker, table_data,
(void *)(table_data->data + madt_start), "APIC",
- table_data->len - madt_start, 1);
+ table_data->len - madt_start, 1, NULL);
}
/* Assign BSEL property to all buses. In the future, this can be changed
@@ -469,7 +470,7 @@ static void build_append_pcihp_notify_entry(Aml *method, int slot)
Aml *if_ctx;
int32_t devfn = PCI_DEVFN(slot, 0);
- if_ctx = aml_if(aml_and(aml_arg(0), aml_int(0x1U << slot)));
+ if_ctx = aml_if(aml_and(aml_arg(0), aml_int(0x1U << slot), NULL));
aml_append(if_ctx, aml_notify(aml_name("S%.02X", devfn), aml_arg(1)));
aml_append(method, if_ctx);
}
@@ -666,10 +667,11 @@ static Aml *build_prt(void)
/* slot = pin >> 2 */
aml_append(while_ctx,
- aml_store(aml_shiftright(pin, aml_int(2)), slot));
+ aml_store(aml_shiftright(pin, aml_int(2), NULL), slot));
/* lnk_idx = (slot + pin) & 3 */
aml_append(while_ctx,
- aml_store(aml_and(aml_add(pin, slot), aml_int(3)), lnk_idx));
+ aml_store(aml_and(aml_add(pin, slot, NULL), aml_int(3), NULL),
+ lnk_idx));
/* route[2] = "LNK[D|A|B|C]", selection based on pin % 3 */
aml_append(while_ctx, initialize_route(route, "LNKD", lnk_idx, 0));
@@ -679,11 +681,13 @@ static Aml *build_prt(void)
/* route[0] = 0x[slot]FFFF */
aml_append(while_ctx,
- aml_store(aml_or(aml_shiftleft(slot, aml_int(16)), aml_int(0xFFFF)),
+ aml_store(aml_or(aml_shiftleft(slot, aml_int(16)), aml_int(0xFFFF),
+ NULL),
aml_index(route, aml_int(0))));
/* route[1] = pin & 3 */
aml_append(while_ctx,
- aml_store(aml_and(pin, aml_int(3)), aml_index(route, aml_int(1))));
+ aml_store(aml_and(pin, aml_int(3), NULL),
+ aml_index(route, aml_int(1))));
/* res[pin] = route */
aml_append(while_ctx, aml_store(route, aml_index(res, pin)));
/* pin++ */
@@ -762,16 +766,59 @@ static void crs_replace_with_free_ranges(GPtrArray *ranges,
g_ptr_array_free(free_ranges, false);
}
+/*
+ * crs_range_merge - merges adjacent ranges in the given array.
+ * Array elements are deleted and replaced with the merged ranges.
+ */
+static void crs_range_merge(GPtrArray *range)
+{
+ GPtrArray *tmp = g_ptr_array_new_with_free_func(crs_range_free);
+ CrsRangeEntry *entry;
+ uint64_t range_base, range_limit;
+ int i;
+
+ if (!range->len) {
+ return;
+ }
+
+ g_ptr_array_sort(range, crs_range_compare);
+
+ entry = g_ptr_array_index(range, 0);
+ range_base = entry->base;
+ range_limit = entry->limit;
+ for (i = 1; i < range->len; i++) {
+ entry = g_ptr_array_index(range, i);
+ if (entry->base - 1 == range_limit) {
+ range_limit = entry->limit;
+ } else {
+ crs_range_insert(tmp, range_base, range_limit);
+ range_base = entry->base;
+ range_limit = entry->limit;
+ }
+ }
+ crs_range_insert(tmp, range_base, range_limit);
+
+ g_ptr_array_set_size(range, 0);
+ for (i = 0; i < tmp->len; i++) {
+ entry = g_ptr_array_index(tmp, i);
+ crs_range_insert(range, entry->base, entry->limit);
+ }
+ g_ptr_array_free(tmp, true);
+}
+
static Aml *build_crs(PCIHostState *host,
GPtrArray *io_ranges, GPtrArray *mem_ranges)
{
Aml *crs = aml_resource_template();
+ GPtrArray *host_io_ranges = g_ptr_array_new_with_free_func(crs_range_free);
+ GPtrArray *host_mem_ranges = g_ptr_array_new_with_free_func(crs_range_free);
+ CrsRangeEntry *entry;
uint8_t max_bus = pci_bus_num(host->bus);
uint8_t type;
int devfn;
+ int i;
for (devfn = 0; devfn < ARRAY_SIZE(host->bus->devices); devfn++) {
- int i;
uint64_t range_base, range_limit;
PCIDevice *dev = host->bus->devices[devfn];
@@ -794,26 +841,9 @@ static Aml *build_crs(PCIHostState *host,
}
if (r->type & PCI_BASE_ADDRESS_SPACE_IO) {
- aml_append(crs,
- aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED,
- AML_POS_DECODE, AML_ENTIRE_RANGE,
- 0,
- range_base,
- range_limit,
- 0,
- range_limit - range_base + 1));
- crs_range_insert(io_ranges, range_base, range_limit);
+ crs_range_insert(host_io_ranges, range_base, range_limit);
} else { /* "memory" */
- aml_append(crs,
- aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED,
- AML_MAX_FIXED, AML_NON_CACHEABLE,
- AML_READ_WRITE,
- 0,
- range_base,
- range_limit,
- 0,
- range_limit - range_base + 1));
- crs_range_insert(mem_ranges, range_base, range_limit);
+ crs_range_insert(host_mem_ranges, range_base, range_limit);
}
}
@@ -832,15 +862,7 @@ static Aml *build_crs(PCIHostState *host,
* that do not support multiple root buses
*/
if (range_base && range_base <= range_limit) {
- aml_append(crs,
- aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED,
- AML_POS_DECODE, AML_ENTIRE_RANGE,
- 0,
- range_base,
- range_limit,
- 0,
- range_limit - range_base + 1));
- crs_range_insert(io_ranges, range_base, range_limit);
+ crs_range_insert(host_io_ranges, range_base, range_limit);
}
range_base =
@@ -853,16 +875,7 @@ static Aml *build_crs(PCIHostState *host,
* that do not support multiple root buses
*/
if (range_base && range_base <= range_limit) {
- aml_append(crs,
- aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED,
- AML_MAX_FIXED, AML_NON_CACHEABLE,
- AML_READ_WRITE,
- 0,
- range_base,
- range_limit,
- 0,
- range_limit - range_base + 1));
- crs_range_insert(mem_ranges, range_base, range_limit);
+ crs_range_insert(host_mem_ranges, range_base, range_limit);
}
range_base =
@@ -875,20 +888,36 @@ static Aml *build_crs(PCIHostState *host,
* that do not support multiple root buses
*/
if (range_base && range_base <= range_limit) {
- aml_append(crs,
- aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED,
- AML_MAX_FIXED, AML_NON_CACHEABLE,
- AML_READ_WRITE,
- 0,
- range_base,
- range_limit,
- 0,
- range_limit - range_base + 1));
- crs_range_insert(mem_ranges, range_base, range_limit);
+ crs_range_insert(host_mem_ranges, range_base, range_limit);
}
}
}
+ crs_range_merge(host_io_ranges);
+ for (i = 0; i < host_io_ranges->len; i++) {
+ entry = g_ptr_array_index(host_io_ranges, i);
+ aml_append(crs,
+ aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED,
+ AML_POS_DECODE, AML_ENTIRE_RANGE,
+ 0, entry->base, entry->limit, 0,
+ entry->limit - entry->base + 1));
+ crs_range_insert(io_ranges, entry->base, entry->limit);
+ }
+ g_ptr_array_free(host_io_ranges, true);
+
+ crs_range_merge(host_mem_ranges);
+ for (i = 0; i < host_mem_ranges->len; i++) {
+ entry = g_ptr_array_index(host_mem_ranges, i);
+ aml_append(crs,
+ aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED,
+ AML_MAX_FIXED, AML_NON_CACHEABLE,
+ AML_READ_WRITE,
+ 0, entry->base, entry->limit, 0,
+ entry->limit - entry->base + 1));
+ crs_range_insert(mem_ranges, entry->base, entry->limit);
+ }
+ g_ptr_array_free(host_mem_ranges, true);
+
aml_append(crs,
aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
0,
@@ -925,8 +954,7 @@ build_ssdt(GArray *table_data, GArray *linker,
/* Reserve space for header */
acpi_data_push(ssdt->buf, sizeof(AcpiTableHeader));
- /* Extra PCI root buses are implemented only for i440fx */
- bus = find_i440fx();
+ bus = PC_MACHINE(machine)->bus;
if (bus) {
QLIST_FOREACH(bus, &bus->child, sibling) {
uint8_t bus_num = pci_bus_num(bus);
@@ -1105,7 +1133,7 @@ build_ssdt(GArray *table_data, GArray *linker,
aml_append(dev, aml_operation_region("PEOR", AML_SYSTEM_IO,
misc->pvpanic_port, 1));
- field = aml_field("PEOR", AML_BYTE_ACC, AML_PRESERVE);
+ field = aml_field("PEOR", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("PEPT", 8));
aml_append(dev, field);
@@ -1145,7 +1173,7 @@ build_ssdt(GArray *table_data, GArray *linker,
/* declare CPU hotplug MMIO region and PRS field to access it */
aml_append(sb_scope, aml_operation_region(
"PRST", AML_SYSTEM_IO, pm->cpu_hp_io_base, pm->cpu_hp_io_len));
- field = aml_field("PRST", AML_BYTE_ACC, AML_PRESERVE);
+ field = aml_field("PRST", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("PRS", 256));
aml_append(sb_scope, field);
@@ -1220,7 +1248,7 @@ build_ssdt(GArray *table_data, GArray *linker,
);
field = aml_field(stringify(MEMORY_HOTPLUG_IO_REGION), AML_DWORD_ACC,
- AML_PRESERVE);
+ AML_NOLOCK, AML_PRESERVE);
aml_append(field, /* read only */
aml_named_field(stringify(MEMORY_SLOT_ADDR_LOW), 32));
aml_append(field, /* read only */
@@ -1234,7 +1262,7 @@ build_ssdt(GArray *table_data, GArray *linker,
aml_append(scope, field);
field = aml_field(stringify(MEMORY_HOTPLUG_IO_REGION), AML_BYTE_ACC,
- AML_WRITE_AS_ZEROS);
+ AML_NOLOCK, AML_WRITE_AS_ZEROS);
aml_append(field, aml_reserved_field(160 /* bits, Offset(20) */));
aml_append(field, /* 1 if enabled, read only */
aml_named_field(stringify(MEMORY_SLOT_ENABLED), 1));
@@ -1250,7 +1278,7 @@ build_ssdt(GArray *table_data, GArray *linker,
aml_append(scope, field);
field = aml_field(stringify(MEMORY_HOTPLUG_IO_REGION), AML_DWORD_ACC,
- AML_PRESERVE);
+ AML_NOLOCK, AML_PRESERVE);
aml_append(field, /* DIMM selector, write only */
aml_named_field(stringify(MEMORY_SLOT_SLECTOR), 32));
aml_append(field, /* _OST event code, write only */
@@ -1350,7 +1378,7 @@ build_ssdt(GArray *table_data, GArray *linker,
g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len);
build_header(linker, table_data,
(void *)(table_data->data + table_data->len - ssdt->buf->len),
- "SSDT", ssdt->buf->len, 1);
+ "SSDT", ssdt->buf->len, 1, NULL);
free_aml_allocator();
}
@@ -1366,7 +1394,7 @@ build_hpet(GArray *table_data, GArray *linker)
hpet->timer_block_id = cpu_to_le32(0x8086a201);
hpet->addr.address = cpu_to_le64(HPET_BASE);
build_header(linker, table_data,
- (void *)hpet, "HPET", sizeof(*hpet), 1);
+ (void *)hpet, "HPET", sizeof(*hpet), 1, NULL);
}
static void
@@ -1389,7 +1417,7 @@ build_tpm_tcpa(GArray *table_data, GArray *linker, GArray *tcpalog)
sizeof(tcpa->log_area_start_address));
build_header(linker, table_data,
- (void *)tcpa, "TCPA", sizeof(*tcpa), 2);
+ (void *)tcpa, "TCPA", sizeof(*tcpa), 2, NULL);
acpi_data_push(tcpalog, TPM_LOG_AREA_MINIMUM_SIZE);
}
@@ -1406,7 +1434,7 @@ build_tpm2(GArray *table_data, GArray *linker)
tpm2_ptr->start_method = cpu_to_le32(TPM2_START_METHOD_MMIO);
build_header(linker, table_data,
- (void *)tpm2_ptr, "TPM2", sizeof(*tpm2_ptr), 4);
+ (void *)tpm2_ptr, "TPM2", sizeof(*tpm2_ptr), 4, NULL);
}
typedef enum {
@@ -1520,7 +1548,7 @@ build_srat(GArray *table_data, GArray *linker, PcGuestInfo *guest_info)
build_header(linker, table_data,
(void *)(table_data->data + srat_start),
"SRAT",
- table_data->len - srat_start, 1);
+ table_data->len - srat_start, 1, NULL);
}
static void
@@ -1549,7 +1577,7 @@ build_mcfg_q35(GArray *table_data, GArray *linker, AcpiMcfgInfo *info)
} else {
sig = "MCFG";
}
- build_header(linker, table_data, (void *)mcfg, sig, len, 1);
+ build_header(linker, table_data, (void *)mcfg, sig, len, 1, NULL);
}
static void
@@ -1573,7 +1601,7 @@ build_dmar_q35(GArray *table_data, GArray *linker)
drhd->address = cpu_to_le64(Q35_HOST_BRIDGE_IOMMU_ADDR);
build_header(linker, table_data, (void *)(table_data->data + dmar_start),
- "DMAR", table_data->len - dmar_start, 1);
+ "DMAR", table_data->len - dmar_start, 1, NULL);
}
static void
@@ -1588,7 +1616,7 @@ build_dsdt(GArray *table_data, GArray *linker, AcpiMiscInfo *misc)
memset(dsdt, 0, sizeof *dsdt);
build_header(linker, table_data, dsdt, "DSDT",
- misc->dsdt_size, 1);
+ misc->dsdt_size, 1, NULL);
}
static GArray *
@@ -1659,6 +1687,13 @@ static bool acpi_has_iommu(void)
return intel_iommu && !ambiguous;
}
+static bool acpi_has_nvdimm(void)
+{
+ PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
+
+ return pcms->nvdimm;
+}
+
static
void acpi_build(PcGuestInfo *guest_info, AcpiBuildTables *tables)
{
@@ -1743,6 +1778,10 @@ void acpi_build(PcGuestInfo *guest_info, AcpiBuildTables *tables)
build_dmar_q35(tables_blob, tables->linker);
}
+ if (acpi_has_nvdimm()) {
+ nvdimm_build_acpi(table_offsets, tables_blob, tables->linker);
+ }
+
/* Add tables supplied by user (if any) */
for (u = acpi_table_first(); u; u = acpi_table_next(u)) {
unsigned len = acpi_table_len(u);
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 426424a..459260b 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -77,15 +77,6 @@
#define DPRINTF(fmt, ...)
#endif
-/* Leave a chunk of memory at the top of RAM for the BIOS ACPI tables
- * (128K) and other BIOS datastructures (less than 4K reported to be used at
- * the moment, 32K should be enough for a while). */
-static unsigned acpi_data_size = 0x20000 + 0x8000;
-void pc_set_legacy_acpi_data_size(void)
-{
- acpi_data_size = 0x10000;
-}
-
#define BIOS_CFG_IOPORT 0x510
#define FW_CFG_ACPI_TABLES (FW_CFG_ARCH_LOCAL + 0)
#define FW_CFG_SMBIOS_ENTRIES (FW_CFG_ARCH_LOCAL + 1)
@@ -841,6 +832,7 @@ static void load_linux(PCMachineState *pcms,
FILE *f;
char *vmode;
MachineState *machine = MACHINE(pcms);
+ PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
const char *kernel_filename = machine->kernel_filename;
const char *initrd_filename = machine->initrd_filename;
const char *kernel_cmdline = machine->kernel_cmdline;
@@ -908,8 +900,8 @@ static void load_linux(PCMachineState *pcms,
initrd_max = 0x37ffffff;
}
- if (initrd_max >= pcms->below_4g_mem_size - acpi_data_size) {
- initrd_max = pcms->below_4g_mem_size - acpi_data_size - 1;
+ if (initrd_max >= pcms->below_4g_mem_size - pcmc->acpi_data_size) {
+ initrd_max = pcms->below_4g_mem_size - pcmc->acpi_data_size - 1;
}
fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_ADDR, cmdline_addr);
@@ -1175,7 +1167,7 @@ void pc_guest_info_machine_done(Notifier *notifier, void *data)
PcGuestInfoState *guest_info_state = container_of(notifier,
PcGuestInfoState,
machine_done);
- PCIBus *bus = find_i440fx();
+ PCIBus *bus = PC_MACHINE(qdev_get_machine())->bus;
if (bus) {
int extra_hosts = 0;
@@ -1303,6 +1295,7 @@ FWCfgState *pc_memory_init(PCMachineState *pcms,
MemoryRegion *ram_below_4g, *ram_above_4g;
FWCfgState *fw_cfg;
MachineState *machine = MACHINE(pcms);
+ PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
assert(machine->ram_size == pcms->below_4g_mem_size +
pcms->above_4g_mem_size);
@@ -1364,7 +1357,7 @@ FWCfgState *pc_memory_init(PCMachineState *pcms,
pcms->hotplug_memory.base =
ROUND_UP(0x100000000ULL + pcms->above_4g_mem_size, 1ULL << 30);
- if (pcms->enforce_aligned_dimm) {
+ if (pcmc->enforce_aligned_dimm) {
/* size hotplug region assuming 1G page max alignment per slot */
hotplug_mem_size += (1ULL << 30) * machine->ram_slots;
}
@@ -1617,12 +1610,13 @@ static void pc_dimm_plug(HotplugHandler *hotplug_dev,
HotplugHandlerClass *hhc;
Error *local_err = NULL;
PCMachineState *pcms = PC_MACHINE(hotplug_dev);
+ PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
PCDIMMDevice *dimm = PC_DIMM(dev);
PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
MemoryRegion *mr = ddc->get_memory_region(dimm);
uint64_t align = TARGET_PAGE_SIZE;
- if (memory_region_get_alignment(mr) && pcms->enforce_aligned_dimm) {
+ if (memory_region_get_alignment(mr) && pcmc->enforce_aligned_dimm) {
align = memory_region_get_alignment(mr);
}
@@ -1871,11 +1865,18 @@ static void pc_machine_set_smm(Object *obj, Visitor *v, void *opaque,
visit_type_OnOffAuto(v, &pcms->smm, name, errp);
}
-static bool pc_machine_get_aligned_dimm(Object *obj, Error **errp)
+static bool pc_machine_get_nvdimm(Object *obj, Error **errp)
+{
+ PCMachineState *pcms = PC_MACHINE(obj);
+
+ return pcms->nvdimm;
+}
+
+static void pc_machine_set_nvdimm(Object *obj, bool value, Error **errp)
{
PCMachineState *pcms = PC_MACHINE(obj);
- return pcms->enforce_aligned_dimm;
+ pcms->nvdimm = value;
}
static void pc_machine_initfn(Object *obj)
@@ -1913,10 +1914,10 @@ static void pc_machine_initfn(Object *obj)
"Enable vmport (pc & q35)",
&error_abort);
- pcms->enforce_aligned_dimm = true;
- object_property_add_bool(obj, PC_MACHINE_ENFORCE_ALIGNED_DIMM,
- pc_machine_get_aligned_dimm,
- NULL, &error_abort);
+ /* nvdimm is disabled on default. */
+ pcms->nvdimm = false;
+ object_property_add_bool(obj, PC_MACHINE_NVDIMM, pc_machine_get_nvdimm,
+ pc_machine_set_nvdimm, &error_abort);
}
static void pc_machine_reset(void)
@@ -1953,6 +1954,18 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
pcmc->get_hotplug_handler = mc->get_hotplug_handler;
+ pcmc->pci_enabled = true;
+ pcmc->has_acpi_build = true;
+ pcmc->rsdp_in_ram = true;
+ pcmc->smbios_defaults = true;
+ pcmc->smbios_uuid_encoded = true;
+ pcmc->gigabyte_align = true;
+ pcmc->has_reserved_memory = true;
+ pcmc->kvmclock_enabled = true;
+ pcmc->enforce_aligned_dimm = true;
+ /* BIOS ACPI tables: 128K. Other BIOS datastructures: less than 4K reported
+ * to be used at the moment, 32K should be enough for a while. */
+ pcmc->acpi_data_size = 0x20000 + 0x8000;
mc->get_hotplug_handler = pc_get_hotpug_handler;
mc->cpu_index_to_socket_id = pc_cpu_index_to_socket_id;
mc->default_boot_order = "cad";
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 319497e..438cdae 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -61,26 +61,12 @@ static const int ide_iobase[MAX_IDE_BUS] = { 0x1f0, 0x170 };
static const int ide_iobase2[MAX_IDE_BUS] = { 0x3f6, 0x376 };
static const int ide_irq[MAX_IDE_BUS] = { 14, 15 };
-static bool pci_enabled = true;
-static bool has_acpi_build = true;
-static bool rsdp_in_ram = true;
-static int legacy_acpi_table_size;
-static bool smbios_defaults = true;
-static bool smbios_legacy_mode;
-static bool smbios_uuid_encoded = true;
-/* Make sure that guest addresses aligned at 1Gbyte boundaries get mapped to
- * host addresses aligned at 1Gbyte boundaries. This way we can use 1GByte
- * pages in the host.
- */
-static bool gigabyte_align = true;
-static bool has_reserved_memory = true;
-static bool kvmclock_enabled = true;
-
/* PC hardware initialisation */
static void pc_init1(MachineState *machine,
const char *host_type, const char *pci_type)
{
PCMachineState *pcms = PC_MACHINE(machine);
+ PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
MemoryRegion *system_memory = get_system_memory();
MemoryRegion *system_io = get_system_io();
int i;
@@ -109,7 +95,7 @@ static void pc_init1(MachineState *machine,
* breaking migration.
*/
if (machine->ram_size >= 0xe0000000) {
- lowmem = gigabyte_align ? 0xc0000000 : 0xe0000000;
+ lowmem = pcmc->gigabyte_align ? 0xc0000000 : 0xe0000000;
} else {
lowmem = 0xe0000000;
}
@@ -142,11 +128,11 @@ static void pc_init1(MachineState *machine,
pc_cpus_init(pcms);
- if (kvm_enabled() && kvmclock_enabled) {
+ if (kvm_enabled() && pcmc->kvmclock_enabled) {
kvmclock_create();
}
- if (pci_enabled) {
+ if (pcmc->pci_enabled) {
pci_memory = g_new(MemoryRegion, 1);
memory_region_init(pci_memory, NULL, "pci", UINT64_MAX);
rom_memory = pci_memory;
@@ -157,18 +143,19 @@ static void pc_init1(MachineState *machine,
guest_info = pc_guest_info_init(pcms);
- guest_info->has_acpi_build = has_acpi_build;
- guest_info->legacy_acpi_table_size = legacy_acpi_table_size;
+ guest_info->has_acpi_build = pcmc->has_acpi_build;
+ guest_info->legacy_acpi_table_size = pcmc->legacy_acpi_table_size;
- guest_info->isapc_ram_fw = !pci_enabled;
- guest_info->has_reserved_memory = has_reserved_memory;
- guest_info->rsdp_in_ram = rsdp_in_ram;
+ guest_info->isapc_ram_fw = !pcmc->pci_enabled;
+ guest_info->has_reserved_memory = pcmc->has_reserved_memory;
+ guest_info->rsdp_in_ram = pcmc->rsdp_in_ram;
- if (smbios_defaults) {
+ if (pcmc->smbios_defaults) {
MachineClass *mc = MACHINE_GET_CLASS(machine);
/* These values are guest ABI, do not change */
smbios_set_defaults("QEMU", "Standard PC (i440FX + PIIX, 1996)",
- mc->name, smbios_legacy_mode, smbios_uuid_encoded,
+ mc->name, pcmc->smbios_legacy_mode,
+ pcmc->smbios_uuid_encoded,
SMBIOS_ENTRY_POINT_21);
}
@@ -183,14 +170,14 @@ static void pc_init1(MachineState *machine,
gsi_state = g_malloc0(sizeof(*gsi_state));
if (kvm_ioapic_in_kernel()) {
- kvm_pc_setup_irq_routing(pci_enabled);
+ kvm_pc_setup_irq_routing(pcmc->pci_enabled);
gsi = qemu_allocate_irqs(kvm_pc_gsi_handler, gsi_state,
GSI_NUM_PINS);
} else {
gsi = qemu_allocate_irqs(gsi_handler, gsi_state, GSI_NUM_PINS);
}
- if (pci_enabled) {
+ if (pcmc->pci_enabled) {
pci_bus = i440fx_init(host_type,
pci_type,
&i440fx_state, &piix3_devfn, &isa_bus, gsi,
@@ -198,6 +185,7 @@ static void pc_init1(MachineState *machine,
pcms->below_4g_mem_size,
pcms->above_4g_mem_size,
pci_memory, ram_memory);
+ pcms->bus = pci_bus;
} else {
pci_bus = NULL;
i440fx_state = NULL;
@@ -218,13 +206,13 @@ static void pc_init1(MachineState *machine,
gsi_state->i8259_irq[i] = i8259[i];
}
g_free(i8259);
- if (pci_enabled) {
+ if (pcmc->pci_enabled) {
ioapic_init_gsi(gsi_state, "i440fx");
}
pc_register_ferr_irq(gsi[13]);
- pc_vga_init(isa_bus, pci_enabled ? pci_bus : NULL);
+ pc_vga_init(isa_bus, pcmc->pci_enabled ? pci_bus : NULL);
assert(pcms->vmport != ON_OFF_AUTO__MAX);
if (pcms->vmport == ON_OFF_AUTO_AUTO) {
@@ -238,7 +226,7 @@ static void pc_init1(MachineState *machine,
pc_nic_init(isa_bus, pci_bus);
ide_drive_get(hd, ARRAY_SIZE(hd));
- if (pci_enabled) {
+ if (pcmc->pci_enabled) {
PCIDevice *dev;
if (xen_enabled()) {
dev = pci_piix3_xen_ide_init(pci_bus, hd, piix3_devfn + 1);
@@ -265,11 +253,11 @@ static void pc_init1(MachineState *machine,
pc_cmos_init(pcms, idebus[0], idebus[1], rtc_state);
- if (pci_enabled && usb_enabled()) {
+ if (pcmc->pci_enabled && usb_enabled()) {
pci_create_simple(pci_bus, piix3_devfn + 2, "piix3-usb-uhci");
}
- if (pci_enabled && acpi_enabled) {
+ if (pcmc->pci_enabled && acpi_enabled) {
DeviceState *piix4_pm;
I2CBus *smbus;
@@ -290,7 +278,7 @@ static void pc_init1(MachineState *machine,
PC_MACHINE_ACPI_DEVICE_PROP, &error_abort);
}
- if (pci_enabled) {
+ if (pcmc->pci_enabled) {
pc_pci_device_init(pci_bus);
}
}
@@ -316,60 +304,29 @@ static void pc_compat_2_3(MachineState *machine)
static void pc_compat_2_2(MachineState *machine)
{
pc_compat_2_3(machine);
- rsdp_in_ram = false;
machine->suppress_vmdesc = true;
}
static void pc_compat_2_1(MachineState *machine)
{
- PCMachineState *pcms = PC_MACHINE(machine);
-
pc_compat_2_2(machine);
- smbios_uuid_encoded = false;
x86_cpu_change_kvm_default("svm", NULL);
- pcms->enforce_aligned_dimm = false;
}
static void pc_compat_2_0(MachineState *machine)
{
pc_compat_2_1(machine);
- /* This value depends on the actual DSDT and SSDT compiled into
- * the source QEMU; unfortunately it depends on the binary and
- * not on the machine type, so we cannot make pc-i440fx-1.7 work on
- * both QEMU 1.7 and QEMU 2.0.
- *
- * Large variations cause migration to fail for more than one
- * consecutive value of the "-smp" maxcpus option.
- *
- * For small variations of the kind caused by different iasl versions,
- * the 4k rounding usually leaves slack. However, there could be still
- * one or two values that break. For QEMU 1.7 and QEMU 2.0 the
- * slack is only ~10 bytes before one "-smp maxcpus" value breaks!
- *
- * 6652 is valid for QEMU 2.0, the right value for pc-i440fx-1.7 on
- * QEMU 1.7 it is 6414. For RHEL/CentOS 7.0 it is 6418.
- */
- legacy_acpi_table_size = 6652;
- smbios_legacy_mode = true;
- has_reserved_memory = false;
- pc_set_legacy_acpi_data_size();
}
static void pc_compat_1_7(MachineState *machine)
{
pc_compat_2_0(machine);
- smbios_defaults = false;
- gigabyte_align = false;
- option_rom_has_mr = true;
- legacy_acpi_table_size = 6414;
x86_cpu_change_kvm_default("x2apic", NULL);
}
static void pc_compat_1_6(MachineState *machine)
{
pc_compat_1_7(machine);
- rom_file_has_mr = false;
- has_acpi_build = false;
}
static void pc_compat_1_5(MachineState *machine)
@@ -399,19 +356,10 @@ static void pc_compat_1_2(MachineState *machine)
static void pc_compat_0_13(MachineState *machine)
{
pc_compat_1_2(machine);
- kvmclock_enabled = false;
}
static void pc_init_isa(MachineState *machine)
{
- pci_enabled = false;
- has_acpi_build = false;
- smbios_defaults = false;
- gigabyte_align = false;
- smbios_legacy_mode = true;
- has_reserved_memory = false;
- option_rom_has_mr = true;
- rom_file_has_mr = false;
if (!machine->cpu_model) {
machine->cpu_model = "486";
}
@@ -470,13 +418,25 @@ static void pc_i440fx_machine_options(MachineClass *m)
m->default_display = "std";
}
-static void pc_i440fx_2_5_machine_options(MachineClass *m)
+static void pc_i440fx_2_6_machine_options(MachineClass *m)
{
pc_i440fx_machine_options(m);
m->alias = "pc";
m->is_default = 1;
}
+DEFINE_I440FX_MACHINE(v2_6, "pc-i440fx-2.6", NULL,
+ pc_i440fx_2_6_machine_options);
+
+
+static void pc_i440fx_2_5_machine_options(MachineClass *m)
+{
+ pc_i440fx_2_6_machine_options(m);
+ m->alias = NULL;
+ m->is_default = 0;
+ SET_MACHINE_COMPAT(m, PC_COMPAT_2_5);
+}
+
DEFINE_I440FX_MACHINE(v2_5, "pc-i440fx-2.5", NULL,
pc_i440fx_2_5_machine_options);
@@ -486,8 +446,6 @@ static void pc_i440fx_2_4_machine_options(MachineClass *m)
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_i440fx_2_5_machine_options(m);
m->hw_version = "2.4.0";
- m->alias = NULL;
- m->is_default = 0;
pcmc->broken_reserved_end = true;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_4);
}
@@ -500,8 +458,6 @@ static void pc_i440fx_2_3_machine_options(MachineClass *m)
{
pc_i440fx_2_4_machine_options(m);
m->hw_version = "2.3.0";
- m->alias = NULL;
- m->is_default = 0;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_3);
}
@@ -511,9 +467,11 @@ DEFINE_I440FX_MACHINE(v2_3, "pc-i440fx-2.3", pc_compat_2_3,
static void pc_i440fx_2_2_machine_options(MachineClass *m)
{
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_i440fx_2_3_machine_options(m);
m->hw_version = "2.2.0";
SET_MACHINE_COMPAT(m, PC_COMPAT_2_2);
+ pcmc->rsdp_in_ram = false;
}
DEFINE_I440FX_MACHINE(v2_2, "pc-i440fx-2.2", pc_compat_2_2,
@@ -522,10 +480,13 @@ DEFINE_I440FX_MACHINE(v2_2, "pc-i440fx-2.2", pc_compat_2_2,
static void pc_i440fx_2_1_machine_options(MachineClass *m)
{
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_i440fx_2_2_machine_options(m);
m->hw_version = "2.1.0";
m->default_display = NULL;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_1);
+ pcmc->smbios_uuid_encoded = false;
+ pcmc->enforce_aligned_dimm = false;
}
DEFINE_I440FX_MACHINE(v2_1, "pc-i440fx-2.1", pc_compat_2_1,
@@ -535,9 +496,30 @@ DEFINE_I440FX_MACHINE(v2_1, "pc-i440fx-2.1", pc_compat_2_1,
static void pc_i440fx_2_0_machine_options(MachineClass *m)
{
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_i440fx_2_1_machine_options(m);
m->hw_version = "2.0.0";
SET_MACHINE_COMPAT(m, PC_COMPAT_2_0);
+ pcmc->smbios_legacy_mode = true;
+ pcmc->has_reserved_memory = false;
+ /* This value depends on the actual DSDT and SSDT compiled into
+ * the source QEMU; unfortunately it depends on the binary and
+ * not on the machine type, so we cannot make pc-i440fx-1.7 work on
+ * both QEMU 1.7 and QEMU 2.0.
+ *
+ * Large variations cause migration to fail for more than one
+ * consecutive value of the "-smp" maxcpus option.
+ *
+ * For small variations of the kind caused by different iasl versions,
+ * the 4k rounding usually leaves slack. However, there could be still
+ * one or two values that break. For QEMU 1.7 and QEMU 2.0 the
+ * slack is only ~10 bytes before one "-smp maxcpus" value breaks!
+ *
+ * 6652 is valid for QEMU 2.0, the right value for pc-i440fx-1.7 on
+ * QEMU 1.7 it is 6414. For RHEL/CentOS 7.0 it is 6418.
+ */
+ pcmc->legacy_acpi_table_size = 6652;
+ pcmc->acpi_data_size = 0x10000;
}
DEFINE_I440FX_MACHINE(v2_0, "pc-i440fx-2.0", pc_compat_2_0,
@@ -546,10 +528,15 @@ DEFINE_I440FX_MACHINE(v2_0, "pc-i440fx-2.0", pc_compat_2_0,
static void pc_i440fx_1_7_machine_options(MachineClass *m)
{
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_i440fx_2_0_machine_options(m);
m->hw_version = "1.7.0";
m->default_machine_opts = NULL;
+ m->option_rom_has_mr = true;
SET_MACHINE_COMPAT(m, PC_COMPAT_1_7);
+ pcmc->smbios_defaults = false;
+ pcmc->gigabyte_align = false;
+ pcmc->legacy_acpi_table_size = 6414;
}
DEFINE_I440FX_MACHINE(v1_7, "pc-i440fx-1.7", pc_compat_1_7,
@@ -558,9 +545,12 @@ DEFINE_I440FX_MACHINE(v1_7, "pc-i440fx-1.7", pc_compat_1_7,
static void pc_i440fx_1_6_machine_options(MachineClass *m)
{
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_i440fx_1_7_machine_options(m);
m->hw_version = "1.6.0";
+ m->rom_file_has_mr = false;
SET_MACHINE_COMPAT(m, PC_COMPAT_1_6);
+ pcmc->has_acpi_build = false;
}
DEFINE_I440FX_MACHINE(v1_6, "pc-i440fx-1.6", pc_compat_1_6,
@@ -814,9 +804,11 @@ DEFINE_I440FX_MACHINE(v0_14, "pc-0.14", pc_compat_1_2,
static void pc_i440fx_0_13_machine_options(MachineClass *m)
{
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_i440fx_0_14_machine_options(m);
m->hw_version = "0.13";
SET_MACHINE_COMPAT(m, PC_COMPAT_0_13);
+ pcmc->kvmclock_enabled = false;
}
DEFINE_I440FX_MACHINE(v0_13, "pc-0.13", pc_compat_0_13,
@@ -1038,8 +1030,17 @@ void igd_passthrough_isa_bridge_create(PCIBus *bus, uint16_t gpu_dev_id)
static void isapc_machine_options(MachineClass *m)
{
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
m->desc = "ISA-only PC";
m->max_cpus = 1;
+ m->option_rom_has_mr = true;
+ m->rom_file_has_mr = false;
+ pcmc->pci_enabled = false;
+ pcmc->has_acpi_build = false;
+ pcmc->smbios_defaults = false;
+ pcmc->gigabyte_align = false;
+ pcmc->smbios_legacy_mode = true;
+ pcmc->has_reserved_memory = false;
}
DEFINE_PC_MACHINE(isapc, "isapc", pc_init_isa,
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index 9a12068..412b3cd 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -49,22 +49,11 @@
/* ICH9 AHCI has 6 ports */
#define MAX_SATA_PORTS 6
-static bool has_acpi_build = true;
-static bool rsdp_in_ram = true;
-static bool smbios_defaults = true;
-static bool smbios_legacy_mode;
-static bool smbios_uuid_encoded = true;
-/* Make sure that guest addresses aligned at 1Gbyte boundaries get mapped to
- * host addresses aligned at 1Gbyte boundaries. This way we can use 1GByte
- * pages in the host.
- */
-static bool gigabyte_align = true;
-static bool has_reserved_memory = true;
-
/* PC hardware initialisation */
static void pc_q35_init(MachineState *machine)
{
PCMachineState *pcms = PC_MACHINE(machine);
+ PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
Q35PCIHost *q35_host;
PCIHostState *phb;
PCIBus *host_bus;
@@ -76,7 +65,6 @@ static void pc_q35_init(MachineState *machine)
MemoryRegion *ram_memory;
GSIState *gsi_state;
ISABus *isa_bus;
- int pci_enabled = 1;
qemu_irq *gsi;
qemu_irq *i8259;
int i;
@@ -97,7 +85,7 @@ static void pc_q35_init(MachineState *machine)
* breaking migration.
*/
if (machine->ram_size >= 0xb0000000) {
- lowmem = gigabyte_align ? 0x80000000 : 0xb0000000;
+ lowmem = pcmc->gigabyte_align ? 0x80000000 : 0xb0000000;
} else {
lowmem = 0xb0000000;
}
@@ -129,12 +117,15 @@ static void pc_q35_init(MachineState *machine)
}
pc_cpus_init(pcms);
- pc_acpi_init("q35-acpi-dsdt.aml");
+ if (!pcmc->has_acpi_build) {
+ /* only machine types 1.7 & older need this */
+ pc_acpi_init("q35-acpi-dsdt.aml");
+ }
kvmclock_create();
/* pci enabled */
- if (pci_enabled) {
+ if (pcmc->pci_enabled) {
pci_memory = g_new(MemoryRegion, 1);
memory_region_init(pci_memory, NULL, "pci", UINT64_MAX);
rom_memory = pci_memory;
@@ -145,19 +136,20 @@ static void pc_q35_init(MachineState *machine)
guest_info = pc_guest_info_init(pcms);
guest_info->isapc_ram_fw = false;
- guest_info->has_acpi_build = has_acpi_build;
- guest_info->has_reserved_memory = has_reserved_memory;
- guest_info->rsdp_in_ram = rsdp_in_ram;
+ guest_info->has_acpi_build = pcmc->has_acpi_build;
+ guest_info->has_reserved_memory = pcmc->has_reserved_memory;
+ guest_info->rsdp_in_ram = pcmc->rsdp_in_ram;
/* Migration was not supported in 2.0 for Q35, so do not bother
* with this hack (see hw/i386/acpi-build.c).
*/
guest_info->legacy_acpi_table_size = 0;
- if (smbios_defaults) {
+ if (pcmc->smbios_defaults) {
/* These values are guest ABI, do not change */
smbios_set_defaults("QEMU", "Standard PC (Q35 + ICH9, 2009)",
- mc->name, smbios_legacy_mode, smbios_uuid_encoded,
+ mc->name, pcmc->smbios_legacy_mode,
+ pcmc->smbios_uuid_encoded,
SMBIOS_ENTRY_POINT_21);
}
@@ -170,7 +162,7 @@ static void pc_q35_init(MachineState *machine)
/* irq lines */
gsi_state = g_malloc0(sizeof(*gsi_state));
if (kvm_irqchip_in_kernel()) {
- kvm_pc_setup_irq_routing(pci_enabled);
+ kvm_pc_setup_irq_routing(pcmc->pci_enabled);
gsi = qemu_allocate_irqs(kvm_pc_gsi_handler, gsi_state,
GSI_NUM_PINS);
} else {
@@ -187,11 +179,11 @@ static void pc_q35_init(MachineState *machine)
q35_host->mch.address_space_io = get_system_io();
q35_host->mch.below_4g_mem_size = pcms->below_4g_mem_size;
q35_host->mch.above_4g_mem_size = pcms->above_4g_mem_size;
- q35_host->mch.guest_info = guest_info;
/* pci */
qdev_init_nofail(DEVICE(q35_host));
phb = PCI_HOST_BRIDGE(q35_host);
host_bus = phb->bus;
+ pcms->bus = phb->bus;
/* create ISA bus */
lpc = pci_create_simple_multifunction(host_bus, PCI_DEVFN(ICH9_LPC_DEV,
ICH9_LPC_FUNC), true,
@@ -227,7 +219,7 @@ static void pc_q35_init(MachineState *machine)
for (i = 0; i < ISA_NUM_IRQS; i++) {
gsi_state->i8259_irq[i] = i8259[i];
}
- if (pci_enabled) {
+ if (pcmc->pci_enabled) {
ioapic_init_gsi(gsi_state, "q35");
}
@@ -272,7 +264,7 @@ static void pc_q35_init(MachineState *machine)
/* the rest devices to which pci devfn is automatically assigned */
pc_vga_init(isa_bus, host_bus);
pc_nic_init(isa_bus, host_bus);
- if (pci_enabled) {
+ if (pcmc->pci_enabled) {
pc_pci_device_init(host_bus);
}
}
@@ -298,42 +290,29 @@ static void pc_compat_2_3(MachineState *machine)
static void pc_compat_2_2(MachineState *machine)
{
pc_compat_2_3(machine);
- rsdp_in_ram = false;
machine->suppress_vmdesc = true;
}
static void pc_compat_2_1(MachineState *machine)
{
- PCMachineState *pcms = PC_MACHINE(machine);
-
pc_compat_2_2(machine);
- pcms->enforce_aligned_dimm = false;
- smbios_uuid_encoded = false;
x86_cpu_change_kvm_default("svm", NULL);
}
static void pc_compat_2_0(MachineState *machine)
{
pc_compat_2_1(machine);
- smbios_legacy_mode = true;
- has_reserved_memory = false;
- pc_set_legacy_acpi_data_size();
}
static void pc_compat_1_7(MachineState *machine)
{
pc_compat_2_0(machine);
- smbios_defaults = false;
- gigabyte_align = false;
- option_rom_has_mr = true;
x86_cpu_change_kvm_default("x2apic", NULL);
}
static void pc_compat_1_6(MachineState *machine)
{
pc_compat_1_7(machine);
- rom_file_has_mr = false;
- has_acpi_build = false;
}
static void pc_compat_1_5(MachineState *machine)
@@ -370,12 +349,22 @@ static void pc_q35_machine_options(MachineClass *m)
m->no_tco = 0;
}
-static void pc_q35_2_5_machine_options(MachineClass *m)
+static void pc_q35_2_6_machine_options(MachineClass *m)
{
pc_q35_machine_options(m);
m->alias = "q35";
}
+DEFINE_Q35_MACHINE(v2_6, "pc-q35-2.6", NULL,
+ pc_q35_2_6_machine_options);
+
+static void pc_q35_2_5_machine_options(MachineClass *m)
+{
+ pc_q35_2_6_machine_options(m);
+ m->alias = NULL;
+ SET_MACHINE_COMPAT(m, PC_COMPAT_2_5);
+}
+
DEFINE_Q35_MACHINE(v2_5, "pc-q35-2.5", NULL,
pc_q35_2_5_machine_options);
@@ -384,7 +373,6 @@ static void pc_q35_2_4_machine_options(MachineClass *m)
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_q35_2_5_machine_options(m);
m->hw_version = "2.4.0";
- m->alias = NULL;
pcmc->broken_reserved_end = true;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_4);
}
@@ -399,7 +387,6 @@ static void pc_q35_2_3_machine_options(MachineClass *m)
m->hw_version = "2.3.0";
m->no_floppy = 0;
m->no_tco = 1;
- m->alias = NULL;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_3);
}
@@ -409,9 +396,11 @@ DEFINE_Q35_MACHINE(v2_3, "pc-q35-2.3", pc_compat_2_3,
static void pc_q35_2_2_machine_options(MachineClass *m)
{
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_q35_2_3_machine_options(m);
m->hw_version = "2.2.0";
SET_MACHINE_COMPAT(m, PC_COMPAT_2_2);
+ pcmc->rsdp_in_ram = false;
}
DEFINE_Q35_MACHINE(v2_2, "pc-q35-2.2", pc_compat_2_2,
@@ -420,10 +409,13 @@ DEFINE_Q35_MACHINE(v2_2, "pc-q35-2.2", pc_compat_2_2,
static void pc_q35_2_1_machine_options(MachineClass *m)
{
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_q35_2_2_machine_options(m);
m->hw_version = "2.1.0";
m->default_display = NULL;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_1);
+ pcmc->smbios_uuid_encoded = false;
+ pcmc->enforce_aligned_dimm = false;
}
DEFINE_Q35_MACHINE(v2_1, "pc-q35-2.1", pc_compat_2_1,
@@ -432,9 +424,13 @@ DEFINE_Q35_MACHINE(v2_1, "pc-q35-2.1", pc_compat_2_1,
static void pc_q35_2_0_machine_options(MachineClass *m)
{
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_q35_2_1_machine_options(m);
m->hw_version = "2.0.0";
SET_MACHINE_COMPAT(m, PC_COMPAT_2_0);
+ pcmc->has_reserved_memory = false;
+ pcmc->smbios_legacy_mode = true;
+ pcmc->acpi_data_size = 0x10000;
}
DEFINE_Q35_MACHINE(v2_0, "pc-q35-2.0", pc_compat_2_0,
@@ -443,10 +439,14 @@ DEFINE_Q35_MACHINE(v2_0, "pc-q35-2.0", pc_compat_2_0,
static void pc_q35_1_7_machine_options(MachineClass *m)
{
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_q35_2_0_machine_options(m);
m->hw_version = "1.7.0";
m->default_machine_opts = NULL;
+ m->option_rom_has_mr = true;
SET_MACHINE_COMPAT(m, PC_COMPAT_1_7);
+ pcmc->smbios_defaults = false;
+ pcmc->gigabyte_align = false;
}
DEFINE_Q35_MACHINE(v1_7, "pc-q35-1.7", pc_compat_1_7,
@@ -455,9 +455,12 @@ DEFINE_Q35_MACHINE(v1_7, "pc-q35-1.7", pc_compat_1_7,
static void pc_q35_1_6_machine_options(MachineClass *m)
{
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_q35_machine_options(m);
m->hw_version = "1.6.0";
+ m->rom_file_has_mr = false;
SET_MACHINE_COMPAT(m, PC_COMPAT_1_6);
+ pcmc->has_acpi_build = false;
}
DEFINE_Q35_MACHINE(v1_6, "pc-q35-1.6", pc_compat_1_6,
diff --git a/hw/ipmi/Makefile.objs b/hw/ipmi/Makefile.objs
new file mode 100644
index 0000000..a90318d
--- /dev/null
+++ b/hw/ipmi/Makefile.objs
@@ -0,0 +1,5 @@
+common-obj-$(CONFIG_IPMI) += ipmi.o
+common-obj-$(CONFIG_IPMI_LOCAL) += ipmi_bmc_sim.o
+common-obj-$(CONFIG_IPMI_LOCAL) += ipmi_bmc_extern.o
+common-obj-$(CONFIG_ISA_IPMI_KCS) += isa_ipmi_kcs.o
+common-obj-$(CONFIG_ISA_IPMI_BT) += isa_ipmi_bt.o
diff --git a/hw/ipmi/ipmi.c b/hw/ipmi/ipmi.c
new file mode 100644
index 0000000..52aba1e
--- /dev/null
+++ b/hw/ipmi/ipmi.c
@@ -0,0 +1,152 @@
+/*
+ * QEMU IPMI emulation
+ *
+ * Copyright (c) 2015 Corey Minyard, MontaVista Software, LLC
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/hw.h"
+#include "hw/ipmi/ipmi.h"
+#include "sysemu/sysemu.h"
+#include "qmp-commands.h"
+#include "qom/object_interfaces.h"
+#include "qapi/visitor.h"
+
+static int ipmi_do_hw_op(IPMIInterface *s, enum ipmi_op op, int checkonly)
+{
+ switch (op) {
+ case IPMI_RESET_CHASSIS:
+ if (checkonly) {
+ return 0;
+ }
+ qemu_system_reset_request();
+ return 0;
+
+ case IPMI_POWEROFF_CHASSIS:
+ if (checkonly) {
+ return 0;
+ }
+ qemu_system_powerdown_request();
+ return 0;
+
+ case IPMI_SEND_NMI:
+ if (checkonly) {
+ return 0;
+ }
+ qemu_mutex_lock_iothread();
+ qmp_inject_nmi(NULL);
+ qemu_mutex_unlock_iothread();
+ return 0;
+
+ case IPMI_POWERCYCLE_CHASSIS:
+ case IPMI_PULSE_DIAG_IRQ:
+ case IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP:
+ case IPMI_POWERON_CHASSIS:
+ default:
+ return IPMI_CC_COMMAND_NOT_SUPPORTED;
+ }
+}
+
+static void ipmi_interface_class_init(ObjectClass *class, void *data)
+{
+ IPMIInterfaceClass *ik = IPMI_INTERFACE_CLASS(class);
+
+ ik->do_hw_op = ipmi_do_hw_op;
+}
+
+static TypeInfo ipmi_interface_type_info = {
+ .name = TYPE_IPMI_INTERFACE,
+ .parent = TYPE_INTERFACE,
+ .class_size = sizeof(IPMIInterfaceClass),
+ .class_init = ipmi_interface_class_init,
+};
+
+static void isa_ipmi_bmc_check(Object *obj, const char *name,
+ Object *val, Error **errp)
+{
+ IPMIBmc *bmc = IPMI_BMC(val);
+
+    if (bmc->intf) {
+        error_setg(errp, "BMC object is already in use");
+    }
+}
+
+void ipmi_bmc_find_and_link(Object *obj, Object **bmc)
+{
+ object_property_add_link(obj, "bmc", TYPE_IPMI_BMC, bmc,
+ isa_ipmi_bmc_check,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE,
+ &error_abort);
+}
+
+static Property ipmi_bmc_properties[] = {
+ DEFINE_PROP_UINT8("slave_addr", IPMIBmc, slave_addr, 0x20),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void bmc_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->props = ipmi_bmc_properties;
+}
+
+static TypeInfo ipmi_bmc_type_info = {
+ .name = TYPE_IPMI_BMC,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(IPMIBmc),
+ .abstract = true,
+ .class_size = sizeof(IPMIBmcClass),
+ .class_init = bmc_class_init,
+};
+
+static void ipmi_register_types(void)
+{
+ type_register_static(&ipmi_interface_type_info);
+ type_register_static(&ipmi_bmc_type_info);
+}
+
+type_init(ipmi_register_types)
+
+static IPMIFwInfo *ipmi_fw_info;
+static unsigned int ipmi_fw_info_len;
+
+static uint32_t current_uuid = 1;
+
+void ipmi_add_fwinfo(IPMIFwInfo *info, Error **errp)
+{
+ info->uuid = current_uuid++;
+ ipmi_fw_info = g_realloc(ipmi_fw_info,
+ sizeof(*ipmi_fw_info) * (ipmi_fw_info_len + 1));
+    ipmi_fw_info[ipmi_fw_info_len] = *info;
+    ipmi_fw_info_len++;
+}
+
+IPMIFwInfo *ipmi_first_fwinfo(void)
+{
+ return ipmi_fw_info;
+}
+
+IPMIFwInfo *ipmi_next_fwinfo(IPMIFwInfo *current)
+{
+ current++;
+ if (current >= &ipmi_fw_info[ipmi_fw_info_len]) {
+ return NULL;
+ }
+ return current;
+}
diff --git a/hw/ipmi/ipmi_bmc_extern.c b/hw/ipmi/ipmi_bmc_extern.c
new file mode 100644
index 0000000..56073b3
--- /dev/null
+++ b/hw/ipmi/ipmi_bmc_extern.c
@@ -0,0 +1,518 @@
+/*
+ * IPMI BMC external connection
+ *
+ * Copyright (c) 2015 Corey Minyard, MontaVista Software, LLC
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/*
+ * This is designed to connect with OpenIPMI's lanserv serial interface
+ * using the "VM" connection type. See that for details.
+ */
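+/*
+ * One plausible wiring on the QEMU command line (an illustrative sketch
+ * only; the "isa-ipmi-kcs" device and its "bmc" link property come from the
+ * companion interface patches in this series):
+ *
+ *   -chardev socket,id=ipmichr0,host=localhost,port=9002
+ *   -device ipmi-bmc-extern,chardev=ipmichr0,id=bmc0
+ *   -device isa-ipmi-kcs,bmc=bmc0
+ */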
+
+#include <stdint.h>
+#include "qemu/timer.h"
+#include "sysemu/char.h"
+#include "sysemu/sysemu.h"
+#include "hw/ipmi/ipmi.h"
+
+#define VM_MSG_CHAR 0xA0 /* Marks end of message */
+#define VM_CMD_CHAR 0xA1 /* Marks end of a command */
+#define VM_ESCAPE_CHAR     0xAA /* Escape: clear bit 4 of the next byte */
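+/*
+ * Escaping example (see addchar() and receive() below): a data byte that
+ * collides with one of the characters above is sent as VM_ESCAPE_CHAR
+ * followed by the byte with bit 4 set, so 0xA0 travels as 0xAA 0xB0 and the
+ * receiver clears bit 4 again.
+ */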
+
+#define VM_PROTOCOL_VERSION 1
+#define VM_CMD_VERSION 0xff /* A version number byte follows */
+#define VM_CMD_NOATTN 0x00
+#define VM_CMD_ATTN 0x01
+#define VM_CMD_ATTN_IRQ 0x02
+#define VM_CMD_POWEROFF 0x03
+#define VM_CMD_RESET 0x04
+#define VM_CMD_ENABLE_IRQ 0x05 /* Enable/disable the messaging irq */
+#define VM_CMD_DISABLE_IRQ 0x06
+#define VM_CMD_SEND_NMI 0x07
+#define VM_CMD_CAPABILITIES 0x08
+#define VM_CAPABILITIES_POWER 0x01
+#define VM_CAPABILITIES_RESET 0x02
+#define VM_CAPABILITIES_IRQ 0x04
+#define VM_CAPABILITIES_NMI 0x08
+#define VM_CAPABILITIES_ATTN 0x10
+#define VM_CMD_FORCEOFF 0x09
+
+#define TYPE_IPMI_BMC_EXTERN "ipmi-bmc-extern"
+#define IPMI_BMC_EXTERN(obj) OBJECT_CHECK(IPMIBmcExtern, (obj), \
+ TYPE_IPMI_BMC_EXTERN)
+typedef struct IPMIBmcExtern {
+ IPMIBmc parent;
+
+ CharDriverState *chr;
+
+ bool connected;
+
+ unsigned char inbuf[MAX_IPMI_MSG_SIZE + 2];
+ unsigned int inpos;
+ bool in_escape;
+ bool in_too_many;
+ bool waiting_rsp;
+ bool sending_cmd;
+
+ unsigned char outbuf[(MAX_IPMI_MSG_SIZE + 2) * 2 + 1];
+ unsigned int outpos;
+ unsigned int outlen;
+
+ struct QEMUTimer *extern_timer;
+
+ /* A reset event is pending to be sent upstream. */
+ bool send_reset;
+} IPMIBmcExtern;
+
+static int can_receive(void *opaque);
+static void receive(void *opaque, const uint8_t *buf, int size);
+static void chr_event(void *opaque, int event);
+
+static unsigned char
+ipmb_checksum(const unsigned char *data, int size, unsigned char start)
+{
+ unsigned char csum = start;
+
+ for (; size > 0; size--, data++) {
+ csum += *data;
+ }
+ return csum;
+}
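+
+/*
+ * Illustration: this is the standard additive IPMB checksum, so the sender
+ * appends a byte that makes everything sum to zero modulo 256.  For the two
+ * bytes 0x20 and 0x18 the appended checksum would be -(0x20 + 0x18) = 0xc8.
+ */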
+
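+/*
+ * Push as much of outbuf as the chardev accepts; if the write was short,
+ * retry from a timer in 10ms, and once the buffer drains queue any pending
+ * reset command and arm the response timeout.
+ */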
+static void continue_send(IPMIBmcExtern *ibe)
+{
+ if (ibe->outlen == 0) {
+ goto check_reset;
+ }
+ send:
+ ibe->outpos += qemu_chr_fe_write(ibe->chr, ibe->outbuf + ibe->outpos,
+ ibe->outlen - ibe->outpos);
+ if (ibe->outpos < ibe->outlen) {
+        /* Not fully transmitted, try again in 10ms */
+ timer_mod_ns(ibe->extern_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 10000000);
+ } else {
+ /* Sent */
+ ibe->outlen = 0;
+ ibe->outpos = 0;
+ if (!ibe->sending_cmd) {
+ ibe->waiting_rsp = true;
+ } else {
+ ibe->sending_cmd = false;
+ }
+ check_reset:
+ if (ibe->connected && ibe->send_reset) {
+ /* Send the reset */
+ ibe->outbuf[0] = VM_CMD_RESET;
+ ibe->outbuf[1] = VM_CMD_CHAR;
+ ibe->outlen = 2;
+ ibe->outpos = 0;
+ ibe->send_reset = false;
+ ibe->sending_cmd = true;
+ goto send;
+ }
+
+ if (ibe->waiting_rsp) {
+ /* Make sure we get a response within 4 seconds. */
+ timer_mod_ns(ibe->extern_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 4000000000ULL);
+ }
+ }
+ return;
+}
+
+static void extern_timeout(void *opaque)
+{
+ IPMIBmcExtern *ibe = opaque;
+ IPMIInterface *s = ibe->parent.intf;
+
+ if (ibe->connected) {
+ if (ibe->waiting_rsp && (ibe->outlen == 0)) {
+ IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
+ /* The message response timed out, return an error. */
+ ibe->waiting_rsp = false;
+ ibe->inbuf[1] = ibe->outbuf[1] | 0x04;
+ ibe->inbuf[2] = ibe->outbuf[2];
+ ibe->inbuf[3] = IPMI_CC_TIMEOUT;
+ k->handle_rsp(s, ibe->outbuf[0], ibe->inbuf + 1, 3);
+ } else {
+ continue_send(ibe);
+ }
+ }
+}
+
+static void addchar(IPMIBmcExtern *ibe, unsigned char ch)
+{
+ switch (ch) {
+ case VM_MSG_CHAR:
+ case VM_CMD_CHAR:
+ case VM_ESCAPE_CHAR:
+ ibe->outbuf[ibe->outlen] = VM_ESCAPE_CHAR;
+ ibe->outlen++;
+ ch |= 0x10;
+ /* No break */
+
+ default:
+ ibe->outbuf[ibe->outlen] = ch;
+ ibe->outlen++;
+ }
+}
+
+static void ipmi_bmc_extern_handle_command(IPMIBmc *b,
+ uint8_t *cmd, unsigned int cmd_len,
+ unsigned int max_cmd_len,
+ uint8_t msg_id)
+{
+ IPMIBmcExtern *ibe = IPMI_BMC_EXTERN(b);
+ IPMIInterface *s = ibe->parent.intf;
+ uint8_t err = 0, csum;
+ unsigned int i;
+
+ if (ibe->outlen) {
+ /* We already have a command queued. Shouldn't ever happen. */
+ fprintf(stderr, "IPMI KCS: Got command when not finished with the"
+                " previous command\n");
+ abort();
+ }
+
+ /* If it's too short or it was truncated, return an error. */
+ if (cmd_len < 2) {
+ err = IPMI_CC_REQUEST_DATA_LENGTH_INVALID;
+ } else if ((cmd_len > max_cmd_len) || (cmd_len > MAX_IPMI_MSG_SIZE)) {
+ err = IPMI_CC_REQUEST_DATA_TRUNCATED;
+ } else if (!ibe->connected) {
+ err = IPMI_CC_BMC_INIT_IN_PROGRESS;
+ }
+ if (err) {
+ IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
+ unsigned char rsp[3];
+ rsp[0] = cmd[0] | 0x04;
+ rsp[1] = cmd[1];
+ rsp[2] = err;
+ ibe->waiting_rsp = false;
+ k->handle_rsp(s, msg_id, rsp, 3);
+ goto out;
+ }
+
+ addchar(ibe, msg_id);
+ for (i = 0; i < cmd_len; i++) {
+ addchar(ibe, cmd[i]);
+ }
+ csum = ipmb_checksum(&msg_id, 1, 0);
+ addchar(ibe, -ipmb_checksum(cmd, cmd_len, csum));
+
+ ibe->outbuf[ibe->outlen] = VM_MSG_CHAR;
+ ibe->outlen++;
+
+ /* Start the transmit */
+ continue_send(ibe);
+
+ out:
+ return;
+}
+
+static void handle_hw_op(IPMIBmcExtern *ibe, unsigned char hw_op)
+{
+ IPMIInterface *s = ibe->parent.intf;
+ IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
+
+ switch (hw_op) {
+ case VM_CMD_VERSION:
+ /* We only support one version at this time. */
+ break;
+
+ case VM_CMD_NOATTN:
+ k->set_atn(s, 0, 0);
+ break;
+
+ case VM_CMD_ATTN:
+ k->set_atn(s, 1, 0);
+ break;
+
+ case VM_CMD_ATTN_IRQ:
+ k->set_atn(s, 1, 1);
+ break;
+
+ case VM_CMD_POWEROFF:
+ k->do_hw_op(s, IPMI_POWEROFF_CHASSIS, 0);
+ break;
+
+ case VM_CMD_RESET:
+ k->do_hw_op(s, IPMI_RESET_CHASSIS, 0);
+ break;
+
+ case VM_CMD_ENABLE_IRQ:
+ k->set_irq_enable(s, 1);
+ break;
+
+ case VM_CMD_DISABLE_IRQ:
+ k->set_irq_enable(s, 0);
+ break;
+
+ case VM_CMD_SEND_NMI:
+ k->do_hw_op(s, IPMI_SEND_NMI, 0);
+ break;
+
+ case VM_CMD_FORCEOFF:
+ qemu_system_shutdown_request();
+ break;
+ }
+}
+
+static void handle_msg(IPMIBmcExtern *ibe)
+{
+ IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(ibe->parent.intf);
+
+ if (ibe->in_escape) {
+ ipmi_debug("msg escape not ended\n");
+ return;
+ }
+ if (ibe->inpos < 5) {
+ ipmi_debug("msg too short\n");
+ return;
+ }
+ if (ibe->in_too_many) {
+ ibe->inbuf[3] = IPMI_CC_REQUEST_DATA_TRUNCATED;
+ ibe->inpos = 4;
+ } else if (ipmb_checksum(ibe->inbuf, ibe->inpos, 0) != 0) {
+ ipmi_debug("msg checksum failure\n");
+ return;
+ } else {
+        ibe->inpos--; /* Remove checksum */
+ }
+
+ timer_del(ibe->extern_timer);
+ ibe->waiting_rsp = false;
+ k->handle_rsp(ibe->parent.intf, ibe->inbuf[0], ibe->inbuf + 1, ibe->inpos - 1);
+}
+
+static int can_receive(void *opaque)
+{
+ return 1;
+}
+
+static void receive(void *opaque, const uint8_t *buf, int size)
+{
+ IPMIBmcExtern *ibe = opaque;
+ int i;
+ unsigned char hw_op;
+
+ for (i = 0; i < size; i++) {
+ unsigned char ch = buf[i];
+
+ switch (ch) {
+ case VM_MSG_CHAR:
+ handle_msg(ibe);
+ ibe->in_too_many = false;
+ ibe->inpos = 0;
+ break;
+
+ case VM_CMD_CHAR:
+ if (ibe->in_too_many) {
+ ipmi_debug("cmd in too many\n");
+ ibe->in_too_many = false;
+ ibe->inpos = 0;
+ break;
+ }
+ if (ibe->in_escape) {
+ ipmi_debug("cmd in escape\n");
+ ibe->in_too_many = false;
+ ibe->inpos = 0;
+ ibe->in_escape = false;
+ break;
+ }
+ ibe->in_too_many = false;
+ if (ibe->inpos < 1) {
+ break;
+ }
+ hw_op = ibe->inbuf[0];
+ ibe->inpos = 0;
+ goto out_hw_op;
+ break;
+
+ case VM_ESCAPE_CHAR:
+ ibe->in_escape = true;
+ break;
+
+ default:
+ if (ibe->in_escape) {
+ ch &= ~0x10;
+ ibe->in_escape = false;
+ }
+ if (ibe->in_too_many) {
+ break;
+ }
+ if (ibe->inpos >= sizeof(ibe->inbuf)) {
+ ibe->in_too_many = true;
+ break;
+ }
+ ibe->inbuf[ibe->inpos] = ch;
+ ibe->inpos++;
+ break;
+ }
+ }
+ return;
+
+ out_hw_op:
+ handle_hw_op(ibe, hw_op);
+}
+
+static void chr_event(void *opaque, int event)
+{
+ IPMIBmcExtern *ibe = opaque;
+ IPMIInterface *s = ibe->parent.intf;
+ IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
+ unsigned char v;
+
+ switch (event) {
+ case CHR_EVENT_OPENED:
+ ibe->connected = true;
+ ibe->outpos = 0;
+ ibe->outlen = 0;
+ addchar(ibe, VM_CMD_VERSION);
+ addchar(ibe, VM_PROTOCOL_VERSION);
+ ibe->outbuf[ibe->outlen] = VM_CMD_CHAR;
+ ibe->outlen++;
+ addchar(ibe, VM_CMD_CAPABILITIES);
+ v = VM_CAPABILITIES_IRQ | VM_CAPABILITIES_ATTN;
+ if (k->do_hw_op(ibe->parent.intf, IPMI_POWEROFF_CHASSIS, 1) == 0) {
+ v |= VM_CAPABILITIES_POWER;
+ }
+ if (k->do_hw_op(ibe->parent.intf, IPMI_RESET_CHASSIS, 1) == 0) {
+ v |= VM_CAPABILITIES_RESET;
+ }
+ if (k->do_hw_op(ibe->parent.intf, IPMI_SEND_NMI, 1) == 0) {
+ v |= VM_CAPABILITIES_NMI;
+ }
+ addchar(ibe, v);
+ ibe->outbuf[ibe->outlen] = VM_CMD_CHAR;
+ ibe->outlen++;
+ ibe->sending_cmd = false;
+ continue_send(ibe);
+ break;
+
+ case CHR_EVENT_CLOSED:
+ if (!ibe->connected) {
+ return;
+ }
+ ibe->connected = false;
+ if (ibe->waiting_rsp) {
+ ibe->waiting_rsp = false;
+ ibe->inbuf[1] = ibe->outbuf[1] | 0x04;
+ ibe->inbuf[2] = ibe->outbuf[2];
+ ibe->inbuf[3] = IPMI_CC_BMC_INIT_IN_PROGRESS;
+ k->handle_rsp(s, ibe->outbuf[0], ibe->inbuf + 1, 3);
+ }
+ break;
+ }
+}
+
+static void ipmi_bmc_extern_handle_reset(IPMIBmc *b)
+{
+ IPMIBmcExtern *ibe = IPMI_BMC_EXTERN(b);
+
+ ibe->send_reset = true;
+ continue_send(ibe);
+}
+
+static void ipmi_bmc_extern_realize(DeviceState *dev, Error **errp)
+{
+ IPMIBmcExtern *ibe = IPMI_BMC_EXTERN(dev);
+
+ if (!ibe->chr) {
+ error_setg(errp, "IPMI external bmc requires chardev attribute");
+ return;
+ }
+
+ qemu_chr_add_handlers(ibe->chr, can_receive, receive, chr_event, ibe);
+}
+
+static int ipmi_bmc_extern_post_migrate(void *opaque, int version_id)
+{
+ IPMIBmcExtern *ibe = opaque;
+
+ /*
+     * We don't directly restore waiting_rsp.  Instead, we return an
+ * error on the interface if a response was being waited for.
+ */
+ if (ibe->waiting_rsp) {
+ IPMIInterface *ii = ibe->parent.intf;
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+
+ ibe->waiting_rsp = false;
+ ibe->inbuf[1] = ibe->outbuf[1] | 0x04;
+ ibe->inbuf[2] = ibe->outbuf[2];
+ ibe->inbuf[3] = IPMI_CC_BMC_INIT_IN_PROGRESS;
+ iic->handle_rsp(ii, ibe->outbuf[0], ibe->inbuf + 1, 3);
+ }
+ return 0;
+}
+
+static const VMStateDescription vmstate_ipmi_bmc_extern = {
+ .name = TYPE_IPMI_BMC_EXTERN,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .post_load = ipmi_bmc_extern_post_migrate,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL(send_reset, IPMIBmcExtern),
+ VMSTATE_BOOL(waiting_rsp, IPMIBmcExtern),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void ipmi_bmc_extern_init(Object *obj)
+{
+ IPMIBmcExtern *ibe = IPMI_BMC_EXTERN(obj);
+
+ ibe->extern_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, extern_timeout, ibe);
+ vmstate_register(NULL, 0, &vmstate_ipmi_bmc_extern, ibe);
+}
+
+static Property ipmi_bmc_extern_properties[] = {
+ DEFINE_PROP_CHR("chardev", IPMIBmcExtern, chr),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void ipmi_bmc_extern_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ IPMIBmcClass *bk = IPMI_BMC_CLASS(oc);
+
+ bk->handle_command = ipmi_bmc_extern_handle_command;
+ bk->handle_reset = ipmi_bmc_extern_handle_reset;
+ dc->realize = ipmi_bmc_extern_realize;
+ dc->props = ipmi_bmc_extern_properties;
+}
+
+static const TypeInfo ipmi_bmc_extern_type = {
+ .name = TYPE_IPMI_BMC_EXTERN,
+ .parent = TYPE_IPMI_BMC,
+ .instance_size = sizeof(IPMIBmcExtern),
+ .instance_init = ipmi_bmc_extern_init,
+ .class_init = ipmi_bmc_extern_class_init,
+};
+
+static void ipmi_bmc_extern_register_types(void)
+{
+ type_register_static(&ipmi_bmc_extern_type);
+}
+
+type_init(ipmi_bmc_extern_register_types)
diff --git a/hw/ipmi/ipmi_bmc_sim.c b/hw/ipmi/ipmi_bmc_sim.c
new file mode 100644
index 0000000..0a59e53
--- /dev/null
+++ b/hw/ipmi/ipmi_bmc_sim.c
@@ -0,0 +1,1756 @@
+/*
+ * IPMI BMC emulation
+ *
+ * Copyright (c) 2015 Corey Minyard, MontaVista Software, LLC
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include "qemu/timer.h"
+#include "hw/ipmi/ipmi.h"
+#include "qemu/error-report.h"
+
+#define IPMI_NETFN_CHASSIS 0x00
+#define IPMI_NETFN_CHASSIS_MAXCMD 0x03
+
+#define IPMI_CMD_GET_CHASSIS_CAPABILITIES 0x00
+#define IPMI_CMD_GET_CHASSIS_STATUS 0x01
+#define IPMI_CMD_CHASSIS_CONTROL 0x02
+
+#define IPMI_NETFN_SENSOR_EVENT 0x04
+#define IPMI_NETFN_SENSOR_EVENT_MAXCMD 0x2e
+
+#define IPMI_CMD_SET_SENSOR_EVT_ENABLE 0x28
+#define IPMI_CMD_GET_SENSOR_EVT_ENABLE 0x29
+#define IPMI_CMD_REARM_SENSOR_EVTS 0x2a
+#define IPMI_CMD_GET_SENSOR_EVT_STATUS 0x2b
+#define IPMI_CMD_GET_SENSOR_READING 0x2d
+
+/* #define IPMI_NETFN_APP 0x06 In ipmi.h */
+#define IPMI_NETFN_APP_MAXCMD 0x36
+
+#define IPMI_CMD_GET_DEVICE_ID 0x01
+#define IPMI_CMD_COLD_RESET 0x02
+#define IPMI_CMD_WARM_RESET 0x03
+#define IPMI_CMD_RESET_WATCHDOG_TIMER 0x22
+#define IPMI_CMD_SET_WATCHDOG_TIMER 0x24
+#define IPMI_CMD_GET_WATCHDOG_TIMER 0x25
+#define IPMI_CMD_SET_BMC_GLOBAL_ENABLES 0x2e
+#define IPMI_CMD_GET_BMC_GLOBAL_ENABLES 0x2f
+#define IPMI_CMD_CLR_MSG_FLAGS 0x30
+#define IPMI_CMD_GET_MSG_FLAGS 0x31
+#define IPMI_CMD_GET_MSG 0x33
+#define IPMI_CMD_SEND_MSG 0x34
+#define IPMI_CMD_READ_EVT_MSG_BUF 0x35
+
+#define IPMI_NETFN_STORAGE 0x0a
+#define IPMI_NETFN_STORAGE_MAXCMD 0x4a
+
+#define IPMI_CMD_GET_SDR_REP_INFO 0x20
+#define IPMI_CMD_GET_SDR_REP_ALLOC_INFO 0x21
+#define IPMI_CMD_RESERVE_SDR_REP 0x22
+#define IPMI_CMD_GET_SDR 0x23
+#define IPMI_CMD_ADD_SDR 0x24
+#define IPMI_CMD_PARTIAL_ADD_SDR 0x25
+#define IPMI_CMD_DELETE_SDR 0x26
+#define IPMI_CMD_CLEAR_SDR_REP 0x27
+#define IPMI_CMD_GET_SDR_REP_TIME 0x28
+#define IPMI_CMD_SET_SDR_REP_TIME 0x29
+#define IPMI_CMD_ENTER_SDR_REP_UPD_MODE 0x2A
+#define IPMI_CMD_EXIT_SDR_REP_UPD_MODE 0x2B
+#define IPMI_CMD_RUN_INIT_AGENT 0x2C
+#define IPMI_CMD_GET_SEL_INFO 0x40
+#define IPMI_CMD_GET_SEL_ALLOC_INFO 0x41
+#define IPMI_CMD_RESERVE_SEL 0x42
+#define IPMI_CMD_GET_SEL_ENTRY 0x43
+#define IPMI_CMD_ADD_SEL_ENTRY 0x44
+#define IPMI_CMD_PARTIAL_ADD_SEL_ENTRY 0x45
+#define IPMI_CMD_DELETE_SEL_ENTRY 0x46
+#define IPMI_CMD_CLEAR_SEL 0x47
+#define IPMI_CMD_GET_SEL_TIME 0x48
+#define IPMI_CMD_SET_SEL_TIME 0x49
+
+
+/* Same as a timespec struct. */
+struct ipmi_time {
+ long tv_sec;
+ long tv_nsec;
+};
+
+#define MAX_SEL_SIZE 128
+
+typedef struct IPMISel {
+ uint8_t sel[MAX_SEL_SIZE][16];
+ unsigned int next_free;
+ long time_offset;
+ uint16_t reservation;
+ uint8_t last_addition[4];
+ uint8_t last_clear[4];
+ uint8_t overflow;
+} IPMISel;
+
+#define MAX_SDR_SIZE 16384
+
+typedef struct IPMISdr {
+ uint8_t sdr[MAX_SDR_SIZE];
+ unsigned int next_free;
+ uint16_t next_rec_id;
+ uint16_t reservation;
+ uint8_t last_addition[4];
+ uint8_t last_clear[4];
+ uint8_t overflow;
+} IPMISdr;
+
+typedef struct IPMISensor {
+ uint8_t status;
+ uint8_t reading;
+ uint16_t states_suppt;
+ uint16_t assert_suppt;
+ uint16_t deassert_suppt;
+ uint16_t states;
+ uint16_t assert_states;
+ uint16_t deassert_states;
+ uint16_t assert_enable;
+ uint16_t deassert_enable;
+ uint8_t sensor_type;
+ uint8_t evt_reading_type_code;
+} IPMISensor;
+#define IPMI_SENSOR_GET_PRESENT(s) ((s)->status & 0x01)
+#define IPMI_SENSOR_SET_PRESENT(s, v) ((s)->status = (s->status & ~0x01) | \
+ !!(v))
+#define IPMI_SENSOR_GET_SCAN_ON(s) ((s)->status & 0x40)
+#define IPMI_SENSOR_SET_SCAN_ON(s, v) ((s)->status = (s->status & ~0x40) | \
+ ((!!(v)) << 6))
+#define IPMI_SENSOR_GET_EVENTS_ON(s) ((s)->status & 0x80)
+#define IPMI_SENSOR_SET_EVENTS_ON(s, v) ((s)->status = (s->status & ~0x80) | \
+ ((!!(v)) << 7))
+#define IPMI_SENSOR_GET_RET_STATUS(s) ((s)->status & 0xc0)
+#define IPMI_SENSOR_SET_RET_STATUS(s, v) ((s)->status = (s->status & ~0xc0) | \
+ (v & 0xc0))
+#define IPMI_SENSOR_IS_DISCRETE(s) ((s)->evt_reading_type_code != 1)
+
+#define MAX_SENSORS 20
+#define IPMI_WATCHDOG_SENSOR 0
+
+typedef struct IPMIBmcSim IPMIBmcSim;
+
+#define MAX_NETFNS 64
+typedef void (*IPMICmdHandler)(IPMIBmcSim *s,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len);
+typedef struct IPMINetfn {
+ unsigned int cmd_nums;
+ const IPMICmdHandler *cmd_handlers;
+} IPMINetfn;
+
+typedef struct IPMIRcvBufEntry {
+ QTAILQ_ENTRY(IPMIRcvBufEntry) entry;
+ uint8_t len;
+ uint8_t buf[MAX_IPMI_MSG_SIZE];
+} IPMIRcvBufEntry;
+
+#define TYPE_IPMI_BMC_SIMULATOR "ipmi-bmc-sim"
+#define IPMI_BMC_SIMULATOR(obj) OBJECT_CHECK(IPMIBmcSim, (obj), \
+ TYPE_IPMI_BMC_SIMULATOR)
+struct IPMIBmcSim {
+ IPMIBmc parent;
+
+ QEMUTimer *timer;
+
+ uint8_t bmc_global_enables;
+ uint8_t msg_flags;
+
+ bool watchdog_initialized;
+ uint8_t watchdog_use;
+ uint8_t watchdog_action;
+ uint8_t watchdog_pretimeout; /* In seconds */
+ bool watchdog_expired;
+ uint16_t watchdog_timeout; /* in 100's of milliseconds */
+
+ bool watchdog_running;
+ bool watchdog_preaction_ran;
+ int64_t watchdog_expiry;
+
+ uint8_t device_id;
+ uint8_t ipmi_version;
+ uint8_t device_rev;
+ uint8_t fwrev1;
+ uint8_t fwrev2;
+ uint8_t mfg_id[3];
+ uint8_t product_id[2];
+
+ IPMISel sel;
+ IPMISdr sdr;
+ IPMISensor sensors[MAX_SENSORS];
+
+ /* Odd netfns are for responses, so we only need the even ones. */
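+    /* e.g. IPMI_NETFN_APP (0x06) is stored at netfns[0x06 / 2] == netfns[3] */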
+ const IPMINetfn *netfns[MAX_NETFNS / 2];
+
+ QemuMutex lock;
+ /* We allow one event in the buffer */
+ uint8_t evtbuf[16];
+
+ QTAILQ_HEAD(, IPMIRcvBufEntry) rcvbufs;
+};
+
+#define IPMI_BMC_MSG_FLAG_WATCHDOG_TIMEOUT_MASK (1 << 3)
+#define IPMI_BMC_MSG_FLAG_EVT_BUF_FULL (1 << 1)
+#define IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE (1 << 0)
+#define IPMI_BMC_MSG_FLAG_WATCHDOG_TIMEOUT_MASK_SET(s) \
+ (IPMI_BMC_MSG_FLAG_WATCHDOG_TIMEOUT_MASK & (s)->msg_flags)
+#define IPMI_BMC_MSG_FLAG_EVT_BUF_FULL_SET(s) \
+ (IPMI_BMC_MSG_FLAG_EVT_BUF_FULL & (s)->msg_flags)
+#define IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE_SET(s) \
+ (IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE & (s)->msg_flags)
+
+#define IPMI_BMC_RCV_MSG_QUEUE_INT_BIT 0
+#define IPMI_BMC_EVBUF_FULL_INT_BIT 1
+#define IPMI_BMC_EVENT_MSG_BUF_BIT 2
+#define IPMI_BMC_EVENT_LOG_BIT 3
+#define IPMI_BMC_MSG_INTS_ON(s) ((s)->bmc_global_enables & \
+ (1 << IPMI_BMC_RCV_MSG_QUEUE_INT_BIT))
+#define IPMI_BMC_EVBUF_FULL_INT_ENABLED(s) ((s)->bmc_global_enables & \
+ (1 << IPMI_BMC_EVBUF_FULL_INT_BIT))
+#define IPMI_BMC_EVENT_LOG_ENABLED(s) ((s)->bmc_global_enables & \
+ (1 << IPMI_BMC_EVENT_LOG_BIT))
+#define IPMI_BMC_EVENT_MSG_BUF_ENABLED(s) ((s)->bmc_global_enables & \
+ (1 << IPMI_BMC_EVENT_MSG_BUF_BIT))
+
+#define IPMI_BMC_WATCHDOG_USE_MASK 0xc7
+#define IPMI_BMC_WATCHDOG_ACTION_MASK 0x77
+#define IPMI_BMC_WATCHDOG_GET_USE(s) ((s)->watchdog_use & 0x7)
+#define IPMI_BMC_WATCHDOG_GET_DONT_LOG(s) (((s)->watchdog_use >> 7) & 0x1)
+#define IPMI_BMC_WATCHDOG_GET_DONT_STOP(s) (((s)->watchdog_use >> 6) & 0x1)
+#define IPMI_BMC_WATCHDOG_GET_PRE_ACTION(s) (((s)->watchdog_action >> 4) & 0x7)
+#define IPMI_BMC_WATCHDOG_PRE_NONE 0
+#define IPMI_BMC_WATCHDOG_PRE_SMI 1
+#define IPMI_BMC_WATCHDOG_PRE_NMI 2
+#define IPMI_BMC_WATCHDOG_PRE_MSG_INT 3
+#define IPMI_BMC_WATCHDOG_GET_ACTION(s) ((s)->watchdog_action & 0x7)
+#define IPMI_BMC_WATCHDOG_ACTION_NONE 0
+#define IPMI_BMC_WATCHDOG_ACTION_RESET 1
+#define IPMI_BMC_WATCHDOG_ACTION_POWER_DOWN 2
+#define IPMI_BMC_WATCHDOG_ACTION_POWER_CYCLE 3
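+
+/*
+ * Summarising the encodings above: watchdog_use packs the timer use in bits
+ * 0-2, "don't stop" in bit 6 and "don't log" in bit 7; watchdog_action packs
+ * the expiry action in bits 0-2 and the pre-timeout interrupt in bits 4-6.
+ */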
+
+
+/* Add a byte to the response. */
+#define IPMI_ADD_RSP_DATA(b) \
+ do { \
+ if (*rsp_len >= max_rsp_len) { \
+ rsp[2] = IPMI_CC_REQUEST_DATA_TRUNCATED; \
+ goto out; \
+ } \
+ rsp[(*rsp_len)++] = (b); \
+ } while (0)
+
+/* Verify that the received command is a certain length. */
+#define IPMI_CHECK_CMD_LEN(l) \
+ if (cmd_len < l) { \
+ rsp[2] = IPMI_CC_REQUEST_DATA_LENGTH_INVALID; \
+ goto out; \
+ }
+
+/* Check that the reservation in the command is valid. */
+#define IPMI_CHECK_RESERVATION(off, r) \
+ do { \
+ if ((cmd[off] | (cmd[off + 1] << 8)) != r) { \
+ rsp[2] = IPMI_CC_INVALID_RESERVATION; \
+ goto out; \
+ } \
+ } while (0)
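+
+/*
+ * These helpers expand inside the command handlers below and rely on the
+ * handler's cmd, rsp, rsp_len and max_rsp_len arguments as well as on an
+ * "out:" label being in scope.
+ */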
+
+
+static void ipmi_sim_handle_timeout(IPMIBmcSim *ibs);
+
+static void ipmi_gettime(struct ipmi_time *time)
+{
+ int64_t stime;
+
+ stime = qemu_clock_get_ns(QEMU_CLOCK_HOST);
+ time->tv_sec = stime / 1000000000LL;
+ time->tv_nsec = stime % 1000000000LL;
+}
+
+static int64_t ipmi_getmonotime(void)
+{
+ return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+}
+
+static void ipmi_timeout(void *opaque)
+{
+ IPMIBmcSim *ibs = opaque;
+
+ ipmi_sim_handle_timeout(ibs);
+}
+
+static void set_timestamp(IPMIBmcSim *ibs, uint8_t *ts)
+{
+ unsigned int val;
+ struct ipmi_time now;
+
+ ipmi_gettime(&now);
+ val = now.tv_sec + ibs->sel.time_offset;
+ ts[0] = val & 0xff;
+ ts[1] = (val >> 8) & 0xff;
+ ts[2] = (val >> 16) & 0xff;
+ ts[3] = (val >> 24) & 0xff;
+}
+
+static void sdr_inc_reservation(IPMISdr *sdr)
+{
+ sdr->reservation++;
+ if (sdr->reservation == 0) {
+ sdr->reservation = 1;
+ }
+}
+
+static int sdr_add_entry(IPMIBmcSim *ibs, const uint8_t *entry,
+ unsigned int len, uint16_t *recid)
+{
+ if ((len < 5) || (len > 255)) {
+ return 1;
+ }
+
+ if (entry[4] != len - 5) {
+ return 1;
+ }
+
+ if (ibs->sdr.next_free + len > MAX_SDR_SIZE) {
+ ibs->sdr.overflow = 1;
+ return 1;
+ }
+
+ memcpy(ibs->sdr.sdr + ibs->sdr.next_free, entry, len);
+ ibs->sdr.sdr[ibs->sdr.next_free] = ibs->sdr.next_rec_id & 0xff;
+ ibs->sdr.sdr[ibs->sdr.next_free+1] = (ibs->sdr.next_rec_id >> 8) & 0xff;
+ ibs->sdr.sdr[ibs->sdr.next_free+2] = 0x51; /* Conform to IPMI 1.5 spec */
+
+ if (recid) {
+ *recid = ibs->sdr.next_rec_id;
+ }
+ ibs->sdr.next_rec_id++;
+ set_timestamp(ibs, ibs->sdr.last_addition);
+ ibs->sdr.next_free += len;
+ sdr_inc_reservation(&ibs->sdr);
+ return 0;
+}
+
+static int sdr_find_entry(IPMISdr *sdr, uint16_t recid,
+ unsigned int *retpos, uint16_t *nextrec)
+{
+ unsigned int pos = *retpos;
+
+ while (pos < sdr->next_free) {
+ uint16_t trec = sdr->sdr[pos] | (sdr->sdr[pos + 1] << 8);
+ unsigned int nextpos = pos + sdr->sdr[pos + 4];
+
+ if (trec == recid) {
+ if (nextrec) {
+ if (nextpos >= sdr->next_free) {
+ *nextrec = 0xffff;
+ } else {
+ *nextrec = (sdr->sdr[nextpos] |
+ (sdr->sdr[nextpos + 1] << 8));
+ }
+ }
+ *retpos = pos;
+ return 0;
+ }
+ pos = nextpos;
+ }
+ return 1;
+}
+
+static void sel_inc_reservation(IPMISel *sel)
+{
+ sel->reservation++;
+ if (sel->reservation == 0) {
+ sel->reservation = 1;
+ }
+}
+
+/* Returns 1 if the SEL is full and can't hold the event. */
+static int sel_add_event(IPMIBmcSim *ibs, uint8_t *event)
+{
+ event[0] = 0xff;
+ event[1] = 0xff;
+ set_timestamp(ibs, event + 3);
+ if (ibs->sel.next_free == MAX_SEL_SIZE) {
+ ibs->sel.overflow = 1;
+ return 1;
+ }
+ event[0] = ibs->sel.next_free & 0xff;
+ event[1] = (ibs->sel.next_free >> 8) & 0xff;
+ memcpy(ibs->sel.last_addition, event + 3, 4);
+ memcpy(ibs->sel.sel[ibs->sel.next_free], event, 16);
+ ibs->sel.next_free++;
+ sel_inc_reservation(&ibs->sel);
+ return 0;
+}
+
+static int attn_set(IPMIBmcSim *ibs)
+{
+ return IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE_SET(ibs)
+ || IPMI_BMC_MSG_FLAG_EVT_BUF_FULL_SET(ibs)
+ || IPMI_BMC_MSG_FLAG_WATCHDOG_TIMEOUT_MASK_SET(ibs);
+}
+
+static int attn_irq_enabled(IPMIBmcSim *ibs)
+{
+ return (IPMI_BMC_MSG_INTS_ON(ibs) && IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE_SET(ibs))
+ || (IPMI_BMC_EVBUF_FULL_INT_ENABLED(ibs) &&
+ IPMI_BMC_MSG_FLAG_EVT_BUF_FULL_SET(ibs));
+}
+
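+/*
+ * Build a 16-byte SEL event record for a sensor state change; bytes 0-1
+ * (record ID) and 3-6 (timestamp) are filled in later by sel_add_event().
+ */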
+static void gen_event(IPMIBmcSim *ibs, unsigned int sens_num, uint8_t deassert,
+ uint8_t evd1, uint8_t evd2, uint8_t evd3)
+{
+ IPMIInterface *s = ibs->parent.intf;
+ IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
+ uint8_t evt[16];
+ IPMISensor *sens = ibs->sensors + sens_num;
+
+ if (!IPMI_BMC_EVENT_MSG_BUF_ENABLED(ibs)) {
+ return;
+ }
+ if (!IPMI_SENSOR_GET_EVENTS_ON(sens)) {
+ return;
+ }
+
+ evt[2] = 0x2; /* System event record */
+ evt[7] = ibs->parent.slave_addr;
+ evt[8] = 0;
+ evt[9] = 0x04; /* Format version */
+ evt[10] = sens->sensor_type;
+ evt[11] = sens_num;
+ evt[12] = sens->evt_reading_type_code | (!!deassert << 7);
+ evt[13] = evd1;
+ evt[14] = evd2;
+ evt[15] = evd3;
+
+ if (IPMI_BMC_EVENT_LOG_ENABLED(ibs)) {
+ sel_add_event(ibs, evt);
+ }
+
+ if (ibs->msg_flags & IPMI_BMC_MSG_FLAG_EVT_BUF_FULL) {
+ goto out;
+ }
+
+ memcpy(ibs->evtbuf, evt, 16);
+ ibs->msg_flags |= IPMI_BMC_MSG_FLAG_EVT_BUF_FULL;
+ k->set_atn(s, 1, attn_irq_enabled(ibs));
+ out:
+ return;
+}
+
+static void sensor_set_discrete_bit(IPMIBmcSim *ibs, unsigned int sensor,
+ unsigned int bit, unsigned int val,
+ uint8_t evd1, uint8_t evd2, uint8_t evd3)
+{
+ IPMISensor *sens;
+ uint16_t mask;
+
+ if (sensor >= MAX_SENSORS) {
+ return;
+ }
+ if (bit >= 16) {
+ return;
+ }
+
+ mask = (1 << bit);
+ sens = ibs->sensors + sensor;
+ if (val) {
+ sens->states |= mask & sens->states_suppt;
+ if (sens->assert_states & mask) {
+ return; /* Already asserted */
+ }
+ sens->assert_states |= mask & sens->assert_suppt;
+ if (sens->assert_enable & mask & sens->assert_states) {
+ /* Send an event on assert */
+ gen_event(ibs, sensor, 0, evd1, evd2, evd3);
+ }
+ } else {
+ sens->states &= ~(mask & sens->states_suppt);
+ if (sens->deassert_states & mask) {
+ return; /* Already deasserted */
+ }
+ sens->deassert_states |= mask & sens->deassert_suppt;
+ if (sens->deassert_enable & mask & sens->deassert_states) {
+ /* Send an event on deassert */
+ gen_event(ibs, sensor, 1, evd1, evd2, evd3);
+ }
+ }
+}
+
+static void ipmi_init_sensors_from_sdrs(IPMIBmcSim *s)
+{
+ unsigned int i, pos;
+ IPMISensor *sens;
+
+ for (i = 0; i < MAX_SENSORS; i++) {
+ memset(s->sensors + i, 0, sizeof(*sens));
+ }
+
+ pos = 0;
+ for (i = 0; !sdr_find_entry(&s->sdr, i, &pos, NULL); i++) {
+ uint8_t *sdr = s->sdr.sdr + pos;
+ unsigned int len = sdr[4];
+
+ if (len < 20) {
+ continue;
+ }
+ if ((sdr[3] < 1) || (sdr[3] > 2)) {
+            continue; /* Not a full or compact sensor SDR */
+ }
+
+        if (sdr[7] >= MAX_SENSORS) {
+ continue;
+ }
+ sens = s->sensors + sdr[7];
+
+ IPMI_SENSOR_SET_PRESENT(sens, 1);
+ IPMI_SENSOR_SET_SCAN_ON(sens, (sdr[10] >> 6) & 1);
+ IPMI_SENSOR_SET_EVENTS_ON(sens, (sdr[10] >> 5) & 1);
+ sens->assert_suppt = sdr[14] | (sdr[15] << 8);
+ sens->deassert_suppt = sdr[16] | (sdr[17] << 8);
+ sens->states_suppt = sdr[18] | (sdr[19] << 8);
+ sens->sensor_type = sdr[12];
+ sens->evt_reading_type_code = sdr[13] & 0x7f;
+
+ /* Enable all the events that are supported. */
+ sens->assert_enable = sens->assert_suppt;
+ sens->deassert_enable = sens->deassert_suppt;
+ }
+}
+
+static int ipmi_register_netfn(IPMIBmcSim *s, unsigned int netfn,
+ const IPMINetfn *netfnd)
+{
+    if ((netfn & 1) || (netfn >= MAX_NETFNS) || (s->netfns[netfn / 2])) {
+ return -1;
+ }
+ s->netfns[netfn / 2] = netfnd;
+ return 0;
+}
+
+static void next_timeout(IPMIBmcSim *ibs)
+{
+ int64_t next;
+ if (ibs->watchdog_running) {
+ next = ibs->watchdog_expiry;
+ } else {
+ /* Wait a minute */
+ next = ipmi_getmonotime() + 60 * 1000000000LL;
+ }
+ timer_mod_ns(ibs->timer, next);
+}
+
+static void ipmi_sim_handle_command(IPMIBmc *b,
+ uint8_t *cmd, unsigned int cmd_len,
+ unsigned int max_cmd_len,
+ uint8_t msg_id)
+{
+ IPMIBmcSim *ibs = IPMI_BMC_SIMULATOR(b);
+ IPMIInterface *s = ibs->parent.intf;
+ IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
+ unsigned int netfn;
+ uint8_t rsp[MAX_IPMI_MSG_SIZE];
+ unsigned int rsp_len_holder = 0;
+ unsigned int *rsp_len = &rsp_len_holder;
+ unsigned int max_rsp_len = sizeof(rsp);
+
+ /* Set up the response, set the low bit of NETFN. */
+ /* Note that max_rsp_len must be at least 3 */
+ IPMI_ADD_RSP_DATA(cmd[0] | 0x04);
+ IPMI_ADD_RSP_DATA(cmd[1]);
+ IPMI_ADD_RSP_DATA(0); /* Assume success */
+
+ /* If it's too short or it was truncated, return an error. */
+ if (cmd_len < 2) {
+ rsp[2] = IPMI_CC_REQUEST_DATA_LENGTH_INVALID;
+ goto out;
+ }
+ if (cmd_len > max_cmd_len) {
+ rsp[2] = IPMI_CC_REQUEST_DATA_TRUNCATED;
+ goto out;
+ }
+
+ if ((cmd[0] & 0x03) != 0) {
+ /* Only have stuff on LUN 0 */
+ rsp[2] = IPMI_CC_COMMAND_INVALID_FOR_LUN;
+ goto out;
+ }
+
+ netfn = cmd[0] >> 2;
+
+ /* Odd netfns are not valid, make sure the command is registered */
+ if ((netfn & 1) || !ibs->netfns[netfn / 2] ||
+ (cmd[1] >= ibs->netfns[netfn / 2]->cmd_nums) ||
+ (!ibs->netfns[netfn / 2]->cmd_handlers[cmd[1]])) {
+ rsp[2] = IPMI_CC_INVALID_CMD;
+ goto out;
+ }
+
+ ibs->netfns[netfn / 2]->cmd_handlers[cmd[1]](ibs, cmd, cmd_len, rsp, rsp_len,
+ max_rsp_len);
+
+ out:
+ k->handle_rsp(s, msg_id, rsp, *rsp_len);
+
+ next_timeout(ibs);
+}
+
+static void ipmi_sim_handle_timeout(IPMIBmcSim *ibs)
+{
+ IPMIInterface *s = ibs->parent.intf;
+ IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
+
+ if (!ibs->watchdog_running) {
+ goto out;
+ }
+
+ if (!ibs->watchdog_preaction_ran) {
+ switch (IPMI_BMC_WATCHDOG_GET_PRE_ACTION(ibs)) {
+ case IPMI_BMC_WATCHDOG_PRE_NMI:
+ ibs->msg_flags |= IPMI_BMC_MSG_FLAG_WATCHDOG_TIMEOUT_MASK;
+ k->do_hw_op(s, IPMI_SEND_NMI, 0);
+ sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 8, 1,
+ 0xc8, (2 << 4) | 0xf, 0xff);
+ break;
+
+ case IPMI_BMC_WATCHDOG_PRE_MSG_INT:
+ ibs->msg_flags |= IPMI_BMC_MSG_FLAG_WATCHDOG_TIMEOUT_MASK;
+ k->set_atn(s, 1, attn_irq_enabled(ibs));
+ sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 8, 1,
+ 0xc8, (3 << 4) | 0xf, 0xff);
+ break;
+
+ default:
+ goto do_full_expiry;
+ }
+
+ ibs->watchdog_preaction_ran = 1;
+ /* Issued the pretimeout, do the rest of the timeout now. */
+ ibs->watchdog_expiry = ipmi_getmonotime();
+ ibs->watchdog_expiry += ibs->watchdog_pretimeout * 1000000000LL;
+ goto out;
+ }
+
+ do_full_expiry:
+ ibs->watchdog_running = 0; /* Stop the watchdog on a timeout */
+ ibs->watchdog_expired |= (1 << IPMI_BMC_WATCHDOG_GET_USE(ibs));
+ switch (IPMI_BMC_WATCHDOG_GET_ACTION(ibs)) {
+ case IPMI_BMC_WATCHDOG_ACTION_NONE:
+ sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 0, 1,
+ 0xc0, ibs->watchdog_use & 0xf, 0xff);
+ break;
+
+ case IPMI_BMC_WATCHDOG_ACTION_RESET:
+ sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 1, 1,
+ 0xc1, ibs->watchdog_use & 0xf, 0xff);
+ k->do_hw_op(s, IPMI_RESET_CHASSIS, 0);
+ break;
+
+ case IPMI_BMC_WATCHDOG_ACTION_POWER_DOWN:
+ sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 2, 1,
+ 0xc2, ibs->watchdog_use & 0xf, 0xff);
+ k->do_hw_op(s, IPMI_POWEROFF_CHASSIS, 0);
+ break;
+
+ case IPMI_BMC_WATCHDOG_ACTION_POWER_CYCLE:
+ sensor_set_discrete_bit(ibs, IPMI_WATCHDOG_SENSOR, 2, 1,
+ 0xc3, ibs->watchdog_use & 0xf, 0xff);
+ k->do_hw_op(s, IPMI_POWERCYCLE_CHASSIS, 0);
+ break;
+ }
+
+ out:
+ next_timeout(ibs);
+}
+
+static void chassis_capabilities(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMI_ADD_RSP_DATA(0);
+ IPMI_ADD_RSP_DATA(ibs->parent.slave_addr);
+ IPMI_ADD_RSP_DATA(ibs->parent.slave_addr);
+ IPMI_ADD_RSP_DATA(ibs->parent.slave_addr);
+ IPMI_ADD_RSP_DATA(ibs->parent.slave_addr);
+ out:
+ return;
+}
+
+static void chassis_status(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMI_ADD_RSP_DATA(0x61); /* Unknown power restore, power is on */
+ IPMI_ADD_RSP_DATA(0);
+ IPMI_ADD_RSP_DATA(0);
+ IPMI_ADD_RSP_DATA(0);
+ out:
+ return;
+}
+
+static void chassis_control(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMIInterface *s = ibs->parent.intf;
+ IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
+
+ IPMI_CHECK_CMD_LEN(3);
+ switch (cmd[2] & 0xf) {
+ case 0: /* power down */
+ rsp[2] = k->do_hw_op(s, IPMI_POWEROFF_CHASSIS, 0);
+ break;
+ case 1: /* power up */
+ rsp[2] = k->do_hw_op(s, IPMI_POWERON_CHASSIS, 0);
+ break;
+ case 2: /* power cycle */
+ rsp[2] = k->do_hw_op(s, IPMI_POWERCYCLE_CHASSIS, 0);
+ break;
+ case 3: /* hard reset */
+ rsp[2] = k->do_hw_op(s, IPMI_RESET_CHASSIS, 0);
+ break;
+ case 4: /* pulse diagnostic interrupt */
+ rsp[2] = k->do_hw_op(s, IPMI_PULSE_DIAG_IRQ, 0);
+ break;
+ case 5: /* soft shutdown via ACPI by overtemp emulation */
+ rsp[2] = k->do_hw_op(s,
+ IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP, 0);
+ break;
+ default:
+ rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ goto out;
+ }
+ out:
+ return;
+}
+
+static void get_device_id(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMI_ADD_RSP_DATA(ibs->device_id);
+ IPMI_ADD_RSP_DATA(ibs->device_rev & 0xf);
+ IPMI_ADD_RSP_DATA(ibs->fwrev1 & 0x7f);
+ IPMI_ADD_RSP_DATA(ibs->fwrev2);
+ IPMI_ADD_RSP_DATA(ibs->ipmi_version);
+ IPMI_ADD_RSP_DATA(0x07); /* sensor, SDR, and SEL. */
+ IPMI_ADD_RSP_DATA(ibs->mfg_id[0]);
+ IPMI_ADD_RSP_DATA(ibs->mfg_id[1]);
+ IPMI_ADD_RSP_DATA(ibs->mfg_id[2]);
+ IPMI_ADD_RSP_DATA(ibs->product_id[0]);
+ IPMI_ADD_RSP_DATA(ibs->product_id[1]);
+ out:
+ return;
+}
+
+static void set_global_enables(IPMIBmcSim *ibs, uint8_t val)
+{
+ IPMIInterface *s = ibs->parent.intf;
+ IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
+ bool irqs_on;
+
+ ibs->bmc_global_enables = val;
+
+    irqs_on = val & ((1 << IPMI_BMC_EVBUF_FULL_INT_BIT) |
+                     (1 << IPMI_BMC_RCV_MSG_QUEUE_INT_BIT));
+
+ k->set_irq_enable(s, irqs_on);
+}
+
+static void cold_reset(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMIInterface *s = ibs->parent.intf;
+ IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
+
+ /* Disable all interrupts */
+ set_global_enables(ibs, 1 << IPMI_BMC_EVENT_LOG_BIT);
+
+ if (k->reset) {
+ k->reset(s, true);
+ }
+}
+
+static void warm_reset(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMIInterface *s = ibs->parent.intf;
+ IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
+
+ if (k->reset) {
+ k->reset(s, false);
+ }
+}
+
+static void set_bmc_global_enables(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMI_CHECK_CMD_LEN(3);
+ set_global_enables(ibs, cmd[2]);
+ out:
+ return;
+}
+
+static void get_bmc_global_enables(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMI_ADD_RSP_DATA(ibs->bmc_global_enables);
+ out:
+ return;
+}
+
+static void clr_msg_flags(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMIInterface *s = ibs->parent.intf;
+ IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
+
+ IPMI_CHECK_CMD_LEN(3);
+ ibs->msg_flags &= ~cmd[2];
+ k->set_atn(s, attn_set(ibs), attn_irq_enabled(ibs));
+ out:
+ return;
+}
+
+static void get_msg_flags(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMI_ADD_RSP_DATA(ibs->msg_flags);
+ out:
+ return;
+}
+
+static void read_evt_msg_buf(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMIInterface *s = ibs->parent.intf;
+ IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
+ unsigned int i;
+
+ if (!(ibs->msg_flags & IPMI_BMC_MSG_FLAG_EVT_BUF_FULL)) {
+ rsp[2] = 0x80;
+ goto out;
+ }
+ for (i = 0; i < 16; i++) {
+ IPMI_ADD_RSP_DATA(ibs->evtbuf[i]);
+ }
+ ibs->msg_flags &= ~IPMI_BMC_MSG_FLAG_EVT_BUF_FULL;
+ k->set_atn(s, attn_set(ibs), attn_irq_enabled(ibs));
+ out:
+ return;
+}
+
+static void get_msg(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMIRcvBufEntry *msg;
+
+ qemu_mutex_lock(&ibs->lock);
+ if (QTAILQ_EMPTY(&ibs->rcvbufs)) {
+ rsp[2] = 0x80; /* Queue empty */
+ goto out;
+ }
+ rsp[3] = 0; /* Channel 0 */
+ *rsp_len += 1;
+ msg = QTAILQ_FIRST(&ibs->rcvbufs);
+ memcpy(rsp + 4, msg->buf, msg->len);
+ *rsp_len += msg->len;
+ QTAILQ_REMOVE(&ibs->rcvbufs, msg, entry);
+ g_free(msg);
+
+ if (QTAILQ_EMPTY(&ibs->rcvbufs)) {
+ IPMIInterface *s = ibs->parent.intf;
+ IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
+
+ ibs->msg_flags &= ~IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE;
+ k->set_atn(s, attn_set(ibs), attn_irq_enabled(ibs));
+ }
+
+ out:
+ qemu_mutex_unlock(&ibs->lock);
+ return;
+}
+
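+/*
+ * Standard IPMB checksum: returns the value that makes the covered bytes
+ * plus the checksum sum to zero (mod 256).
+ */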
+static unsigned char
+ipmb_checksum(unsigned char *data, int size, unsigned char csum)
+{
+ for (; size > 0; size--, data++) {
+ csum += *data;
+ }
+
+ return -csum;
+}
+
+static void send_msg(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMIInterface *s = ibs->parent.intf;
+ IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
+ IPMIRcvBufEntry *msg;
+ uint8_t *buf;
+ uint8_t netfn, rqLun, rsLun, rqSeq;
+
+ IPMI_CHECK_CMD_LEN(3);
+
+ if (cmd[2] != 0) {
+ /* We only handle channel 0 with no options */
+ rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ goto out;
+ }
+
+ IPMI_CHECK_CMD_LEN(10);
+ if (cmd[3] != 0x40) {
+        /* We only emulate an MC at address 0x40. */
+ rsp[2] = 0x83; /* NAK on write */
+ goto out;
+ }
+
+ cmd += 3; /* Skip the header. */
+ cmd_len -= 3;
+
+ /*
+ * At this point we "send" the message successfully. Any error will
+ * be returned in the response.
+ */
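+    /*
+     * The embedded IPMB request is laid out as: rsSA, netFn/rsLUN,
+     * checksum1, rqSA, rqSeq/rqLUN, cmd, <data>, checksum2.
+     */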
+ if (ipmb_checksum(cmd, cmd_len, 0) != 0 ||
+ cmd[3] != 0x20) { /* Improper response address */
+ goto out; /* No response */
+ }
+
+ netfn = cmd[1] >> 2;
+ rqLun = cmd[4] & 0x3;
+ rsLun = cmd[1] & 0x3;
+ rqSeq = cmd[4] >> 2;
+
+ if (rqLun != 2) {
+ /* We only support LUN 2 coming back to us. */
+ goto out;
+ }
+
+ msg = g_malloc(sizeof(*msg));
+ msg->buf[0] = ((netfn | 1) << 2) | rqLun; /* NetFN, and make a response */
+ msg->buf[1] = ipmb_checksum(msg->buf, 1, 0);
+ msg->buf[2] = cmd[0]; /* rsSA */
+ msg->buf[3] = (rqSeq << 2) | rsLun;
+ msg->buf[4] = cmd[5]; /* Cmd */
+ msg->buf[5] = 0; /* Completion Code */
+ msg->len = 6;
+
+ if ((cmd[1] >> 2) != IPMI_NETFN_APP || cmd[5] != IPMI_CMD_GET_DEVICE_ID) {
+ /* Not a command we handle. */
+ msg->buf[5] = IPMI_CC_INVALID_CMD;
+ goto end_msg;
+ }
+
+ buf = msg->buf + msg->len; /* After the CC */
+ buf[0] = 0;
+ buf[1] = 0;
+ buf[2] = 0;
+ buf[3] = 0;
+ buf[4] = 0x51;
+ buf[5] = 0;
+ buf[6] = 0;
+ buf[7] = 0;
+ buf[8] = 0;
+ buf[9] = 0;
+ buf[10] = 0;
+ msg->len += 11;
+
+ end_msg:
+ msg->buf[msg->len] = ipmb_checksum(msg->buf, msg->len, 0);
+ msg->len++;
+ qemu_mutex_lock(&ibs->lock);
+ QTAILQ_INSERT_TAIL(&ibs->rcvbufs, msg, entry);
+ ibs->msg_flags |= IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE;
+ k->set_atn(s, 1, attn_irq_enabled(ibs));
+ qemu_mutex_unlock(&ibs->lock);
+
+ out:
+ return;
+}
+
+static void do_watchdog_reset(IPMIBmcSim *ibs)
+{
+ if (IPMI_BMC_WATCHDOG_GET_ACTION(ibs) ==
+ IPMI_BMC_WATCHDOG_ACTION_NONE) {
+ ibs->watchdog_running = 0;
+ return;
+ }
+ ibs->watchdog_preaction_ran = 0;
+
+    /* Timeout is in tenths of a second, pretimeout is in seconds */
+ ibs->watchdog_expiry = ipmi_getmonotime();
+ ibs->watchdog_expiry += ibs->watchdog_timeout * 100000000LL;
+ if (IPMI_BMC_WATCHDOG_GET_PRE_ACTION(ibs) != IPMI_BMC_WATCHDOG_PRE_NONE) {
+ ibs->watchdog_expiry -= ibs->watchdog_pretimeout * 1000000000LL;
+ }
+ ibs->watchdog_running = 1;
+}
+
+static void reset_watchdog_timer(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ if (!ibs->watchdog_initialized) {
+ rsp[2] = 0x80;
+ goto out;
+ }
+ do_watchdog_reset(ibs);
+ out:
+ return;
+}
+
+static void set_watchdog_timer(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMIInterface *s = ibs->parent.intf;
+ IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s);
+ unsigned int val;
+
+ IPMI_CHECK_CMD_LEN(8);
+ val = cmd[2] & 0x7; /* Validate use */
+ if (val == 0 || val > 5) {
+ rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ goto out;
+ }
+ val = cmd[3] & 0x7; /* Validate action */
+ switch (val) {
+ case IPMI_BMC_WATCHDOG_ACTION_NONE:
+ break;
+
+ case IPMI_BMC_WATCHDOG_ACTION_RESET:
+ rsp[2] = k->do_hw_op(s, IPMI_RESET_CHASSIS, 1);
+ break;
+
+ case IPMI_BMC_WATCHDOG_ACTION_POWER_DOWN:
+ rsp[2] = k->do_hw_op(s, IPMI_POWEROFF_CHASSIS, 1);
+ break;
+
+ case IPMI_BMC_WATCHDOG_ACTION_POWER_CYCLE:
+ rsp[2] = k->do_hw_op(s, IPMI_POWERCYCLE_CHASSIS, 1);
+ break;
+
+ default:
+ rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ }
+ if (rsp[2]) {
+ rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ goto out;
+ }
+
+ val = (cmd[3] >> 4) & 0x7; /* Validate preaction */
+ switch (val) {
+ case IPMI_BMC_WATCHDOG_PRE_MSG_INT:
+ case IPMI_BMC_WATCHDOG_PRE_NONE:
+ break;
+
+ case IPMI_BMC_WATCHDOG_PRE_NMI:
+ if (!k->do_hw_op(s, IPMI_SEND_NMI, 1)) {
+ /* NMI not supported. */
+ rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ goto out;
+        }
+        break;
+
+    default:
+ /* We don't support PRE_SMI */
+ rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ goto out;
+ }
+
+ ibs->watchdog_initialized = 1;
+ ibs->watchdog_use = cmd[2] & IPMI_BMC_WATCHDOG_USE_MASK;
+ ibs->watchdog_action = cmd[3] & IPMI_BMC_WATCHDOG_ACTION_MASK;
+ ibs->watchdog_pretimeout = cmd[4];
+ ibs->watchdog_expired &= ~cmd[5];
+ ibs->watchdog_timeout = cmd[6] | (((uint16_t) cmd[7]) << 8);
+    if (ibs->watchdog_running && IPMI_BMC_WATCHDOG_GET_DONT_STOP(ibs)) {
+ do_watchdog_reset(ibs);
+ } else {
+ ibs->watchdog_running = 0;
+ }
+ out:
+ return;
+}
+
+static void get_watchdog_timer(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMI_ADD_RSP_DATA(ibs->watchdog_use);
+ IPMI_ADD_RSP_DATA(ibs->watchdog_action);
+ IPMI_ADD_RSP_DATA(ibs->watchdog_pretimeout);
+ IPMI_ADD_RSP_DATA(ibs->watchdog_expired);
+ if (ibs->watchdog_running) {
+ long timeout;
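+        /* Convert the remaining time from ns to tenths of a second, rounding */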
+ timeout = ((ibs->watchdog_expiry - ipmi_getmonotime() + 50000000)
+ / 100000000);
+ IPMI_ADD_RSP_DATA(timeout & 0xff);
+ IPMI_ADD_RSP_DATA((timeout >> 8) & 0xff);
+ } else {
+ IPMI_ADD_RSP_DATA(0);
+ IPMI_ADD_RSP_DATA(0);
+ }
+ out:
+ return;
+}
+
+static void get_sdr_rep_info(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ unsigned int i;
+
+ IPMI_ADD_RSP_DATA(0x51); /* Conform to IPMI 1.5 spec */
+ IPMI_ADD_RSP_DATA(ibs->sdr.next_rec_id & 0xff);
+ IPMI_ADD_RSP_DATA((ibs->sdr.next_rec_id >> 8) & 0xff);
+ IPMI_ADD_RSP_DATA((MAX_SDR_SIZE - ibs->sdr.next_free) & 0xff);
+ IPMI_ADD_RSP_DATA(((MAX_SDR_SIZE - ibs->sdr.next_free) >> 8) & 0xff);
+ for (i = 0; i < 4; i++) {
+ IPMI_ADD_RSP_DATA(ibs->sdr.last_addition[i]);
+ }
+ for (i = 0; i < 4; i++) {
+ IPMI_ADD_RSP_DATA(ibs->sdr.last_clear[i]);
+ }
+    /* Modal SDR repository update only, reserve supported */
+ IPMI_ADD_RSP_DATA((ibs->sdr.overflow << 7) | 0x22);
+ out:
+ return;
+}
+
+static void reserve_sdr_rep(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMI_ADD_RSP_DATA(ibs->sdr.reservation & 0xff);
+ IPMI_ADD_RSP_DATA((ibs->sdr.reservation >> 8) & 0xff);
+ out:
+ return;
+}
+
+static void get_sdr(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ unsigned int pos;
+ uint16_t nextrec;
+
+ IPMI_CHECK_CMD_LEN(8);
+ if (cmd[6]) {
+ IPMI_CHECK_RESERVATION(2, ibs->sdr.reservation);
+ }
+ pos = 0;
+ if (sdr_find_entry(&ibs->sdr, cmd[4] | (cmd[5] << 8),
+ &pos, &nextrec)) {
+ rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
+ goto out;
+ }
+ if (cmd[6] > (ibs->sdr.sdr[pos + 4])) {
+ rsp[2] = IPMI_CC_PARM_OUT_OF_RANGE;
+ goto out;
+ }
+
+ IPMI_ADD_RSP_DATA(nextrec & 0xff);
+ IPMI_ADD_RSP_DATA((nextrec >> 8) & 0xff);
+
+ if (cmd[7] == 0xff) {
+ cmd[7] = ibs->sdr.sdr[pos + 4] - cmd[6];
+ }
+
+ if ((cmd[7] + *rsp_len) > max_rsp_len) {
+ rsp[2] = IPMI_CC_CANNOT_RETURN_REQ_NUM_BYTES;
+ goto out;
+ }
+ memcpy(rsp + *rsp_len, ibs->sdr.sdr + pos + cmd[6], cmd[7]);
+ *rsp_len += cmd[7];
+ out:
+ return;
+}
+
+static void add_sdr(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ uint16_t recid;
+
+ if (sdr_add_entry(ibs, cmd + 2, cmd_len - 2, &recid)) {
+ rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ goto out;
+ }
+ IPMI_ADD_RSP_DATA(recid & 0xff);
+ IPMI_ADD_RSP_DATA((recid >> 8) & 0xff);
+ out:
+ return;
+}
+
+static void clear_sdr_rep(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMI_CHECK_CMD_LEN(8);
+ IPMI_CHECK_RESERVATION(2, ibs->sdr.reservation);
+ if (cmd[4] != 'C' || cmd[5] != 'L' || cmd[6] != 'R') {
+ rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ goto out;
+ }
+ if (cmd[7] == 0xaa) {
+ ibs->sdr.next_free = 0;
+ ibs->sdr.overflow = 0;
+ set_timestamp(ibs, ibs->sdr.last_clear);
+ IPMI_ADD_RSP_DATA(1); /* Erasure complete */
+ sdr_inc_reservation(&ibs->sdr);
+ } else if (cmd[7] == 0) {
+ IPMI_ADD_RSP_DATA(1); /* Erasure complete */
+ } else {
+ rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ goto out;
+ }
+ out:
+ return;
+}
+
+static void get_sel_info(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ unsigned int i, val;
+
+ IPMI_ADD_RSP_DATA(0x51); /* Conform to IPMI 1.5 */
+ IPMI_ADD_RSP_DATA(ibs->sel.next_free & 0xff);
+ IPMI_ADD_RSP_DATA((ibs->sel.next_free >> 8) & 0xff);
+ val = (MAX_SEL_SIZE - ibs->sel.next_free) * 16;
+ IPMI_ADD_RSP_DATA(val & 0xff);
+ IPMI_ADD_RSP_DATA((val >> 8) & 0xff);
+ for (i = 0; i < 4; i++) {
+ IPMI_ADD_RSP_DATA(ibs->sel.last_addition[i]);
+ }
+ for (i = 0; i < 4; i++) {
+ IPMI_ADD_RSP_DATA(ibs->sel.last_clear[i]);
+ }
+ /* Only support Reserve SEL */
+ IPMI_ADD_RSP_DATA((ibs->sel.overflow << 7) | 0x02);
+ out:
+ return;
+}
+
+static void reserve_sel(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMI_ADD_RSP_DATA(ibs->sel.reservation & 0xff);
+ IPMI_ADD_RSP_DATA((ibs->sel.reservation >> 8) & 0xff);
+ out:
+ return;
+}
+
+static void get_sel_entry(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ unsigned int val;
+
+ IPMI_CHECK_CMD_LEN(8);
+ if (cmd[6]) {
+ IPMI_CHECK_RESERVATION(2, ibs->sel.reservation);
+ }
+ if (ibs->sel.next_free == 0) {
+ rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
+ goto out;
+ }
+ if (cmd[6] > 15) {
+ rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ goto out;
+ }
+ if (cmd[7] == 0xff) {
+ cmd[7] = 16;
+ } else if ((cmd[7] + cmd[6]) > 16) {
+ rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ goto out;
+ } else {
+ cmd[7] += cmd[6];
+ }
+
+ val = cmd[4] | (cmd[5] << 8);
+ if (val == 0xffff) {
+ val = ibs->sel.next_free - 1;
+ } else if (val >= ibs->sel.next_free) {
+ rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
+ goto out;
+ }
+ if ((val + 1) == ibs->sel.next_free) {
+ IPMI_ADD_RSP_DATA(0xff);
+ IPMI_ADD_RSP_DATA(0xff);
+ } else {
+ IPMI_ADD_RSP_DATA((val + 1) & 0xff);
+ IPMI_ADD_RSP_DATA(((val + 1) >> 8) & 0xff);
+ }
+ for (; cmd[6] < cmd[7]; cmd[6]++) {
+ IPMI_ADD_RSP_DATA(ibs->sel.sel[val][cmd[6]]);
+ }
+ out:
+ return;
+}
+
+static void add_sel_entry(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMI_CHECK_CMD_LEN(18);
+ if (sel_add_event(ibs, cmd + 2)) {
+ rsp[2] = IPMI_CC_OUT_OF_SPACE;
+ goto out;
+ }
+ /* sel_add_event fills in the record number. */
+ IPMI_ADD_RSP_DATA(cmd[2]);
+ IPMI_ADD_RSP_DATA(cmd[3]);
+ out:
+ return;
+}
+
+static void clear_sel(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMI_CHECK_CMD_LEN(8);
+ IPMI_CHECK_RESERVATION(2, ibs->sel.reservation);
+ if (cmd[4] != 'C' || cmd[5] != 'L' || cmd[6] != 'R') {
+ rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ goto out;
+ }
+ if (cmd[7] == 0xaa) {
+ ibs->sel.next_free = 0;
+ ibs->sel.overflow = 0;
+        set_timestamp(ibs, ibs->sel.last_clear);
+ IPMI_ADD_RSP_DATA(1); /* Erasure complete */
+ sel_inc_reservation(&ibs->sel);
+ } else if (cmd[7] == 0) {
+ IPMI_ADD_RSP_DATA(1); /* Erasure complete */
+ } else {
+ rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ goto out;
+ }
+ out:
+ return;
+}
+
+static void get_sel_time(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ uint32_t val;
+ struct ipmi_time now;
+
+ ipmi_gettime(&now);
+ val = now.tv_sec + ibs->sel.time_offset;
+ IPMI_ADD_RSP_DATA(val & 0xff);
+ IPMI_ADD_RSP_DATA((val >> 8) & 0xff);
+ IPMI_ADD_RSP_DATA((val >> 16) & 0xff);
+ IPMI_ADD_RSP_DATA((val >> 24) & 0xff);
+ out:
+ return;
+}
+
+static void set_sel_time(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ uint32_t val;
+ struct ipmi_time now;
+
+ IPMI_CHECK_CMD_LEN(6);
+ val = cmd[2] | (cmd[3] << 8) | (cmd[4] << 16) | (cmd[5] << 24);
+ ipmi_gettime(&now);
+ ibs->sel.time_offset = now.tv_sec - ((long) val);
+ out:
+ return;
+}
+
+static void set_sensor_evt_enable(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMISensor *sens;
+
+ IPMI_CHECK_CMD_LEN(4);
+ if ((cmd[2] > MAX_SENSORS) ||
+ !IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
+ rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
+ goto out;
+ }
+ sens = ibs->sensors + cmd[2];
+ switch ((cmd[3] >> 4) & 0x3) {
+ case 0: /* Do not change */
+ break;
+ case 1: /* Enable bits */
+ if (cmd_len > 4) {
+ sens->assert_enable |= cmd[4];
+ }
+ if (cmd_len > 5) {
+ sens->assert_enable |= cmd[5] << 8;
+ }
+ if (cmd_len > 6) {
+ sens->deassert_enable |= cmd[6];
+ }
+ if (cmd_len > 7) {
+ sens->deassert_enable |= cmd[7] << 8;
+ }
+ break;
+ case 2: /* Disable bits */
+ if (cmd_len > 4) {
+ sens->assert_enable &= ~cmd[4];
+ }
+ if (cmd_len > 5) {
+ sens->assert_enable &= ~(cmd[5] << 8);
+ }
+ if (cmd_len > 6) {
+ sens->deassert_enable &= ~cmd[6];
+ }
+ if (cmd_len > 7) {
+ sens->deassert_enable &= ~(cmd[7] << 8);
+ }
+ break;
+ case 3:
+ rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
+ goto out;
+ }
+ IPMI_SENSOR_SET_RET_STATUS(sens, cmd[3]);
+ out:
+ return;
+}
+
+static void get_sensor_evt_enable(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMISensor *sens;
+
+ IPMI_CHECK_CMD_LEN(3);
+ if ((cmd[2] > MAX_SENSORS) ||
+ !IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
+ rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
+ goto out;
+ }
+ sens = ibs->sensors + cmd[2];
+ IPMI_ADD_RSP_DATA(IPMI_SENSOR_GET_RET_STATUS(sens));
+ IPMI_ADD_RSP_DATA(sens->assert_enable & 0xff);
+ IPMI_ADD_RSP_DATA((sens->assert_enable >> 8) & 0xff);
+ IPMI_ADD_RSP_DATA(sens->deassert_enable & 0xff);
+ IPMI_ADD_RSP_DATA((sens->deassert_enable >> 8) & 0xff);
+ out:
+ return;
+}
+
+static void rearm_sensor_evts(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMISensor *sens;
+
+ IPMI_CHECK_CMD_LEN(4);
+ if ((cmd[2] > MAX_SENSORS) ||
+ !IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
+ rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
+ goto out;
+ }
+ sens = ibs->sensors + cmd[2];
+
+ if ((cmd[3] & 0x80) == 0) {
+ /* Just clear everything */
+ sens->states = 0;
+ goto out;
+ }
+ out:
+ return;
+}
+
+static void get_sensor_evt_status(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMISensor *sens;
+
+ IPMI_CHECK_CMD_LEN(3);
+ if ((cmd[2] > MAX_SENSORS) ||
+ !IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
+ rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
+ goto out;
+ }
+ sens = ibs->sensors + cmd[2];
+ IPMI_ADD_RSP_DATA(sens->reading);
+ IPMI_ADD_RSP_DATA(IPMI_SENSOR_GET_RET_STATUS(sens));
+ IPMI_ADD_RSP_DATA(sens->assert_states & 0xff);
+ IPMI_ADD_RSP_DATA((sens->assert_states >> 8) & 0xff);
+ IPMI_ADD_RSP_DATA(sens->deassert_states & 0xff);
+ IPMI_ADD_RSP_DATA((sens->deassert_states >> 8) & 0xff);
+ out:
+ return;
+}
+
+static void get_sensor_reading(IPMIBmcSim *ibs,
+ uint8_t *cmd, unsigned int cmd_len,
+ uint8_t *rsp, unsigned int *rsp_len,
+ unsigned int max_rsp_len)
+{
+ IPMISensor *sens;
+
+ IPMI_CHECK_CMD_LEN(3);
+ if ((cmd[2] > MAX_SENSORS) ||
+ !IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
+ rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
+ goto out;
+ }
+ sens = ibs->sensors + cmd[2];
+ IPMI_ADD_RSP_DATA(sens->reading);
+ IPMI_ADD_RSP_DATA(IPMI_SENSOR_GET_RET_STATUS(sens));
+ IPMI_ADD_RSP_DATA(sens->states & 0xff);
+ if (IPMI_SENSOR_IS_DISCRETE(sens)) {
+ IPMI_ADD_RSP_DATA((sens->states >> 8) & 0xff);
+ }
+ out:
+ return;
+}
+
+static const IPMICmdHandler chassis_cmds[IPMI_NETFN_CHASSIS_MAXCMD] = {
+ [IPMI_CMD_GET_CHASSIS_CAPABILITIES] = chassis_capabilities,
+ [IPMI_CMD_GET_CHASSIS_STATUS] = chassis_status,
+ [IPMI_CMD_CHASSIS_CONTROL] = chassis_control
+};
+static const IPMINetfn chassis_netfn = {
+ .cmd_nums = IPMI_NETFN_CHASSIS_MAXCMD,
+ .cmd_handlers = chassis_cmds
+};
+
+static const IPMICmdHandler
+sensor_event_cmds[IPMI_NETFN_SENSOR_EVENT_MAXCMD] = {
+ [IPMI_CMD_SET_SENSOR_EVT_ENABLE] = set_sensor_evt_enable,
+ [IPMI_CMD_GET_SENSOR_EVT_ENABLE] = get_sensor_evt_enable,
+ [IPMI_CMD_REARM_SENSOR_EVTS] = rearm_sensor_evts,
+ [IPMI_CMD_GET_SENSOR_EVT_STATUS] = get_sensor_evt_status,
+ [IPMI_CMD_GET_SENSOR_READING] = get_sensor_reading
+};
+static const IPMINetfn sensor_event_netfn = {
+ .cmd_nums = IPMI_NETFN_SENSOR_EVENT_MAXCMD,
+ .cmd_handlers = sensor_event_cmds
+};
+
+static const IPMICmdHandler app_cmds[IPMI_NETFN_APP_MAXCMD] = {
+ [IPMI_CMD_GET_DEVICE_ID] = get_device_id,
+ [IPMI_CMD_COLD_RESET] = cold_reset,
+ [IPMI_CMD_WARM_RESET] = warm_reset,
+ [IPMI_CMD_SET_BMC_GLOBAL_ENABLES] = set_bmc_global_enables,
+ [IPMI_CMD_GET_BMC_GLOBAL_ENABLES] = get_bmc_global_enables,
+ [IPMI_CMD_CLR_MSG_FLAGS] = clr_msg_flags,
+ [IPMI_CMD_GET_MSG_FLAGS] = get_msg_flags,
+ [IPMI_CMD_GET_MSG] = get_msg,
+ [IPMI_CMD_SEND_MSG] = send_msg,
+ [IPMI_CMD_READ_EVT_MSG_BUF] = read_evt_msg_buf,
+ [IPMI_CMD_RESET_WATCHDOG_TIMER] = reset_watchdog_timer,
+ [IPMI_CMD_SET_WATCHDOG_TIMER] = set_watchdog_timer,
+ [IPMI_CMD_GET_WATCHDOG_TIMER] = get_watchdog_timer,
+};
+static const IPMINetfn app_netfn = {
+ .cmd_nums = IPMI_NETFN_APP_MAXCMD,
+ .cmd_handlers = app_cmds
+};
+
+static const IPMICmdHandler storage_cmds[IPMI_NETFN_STORAGE_MAXCMD] = {
+ [IPMI_CMD_GET_SDR_REP_INFO] = get_sdr_rep_info,
+ [IPMI_CMD_RESERVE_SDR_REP] = reserve_sdr_rep,
+ [IPMI_CMD_GET_SDR] = get_sdr,
+ [IPMI_CMD_ADD_SDR] = add_sdr,
+ [IPMI_CMD_CLEAR_SDR_REP] = clear_sdr_rep,
+ [IPMI_CMD_GET_SEL_INFO] = get_sel_info,
+ [IPMI_CMD_RESERVE_SEL] = reserve_sel,
+ [IPMI_CMD_GET_SEL_ENTRY] = get_sel_entry,
+ [IPMI_CMD_ADD_SEL_ENTRY] = add_sel_entry,
+ [IPMI_CMD_CLEAR_SEL] = clear_sel,
+ [IPMI_CMD_GET_SEL_TIME] = get_sel_time,
+ [IPMI_CMD_SET_SEL_TIME] = set_sel_time,
+};
+
+static const IPMINetfn storage_netfn = {
+ .cmd_nums = IPMI_NETFN_STORAGE_MAXCMD,
+ .cmd_handlers = storage_cmds
+};
+
+static void register_cmds(IPMIBmcSim *s)
+{
+ ipmi_register_netfn(s, IPMI_NETFN_CHASSIS, &chassis_netfn);
+ ipmi_register_netfn(s, IPMI_NETFN_SENSOR_EVENT, &sensor_event_netfn);
+ ipmi_register_netfn(s, IPMI_NETFN_APP, &app_netfn);
+ ipmi_register_netfn(s, IPMI_NETFN_STORAGE, &storage_netfn);
+}
+
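+/*
+ * Initial SDR contents.  Each entry starts with a 5 byte header: a 2 byte
+ * record id, the SDR version, the record type and the number of remaining
+ * bytes.  A record id of 0xffff terminates the list.
+ */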
+static const uint8_t init_sdrs[] = {
+ /* Watchdog device */
+ 0x00, 0x00, 0x51, 0x02, 35, 0x20, 0x00, 0x00,
+ 0x23, 0x01, 0x63, 0x00, 0x23, 0x6f, 0x0f, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8,
+ 'W', 'a', 't', 'c', 'h', 'd', 'o', 'g',
+ /* End */
+ 0xff, 0xff, 0x00, 0x00, 0x00
+};
+
+static const VMStateDescription vmstate_ipmi_sim = {
+ .name = TYPE_IPMI_BMC_SIMULATOR,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(bmc_global_enables, IPMIBmcSim),
+ VMSTATE_UINT8(msg_flags, IPMIBmcSim),
+ VMSTATE_BOOL(watchdog_initialized, IPMIBmcSim),
+ VMSTATE_UINT8(watchdog_use, IPMIBmcSim),
+ VMSTATE_UINT8(watchdog_action, IPMIBmcSim),
+ VMSTATE_UINT8(watchdog_pretimeout, IPMIBmcSim),
+ VMSTATE_BOOL(watchdog_expired, IPMIBmcSim),
+ VMSTATE_UINT16(watchdog_timeout, IPMIBmcSim),
+ VMSTATE_BOOL(watchdog_running, IPMIBmcSim),
+ VMSTATE_BOOL(watchdog_preaction_ran, IPMIBmcSim),
+ VMSTATE_INT64(watchdog_expiry, IPMIBmcSim),
+ VMSTATE_UINT8_ARRAY(evtbuf, IPMIBmcSim, 16),
+ VMSTATE_UINT8(sensors[IPMI_WATCHDOG_SENSOR].status, IPMIBmcSim),
+ VMSTATE_UINT8(sensors[IPMI_WATCHDOG_SENSOR].reading, IPMIBmcSim),
+ VMSTATE_UINT16(sensors[IPMI_WATCHDOG_SENSOR].states, IPMIBmcSim),
+ VMSTATE_UINT16(sensors[IPMI_WATCHDOG_SENSOR].assert_states, IPMIBmcSim),
+ VMSTATE_UINT16(sensors[IPMI_WATCHDOG_SENSOR].deassert_states,
+ IPMIBmcSim),
+ VMSTATE_UINT16(sensors[IPMI_WATCHDOG_SENSOR].assert_enable, IPMIBmcSim),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void ipmi_sim_init(Object *obj)
+{
+ IPMIBmc *b = IPMI_BMC(obj);
+ unsigned int i;
+ unsigned int recid;
+ IPMIBmcSim *ibs = IPMI_BMC_SIMULATOR(b);
+
+ qemu_mutex_init(&ibs->lock);
+ QTAILQ_INIT(&ibs->rcvbufs);
+
+ ibs->bmc_global_enables = (1 << IPMI_BMC_EVENT_LOG_BIT);
+ ibs->device_id = 0x20;
+ ibs->ipmi_version = 0x02; /* IPMI 2.0 */
+ for (i = 0; i < 4; i++) {
+ ibs->sel.last_addition[i] = 0xff;
+ ibs->sel.last_clear[i] = 0xff;
+ ibs->sdr.last_addition[i] = 0xff;
+ ibs->sdr.last_clear[i] = 0xff;
+ }
+
+ for (i = 0;;) {
+ int len;
+ if ((i + 5) > sizeof(init_sdrs)) {
+ error_report("Problem with recid 0x%4.4x: \n", i);
+ return;
+ }
+ len = init_sdrs[i + 4];
+ recid = init_sdrs[i] | (init_sdrs[i + 1] << 8);
+ if (recid == 0xffff) {
+ break;
+ }
+ if ((i + len + 5) > sizeof(init_sdrs)) {
+ error_report("Problem with recid 0x%4.4x\n", i);
+ return;
+ }
+ sdr_add_entry(ibs, init_sdrs + i, len, NULL);
+ i += len + 5;
+ }
+
+ ipmi_init_sensors_from_sdrs(ibs);
+ register_cmds(ibs);
+
+ ibs->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ipmi_timeout, ibs);
+
+ vmstate_register(NULL, 0, &vmstate_ipmi_sim, ibs);
+}
+
+static void ipmi_sim_class_init(ObjectClass *oc, void *data)
+{
+ IPMIBmcClass *bk = IPMI_BMC_CLASS(oc);
+
+ bk->handle_command = ipmi_sim_handle_command;
+}
+
+static const TypeInfo ipmi_sim_type = {
+ .name = TYPE_IPMI_BMC_SIMULATOR,
+ .parent = TYPE_IPMI_BMC,
+ .instance_size = sizeof(IPMIBmcSim),
+ .instance_init = ipmi_sim_init,
+ .class_init = ipmi_sim_class_init,
+};
+
+static void ipmi_sim_register_types(void)
+{
+ type_register_static(&ipmi_sim_type);
+}
+
+type_init(ipmi_sim_register_types)
diff --git a/hw/ipmi/isa_ipmi_bt.c b/hw/ipmi/isa_ipmi_bt.c
new file mode 100644
index 0000000..21fa4a7
--- /dev/null
+++ b/hw/ipmi/isa_ipmi_bt.c
@@ -0,0 +1,528 @@
+/*
+ * QEMU ISA IPMI BT emulation
+ *
+ * Copyright (c) 2015 Corey Minyard, MontaVista Software, LLC
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/ipmi/ipmi.h"
+#include "hw/isa/isa.h"
+#include "hw/i386/pc.h"
+
+/* Control register */
+#define IPMI_BT_CLR_WR_BIT 0
+#define IPMI_BT_CLR_RD_BIT 1
+#define IPMI_BT_H2B_ATN_BIT 2
+#define IPMI_BT_B2H_ATN_BIT 3
+#define IPMI_BT_SMS_ATN_BIT 4
+#define IPMI_BT_HBUSY_BIT 6
+#define IPMI_BT_BBUSY_BIT 7
+
+#define IPMI_BT_CLR_WR_MASK (1 << IPMI_BT_CLR_WR_BIT)
+#define IPMI_BT_GET_CLR_WR(d) (((d) >> IPMI_BT_CLR_WR_BIT) & 0x1)
+#define IPMI_BT_SET_CLR_WR(d, v) (d) = (((d) & ~IPMI_BT_CLR_WR_MASK) | \
+ (((v & 1) << IPMI_BT_CLR_WR_BIT)))
+
+#define IPMI_BT_CLR_RD_MASK (1 << IPMI_BT_CLR_RD_BIT)
+#define IPMI_BT_GET_CLR_RD(d) (((d) >> IPMI_BT_CLR_RD_BIT) & 0x1)
+#define IPMI_BT_SET_CLR_RD(d, v) (d) = (((d) & ~IPMI_BT_CLR_RD_MASK) | \
+ (((v & 1) << IPMI_BT_CLR_RD_BIT)))
+
+#define IPMI_BT_H2B_ATN_MASK (1 << IPMI_BT_H2B_ATN_BIT)
+#define IPMI_BT_GET_H2B_ATN(d) (((d) >> IPMI_BT_H2B_ATN_BIT) & 0x1)
+#define IPMI_BT_SET_H2B_ATN(d, v) (d) = (((d) & ~IPMI_BT_H2B_ATN_MASK) | \
+ (((v & 1) << IPMI_BT_H2B_ATN_BIT)))
+
+#define IPMI_BT_B2H_ATN_MASK (1 << IPMI_BT_B2H_ATN_BIT)
+#define IPMI_BT_GET_B2H_ATN(d) (((d) >> IPMI_BT_B2H_ATN_BIT) & 0x1)
+#define IPMI_BT_SET_B2H_ATN(d, v) (d) = (((d) & ~IPMI_BT_B2H_ATN_MASK) | \
+ (((v & 1) << IPMI_BT_B2H_ATN_BIT)))
+
+#define IPMI_BT_SMS_ATN_MASK (1 << IPMI_BT_SMS_ATN_BIT)
+#define IPMI_BT_GET_SMS_ATN(d) (((d) >> IPMI_BT_SMS_ATN_BIT) & 0x1)
+#define IPMI_BT_SET_SMS_ATN(d, v) (d) = (((d) & ~IPMI_BT_SMS_ATN_MASK) | \
+ (((v & 1) << IPMI_BT_SMS_ATN_BIT)))
+
+#define IPMI_BT_HBUSY_MASK (1 << IPMI_BT_HBUSY_BIT)
+#define IPMI_BT_GET_HBUSY(d) (((d) >> IPMI_BT_HBUSY_BIT) & 0x1)
+#define IPMI_BT_SET_HBUSY(d, v) (d) = (((d) & ~IPMI_BT_HBUSY_MASK) | \
+ (((v & 1) << IPMI_BT_HBUSY_BIT)))
+
+#define IPMI_BT_BBUSY_MASK (1 << IPMI_BT_BBUSY_BIT)
+#define IPMI_BT_GET_BBUSY(d) (((d) >> IPMI_BT_BBUSY_BIT) & 0x1)
+#define IPMI_BT_SET_BBUSY(d, v) (d) = (((d) & ~IPMI_BT_BBUSY_MASK) | \
+ (((v & 1) << IPMI_BT_BBUSY_BIT)))
+
+
+/* Mask register */
+#define IPMI_BT_B2H_IRQ_EN_BIT 0
+#define IPMI_BT_B2H_IRQ_BIT 1
+
+#define IPMI_BT_B2H_IRQ_EN_MASK (1 << IPMI_BT_B2H_IRQ_EN_BIT)
+#define IPMI_BT_GET_B2H_IRQ_EN(d) (((d) >> IPMI_BT_B2H_IRQ_EN_BIT) & 0x1)
+#define IPMI_BT_SET_B2H_IRQ_EN(d, v) (d) = (((d) & ~IPMI_BT_B2H_IRQ_EN_MASK) | \
+ (((v & 1) << IPMI_BT_B2H_IRQ_EN_BIT)))
+
+#define IPMI_BT_B2H_IRQ_MASK (1 << IPMI_BT_B2H_IRQ_BIT)
+#define IPMI_BT_GET_B2H_IRQ(d) (((d) >> IPMI_BT_B2H_IRQ_BIT) & 0x1)
+#define IPMI_BT_SET_B2H_IRQ(d, v) (d) = (((d) & ~IPMI_BT_B2H_IRQ_MASK) | \
+ (((v & 1) << IPMI_BT_B2H_IRQ_BIT)))
+
+typedef struct IPMIBT {
+ IPMIBmc *bmc;
+
+ bool do_wake;
+
+ qemu_irq irq;
+
+ uint32_t io_base;
+ unsigned long io_length;
+ MemoryRegion io;
+
+ bool obf_irq_set;
+ bool atn_irq_set;
+ bool use_irq;
+ bool irqs_enabled;
+
+ uint8_t outmsg[MAX_IPMI_MSG_SIZE];
+ uint32_t outpos;
+ uint32_t outlen;
+
+ uint8_t inmsg[MAX_IPMI_MSG_SIZE];
+ uint32_t inlen;
+
+ uint8_t control_reg;
+ uint8_t mask_reg;
+
+ /*
+ * This is a response number that we send with the command to make
+ * sure that the response matches the command.
+ */
+ uint8_t waiting_rsp;
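+    /* The sequence number from the host, echoed back in the response. */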
+ uint8_t waiting_seq;
+} IPMIBT;
+
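+/*
+ * Get BT Interface Capabilities is handled here in the interface code
+ * rather than being passed on to the BMC.
+ */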
+#define IPMI_CMD_GET_BT_INTF_CAP 0x36
+
+static void ipmi_bt_handle_event(IPMIInterface *ii)
+{
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+ IPMIBT *ib = iic->get_backend_data(ii);
+
+ if (ib->inlen < 4) {
+ goto out;
+ }
+ /* Note that overruns are handled by handle_command */
+ if (ib->inmsg[0] != (ib->inlen - 1)) {
+ /* Length mismatch, just ignore. */
+ IPMI_BT_SET_BBUSY(ib->control_reg, 1);
+ ib->inlen = 0;
+ goto out;
+ }
+ if ((ib->inmsg[1] == (IPMI_NETFN_APP << 2)) &&
+ (ib->inmsg[3] == IPMI_CMD_GET_BT_INTF_CAP)) {
+ /* We handle this one ourselves. */
+ ib->outmsg[0] = 9;
+ ib->outmsg[1] = ib->inmsg[1] | 0x04;
+ ib->outmsg[2] = ib->inmsg[2];
+ ib->outmsg[3] = ib->inmsg[3];
+ ib->outmsg[4] = 0;
+ ib->outmsg[5] = 1; /* Only support 1 outstanding request. */
+ if (sizeof(ib->inmsg) > 0xff) { /* Input buffer size */
+ ib->outmsg[6] = 0xff;
+ } else {
+ ib->outmsg[6] = (unsigned char) sizeof(ib->inmsg);
+ }
+ if (sizeof(ib->outmsg) > 0xff) { /* Output buffer size */
+ ib->outmsg[7] = 0xff;
+ } else {
+ ib->outmsg[7] = (unsigned char) sizeof(ib->outmsg);
+ }
+ ib->outmsg[8] = 10; /* Max request to response time */
+ ib->outmsg[9] = 0; /* Don't recommend retries */
+ ib->outlen = 10;
+ IPMI_BT_SET_BBUSY(ib->control_reg, 0);
+ IPMI_BT_SET_B2H_ATN(ib->control_reg, 1);
+ if (ib->use_irq && ib->irqs_enabled &&
+ !IPMI_BT_GET_B2H_IRQ(ib->mask_reg) &&
+ IPMI_BT_GET_B2H_IRQ_EN(ib->mask_reg)) {
+ IPMI_BT_SET_B2H_IRQ(ib->mask_reg, 1);
+ qemu_irq_raise(ib->irq);
+ }
+ goto out;
+ }
+ ib->waiting_seq = ib->inmsg[2];
+ ib->inmsg[2] = ib->inmsg[1];
+ {
+ IPMIBmcClass *bk = IPMI_BMC_GET_CLASS(ib->bmc);
+ bk->handle_command(ib->bmc, ib->inmsg + 2, ib->inlen - 2,
+ sizeof(ib->inmsg), ib->waiting_rsp);
+ }
+ out:
+ return;
+}
+
+static void ipmi_bt_handle_rsp(IPMIInterface *ii, uint8_t msg_id,
+ unsigned char *rsp, unsigned int rsp_len)
+{
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+ IPMIBT *ib = iic->get_backend_data(ii);
+
+ if (ib->waiting_rsp == msg_id) {
+ ib->waiting_rsp++;
+ if (rsp_len > (sizeof(ib->outmsg) - 2)) {
+ ib->outmsg[0] = 4;
+ ib->outmsg[1] = rsp[0];
+ ib->outmsg[2] = ib->waiting_seq;
+ ib->outmsg[3] = rsp[1];
+ ib->outmsg[4] = IPMI_CC_CANNOT_RETURN_REQ_NUM_BYTES;
+ ib->outlen = 5;
+ } else {
+ ib->outmsg[0] = rsp_len + 1;
+ ib->outmsg[1] = rsp[0];
+ ib->outmsg[2] = ib->waiting_seq;
+ memcpy(ib->outmsg + 3, rsp + 1, rsp_len - 1);
+ ib->outlen = rsp_len + 2;
+ }
+ IPMI_BT_SET_BBUSY(ib->control_reg, 0);
+ IPMI_BT_SET_B2H_ATN(ib->control_reg, 1);
+ if (ib->use_irq && ib->irqs_enabled &&
+ !IPMI_BT_GET_B2H_IRQ(ib->mask_reg) &&
+ IPMI_BT_GET_B2H_IRQ_EN(ib->mask_reg)) {
+ IPMI_BT_SET_B2H_IRQ(ib->mask_reg, 1);
+ qemu_irq_raise(ib->irq);
+ }
+ }
+}
+
+
+static uint64_t ipmi_bt_ioport_read(void *opaque, hwaddr addr, unsigned size)
+{
+ IPMIInterface *ii = opaque;
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+ IPMIBT *ib = iic->get_backend_data(ii);
+ uint32_t ret = 0xff;
+
+ switch (addr & 3) {
+ case 0:
+ ret = ib->control_reg;
+ break;
+ case 1:
+ if (ib->outpos < ib->outlen) {
+ ret = ib->outmsg[ib->outpos];
+ ib->outpos++;
+ if (ib->outpos == ib->outlen) {
+ ib->outpos = 0;
+ ib->outlen = 0;
+ }
+ } else {
+ ret = 0xff;
+ }
+ break;
+ case 2:
+ ret = ib->mask_reg;
+ break;
+ }
+ return ret;
+}
+
+static void ipmi_bt_signal(IPMIBT *ib, IPMIInterface *ii)
+{
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+
+ ib->do_wake = 1;
+ while (ib->do_wake) {
+ ib->do_wake = 0;
+ iic->handle_if_event(ii);
+ }
+}
+
+static void ipmi_bt_ioport_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ IPMIInterface *ii = opaque;
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+ IPMIBT *ib = iic->get_backend_data(ii);
+
+ switch (addr & 3) {
+ case 0:
+ if (IPMI_BT_GET_CLR_WR(val)) {
+ ib->inlen = 0;
+ }
+ if (IPMI_BT_GET_CLR_RD(val)) {
+ ib->outpos = 0;
+ }
+ if (IPMI_BT_GET_B2H_ATN(val)) {
+ IPMI_BT_SET_B2H_ATN(ib->control_reg, 0);
+ }
+ if (IPMI_BT_GET_SMS_ATN(val)) {
+ IPMI_BT_SET_SMS_ATN(ib->control_reg, 0);
+ }
+ if (IPMI_BT_GET_HBUSY(val)) {
+ /* Toggle */
+ IPMI_BT_SET_HBUSY(ib->control_reg,
+ !IPMI_BT_GET_HBUSY(ib->control_reg));
+ }
+ if (IPMI_BT_GET_H2B_ATN(val)) {
+ IPMI_BT_SET_BBUSY(ib->control_reg, 1);
+ ipmi_bt_signal(ib, ii);
+ }
+ break;
+
+ case 1:
+ if (ib->inlen < sizeof(ib->inmsg)) {
+ ib->inmsg[ib->inlen] = val;
+ }
+ ib->inlen++;
+ break;
+
+ case 2:
+ if (IPMI_BT_GET_B2H_IRQ_EN(val) !=
+ IPMI_BT_GET_B2H_IRQ_EN(ib->mask_reg)) {
+ if (IPMI_BT_GET_B2H_IRQ_EN(val)) {
+ if (IPMI_BT_GET_B2H_ATN(ib->control_reg) ||
+ IPMI_BT_GET_SMS_ATN(ib->control_reg)) {
+ IPMI_BT_SET_B2H_IRQ(ib->mask_reg, 1);
+ qemu_irq_raise(ib->irq);
+ }
+ IPMI_BT_SET_B2H_IRQ_EN(ib->mask_reg, 1);
+ } else {
+ if (IPMI_BT_GET_B2H_IRQ(ib->mask_reg)) {
+ IPMI_BT_SET_B2H_IRQ(ib->mask_reg, 0);
+ qemu_irq_lower(ib->irq);
+ }
+ IPMI_BT_SET_B2H_IRQ_EN(ib->mask_reg, 0);
+ }
+ }
+ if (IPMI_BT_GET_B2H_IRQ(val) && IPMI_BT_GET_B2H_IRQ(ib->mask_reg)) {
+ IPMI_BT_SET_B2H_IRQ(ib->mask_reg, 0);
+ qemu_irq_lower(ib->irq);
+ }
+ break;
+ }
+}
+
+static const MemoryRegionOps ipmi_bt_io_ops = {
+ .read = ipmi_bt_ioport_read,
+ .write = ipmi_bt_ioport_write,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 1,
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void ipmi_bt_set_atn(IPMIInterface *ii, int val, int irq)
+{
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+ IPMIBT *ib = iic->get_backend_data(ii);
+
+ if (!!val == IPMI_BT_GET_SMS_ATN(ib->control_reg)) {
+ return;
+ }
+
+ IPMI_BT_SET_SMS_ATN(ib->control_reg, val);
+ if (val) {
+ if (irq && ib->use_irq && ib->irqs_enabled &&
+ !IPMI_BT_GET_B2H_ATN(ib->control_reg) &&
+ IPMI_BT_GET_B2H_IRQ_EN(ib->mask_reg)) {
+ IPMI_BT_SET_B2H_IRQ(ib->mask_reg, 1);
+ qemu_irq_raise(ib->irq);
+ }
+ } else {
+ if (!IPMI_BT_GET_B2H_ATN(ib->control_reg) &&
+ IPMI_BT_GET_B2H_IRQ(ib->mask_reg)) {
+ IPMI_BT_SET_B2H_IRQ(ib->mask_reg, 0);
+ qemu_irq_lower(ib->irq);
+ }
+ }
+}
+
+static void ipmi_bt_handle_reset(IPMIInterface *ii, bool is_cold)
+{
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+ IPMIBT *ib = iic->get_backend_data(ii);
+
+ if (is_cold) {
+ /* Disable the BT interrupt on reset */
+ if (IPMI_BT_GET_B2H_IRQ(ib->mask_reg)) {
+ IPMI_BT_SET_B2H_IRQ(ib->mask_reg, 0);
+ qemu_irq_lower(ib->irq);
+ }
+ IPMI_BT_SET_B2H_IRQ_EN(ib->mask_reg, 0);
+ }
+}
+
+static void ipmi_bt_set_irq_enable(IPMIInterface *ii, int val)
+{
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+ IPMIBT *ib = iic->get_backend_data(ii);
+
+ ib->irqs_enabled = val;
+}
+
+static void ipmi_bt_init(IPMIInterface *ii, Error **errp)
+{
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+ IPMIBT *ib = iic->get_backend_data(ii);
+
+ ib->io_length = 3;
+
+ memory_region_init_io(&ib->io, NULL, &ipmi_bt_io_ops, ii, "ipmi-bt", 3);
+}
+
+static void ipmi_bt_class_init(IPMIInterfaceClass *iic)
+{
+ iic->init = ipmi_bt_init;
+ iic->set_atn = ipmi_bt_set_atn;
+ iic->handle_rsp = ipmi_bt_handle_rsp;
+ iic->handle_if_event = ipmi_bt_handle_event;
+ iic->set_irq_enable = ipmi_bt_set_irq_enable;
+ iic->reset = ipmi_bt_handle_reset;
+}
+
+
+#define TYPE_ISA_IPMI_BT "isa-ipmi-bt"
+#define ISA_IPMI_BT(obj) OBJECT_CHECK(ISAIPMIBTDevice, (obj), \
+ TYPE_ISA_IPMI_BT)
+
+typedef struct ISAIPMIBTDevice {
+ ISADevice dev;
+    int32_t isairq;
+ IPMIBT bt;
+ IPMIFwInfo fwinfo;
+} ISAIPMIBTDevice;
+
+static void isa_ipmi_bt_realize(DeviceState *dev, Error **errp)
+{
+ ISADevice *isadev = ISA_DEVICE(dev);
+ ISAIPMIBTDevice *iib = ISA_IPMI_BT(dev);
+ IPMIInterface *ii = IPMI_INTERFACE(dev);
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+
+ if (!iib->bt.bmc) {
+ error_setg(errp, "IPMI device requires a bmc attribute to be set");
+ return;
+ }
+
+ iib->bt.bmc->intf = ii;
+
+ iic->init(ii, errp);
+    if (*errp) {
+        return;
+    }
+
+ if (iib->isairq > 0) {
+ isa_init_irq(isadev, &iib->bt.irq, iib->isairq);
+ iib->bt.use_irq = 1;
+ }
+
+ qdev_set_legacy_instance_id(dev, iib->bt.io_base, iib->bt.io_length);
+
+ isa_register_ioport(isadev, &iib->bt.io, iib->bt.io_base);
+
+ iib->fwinfo.interface_name = "bt";
+ iib->fwinfo.interface_type = IPMI_SMBIOS_BT;
+ iib->fwinfo.ipmi_spec_major_revision = 2;
+ iib->fwinfo.ipmi_spec_minor_revision = 0;
+ iib->fwinfo.base_address = iib->bt.io_base;
+ iib->fwinfo.register_length = iib->bt.io_length;
+ iib->fwinfo.register_spacing = 1;
+ iib->fwinfo.memspace = IPMI_MEMSPACE_IO;
+ iib->fwinfo.irq_type = IPMI_LEVEL_IRQ;
+ iib->fwinfo.interrupt_number = iib->isairq;
+ iib->fwinfo.acpi_parent = "\\_SB.PCI0.ISA";
+ iib->fwinfo.i2c_slave_address = iib->bt.bmc->slave_addr;
+ ipmi_add_fwinfo(&iib->fwinfo, errp);
+}
+
+static const VMStateDescription vmstate_ISAIPMIBTDevice = {
+ .name = TYPE_IPMI_INTERFACE,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL(bt.obf_irq_set, ISAIPMIBTDevice),
+ VMSTATE_BOOL(bt.atn_irq_set, ISAIPMIBTDevice),
+ VMSTATE_BOOL(bt.use_irq, ISAIPMIBTDevice),
+ VMSTATE_BOOL(bt.irqs_enabled, ISAIPMIBTDevice),
+ VMSTATE_UINT32(bt.outpos, ISAIPMIBTDevice),
+ VMSTATE_VBUFFER_UINT32(bt.outmsg, ISAIPMIBTDevice, 1, NULL, 0,
+ bt.outlen),
+ VMSTATE_VBUFFER_UINT32(bt.inmsg, ISAIPMIBTDevice, 1, NULL, 0,
+ bt.inlen),
+ VMSTATE_UINT8(bt.control_reg, ISAIPMIBTDevice),
+ VMSTATE_UINT8(bt.mask_reg, ISAIPMIBTDevice),
+ VMSTATE_UINT8(bt.waiting_rsp, ISAIPMIBTDevice),
+ VMSTATE_UINT8(bt.waiting_seq, ISAIPMIBTDevice),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void isa_ipmi_bt_init(Object *obj)
+{
+ ISAIPMIBTDevice *iib = ISA_IPMI_BT(obj);
+
+ ipmi_bmc_find_and_link(obj, (Object **) &iib->bt.bmc);
+
+ vmstate_register(NULL, 0, &vmstate_ISAIPMIBTDevice, iib);
+}
+
+static void *isa_ipmi_bt_get_backend_data(IPMIInterface *ii)
+{
+ ISAIPMIBTDevice *iib = ISA_IPMI_BT(ii);
+
+ return &iib->bt;
+}
+
+static Property ipmi_isa_properties[] = {
+ DEFINE_PROP_UINT32("ioport", ISAIPMIBTDevice, bt.io_base, 0xe4),
+ DEFINE_PROP_INT32("irq", ISAIPMIBTDevice, isairq, 5),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void isa_ipmi_bt_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_CLASS(oc);
+
+ dc->realize = isa_ipmi_bt_realize;
+ dc->props = ipmi_isa_properties;
+
+ iic->get_backend_data = isa_ipmi_bt_get_backend_data;
+ ipmi_bt_class_init(iic);
+}
+
+static const TypeInfo isa_ipmi_bt_info = {
+ .name = TYPE_ISA_IPMI_BT,
+ .parent = TYPE_ISA_DEVICE,
+ .instance_size = sizeof(ISAIPMIBTDevice),
+ .instance_init = isa_ipmi_bt_init,
+ .class_init = isa_ipmi_bt_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_IPMI_INTERFACE },
+ { }
+ }
+};
+
+static void ipmi_register_types(void)
+{
+ type_register_static(&isa_ipmi_bt_info);
+}
+
+type_init(ipmi_register_types)
diff --git a/hw/ipmi/isa_ipmi_kcs.c b/hw/ipmi/isa_ipmi_kcs.c
new file mode 100644
index 0000000..c662aee
--- /dev/null
+++ b/hw/ipmi/isa_ipmi_kcs.c
@@ -0,0 +1,493 @@
+/*
+ * QEMU ISA IPMI KCS emulation
+ *
+ * Copyright (c) 2015 Corey Minyard, MontaVista Software, LLC
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/ipmi/ipmi.h"
+#include "hw/isa/isa.h"
+#include "hw/i386/pc.h"
+
+#define IPMI_KCS_OBF_BIT 0
+#define IPMI_KCS_IBF_BIT 1
+#define IPMI_KCS_SMS_ATN_BIT 2
+#define IPMI_KCS_CD_BIT 3
+
+#define IPMI_KCS_OBF_MASK (1 << IPMI_KCS_OBF_BIT)
+#define IPMI_KCS_GET_OBF(d) (((d) >> IPMI_KCS_OBF_BIT) & 0x1)
+#define IPMI_KCS_SET_OBF(d, v) (d) = (((d) & ~IPMI_KCS_OBF_MASK) | \
+ (((v) & 1) << IPMI_KCS_OBF_BIT))
+#define IPMI_KCS_IBF_MASK (1 << IPMI_KCS_IBF_BIT)
+#define IPMI_KCS_GET_IBF(d) (((d) >> IPMI_KCS_IBF_BIT) & 0x1)
+#define IPMI_KCS_SET_IBF(d, v) (d) = (((d) & ~IPMI_KCS_IBF_MASK) | \
+ (((v) & 1) << IPMI_KCS_IBF_BIT))
+#define IPMI_KCS_SMS_ATN_MASK (1 << IPMI_KCS_SMS_ATN_BIT)
+#define IPMI_KCS_GET_SMS_ATN(d) (((d) >> IPMI_KCS_SMS_ATN_BIT) & 0x1)
+#define IPMI_KCS_SET_SMS_ATN(d, v) (d) = (((d) & ~IPMI_KCS_SMS_ATN_MASK) | \
+ (((v) & 1) << IPMI_KCS_SMS_ATN_BIT))
+#define IPMI_KCS_CD_MASK (1 << IPMI_KCS_CD_BIT)
+#define IPMI_KCS_GET_CD(d) (((d) >> IPMI_KCS_CD_BIT) & 0x1)
+#define IPMI_KCS_SET_CD(d, v) (d) = (((d) & ~IPMI_KCS_CD_MASK) | \
+ (((v) & 1) << IPMI_KCS_CD_BIT))
+
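+/* KCS state machine state, kept in bits 6-7 of the status register */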
+#define IPMI_KCS_IDLE_STATE 0
+#define IPMI_KCS_READ_STATE 1
+#define IPMI_KCS_WRITE_STATE 2
+#define IPMI_KCS_ERROR_STATE 3
+
+#define IPMI_KCS_GET_STATE(d) (((d) >> 6) & 0x3)
+#define IPMI_KCS_SET_STATE(d, v) ((d) = ((d) & ~0xc0) | (((v) & 0x3) << 6))
+
+#define IPMI_KCS_ABORT_STATUS_CMD 0x60
+#define IPMI_KCS_WRITE_START_CMD 0x61
+#define IPMI_KCS_WRITE_END_CMD 0x62
+#define IPMI_KCS_READ_CMD 0x68
+
+#define IPMI_KCS_STATUS_NO_ERR 0x00
+#define IPMI_KCS_STATUS_ABORTED_ERR 0x01
+#define IPMI_KCS_STATUS_BAD_CC_ERR 0x02
+#define IPMI_KCS_STATUS_LENGTH_ERR 0x06
+
+typedef struct IPMIKCS {
+ IPMIBmc *bmc;
+
+ bool do_wake;
+
+ qemu_irq irq;
+
+ uint32_t io_base;
+ unsigned long io_length;
+ MemoryRegion io;
+
+ bool obf_irq_set;
+ bool atn_irq_set;
+ bool use_irq;
+ bool irqs_enabled;
+
+ uint8_t outmsg[MAX_IPMI_MSG_SIZE];
+ uint32_t outpos;
+ uint32_t outlen;
+
+ uint8_t inmsg[MAX_IPMI_MSG_SIZE];
+ uint32_t inlen;
+ bool write_end;
+
+ uint8_t status_reg;
+ uint8_t data_out_reg;
+
+ int16_t data_in_reg; /* -1 means not written */
+ int16_t cmd_reg;
+
+ /*
+ * This is a response number that we send with the command to make
+ * sure that the response matches the command.
+ */
+ uint8_t waiting_rsp;
+} IPMIKCS;
+
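+/*
+ * Set the output buffer full bit and, if interrupts are in use and
+ * enabled, raise the interrupt unless the ATN interrupt already holds
+ * it high.
+ */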
+#define SET_OBF() \
+ do { \
+ IPMI_KCS_SET_OBF(ik->status_reg, 1); \
+ if (ik->use_irq && ik->irqs_enabled && !ik->obf_irq_set) { \
+ ik->obf_irq_set = 1; \
+ if (!ik->atn_irq_set) { \
+ qemu_irq_raise(ik->irq); \
+ } \
+ } \
+ } while (0)
+
+static void ipmi_kcs_signal(IPMIKCS *ik, IPMIInterface *ii)
+{
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+
+ ik->do_wake = 1;
+ while (ik->do_wake) {
+ ik->do_wake = 0;
+ iic->handle_if_event(ii);
+ }
+}
+
+static void ipmi_kcs_handle_event(IPMIInterface *ii)
+{
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+ IPMIKCS *ik = iic->get_backend_data(ii);
+
+ if (ik->cmd_reg == IPMI_KCS_ABORT_STATUS_CMD) {
+ if (IPMI_KCS_GET_STATE(ik->status_reg) != IPMI_KCS_ERROR_STATE) {
+ ik->waiting_rsp++; /* Invalidate the message */
+ ik->outmsg[0] = IPMI_KCS_STATUS_ABORTED_ERR;
+ ik->outlen = 1;
+ ik->outpos = 0;
+ IPMI_KCS_SET_STATE(ik->status_reg, IPMI_KCS_ERROR_STATE);
+ SET_OBF();
+ }
+ goto out;
+ }
+
+ switch (IPMI_KCS_GET_STATE(ik->status_reg)) {
+ case IPMI_KCS_IDLE_STATE:
+ if (ik->cmd_reg == IPMI_KCS_WRITE_START_CMD) {
+ IPMI_KCS_SET_STATE(ik->status_reg, IPMI_KCS_WRITE_STATE);
+ ik->cmd_reg = -1;
+ ik->write_end = 0;
+ ik->inlen = 0;
+ SET_OBF();
+ }
+ break;
+
+ case IPMI_KCS_READ_STATE:
+ handle_read:
+ if (ik->outpos >= ik->outlen) {
+ IPMI_KCS_SET_STATE(ik->status_reg, IPMI_KCS_IDLE_STATE);
+ SET_OBF();
+ } else if (ik->data_in_reg == IPMI_KCS_READ_CMD) {
+ ik->data_out_reg = ik->outmsg[ik->outpos];
+ ik->outpos++;
+ SET_OBF();
+ } else {
+ ik->outmsg[0] = IPMI_KCS_STATUS_BAD_CC_ERR;
+ ik->outlen = 1;
+ ik->outpos = 0;
+ IPMI_KCS_SET_STATE(ik->status_reg, IPMI_KCS_ERROR_STATE);
+ SET_OBF();
+ goto out;
+ }
+ break;
+
+ case IPMI_KCS_WRITE_STATE:
+ if (ik->data_in_reg != -1) {
+ /*
+ * Don't worry about input overrun here, that will be
+ * handled in the BMC.
+ */
+ if (ik->inlen < sizeof(ik->inmsg)) {
+ ik->inmsg[ik->inlen] = ik->data_in_reg;
+ }
+ ik->inlen++;
+ }
+ if (ik->write_end) {
+ IPMIBmcClass *bk = IPMI_BMC_GET_CLASS(ik->bmc);
+ ik->outlen = 0;
+ ik->write_end = 0;
+ ik->outpos = 0;
+ bk->handle_command(ik->bmc, ik->inmsg, ik->inlen, sizeof(ik->inmsg),
+ ik->waiting_rsp);
+ goto out_noibf;
+ } else if (ik->cmd_reg == IPMI_KCS_WRITE_END_CMD) {
+ ik->cmd_reg = -1;
+ ik->write_end = 1;
+ }
+ SET_OBF();
+ break;
+
+ case IPMI_KCS_ERROR_STATE:
+ if (ik->data_in_reg != -1) {
+ IPMI_KCS_SET_STATE(ik->status_reg, IPMI_KCS_READ_STATE);
+ ik->data_in_reg = IPMI_KCS_READ_CMD;
+ goto handle_read;
+ }
+ break;
+ }
+
+ if (ik->cmd_reg != -1) {
+ /* Got an invalid command */
+ ik->outmsg[0] = IPMI_KCS_STATUS_BAD_CC_ERR;
+ ik->outlen = 1;
+ ik->outpos = 0;
+ IPMI_KCS_SET_STATE(ik->status_reg, IPMI_KCS_ERROR_STATE);
+ }
+
+ out:
+ ik->cmd_reg = -1;
+ ik->data_in_reg = -1;
+ IPMI_KCS_SET_IBF(ik->status_reg, 0);
+ out_noibf:
+ return;
+}
+
+static void ipmi_kcs_handle_rsp(IPMIInterface *ii, uint8_t msg_id,
+ unsigned char *rsp, unsigned int rsp_len)
+{
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+ IPMIKCS *ik = iic->get_backend_data(ii);
+
+ if (ik->waiting_rsp == msg_id) {
+ ik->waiting_rsp++;
+ if (rsp_len > sizeof(ik->outmsg)) {
+ ik->outmsg[0] = rsp[0];
+ ik->outmsg[1] = rsp[1];
+ ik->outmsg[2] = IPMI_CC_CANNOT_RETURN_REQ_NUM_BYTES;
+ ik->outlen = 3;
+ } else {
+ memcpy(ik->outmsg, rsp, rsp_len);
+ ik->outlen = rsp_len;
+ }
+ IPMI_KCS_SET_STATE(ik->status_reg, IPMI_KCS_READ_STATE);
+ ik->data_in_reg = IPMI_KCS_READ_CMD;
+ ipmi_kcs_signal(ik, ii);
+ }
+}
+
+
+static uint64_t ipmi_kcs_ioport_read(void *opaque, hwaddr addr, unsigned size)
+{
+ IPMIInterface *ii = opaque;
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+ IPMIKCS *ik = iic->get_backend_data(ii);
+ uint32_t ret;
+
+ switch (addr & 1) {
+ case 0:
+ ret = ik->data_out_reg;
+ IPMI_KCS_SET_OBF(ik->status_reg, 0);
+ if (ik->obf_irq_set) {
+ ik->obf_irq_set = 0;
+ if (!ik->atn_irq_set) {
+ qemu_irq_lower(ik->irq);
+ }
+ }
+ break;
+ case 1:
+ ret = ik->status_reg;
+ if (ik->atn_irq_set) {
+ ik->atn_irq_set = 0;
+ if (!ik->obf_irq_set) {
+ qemu_irq_lower(ik->irq);
+ }
+ }
+ break;
+ }
+ return ret;
+}
+
+static void ipmi_kcs_ioport_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ IPMIInterface *ii = opaque;
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+ IPMIKCS *ik = iic->get_backend_data(ii);
+
+ if (IPMI_KCS_GET_IBF(ik->status_reg)) {
+ return;
+ }
+
+ switch (addr & 1) {
+ case 0:
+ ik->data_in_reg = val;
+ break;
+
+ case 1:
+ ik->cmd_reg = val;
+ break;
+ }
+ IPMI_KCS_SET_IBF(ik->status_reg, 1);
+ ipmi_kcs_signal(ik, ii);
+}
+
+static const MemoryRegionOps ipmi_kcs_io_ops = {
+ .read = ipmi_kcs_ioport_read,
+ .write = ipmi_kcs_ioport_write,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 1,
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void ipmi_kcs_set_atn(IPMIInterface *ii, int val, int irq)
+{
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+ IPMIKCS *ik = iic->get_backend_data(ii);
+
+ IPMI_KCS_SET_SMS_ATN(ik->status_reg, val);
+ if (val) {
+ if (irq && !ik->atn_irq_set && ik->use_irq && ik->irqs_enabled) {
+ ik->atn_irq_set = 1;
+ if (!ik->obf_irq_set) {
+ qemu_irq_raise(ik->irq);
+ }
+ }
+ } else {
+ if (ik->atn_irq_set) {
+ ik->atn_irq_set = 0;
+ if (!ik->obf_irq_set) {
+ qemu_irq_lower(ik->irq);
+ }
+ }
+ }
+}
+
+static void ipmi_kcs_set_irq_enable(IPMIInterface *ii, int val)
+{
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+ IPMIKCS *ik = iic->get_backend_data(ii);
+
+ ik->irqs_enabled = val;
+}
+
+static void ipmi_kcs_init(IPMIInterface *ii, Error **errp)
+{
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+ IPMIKCS *ik = iic->get_backend_data(ii);
+
+ ik->io_length = 2;
+ memory_region_init_io(&ik->io, NULL, &ipmi_kcs_io_ops, ii, "ipmi-kcs", 2);
+}
+
+static void ipmi_kcs_class_init(IPMIInterfaceClass *iic)
+{
+ iic->init = ipmi_kcs_init;
+ iic->set_atn = ipmi_kcs_set_atn;
+ iic->handle_rsp = ipmi_kcs_handle_rsp;
+ iic->handle_if_event = ipmi_kcs_handle_event;
+ iic->set_irq_enable = ipmi_kcs_set_irq_enable;
+}
+
+
+#define TYPE_ISA_IPMI_KCS "isa-ipmi-kcs"
+#define ISA_IPMI_KCS(obj) OBJECT_CHECK(ISAIPMIKCSDevice, (obj), \
+ TYPE_ISA_IPMI_KCS)
+
+typedef struct ISAIPMIKCSDevice {
+ ISADevice dev;
+    int32_t isairq;
+ IPMIKCS kcs;
+ IPMIFwInfo fwinfo;
+} ISAIPMIKCSDevice;
+
+static void ipmi_isa_realize(DeviceState *dev, Error **errp)
+{
+ ISADevice *isadev = ISA_DEVICE(dev);
+ ISAIPMIKCSDevice *iik = ISA_IPMI_KCS(dev);
+ IPMIInterface *ii = IPMI_INTERFACE(dev);
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_GET_CLASS(ii);
+
+ if (!iik->kcs.bmc) {
+ error_setg(errp, "IPMI device requires a bmc attribute to be set");
+ return;
+ }
+
+ iik->kcs.bmc->intf = ii;
+
+ iic->init(ii, errp);
+    if (*errp) {
+        return;
+    }
+
+ if (iik->isairq > 0) {
+ isa_init_irq(isadev, &iik->kcs.irq, iik->isairq);
+ iik->kcs.use_irq = 1;
+ }
+
+ qdev_set_legacy_instance_id(dev, iik->kcs.io_base, iik->kcs.io_length);
+
+ isa_register_ioport(isadev, &iik->kcs.io, iik->kcs.io_base);
+
+ iik->fwinfo.interface_name = "kcs";
+ iik->fwinfo.interface_type = IPMI_SMBIOS_KCS;
+ iik->fwinfo.ipmi_spec_major_revision = 2;
+ iik->fwinfo.ipmi_spec_minor_revision = 0;
+ iik->fwinfo.base_address = iik->kcs.io_base;
+ iik->fwinfo.i2c_slave_address = iik->kcs.bmc->slave_addr;
+ iik->fwinfo.register_length = iik->kcs.io_length;
+ iik->fwinfo.register_spacing = 1;
+ iik->fwinfo.memspace = IPMI_MEMSPACE_IO;
+ iik->fwinfo.irq_type = IPMI_LEVEL_IRQ;
+ iik->fwinfo.interrupt_number = iik->isairq;
+ iik->fwinfo.acpi_parent = "\\_SB.PCI0.ISA";
+ ipmi_add_fwinfo(&iik->fwinfo, errp);
+}
+
+static const VMStateDescription vmstate_ISAIPMIKCSDevice = {
+ .name = TYPE_IPMI_INTERFACE,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL(kcs.obf_irq_set, ISAIPMIKCSDevice),
+ VMSTATE_BOOL(kcs.atn_irq_set, ISAIPMIKCSDevice),
+ VMSTATE_BOOL(kcs.use_irq, ISAIPMIKCSDevice),
+ VMSTATE_BOOL(kcs.irqs_enabled, ISAIPMIKCSDevice),
+ VMSTATE_UINT32(kcs.outpos, ISAIPMIKCSDevice),
+ VMSTATE_VBUFFER_UINT32(kcs.outmsg, ISAIPMIKCSDevice, 1, NULL, 0,
+ kcs.outlen),
+ VMSTATE_VBUFFER_UINT32(kcs.inmsg, ISAIPMIKCSDevice, 1, NULL, 0,
+ kcs.inlen),
+ VMSTATE_BOOL(kcs.write_end, ISAIPMIKCSDevice),
+ VMSTATE_UINT8(kcs.status_reg, ISAIPMIKCSDevice),
+ VMSTATE_UINT8(kcs.data_out_reg, ISAIPMIKCSDevice),
+ VMSTATE_INT16(kcs.data_in_reg, ISAIPMIKCSDevice),
+ VMSTATE_INT16(kcs.cmd_reg, ISAIPMIKCSDevice),
+ VMSTATE_UINT8(kcs.waiting_rsp, ISAIPMIKCSDevice),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void isa_ipmi_kcs_init(Object *obj)
+{
+ ISAIPMIKCSDevice *iik = ISA_IPMI_KCS(obj);
+
+ ipmi_bmc_find_and_link(obj, (Object **) &iik->kcs.bmc);
+
+ vmstate_register(NULL, 0, &vmstate_ISAIPMIKCSDevice, iik);
+}
+
+static void *isa_ipmi_kcs_get_backend_data(IPMIInterface *ii)
+{
+ ISAIPMIKCSDevice *iik = ISA_IPMI_KCS(ii);
+
+ return &iik->kcs;
+}
+
+static Property ipmi_isa_properties[] = {
+ DEFINE_PROP_UINT32("ioport", ISAIPMIKCSDevice, kcs.io_base, 0xca2),
+ DEFINE_PROP_INT32("irq", ISAIPMIKCSDevice, isairq, 5),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void isa_ipmi_kcs_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ IPMIInterfaceClass *iic = IPMI_INTERFACE_CLASS(oc);
+
+ dc->realize = ipmi_isa_realize;
+ dc->props = ipmi_isa_properties;
+
+ iic->get_backend_data = isa_ipmi_kcs_get_backend_data;
+ ipmi_kcs_class_init(iic);
+}
+
+static const TypeInfo isa_ipmi_kcs_info = {
+ .name = TYPE_ISA_IPMI_KCS,
+ .parent = TYPE_ISA_DEVICE,
+ .instance_size = sizeof(ISAIPMIKCSDevice),
+ .instance_init = isa_ipmi_kcs_init,
+ .class_init = isa_ipmi_kcs_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_IPMI_INTERFACE },
+ { }
+ }
+};
+
+static void ipmi_register_types(void)
+{
+ type_register_static(&isa_ipmi_kcs_info);
+}
+
+type_init(ipmi_register_types)
diff --git a/hw/mem/Makefile.objs b/hw/mem/Makefile.objs
index b000fb4..f12f8b9 100644
--- a/hw/mem/Makefile.objs
+++ b/hw/mem/Makefile.objs
@@ -1 +1,2 @@
common-obj-$(CONFIG_MEM_HOTPLUG) += pc-dimm.o
+common-obj-$(CONFIG_NVDIMM) += nvdimm.o
diff --git a/hw/mem/nvdimm.c b/hw/mem/nvdimm.c
new file mode 100644
index 0000000..4fd397f
--- /dev/null
+++ b/hw/mem/nvdimm.c
@@ -0,0 +1,46 @@
+/*
+ * Non-Volatile Dual In-line Memory Module Virtualization Implementation
+ *
+ * Copyright(C) 2015 Intel Corporation.
+ *
+ * Author:
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ *
+ * Currently, it only supports PMEM Virtualization.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ */
+
+#include "hw/mem/nvdimm.h"
+
+static void nvdimm_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ /* nvdimm hotplug has not been supported yet. */
+ dc->hotpluggable = false;
+}
+
+static const TypeInfo nvdimm_info = {
+ .name = TYPE_NVDIMM,
+ .parent = TYPE_PC_DIMM,
+ .class_init = nvdimm_class_init,
+};
+
+static void nvdimm_register_types(void)
+{
+ type_register_static(&nvdimm_info);
+}
+
+type_init(nvdimm_register_types)
diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c
index 57f8a37..bb98fb2 100644
--- a/hw/pci-bridge/pci_expander_bridge.c
+++ b/hw/pci-bridge/pci_expander_bridge.c
@@ -23,6 +23,9 @@
#define TYPE_PXB_BUS "pxb-bus"
#define PXB_BUS(obj) OBJECT_CHECK(PXBBus, (obj), TYPE_PXB_BUS)
+#define TYPE_PXB_PCIE_BUS "pxb-pcie-bus"
+#define PXB_PCIE_BUS(obj) OBJECT_CHECK(PXBBus, (obj), TYPE_PXB_PCIE_BUS)
+
typedef struct PXBBus {
/*< private >*/
PCIBus parent_obj;
@@ -34,6 +37,9 @@ typedef struct PXBBus {
#define TYPE_PXB_DEVICE "pxb"
#define PXB_DEV(obj) OBJECT_CHECK(PXBDev, (obj), TYPE_PXB_DEVICE)
+#define TYPE_PXB_PCIE_DEVICE "pxb-pcie"
+#define PXB_PCIE_DEV(obj) OBJECT_CHECK(PXBDev, (obj), TYPE_PXB_PCIE_DEVICE)
+
typedef struct PXBDev {
/*< private >*/
PCIDevice parent_obj;
@@ -43,13 +49,18 @@ typedef struct PXBDev {
uint16_t numa_node;
} PXBDev;
+static PXBDev *convert_to_pxb(PCIDevice *dev)
+{
+ return pci_bus_is_express(dev->bus) ? PXB_PCIE_DEV(dev) : PXB_DEV(dev);
+}
+
static GList *pxb_dev_list;
#define TYPE_PXB_HOST "pxb-host"
static int pxb_bus_num(PCIBus *bus)
{
- PXBDev *pxb = PXB_DEV(bus->parent_dev);
+ PXBDev *pxb = convert_to_pxb(bus->parent_dev);
return pxb->bus_nr;
}
@@ -61,7 +72,7 @@ static bool pxb_is_root(PCIBus *bus)
static uint16_t pxb_bus_numa_node(PCIBus *bus)
{
- PXBDev *pxb = PXB_DEV(bus->parent_dev);
+ PXBDev *pxb = convert_to_pxb(bus->parent_dev);
return pxb->numa_node;
}
@@ -82,10 +93,18 @@ static const TypeInfo pxb_bus_info = {
.class_init = pxb_bus_class_init,
};
+static const TypeInfo pxb_pcie_bus_info = {
+ .name = TYPE_PXB_PCIE_BUS,
+ .parent = TYPE_PCIE_BUS,
+ .instance_size = sizeof(PXBBus),
+ .class_init = pxb_bus_class_init,
+};
+
static const char *pxb_host_root_bus_path(PCIHostState *host_bridge,
PCIBus *rootbus)
{
- PXBBus *bus = PXB_BUS(rootbus);
+ PXBBus *bus = pci_bus_is_express(rootbus) ?
+ PXB_PCIE_BUS(rootbus) : PXB_BUS(rootbus);
snprintf(bus->bus_path, 8, "0000:%02x", pxb_bus_num(rootbus));
return bus->bus_path;
@@ -103,7 +122,7 @@ static char *pxb_host_ofw_unit_address(const SysBusDevice *dev)
pxb_host = PCI_HOST_BRIDGE(dev);
pxb_bus = pxb_host->bus;
- pxb_dev = PXB_DEV(pxb_bus->parent_dev);
+ pxb_dev = convert_to_pxb(pxb_bus->parent_dev);
position = g_list_index(pxb_dev_list, pxb_dev);
assert(position >= 0);
@@ -193,10 +212,10 @@ static gint pxb_compare(gconstpointer a, gconstpointer b)
0;
}
-static int pxb_dev_initfn(PCIDevice *dev)
+static int pxb_dev_init_common(PCIDevice *dev, bool pcie)
{
- PXBDev *pxb = PXB_DEV(dev);
- DeviceState *ds, *bds;
+ PXBDev *pxb = convert_to_pxb(dev);
+ DeviceState *ds, *bds = NULL;
PCIBus *bus;
const char *dev_name = NULL;
@@ -211,18 +230,21 @@ static int pxb_dev_initfn(PCIDevice *dev)
}
ds = qdev_create(NULL, TYPE_PXB_HOST);
- bus = pci_bus_new(ds, "pxb-internal", NULL, NULL, 0, TYPE_PXB_BUS);
+ if (pcie) {
+ bus = pci_bus_new(ds, dev_name, NULL, NULL, 0, TYPE_PXB_PCIE_BUS);
+ } else {
+ bus = pci_bus_new(ds, "pxb-internal", NULL, NULL, 0, TYPE_PXB_BUS);
+ bds = qdev_create(BUS(bus), "pci-bridge");
+ bds->id = dev_name;
+ qdev_prop_set_uint8(bds, PCI_BRIDGE_DEV_PROP_CHASSIS_NR, pxb->bus_nr);
+ qdev_prop_set_bit(bds, PCI_BRIDGE_DEV_PROP_SHPC, false);
+ }
bus->parent_dev = dev;
bus->address_space_mem = dev->bus->address_space_mem;
bus->address_space_io = dev->bus->address_space_io;
bus->map_irq = pxb_map_irq_fn;
- bds = qdev_create(BUS(bus), "pci-bridge");
- bds->id = dev_name;
- qdev_prop_set_uint8(bds, PCI_BRIDGE_DEV_PROP_CHASSIS_NR, pxb->bus_nr);
- qdev_prop_set_bit(bds, PCI_BRIDGE_DEV_PROP_SHPC, false);
-
PCI_HOST_BRIDGE(ds)->bus = bus;
if (pxb_register_bus(dev, bus)) {
@@ -230,7 +252,9 @@ static int pxb_dev_initfn(PCIDevice *dev)
}
qdev_init_nofail(ds);
- qdev_init_nofail(bds);
+ if (bds) {
+ qdev_init_nofail(bds);
+ }
pci_word_test_and_set_mask(dev->config + PCI_STATUS,
PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK);
@@ -240,9 +264,19 @@ static int pxb_dev_initfn(PCIDevice *dev)
return 0;
}
+static int pxb_dev_initfn(PCIDevice *dev)
+{
+ if (pci_bus_is_express(dev->bus)) {
+ error_report("pxb devices cannot reside on a PCIe bus!");
+ return -EINVAL;
+ }
+
+ return pxb_dev_init_common(dev, false);
+}
+
static void pxb_dev_exitfn(PCIDevice *pci_dev)
{
- PXBDev *pxb = PXB_DEV(pci_dev);
+ PXBDev *pxb = convert_to_pxb(pci_dev);
pxb_dev_list = g_list_remove(pxb_dev_list, pxb);
}
@@ -276,11 +310,45 @@ static const TypeInfo pxb_dev_info = {
.class_init = pxb_dev_class_init,
};
+static int pxb_pcie_dev_initfn(PCIDevice *dev)
+{
+ if (!pci_bus_is_express(dev->bus)) {
+ error_report("pxb-pcie devices cannot reside on a PCI bus!");
+ return -EINVAL;
+ }
+
+ return pxb_dev_init_common(dev, true);
+}
+
+static void pxb_pcie_dev_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ k->init = pxb_pcie_dev_initfn;
+ k->exit = pxb_dev_exitfn;
+ k->vendor_id = PCI_VENDOR_ID_REDHAT;
+ k->device_id = PCI_DEVICE_ID_REDHAT_PXB_PCIE;
+ k->class_id = PCI_CLASS_BRIDGE_HOST;
+
+ dc->desc = "PCI Express Expander Bridge";
+ dc->props = pxb_dev_properties;
+}
+
+static const TypeInfo pxb_pcie_dev_info = {
+ .name = TYPE_PXB_PCIE_DEVICE,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(PXBDev),
+ .class_init = pxb_pcie_dev_class_init,
+};
+
static void pxb_register_types(void)
{
type_register_static(&pxb_bus_info);
+ type_register_static(&pxb_pcie_bus_info);
type_register_static(&pxb_host_info);
type_register_static(&pxb_dev_info);
+ type_register_static(&pxb_pcie_dev_info);
}
type_init(pxb_register_types)