Diffstat (limited to 'hw/misc')
-rw-r--r--  hw/misc/Makefile.objs         40
-rw-r--r--  hw/misc/a9scu.c              164
-rw-r--r--  hw/misc/applesmc.c           251
-rw-r--r--  hw/misc/arm_l2x0.c           194
-rw-r--r--  hw/misc/arm_sysctl.c         649
-rw-r--r--  hw/misc/cbus.c               618
-rw-r--r--  hw/misc/debugexit.c           75
-rw-r--r--  hw/misc/eccmemctl.c          340
-rw-r--r--  hw/misc/exynos4210_pmu.c     499
-rw-r--r--  hw/misc/imx_ccm.c            321
-rw-r--r--  hw/misc/ivshmem.c            826
-rw-r--r--  hw/misc/lm32_sys.c           172
-rw-r--r--  hw/misc/macio/Makefile.objs    3
-rw-r--r--  hw/misc/macio/cuda.c         740
-rw-r--r--  hw/misc/macio/mac_dbdma.c    859
-rw-r--r--  hw/misc/macio/macio.c        305
-rw-r--r--  hw/misc/max111x.c            193
-rw-r--r--  hw/misc/milkymist-hpdmc.c    170
-rw-r--r--  hw/misc/milkymist-pfpu.c     544
-rw-r--r--  hw/misc/mst_fpga.c           263
-rw-r--r--  hw/misc/omap_clk.c          1264
-rw-r--r--  hw/misc/omap_gpmc.c          894
-rw-r--r--  hw/misc/omap_l4.c            162
-rw-r--r--  hw/misc/omap_sdrc.c          168
-rw-r--r--  hw/misc/omap_tap.c           116
-rw-r--r--  hw/misc/pc-testdev.c         187
-rw-r--r--  hw/misc/puv3_pm.c            149
-rw-r--r--  hw/misc/pxa2xx_pcmcia.c      207
-rw-r--r--  hw/misc/sga.c                 63
-rw-r--r--  hw/misc/slavio_misc.c        508
-rw-r--r--  hw/misc/tmp105.c             269
-rw-r--r--  hw/misc/tmp105.h              47
-rw-r--r--  hw/misc/vfio.c              3206
-rw-r--r--  hw/misc/vmport.c             170
-rw-r--r--  hw/misc/zynq_slcr.c          536
35 files changed, 15172 insertions, 0 deletions
diff --git a/hw/misc/Makefile.objs b/hw/misc/Makefile.objs
new file mode 100644
index 0000000..03699c3
--- /dev/null
+++ b/hw/misc/Makefile.objs
@@ -0,0 +1,40 @@
+common-obj-$(CONFIG_APPLESMC) += applesmc.o
+common-obj-$(CONFIG_MAX111X) += max111x.o
+common-obj-$(CONFIG_TMP105) += tmp105.o
+common-obj-$(CONFIG_ISA_DEBUG) += debugexit.o
+common-obj-$(CONFIG_SGA) += sga.o
+common-obj-$(CONFIG_ISA_TESTDEV) += pc-testdev.o
+
+obj-$(CONFIG_VMPORT) += vmport.o
+
+# ARM devices
+common-obj-$(CONFIG_PL310) += arm_l2x0.o
+
+# PKUnity SoC devices
+common-obj-$(CONFIG_PUV3) += puv3_pm.o
+
+common-obj-$(CONFIG_MACIO) += macio/
+
+ifeq ($(CONFIG_PCI), y)
+obj-$(CONFIG_KVM) += ivshmem.o
+obj-$(CONFIG_LINUX) += vfio.o
+endif
+
+obj-$(CONFIG_REALVIEW) += arm_sysctl.o
+obj-$(CONFIG_A9SCU) += a9scu.o
+obj-$(CONFIG_NSERIES) += cbus.o
+obj-$(CONFIG_ECCMEMCTL) += eccmemctl.o
+obj-$(CONFIG_EXYNOS4) += exynos4210_pmu.o
+obj-$(CONFIG_IMX) += imx_ccm.o
+obj-$(CONFIG_LM32) += lm32_sys.o
+obj-$(CONFIG_MILKYMIST) += milkymist-hpdmc.o
+obj-$(CONFIG_MILKYMIST) += milkymist-pfpu.o
+obj-$(CONFIG_MAINSTONE) += mst_fpga.o
+obj-$(CONFIG_OMAP) += omap_clk.o
+obj-$(CONFIG_OMAP) += omap_gpmc.o
+obj-$(CONFIG_OMAP) += omap_l4.o
+obj-$(CONFIG_OMAP) += omap_sdrc.o
+obj-$(CONFIG_OMAP) += omap_tap.o
+obj-$(CONFIG_PXA2XX) += pxa2xx_pcmcia.o
+obj-$(CONFIG_SLAVIO) += slavio_misc.o
+obj-$(CONFIG_ZYNQ) += zynq_slcr.o
diff --git a/hw/misc/a9scu.c b/hw/misc/a9scu.c
new file mode 100644
index 0000000..05897c2
--- /dev/null
+++ b/hw/misc/a9scu.c
@@ -0,0 +1,164 @@
+/*
+ * Cortex-A9MPCore Snoop Control Unit (SCU) emulation.
+ *
+ * Copyright (c) 2009 CodeSourcery.
+ * Copyright (c) 2011 Linaro Limited.
+ * Written by Paul Brook, Peter Maydell.
+ *
+ * This code is licensed under the GPL.
+ */
+
+#include "hw/sysbus.h"
+
+/* A9MP private memory region. */
+
+typedef struct A9SCUState {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+ uint32_t control;
+ uint32_t status;
+ uint32_t num_cpu;
+} A9SCUState;
+
+#define TYPE_A9_SCU "a9-scu"
+#define A9_SCU(obj) OBJECT_CHECK(A9SCUState, (obj), TYPE_A9_SCU)
+
+static uint64_t a9_scu_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ A9SCUState *s = (A9SCUState *)opaque;
+ switch (offset) {
+ case 0x00: /* Control */
+ return s->control;
+ case 0x04: /* Configuration */
+ return (((1 << s->num_cpu) - 1) << 4) | (s->num_cpu - 1);
+ case 0x08: /* CPU Power Status */
+ return s->status;
+ case 0x09: /* CPU status. */
+ return s->status >> 8;
+ case 0x0a: /* CPU status. */
+ return s->status >> 16;
+ case 0x0b: /* CPU status. */
+ return s->status >> 24;
+ case 0x0c: /* Invalidate All Registers In Secure State */
+ return 0;
+ case 0x40: /* Filtering Start Address Register */
+ case 0x44: /* Filtering End Address Register */
+ /* RAZ/WI, like an implementation with only one AXI master */
+ return 0;
+ case 0x50: /* SCU Access Control Register */
+ case 0x54: /* SCU Non-secure Access Control Register */
+ /* unimplemented, fall through */
+ default:
+ return 0;
+ }
+}
+
+static void a9_scu_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ A9SCUState *s = (A9SCUState *)opaque;
+ uint32_t mask;
+ uint32_t shift;
+ switch (size) {
+ case 1:
+ mask = 0xff;
+ break;
+ case 2:
+ mask = 0xffff;
+ break;
+ case 4:
+ mask = 0xffffffff;
+ break;
+ default:
+ fprintf(stderr, "Invalid size %u in write to a9 scu register %x\n",
+ size, (unsigned)offset);
+ return;
+ }
+
+ switch (offset) {
+ case 0x00: /* Control */
+ s->control = value & 1;
+ break;
+ case 0x4: /* Configuration: RO */
+ break;
+ case 0x08: case 0x09: case 0x0A: case 0x0B: /* Power Control */
+ shift = (offset - 0x8) * 8;
+ s->status &= ~(mask << shift);
+ s->status |= ((value & mask) << shift);
+ break;
+ case 0x0c: /* Invalidate All Registers In Secure State */
+ /* no-op as we do not implement caches */
+ break;
+ case 0x40: /* Filtering Start Address Register */
+ case 0x44: /* Filtering End Address Register */
+ /* RAZ/WI, like an implementation with only one AXI master */
+ break;
+ case 0x50: /* SCU Access Control Register */
+ case 0x54: /* SCU Non-secure Access Control Register */
+ /* unimplemented, fall through */
+ default:
+ break;
+ }
+}
+
+static const MemoryRegionOps a9_scu_ops = {
+ .read = a9_scu_read,
+ .write = a9_scu_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void a9_scu_reset(DeviceState *dev)
+{
+ A9SCUState *s = A9_SCU(dev);
+ s->control = 0;
+}
+
+static void a9_scu_realize(DeviceState *dev, Error ** errp)
+{
+ A9SCUState *s = A9_SCU(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+
+ memory_region_init_io(&s->iomem, &a9_scu_ops, s, "a9-scu", 0x100);
+ sysbus_init_mmio(sbd, &s->iomem);
+}
+
+static const VMStateDescription vmstate_a9_scu = {
+ .name = "a9-scu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(control, A9SCUState),
+ VMSTATE_UINT32(status, A9SCUState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static Property a9_scu_properties[] = {
+ DEFINE_PROP_UINT32("num-cpu", A9SCUState, num_cpu, 1),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void a9_scu_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = a9_scu_realize;
+ dc->props = a9_scu_properties;
+ dc->vmsd = &vmstate_a9_scu;
+ dc->reset = a9_scu_reset;
+}
+
+static const TypeInfo a9_scu_info = {
+ .name = TYPE_A9_SCU,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(A9SCUState),
+ .class_init = a9_scu_class_init,
+};
+
+static void a9mp_register_types(void)
+{
+ type_register_static(&a9_scu_info);
+}
+
+type_init(a9mp_register_types)
diff --git a/hw/misc/applesmc.c b/hw/misc/applesmc.c
new file mode 100644
index 0000000..c29558b
--- /dev/null
+++ b/hw/misc/applesmc.c
@@ -0,0 +1,251 @@
+/*
+ * Apple SMC controller
+ *
+ * Copyright (c) 2007 Alexander Graf
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ * Susanne Graf <suse@csgraf.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * *****************************************************************
+ *
+ * In all Intel-based Apple hardware there is an SMC chip to control the
+ * backlight, fans and several other generic device parameters. It also
+ * contains the magic keys used to dongle Mac OS X to the device.
+ *
+ * This driver was mostly created by looking at the Linux AppleSMC driver
+ * implementation and does not support IRQ.
+ *
+ */
+
+#include "hw/hw.h"
+#include "hw/isa/isa.h"
+#include "ui/console.h"
+#include "qemu/timer.h"
+
+/* #define DEBUG_SMC */
+
+#define APPLESMC_DEFAULT_IOBASE 0x300
+/* data port used by Apple SMC */
+#define APPLESMC_DATA_PORT 0x0
+/* command/status port used by Apple SMC */
+#define APPLESMC_CMD_PORT 0x4
+#define APPLESMC_NR_PORTS 32
+#define APPLESMC_MAX_DATA_LENGTH 32
+
+#define APPLESMC_READ_CMD 0x10
+#define APPLESMC_WRITE_CMD 0x11
+#define APPLESMC_GET_KEY_BY_INDEX_CMD 0x12
+#define APPLESMC_GET_KEY_TYPE_CMD 0x13
+
+#ifdef DEBUG_SMC
+#define smc_debug(...) fprintf(stderr, "AppleSMC: " __VA_ARGS__)
+#else
+#define smc_debug(...) do { } while(0)
+#endif
+
+static char default_osk[64] = "This is a dummy key. Enter the real key "
+ "using the -osk parameter";
+
+struct AppleSMCData {
+ uint8_t len;
+ const char *key;
+ const char *data;
+ QLIST_ENTRY(AppleSMCData) node;
+};
+
+struct AppleSMCStatus {
+ ISADevice dev;
+ uint32_t iobase;
+ uint8_t cmd;
+ uint8_t status;
+ uint8_t key[4];
+ uint8_t read_pos;
+ uint8_t data_len;
+ uint8_t data_pos;
+ uint8_t data[255];
+ uint8_t charactic[4];
+ char *osk;
+ QLIST_HEAD(, AppleSMCData) data_def;
+};
+
+static void applesmc_io_cmd_writeb(void *opaque, uint32_t addr, uint32_t val)
+{
+ struct AppleSMCStatus *s = opaque;
+
+ smc_debug("CMD Write B: %#x = %#x\n", addr, val);
+ switch(val) {
+ case APPLESMC_READ_CMD:
+ s->status = 0x0c;
+ break;
+ }
+ s->cmd = val;
+ s->read_pos = 0;
+ s->data_pos = 0;
+}
+
+static void applesmc_fill_data(struct AppleSMCStatus *s)
+{
+ struct AppleSMCData *d;
+
+ QLIST_FOREACH(d, &s->data_def, node) {
+ if (!memcmp(d->key, s->key, 4)) {
+ smc_debug("Key matched (%s Len=%d Data=%s)\n", d->key,
+ d->len, d->data);
+ memcpy(s->data, d->data, d->len);
+ return;
+ }
+ }
+}
+
+static void applesmc_io_data_writeb(void *opaque, uint32_t addr, uint32_t val)
+{
+ struct AppleSMCStatus *s = opaque;
+
+ smc_debug("DATA Write B: %#x = %#x\n", addr, val);
+ switch(s->cmd) {
+ case APPLESMC_READ_CMD:
+ if(s->read_pos < 4) {
+ s->key[s->read_pos] = val;
+ s->status = 0x04;
+ } else if(s->read_pos == 4) {
+ s->data_len = val;
+ s->status = 0x05;
+ s->data_pos = 0;
+ smc_debug("Key = %c%c%c%c Len = %d\n", s->key[0],
+ s->key[1], s->key[2], s->key[3], val);
+ applesmc_fill_data(s);
+ }
+ s->read_pos++;
+ break;
+ }
+}
+
+static uint32_t applesmc_io_data_readb(void *opaque, uint32_t addr1)
+{
+ struct AppleSMCStatus *s = opaque;
+ uint8_t retval = 0;
+
+ switch(s->cmd) {
+ case APPLESMC_READ_CMD:
+ if(s->data_pos < s->data_len) {
+ retval = s->data[s->data_pos];
+ smc_debug("READ_DATA[%d] = %#hhx\n", s->data_pos,
+ retval);
+ s->data_pos++;
+ if(s->data_pos == s->data_len) {
+ s->status = 0x00;
+ smc_debug("EOF\n");
+ } else
+ s->status = 0x05;
+ }
+ }
+ smc_debug("DATA Read b: %#x = %#x\n", addr1, retval);
+
+ return retval;
+}
+
+static uint32_t applesmc_io_cmd_readb(void *opaque, uint32_t addr1)
+{
+ struct AppleSMCStatus *s = opaque;
+
+ smc_debug("CMD Read B: %#x\n", addr1);
+ return s->status;
+}
+
+static void applesmc_add_key(struct AppleSMCStatus *s, const char *key,
+ int len, const char *data)
+{
+ struct AppleSMCData *def;
+
+ def = g_malloc0(sizeof(struct AppleSMCData));
+ def->key = key;
+ def->len = len;
+ def->data = data;
+
+ QLIST_INSERT_HEAD(&s->data_def, def, node);
+}
+
+static void qdev_applesmc_isa_reset(DeviceState *dev)
+{
+ struct AppleSMCStatus *s = DO_UPCAST(struct AppleSMCStatus, dev.qdev, dev);
+ struct AppleSMCData *d, *next;
+
+ /* Remove existing entries */
+ QLIST_FOREACH_SAFE(d, &s->data_def, node, next) {
+ QLIST_REMOVE(d, node);
+ }
+
+ applesmc_add_key(s, "REV ", 6, "\x01\x13\x0f\x00\x00\x03");
+ applesmc_add_key(s, "OSK0", 32, s->osk);
+ applesmc_add_key(s, "OSK1", 32, s->osk + 32);
+ applesmc_add_key(s, "NATJ", 1, "\0");
+ applesmc_add_key(s, "MSSP", 1, "\0");
+ applesmc_add_key(s, "MSSD", 1, "\0x3");
+}
+
+static int applesmc_isa_init(ISADevice *dev)
+{
+ struct AppleSMCStatus *s = DO_UPCAST(struct AppleSMCStatus, dev, dev);
+
+ register_ioport_read(s->iobase + APPLESMC_DATA_PORT, 4, 1,
+ applesmc_io_data_readb, s);
+ register_ioport_read(s->iobase + APPLESMC_CMD_PORT, 4, 1,
+ applesmc_io_cmd_readb, s);
+ register_ioport_write(s->iobase + APPLESMC_DATA_PORT, 4, 1,
+ applesmc_io_data_writeb, s);
+ register_ioport_write(s->iobase + APPLESMC_CMD_PORT, 4, 1,
+ applesmc_io_cmd_writeb, s);
+
+ if (!s->osk || (strlen(s->osk) != 64)) {
+ fprintf(stderr, "WARNING: Using AppleSMC with invalid key\n");
+ s->osk = default_osk;
+ }
+
+ QLIST_INIT(&s->data_def);
+ qdev_applesmc_isa_reset(&dev->qdev);
+
+ return 0;
+}
+
+static Property applesmc_isa_properties[] = {
+ DEFINE_PROP_HEX32("iobase", struct AppleSMCStatus, iobase,
+ APPLESMC_DEFAULT_IOBASE),
+ DEFINE_PROP_STRING("osk", struct AppleSMCStatus, osk),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void qdev_applesmc_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ISADeviceClass *ic = ISA_DEVICE_CLASS(klass);
+ ic->init = applesmc_isa_init;
+ dc->reset = qdev_applesmc_isa_reset;
+ dc->props = applesmc_isa_properties;
+}
+
+static const TypeInfo applesmc_isa_info = {
+ .name = "isa-applesmc",
+ .parent = TYPE_ISA_DEVICE,
+ .instance_size = sizeof(struct AppleSMCStatus),
+ .class_init = qdev_applesmc_class_init,
+};
+
+static void applesmc_register_types(void)
+{
+ type_register_static(&applesmc_isa_info);
+}
+
+type_init(applesmc_register_types)
diff --git a/hw/misc/arm_l2x0.c b/hw/misc/arm_l2x0.c
new file mode 100644
index 0000000..eb4427d
--- /dev/null
+++ b/hw/misc/arm_l2x0.c
@@ -0,0 +1,194 @@
+/*
+ * ARM dummy L210, L220, PL310 cache controller.
+ *
+ * Copyright (c) 2010-2012 Calxeda
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or any later version, as published by the Free Software
+ * Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "hw/sysbus.h"
+
+/* L2C-310 r3p2 */
+#define CACHE_ID 0x410000c8
+
+typedef struct l2x0_state {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+ uint32_t cache_type;
+ uint32_t ctrl;
+ uint32_t aux_ctrl;
+ uint32_t data_ctrl;
+ uint32_t tag_ctrl;
+ uint32_t filter_start;
+ uint32_t filter_end;
+} l2x0_state;
+
+static const VMStateDescription vmstate_l2x0 = {
+ .name = "l2x0",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(ctrl, l2x0_state),
+ VMSTATE_UINT32(aux_ctrl, l2x0_state),
+ VMSTATE_UINT32(data_ctrl, l2x0_state),
+ VMSTATE_UINT32(tag_ctrl, l2x0_state),
+ VMSTATE_UINT32(filter_start, l2x0_state),
+ VMSTATE_UINT32(filter_end, l2x0_state),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+
+static uint64_t l2x0_priv_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ uint32_t cache_data;
+ l2x0_state *s = (l2x0_state *)opaque;
+ offset &= 0xfff;
+ if (offset >= 0x730 && offset < 0x800) {
+ return 0; /* cache ops complete */
+ }
+ switch (offset) {
+ case 0:
+ return CACHE_ID;
+ case 0x4:
+ /* aux_ctrl values affect cache_type values */
+ cache_data = (s->aux_ctrl & (7 << 17)) >> 15;
+ cache_data |= (s->aux_ctrl & (1 << 16)) >> 16;
+ return s->cache_type |= (cache_data << 18) | (cache_data << 6);
+ case 0x100:
+ return s->ctrl;
+ case 0x104:
+ return s->aux_ctrl;
+ case 0x108:
+ return s->tag_ctrl;
+ case 0x10C:
+ return s->data_ctrl;
+ case 0xC00:
+ return s->filter_start;
+ case 0xC04:
+ return s->filter_end;
+ case 0xF40:
+ return 0;
+ case 0xF60:
+ return 0;
+ case 0xF80:
+ return 0;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "l2x0_priv_read: Bad offset %x\n", (int)offset);
+ break;
+ }
+ return 0;
+}
+
+static void l2x0_priv_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ l2x0_state *s = (l2x0_state *)opaque;
+ offset &= 0xfff;
+ if (offset >= 0x730 && offset < 0x800) {
+ /* ignore */
+ return;
+ }
+ switch (offset) {
+ case 0x100:
+ s->ctrl = value & 1;
+ break;
+ case 0x104:
+ s->aux_ctrl = value;
+ break;
+ case 0x108:
+ s->tag_ctrl = value;
+ break;
+ case 0x10C:
+ s->data_ctrl = value;
+ break;
+ case 0xC00:
+ s->filter_start = value;
+ break;
+ case 0xC04:
+ s->filter_end = value;
+ break;
+ case 0xF40:
+ return;
+ case 0xF60:
+ return;
+ case 0xF80:
+ return;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "l2x0_priv_write: Bad offset %x\n", (int)offset);
+ break;
+ }
+}
+
+static void l2x0_priv_reset(DeviceState *dev)
+{
+ l2x0_state *s = DO_UPCAST(l2x0_state, busdev.qdev, dev);
+
+ s->ctrl = 0;
+ s->aux_ctrl = 0x02020000;
+ s->tag_ctrl = 0;
+ s->data_ctrl = 0;
+ s->filter_start = 0;
+ s->filter_end = 0;
+}
+
+static const MemoryRegionOps l2x0_mem_ops = {
+ .read = l2x0_priv_read,
+ .write = l2x0_priv_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ };
+
+static int l2x0_priv_init(SysBusDevice *dev)
+{
+ l2x0_state *s = FROM_SYSBUS(l2x0_state, dev);
+
+ memory_region_init_io(&s->iomem, &l2x0_mem_ops, s, "l2x0_cc", 0x1000);
+ sysbus_init_mmio(dev, &s->iomem);
+ return 0;
+}
+
+static Property l2x0_properties[] = {
+ DEFINE_PROP_UINT32("cache-type", l2x0_state, cache_type, 0x1c100100),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void l2x0_class_init(ObjectClass *klass, void *data)
+{
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ k->init = l2x0_priv_init;
+ dc->vmsd = &vmstate_l2x0;
+ dc->no_user = 1;
+ dc->props = l2x0_properties;
+ dc->reset = l2x0_priv_reset;
+}
+
+static const TypeInfo l2x0_info = {
+ .name = "l2x0",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(l2x0_state),
+ .class_init = l2x0_class_init,
+};
+
+static void l2x0_register_types(void)
+{
+ type_register_static(&l2x0_info);
+}
+
+type_init(l2x0_register_types)
diff --git a/hw/misc/arm_sysctl.c b/hw/misc/arm_sysctl.c
new file mode 100644
index 0000000..c8b55a8
--- /dev/null
+++ b/hw/misc/arm_sysctl.c
@@ -0,0 +1,649 @@
+/*
+ * Status and system control registers for ARM RealView/Versatile boards.
+ *
+ * Copyright (c) 2006-2007 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GPL.
+ */
+
+#include "hw/hw.h"
+#include "qemu/timer.h"
+#include "qemu/bitops.h"
+#include "hw/sysbus.h"
+#include "hw/arm/primecell.h"
+#include "sysemu/sysemu.h"
+
+#define LOCK_VALUE 0xa05f
+
+typedef struct {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+ qemu_irq pl110_mux_ctrl;
+
+ uint32_t sys_id;
+ uint32_t leds;
+ uint16_t lockval;
+ uint32_t cfgdata1;
+ uint32_t cfgdata2;
+ uint32_t flags;
+ uint32_t nvflags;
+ uint32_t resetlevel;
+ uint32_t proc_id;
+ uint32_t sys_mci;
+ uint32_t sys_cfgdata;
+ uint32_t sys_cfgctrl;
+ uint32_t sys_cfgstat;
+ uint32_t sys_clcd;
+ uint32_t mb_clock[6];
+ uint32_t *db_clock;
+ uint32_t db_num_vsensors;
+ uint32_t *db_voltage;
+ uint32_t db_num_clocks;
+ uint32_t *db_clock_reset;
+} arm_sysctl_state;
+
+static const VMStateDescription vmstate_arm_sysctl = {
+ .name = "realview_sysctl",
+ .version_id = 4,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(leds, arm_sysctl_state),
+ VMSTATE_UINT16(lockval, arm_sysctl_state),
+ VMSTATE_UINT32(cfgdata1, arm_sysctl_state),
+ VMSTATE_UINT32(cfgdata2, arm_sysctl_state),
+ VMSTATE_UINT32(flags, arm_sysctl_state),
+ VMSTATE_UINT32(nvflags, arm_sysctl_state),
+ VMSTATE_UINT32(resetlevel, arm_sysctl_state),
+ VMSTATE_UINT32_V(sys_mci, arm_sysctl_state, 2),
+ VMSTATE_UINT32_V(sys_cfgdata, arm_sysctl_state, 2),
+ VMSTATE_UINT32_V(sys_cfgctrl, arm_sysctl_state, 2),
+ VMSTATE_UINT32_V(sys_cfgstat, arm_sysctl_state, 2),
+ VMSTATE_UINT32_V(sys_clcd, arm_sysctl_state, 3),
+ VMSTATE_UINT32_ARRAY_V(mb_clock, arm_sysctl_state, 6, 4),
+ VMSTATE_VARRAY_UINT32(db_clock, arm_sysctl_state, db_num_clocks,
+ 4, vmstate_info_uint32, uint32_t),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+/* The PB926 actually uses a different format for
+ * its SYS_ID register. Fortunately the bits which are
+ * board type on later boards are distinct.
+ */
+#define BOARD_ID_PB926 0x100
+#define BOARD_ID_EB 0x140
+#define BOARD_ID_PBA8 0x178
+#define BOARD_ID_PBX 0x182
+#define BOARD_ID_VEXPRESS 0x190
+
+static int board_id(arm_sysctl_state *s)
+{
+ /* Extract the board ID field from the SYS_ID register value */
+ return (s->sys_id >> 16) & 0xfff;
+}
+
+static void arm_sysctl_reset(DeviceState *d)
+{
+ arm_sysctl_state *s = FROM_SYSBUS(arm_sysctl_state, SYS_BUS_DEVICE(d));
+ int i;
+
+ s->leds = 0;
+ s->lockval = 0;
+ s->cfgdata1 = 0;
+ s->cfgdata2 = 0;
+ s->flags = 0;
+ s->resetlevel = 0;
+ /* Motherboard oscillators (in Hz) */
+ s->mb_clock[0] = 50000000; /* Static memory clock: 50MHz */
+ s->mb_clock[1] = 23750000; /* motherboard CLCD clock: 23.75MHz */
+ s->mb_clock[2] = 24000000; /* IO FPGA peripheral clock: 24MHz */
+ s->mb_clock[3] = 24000000; /* IO FPGA reserved clock: 24MHz */
+ s->mb_clock[4] = 24000000; /* System bus global clock: 24MHz */
+ s->mb_clock[5] = 24000000; /* IO FPGA reserved clock: 24MHz */
+ /* Daughterboard oscillators: reset from property values */
+ for (i = 0; i < s->db_num_clocks; i++) {
+ s->db_clock[i] = s->db_clock_reset[i];
+ }
+ if (board_id(s) == BOARD_ID_VEXPRESS) {
+ /* On VExpress this register will RAZ/WI */
+ s->sys_clcd = 0;
+ } else {
+ /* All others: CLCDID 0x1f, indicating VGA */
+ s->sys_clcd = 0x1f00;
+ }
+}
+
+static uint64_t arm_sysctl_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ arm_sysctl_state *s = (arm_sysctl_state *)opaque;
+
+ switch (offset) {
+ case 0x00: /* ID */
+ return s->sys_id;
+ case 0x04: /* SW */
+ /* General purpose hardware switches.
+ We don't have a useful way of exposing these to the user. */
+ return 0;
+ case 0x08: /* LED */
+ return s->leds;
+ case 0x20: /* LOCK */
+ return s->lockval;
+ case 0x0c: /* OSC0 */
+ case 0x10: /* OSC1 */
+ case 0x14: /* OSC2 */
+ case 0x18: /* OSC3 */
+ case 0x1c: /* OSC4 */
+ case 0x24: /* 100HZ */
+ /* ??? Implement these. */
+ return 0;
+ case 0x28: /* CFGDATA1 */
+ return s->cfgdata1;
+ case 0x2c: /* CFGDATA2 */
+ return s->cfgdata2;
+ case 0x30: /* FLAGS */
+ return s->flags;
+ case 0x38: /* NVFLAGS */
+ return s->nvflags;
+ case 0x40: /* RESETCTL */
+ if (board_id(s) == BOARD_ID_VEXPRESS) {
+ /* reserved: RAZ/WI */
+ return 0;
+ }
+ return s->resetlevel;
+ case 0x44: /* PCICTL */
+ return 1;
+ case 0x48: /* MCI */
+ return s->sys_mci;
+ case 0x4c: /* FLASH */
+ return 0;
+ case 0x50: /* CLCD */
+ return s->sys_clcd;
+ case 0x54: /* CLCDSER */
+ return 0;
+ case 0x58: /* BOOTCS */
+ return 0;
+ case 0x5c: /* 24MHz */
+ return muldiv64(qemu_get_clock_ns(vm_clock), 24000000, get_ticks_per_sec());
+ case 0x60: /* MISC */
+ return 0;
+ case 0x84: /* PROCID0 */
+ return s->proc_id;
+ case 0x88: /* PROCID1 */
+ return 0xff000000;
+ case 0x64: /* DMAPSR0 */
+ case 0x68: /* DMAPSR1 */
+ case 0x6c: /* DMAPSR2 */
+ case 0x70: /* IOSEL */
+ case 0x74: /* PLDCTL */
+ case 0x80: /* BUSID */
+ case 0x8c: /* OSCRESET0 */
+ case 0x90: /* OSCRESET1 */
+ case 0x94: /* OSCRESET2 */
+ case 0x98: /* OSCRESET3 */
+ case 0x9c: /* OSCRESET4 */
+ case 0xc0: /* SYS_TEST_OSC0 */
+ case 0xc4: /* SYS_TEST_OSC1 */
+ case 0xc8: /* SYS_TEST_OSC2 */
+ case 0xcc: /* SYS_TEST_OSC3 */
+ case 0xd0: /* SYS_TEST_OSC4 */
+ return 0;
+ case 0xa0: /* SYS_CFGDATA */
+ if (board_id(s) != BOARD_ID_VEXPRESS) {
+ goto bad_reg;
+ }
+ return s->sys_cfgdata;
+ case 0xa4: /* SYS_CFGCTRL */
+ if (board_id(s) != BOARD_ID_VEXPRESS) {
+ goto bad_reg;
+ }
+ return s->sys_cfgctrl;
+ case 0xa8: /* SYS_CFGSTAT */
+ if (board_id(s) != BOARD_ID_VEXPRESS) {
+ goto bad_reg;
+ }
+ return s->sys_cfgstat;
+ default:
+ bad_reg:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "arm_sysctl_read: Bad register offset 0x%x\n",
+ (int)offset);
+ return 0;
+ }
+}
+
+/* SYS_CFGCTRL functions */
+#define SYS_CFG_OSC 1
+#define SYS_CFG_VOLT 2
+#define SYS_CFG_AMP 3
+#define SYS_CFG_TEMP 4
+#define SYS_CFG_RESET 5
+#define SYS_CFG_SCC 6
+#define SYS_CFG_MUXFPGA 7
+#define SYS_CFG_SHUTDOWN 8
+#define SYS_CFG_REBOOT 9
+#define SYS_CFG_DVIMODE 11
+#define SYS_CFG_POWER 12
+#define SYS_CFG_ENERGY 13
+
+/* SYS_CFGCTRL site field values */
+#define SYS_CFG_SITE_MB 0
+#define SYS_CFG_SITE_DB1 1
+#define SYS_CFG_SITE_DB2 2
+
+/**
+ * vexpress_cfgctrl_read:
+ * @s: arm_sysctl_state pointer
+ * @dcc, @function, @site, @position, @device: split out values from
+ * SYS_CFGCTRL register
+ * @val: pointer to where to put the read data on success
+ *
+ * Handle a VExpress SYS_CFGCTRL register read. On success, return true and
+ * write the read value to *val. On failure, return false (and val may
+ * or may not be written to).
+ */
+static bool vexpress_cfgctrl_read(arm_sysctl_state *s, unsigned int dcc,
+ unsigned int function, unsigned int site,
+ unsigned int position, unsigned int device,
+ uint32_t *val)
+{
+ /* We don't support anything other than DCC 0, board stack position 0
+ * or sites other than motherboard/daughterboard:
+ */
+ if (dcc != 0 || position != 0 ||
+ (site != SYS_CFG_SITE_MB && site != SYS_CFG_SITE_DB1)) {
+ goto cfgctrl_unimp;
+ }
+
+ switch (function) {
+ case SYS_CFG_VOLT:
+ if (site == SYS_CFG_SITE_DB1 && device < s->db_num_vsensors) {
+ *val = s->db_voltage[device];
+ return true;
+ }
+ if (site == SYS_CFG_SITE_MB && device == 0) {
+ /* There is only one motherboard voltage sensor:
+ * VIO : 3.3V : bus voltage between mother and daughterboard
+ */
+ *val = 3300000;
+ return true;
+ }
+ break;
+ case SYS_CFG_OSC:
+ if (site == SYS_CFG_SITE_MB && device < sizeof(s->mb_clock)) {
+ /* motherboard clock */
+ *val = s->mb_clock[device];
+ return true;
+ }
+ if (site == SYS_CFG_SITE_DB1 && device < s->db_num_clocks) {
+ /* daughterboard clock */
+ *val = s->db_clock[device];
+ return true;
+ }
+ break;
+ default:
+ break;
+ }
+
+cfgctrl_unimp:
+ qemu_log_mask(LOG_UNIMP,
+ "arm_sysctl: Unimplemented SYS_CFGCTRL read of function "
+ "0x%x DCC 0x%x site 0x%x position 0x%x device 0x%x\n",
+ function, dcc, site, position, device);
+ return false;
+}
+
+/**
+ * vexpress_cfgctrl_write:
+ * @s: arm_sysctl_state pointer
+ * @dcc, @function, @site, @position, @device: split out values from
+ * SYS_CFGCTRL register
+ * @val: data to write
+ *
+ * Handle a VExpress SYS_CFGCTRL register write. On success, return true.
+ * On failure, return false.
+ */
+static bool vexpress_cfgctrl_write(arm_sysctl_state *s, unsigned int dcc,
+ unsigned int function, unsigned int site,
+ unsigned int position, unsigned int device,
+ uint32_t val)
+{
+ /* We don't support anything other than DCC 0, board stack position 0
+ * or sites other than motherboard/daughterboard:
+ */
+ if (dcc != 0 || position != 0 ||
+ (site != SYS_CFG_SITE_MB && site != SYS_CFG_SITE_DB1)) {
+ goto cfgctrl_unimp;
+ }
+
+ switch (function) {
+ case SYS_CFG_OSC:
+ if (site == SYS_CFG_SITE_MB && device < sizeof(s->mb_clock)) {
+ /* motherboard clock */
+ s->mb_clock[device] = val;
+ return true;
+ }
+ if (site == SYS_CFG_SITE_DB1 && device < s->db_num_clocks) {
+ /* daughterboard clock */
+ s->db_clock[device] = val;
+ return true;
+ }
+ break;
+ case SYS_CFG_MUXFPGA:
+ if (site == SYS_CFG_SITE_MB && device == 0) {
+ /* Select whether video output comes from motherboard
+ * or daughterboard: log and ignore as QEMU doesn't
+ * support this.
+ */
+ qemu_log_mask(LOG_UNIMP, "arm_sysctl: selection of video output "
+ "not supported, ignoring\n");
+ return true;
+ }
+ break;
+ case SYS_CFG_SHUTDOWN:
+ if (site == SYS_CFG_SITE_MB && device == 0) {
+ qemu_system_shutdown_request();
+ return true;
+ }
+ break;
+ case SYS_CFG_REBOOT:
+ if (site == SYS_CFG_SITE_MB && device == 0) {
+ qemu_system_reset_request();
+ return true;
+ }
+ break;
+ case SYS_CFG_DVIMODE:
+ if (site == SYS_CFG_SITE_MB && device == 0) {
+ /* Selecting DVI mode is meaningless for QEMU: we will
+ * always display the output correctly according to the
+ * pixel height/width programmed into the CLCD controller.
+ */
+ return true;
+ }
+ default:
+ break;
+ }
+
+cfgctrl_unimp:
+ qemu_log_mask(LOG_UNIMP,
+ "arm_sysctl: Unimplemented SYS_CFGCTRL write of function "
+ "0x%x DCC 0x%x site 0x%x position 0x%x device 0x%x\n",
+ function, dcc, site, position, device);
+ return false;
+}
+
+static void arm_sysctl_write(void *opaque, hwaddr offset,
+ uint64_t val, unsigned size)
+{
+ arm_sysctl_state *s = (arm_sysctl_state *)opaque;
+
+ switch (offset) {
+ case 0x08: /* LED */
+ s->leds = val;
+ break;
+ case 0x0c: /* OSC0 */
+ case 0x10: /* OSC1 */
+ case 0x14: /* OSC2 */
+ case 0x18: /* OSC3 */
+ case 0x1c: /* OSC4 */
+ /* ??? */
+ break;
+ case 0x20: /* LOCK */
+ if (val == LOCK_VALUE)
+ s->lockval = val;
+ else
+ s->lockval = val & 0x7fff;
+ break;
+ case 0x28: /* CFGDATA1 */
+ /* ??? Need to implement this. */
+ s->cfgdata1 = val;
+ break;
+ case 0x2c: /* CFGDATA2 */
+ /* ??? Need to implement this. */
+ s->cfgdata2 = val;
+ break;
+ case 0x30: /* FLAGSSET */
+ s->flags |= val;
+ break;
+ case 0x34: /* FLAGSCLR */
+ s->flags &= ~val;
+ break;
+ case 0x38: /* NVFLAGSSET */
+ s->nvflags |= val;
+ break;
+ case 0x3c: /* NVFLAGSCLR */
+ s->nvflags &= ~val;
+ break;
+ case 0x40: /* RESETCTL */
+ switch (board_id(s)) {
+ case BOARD_ID_PB926:
+ if (s->lockval == LOCK_VALUE) {
+ s->resetlevel = val;
+ if (val & 0x100) {
+ qemu_system_reset_request();
+ }
+ }
+ break;
+ case BOARD_ID_PBX:
+ case BOARD_ID_PBA8:
+ if (s->lockval == LOCK_VALUE) {
+ s->resetlevel = val;
+ if (val & 0x04) {
+ qemu_system_reset_request();
+ }
+ }
+ break;
+ case BOARD_ID_VEXPRESS:
+ case BOARD_ID_EB:
+ default:
+ /* reserved: RAZ/WI */
+ break;
+ }
+ break;
+ case 0x44: /* PCICTL */
+ /* nothing to do. */
+ break;
+ case 0x4c: /* FLASH */
+ break;
+ case 0x50: /* CLCD */
+ switch (board_id(s)) {
+ case BOARD_ID_PB926:
+ /* On 926 bits 13:8 are R/O, bits 1:0 control
+ * the mux that defines how to interpret the PL110
+ * graphics format, and other bits are r/w but we
+ * don't implement them to do anything.
+ */
+ s->sys_clcd &= 0x3f00;
+ s->sys_clcd |= val & ~0x3f00;
+ qemu_set_irq(s->pl110_mux_ctrl, val & 3);
+ break;
+ case BOARD_ID_EB:
+ /* The EB is the same except that there is no mux since
+ * the EB has a PL111.
+ */
+ s->sys_clcd &= 0x3f00;
+ s->sys_clcd |= val & ~0x3f00;
+ break;
+ case BOARD_ID_PBA8:
+ case BOARD_ID_PBX:
+ /* On PBA8 and PBX bit 7 is r/w and all other bits
+ * are either r/o or RAZ/WI.
+ */
+ s->sys_clcd &= (1 << 7);
+ s->sys_clcd |= val & ~(1 << 7);
+ break;
+ case BOARD_ID_VEXPRESS:
+ default:
+ /* On VExpress this register is unimplemented and will RAZ/WI */
+ break;
+ }
+ break;
+ case 0x54: /* CLCDSER */
+ case 0x64: /* DMAPSR0 */
+ case 0x68: /* DMAPSR1 */
+ case 0x6c: /* DMAPSR2 */
+ case 0x70: /* IOSEL */
+ case 0x74: /* PLDCTL */
+ case 0x80: /* BUSID */
+ case 0x84: /* PROCID0 */
+ case 0x88: /* PROCID1 */
+ case 0x8c: /* OSCRESET0 */
+ case 0x90: /* OSCRESET1 */
+ case 0x94: /* OSCRESET2 */
+ case 0x98: /* OSCRESET3 */
+ case 0x9c: /* OSCRESET4 */
+ break;
+ case 0xa0: /* SYS_CFGDATA */
+ if (board_id(s) != BOARD_ID_VEXPRESS) {
+ goto bad_reg;
+ }
+ s->sys_cfgdata = val;
+ return;
+ case 0xa4: /* SYS_CFGCTRL */
+ if (board_id(s) != BOARD_ID_VEXPRESS) {
+ goto bad_reg;
+ }
+ /* Undefined bits [19:18] are RAZ/WI, and writing to
+ * the start bit just triggers the action; it always reads
+ * as zero.
+ */
+ s->sys_cfgctrl = val & ~((3 << 18) | (1 << 31));
+ if (val & (1 << 31)) {
+ /* Start bit set -- actually do something */
+ unsigned int dcc = extract32(s->sys_cfgctrl, 26, 4);
+ unsigned int function = extract32(s->sys_cfgctrl, 20, 6);
+ unsigned int site = extract32(s->sys_cfgctrl, 16, 2);
+ unsigned int position = extract32(s->sys_cfgctrl, 12, 4);
+ unsigned int device = extract32(s->sys_cfgctrl, 0, 12);
+ s->sys_cfgstat = 1; /* complete */
+ if (s->sys_cfgctrl & (1 << 30)) {
+ if (!vexpress_cfgctrl_write(s, dcc, function, site, position,
+ device, s->sys_cfgdata)) {
+ s->sys_cfgstat |= 2; /* error */
+ }
+ } else {
+ uint32_t val;
+ if (!vexpress_cfgctrl_read(s, dcc, function, site, position,
+ device, &val)) {
+ s->sys_cfgstat |= 2; /* error */
+ } else {
+ s->sys_cfgdata = val;
+ }
+ }
+ }
+ s->sys_cfgctrl &= ~(1 << 31);
+ return;
+ case 0xa8: /* SYS_CFGSTAT */
+ if (board_id(s) != BOARD_ID_VEXPRESS) {
+ goto bad_reg;
+ }
+ s->sys_cfgstat = val & 3;
+ return;
+ default:
+ bad_reg:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "arm_sysctl_write: Bad register offset 0x%x\n",
+ (int)offset);
+ return;
+ }
+}
+
+static const MemoryRegionOps arm_sysctl_ops = {
+ .read = arm_sysctl_read,
+ .write = arm_sysctl_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void arm_sysctl_gpio_set(void *opaque, int line, int level)
+{
+ arm_sysctl_state *s = (arm_sysctl_state *)opaque;
+ switch (line) {
+ case ARM_SYSCTL_GPIO_MMC_WPROT:
+ {
+ /* For PB926 and EB write-protect is bit 2 of SYS_MCI;
+ * for all later boards it is bit 1.
+ */
+ int bit = 2;
+ if ((board_id(s) == BOARD_ID_PB926) || (board_id(s) == BOARD_ID_EB)) {
+ bit = 4;
+ }
+ s->sys_mci &= ~bit;
+ if (level) {
+ s->sys_mci |= bit;
+ }
+ break;
+ }
+ case ARM_SYSCTL_GPIO_MMC_CARDIN:
+ s->sys_mci &= ~1;
+ if (level) {
+ s->sys_mci |= 1;
+ }
+ break;
+ }
+}
+
+static void arm_sysctl_init(Object *obj)
+{
+ DeviceState *dev = DEVICE(obj);
+ SysBusDevice *sd = SYS_BUS_DEVICE(obj);
+ arm_sysctl_state *s = FROM_SYSBUS(arm_sysctl_state, sd);
+
+ memory_region_init_io(&s->iomem, &arm_sysctl_ops, s, "arm-sysctl", 0x1000);
+ sysbus_init_mmio(sd, &s->iomem);
+ qdev_init_gpio_in(dev, arm_sysctl_gpio_set, 2);
+ qdev_init_gpio_out(dev, &s->pl110_mux_ctrl, 1);
+}
+
+static void arm_sysctl_realize(DeviceState *d, Error **errp)
+{
+ arm_sysctl_state *s = FROM_SYSBUS(arm_sysctl_state, SYS_BUS_DEVICE(d));
+ s->db_clock = g_new0(uint32_t, s->db_num_clocks);
+}
+
+static void arm_sysctl_finalize(Object *obj)
+{
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
+ arm_sysctl_state *s = FROM_SYSBUS(arm_sysctl_state, dev);
+ g_free(s->db_voltage);
+ g_free(s->db_clock);
+ g_free(s->db_clock_reset);
+}
+
+static Property arm_sysctl_properties[] = {
+ DEFINE_PROP_UINT32("sys_id", arm_sysctl_state, sys_id, 0),
+ DEFINE_PROP_UINT32("proc_id", arm_sysctl_state, proc_id, 0),
+ /* Daughterboard power supply voltages (as reported via SYS_CFG) */
+ DEFINE_PROP_ARRAY("db-voltage", arm_sysctl_state, db_num_vsensors,
+ db_voltage, qdev_prop_uint32, uint32_t),
+ /* Daughterboard clock reset values (as reported via SYS_CFG) */
+ DEFINE_PROP_ARRAY("db-clock", arm_sysctl_state, db_num_clocks,
+ db_clock_reset, qdev_prop_uint32, uint32_t),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void arm_sysctl_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = arm_sysctl_realize;
+ dc->reset = arm_sysctl_reset;
+ dc->vmsd = &vmstate_arm_sysctl;
+ dc->props = arm_sysctl_properties;
+}
+
+static const TypeInfo arm_sysctl_info = {
+ .name = "realview_sysctl",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(arm_sysctl_state),
+ .instance_init = arm_sysctl_init,
+ .instance_finalize = arm_sysctl_finalize,
+ .class_init = arm_sysctl_class_init,
+};
+
+static void arm_sysctl_register_types(void)
+{
+ type_register_static(&arm_sysctl_info);
+}
+
+type_init(arm_sysctl_register_types)
diff --git a/hw/misc/cbus.c b/hw/misc/cbus.c
new file mode 100644
index 0000000..3d9027f
--- /dev/null
+++ b/hw/misc/cbus.c
@@ -0,0 +1,618 @@
+/*
+ * CBUS three-pin bus and the Retu / Betty / Tahvo / Vilma / Avilma /
+ * Hinku / Vinku / Ahne / Pihi chips used in various Nokia platforms.
+ * Based on reverse-engineering of a linux driver.
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Written by Andrzej Zaborowski <andrew@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu-common.h"
+#include "hw/irq.h"
+#include "hw/arm/devices.h"
+#include "sysemu/sysemu.h"
+
+//#define DEBUG
+
+typedef struct {
+ void *opaque;
+ void (*io)(void *opaque, int rw, int reg, uint16_t *val);
+ int addr;
+} CBusSlave;
+
+typedef struct {
+ CBus cbus;
+
+ int sel;
+ int dat;
+ int clk;
+ int bit;
+ int dir;
+ uint16_t val;
+ qemu_irq dat_out;
+
+ int addr;
+ int reg;
+ int rw;
+ enum {
+ cbus_address,
+ cbus_value,
+ } cycle;
+
+ CBusSlave *slave[8];
+} CBusPriv;
+
+static void cbus_io(CBusPriv *s)
+{
+ if (s->slave[s->addr])
+ s->slave[s->addr]->io(s->slave[s->addr]->opaque,
+ s->rw, s->reg, &s->val);
+ else
+ hw_error("%s: bad slave address %i\n", __FUNCTION__, s->addr);
+}
+
+static void cbus_cycle(CBusPriv *s)
+{
+ switch (s->cycle) {
+ case cbus_address:
+ s->addr = (s->val >> 6) & 7;
+ s->rw = (s->val >> 5) & 1;
+ s->reg = (s->val >> 0) & 0x1f;
+
+ s->cycle = cbus_value;
+ s->bit = 15;
+ s->dir = !s->rw;
+ s->val = 0;
+
+ if (s->rw)
+ cbus_io(s);
+ break;
+
+ case cbus_value:
+ if (!s->rw)
+ cbus_io(s);
+
+ s->cycle = cbus_address;
+ s->bit = 8;
+ s->dir = 1;
+ s->val = 0;
+ break;
+ }
+}
+
+static void cbus_clk(void *opaque, int line, int level)
+{
+ CBusPriv *s = (CBusPriv *) opaque;
+
+ if (!s->sel && level && !s->clk) {
+ if (s->dir)
+ s->val |= s->dat << (s->bit --);
+ else
+ qemu_set_irq(s->dat_out, (s->val >> (s->bit --)) & 1);
+
+ if (s->bit < 0)
+ cbus_cycle(s);
+ }
+
+ s->clk = level;
+}
+
+static void cbus_dat(void *opaque, int line, int level)
+{
+ CBusPriv *s = (CBusPriv *) opaque;
+
+ s->dat = level;
+}
+
+static void cbus_sel(void *opaque, int line, int level)
+{
+ CBusPriv *s = (CBusPriv *) opaque;
+
+ if (!level) {
+ s->dir = 1;
+ s->bit = 8;
+ s->val = 0;
+ }
+
+ s->sel = level;
+}
+
+CBus *cbus_init(qemu_irq dat)
+{
+ CBusPriv *s = (CBusPriv *) g_malloc0(sizeof(*s));
+
+ s->dat_out = dat;
+ s->cbus.clk = qemu_allocate_irqs(cbus_clk, s, 1)[0];
+ s->cbus.dat = qemu_allocate_irqs(cbus_dat, s, 1)[0];
+ s->cbus.sel = qemu_allocate_irqs(cbus_sel, s, 1)[0];
+
+ s->sel = 1;
+ s->clk = 0;
+ s->dat = 0;
+
+ return &s->cbus;
+}
+
+void cbus_attach(CBus *bus, void *slave_opaque)
+{
+ CBusSlave *slave = (CBusSlave *) slave_opaque;
+ CBusPriv *s = (CBusPriv *) bus;
+
+ s->slave[slave->addr] = slave;
+}
+
+/* Retu/Vilma */
+typedef struct {
+ uint16_t irqst;
+ uint16_t irqen;
+ uint16_t cc[2];
+ int channel;
+ uint16_t result[16];
+ uint16_t sample;
+ uint16_t status;
+
+ struct {
+ uint16_t cal;
+ } rtc;
+
+ int is_vilma;
+ qemu_irq irq;
+ CBusSlave cbus;
+} CBusRetu;
+
+static void retu_interrupt_update(CBusRetu *s)
+{
+ qemu_set_irq(s->irq, s->irqst & ~s->irqen);
+}
+
+#define RETU_REG_ASICR 0x00 /* (RO) ASIC ID & revision */
+#define RETU_REG_IDR 0x01 /* (T) Interrupt ID */
+#define RETU_REG_IMR 0x02 /* (RW) Interrupt mask */
+#define RETU_REG_RTCDSR 0x03 /* (RW) RTC seconds register */
+#define RETU_REG_RTCHMR 0x04 /* (RO) RTC hours and minutes reg */
+#define RETU_REG_RTCHMAR 0x05 /* (RW) RTC hours and minutes set reg */
+#define RETU_REG_RTCCALR 0x06 /* (RW) RTC calibration register */
+#define RETU_REG_ADCR 0x08 /* (RW) ADC result register */
+#define RETU_REG_ADCSCR 0x09 /* (RW) ADC sample control register */
+#define RETU_REG_AFCR 0x0a /* (RW) AFC register */
+#define RETU_REG_ANTIFR 0x0b /* (RW) AntiF register */
+#define RETU_REG_CALIBR 0x0c /* (RW) CalibR register*/
+#define RETU_REG_CCR1 0x0d /* (RW) Common control register 1 */
+#define RETU_REG_CCR2 0x0e /* (RW) Common control register 2 */
+#define RETU_REG_RCTRL_CLR 0x0f /* (T) Regulator clear register */
+#define RETU_REG_RCTRL_SET 0x10 /* (T) Regulator set register */
+#define RETU_REG_TXCR 0x11 /* (RW) TxC register */
+#define RETU_REG_STATUS 0x16 /* (RO) Status register */
+#define RETU_REG_WATCHDOG 0x17 /* (RW) Watchdog register */
+#define RETU_REG_AUDTXR 0x18 /* (RW) Audio Codec Tx register */
+#define RETU_REG_AUDPAR 0x19 /* (RW) AudioPA register */
+#define RETU_REG_AUDRXR1 0x1a /* (RW) Audio receive register 1 */
+#define RETU_REG_AUDRXR2 0x1b /* (RW) Audio receive register 2 */
+#define RETU_REG_SGR1 0x1c /* (RW) */
+#define RETU_REG_SCR1 0x1d /* (RW) */
+#define RETU_REG_SGR2 0x1e /* (RW) */
+#define RETU_REG_SCR2 0x1f /* (RW) */
+
+/* Retu Interrupt sources */
+enum {
+ retu_int_pwr = 0, /* Power button */
+ retu_int_char = 1, /* Charger */
+ retu_int_rtcs = 2, /* Seconds */
+ retu_int_rtcm = 3, /* Minutes */
+ retu_int_rtcd = 4, /* Days */
+ retu_int_rtca = 5, /* Alarm */
+ retu_int_hook = 6, /* Hook */
+ retu_int_head = 7, /* Headset */
+ retu_int_adcs = 8, /* ADC sample */
+};
+
+/* Retu ADC channel wiring */
+enum {
+ retu_adc_bsi = 1, /* BSI */
+ retu_adc_batt_temp = 2, /* Battery temperature */
+ retu_adc_chg_volt = 3, /* Charger voltage */
+ retu_adc_head_det = 4, /* Headset detection */
+ retu_adc_hook_det = 5, /* Hook detection */
+ retu_adc_rf_gp = 6, /* RF GP */
+ retu_adc_tx_det = 7, /* Wideband Tx detection */
+ retu_adc_batt_volt = 8, /* Battery voltage */
+ retu_adc_sens = 10, /* Light sensor */
+ retu_adc_sens_temp = 11, /* Light sensor temperature */
+ retu_adc_bbatt_volt = 12, /* Backup battery voltage */
+ retu_adc_self_temp = 13, /* RETU temperature */
+};
+
+static inline uint16_t retu_read(CBusRetu *s, int reg)
+{
+#ifdef DEBUG
+ printf("RETU read at %02x\n", reg);
+#endif
+
+ switch (reg) {
+ case RETU_REG_ASICR:
+ return 0x0215 | (s->is_vilma << 7);
+
+ case RETU_REG_IDR: /* TODO: Or is this ffs(s->irqst)? */
+ return s->irqst;
+
+ case RETU_REG_IMR:
+ return s->irqen;
+
+ case RETU_REG_RTCDSR:
+ case RETU_REG_RTCHMR:
+ case RETU_REG_RTCHMAR:
+ /* TODO */
+ return 0x0000;
+
+ case RETU_REG_RTCCALR:
+ return s->rtc.cal;
+
+ case RETU_REG_ADCR:
+ return (s->channel << 10) | s->result[s->channel];
+ case RETU_REG_ADCSCR:
+ return s->sample;
+
+ case RETU_REG_AFCR:
+ case RETU_REG_ANTIFR:
+ case RETU_REG_CALIBR:
+ /* TODO */
+ return 0x0000;
+
+ case RETU_REG_CCR1:
+ return s->cc[0];
+ case RETU_REG_CCR2:
+ return s->cc[1];
+
+ case RETU_REG_RCTRL_CLR:
+ case RETU_REG_RCTRL_SET:
+ case RETU_REG_TXCR:
+ /* TODO */
+ return 0x0000;
+
+ case RETU_REG_STATUS:
+ return s->status;
+
+ case RETU_REG_WATCHDOG:
+ case RETU_REG_AUDTXR:
+ case RETU_REG_AUDPAR:
+ case RETU_REG_AUDRXR1:
+ case RETU_REG_AUDRXR2:
+ case RETU_REG_SGR1:
+ case RETU_REG_SCR1:
+ case RETU_REG_SGR2:
+ case RETU_REG_SCR2:
+ /* TODO */
+ return 0x0000;
+
+ default:
+ hw_error("%s: bad register %02x\n", __FUNCTION__, reg);
+ }
+}
+
+static inline void retu_write(CBusRetu *s, int reg, uint16_t val)
+{
+#ifdef DEBUG
+ printf("RETU write of %04x at %02x\n", val, reg);
+#endif
+
+ switch (reg) {
+ case RETU_REG_IDR:
+ s->irqst ^= val;
+ retu_interrupt_update(s);
+ break;
+
+ case RETU_REG_IMR:
+ s->irqen = val;
+ retu_interrupt_update(s);
+ break;
+
+ case RETU_REG_RTCDSR:
+ case RETU_REG_RTCHMAR:
+ /* TODO */
+ break;
+
+ case RETU_REG_RTCCALR:
+ s->rtc.cal = val;
+ break;
+
+ case RETU_REG_ADCR:
+ s->channel = (val >> 10) & 0xf;
+ s->irqst |= 1 << retu_int_adcs;
+ retu_interrupt_update(s);
+ break;
+ case RETU_REG_ADCSCR:
+ s->sample &= ~val;
+ break;
+
+ case RETU_REG_AFCR:
+ case RETU_REG_ANTIFR:
+ case RETU_REG_CALIBR:
+
+ case RETU_REG_CCR1:
+ s->cc[0] = val;
+ break;
+ case RETU_REG_CCR2:
+ s->cc[1] = val;
+ break;
+
+ case RETU_REG_RCTRL_CLR:
+ case RETU_REG_RCTRL_SET:
+ /* TODO */
+ break;
+
+ case RETU_REG_WATCHDOG:
+ if (val == 0 && (s->cc[0] & 2))
+ qemu_system_shutdown_request();
+ break;
+
+ case RETU_REG_TXCR:
+ case RETU_REG_AUDTXR:
+ case RETU_REG_AUDPAR:
+ case RETU_REG_AUDRXR1:
+ case RETU_REG_AUDRXR2:
+ case RETU_REG_SGR1:
+ case RETU_REG_SCR1:
+ case RETU_REG_SGR2:
+ case RETU_REG_SCR2:
+ /* TODO */
+ break;
+
+ default:
+ hw_error("%s: bad register %02x\n", __FUNCTION__, reg);
+ }
+}
+
+static void retu_io(void *opaque, int rw, int reg, uint16_t *val)
+{
+ CBusRetu *s = (CBusRetu *) opaque;
+
+ if (rw)
+ *val = retu_read(s, reg);
+ else
+ retu_write(s, reg, *val);
+}
+
+void *retu_init(qemu_irq irq, int vilma)
+{
+ CBusRetu *s = (CBusRetu *) g_malloc0(sizeof(*s));
+
+ s->irq = irq;
+ s->irqen = 0xffff;
+ s->irqst = 0x0000;
+ s->status = 0x0020;
+ s->is_vilma = !!vilma;
+ s->rtc.cal = 0x01;
+ s->result[retu_adc_bsi] = 0x3c2;
+ s->result[retu_adc_batt_temp] = 0x0fc;
+ s->result[retu_adc_chg_volt] = 0x165;
+ s->result[retu_adc_head_det] = 123;
+ s->result[retu_adc_hook_det] = 1023;
+ s->result[retu_adc_rf_gp] = 0x11;
+ s->result[retu_adc_tx_det] = 0x11;
+ s->result[retu_adc_batt_volt] = 0x250;
+ s->result[retu_adc_sens] = 2;
+ s->result[retu_adc_sens_temp] = 0x11;
+ s->result[retu_adc_bbatt_volt] = 0x3d0;
+ s->result[retu_adc_self_temp] = 0x330;
+
+ s->cbus.opaque = s;
+ s->cbus.io = retu_io;
+ s->cbus.addr = 1;
+
+ return &s->cbus;
+}
+
+void retu_key_event(void *retu, int state)
+{
+ CBusSlave *slave = (CBusSlave *) retu;
+ CBusRetu *s = (CBusRetu *) slave->opaque;
+
+ s->irqst |= 1 << retu_int_pwr;
+ retu_interrupt_update(s);
+
+ if (state)
+ s->status &= ~(1 << 5);
+ else
+ s->status |= 1 << 5;
+}
+
+#if 0
+static void retu_head_event(void *retu, int state)
+{
+ CBusSlave *slave = (CBusSlave *) retu;
+ CBusRetu *s = (CBusRetu *) slave->opaque;
+
+ if ((s->cc[0] & 0x500) == 0x500) { /* TODO: Which bits? */
+ /* TODO: reissue the interrupt every 100ms or so. */
+ s->irqst |= 1 << retu_int_head;
+ retu_interrupt_update(s);
+ }
+
+ if (state)
+ s->result[retu_adc_head_det] = 50;
+ else
+ s->result[retu_adc_head_det] = 123;
+}
+
+static void retu_hook_event(void *retu, int state)
+{
+ CBusSlave *slave = (CBusSlave *) retu;
+ CBusRetu *s = (CBusRetu *) slave->opaque;
+
+ if ((s->cc[0] & 0x500) == 0x500) {
+ /* TODO: reissue the interrupt every 100ms or so. */
+ s->irqst |= 1 << retu_int_hook;
+ retu_interrupt_update(s);
+ }
+
+ if (state)
+ s->result[retu_adc_hook_det] = 50;
+ else
+ s->result[retu_adc_hook_det] = 123;
+}
+#endif
+
+/* Tahvo/Betty */
+typedef struct {
+ uint16_t irqst;
+ uint16_t irqen;
+ uint8_t charger;
+ uint8_t backlight;
+ uint16_t usbr;
+ uint16_t power;
+
+ int is_betty;
+ qemu_irq irq;
+ CBusSlave cbus;
+} CBusTahvo;
+
+static void tahvo_interrupt_update(CBusTahvo *s)
+{
+ qemu_set_irq(s->irq, s->irqst & ~s->irqen);
+}
+
+#define TAHVO_REG_ASICR 0x00 /* (RO) ASIC ID & revision */
+#define TAHVO_REG_IDR 0x01 /* (T) Interrupt ID */
+#define TAHVO_REG_IDSR 0x02 /* (RO) Interrupt status */
+#define TAHVO_REG_IMR 0x03 /* (RW) Interrupt mask */
+#define TAHVO_REG_CHAPWMR 0x04 /* (RW) Charger PWM */
+#define TAHVO_REG_LEDPWMR 0x05 /* (RW) LED PWM */
+#define TAHVO_REG_USBR 0x06 /* (RW) USB control */
+#define TAHVO_REG_RCR 0x07 /* (RW) Some kind of power management */
+#define TAHVO_REG_CCR1 0x08 /* (RW) Common control register 1 */
+#define TAHVO_REG_CCR2 0x09 /* (RW) Common control register 2 */
+#define TAHVO_REG_TESTR1 0x0a /* (RW) Test register 1 */
+#define TAHVO_REG_TESTR2 0x0b /* (RW) Test register 2 */
+#define TAHVO_REG_NOPR 0x0c /* (RW) Number of periods */
+#define TAHVO_REG_FRR 0x0d /* (RO) FR */
+
+static inline uint16_t tahvo_read(CBusTahvo *s, int reg)
+{
+#ifdef DEBUG
+ printf("TAHVO read at %02x\n", reg);
+#endif
+
+ switch (reg) {
+ case TAHVO_REG_ASICR:
+ return 0x0021 | (s->is_betty ? 0x0b00 : 0x0300); /* 22 in N810 */
+
+ case TAHVO_REG_IDR:
+ case TAHVO_REG_IDSR: /* XXX: what does this do? */
+ return s->irqst;
+
+ case TAHVO_REG_IMR:
+ return s->irqen;
+
+ case TAHVO_REG_CHAPWMR:
+ return s->charger;
+
+ case TAHVO_REG_LEDPWMR:
+ return s->backlight;
+
+ case TAHVO_REG_USBR:
+ return s->usbr;
+
+ case TAHVO_REG_RCR:
+ return s->power;
+
+ case TAHVO_REG_CCR1:
+ case TAHVO_REG_CCR2:
+ case TAHVO_REG_TESTR1:
+ case TAHVO_REG_TESTR2:
+ case TAHVO_REG_NOPR:
+ case TAHVO_REG_FRR:
+ return 0x0000;
+
+ default:
+ hw_error("%s: bad register %02x\n", __FUNCTION__, reg);
+ }
+}
+
+static inline void tahvo_write(CBusTahvo *s, int reg, uint16_t val)
+{
+#ifdef DEBUG
+ printf("TAHVO write of %04x at %02x\n", val, reg);
+#endif
+
+ switch (reg) {
+ case TAHVO_REG_IDR:
+ s->irqst ^= val;
+ tahvo_interrupt_update(s);
+ break;
+
+ case TAHVO_REG_IMR:
+ s->irqen = val;
+ tahvo_interrupt_update(s);
+ break;
+
+ case TAHVO_REG_CHAPWMR:
+ s->charger = val;
+ break;
+
+ case TAHVO_REG_LEDPWMR:
+ if (s->backlight != (val & 0x7f)) {
+ s->backlight = val & 0x7f;
+ printf("%s: LCD backlight now at %i / 127\n",
+ __FUNCTION__, s->backlight);
+ }
+ break;
+
+ case TAHVO_REG_USBR:
+ s->usbr = val;
+ break;
+
+ case TAHVO_REG_RCR:
+ s->power = val;
+ break;
+
+ case TAHVO_REG_CCR1:
+ case TAHVO_REG_CCR2:
+ case TAHVO_REG_TESTR1:
+ case TAHVO_REG_TESTR2:
+ case TAHVO_REG_NOPR:
+ case TAHVO_REG_FRR:
+ break;
+
+ default:
+ hw_error("%s: bad register %02x\n", __FUNCTION__, reg);
+ }
+}
+
+static void tahvo_io(void *opaque, int rw, int reg, uint16_t *val)
+{
+ CBusTahvo *s = (CBusTahvo *) opaque;
+
+ if (rw)
+ *val = tahvo_read(s, reg);
+ else
+ tahvo_write(s, reg, *val);
+}
+
+void *tahvo_init(qemu_irq irq, int betty)
+{
+ CBusTahvo *s = (CBusTahvo *) g_malloc0(sizeof(*s));
+
+ s->irq = irq;
+ s->irqen = 0xffff;
+ s->irqst = 0x0000;
+ s->is_betty = !!betty;
+
+ s->cbus.opaque = s;
+ s->cbus.io = tahvo_io;
+ s->cbus.addr = 2;
+
+ return &s->cbus;
+}
diff --git a/hw/misc/debugexit.c b/hw/misc/debugexit.c
new file mode 100644
index 0000000..59bed5b
--- /dev/null
+++ b/hw/misc/debugexit.c
@@ -0,0 +1,75 @@
+/*
+ * debug exit port emulation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) any later version.
+ */
+
+#include "hw/hw.h"
+#include "hw/isa/isa.h"
+
+#define TYPE_ISA_DEBUG_EXIT_DEVICE "isa-debug-exit"
+#define ISA_DEBUG_EXIT_DEVICE(obj) \
+ OBJECT_CHECK(ISADebugExitState, (obj), TYPE_ISA_DEBUG_EXIT_DEVICE)
+
+typedef struct ISADebugExitState {
+ ISADevice parent_obj;
+
+ uint32_t iobase;
+ uint32_t iosize;
+ MemoryRegion io;
+} ISADebugExitState;
+
+static void debug_exit_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned width)
+{
+ exit((val << 1) | 1);
+}
+
+static const MemoryRegionOps debug_exit_ops = {
+ .write = debug_exit_write,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static int debug_exit_initfn(ISADevice *dev)
+{
+ ISADebugExitState *isa = ISA_DEBUG_EXIT_DEVICE(dev);
+
+ memory_region_init_io(&isa->io, &debug_exit_ops, isa,
+ TYPE_ISA_DEBUG_EXIT_DEVICE, isa->iosize);
+ memory_region_add_subregion(isa_address_space_io(dev),
+ isa->iobase, &isa->io);
+ return 0;
+}
+
+static Property debug_exit_properties[] = {
+ DEFINE_PROP_HEX32("iobase", ISADebugExitState, iobase, 0x501),
+ DEFINE_PROP_HEX32("iosize", ISADebugExitState, iosize, 0x02),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void debug_exit_class_initfn(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ISADeviceClass *ic = ISA_DEVICE_CLASS(klass);
+ ic->init = debug_exit_initfn;
+ dc->props = debug_exit_properties;
+}
+
+static const TypeInfo debug_exit_info = {
+ .name = TYPE_ISA_DEBUG_EXIT_DEVICE,
+ .parent = TYPE_ISA_DEVICE,
+ .instance_size = sizeof(ISADebugExitState),
+ .class_init = debug_exit_class_initfn,
+};
+
+static void debug_exit_register_types(void)
+{
+ type_register_static(&debug_exit_info);
+}
+
+type_init(debug_exit_register_types)
diff --git a/hw/misc/eccmemctl.c b/hw/misc/eccmemctl.c
new file mode 100644
index 0000000..6f4a407
--- /dev/null
+++ b/hw/misc/eccmemctl.c
@@ -0,0 +1,340 @@
+/*
+ * QEMU Sparc Sun4m ECC memory controller emulation
+ *
+ * Copyright (c) 2007 Robert Reif
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/sysbus.h"
+#include "trace.h"
+
+/* There are 3 versions of this chip used in SMP sun4m systems:
+ * MCC (version 0, implementation 0) SS-600MP
+ * EMC (version 0, implementation 1) SS-10
+ * SMC (version 0, implementation 2) SS-10SX and SS-20
+ *
+ * Chipset docs:
+ * "Sun-4M System Architecture (revision 2.0) by Chuck Narad", 950-1373-01,
+ * http://mediacast.sun.com/users/Barton808/media/Sun4M_SystemArchitecture_edited2.pdf
+ */
+
+#define ECC_MCC 0x00000000
+#define ECC_EMC 0x10000000
+#define ECC_SMC 0x20000000
+
+/* Register indexes */
+#define ECC_MER 0 /* Memory Enable Register */
+#define ECC_MDR 1 /* Memory Delay Register */
+#define ECC_MFSR 2 /* Memory Fault Status Register */
+#define ECC_VCR 3 /* Video Configuration Register */
+#define ECC_MFAR0 4 /* Memory Fault Address Register 0 */
+#define ECC_MFAR1 5 /* Memory Fault Address Register 1 */
+#define ECC_DR 6 /* Diagnostic Register */
+#define ECC_ECR0 7 /* Event Count Register 0 */
+#define ECC_ECR1 8 /* Event Count Register 1 */
+
+/* ECC fault control register */
+#define ECC_MER_EE 0x00000001 /* Enable ECC checking */
+#define ECC_MER_EI 0x00000002 /* Enable Interrupts on
+ correctable errors */
+#define ECC_MER_MRR0 0x00000004 /* SIMM 0 */
+#define ECC_MER_MRR1 0x00000008 /* SIMM 1 */
+#define ECC_MER_MRR2 0x00000010 /* SIMM 2 */
+#define ECC_MER_MRR3 0x00000020 /* SIMM 3 */
+#define ECC_MER_MRR4 0x00000040 /* SIMM 4 */
+#define ECC_MER_MRR5 0x00000080 /* SIMM 5 */
+#define ECC_MER_MRR6 0x00000100 /* SIMM 6 */
+#define ECC_MER_MRR7 0x00000200 /* SIMM 7 */
+#define ECC_MER_REU 0x00000100 /* Memory Refresh Enable (600MP) */
+#define ECC_MER_MRR 0x000003fc /* MRR mask */
+#define ECC_MER_A 0x00000400 /* Memory controller addr map select */
+#define ECC_MER_DCI 0x00000800 /* Disables Coherent Invalidate ACK */
+#define ECC_MER_VER 0x0f000000 /* Version */
+#define ECC_MER_IMPL 0xf0000000 /* Implementation */
+#define ECC_MER_MASK_0 0x00000103 /* Version 0 (MCC) mask */
+#define ECC_MER_MASK_1 0x00000bff /* Version 1 (EMC) mask */
+#define ECC_MER_MASK_2 0x00000bff /* Version 2 (SMC) mask */
+
+/* ECC memory delay register */
+#define ECC_MDR_RRI 0x000003ff /* Refresh Request Interval */
+#define ECC_MDR_MI 0x00001c00 /* MIH Delay */
+#define ECC_MDR_CI 0x0000e000 /* Coherent Invalidate Delay */
+#define ECC_MDR_MDL 0x001f0000 /* MBus Master arbitration delay */
+#define ECC_MDR_MDH 0x03e00000 /* MBus Master arbitration delay */
+#define ECC_MDR_GAD 0x7c000000 /* Graphics Arbitration Delay */
+#define ECC_MDR_RSC 0x80000000 /* Refresh load control */
+#define ECC_MDR_MASK 0x7fffffff
+
+/* ECC fault status register */
+#define ECC_MFSR_CE 0x00000001 /* Correctable error */
+#define ECC_MFSR_BS 0x00000002 /* C2 graphics bad slot access */
+#define ECC_MFSR_TO 0x00000004 /* Timeout on write */
+#define ECC_MFSR_UE 0x00000008 /* Uncorrectable error */
+#define ECC_MFSR_DW 0x000000f0 /* Index of double word in block */
+#define ECC_MFSR_SYND 0x0000ff00 /* Syndrome for correctable error */
+#define ECC_MFSR_ME 0x00010000 /* Multiple errors */
+#define ECC_MFSR_C2ERR 0x00020000 /* C2 graphics error */
+
+/* ECC fault address register 0 */
+#define ECC_MFAR0_PADDR 0x0000000f /* PA[32-35] */
+#define ECC_MFAR0_TYPE 0x000000f0 /* Transaction type */
+#define ECC_MFAR0_SIZE 0x00000700 /* Transaction size */
+#define ECC_MFAR0_CACHE 0x00000800 /* Mapped cacheable */
+#define ECC_MFAR0_LOCK 0x00001000 /* Error occurred in atomic cycle */
+#define ECC_MFAR0_BMODE 0x00002000 /* Boot mode */
+#define ECC_MFAR0_VADDR 0x003fc000 /* VA[12-19] (superset bits) */
+#define ECC_MFAR0_S 0x08000000 /* Supervisor mode */
+#define ECC_MFAR0_MID 0xf0000000 /* Module ID */
+
+/* ECC diagnostic register */
+#define ECC_DR_CBX 0x00000001
+#define ECC_DR_CB0 0x00000002
+#define ECC_DR_CB1 0x00000004
+#define ECC_DR_CB2 0x00000008
+#define ECC_DR_CB4 0x00000010
+#define ECC_DR_CB8 0x00000020
+#define ECC_DR_CB16 0x00000040
+#define ECC_DR_CB32 0x00000080
+#define ECC_DR_DMODE 0x00000c00
+
+#define ECC_NREGS 9
+#define ECC_SIZE (ECC_NREGS * sizeof(uint32_t))
+
+#define ECC_DIAG_SIZE 4
+#define ECC_DIAG_MASK (ECC_DIAG_SIZE - 1)
+
+typedef struct ECCState {
+ SysBusDevice busdev;
+ MemoryRegion iomem, iomem_diag;
+ qemu_irq irq;
+ uint32_t regs[ECC_NREGS];
+ uint8_t diag[ECC_DIAG_SIZE];
+ uint32_t version;
+} ECCState;
+
+static void ecc_mem_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ ECCState *s = opaque;
+
+ switch (addr >> 2) {
+ case ECC_MER:
+ if (s->version == ECC_MCC) {
+ s->regs[ECC_MER] = (val & ECC_MER_MASK_0);
+ } else if (s->version == ECC_EMC) {
+ s->regs[ECC_MER] = s->version | (val & ECC_MER_MASK_1);
+ } else if (s->version == ECC_SMC) {
+ s->regs[ECC_MER] = s->version | (val & ECC_MER_MASK_2);
+ }
+ trace_ecc_mem_writel_mer(val);
+ break;
+ case ECC_MDR:
+ s->regs[ECC_MDR] = val & ECC_MDR_MASK;
+ trace_ecc_mem_writel_mdr(val);
+ break;
+ case ECC_MFSR:
+ s->regs[ECC_MFSR] = val;
+ qemu_irq_lower(s->irq);
+ trace_ecc_mem_writel_mfsr(val);
+ break;
+ case ECC_VCR:
+ s->regs[ECC_VCR] = val;
+ trace_ecc_mem_writel_vcr(val);
+ break;
+ case ECC_DR:
+ s->regs[ECC_DR] = val;
+ trace_ecc_mem_writel_dr(val);
+ break;
+ case ECC_ECR0:
+ s->regs[ECC_ECR0] = val;
+ trace_ecc_mem_writel_ecr0(val);
+ break;
+ case ECC_ECR1:
+ s->regs[ECC_ECR1] = val;
+ trace_ecc_mem_writel_ecr1(val);
+ break;
+ }
+}
+
+static uint64_t ecc_mem_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ ECCState *s = opaque;
+ uint32_t ret = 0;
+
+ switch (addr >> 2) {
+ case ECC_MER:
+ ret = s->regs[ECC_MER];
+ trace_ecc_mem_readl_mer(ret);
+ break;
+ case ECC_MDR:
+ ret = s->regs[ECC_MDR];
+ trace_ecc_mem_readl_mdr(ret);
+ break;
+ case ECC_MFSR:
+ ret = s->regs[ECC_MFSR];
+ trace_ecc_mem_readl_mfsr(ret);
+ break;
+ case ECC_VCR:
+ ret = s->regs[ECC_VCR];
+ trace_ecc_mem_readl_vcr(ret);
+ break;
+ case ECC_MFAR0:
+ ret = s->regs[ECC_MFAR0];
+ trace_ecc_mem_readl_mfar0(ret);
+ break;
+ case ECC_MFAR1:
+ ret = s->regs[ECC_MFAR1];
+ trace_ecc_mem_readl_mfar1(ret);
+ break;
+ case ECC_DR:
+ ret = s->regs[ECC_DR];
+ trace_ecc_mem_readl_dr(ret);
+ break;
+ case ECC_ECR0:
+ ret = s->regs[ECC_ECR0];
+ trace_ecc_mem_readl_ecr0(ret);
+ break;
+ case ECC_ECR1:
+ ret = s->regs[ECC_ECR1];
+ trace_ecc_mem_readl_ecr1(ret);
+ break;
+ }
+ return ret;
+}
+
+static const MemoryRegionOps ecc_mem_ops = {
+ .read = ecc_mem_read,
+ .write = ecc_mem_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static void ecc_diag_mem_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ ECCState *s = opaque;
+
+ trace_ecc_diag_mem_writeb(addr, val);
+ s->diag[addr & ECC_DIAG_MASK] = val;
+}
+
+static uint64_t ecc_diag_mem_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ ECCState *s = opaque;
+ uint32_t ret = s->diag[(int)addr];
+
+ trace_ecc_diag_mem_readb(addr, ret);
+ return ret;
+}
+
+static const MemoryRegionOps ecc_diag_mem_ops = {
+ .read = ecc_diag_mem_read,
+ .write = ecc_diag_mem_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 1,
+ },
+};
+
+static const VMStateDescription vmstate_ecc = {
+ .name ="ECC",
+ .version_id = 3,
+ .minimum_version_id = 3,
+ .minimum_version_id_old = 3,
+ .fields = (VMStateField []) {
+ VMSTATE_UINT32_ARRAY(regs, ECCState, ECC_NREGS),
+ VMSTATE_BUFFER(diag, ECCState),
+ VMSTATE_UINT32(version, ECCState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void ecc_reset(DeviceState *d)
+{
+ ECCState *s = container_of(d, ECCState, busdev.qdev);
+
+ if (s->version == ECC_MCC) {
+ s->regs[ECC_MER] &= ECC_MER_REU;
+ } else {
+ s->regs[ECC_MER] &= (ECC_MER_VER | ECC_MER_IMPL | ECC_MER_MRR |
+ ECC_MER_DCI);
+ }
+ s->regs[ECC_MDR] = 0x20;
+ s->regs[ECC_MFSR] = 0;
+ s->regs[ECC_VCR] = 0;
+ s->regs[ECC_MFAR0] = 0x07c00000;
+ s->regs[ECC_MFAR1] = 0;
+ s->regs[ECC_DR] = 0;
+ s->regs[ECC_ECR0] = 0;
+ s->regs[ECC_ECR1] = 0;
+}
+
+static int ecc_init1(SysBusDevice *dev)
+{
+ ECCState *s = FROM_SYSBUS(ECCState, dev);
+
+ sysbus_init_irq(dev, &s->irq);
+ s->regs[0] = s->version;
+ memory_region_init_io(&s->iomem, &ecc_mem_ops, s, "ecc", ECC_SIZE);
+ sysbus_init_mmio(dev, &s->iomem);
+
+ if (s->version == ECC_MCC) { /* SS-600MP only */
+ memory_region_init_io(&s->iomem_diag, &ecc_diag_mem_ops, s,
+ "ecc.diag", ECC_DIAG_SIZE);
+ sysbus_init_mmio(dev, &s->iomem_diag);
+ }
+
+ return 0;
+}
+
+static Property ecc_properties[] = {
+ DEFINE_PROP_HEX32("version", ECCState, version, -1),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void ecc_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+
+ k->init = ecc_init1;
+ dc->reset = ecc_reset;
+ dc->vmsd = &vmstate_ecc;
+ dc->props = ecc_properties;
+}
+
+static const TypeInfo ecc_info = {
+ .name = "eccmemctl",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(ECCState),
+ .class_init = ecc_class_init,
+};
+
+
+static void ecc_register_types(void)
+{
+ type_register_static(&ecc_info);
+}
+
+type_init(ecc_register_types)
diff --git a/hw/misc/exynos4210_pmu.c b/hw/misc/exynos4210_pmu.c
new file mode 100644
index 0000000..ba5aa8d
--- /dev/null
+++ b/hw/misc/exynos4210_pmu.c
@@ -0,0 +1,499 @@
+/*
+ * Exynos4210 Power Management Unit (PMU) Emulation
+ *
+ * Copyright (C) 2011 Samsung Electronics Co Ltd.
+ * Maksim Kozlov <m.kozlov@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * This model implements PMU registers just as a bulk of memory. Currently,
+ * the only reason this device exists is that secondary CPU boot loader
+ * uses PMU INFORM5 register as a holding pen.
+ */
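+/*
+ * Rough sketch of the holding-pen protocol this enables (the exact guest
+ * code lives in firmware, not in this model): the primary CPU stores the
+ * secondary CPU's entry point into INFORM5, while the secondary spins on
+ * the register and jumps once it becomes non-zero:
+ *
+ *     do {
+ *         entry = readl(pmu_base + INFORM5);
+ *     } while (entry == 0);
+ *     ((void (*)(void))entry)();
+ *
+ * Here readl() and pmu_base stand in for whatever the firmware uses.
+ */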
+
+#include "hw/sysbus.h"
+
+#ifndef DEBUG_PMU
+#define DEBUG_PMU 0
+#endif
+
+#ifndef DEBUG_PMU_EXTEND
+#define DEBUG_PMU_EXTEND 0
+#endif
+
+#if DEBUG_PMU
+#define PRINT_DEBUG(fmt, args...) \
+ do { \
+ fprintf(stderr, " [%s:%d] "fmt, __func__, __LINE__, ##args); \
+ } while (0)
+
+#if DEBUG_PMU_EXTEND
+#define PRINT_DEBUG_EXTEND(fmt, args...) \
+ do { \
+ fprintf(stderr, " [%s:%d] "fmt, __func__, __LINE__, ##args); \
+ } while (0)
+#else
+#define PRINT_DEBUG_EXTEND(fmt, args...) do {} while (0)
+#endif /* EXTEND */
+
+#else
+#define PRINT_DEBUG(fmt, args...) do {} while (0)
+#define PRINT_DEBUG_EXTEND(fmt, args...) do {} while (0)
+#endif
+
+/*
+ * Offsets for PMU registers
+ */
+#define OM_STAT 0x0000 /* OM status register */
+#define RTC_CLKO_SEL 0x000C /* Controls RTCCLKOUT */
+#define GNSS_RTC_OUT_CTRL 0x0010 /* Controls GNSS_RTC_OUT */
+/* Decides whether system-level low-power mode is used. */
+#define SYSTEM_POWER_DOWN_CTRL 0x0200
+/* Sets control options for CENTRAL_SEQ */
+#define SYSTEM_POWER_DOWN_OPTION 0x0208
+#define SWRESET 0x0400 /* Generate software reset */
+#define RST_STAT 0x0404 /* Reset status register */
+#define WAKEUP_STAT 0x0600 /* Wakeup status register */
+#define EINT_WAKEUP_MASK 0x0604 /* Configure External INTerrupt mask */
+#define WAKEUP_MASK 0x0608 /* Configure wakeup source mask */
+#define HDMI_PHY_CONTROL 0x0700 /* HDMI PHY control register */
+#define USBDEVICE_PHY_CONTROL 0x0704 /* USB Device PHY control register */
+#define USBHOST_PHY_CONTROL 0x0708 /* USB HOST PHY control register */
+#define DAC_PHY_CONTROL 0x070C /* DAC control register */
+#define MIPI_PHY0_CONTROL 0x0710 /* MIPI PHY control register */
+#define MIPI_PHY1_CONTROL 0x0714 /* MIPI PHY control register */
+#define ADC_PHY_CONTROL 0x0718 /* TS-ADC control register */
+#define PCIe_PHY_CONTROL 0x071C /* TS-PCIe control register */
+#define SATA_PHY_CONTROL 0x0720 /* TS-SATA control register */
+#define INFORM0 0x0800 /* Information register 0 */
+#define INFORM1 0x0804 /* Information register 1 */
+#define INFORM2 0x0808 /* Information register 2 */
+#define INFORM3 0x080C /* Information register 3 */
+#define INFORM4 0x0810 /* Information register 4 */
+#define INFORM5 0x0814 /* Information register 5 */
+#define INFORM6 0x0818 /* Information register 6 */
+#define INFORM7 0x081C /* Information register 7 */
+#define PMU_DEBUG 0x0A00 /* PMU debug register */
+/* Registers to set system-level low-power option */
+#define ARM_CORE0_SYS_PWR_REG 0x1000
+#define ARM_CORE1_SYS_PWR_REG 0x1010
+#define ARM_COMMON_SYS_PWR_REG 0x1080
+#define ARM_CPU_L2_0_SYS_PWR_REG 0x10C0
+#define ARM_CPU_L2_1_SYS_PWR_REG 0x10C4
+#define CMU_ACLKSTOP_SYS_PWR_REG 0x1100
+#define CMU_SCLKSTOP_SYS_PWR_REG 0x1104
+#define CMU_RESET_SYS_PWR_REG 0x110C
+#define APLL_SYSCLK_SYS_PWR_REG 0x1120
+#define MPLL_SYSCLK_SYS_PWR_REG 0x1124
+#define VPLL_SYSCLK_SYS_PWR_REG 0x1128
+#define EPLL_SYSCLK_SYS_PWR_REG 0x112C
+#define CMU_CLKSTOP_GPS_ALIVE_SYS_PWR_REG 0x1138
+#define CMU_RESET_GPS_ALIVE_SYS_PWR_REG 0x113C
+#define CMU_CLKSTOP_CAM_SYS_PWR_REG 0x1140
+#define CMU_CLKSTOP_TV_SYS_PWR_REG 0x1144
+#define CMU_CLKSTOP_MFC_SYS_PWR_REG 0x1148
+#define CMU_CLKSTOP_G3D_SYS_PWR_REG 0x114C
+#define CMU_CLKSTOP_LCD0_SYS_PWR_REG 0x1150
+#define CMU_CLKSTOP_LCD1_SYS_PWR_REG 0x1154
+#define CMU_CLKSTOP_MAUDIO_SYS_PWR_REG 0x1158
+#define CMU_CLKSTOP_GPS_SYS_PWR_REG 0x115C
+#define CMU_RESET_CAM_SYS_PWR_REG 0x1160
+#define CMU_RESET_TV_SYS_PWR_REG 0x1164
+#define CMU_RESET_MFC_SYS_PWR_REG 0x1168
+#define CMU_RESET_G3D_SYS_PWR_REG 0x116C
+#define CMU_RESET_LCD0_SYS_PWR_REG 0x1170
+#define CMU_RESET_LCD1_SYS_PWR_REG 0x1174
+#define CMU_RESET_MAUDIO_SYS_PWR_REG 0x1178
+#define CMU_RESET_GPS_SYS_PWR_REG 0x117C
+#define TOP_BUS_SYS_PWR_REG 0x1180
+#define TOP_RETENTION_SYS_PWR_REG 0x1184
+#define TOP_PWR_SYS_PWR_REG 0x1188
+#define LOGIC_RESET_SYS_PWR_REG 0x11A0
+#define OneNANDXL_MEM_SYS_PWR_REG 0x11C0
+#define MODEMIF_MEM_SYS_PWR_REG 0x11C4
+#define USBDEVICE_MEM_SYS_PWR_REG 0x11CC
+#define SDMMC_MEM_SYS_PWR_REG 0x11D0
+#define CSSYS_MEM_SYS_PWR_REG 0x11D4
+#define SECSS_MEM_SYS_PWR_REG 0x11D8
+#define PCIe_MEM_SYS_PWR_REG 0x11E0
+#define SATA_MEM_SYS_PWR_REG 0x11E4
+#define PAD_RETENTION_DRAM_SYS_PWR_REG 0x1200
+#define PAD_RETENTION_MAUDIO_SYS_PWR_REG 0x1204
+#define PAD_RETENTION_GPIO_SYS_PWR_REG 0x1220
+#define PAD_RETENTION_UART_SYS_PWR_REG 0x1224
+#define PAD_RETENTION_MMCA_SYS_PWR_REG 0x1228
+#define PAD_RETENTION_MMCB_SYS_PWR_REG 0x122C
+#define PAD_RETENTION_EBIA_SYS_PWR_REG 0x1230
+#define PAD_RETENTION_EBIB_SYS_PWR_REG 0x1234
+#define PAD_ISOLATION_SYS_PWR_REG 0x1240
+#define PAD_ALV_SEL_SYS_PWR_REG 0x1260
+#define XUSBXTI_SYS_PWR_REG 0x1280
+#define XXTI_SYS_PWR_REG 0x1284
+#define EXT_REGULATOR_SYS_PWR_REG 0x12C0
+#define GPIO_MODE_SYS_PWR_REG 0x1300
+#define GPIO_MODE_MAUDIO_SYS_PWR_REG 0x1340
+#define CAM_SYS_PWR_REG 0x1380
+#define TV_SYS_PWR_REG 0x1384
+#define MFC_SYS_PWR_REG 0x1388
+#define G3D_SYS_PWR_REG 0x138C
+#define LCD0_SYS_PWR_REG 0x1390
+#define LCD1_SYS_PWR_REG 0x1394
+#define MAUDIO_SYS_PWR_REG 0x1398
+#define GPS_SYS_PWR_REG 0x139C
+#define GPS_ALIVE_SYS_PWR_REG 0x13A0
+#define ARM_CORE0_CONFIGURATION 0x2000 /* Configure power mode of ARM_CORE0 */
+#define ARM_CORE0_STATUS 0x2004 /* Check power mode of ARM_CORE0 */
+#define ARM_CORE0_OPTION 0x2008 /* Sets control options for ARM_CORE0 */
+#define ARM_CORE1_CONFIGURATION 0x2080 /* Configure power mode of ARM_CORE1 */
+#define ARM_CORE1_STATUS 0x2084 /* Check power mode of ARM_CORE1 */
+#define ARM_CORE1_OPTION 0x2088 /* Sets control options for ARM_CORE1 */
+#define ARM_COMMON_OPTION 0x2408 /* Sets control options for ARM_COMMON */
+/* Configure power mode of ARM_CPU_L2_0 */
+#define ARM_CPU_L2_0_CONFIGURATION 0x2600
+#define ARM_CPU_L2_0_STATUS 0x2604 /* Check power mode of ARM_CPU_L2_0 */
+/* Configure power mode of ARM_CPU_L2_1 */
+#define ARM_CPU_L2_1_CONFIGURATION 0x2620
+#define ARM_CPU_L2_1_STATUS 0x2624 /* Check power mode of ARM_CPU_L2_1 */
+/* Sets control options for PAD_RETENTION_MAUDIO */
+#define PAD_RETENTION_MAUDIO_OPTION 0x3028
+/* Sets control options for PAD_RETENTION_GPIO */
+#define PAD_RETENTION_GPIO_OPTION 0x3108
+/* Sets control options for PAD_RETENTION_UART */
+#define PAD_RETENTION_UART_OPTION 0x3128
+/* Sets control options for PAD_RETENTION_MMCA */
+#define PAD_RETENTION_MMCA_OPTION 0x3148
+/* Sets control options for PAD_RETENTION_MMCB */
+#define PAD_RETENTION_MMCB_OPTION 0x3168
+/* Sets control options for PAD_RETENTION_EBIA */
+#define PAD_RETENTION_EBIA_OPTION 0x3188
+/* Sets control options for PAD_RETENTION_EBIB */
+#define PAD_RETENTION_EBIB_OPTION 0x31A8
+#define PS_HOLD_CONTROL 0x330C /* PS_HOLD control register */
+#define XUSBXTI_CONFIGURATION 0x3400 /* Configure the pad of XUSBXTI */
+#define XUSBXTI_STATUS 0x3404 /* Check the pad of XUSBXTI */
+/* Sets time required for XUSBXTI to be stabilized */
+#define XUSBXTI_DURATION 0x341C
+#define XXTI_CONFIGURATION 0x3420 /* Configure the pad of XXTI */
+#define XXTI_STATUS 0x3424 /* Check the pad of XXTI */
+/* Sets time required for XXTI to be stabilized */
+#define XXTI_DURATION 0x343C
+/* Sets time required for EXT_REGULATOR to be stabilized */
+#define EXT_REGULATOR_DURATION 0x361C
+#define CAM_CONFIGURATION 0x3C00 /* Configure power mode of CAM */
+#define CAM_STATUS 0x3C04 /* Check power mode of CAM */
+#define CAM_OPTION 0x3C08 /* Sets control options for CAM */
+#define TV_CONFIGURATION 0x3C20 /* Configure power mode of TV */
+#define TV_STATUS 0x3C24 /* Check power mode of TV */
+#define TV_OPTION 0x3C28 /* Sets control options for TV */
+#define MFC_CONFIGURATION 0x3C40 /* Configure power mode of MFC */
+#define MFC_STATUS 0x3C44 /* Check power mode of MFC */
+#define MFC_OPTION 0x3C48 /* Sets control options for MFC */
+#define G3D_CONFIGURATION 0x3C60 /* Configure power mode of G3D */
+#define G3D_STATUS 0x3C64 /* Check power mode of G3D */
+#define G3D_OPTION 0x3C68 /* Sets control options for G3D */
+#define LCD0_CONFIGURATION 0x3C80 /* Configure power mode of LCD0 */
+#define LCD0_STATUS 0x3C84 /* Check power mode of LCD0 */
+#define LCD0_OPTION 0x3C88 /* Sets control options for LCD0 */
+#define LCD1_CONFIGURATION 0x3CA0 /* Configure power mode of LCD1 */
+#define LCD1_STATUS 0x3CA4 /* Check power mode of LCD1 */
+#define LCD1_OPTION 0x3CA8 /* Sets control options for LCD1 */
+#define GPS_CONFIGURATION 0x3CE0 /* Configure power mode of GPS */
+#define GPS_STATUS 0x3CE4 /* Check power mode of GPS */
+#define GPS_OPTION 0x3CE8 /* Sets control options for GPS */
+#define GPS_ALIVE_CONFIGURATION 0x3D00 /* Configure power mode of GPS_ALIVE */
+#define GPS_ALIVE_STATUS 0x3D04 /* Check power mode of GPS_ALIVE */
+#define GPS_ALIVE_OPTION 0x3D08 /* Sets control options for GPS_ALIVE */
+
+#define EXYNOS4210_PMU_REGS_MEM_SIZE 0x3d0c
+
+typedef struct Exynos4210PmuReg {
+ const char *name; /* for debug only */
+ uint32_t offset;
+ uint32_t reset_value;
+} Exynos4210PmuReg;
+
+static const Exynos4210PmuReg exynos4210_pmu_regs[] = {
+ {"OM_STAT", OM_STAT, 0x00000000},
+ {"RTC_CLKO_SEL", RTC_CLKO_SEL, 0x00000000},
+ {"GNSS_RTC_OUT_CTRL", GNSS_RTC_OUT_CTRL, 0x00000001},
+ {"SYSTEM_POWER_DOWN_CTRL", SYSTEM_POWER_DOWN_CTRL, 0x00010000},
+ {"SYSTEM_POWER_DOWN_OPTION", SYSTEM_POWER_DOWN_OPTION, 0x03030000},
+ {"SWRESET", SWRESET, 0x00000000},
+ {"RST_STAT", RST_STAT, 0x00000000},
+ {"WAKEUP_STAT", WAKEUP_STAT, 0x00000000},
+ {"EINT_WAKEUP_MASK", EINT_WAKEUP_MASK, 0x00000000},
+ {"WAKEUP_MASK", WAKEUP_MASK, 0x00000000},
+ {"HDMI_PHY_CONTROL", HDMI_PHY_CONTROL, 0x00960000},
+ {"USBDEVICE_PHY_CONTROL", USBDEVICE_PHY_CONTROL, 0x00000000},
+ {"USBHOST_PHY_CONTROL", USBHOST_PHY_CONTROL, 0x00000000},
+ {"DAC_PHY_CONTROL", DAC_PHY_CONTROL, 0x00000000},
+ {"MIPI_PHY0_CONTROL", MIPI_PHY0_CONTROL, 0x00000000},
+ {"MIPI_PHY1_CONTROL", MIPI_PHY1_CONTROL, 0x00000000},
+ {"ADC_PHY_CONTROL", ADC_PHY_CONTROL, 0x00000001},
+ {"PCIe_PHY_CONTROL", PCIe_PHY_CONTROL, 0x00000000},
+ {"SATA_PHY_CONTROL", SATA_PHY_CONTROL, 0x00000000},
+ {"INFORM0", INFORM0, 0x00000000},
+ {"INFORM1", INFORM1, 0x00000000},
+ {"INFORM2", INFORM2, 0x00000000},
+ {"INFORM3", INFORM3, 0x00000000},
+ {"INFORM4", INFORM4, 0x00000000},
+ {"INFORM5", INFORM5, 0x00000000},
+ {"INFORM6", INFORM6, 0x00000000},
+ {"INFORM7", INFORM7, 0x00000000},
+ {"PMU_DEBUG", PMU_DEBUG, 0x00000000},
+ {"ARM_CORE0_SYS_PWR_REG", ARM_CORE0_SYS_PWR_REG, 0xFFFFFFFF},
+ {"ARM_CORE1_SYS_PWR_REG", ARM_CORE1_SYS_PWR_REG, 0xFFFFFFFF},
+ {"ARM_COMMON_SYS_PWR_REG", ARM_COMMON_SYS_PWR_REG, 0xFFFFFFFF},
+ {"ARM_CPU_L2_0_SYS_PWR_REG", ARM_CPU_L2_0_SYS_PWR_REG, 0xFFFFFFFF},
+ {"ARM_CPU_L2_1_SYS_PWR_REG", ARM_CPU_L2_1_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CMU_ACLKSTOP_SYS_PWR_REG", CMU_ACLKSTOP_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CMU_SCLKSTOP_SYS_PWR_REG", CMU_SCLKSTOP_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CMU_RESET_SYS_PWR_REG", CMU_RESET_SYS_PWR_REG, 0xFFFFFFFF},
+ {"APLL_SYSCLK_SYS_PWR_REG", APLL_SYSCLK_SYS_PWR_REG, 0xFFFFFFFF},
+ {"MPLL_SYSCLK_SYS_PWR_REG", MPLL_SYSCLK_SYS_PWR_REG, 0xFFFFFFFF},
+ {"VPLL_SYSCLK_SYS_PWR_REG", VPLL_SYSCLK_SYS_PWR_REG, 0xFFFFFFFF},
+ {"EPLL_SYSCLK_SYS_PWR_REG", EPLL_SYSCLK_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CMU_CLKSTOP_GPS_ALIVE_SYS_PWR_REG", CMU_CLKSTOP_GPS_ALIVE_SYS_PWR_REG,
+ 0xFFFFFFFF},
+ {"CMU_RESET_GPS_ALIVE_SYS_PWR_REG", CMU_RESET_GPS_ALIVE_SYS_PWR_REG,
+ 0xFFFFFFFF},
+ {"CMU_CLKSTOP_CAM_SYS_PWR_REG", CMU_CLKSTOP_CAM_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CMU_CLKSTOP_TV_SYS_PWR_REG", CMU_CLKSTOP_TV_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CMU_CLKSTOP_MFC_SYS_PWR_REG", CMU_CLKSTOP_MFC_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CMU_CLKSTOP_G3D_SYS_PWR_REG", CMU_CLKSTOP_G3D_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CMU_CLKSTOP_LCD0_SYS_PWR_REG", CMU_CLKSTOP_LCD0_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CMU_CLKSTOP_LCD1_SYS_PWR_REG", CMU_CLKSTOP_LCD1_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CMU_CLKSTOP_MAUDIO_SYS_PWR_REG", CMU_CLKSTOP_MAUDIO_SYS_PWR_REG,
+ 0xFFFFFFFF},
+ {"CMU_CLKSTOP_GPS_SYS_PWR_REG", CMU_CLKSTOP_GPS_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CMU_RESET_CAM_SYS_PWR_REG", CMU_RESET_CAM_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CMU_RESET_TV_SYS_PWR_REG", CMU_RESET_TV_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CMU_RESET_MFC_SYS_PWR_REG", CMU_RESET_MFC_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CMU_RESET_G3D_SYS_PWR_REG", CMU_RESET_G3D_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CMU_RESET_LCD0_SYS_PWR_REG", CMU_RESET_LCD0_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CMU_RESET_LCD1_SYS_PWR_REG", CMU_RESET_LCD1_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CMU_RESET_MAUDIO_SYS_PWR_REG", CMU_RESET_MAUDIO_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CMU_RESET_GPS_SYS_PWR_REG", CMU_RESET_GPS_SYS_PWR_REG, 0xFFFFFFFF},
+ {"TOP_BUS_SYS_PWR_REG", TOP_BUS_SYS_PWR_REG, 0xFFFFFFFF},
+ {"TOP_RETENTION_SYS_PWR_REG", TOP_RETENTION_SYS_PWR_REG, 0xFFFFFFFF},
+ {"TOP_PWR_SYS_PWR_REG", TOP_PWR_SYS_PWR_REG, 0xFFFFFFFF},
+ {"LOGIC_RESET_SYS_PWR_REG", LOGIC_RESET_SYS_PWR_REG, 0xFFFFFFFF},
+ {"OneNANDXL_MEM_SYS_PWR_REG", OneNANDXL_MEM_SYS_PWR_REG, 0xFFFFFFFF},
+ {"MODEMIF_MEM_SYS_PWR_REG", MODEMIF_MEM_SYS_PWR_REG, 0xFFFFFFFF},
+ {"USBDEVICE_MEM_SYS_PWR_REG", USBDEVICE_MEM_SYS_PWR_REG, 0xFFFFFFFF},
+ {"SDMMC_MEM_SYS_PWR_REG", SDMMC_MEM_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CSSYS_MEM_SYS_PWR_REG", CSSYS_MEM_SYS_PWR_REG, 0xFFFFFFFF},
+ {"SECSS_MEM_SYS_PWR_REG", SECSS_MEM_SYS_PWR_REG, 0xFFFFFFFF},
+ {"PCIe_MEM_SYS_PWR_REG", PCIe_MEM_SYS_PWR_REG, 0xFFFFFFFF},
+ {"SATA_MEM_SYS_PWR_REG", SATA_MEM_SYS_PWR_REG, 0xFFFFFFFF},
+ {"PAD_RETENTION_DRAM_SYS_PWR_REG", PAD_RETENTION_DRAM_SYS_PWR_REG,
+ 0xFFFFFFFF},
+ {"PAD_RETENTION_MAUDIO_SYS_PWR_REG", PAD_RETENTION_MAUDIO_SYS_PWR_REG,
+ 0xFFFFFFFF},
+ {"PAD_RETENTION_GPIO_SYS_PWR_REG", PAD_RETENTION_GPIO_SYS_PWR_REG,
+ 0xFFFFFFFF},
+ {"PAD_RETENTION_UART_SYS_PWR_REG", PAD_RETENTION_UART_SYS_PWR_REG,
+ 0xFFFFFFFF},
+ {"PAD_RETENTION_MMCA_SYS_PWR_REG", PAD_RETENTION_MMCA_SYS_PWR_REG,
+ 0xFFFFFFFF},
+ {"PAD_RETENTION_MMCB_SYS_PWR_REG", PAD_RETENTION_MMCB_SYS_PWR_REG,
+ 0xFFFFFFFF},
+ {"PAD_RETENTION_EBIA_SYS_PWR_REG", PAD_RETENTION_EBIA_SYS_PWR_REG,
+ 0xFFFFFFFF},
+ {"PAD_RETENTION_EBIB_SYS_PWR_REG", PAD_RETENTION_EBIB_SYS_PWR_REG,
+ 0xFFFFFFFF},
+ {"PAD_ISOLATION_SYS_PWR_REG", PAD_ISOLATION_SYS_PWR_REG, 0xFFFFFFFF},
+ {"PAD_ALV_SEL_SYS_PWR_REG", PAD_ALV_SEL_SYS_PWR_REG, 0xFFFFFFFF},
+ {"XUSBXTI_SYS_PWR_REG", XUSBXTI_SYS_PWR_REG, 0xFFFFFFFF},
+ {"XXTI_SYS_PWR_REG", XXTI_SYS_PWR_REG, 0xFFFFFFFF},
+ {"EXT_REGULATOR_SYS_PWR_REG", EXT_REGULATOR_SYS_PWR_REG, 0xFFFFFFFF},
+ {"GPIO_MODE_SYS_PWR_REG", GPIO_MODE_SYS_PWR_REG, 0xFFFFFFFF},
+ {"GPIO_MODE_MAUDIO_SYS_PWR_REG", GPIO_MODE_MAUDIO_SYS_PWR_REG, 0xFFFFFFFF},
+ {"CAM_SYS_PWR_REG", CAM_SYS_PWR_REG, 0xFFFFFFFF},
+ {"TV_SYS_PWR_REG", TV_SYS_PWR_REG, 0xFFFFFFFF},
+ {"MFC_SYS_PWR_REG", MFC_SYS_PWR_REG, 0xFFFFFFFF},
+ {"G3D_SYS_PWR_REG", G3D_SYS_PWR_REG, 0xFFFFFFFF},
+ {"LCD0_SYS_PWR_REG", LCD0_SYS_PWR_REG, 0xFFFFFFFF},
+ {"LCD1_SYS_PWR_REG", LCD1_SYS_PWR_REG, 0xFFFFFFFF},
+ {"MAUDIO_SYS_PWR_REG", MAUDIO_SYS_PWR_REG, 0xFFFFFFFF},
+ {"GPS_SYS_PWR_REG", GPS_SYS_PWR_REG, 0xFFFFFFFF},
+ {"GPS_ALIVE_SYS_PWR_REG", GPS_ALIVE_SYS_PWR_REG, 0xFFFFFFFF},
+ {"ARM_CORE0_CONFIGURATION", ARM_CORE0_CONFIGURATION, 0x00000003},
+ {"ARM_CORE0_STATUS", ARM_CORE0_STATUS, 0x00030003},
+ {"ARM_CORE0_OPTION", ARM_CORE0_OPTION, 0x01010001},
+ {"ARM_CORE1_CONFIGURATION", ARM_CORE1_CONFIGURATION, 0x00000003},
+ {"ARM_CORE1_STATUS", ARM_CORE1_STATUS, 0x00030003},
+ {"ARM_CORE1_OPTION", ARM_CORE1_OPTION, 0x01010001},
+ {"ARM_COMMON_OPTION", ARM_COMMON_OPTION, 0x00000001},
+ {"ARM_CPU_L2_0_CONFIGURATION", ARM_CPU_L2_0_CONFIGURATION, 0x00000003},
+ {"ARM_CPU_L2_0_STATUS", ARM_CPU_L2_0_STATUS, 0x00000003},
+ {"ARM_CPU_L2_1_CONFIGURATION", ARM_CPU_L2_1_CONFIGURATION, 0x00000003},
+ {"ARM_CPU_L2_1_STATUS", ARM_CPU_L2_1_STATUS, 0x00000003},
+ {"PAD_RETENTION_MAUDIO_OPTION", PAD_RETENTION_MAUDIO_OPTION, 0x00000000},
+ {"PAD_RETENTION_GPIO_OPTION", PAD_RETENTION_GPIO_OPTION, 0x00000000},
+ {"PAD_RETENTION_UART_OPTION", PAD_RETENTION_UART_OPTION, 0x00000000},
+ {"PAD_RETENTION_MMCA_OPTION", PAD_RETENTION_MMCA_OPTION, 0x00000000},
+ {"PAD_RETENTION_MMCB_OPTION", PAD_RETENTION_MMCB_OPTION, 0x00000000},
+ {"PAD_RETENTION_EBIA_OPTION", PAD_RETENTION_EBIA_OPTION, 0x00000000},
+ {"PAD_RETENTION_EBIB_OPTION", PAD_RETENTION_EBIB_OPTION, 0x00000000},
+ {"PS_HOLD_CONTROL", PS_HOLD_CONTROL, 0x00005200},
+ {"XUSBXTI_CONFIGURATION", XUSBXTI_CONFIGURATION, 0x00000001},
+ {"XUSBXTI_STATUS", XUSBXTI_STATUS, 0x00000001},
+ {"XUSBXTI_DURATION", XUSBXTI_DURATION, 0xFFF00000},
+ {"XXTI_CONFIGURATION", XXTI_CONFIGURATION, 0x00000001},
+ {"XXTI_STATUS", XXTI_STATUS, 0x00000001},
+ {"XXTI_DURATION", XXTI_DURATION, 0xFFF00000},
+ {"EXT_REGULATOR_DURATION", EXT_REGULATOR_DURATION, 0xFFF03FFF},
+ {"CAM_CONFIGURATION", CAM_CONFIGURATION, 0x00000007},
+ {"CAM_STATUS", CAM_STATUS, 0x00060007},
+ {"CAM_OPTION", CAM_OPTION, 0x00000001},
+ {"TV_CONFIGURATION", TV_CONFIGURATION, 0x00000007},
+ {"TV_STATUS", TV_STATUS, 0x00060007},
+ {"TV_OPTION", TV_OPTION, 0x00000001},
+ {"MFC_CONFIGURATION", MFC_CONFIGURATION, 0x00000007},
+ {"MFC_STATUS", MFC_STATUS, 0x00060007},
+ {"MFC_OPTION", MFC_OPTION, 0x00000001},
+ {"G3D_CONFIGURATION", G3D_CONFIGURATION, 0x00000007},
+ {"G3D_STATUS", G3D_STATUS, 0x00060007},
+ {"G3D_OPTION", G3D_OPTION, 0x00000001},
+ {"LCD0_CONFIGURATION", LCD0_CONFIGURATION, 0x00000007},
+ {"LCD0_STATUS", LCD0_STATUS, 0x00060007},
+ {"LCD0_OPTION", LCD0_OPTION, 0x00000001},
+ {"LCD1_CONFIGURATION", LCD1_CONFIGURATION, 0x00000007},
+ {"LCD1_STATUS", LCD1_STATUS, 0x00060007},
+ {"LCD1_OPTION", LCD1_OPTION, 0x00000001},
+ {"GPS_CONFIGURATION", GPS_CONFIGURATION, 0x00000007},
+ {"GPS_STATUS", GPS_STATUS, 0x00060007},
+ {"GPS_OPTION", GPS_OPTION, 0x00000001},
+ {"GPS_ALIVE_CONFIGURATION", GPS_ALIVE_CONFIGURATION, 0x00000007},
+ {"GPS_ALIVE_STATUS", GPS_ALIVE_STATUS, 0x00060007},
+ {"GPS_ALIVE_OPTION", GPS_ALIVE_OPTION, 0x00000001},
+};
+
+#define PMU_NUM_OF_REGISTERS \
+ (sizeof(exynos4210_pmu_regs) / sizeof(Exynos4210PmuReg))
+
+typedef struct Exynos4210PmuState {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+ uint32_t reg[PMU_NUM_OF_REGISTERS];
+} Exynos4210PmuState;
+
+static uint64_t exynos4210_pmu_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ Exynos4210PmuState *s = (Exynos4210PmuState *)opaque;
+ unsigned i;
+ const Exynos4210PmuReg *reg_p = exynos4210_pmu_regs;
+
+ for (i = 0; i < PMU_NUM_OF_REGISTERS; i++) {
+ if (reg_p->offset == offset) {
+ PRINT_DEBUG_EXTEND("%s [0x%04x] -> 0x%04x\n", reg_p->name,
+ (uint32_t)offset, s->reg[i]);
+ return s->reg[i];
+ }
+ reg_p++;
+ }
+ PRINT_DEBUG("QEMU PMU ERROR: bad read offset 0x%04x\n", (uint32_t)offset);
+ return 0;
+}
+
+static void exynos4210_pmu_write(void *opaque, hwaddr offset,
+ uint64_t val, unsigned size)
+{
+ Exynos4210PmuState *s = (Exynos4210PmuState *)opaque;
+ unsigned i;
+ const Exynos4210PmuReg *reg_p = exynos4210_pmu_regs;
+
+ for (i = 0; i < PMU_NUM_OF_REGISTERS; i++) {
+ if (reg_p->offset == offset) {
+ PRINT_DEBUG_EXTEND("%s <0x%04x> <- 0x%04x\n", reg_p->name,
+ (uint32_t)offset, (uint32_t)val);
+ s->reg[i] = val;
+ return;
+ }
+ reg_p++;
+ }
+ PRINT_DEBUG("QEMU PMU ERROR: bad write offset 0x%04x\n", (uint32_t)offset);
+}
+
+static const MemoryRegionOps exynos4210_pmu_ops = {
+ .read = exynos4210_pmu_read,
+ .write = exynos4210_pmu_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ .unaligned = false
+ }
+};
+
+static void exynos4210_pmu_reset(DeviceState *dev)
+{
+ Exynos4210PmuState *s =
+ container_of(dev, Exynos4210PmuState, busdev.qdev);
+ unsigned i;
+
+ /* Set default values for registers */
+ for (i = 0; i < PMU_NUM_OF_REGISTERS; i++) {
+ s->reg[i] = exynos4210_pmu_regs[i].reset_value;
+ }
+}
+
+static int exynos4210_pmu_init(SysBusDevice *dev)
+{
+ Exynos4210PmuState *s = FROM_SYSBUS(Exynos4210PmuState, dev);
+
+ /* memory mapping */
+ memory_region_init_io(&s->iomem, &exynos4210_pmu_ops, s, "exynos4210.pmu",
+ EXYNOS4210_PMU_REGS_MEM_SIZE);
+ sysbus_init_mmio(dev, &s->iomem);
+ return 0;
+}
+
+static const VMStateDescription exynos4210_pmu_vmstate = {
+ .name = "exynos4210.pmu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(reg, Exynos4210PmuState, PMU_NUM_OF_REGISTERS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void exynos4210_pmu_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+
+ k->init = exynos4210_pmu_init;
+ dc->reset = exynos4210_pmu_reset;
+ dc->vmsd = &exynos4210_pmu_vmstate;
+}
+
+static const TypeInfo exynos4210_pmu_info = {
+ .name = "exynos4210.pmu",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(Exynos4210PmuState),
+ .class_init = exynos4210_pmu_class_init,
+};
+
+static void exynos4210_pmu_register(void)
+{
+ type_register_static(&exynos4210_pmu_info);
+}
+
+type_init(exynos4210_pmu_register)
diff --git a/hw/misc/imx_ccm.c b/hw/misc/imx_ccm.c
new file mode 100644
index 0000000..c153a24
--- /dev/null
+++ b/hw/misc/imx_ccm.c
@@ -0,0 +1,321 @@
+/*
+ * IMX31 Clock Control Module
+ *
+ * Copyright (C) 2012 NICTA
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * To get the timer frequencies right, we need to emulate at least part of
+ * the CCM.
+ */
+
+#include "hw/hw.h"
+#include "hw/sysbus.h"
+#include "sysemu/sysemu.h"
+#include "hw/arm/imx.h"
+
+#define CKIH_FREQ 26000000 /* 26MHz crystal input */
+#define CKIL_FREQ 32768 /* nominal 32kHz clock */
+
+
+//#define DEBUG_CCM 1
+#ifdef DEBUG_CCM
+#define DPRINTF(fmt, args...) \
+do { printf("imx_ccm: " fmt , ##args); } while (0)
+#else
+#define DPRINTF(fmt, args...) do {} while (0)
+#endif
+
+static int imx_ccm_post_load(void *opaque, int version_id);
+
+typedef struct {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+
+ uint32_t ccmr;
+ uint32_t pdr0;
+ uint32_t pdr1;
+ uint32_t mpctl;
+ uint32_t spctl;
+ uint32_t cgr[3];
+ uint32_t pmcr0;
+ uint32_t pmcr1;
+
+ /* Frequencies precalculated on register changes */
+ uint32_t pll_refclk_freq;
+ uint32_t mcu_clk_freq;
+ uint32_t hsp_clk_freq;
+ uint32_t ipg_clk_freq;
+} IMXCCMState;
+
+static const VMStateDescription vmstate_imx_ccm = {
+ .name = "imx-ccm",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(ccmr, IMXCCMState),
+ VMSTATE_UINT32(pdr0, IMXCCMState),
+ VMSTATE_UINT32(pdr1, IMXCCMState),
+ VMSTATE_UINT32(mpctl, IMXCCMState),
+ VMSTATE_UINT32(spctl, IMXCCMState),
+ VMSTATE_UINT32_ARRAY(cgr, IMXCCMState, 3),
+ VMSTATE_UINT32(pmcr0, IMXCCMState),
+ VMSTATE_UINT32(pmcr1, IMXCCMState),
+ VMSTATE_UINT32(pll_refclk_freq, IMXCCMState),
+ VMSTATE_END_OF_LIST()
+ },
+ .post_load = imx_ccm_post_load,
+};
+
+/* CCMR */
+#define CCMR_FPME (1<<0)
+#define CCMR_MPE (1<<3)
+#define CCMR_MDS (1<<7)
+#define CCMR_FPMF (1<<26)
+#define CCMR_PRCS (3<<1)
+
+/* PDR0 */
+#define PDR0_MCU_PODF_SHIFT (0)
+#define PDR0_MCU_PODF_MASK (0x7)
+#define PDR0_MAX_PODF_SHIFT (3)
+#define PDR0_MAX_PODF_MASK (0x7)
+#define PDR0_IPG_PODF_SHIFT (6)
+#define PDR0_IPG_PODF_MASK (0x3)
+#define PDR0_NFC_PODF_SHIFT (8)
+#define PDR0_NFC_PODF_MASK (0x7)
+#define PDR0_HSP_PODF_SHIFT (11)
+#define PDR0_HSP_PODF_MASK (0x7)
+#define PDR0_PER_PODF_SHIFT (16)
+#define PDR0_PER_PODF_MASK (0x1f)
+#define PDR0_CSI_PODF_SHIFT (23)
+#define PDR0_CSI_PODF_MASK (0x1ff)
+
+#define EXTRACT(value, name) (((value) >> PDR0_##name##_PODF_SHIFT) \
+ & PDR0_##name##_PODF_MASK)
+#define INSERT(value, name) (((value) & PDR0_##name##_PODF_MASK) << \
+ PDR0_##name##_PODF_SHIFT)
+/* PLL control registers */
+#define PD(v) (((v) >> 26) & 0xf)
+#define MFD(v) (((v) >> 16) & 0x3ff)
+#define MFI(v) (((v) >> 10) & 0xf)
+#define MFN(v) ((v) & 0x3ff)
+
+#define PLL_PD(x) (((x) & 0xf) << 26)
+#define PLL_MFD(x) (((x) & 0x3ff) << 16)
+#define PLL_MFI(x) (((x) & 0xf) << 10)
+#define PLL_MFN(x) (((x) & 0x3ff) << 0)
+
+uint32_t imx_clock_frequency(DeviceState *dev, IMXClk clock)
+{
+ IMXCCMState *s = container_of(dev, IMXCCMState, busdev.qdev);
+
+ switch (clock) {
+ case NOCLK:
+ return 0;
+ case MCU:
+ return s->mcu_clk_freq;
+ case HSP:
+ return s->hsp_clk_freq;
+ case IPG:
+ return s->ipg_clk_freq;
+ case CLK_32k:
+ return CKIL_FREQ;
+ }
+ return 0;
+}
+
+/*
+ * Calculate PLL output frequency
+ */
+static uint32_t calc_pll(uint32_t pllreg, uint32_t base_freq)
+{
+ int32_t mfn = MFN(pllreg); /* Numerator */
+ uint32_t mfi = MFI(pllreg); /* Integer part */
+ uint32_t mfd = 1 + MFD(pllreg); /* Denominator */
+ uint32_t pd = 1 + PD(pllreg); /* Pre-divider */
+
+ if (mfi < 5) {
+ mfi = 5;
+ }
+ /* mfn is 10-bit signed twos-complement */
+ mfn <<= 32 - 10;
+ mfn >>= 32 - 10;
+
+ return ((2 * (base_freq >> 10) * (mfi * mfd + mfn)) /
+ (mfd * pd)) << 10;
+}
+
+static void update_clocks(IMXCCMState *s)
+{
+ /*
+ * If we ever emulate more clocks, this should switch to a data-driven
+ * approach
+ */
+
+ if ((s->ccmr & CCMR_PRCS) == 1) {
+ s->pll_refclk_freq = CKIL_FREQ * 1024;
+ } else {
+ s->pll_refclk_freq = CKIH_FREQ;
+ }
+
+ /* ipg_clk_arm aka MCU clock */
+ if ((s->ccmr & CCMR_MDS) || !(s->ccmr & CCMR_MPE)) {
+ s->mcu_clk_freq = s->pll_refclk_freq;
+ } else {
+ s->mcu_clk_freq = calc_pll(s->mpctl, s->pll_refclk_freq);
+ }
+
+ /* High-speed clock */
+ s->hsp_clk_freq = s->mcu_clk_freq / (1 + EXTRACT(s->pdr0, HSP));
+ s->ipg_clk_freq = s->hsp_clk_freq / (1 + EXTRACT(s->pdr0, IPG));
+
+ DPRINTF("Clocks: mcu %uMHz, HSP %uMHz, IPG %uHz\n",
+ s->mcu_clk_freq / 1000000,
+ s->hsp_clk_freq / 1000000,
+ s->ipg_clk_freq);
+}
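+/*
+ * Worked example of what the model computes with the reset values set in
+ * imx_ccm_reset() below (boards and guests normally reprogram these): the
+ * reset CCMR takes the CKIH path, so pll_refclk_freq = 26 MHz; MPCTL
+ * encodes PD=1, MFD=0, MFI=6, MFN=0, so calc_pll() yields roughly
+ * 2 * 26 MHz * 6 / 2 = 156 MHz for the MCU clock (the >>10 / <<10 scaling
+ * loses a little precision); the HSP and IPG fields of PDR0 both give a
+ * divide-by-2, so HSP comes out near 78 MHz and IPG near 39 MHz.
+ */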
+
+static void imx_ccm_reset(DeviceState *dev)
+{
+ IMXCCMState *s = container_of(dev, IMXCCMState, busdev.qdev);
+
+ s->ccmr = 0x074b0b7b;
+ s->pdr0 = 0xff870b48;
+ s->pdr1 = 0x49fcfe7f;
+ s->mpctl = PLL_PD(1) | PLL_MFD(0) | PLL_MFI(6) | PLL_MFN(0);
+ s->cgr[0] = s->cgr[1] = s->cgr[2] = 0xffffffff;
+ s->spctl = PLL_PD(1) | PLL_MFD(4) | PLL_MFI(0xc) | PLL_MFN(1);
+ s->pmcr0 = 0x80209828;
+
+ update_clocks(s);
+}
+
+static uint64_t imx_ccm_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ IMXCCMState *s = (IMXCCMState *)opaque;
+
+ DPRINTF("read(offset=%x)", offset >> 2);
+ switch (offset >> 2) {
+ case 0: /* CCMR */
+ DPRINTF(" ccmr = 0x%x\n", s->ccmr);
+ return s->ccmr;
+ case 1:
+ DPRINTF(" pdr0 = 0x%x\n", s->pdr0);
+ return s->pdr0;
+ case 2:
+ DPRINTF(" pdr1 = 0x%x\n", s->pdr1);
+ return s->pdr1;
+ case 4:
+ DPRINTF(" mpctl = 0x%x\n", s->mpctl);
+ return s->mpctl;
+ case 6:
+ DPRINTF(" spctl = 0x%x\n", s->spctl);
+ return s->spctl;
+ case 8:
+ DPRINTF(" cgr0 = 0x%x\n", s->cgr[0]);
+ return s->cgr[0];
+ case 9:
+ DPRINTF(" cgr1 = 0x%x\n", s->cgr[1]);
+ return s->cgr[1];
+ case 10:
+ DPRINTF(" cgr2 = 0x%x\n", s->cgr[2]);
+ return s->cgr[2];
+ case 18: /* LTR1 */
+ return 0x00004040;
+ case 23:
+ DPRINTF(" pcmr0 = 0x%x\n", s->pmcr0);
+ return s->pmcr0;
+ }
+ DPRINTF(" return 0\n");
+ return 0;
+}
+
+static void imx_ccm_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ IMXCCMState *s = (IMXCCMState *)opaque;
+
+ DPRINTF("write(offset=%x, value = %x)\n",
+ (unsigned int)(offset >> 2), (unsigned int)value);
+ switch (offset >> 2) {
+ case 0:
+ s->ccmr = CCMR_FPMF | (value & 0x3b6fdfff);
+ break;
+ case 1:
+ s->pdr0 = value & 0xff9f3fff;
+ break;
+ case 2:
+ s->pdr1 = value;
+ break;
+ case 4:
+ s->mpctl = value & 0xbfff3fff;
+ break;
+ case 6:
+ s->spctl = value & 0xbfff3fff;
+ break;
+ case 8:
+ s->cgr[0] = value;
+ return;
+ case 9:
+ s->cgr[1] = value;
+ return;
+ case 10:
+ s->cgr[2] = value;
+ return;
+
+ default:
+ return;
+ }
+ update_clocks(s);
+}
+
+static const struct MemoryRegionOps imx_ccm_ops = {
+ .read = imx_ccm_read,
+ .write = imx_ccm_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static int imx_ccm_init(SysBusDevice *dev)
+{
+ IMXCCMState *s = FROM_SYSBUS(typeof(*s), dev);
+
+ memory_region_init_io(&s->iomem, &imx_ccm_ops, s, "imx_ccm", 0x1000);
+ sysbus_init_mmio(dev, &s->iomem);
+
+ return 0;
+}
+
+static int imx_ccm_post_load(void *opaque, int version_id)
+{
+ IMXCCMState *s = (IMXCCMState *)opaque;
+
+ update_clocks(s);
+ return 0;
+}
+
+static void imx_ccm_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);
+
+ sbc->init = imx_ccm_init;
+ dc->reset = imx_ccm_reset;
+ dc->vmsd = &vmstate_imx_ccm;
+ dc->desc = "i.MX Clock Control Module";
+}
+
+static const TypeInfo imx_ccm_info = {
+ .name = "imx_ccm",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(IMXCCMState),
+ .class_init = imx_ccm_class_init,
+};
+
+static void imx_ccm_register_types(void)
+{
+ type_register_static(&imx_ccm_info);
+}
+
+type_init(imx_ccm_register_types)
diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
new file mode 100644
index 0000000..f92ce19
--- /dev/null
+++ b/hw/misc/ivshmem.c
@@ -0,0 +1,826 @@
+/*
+ * Inter-VM Shared Memory PCI device.
+ *
+ * Author:
+ * Cam Macdonell <cam@cs.ualberta.ca>
+ *
+ * Based On: cirrus_vga.c
+ * Copyright (c) 2004 Fabrice Bellard
+ * Copyright (c) 2004 Makoto Suzuki (suzu)
+ *
+ * and rtl8139.c
+ * Copyright (c) 2006 Igor Kovalenko
+ *
+ * This code is licensed under the GNU GPL v2.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+#include "hw/hw.h"
+#include "hw/i386/pc.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/msix.h"
+#include "sysemu/kvm.h"
+#include "migration/migration.h"
+#include "qapi/qmp/qerror.h"
+#include "qemu/event_notifier.h"
+#include "char/char.h"
+
+#include <sys/mman.h>
+#include <sys/types.h>
+
+#define PCI_VENDOR_ID_IVSHMEM PCI_VENDOR_ID_REDHAT_QUMRANET
+#define PCI_DEVICE_ID_IVSHMEM 0x1110
+
+#define IVSHMEM_IOEVENTFD 0
+#define IVSHMEM_MSI 1
+
+#define IVSHMEM_PEER 0
+#define IVSHMEM_MASTER 1
+
+#define IVSHMEM_REG_BAR_SIZE 0x100
+
+//#define DEBUG_IVSHMEM
+#ifdef DEBUG_IVSHMEM
+#define IVSHMEM_DPRINTF(fmt, ...) \
+ do {printf("IVSHMEM: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define IVSHMEM_DPRINTF(fmt, ...)
+#endif
+
+typedef struct Peer {
+ int nb_eventfds;
+ EventNotifier *eventfds;
+} Peer;
+
+typedef struct EventfdEntry {
+ PCIDevice *pdev;
+ int vector;
+} EventfdEntry;
+
+typedef struct IVShmemState {
+ PCIDevice dev;
+ uint32_t intrmask;
+ uint32_t intrstatus;
+ uint32_t doorbell;
+
+ CharDriverState **eventfd_chr;
+ CharDriverState *server_chr;
+ MemoryRegion ivshmem_mmio;
+
+ /* We might need to register the BAR before we actually have the memory.
+ * So prepare a container MemoryRegion for the BAR immediately and
+ * add a subregion when we have the memory.
+ */
+ MemoryRegion bar;
+ MemoryRegion ivshmem;
+ uint64_t ivshmem_size; /* size of shared memory region */
+ uint32_t ivshmem_attr;
+ uint32_t ivshmem_64bit;
+ int shm_fd; /* shared memory file descriptor */
+
+ Peer *peers;
+ int nb_peers; /* how many guests we have space for */
+ int max_peer; /* maximum numbered peer */
+
+ int vm_id;
+ uint32_t vectors;
+ uint32_t features;
+ EventfdEntry *eventfd_table;
+
+ Error *migration_blocker;
+
+ char * shmobj;
+ char * sizearg;
+ char * role;
+ int role_val; /* scalar to avoid multiple string comparisons */
+} IVShmemState;
+
+/* registers for the Inter-VM shared memory device */
+enum ivshmem_registers {
+ INTRMASK = 0,
+ INTRSTATUS = 4,
+ IVPOSITION = 8,
+ DOORBELL = 12,
+};
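+/*
+ * Guest-visible usage sketch (illustrative, derived from the handlers
+ * below): a doorbell write packs the destination peer ID into bits 31:16
+ * and the interrupt vector into the low bits, so ringing vector 0 of peer 2
+ * is a 32-bit write of (2 << 16) | 0 to offset DOORBELL in BAR0. Reading
+ * IVPOSITION returns this guest's own peer ID once the shared memory has
+ * been mapped, and -1 before that.
+ */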
+
+static inline uint32_t ivshmem_has_feature(IVShmemState *ivs,
+ unsigned int feature) {
+ return (ivs->features & (1 << feature));
+}
+
+static inline bool is_power_of_two(uint64_t x) {
+ return (x & (x - 1)) == 0;
+}
+
+/* accessing registers - based on rtl8139 */
+static void ivshmem_update_irq(IVShmemState *s, int val)
+{
+ int isr;
+ isr = (s->intrstatus & s->intrmask) & 0xffffffff;
+
+ /* don't print ISR resets */
+ if (isr) {
+ IVSHMEM_DPRINTF("Set IRQ to %d (%04x %04x)\n",
+ isr ? 1 : 0, s->intrstatus, s->intrmask);
+ }
+
+ qemu_set_irq(s->dev.irq[0], (isr != 0));
+}
+
+static void ivshmem_IntrMask_write(IVShmemState *s, uint32_t val)
+{
+ IVSHMEM_DPRINTF("IntrMask write(w) val = 0x%04x\n", val);
+
+ s->intrmask = val;
+
+ ivshmem_update_irq(s, val);
+}
+
+static uint32_t ivshmem_IntrMask_read(IVShmemState *s)
+{
+ uint32_t ret = s->intrmask;
+
+ IVSHMEM_DPRINTF("intrmask read(w) val = 0x%04x\n", ret);
+
+ return ret;
+}
+
+static void ivshmem_IntrStatus_write(IVShmemState *s, uint32_t val)
+{
+ IVSHMEM_DPRINTF("IntrStatus write(w) val = 0x%04x\n", val);
+
+ s->intrstatus = val;
+
+ ivshmem_update_irq(s, val);
+}
+
+static uint32_t ivshmem_IntrStatus_read(IVShmemState *s)
+{
+ uint32_t ret = s->intrstatus;
+
+ /* reading ISR clears all interrupts */
+ s->intrstatus = 0;
+
+ ivshmem_update_irq(s, 0);
+
+ return ret;
+}
+
+static void ivshmem_io_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ IVShmemState *s = opaque;
+
+ uint16_t dest = val >> 16;
+ uint16_t vector = val & 0xff;
+
+ addr &= 0xfc;
+
+ IVSHMEM_DPRINTF("writing to addr " TARGET_FMT_plx "\n", addr);
+ switch (addr)
+ {
+ case INTRMASK:
+ ivshmem_IntrMask_write(s, val);
+ break;
+
+ case INTRSTATUS:
+ ivshmem_IntrStatus_write(s, val);
+ break;
+
+ case DOORBELL:
+ /* check that dest VM ID is reasonable */
+ if (dest > s->max_peer) {
+ IVSHMEM_DPRINTF("Invalid destination VM ID (%d)\n", dest);
+ break;
+ }
+
+ /* check doorbell range */
+ if (vector < s->peers[dest].nb_eventfds) {
+ IVSHMEM_DPRINTF("Notifying VM %d on vector %d\n", dest, vector);
+ event_notifier_set(&s->peers[dest].eventfds[vector]);
+ }
+ break;
+ default:
+ IVSHMEM_DPRINTF("Invalid VM Doorbell VM %d\n", dest);
+ }
+}
+
+static uint64_t ivshmem_io_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+
+ IVShmemState *s = opaque;
+ uint32_t ret;
+
+ switch (addr)
+ {
+ case INTRMASK:
+ ret = ivshmem_IntrMask_read(s);
+ break;
+
+ case INTRSTATUS:
+ ret = ivshmem_IntrStatus_read(s);
+ break;
+
+ case IVPOSITION:
+ /* return my VM ID if the memory is mapped */
+ if (s->shm_fd > 0) {
+ ret = s->vm_id;
+ } else {
+ ret = -1;
+ }
+ break;
+
+ default:
+ IVSHMEM_DPRINTF("why are we reading " TARGET_FMT_plx "\n", addr);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static const MemoryRegionOps ivshmem_mmio_ops = {
+ .read = ivshmem_io_read,
+ .write = ivshmem_io_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static void ivshmem_receive(void *opaque, const uint8_t *buf, int size)
+{
+ IVShmemState *s = opaque;
+
+ ivshmem_IntrStatus_write(s, *buf);
+
+ IVSHMEM_DPRINTF("ivshmem_receive 0x%02x\n", *buf);
+}
+
+static int ivshmem_can_receive(void * opaque)
+{
+ return 8;
+}
+
+static void ivshmem_event(void *opaque, int event)
+{
+ IVSHMEM_DPRINTF("ivshmem_event %d\n", event);
+}
+
+static void fake_irqfd(void *opaque, const uint8_t *buf, int size) {
+
+ EventfdEntry *entry = opaque;
+ PCIDevice *pdev = entry->pdev;
+
+ IVSHMEM_DPRINTF("interrupt on vector %p %d\n", pdev, entry->vector);
+ msix_notify(pdev, entry->vector);
+}
+
+static CharDriverState* create_eventfd_chr_device(void * opaque, EventNotifier *n,
+ int vector)
+{
+ /* create an event character device based on the passed eventfd */
+ IVShmemState *s = opaque;
+ CharDriverState * chr;
+ int eventfd = event_notifier_get_fd(n);
+
+ chr = qemu_chr_open_eventfd(eventfd);
+
+ if (chr == NULL) {
+ fprintf(stderr, "creating eventfd for eventfd %d failed\n", eventfd);
+ exit(-1);
+ }
+ qemu_chr_fe_claim_no_fail(chr);
+
+ /* if MSI is supported we need multiple interrupts */
+ if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
+ s->eventfd_table[vector].pdev = &s->dev;
+ s->eventfd_table[vector].vector = vector;
+
+ qemu_chr_add_handlers(chr, ivshmem_can_receive, fake_irqfd,
+ ivshmem_event, &s->eventfd_table[vector]);
+ } else {
+ qemu_chr_add_handlers(chr, ivshmem_can_receive, ivshmem_receive,
+ ivshmem_event, s);
+ }
+
+ return chr;
+
+}
+
+static int check_shm_size(IVShmemState *s, int fd) {
+ /* check that the guest isn't going to try and map more memory than the
+ * object has allocated; return -1 to indicate an error */
+
+ struct stat buf;
+
+ fstat(fd, &buf);
+
+ if (s->ivshmem_size > buf.st_size) {
+ fprintf(stderr,
+ "IVSHMEM ERROR: Requested memory size greater"
+ " than shared object size (%" PRIu64 " > %" PRIu64")\n",
+ s->ivshmem_size, (uint64_t)buf.st_size);
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+/* when we are not using the server we can create the shared memory BAR
+ * and map the memory immediately */
+static void create_shared_memory_BAR(IVShmemState *s, int fd) {
+
+ void * ptr;
+
+ s->shm_fd = fd;
+
+ ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+
+ memory_region_init_ram_ptr(&s->ivshmem, "ivshmem.bar2",
+ s->ivshmem_size, ptr);
+ vmstate_register_ram(&s->ivshmem, &s->dev.qdev);
+ memory_region_add_subregion(&s->bar, 0, &s->ivshmem);
+
+ /* region for shared memory */
+ pci_register_bar(&s->dev, 2, s->ivshmem_attr, &s->bar);
+}
+
+static void ivshmem_add_eventfd(IVShmemState *s, int posn, int i)
+{
+ memory_region_add_eventfd(&s->ivshmem_mmio,
+ DOORBELL,
+ 4,
+ true,
+ (posn << 16) | i,
+ &s->peers[posn].eventfds[i]);
+}
+
+static void ivshmem_del_eventfd(IVShmemState *s, int posn, int i)
+{
+ memory_region_del_eventfd(&s->ivshmem_mmio,
+ DOORBELL,
+ 4,
+ true,
+ (posn << 16) | i,
+ &s->peers[posn].eventfds[i]);
+}
+
+static void close_guest_eventfds(IVShmemState *s, int posn)
+{
+ int i, guest_curr_max;
+
+ if (!ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
+ return;
+ }
+
+ guest_curr_max = s->peers[posn].nb_eventfds;
+
+ memory_region_transaction_begin();
+ for (i = 0; i < guest_curr_max; i++) {
+ ivshmem_del_eventfd(s, posn, i);
+ }
+ memory_region_transaction_commit();
+ for (i = 0; i < guest_curr_max; i++) {
+ event_notifier_cleanup(&s->peers[posn].eventfds[i]);
+ }
+
+ g_free(s->peers[posn].eventfds);
+ s->peers[posn].nb_eventfds = 0;
+}
+
+/* this function increases the dynamic storage needed to store data about
+ * other guests */
+static void increase_dynamic_storage(IVShmemState *s, int new_min_size) {
+
+ int j, old_nb_alloc;
+
+ old_nb_alloc = s->nb_peers;
+
+ while (new_min_size >= s->nb_peers) {
+ s->nb_peers = s->nb_peers * 2;
+ }
+
+ IVSHMEM_DPRINTF("bumping storage to %d guests\n", s->nb_peers);
+ s->peers = g_realloc(s->peers, s->nb_peers * sizeof(Peer));
+
+ /* zero out new pointers */
+ for (j = old_nb_alloc; j < s->nb_peers; j++) {
+ s->peers[j].eventfds = NULL;
+ s->peers[j].nb_eventfds = 0;
+ }
+}
+
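+/*
+ * Handler for messages from the ivshmem server. Each message is a peer ID
+ * (a long) with an optional file descriptor passed over the socket:
+ * ID -1 plus an fd delivers the shared memory region itself; an ID with no
+ * fd either announces our own ID (if we have not seen it before) or tells
+ * us that an existing peer has gone away; and an ID plus an fd registers
+ * one more eventfd for that peer.
+ */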
+static void ivshmem_read(void *opaque, const uint8_t * buf, int flags)
+{
+ IVShmemState *s = opaque;
+ int incoming_fd, tmp_fd;
+ int guest_max_eventfd;
+ long incoming_posn;
+
+ memcpy(&incoming_posn, buf, sizeof(long));
+ /* pick off s->server_chr->msgfd and store it, posn should accompany msg */
+ tmp_fd = qemu_chr_fe_get_msgfd(s->server_chr);
+ IVSHMEM_DPRINTF("posn is %ld, fd is %d\n", incoming_posn, tmp_fd);
+
+ /* make sure we have enough space for this guest */
+ if (incoming_posn >= s->nb_peers) {
+ increase_dynamic_storage(s, incoming_posn);
+ }
+
+ if (tmp_fd == -1) {
+ /* if posn is positive and unseen before then this is our posn */
+ if ((incoming_posn >= 0) &&
+ (s->peers[incoming_posn].eventfds == NULL)) {
+ /* receive our posn */
+ s->vm_id = incoming_posn;
+ return;
+ } else {
+ /* otherwise an fd == -1 means an existing guest has gone away */
+ IVSHMEM_DPRINTF("posn %ld has gone away\n", incoming_posn);
+ close_guest_eventfds(s, incoming_posn);
+ return;
+ }
+ }
+
+ /* because of the implementation of get_msgfd, we need a dup */
+ incoming_fd = dup(tmp_fd);
+
+ if (incoming_fd == -1) {
+ fprintf(stderr, "could not allocate file descriptor %s\n",
+ strerror(errno));
+ return;
+ }
+
+ /* if the position is -1, then it's the shared memory region fd */
+ if (incoming_posn == -1) {
+
+ void * map_ptr;
+
+ s->max_peer = 0;
+
+ if (check_shm_size(s, incoming_fd) == -1) {
+ exit(-1);
+ }
+
+ /* mmap the region and map it into BAR2 */
+ map_ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED,
+ incoming_fd, 0);
+ memory_region_init_ram_ptr(&s->ivshmem,
+ "ivshmem.bar2", s->ivshmem_size, map_ptr);
+ vmstate_register_ram(&s->ivshmem, &s->dev.qdev);
+
+ IVSHMEM_DPRINTF("guest h/w addr = %" PRIu64 ", size = %" PRIu64 "\n",
+ s->ivshmem_offset, s->ivshmem_size);
+
+ memory_region_add_subregion(&s->bar, 0, &s->ivshmem);
+
+ /* only store the fd if it is successfully mapped */
+ s->shm_fd = incoming_fd;
+
+ return;
+ }
+
+ /* each guest has an array of eventfds, and we keep track of how many
+ * eventfds each guest has registered */
+ guest_max_eventfd = s->peers[incoming_posn].nb_eventfds;
+
+ if (guest_max_eventfd == 0) {
+ /* one eventfd per MSI vector */
+ s->peers[incoming_posn].eventfds = g_new(EventNotifier, s->vectors);
+ }
+
+ /* this is an eventfd for a particular guest VM */
+ IVSHMEM_DPRINTF("eventfds[%ld][%d] = %d\n", incoming_posn,
+ guest_max_eventfd, incoming_fd);
+ event_notifier_init_fd(&s->peers[incoming_posn].eventfds[guest_max_eventfd],
+ incoming_fd);
+
+ /* increment count for particular guest */
+ s->peers[incoming_posn].nb_eventfds++;
+
+ /* keep track of the maximum VM ID */
+ if (incoming_posn > s->max_peer) {
+ s->max_peer = incoming_posn;
+ }
+
+ if (incoming_posn == s->vm_id) {
+ s->eventfd_chr[guest_max_eventfd] = create_eventfd_chr_device(s,
+ &s->peers[s->vm_id].eventfds[guest_max_eventfd],
+ guest_max_eventfd);
+ }
+
+ if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
+ ivshmem_add_eventfd(s, incoming_posn, guest_max_eventfd);
+ }
+}
+
+/* Select the MSI-X vectors used by device.
+ * ivshmem maps events to vectors statically, so
+ * we just enable all vectors on init and after reset. */
+static void ivshmem_use_msix(IVShmemState * s)
+{
+ int i;
+
+ if (!msix_present(&s->dev)) {
+ return;
+ }
+
+ for (i = 0; i < s->vectors; i++) {
+ msix_vector_use(&s->dev, i);
+ }
+}
+
+static void ivshmem_reset(DeviceState *d)
+{
+ IVShmemState *s = DO_UPCAST(IVShmemState, dev.qdev, d);
+
+ s->intrstatus = 0;
+ ivshmem_use_msix(s);
+}
+
+static uint64_t ivshmem_get_size(IVShmemState * s) {
+
+ uint64_t value;
+ char *ptr;
+
+ value = strtoull(s->sizearg, &ptr, 10);
+ switch (*ptr) {
+ case 0: case 'M': case 'm':
+ value <<= 20;
+ break;
+ case 'G': case 'g':
+ value <<= 30;
+ break;
+ default:
+ fprintf(stderr, "qemu: invalid ram size: %s\n", s->sizearg);
+ exit(1);
+ }
+
+ /* BARs must be a power of 2 */
+ if (!is_power_of_two(value)) {
+ fprintf(stderr, "ivshmem: size must be power of 2\n");
+ exit(1);
+ }
+
+ return value;
+}
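+/*
+ * Illustrative size strings: "size=4M" (the same as the 4 MB default used
+ * when no size is given), "size=1G", and a bare "size=1024" (read as
+ * 1024 MB) are all accepted, while e.g. "size=1000M" is rejected because
+ * 1000 MB is not a power of two.
+ */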
+
+static void ivshmem_setup_msi(IVShmemState * s)
+{
+ if (msix_init_exclusive_bar(&s->dev, s->vectors, 1)) {
+ IVSHMEM_DPRINTF("msix initialization failed\n");
+ exit(1);
+ }
+
+ IVSHMEM_DPRINTF("msix initialized (%d vectors)\n", s->vectors);
+
+ /* allocate QEMU char devices for receiving interrupts */
+ s->eventfd_table = g_malloc0(s->vectors * sizeof(EventfdEntry));
+
+ ivshmem_use_msix(s);
+}
+
+static void ivshmem_save(QEMUFile* f, void *opaque)
+{
+ IVShmemState *proxy = opaque;
+
+ IVSHMEM_DPRINTF("ivshmem_save\n");
+ pci_device_save(&proxy->dev, f);
+
+ if (ivshmem_has_feature(proxy, IVSHMEM_MSI)) {
+ msix_save(&proxy->dev, f);
+ } else {
+ qemu_put_be32(f, proxy->intrstatus);
+ qemu_put_be32(f, proxy->intrmask);
+ }
+
+}
+
+static int ivshmem_load(QEMUFile* f, void *opaque, int version_id)
+{
+ IVSHMEM_DPRINTF("ivshmem_load\n");
+
+ IVShmemState *proxy = opaque;
+ int ret;
+
+ if (version_id > 0) {
+ return -EINVAL;
+ }
+
+ if (proxy->role_val == IVSHMEM_PEER) {
+ fprintf(stderr, "ivshmem: 'peer' devices are not migratable\n");
+ return -EINVAL;
+ }
+
+ ret = pci_device_load(&proxy->dev, f);
+ if (ret) {
+ return ret;
+ }
+
+ if (ivshmem_has_feature(proxy, IVSHMEM_MSI)) {
+ msix_load(&proxy->dev, f);
+ ivshmem_use_msix(proxy);
+ } else {
+ proxy->intrstatus = qemu_get_be32(f);
+ proxy->intrmask = qemu_get_be32(f);
+ }
+
+ return 0;
+}
+
+static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
+ uint32_t val, int len)
+{
+ pci_default_write_config(pci_dev, address, val, len);
+ msix_write_config(pci_dev, address, val, len);
+}
+
+static int pci_ivshmem_init(PCIDevice *dev)
+{
+ IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
+ uint8_t *pci_conf;
+
+ if (s->sizearg == NULL) {
+ s->ivshmem_size = 4 << 20; /* 4 MB default */
+ } else {
+ s->ivshmem_size = ivshmem_get_size(s);
+ }
+
+ register_savevm(&s->dev.qdev, "ivshmem", 0, 0, ivshmem_save, ivshmem_load,
+ dev);
+
+ /* IRQFD requires MSI */
+ if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD) &&
+ !ivshmem_has_feature(s, IVSHMEM_MSI)) {
+ fprintf(stderr, "ivshmem: ioeventfd/irqfd requires MSI\n");
+ exit(1);
+ }
+
+ /* check that role is reasonable */
+ if (s->role) {
+ if (strncmp(s->role, "peer", 5) == 0) {
+ s->role_val = IVSHMEM_PEER;
+ } else if (strncmp(s->role, "master", 7) == 0) {
+ s->role_val = IVSHMEM_MASTER;
+ } else {
+ fprintf(stderr, "ivshmem: 'role' must be 'peer' or 'master'\n");
+ exit(1);
+ }
+ } else {
+ s->role_val = IVSHMEM_MASTER; /* default */
+ }
+
+ if (s->role_val == IVSHMEM_PEER) {
+ error_set(&s->migration_blocker, QERR_DEVICE_FEATURE_BLOCKS_MIGRATION,
+ "peer mode", "ivshmem");
+ migrate_add_blocker(s->migration_blocker);
+ }
+
+ pci_conf = s->dev.config;
+ pci_conf[PCI_COMMAND] = PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
+
+ pci_config_set_interrupt_pin(pci_conf, 1);
+
+ s->shm_fd = 0;
+
+ memory_region_init_io(&s->ivshmem_mmio, &ivshmem_mmio_ops, s,
+ "ivshmem-mmio", IVSHMEM_REG_BAR_SIZE);
+
+ /* region for registers*/
+ pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY,
+ &s->ivshmem_mmio);
+
+ memory_region_init(&s->bar, "ivshmem-bar2-container", s->ivshmem_size);
+ s->ivshmem_attr = PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_PREFETCH;
+ if (s->ivshmem_64bit) {
+ s->ivshmem_attr |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+ }
+
+ if ((s->server_chr != NULL) &&
+ (strncmp(s->server_chr->filename, "unix:", 5) == 0)) {
+ /* if we get a UNIX socket as the parameter we will talk
+ * to the ivshmem server to receive the memory region */
+
+ if (s->shmobj != NULL) {
+ fprintf(stderr, "WARNING: do not specify both 'chardev' "
+ "and 'shm' with ivshmem\n");
+ }
+
+ IVSHMEM_DPRINTF("using shared memory server (socket = %s)\n",
+ s->server_chr->filename);
+
+ if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
+ ivshmem_setup_msi(s);
+ }
+
+ /* we allocate enough space for 16 guests and grow as needed */
+ s->nb_peers = 16;
+ s->vm_id = -1;
+
+ /* allocate/initialize space for interrupt handling */
+ s->peers = g_malloc0(s->nb_peers * sizeof(Peer));
+
+ pci_register_bar(&s->dev, 2, s->ivshmem_attr, &s->bar);
+
+ s->eventfd_chr = g_malloc0(s->vectors * sizeof(CharDriverState *));
+
+ qemu_chr_add_handlers(s->server_chr, ivshmem_can_receive, ivshmem_read,
+ ivshmem_event, s);
+ } else {
+ /* just map the file immediately, we're not using a server */
+ int fd;
+
+ if (s->shmobj == NULL) {
+ fprintf(stderr, "Must specify 'chardev' or 'shm' to ivshmem\n");
+ exit(1);
+ }
+
+ IVSHMEM_DPRINTF("using shm_open (shm object = %s)\n", s->shmobj);
+
+ /* try opening with O_EXCL and if it succeeds zero the memory
+ * by truncating to 0 */
+ if ((fd = shm_open(s->shmobj, O_CREAT|O_RDWR|O_EXCL,
+ S_IRWXU|S_IRWXG|S_IRWXO)) > 0) {
+ /* truncate file to length PCI device's memory */
+ if (ftruncate(fd, s->ivshmem_size) != 0) {
+ fprintf(stderr, "ivshmem: could not truncate shared file\n");
+ }
+
+ } else if ((fd = shm_open(s->shmobj, O_CREAT|O_RDWR,
+ S_IRWXU|S_IRWXG|S_IRWXO)) < 0) {
+ fprintf(stderr, "ivshmem: could not open shared file\n");
+ exit(-1);
+
+ }
+
+ if (check_shm_size(s, fd) == -1) {
+ exit(-1);
+ }
+
+ create_shared_memory_BAR(s, fd);
+
+ }
+
+ s->dev.config_write = ivshmem_write_config;
+
+ return 0;
+}
+
+static void pci_ivshmem_uninit(PCIDevice *dev)
+{
+ IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
+
+ if (s->migration_blocker) {
+ migrate_del_blocker(s->migration_blocker);
+ error_free(s->migration_blocker);
+ }
+
+ memory_region_destroy(&s->ivshmem_mmio);
+ memory_region_del_subregion(&s->bar, &s->ivshmem);
+ vmstate_unregister_ram(&s->ivshmem, &s->dev.qdev);
+ memory_region_destroy(&s->ivshmem);
+ memory_region_destroy(&s->bar);
+ unregister_savevm(&dev->qdev, "ivshmem", s);
+}
+
+static Property ivshmem_properties[] = {
+ DEFINE_PROP_CHR("chardev", IVShmemState, server_chr),
+ DEFINE_PROP_STRING("size", IVShmemState, sizearg),
+ DEFINE_PROP_UINT32("vectors", IVShmemState, vectors, 1),
+ DEFINE_PROP_BIT("ioeventfd", IVShmemState, features, IVSHMEM_IOEVENTFD, false),
+ DEFINE_PROP_BIT("msi", IVShmemState, features, IVSHMEM_MSI, true),
+ DEFINE_PROP_STRING("shm", IVShmemState, shmobj),
+ DEFINE_PROP_STRING("role", IVShmemState, role),
+ DEFINE_PROP_UINT32("use64", IVShmemState, ivshmem_64bit, 1),
+ DEFINE_PROP_END_OF_LIST(),
+};
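+/*
+ * Example command lines exercising these properties (the chardev id, shm
+ * name and socket path are placeholders):
+ *
+ *   -device ivshmem,shm=ivshmem_region,size=1G
+ *
+ * for the standalone shm_open() mode, or
+ *
+ *   -chardev socket,path=/tmp/ivshmem_socket,id=ivs
+ *   -device ivshmem,chardev=ivs,size=1G,msi=on,ioeventfd=on,vectors=4
+ *
+ * when an external ivshmem server distributes the memory and eventfds.
+ */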
+
+static void ivshmem_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ k->init = pci_ivshmem_init;
+ k->exit = pci_ivshmem_uninit;
+ k->vendor_id = PCI_VENDOR_ID_IVSHMEM;
+ k->device_id = PCI_DEVICE_ID_IVSHMEM;
+ k->class_id = PCI_CLASS_MEMORY_RAM;
+ dc->reset = ivshmem_reset;
+ dc->props = ivshmem_properties;
+}
+
+static const TypeInfo ivshmem_info = {
+ .name = "ivshmem",
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(IVShmemState),
+ .class_init = ivshmem_class_init,
+};
+
+static void ivshmem_register_types(void)
+{
+ type_register_static(&ivshmem_info);
+}
+
+type_init(ivshmem_register_types)
diff --git a/hw/misc/lm32_sys.c b/hw/misc/lm32_sys.c
new file mode 100644
index 0000000..33a3b80
--- /dev/null
+++ b/hw/misc/lm32_sys.c
@@ -0,0 +1,172 @@
+/*
+ * QEMU model of the LatticeMico32 system control block.
+ *
+ * Copyright (c) 2010 Michael Walle <michael@walle.cc>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * This model is mainly intended for testing purposes and does not correspond
+ * to any real hardware. On the one hand it provides a control register
+ * (R_CTRL); on the other hand it supports the lm32 tests.
+ *
+ * A write to the control register causes a system shutdown.
+ * Tests first write a pointer to the test name to the test name register
+ * (R_TESTNAME) and then write zero to the pass/fail register (R_PASSFAIL) if
+ * the test passed, or any non-zero value if it failed.
+ */
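+/*
+ * Illustrative guest-side sequence (assuming the default "base" address of
+ * 0xffff0000 set by the property below): store the address of the test name
+ * string in R_TESTNAME, then write 0 to R_PASSFAIL on success:
+ *
+ * volatile uint32_t *sys = (volatile uint32_t *)0xffff0000;
+ * sys[R_TESTNAME] = (uint32_t)"tc_example";
+ * sys[R_PASSFAIL] = 0;
+ */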
+
+#include "hw/hw.h"
+#include "hw/sysbus.h"
+#include "trace.h"
+#include "qemu/log.h"
+#include "qemu/error-report.h"
+#include "sysemu/sysemu.h"
+
+enum {
+ R_CTRL = 0,
+ R_PASSFAIL,
+ R_TESTNAME,
+ R_MAX
+};
+
+#define MAX_TESTNAME_LEN 16
+
+struct LM32SysState {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+ uint32_t base;
+ uint32_t regs[R_MAX];
+ uint8_t testname[MAX_TESTNAME_LEN];
+};
+typedef struct LM32SysState LM32SysState;
+
+static void copy_testname(LM32SysState *s)
+{
+ cpu_physical_memory_read(s->regs[R_TESTNAME], s->testname,
+ MAX_TESTNAME_LEN);
+ s->testname[MAX_TESTNAME_LEN - 1] = '\0';
+}
+
+static void sys_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ LM32SysState *s = opaque;
+ char *testname;
+
+ trace_lm32_sys_memory_write(addr, value);
+
+ addr >>= 2;
+ switch (addr) {
+ case R_CTRL:
+ qemu_system_shutdown_request();
+ break;
+ case R_PASSFAIL:
+ s->regs[addr] = value;
+ testname = (char *)s->testname;
+ qemu_log("TC %-16s %s\n", testname, (value) ? "FAILED" : "OK");
+ break;
+ case R_TESTNAME:
+ s->regs[addr] = value;
+ copy_testname(s);
+ break;
+
+ default:
+ error_report("lm32_sys: write access to unknown register 0x"
+ TARGET_FMT_plx, addr << 2);
+ break;
+ }
+}
+
+static bool sys_ops_accepts(void *opaque, hwaddr addr,
+ unsigned size, bool is_write)
+{
+ return is_write && size == 4;
+}
+
+static const MemoryRegionOps sys_ops = {
+ .write = sys_write,
+ .valid.accepts = sys_ops_accepts,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void sys_reset(DeviceState *d)
+{
+ LM32SysState *s = container_of(d, LM32SysState, busdev.qdev);
+ int i;
+
+ for (i = 0; i < R_MAX; i++) {
+ s->regs[i] = 0;
+ }
+ memset(s->testname, 0, MAX_TESTNAME_LEN);
+}
+
+static int lm32_sys_init(SysBusDevice *dev)
+{
+ LM32SysState *s = FROM_SYSBUS(typeof(*s), dev);
+
+ memory_region_init_io(&s->iomem, &sys_ops, s, "sys", R_MAX * 4);
+ sysbus_init_mmio(dev, &s->iomem);
+
+ /* Note: This device is not created during board initialization;
+ * instead it has to be added with the -device parameter. Therefore,
+ * the device maps itself. */
+ sysbus_mmio_map(dev, 0, s->base);
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_lm32_sys = {
+ .name = "lm32-sys",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, LM32SysState, R_MAX),
+ VMSTATE_BUFFER(testname, LM32SysState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static Property lm32_sys_properties[] = {
+ DEFINE_PROP_UINT32("base", LM32SysState, base, 0xffff0000),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void lm32_sys_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+
+ k->init = lm32_sys_init;
+ dc->reset = sys_reset;
+ dc->vmsd = &vmstate_lm32_sys;
+ dc->props = lm32_sys_properties;
+}
+
+static const TypeInfo lm32_sys_info = {
+ .name = "lm32-sys",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(LM32SysState),
+ .class_init = lm32_sys_class_init,
+};
+
+static void lm32_sys_register_types(void)
+{
+ type_register_static(&lm32_sys_info);
+}
+
+type_init(lm32_sys_register_types)
diff --git a/hw/misc/macio/Makefile.objs b/hw/misc/macio/Makefile.objs
new file mode 100644
index 0000000..ef7ac24
--- /dev/null
+++ b/hw/misc/macio/Makefile.objs
@@ -0,0 +1,3 @@
+common-obj-y += macio.o
+common-obj-$(CONFIG_CUDA) += cuda.o
+common-obj-$(CONFIG_MAC_DBDMA) += mac_dbdma.o
diff --git a/hw/misc/macio/cuda.c b/hw/misc/macio/cuda.c
new file mode 100644
index 0000000..f797796
--- /dev/null
+++ b/hw/misc/macio/cuda.c
@@ -0,0 +1,740 @@
+/*
+ * QEMU PowerMac CUDA device support
+ *
+ * Copyright (c) 2004-2007 Fabrice Bellard
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/ppc/mac.h"
+#include "hw/input/adb.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+
+/* XXX: implement all timer modes */
+
+/* debug CUDA */
+//#define DEBUG_CUDA
+
+/* debug CUDA packets */
+//#define DEBUG_CUDA_PACKET
+
+#ifdef DEBUG_CUDA
+#define CUDA_DPRINTF(fmt, ...) \
+ do { printf("CUDA: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define CUDA_DPRINTF(fmt, ...)
+#endif
+
+/* Bits in B data register: all active low */
+#define TREQ 0x08 /* Transfer request (input) */
+#define TACK 0x10 /* Transfer acknowledge (output) */
+#define TIP 0x20 /* Transfer in progress (output) */
+
+/* Bits in ACR */
+#define SR_CTRL 0x1c /* Shift register control bits */
+#define SR_EXT 0x0c /* Shift on external clock */
+#define SR_OUT 0x10 /* Shift out if 1 */
+
+/* Bits in IFR and IER */
+#define IER_SET 0x80 /* set bits in IER */
+#define IER_CLR 0 /* clear bits in IER */
+#define SR_INT 0x04 /* Shift register full/empty */
+#define T1_INT 0x40 /* Timer 1 interrupt */
+#define T2_INT 0x20 /* Timer 2 interrupt */
+
+/* Bits in ACR */
+#define T1MODE 0xc0 /* Timer 1 mode */
+#define T1MODE_CONT 0x40 /* continuous interrupts */
+
+/* commands (1st byte) */
+#define ADB_PACKET 0
+#define CUDA_PACKET 1
+#define ERROR_PACKET 2
+#define TIMER_PACKET 3
+#define POWER_PACKET 4
+#define MACIIC_PACKET 5
+#define PMU_PACKET 6
+
+
+/* CUDA commands (2nd byte) */
+#define CUDA_WARM_START 0x0
+#define CUDA_AUTOPOLL 0x1
+#define CUDA_GET_6805_ADDR 0x2
+#define CUDA_GET_TIME 0x3
+#define CUDA_GET_PRAM 0x7
+#define CUDA_SET_6805_ADDR 0x8
+#define CUDA_SET_TIME 0x9
+#define CUDA_POWERDOWN 0xa
+#define CUDA_POWERUP_TIME 0xb
+#define CUDA_SET_PRAM 0xc
+#define CUDA_MS_RESET 0xd
+#define CUDA_SEND_DFAC 0xe
+#define CUDA_BATTERY_SWAP_SENSE 0x10
+#define CUDA_RESET_SYSTEM 0x11
+#define CUDA_SET_IPL 0x12
+#define CUDA_FILE_SERVER_FLAG 0x13
+#define CUDA_SET_AUTO_RATE 0x14
+#define CUDA_GET_AUTO_RATE 0x16
+#define CUDA_SET_DEVICE_LIST 0x19
+#define CUDA_GET_DEVICE_LIST 0x1a
+#define CUDA_SET_ONE_SECOND_MODE 0x1b
+#define CUDA_SET_POWER_MESSAGES 0x21
+#define CUDA_GET_SET_IIC 0x22
+#define CUDA_WAKEUP 0x23
+#define CUDA_TIMER_TICKLE 0x24
+#define CUDA_COMBINED_FORMAT_IIC 0x25
+
+#define CUDA_TIMER_FREQ (4700000 / 6)
+#define CUDA_ADB_POLL_FREQ 50
+
+/* CUDA returns time_t's offset from Jan 1, 1904, not 1970 */
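+/* 2082844800 = (66 * 365 + 17 leap days) * 86400 seconds */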
+#define RTC_OFFSET 2082844800
+
+static void cuda_update(CUDAState *s);
+static void cuda_receive_packet_from_host(CUDAState *s,
+ const uint8_t *data, int len);
+static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
+ int64_t current_time);
+
+static void cuda_update_irq(CUDAState *s)
+{
+ if (s->ifr & s->ier & (SR_INT | T1_INT)) {
+ qemu_irq_raise(s->irq);
+ } else {
+ qemu_irq_lower(s->irq);
+ }
+}
+
+static unsigned int get_counter(CUDATimer *s)
+{
+ int64_t d;
+ unsigned int counter;
+
+ d = muldiv64(qemu_get_clock_ns(vm_clock) - s->load_time,
+ CUDA_TIMER_FREQ, get_ticks_per_sec());
+ if (s->index == 0) {
+ /* the timer goes down from latch to -1 (period of latch + 2) */
+ if (d <= (s->counter_value + 1)) {
+ counter = (s->counter_value - d) & 0xffff;
+ } else {
+ counter = (d - (s->counter_value + 1)) % (s->latch + 2);
+ counter = (s->latch - counter) & 0xffff;
+ }
+ } else {
+ counter = (s->counter_value - d) & 0xffff;
+ }
+ return counter;
+}
+
+static void set_counter(CUDAState *s, CUDATimer *ti, unsigned int val)
+{
+ CUDA_DPRINTF("T%d.counter=%d\n", 1 + (ti->timer == NULL), val);
+ ti->load_time = qemu_get_clock_ns(vm_clock);
+ ti->counter_value = val;
+ cuda_timer_update(s, ti, ti->load_time);
+}
+
+static int64_t get_next_irq_time(CUDATimer *s, int64_t current_time)
+{
+ int64_t d, next_time;
+ unsigned int counter;
+
+ /* current counter value */
+ d = muldiv64(current_time - s->load_time,
+ CUDA_TIMER_FREQ, get_ticks_per_sec());
+ /* the timer goes down from latch to -1 (period of latch + 2) */
+ if (d <= (s->counter_value + 1)) {
+ counter = (s->counter_value - d) & 0xffff;
+ } else {
+ counter = (d - (s->counter_value + 1)) % (s->latch + 2);
+ counter = (s->latch - counter) & 0xffff;
+ }
+
+ /* Note: we consider that the IRQ is raised when the counter reaches 0 */
+ if (counter == 0xffff) {
+ next_time = d + s->latch + 1;
+ } else if (counter == 0) {
+ next_time = d + s->latch + 2;
+ } else {
+ next_time = d + counter;
+ }
+ CUDA_DPRINTF("latch=%d counter=%" PRId64 " delta_next=%" PRId64 "\n",
+ s->latch, d, next_time - d);
+ next_time = muldiv64(next_time, get_ticks_per_sec(), CUDA_TIMER_FREQ) +
+ s->load_time;
+ if (next_time <= current_time)
+ next_time = current_time + 1;
+ return next_time;
+}
+
+static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
+ int64_t current_time)
+{
+ if (!ti->timer)
+ return;
+ if ((s->acr & T1MODE) != T1MODE_CONT) {
+ qemu_del_timer(ti->timer);
+ } else {
+ ti->next_irq_time = get_next_irq_time(ti, current_time);
+ qemu_mod_timer(ti->timer, ti->next_irq_time);
+ }
+}
+
+static void cuda_timer1(void *opaque)
+{
+ CUDAState *s = opaque;
+ CUDATimer *ti = &s->timers[0];
+
+ cuda_timer_update(s, ti, ti->next_irq_time);
+ s->ifr |= T1_INT;
+ cuda_update_irq(s);
+}
+
+static uint32_t cuda_readb(void *opaque, hwaddr addr)
+{
+ CUDAState *s = opaque;
+ uint32_t val;
+
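+ /* the 16 VIA registers are spaced 0x200 bytes apart in the 0x2000 window */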
+ addr = (addr >> 9) & 0xf;
+ switch(addr) {
+ case 0:
+ val = s->b;
+ break;
+ case 1:
+ val = s->a;
+ break;
+ case 2:
+ val = s->dirb;
+ break;
+ case 3:
+ val = s->dira;
+ break;
+ case 4:
+ val = get_counter(&s->timers[0]) & 0xff;
+ s->ifr &= ~T1_INT;
+ cuda_update_irq(s);
+ break;
+ case 5:
+ val = get_counter(&s->timers[0]) >> 8;
+ cuda_update_irq(s);
+ break;
+ case 6:
+ val = s->timers[0].latch & 0xff;
+ break;
+ case 7:
+ /* XXX: check this */
+ val = (s->timers[0].latch >> 8) & 0xff;
+ break;
+ case 8:
+ val = get_counter(&s->timers[1]) & 0xff;
+ s->ifr &= ~T2_INT;
+ break;
+ case 9:
+ val = get_counter(&s->timers[1]) >> 8;
+ break;
+ case 10:
+ val = s->sr;
+ s->ifr &= ~SR_INT;
+ cuda_update_irq(s);
+ break;
+ case 11:
+ val = s->acr;
+ break;
+ case 12:
+ val = s->pcr;
+ break;
+ case 13:
+ val = s->ifr;
+ if (s->ifr & s->ier)
+ val |= 0x80;
+ break;
+ case 14:
+ val = s->ier | 0x80;
+ break;
+ default:
+ case 15:
+ val = s->anh;
+ break;
+ }
+ if (addr != 13 || val != 0) {
+ CUDA_DPRINTF("read: reg=0x%x val=%02x\n", (int)addr, val);
+ }
+
+ return val;
+}
+
+static void cuda_writeb(void *opaque, hwaddr addr, uint32_t val)
+{
+ CUDAState *s = opaque;
+
+ addr = (addr >> 9) & 0xf;
+ CUDA_DPRINTF("write: reg=0x%x val=%02x\n", (int)addr, val);
+
+ switch(addr) {
+ case 0:
+ s->b = val;
+ cuda_update(s);
+ break;
+ case 1:
+ s->a = val;
+ break;
+ case 2:
+ s->dirb = val;
+ break;
+ case 3:
+ s->dira = val;
+ break;
+ case 4:
+ s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
+ cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
+ break;
+ case 5:
+ s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
+ s->ifr &= ~T1_INT;
+ set_counter(s, &s->timers[0], s->timers[0].latch);
+ break;
+ case 6:
+ s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
+ cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
+ break;
+ case 7:
+ s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
+ s->ifr &= ~T1_INT;
+ cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
+ break;
+ case 8:
+ s->timers[1].latch = val;
+ set_counter(s, &s->timers[1], val);
+ break;
+ case 9:
+ set_counter(s, &s->timers[1], (val << 8) | s->timers[1].latch);
+ break;
+ case 10:
+ s->sr = val;
+ break;
+ case 11:
+ s->acr = val;
+ cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock));
+ cuda_update(s);
+ break;
+ case 12:
+ s->pcr = val;
+ break;
+ case 13:
+ /* reset bits */
+ s->ifr &= ~val;
+ cuda_update_irq(s);
+ break;
+ case 14:
+ if (val & IER_SET) {
+ /* set bits */
+ s->ier |= val & 0x7f;
+ } else {
+ /* reset bits */
+ s->ier &= ~val;
+ }
+ cuda_update_irq(s);
+ break;
+ default:
+ case 15:
+ s->anh = val;
+ break;
+ }
+}
+
+/* NOTE: TIP and TREQ are negated */
+static void cuda_update(CUDAState *s)
+{
+ int packet_received, len;
+
+ packet_received = 0;
+ if (!(s->b & TIP)) {
+ /* transfer requested from host */
+
+ if (s->acr & SR_OUT) {
+ /* data output */
+ if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
+ if (s->data_out_index < sizeof(s->data_out)) {
+ CUDA_DPRINTF("send: %02x\n", s->sr);
+ s->data_out[s->data_out_index++] = s->sr;
+ s->ifr |= SR_INT;
+ cuda_update_irq(s);
+ }
+ }
+ } else {
+ if (s->data_in_index < s->data_in_size) {
+ /* data input */
+ if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
+ s->sr = s->data_in[s->data_in_index++];
+ CUDA_DPRINTF("recv: %02x\n", s->sr);
+ /* indicate end of transfer */
+ if (s->data_in_index >= s->data_in_size) {
+ s->b = (s->b | TREQ);
+ }
+ s->ifr |= SR_INT;
+ cuda_update_irq(s);
+ }
+ }
+ }
+ } else {
+ /* no transfer requested: handle sync case */
+ if ((s->last_b & TIP) && (s->b & TACK) != (s->last_b & TACK)) {
+ /* update TREQ state each time TACK changes state */
+ if (s->b & TACK)
+ s->b = (s->b | TREQ);
+ else
+ s->b = (s->b & ~TREQ);
+ s->ifr |= SR_INT;
+ cuda_update_irq(s);
+ } else {
+ if (!(s->last_b & TIP)) {
+ /* handle end of host to cuda transfer */
+ packet_received = (s->data_out_index > 0);
+ /* always an IRQ at the end of transfer */
+ s->ifr |= SR_INT;
+ cuda_update_irq(s);
+ }
+ /* signal if there is data to read */
+ if (s->data_in_index < s->data_in_size) {
+ s->b = (s->b & ~TREQ);
+ }
+ }
+ }
+
+ s->last_acr = s->acr;
+ s->last_b = s->b;
+
+ /* NOTE: cuda_receive_packet_from_host() can call cuda_update()
+ recursively */
+ if (packet_received) {
+ len = s->data_out_index;
+ s->data_out_index = 0;
+ cuda_receive_packet_from_host(s, s->data_out, len);
+ }
+}
+
+static void cuda_send_packet_to_host(CUDAState *s,
+ const uint8_t *data, int len)
+{
+#ifdef DEBUG_CUDA_PACKET
+ {
+ int i;
+ printf("cuda_send_packet_to_host:\n");
+ for(i = 0; i < len; i++)
+ printf(" %02x", data[i]);
+ printf("\n");
+ }
+#endif
+ memcpy(s->data_in, data, len);
+ s->data_in_size = len;
+ s->data_in_index = 0;
+ cuda_update(s);
+ s->ifr |= SR_INT;
+ cuda_update_irq(s);
+}
+
+static void cuda_adb_poll(void *opaque)
+{
+ CUDAState *s = opaque;
+ uint8_t obuf[ADB_MAX_OUT_LEN + 2];
+ int olen;
+
+ olen = adb_poll(&s->adb_bus, obuf + 2);
+ if (olen > 0) {
+ obuf[0] = ADB_PACKET;
+ obuf[1] = 0x40; /* polled data */
+ cuda_send_packet_to_host(s, obuf, olen + 2);
+ }
+ qemu_mod_timer(s->adb_poll_timer,
+ qemu_get_clock_ns(vm_clock) +
+ (get_ticks_per_sec() / CUDA_ADB_POLL_FREQ));
+}
+
+static void cuda_receive_packet(CUDAState *s,
+ const uint8_t *data, int len)
+{
+ uint8_t obuf[16];
+ int autopoll;
+ uint32_t ti;
+
+ switch(data[0]) {
+ case CUDA_AUTOPOLL:
+ autopoll = (data[1] != 0);
+ if (autopoll != s->autopoll) {
+ s->autopoll = autopoll;
+ if (autopoll) {
+ qemu_mod_timer(s->adb_poll_timer,
+ qemu_get_clock_ns(vm_clock) +
+ (get_ticks_per_sec() / CUDA_ADB_POLL_FREQ));
+ } else {
+ qemu_del_timer(s->adb_poll_timer);
+ }
+ }
+ obuf[0] = CUDA_PACKET;
+ obuf[1] = data[1];
+ cuda_send_packet_to_host(s, obuf, 2);
+ break;
+ case CUDA_SET_TIME:
+ ti = (((uint32_t)data[1]) << 24) + (((uint32_t)data[2]) << 16) + (((uint32_t)data[3]) << 8) + data[4];
+ s->tick_offset = ti - (qemu_get_clock_ns(vm_clock) / get_ticks_per_sec());
+ obuf[0] = CUDA_PACKET;
+ obuf[1] = 0;
+ obuf[2] = 0;
+ cuda_send_packet_to_host(s, obuf, 3);
+ break;
+ case CUDA_GET_TIME:
+ ti = s->tick_offset + (qemu_get_clock_ns(vm_clock) / get_ticks_per_sec());
+ obuf[0] = CUDA_PACKET;
+ obuf[1] = 0;
+ obuf[2] = 0;
+ obuf[3] = ti >> 24;
+ obuf[4] = ti >> 16;
+ obuf[5] = ti >> 8;
+ obuf[6] = ti;
+ cuda_send_packet_to_host(s, obuf, 7);
+ break;
+ case CUDA_FILE_SERVER_FLAG:
+ case CUDA_SET_DEVICE_LIST:
+ case CUDA_SET_AUTO_RATE:
+ case CUDA_SET_POWER_MESSAGES:
+ obuf[0] = CUDA_PACKET;
+ obuf[1] = 0;
+ cuda_send_packet_to_host(s, obuf, 2);
+ break;
+ case CUDA_POWERDOWN:
+ obuf[0] = CUDA_PACKET;
+ obuf[1] = 0;
+ cuda_send_packet_to_host(s, obuf, 2);
+ qemu_system_shutdown_request();
+ break;
+ case CUDA_RESET_SYSTEM:
+ obuf[0] = CUDA_PACKET;
+ obuf[1] = 0;
+ cuda_send_packet_to_host(s, obuf, 2);
+ qemu_system_reset_request();
+ break;
+ default:
+ break;
+ }
+}
+
+static void cuda_receive_packet_from_host(CUDAState *s,
+ const uint8_t *data, int len)
+{
+#ifdef DEBUG_CUDA_PACKET
+ {
+ int i;
+ printf("cuda_receive_packet_from_host:\n");
+ for(i = 0; i < len; i++)
+ printf(" %02x", data[i]);
+ printf("\n");
+ }
+#endif
+ switch(data[0]) {
+ case ADB_PACKET:
+ {
+ uint8_t obuf[ADB_MAX_OUT_LEN + 2];
+ int olen;
+ olen = adb_request(&s->adb_bus, obuf + 2, data + 1, len - 1);
+ if (olen > 0) {
+ obuf[0] = ADB_PACKET;
+ obuf[1] = 0x00;
+ } else {
+ /* error */
+ obuf[0] = ADB_PACKET;
+ obuf[1] = -olen;
+ olen = 0;
+ }
+ cuda_send_packet_to_host(s, obuf, olen + 2);
+ }
+ break;
+ case CUDA_PACKET:
+ cuda_receive_packet(s, data + 1, len - 1);
+ break;
+ }
+}
+
+static void cuda_writew (void *opaque, hwaddr addr, uint32_t value)
+{
+}
+
+static void cuda_writel (void *opaque, hwaddr addr, uint32_t value)
+{
+}
+
+static uint32_t cuda_readw (void *opaque, hwaddr addr)
+{
+ return 0;
+}
+
+static uint32_t cuda_readl (void *opaque, hwaddr addr)
+{
+ return 0;
+}
+
+static const MemoryRegionOps cuda_ops = {
+ .old_mmio = {
+ .write = {
+ cuda_writeb,
+ cuda_writew,
+ cuda_writel,
+ },
+ .read = {
+ cuda_readb,
+ cuda_readw,
+ cuda_readl,
+ },
+ },
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static bool cuda_timer_exist(void *opaque, int version_id)
+{
+ CUDATimer *s = opaque;
+
+ return s->timer != NULL;
+}
+
+static const VMStateDescription vmstate_cuda_timer = {
+ .name = "cuda_timer",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .minimum_version_id_old = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT16(latch, CUDATimer),
+ VMSTATE_UINT16(counter_value, CUDATimer),
+ VMSTATE_INT64(load_time, CUDATimer),
+ VMSTATE_INT64(next_irq_time, CUDATimer),
+ VMSTATE_TIMER_TEST(timer, CUDATimer, cuda_timer_exist),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_cuda = {
+ .name = "cuda",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(a, CUDAState),
+ VMSTATE_UINT8(b, CUDAState),
+ VMSTATE_UINT8(dira, CUDAState),
+ VMSTATE_UINT8(dirb, CUDAState),
+ VMSTATE_UINT8(sr, CUDAState),
+ VMSTATE_UINT8(acr, CUDAState),
+ VMSTATE_UINT8(pcr, CUDAState),
+ VMSTATE_UINT8(ifr, CUDAState),
+ VMSTATE_UINT8(ier, CUDAState),
+ VMSTATE_UINT8(anh, CUDAState),
+ VMSTATE_INT32(data_in_size, CUDAState),
+ VMSTATE_INT32(data_in_index, CUDAState),
+ VMSTATE_INT32(data_out_index, CUDAState),
+ VMSTATE_UINT8(autopoll, CUDAState),
+ VMSTATE_BUFFER(data_in, CUDAState),
+ VMSTATE_BUFFER(data_out, CUDAState),
+ VMSTATE_UINT32(tick_offset, CUDAState),
+ VMSTATE_STRUCT_ARRAY(timers, CUDAState, 2, 1,
+ vmstate_cuda_timer, CUDATimer),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void cuda_reset(DeviceState *dev)
+{
+ CUDAState *s = CUDA(dev);
+
+ s->b = 0;
+ s->a = 0;
+ s->dirb = 0;
+ s->dira = 0;
+ s->sr = 0;
+ s->acr = 0;
+ s->pcr = 0;
+ s->ifr = 0;
+ s->ier = 0;
+ // s->ier = T1_INT | SR_INT;
+ s->anh = 0;
+ s->data_in_size = 0;
+ s->data_in_index = 0;
+ s->data_out_index = 0;
+ s->autopoll = 0;
+
+ s->timers[0].latch = 0xffff;
+ set_counter(s, &s->timers[0], 0xffff);
+
+ s->timers[1].latch = 0;
+ set_counter(s, &s->timers[1], 0xffff);
+}
+
+static void cuda_realizefn(DeviceState *dev, Error **errp)
+{
+ CUDAState *s = CUDA(dev);
+ struct tm tm;
+
+ s->timers[0].timer = qemu_new_timer_ns(vm_clock, cuda_timer1, s);
+
+ qemu_get_timedate(&tm, 0);
+ s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET;
+
+ s->adb_poll_timer = qemu_new_timer_ns(vm_clock, cuda_adb_poll, s);
+}
+
+static void cuda_initfn(Object *obj)
+{
+ SysBusDevice *d = SYS_BUS_DEVICE(obj);
+ CUDAState *s = CUDA(obj);
+ int i;
+
+ memory_region_init_io(&s->mem, &cuda_ops, s, "cuda", 0x2000);
+ sysbus_init_mmio(d, &s->mem);
+ sysbus_init_irq(d, &s->irq);
+
+ for (i = 0; i < ARRAY_SIZE(s->timers); i++) {
+ s->timers[i].index = i;
+ }
+
+ qbus_create_inplace((BusState *)&s->adb_bus, TYPE_ADB_BUS, DEVICE(obj),
+ "adb.0");
+}
+
+static void cuda_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->realize = cuda_realizefn;
+ dc->reset = cuda_reset;
+ dc->vmsd = &vmstate_cuda;
+}
+
+static const TypeInfo cuda_type_info = {
+ .name = TYPE_CUDA,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(CUDAState),
+ .instance_init = cuda_initfn,
+ .class_init = cuda_class_init,
+};
+
+static void cuda_register_types(void)
+{
+ type_register_static(&cuda_type_info);
+}
+
+type_init(cuda_register_types)
diff --git a/hw/misc/macio/mac_dbdma.c b/hw/misc/macio/mac_dbdma.c
new file mode 100644
index 0000000..a2363bb
--- /dev/null
+++ b/hw/misc/macio/mac_dbdma.c
@@ -0,0 +1,859 @@
+/*
+ * PowerMac descriptor-based DMA emulation
+ *
+ * Copyright (c) 2005-2007 Fabrice Bellard
+ * Copyright (c) 2007 Jocelyn Mayer
+ * Copyright (c) 2009 Laurent Vivier
+ *
+ * some parts from linux-2.6.28, arch/powerpc/include/asm/dbdma.h
+ *
+ * Definitions for using the Apple Descriptor-Based DMA controller
+ * in Power Macintosh computers.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * some parts from mol 0.9.71
+ *
+ * Descriptor based DMA emulation
+ *
+ * Copyright (C) 1998-2004 Samuel Rydh (samuel@ibrium.se)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/isa/isa.h"
+#include "hw/ppc/mac_dbdma.h"
+#include "qemu/main-loop.h"
+
+/* debug DBDMA */
+//#define DEBUG_DBDMA
+
+#ifdef DEBUG_DBDMA
+#define DBDMA_DPRINTF(fmt, ...) \
+ do { printf("DBDMA: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DBDMA_DPRINTF(fmt, ...)
+#endif
+
+
+/*
+ * DBDMA control/status registers. All little-endian.
+ */
+
+#define DBDMA_CONTROL 0x00
+#define DBDMA_STATUS 0x01
+#define DBDMA_CMDPTR_HI 0x02
+#define DBDMA_CMDPTR_LO 0x03
+#define DBDMA_INTR_SEL 0x04
+#define DBDMA_BRANCH_SEL 0x05
+#define DBDMA_WAIT_SEL 0x06
+#define DBDMA_XFER_MODE 0x07
+#define DBDMA_DATA2PTR_HI 0x08
+#define DBDMA_DATA2PTR_LO 0x09
+#define DBDMA_RES1 0x0A
+#define DBDMA_ADDRESS_HI 0x0B
+#define DBDMA_BRANCH_ADDR_HI 0x0C
+#define DBDMA_RES2 0x0D
+#define DBDMA_RES3 0x0E
+#define DBDMA_RES4 0x0F
+
+#define DBDMA_REGS 16
+#define DBDMA_SIZE (DBDMA_REGS * sizeof(uint32_t))
+
+#define DBDMA_CHANNEL_SHIFT 7
+#define DBDMA_CHANNEL_SIZE (1 << DBDMA_CHANNEL_SHIFT)
+
+#define DBDMA_CHANNELS (0x1000 >> DBDMA_CHANNEL_SHIFT)
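+/* 0x1000 bytes of register space in 128-byte per-channel windows: 32 channels */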
+
+/* Bits in control and status registers */
+
+#define RUN 0x8000
+#define PAUSE 0x4000
+#define FLUSH 0x2000
+#define WAKE 0x1000
+#define DEAD 0x0800
+#define ACTIVE 0x0400
+#define BT 0x0100
+#define DEVSTAT 0x00ff
+
+/*
+ * DBDMA command structure. These fields are all little-endian!
+ */
+
+typedef struct dbdma_cmd {
+ uint16_t req_count; /* requested byte transfer count */
+ uint16_t command; /* command word (has bit-fields) */
+ uint32_t phy_addr; /* physical data address */
+ uint32_t cmd_dep; /* command-dependent field */
+ uint16_t res_count; /* residual count after completion */
+ uint16_t xfer_status; /* transfer status */
+} dbdma_cmd;
+
+/* DBDMA command values in command field */
+
+#define COMMAND_MASK 0xf000
+#define OUTPUT_MORE 0x0000 /* transfer memory data to stream */
+#define OUTPUT_LAST 0x1000 /* ditto followed by end marker */
+#define INPUT_MORE 0x2000 /* transfer stream data to memory */
+#define INPUT_LAST 0x3000 /* ditto, expect end marker */
+#define STORE_WORD 0x4000 /* write word (4 bytes) to device reg */
+#define LOAD_WORD 0x5000 /* read word (4 bytes) from device reg */
+#define DBDMA_NOP 0x6000 /* do nothing */
+#define DBDMA_STOP 0x7000 /* suspend processing */
+
+/* Key values in command field */
+
+#define KEY_MASK 0x0700
+#define KEY_STREAM0 0x0000 /* usual data stream */
+#define KEY_STREAM1 0x0100 /* control/status stream */
+#define KEY_STREAM2 0x0200 /* device-dependent stream */
+#define KEY_STREAM3 0x0300 /* device-dependent stream */
+#define KEY_STREAM4 0x0400 /* reserved */
+#define KEY_REGS 0x0500 /* device register space */
+#define KEY_SYSTEM 0x0600 /* system memory-mapped space */
+#define KEY_DEVICE 0x0700 /* device memory-mapped space */
+
+/* Interrupt control values in command field */
+
+#define INTR_MASK 0x0030
+#define INTR_NEVER 0x0000 /* don't interrupt */
+#define INTR_IFSET 0x0010 /* intr if condition bit is 1 */
+#define INTR_IFCLR 0x0020 /* intr if condition bit is 0 */
+#define INTR_ALWAYS 0x0030 /* always interrupt */
+
+/* Branch control values in command field */
+
+#define BR_MASK 0x000c
+#define BR_NEVER 0x0000 /* don't branch */
+#define BR_IFSET 0x0004 /* branch if condition bit is 1 */
+#define BR_IFCLR 0x0008 /* branch if condition bit is 0 */
+#define BR_ALWAYS 0x000c /* always branch */
+
+/* Wait control values in command field */
+
+#define WAIT_MASK 0x0003
+#define WAIT_NEVER 0x0000 /* don't wait */
+#define WAIT_IFSET 0x0001 /* wait if condition bit is 1 */
+#define WAIT_IFCLR 0x0002 /* wait if condition bit is 0 */
+#define WAIT_ALWAYS 0x0003 /* always wait */
+
+typedef struct DBDMA_channel {
+ int channel;
+ uint32_t regs[DBDMA_REGS];
+ qemu_irq irq;
+ DBDMA_io io;
+ DBDMA_rw rw;
+ DBDMA_flush flush;
+ dbdma_cmd current;
+ int processing;
+} DBDMA_channel;
+
+typedef struct {
+ MemoryRegion mem;
+ DBDMA_channel channels[DBDMA_CHANNELS];
+} DBDMAState;
+
+#ifdef DEBUG_DBDMA
+static void dump_dbdma_cmd(dbdma_cmd *cmd)
+{
+ printf("dbdma_cmd %p\n", cmd);
+ printf(" req_count 0x%04x\n", le16_to_cpu(cmd->req_count));
+ printf(" command 0x%04x\n", le16_to_cpu(cmd->command));
+ printf(" phy_addr 0x%08x\n", le32_to_cpu(cmd->phy_addr));
+ printf(" cmd_dep 0x%08x\n", le32_to_cpu(cmd->cmd_dep));
+ printf(" res_count 0x%04x\n", le16_to_cpu(cmd->res_count));
+ printf(" xfer_status 0x%04x\n", le16_to_cpu(cmd->xfer_status));
+}
+#else
+static void dump_dbdma_cmd(dbdma_cmd *cmd)
+{
+}
+#endif
+static void dbdma_cmdptr_load(DBDMA_channel *ch)
+{
+ DBDMA_DPRINTF("dbdma_cmdptr_load 0x%08x\n",
+ ch->regs[DBDMA_CMDPTR_LO]);
+ cpu_physical_memory_read(ch->regs[DBDMA_CMDPTR_LO],
+ (uint8_t*)&ch->current, sizeof(dbdma_cmd));
+}
+
+static void dbdma_cmdptr_save(DBDMA_channel *ch)
+{
+ DBDMA_DPRINTF("dbdma_cmdptr_save 0x%08x\n",
+ ch->regs[DBDMA_CMDPTR_LO]);
+ DBDMA_DPRINTF("xfer_status 0x%08x res_count 0x%04x\n",
+ le16_to_cpu(ch->current.xfer_status),
+ le16_to_cpu(ch->current.res_count));
+ cpu_physical_memory_write(ch->regs[DBDMA_CMDPTR_LO],
+ (uint8_t*)&ch->current, sizeof(dbdma_cmd));
+}
+
+static void kill_channel(DBDMA_channel *ch)
+{
+ DBDMA_DPRINTF("kill_channel\n");
+
+ ch->regs[DBDMA_STATUS] |= DEAD;
+ ch->regs[DBDMA_STATUS] &= ~ACTIVE;
+
+ qemu_irq_raise(ch->irq);
+}
+
+static void conditional_interrupt(DBDMA_channel *ch)
+{
+ dbdma_cmd *current = &ch->current;
+ uint16_t intr;
+ uint16_t sel_mask, sel_value;
+ uint32_t status;
+ int cond;
+
+ DBDMA_DPRINTF("conditional_interrupt\n");
+
+ intr = le16_to_cpu(current->command) & INTR_MASK;
+
+ switch(intr) {
+ case INTR_NEVER: /* don't interrupt */
+ return;
+ case INTR_ALWAYS: /* always interrupt */
+ qemu_irq_raise(ch->irq);
+ return;
+ }
+
+ status = ch->regs[DBDMA_STATUS] & DEVSTAT;
+
+ sel_mask = (ch->regs[DBDMA_INTR_SEL] >> 16) & 0x0f;
+ sel_value = ch->regs[DBDMA_INTR_SEL] & 0x0f;
+
+ cond = (status & sel_mask) == (sel_value & sel_mask);
+
+ switch(intr) {
+ case INTR_IFSET: /* intr if condition bit is 1 */
+ if (cond)
+ qemu_irq_raise(ch->irq);
+ return;
+ case INTR_IFCLR: /* intr if condition bit is 0 */
+ if (!cond)
+ qemu_irq_raise(ch->irq);
+ return;
+ }
+}
+
+static int conditional_wait(DBDMA_channel *ch)
+{
+ dbdma_cmd *current = &ch->current;
+ uint16_t wait;
+ uint16_t sel_mask, sel_value;
+ uint32_t status;
+ int cond;
+
+ DBDMA_DPRINTF("conditional_wait\n");
+
+ wait = le16_to_cpu(current->command) & WAIT_MASK;
+
+ switch(wait) {
+ case WAIT_NEVER: /* don't wait */
+ return 0;
+ case WAIT_ALWAYS: /* always wait */
+ return 1;
+ }
+
+ status = ch->regs[DBDMA_STATUS] & DEVSTAT;
+
+ sel_mask = (ch->regs[DBDMA_WAIT_SEL] >> 16) & 0x0f;
+ sel_value = ch->regs[DBDMA_WAIT_SEL] & 0x0f;
+
+ cond = (status & sel_mask) == (sel_value & sel_mask);
+
+ switch(wait) {
+ case WAIT_IFSET: /* wait if condition bit is 1 */
+ if (cond)
+ return 1;
+ return 0;
+ case WAIT_IFCLR: /* wait if condition bit is 0 */
+ if (!cond)
+ return 1;
+ return 0;
+ }
+ return 0;
+}
+
+static void next(DBDMA_channel *ch)
+{
+ uint32_t cp;
+
+ ch->regs[DBDMA_STATUS] &= ~BT;
+
+ cp = ch->regs[DBDMA_CMDPTR_LO];
+ ch->regs[DBDMA_CMDPTR_LO] = cp + sizeof(dbdma_cmd);
+ dbdma_cmdptr_load(ch);
+}
+
+static void branch(DBDMA_channel *ch)
+{
+ dbdma_cmd *current = &ch->current;
+
+ ch->regs[DBDMA_CMDPTR_LO] = current->cmd_dep;
+ ch->regs[DBDMA_STATUS] |= BT;
+ dbdma_cmdptr_load(ch);
+}
+
+static void conditional_branch(DBDMA_channel *ch)
+{
+ dbdma_cmd *current = &ch->current;
+ uint16_t br;
+ uint16_t sel_mask, sel_value;
+ uint32_t status;
+ int cond;
+
+ DBDMA_DPRINTF("conditional_branch\n");
+
+ /* check if we must branch */
+
+ br = le16_to_cpu(current->command) & BR_MASK;
+
+ switch(br) {
+ case BR_NEVER: /* don't branch */
+ next(ch);
+ return;
+ case BR_ALWAYS: /* always branch */
+ branch(ch);
+ return;
+ }
+
+ status = ch->regs[DBDMA_STATUS] & DEVSTAT;
+
+ sel_mask = (ch->regs[DBDMA_BRANCH_SEL] >> 16) & 0x0f;
+ sel_value = ch->regs[DBDMA_BRANCH_SEL] & 0x0f;
+
+ cond = (status & sel_mask) == (sel_value & sel_mask);
+
+ switch(br) {
+ case BR_IFSET: /* branch if condition bit is 1 */
+ if (cond)
+ branch(ch);
+ else
+ next(ch);
+ return;
+ case BR_IFCLR: /* branch if condition bit is 0 */
+ if (!cond)
+ branch(ch);
+ else
+ next(ch);
+ return;
+ }
+}
+
+static QEMUBH *dbdma_bh;
+static void channel_run(DBDMA_channel *ch);
+
+static void dbdma_end(DBDMA_io *io)
+{
+ DBDMA_channel *ch = io->channel;
+ dbdma_cmd *current = &ch->current;
+
+ if (conditional_wait(ch))
+ goto wait;
+
+ current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
+ current->res_count = cpu_to_le16(io->len);
+ dbdma_cmdptr_save(ch);
+ if (io->is_last)
+ ch->regs[DBDMA_STATUS] &= ~FLUSH;
+
+ conditional_interrupt(ch);
+ conditional_branch(ch);
+
+wait:
+ ch->processing = 0;
+ if ((ch->regs[DBDMA_STATUS] & RUN) &&
+ (ch->regs[DBDMA_STATUS] & ACTIVE))
+ channel_run(ch);
+}
+
+static void start_output(DBDMA_channel *ch, int key, uint32_t addr,
+ uint16_t req_count, int is_last)
+{
+ DBDMA_DPRINTF("start_output\n");
+
+ /* KEY_REGS, KEY_DEVICE and KEY_STREAM
+ * are not implemented in the mac-io chip
+ */
+
+ DBDMA_DPRINTF("addr 0x%x key 0x%x\n", addr, key);
+ if (!addr || key > KEY_STREAM3) {
+ kill_channel(ch);
+ return;
+ }
+
+ ch->io.addr = addr;
+ ch->io.len = req_count;
+ ch->io.is_last = is_last;
+ ch->io.dma_end = dbdma_end;
+ ch->io.is_dma_out = 1;
+ ch->processing = 1;
+ if (ch->rw) {
+ ch->rw(&ch->io);
+ }
+}
+
+static void start_input(DBDMA_channel *ch, int key, uint32_t addr,
+ uint16_t req_count, int is_last)
+{
+ DBDMA_DPRINTF("start_input\n");
+
+ /* KEY_REGS, KEY_DEVICE and KEY_STREAM
+ * are not implemented in the mac-io chip
+ */
+
+ if (!addr || key > KEY_STREAM3) {
+ kill_channel(ch);
+ return;
+ }
+
+ ch->io.addr = addr;
+ ch->io.len = req_count;
+ ch->io.is_last = is_last;
+ ch->io.dma_end = dbdma_end;
+ ch->io.is_dma_out = 0;
+ ch->processing = 1;
+ if (ch->rw) {
+ ch->rw(&ch->io);
+ }
+}
+
+static void load_word(DBDMA_channel *ch, int key, uint32_t addr,
+ uint16_t len)
+{
+ dbdma_cmd *current = &ch->current;
+ uint32_t val;
+
+ DBDMA_DPRINTF("load_word\n");
+
+ /* only implements KEY_SYSTEM */
+
+ if (key != KEY_SYSTEM) {
+ printf("DBDMA: LOAD_WORD, unimplemented key %x\n", key);
+ kill_channel(ch);
+ return;
+ }
+
+ cpu_physical_memory_read(addr, (uint8_t*)&val, len);
+
+ if (len == 2)
+ val = (val << 16) | (current->cmd_dep & 0x0000ffff);
+ else if (len == 1)
+ val = (val << 24) | (current->cmd_dep & 0x00ffffff);
+
+ current->cmd_dep = val;
+
+ if (conditional_wait(ch))
+ goto wait;
+
+ current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
+ dbdma_cmdptr_save(ch);
+ ch->regs[DBDMA_STATUS] &= ~FLUSH;
+
+ conditional_interrupt(ch);
+ next(ch);
+
+wait:
+ qemu_bh_schedule(dbdma_bh);
+}
+
+static void store_word(DBDMA_channel *ch, int key, uint32_t addr,
+ uint16_t len)
+{
+ dbdma_cmd *current = &ch->current;
+ uint32_t val;
+
+ DBDMA_DPRINTF("store_word\n");
+
+ /* only implements KEY_SYSTEM */
+
+ if (key != KEY_SYSTEM) {
+ printf("DBDMA: STORE_WORD, unimplemented key %x\n", key);
+ kill_channel(ch);
+ return;
+ }
+
+ val = current->cmd_dep;
+ if (len == 2)
+ val >>= 16;
+ else if (len == 1)
+ val >>= 24;
+
+ cpu_physical_memory_write(addr, (uint8_t*)&val, len);
+
+ if (conditional_wait(ch))
+ goto wait;
+
+ current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
+ dbdma_cmdptr_save(ch);
+ ch->regs[DBDMA_STATUS] &= ~FLUSH;
+
+ conditional_interrupt(ch);
+ next(ch);
+
+wait:
+ qemu_bh_schedule(dbdma_bh);
+}
+
+static void nop(DBDMA_channel *ch)
+{
+ dbdma_cmd *current = &ch->current;
+
+ if (conditional_wait(ch))
+ goto wait;
+
+ current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
+ dbdma_cmdptr_save(ch);
+
+ conditional_interrupt(ch);
+ conditional_branch(ch);
+
+wait:
+ qemu_bh_schedule(dbdma_bh);
+}
+
+static void stop(DBDMA_channel *ch)
+{
+ ch->regs[DBDMA_STATUS] &= ~(ACTIVE|DEAD|FLUSH);
+
+ /* the stop command does not increment the command pointer */
+}
+
+static void channel_run(DBDMA_channel *ch)
+{
+ dbdma_cmd *current = &ch->current;
+ uint16_t cmd, key;
+ uint16_t req_count;
+ uint32_t phy_addr;
+
+ DBDMA_DPRINTF("channel_run\n");
+ dump_dbdma_cmd(current);
+
+ /* clear WAKE flag at command fetch */
+
+ ch->regs[DBDMA_STATUS] &= ~WAKE;
+
+ cmd = le16_to_cpu(current->command) & COMMAND_MASK;
+
+ switch (cmd) {
+ case DBDMA_NOP:
+ nop(ch);
+ return;
+
+ case DBDMA_STOP:
+ stop(ch);
+ return;
+ }
+
+ key = le16_to_cpu(current->command) & 0x0700;
+ req_count = le16_to_cpu(current->req_count);
+ phy_addr = le32_to_cpu(current->phy_addr);
+
+ if (key == KEY_STREAM4) {
+ printf("command %x, invalid key 4\n", cmd);
+ kill_channel(ch);
+ return;
+ }
+
+ switch (cmd) {
+ case OUTPUT_MORE:
+ start_output(ch, key, phy_addr, req_count, 0);
+ return;
+
+ case OUTPUT_LAST:
+ start_output(ch, key, phy_addr, req_count, 1);
+ return;
+
+ case INPUT_MORE:
+ start_input(ch, key, phy_addr, req_count, 0);
+ return;
+
+ case INPUT_LAST:
+ start_input(ch, key, phy_addr, req_count, 1);
+ return;
+ }
+
+ if (key < KEY_REGS) {
+ printf("command %x, invalid key %x\n", cmd, key);
+ key = KEY_SYSTEM;
+ }
+
+ /* for LOAD_WORD and STORE_WORD, only the low 3 bits of req_count are
+ * used and BRANCH is invalid
+ */
+
+ req_count = req_count & 0x0007;
+ if (req_count & 0x4) {
+ req_count = 4;
+ phy_addr &= ~3;
+ } else if (req_count & 0x2) {
+ req_count = 2;
+ phy_addr &= ~1;
+ } else
+ req_count = 1;
+
+ switch (cmd) {
+ case LOAD_WORD:
+ load_word(ch, key, phy_addr, req_count);
+ return;
+
+ case STORE_WORD:
+ store_word(ch, key, phy_addr, req_count);
+ return;
+ }
+}
+
+static void DBDMA_run(DBDMAState *s)
+{
+ int channel;
+
+ for (channel = 0; channel < DBDMA_CHANNELS; channel++) {
+ DBDMA_channel *ch = &s->channels[channel];
+ uint32_t status = ch->regs[DBDMA_STATUS];
+ if (!ch->processing && (status & RUN) && (status & ACTIVE)) {
+ channel_run(ch);
+ }
+ }
+}
+
+static void DBDMA_run_bh(void *opaque)
+{
+ DBDMAState *s = opaque;
+
+ DBDMA_DPRINTF("DBDMA_run_bh\n");
+
+ DBDMA_run(s);
+}
+
+void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq,
+ DBDMA_rw rw, DBDMA_flush flush,
+ void *opaque)
+{
+ DBDMAState *s = dbdma;
+ DBDMA_channel *ch = &s->channels[nchan];
+
+ DBDMA_DPRINTF("DBDMA_register_channel 0x%x\n", nchan);
+
+ ch->irq = irq;
+ ch->channel = nchan;
+ ch->rw = rw;
+ ch->flush = flush;
+ ch->io.opaque = opaque;
+ ch->io.channel = ch;
+}
+
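+/*
+ * A write to the control register carries a bit mask in its upper 16 bits
+ * and the new bit values in its lower 16 bits; only the status bits
+ * selected by the mask are updated.
+ */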
+static void
+dbdma_control_write(DBDMA_channel *ch)
+{
+ uint16_t mask, value;
+ uint32_t status;
+
+ mask = (ch->regs[DBDMA_CONTROL] >> 16) & 0xffff;
+ value = ch->regs[DBDMA_CONTROL] & 0xffff;
+
+ value &= (RUN | PAUSE | FLUSH | WAKE | DEVSTAT);
+
+ status = ch->regs[DBDMA_STATUS];
+
+ status = (value & mask) | (status & ~mask);
+
+ if (status & WAKE)
+ status |= ACTIVE;
+ if (status & RUN) {
+ status |= ACTIVE;
+ status &= ~DEAD;
+ }
+ if (status & PAUSE)
+ status &= ~ACTIVE;
+ if ((ch->regs[DBDMA_STATUS] & RUN) && !(status & RUN)) {
+ /* RUN is cleared */
+ status &= ~(ACTIVE|DEAD);
+ if ((status & FLUSH) && ch->flush) {
+ ch->flush(&ch->io);
+ status &= ~FLUSH;
+ }
+ }
+
+ DBDMA_DPRINTF(" status 0x%08x\n", status);
+
+ ch->regs[DBDMA_STATUS] = status;
+
+ if (status & ACTIVE)
+ qemu_bh_schedule(dbdma_bh);
+ if ((status & FLUSH) && ch->flush)
+ ch->flush(&ch->io);
+}
+
+static void dbdma_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ int channel = addr >> DBDMA_CHANNEL_SHIFT;
+ DBDMAState *s = opaque;
+ DBDMA_channel *ch = &s->channels[channel];
+ int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2;
+
+ DBDMA_DPRINTF("writel 0x" TARGET_FMT_plx " <= 0x%08x\n", addr, value);
+ DBDMA_DPRINTF("channel 0x%x reg 0x%x\n",
+ (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg);
+
+ /* cmdptr cannot be modified if channel is RUN or ACTIVE */
+
+ if (reg == DBDMA_CMDPTR_LO &&
+ (ch->regs[DBDMA_STATUS] & (RUN | ACTIVE)))
+ return;
+
+ ch->regs[reg] = value;
+
+ switch(reg) {
+ case DBDMA_CONTROL:
+ dbdma_control_write(ch);
+ break;
+ case DBDMA_CMDPTR_LO:
+ /* 16-byte aligned */
+ ch->regs[DBDMA_CMDPTR_LO] &= ~0xf;
+ dbdma_cmdptr_load(ch);
+ break;
+ case DBDMA_STATUS:
+ case DBDMA_INTR_SEL:
+ case DBDMA_BRANCH_SEL:
+ case DBDMA_WAIT_SEL:
+ /* nothing to do */
+ break;
+ case DBDMA_XFER_MODE:
+ case DBDMA_CMDPTR_HI:
+ case DBDMA_DATA2PTR_HI:
+ case DBDMA_DATA2PTR_LO:
+ case DBDMA_ADDRESS_HI:
+ case DBDMA_BRANCH_ADDR_HI:
+ case DBDMA_RES1:
+ case DBDMA_RES2:
+ case DBDMA_RES3:
+ case DBDMA_RES4:
+ /* unused */
+ break;
+ }
+}
+
+static uint64_t dbdma_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ uint32_t value;
+ int channel = addr >> DBDMA_CHANNEL_SHIFT;
+ DBDMAState *s = opaque;
+ DBDMA_channel *ch = &s->channels[channel];
+ int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2;
+
+ value = ch->regs[reg];
+
+ DBDMA_DPRINTF("readl 0x" TARGET_FMT_plx " => 0x%08x\n", addr, value);
+ DBDMA_DPRINTF("channel 0x%x reg 0x%x\n",
+ (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg);
+
+ switch(reg) {
+ case DBDMA_CONTROL:
+ value = 0;
+ break;
+ case DBDMA_STATUS:
+ case DBDMA_CMDPTR_LO:
+ case DBDMA_INTR_SEL:
+ case DBDMA_BRANCH_SEL:
+ case DBDMA_WAIT_SEL:
+ /* nothing to do */
+ break;
+ case DBDMA_XFER_MODE:
+ case DBDMA_CMDPTR_HI:
+ case DBDMA_DATA2PTR_HI:
+ case DBDMA_DATA2PTR_LO:
+ case DBDMA_ADDRESS_HI:
+ case DBDMA_BRANCH_ADDR_HI:
+ /* unused */
+ value = 0;
+ break;
+ case DBDMA_RES1:
+ case DBDMA_RES2:
+ case DBDMA_RES3:
+ case DBDMA_RES4:
+ /* reserved */
+ break;
+ }
+
+ return value;
+}
+
+static const MemoryRegionOps dbdma_ops = {
+ .read = dbdma_read,
+ .write = dbdma_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static const VMStateDescription vmstate_dbdma_channel = {
+ .name = "dbdma_channel",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .minimum_version_id_old = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, struct DBDMA_channel, DBDMA_REGS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_dbdma = {
+ .name = "dbdma",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .minimum_version_id_old = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_ARRAY(channels, DBDMAState, DBDMA_CHANNELS, 1,
+ vmstate_dbdma_channel, DBDMA_channel),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void dbdma_reset(void *opaque)
+{
+ DBDMAState *s = opaque;
+ int i;
+
+ for (i = 0; i < DBDMA_CHANNELS; i++)
+ memset(s->channels[i].regs, 0, DBDMA_SIZE);
+}
+
+void* DBDMA_init (MemoryRegion **dbdma_mem)
+{
+ DBDMAState *s;
+
+ s = g_malloc0(sizeof(DBDMAState));
+
+ memory_region_init_io(&s->mem, &dbdma_ops, s, "dbdma", 0x1000);
+ *dbdma_mem = &s->mem;
+ vmstate_register(NULL, -1, &vmstate_dbdma, s);
+ qemu_register_reset(dbdma_reset, s);
+
+ dbdma_bh = qemu_bh_new(DBDMA_run_bh, s);
+
+ return s;
+}
diff --git a/hw/misc/macio/macio.c b/hw/misc/macio/macio.c
new file mode 100644
index 0000000..2f389dd
--- /dev/null
+++ b/hw/misc/macio/macio.c
@@ -0,0 +1,305 @@
+/*
+ * PowerMac MacIO device emulation
+ *
+ * Copyright (c) 2005-2007 Fabrice Bellard
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/ppc/mac.h"
+#include "hw/pci/pci.h"
+#include "hw/ppc/mac_dbdma.h"
+#include "hw/char/escc.h"
+
+#define TYPE_MACIO "macio"
+#define MACIO(obj) OBJECT_CHECK(MacIOState, (obj), TYPE_MACIO)
+
+typedef struct MacIOState
+{
+ /*< private >*/
+ PCIDevice parent;
+ /*< public >*/
+
+ MemoryRegion bar;
+ CUDAState cuda;
+ void *dbdma;
+ MemoryRegion *pic_mem;
+ MemoryRegion *escc_mem;
+} MacIOState;
+
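+/*
+ * Offsets of the sub-devices within the 512 KiB MacIO BAR, as mapped by the
+ * code below:
+ * 0x00000 Heathrow PIC (Old World), 0x40000 OpenPIC (New World)
+ * 0x08000 DBDMA
+ * 0x13000 ESCC
+ * 0x16000 CUDA
+ * 0x20000 IDE channel(s), 0x1000 apart
+ * 0x60000 NVRAM (Old World only)
+ */
+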
+#define OLDWORLD_MACIO(obj) \
+ OBJECT_CHECK(OldWorldMacIOState, (obj), TYPE_OLDWORLD_MACIO)
+
+typedef struct OldWorldMacIOState {
+ /*< private >*/
+ MacIOState parent_obj;
+ /*< public >*/
+
+ qemu_irq irqs[3];
+
+ MacIONVRAMState nvram;
+ MACIOIDEState ide;
+} OldWorldMacIOState;
+
+#define NEWWORLD_MACIO(obj) \
+ OBJECT_CHECK(NewWorldMacIOState, (obj), TYPE_NEWWORLD_MACIO)
+
+typedef struct NewWorldMacIOState {
+ /*< private >*/
+ MacIOState parent_obj;
+ /*< public >*/
+ qemu_irq irqs[5];
+ MACIOIDEState ide[2];
+} NewWorldMacIOState;
+
+static void macio_bar_setup(MacIOState *macio_state)
+{
+ MemoryRegion *bar = &macio_state->bar;
+
+ if (macio_state->escc_mem) {
+ memory_region_add_subregion(bar, 0x13000, macio_state->escc_mem);
+ }
+}
+
+static int macio_common_initfn(PCIDevice *d)
+{
+ MacIOState *s = MACIO(d);
+ SysBusDevice *sysbus_dev;
+ int ret;
+
+ d->config[0x3d] = 0x01; /* interrupt on pin 1 */
+
+ ret = qdev_init(DEVICE(&s->cuda));
+ if (ret < 0) {
+ return ret;
+ }
+ sysbus_dev = SYS_BUS_DEVICE(&s->cuda);
+ memory_region_add_subregion(&s->bar, 0x16000,
+ sysbus_mmio_get_region(sysbus_dev, 0));
+
+ macio_bar_setup(s);
+ pci_register_bar(d, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar);
+
+ return 0;
+}
+
+static int macio_oldworld_initfn(PCIDevice *d)
+{
+ MacIOState *s = MACIO(d);
+ OldWorldMacIOState *os = OLDWORLD_MACIO(d);
+ SysBusDevice *sysbus_dev;
+ int ret = macio_common_initfn(d);
+ if (ret < 0) {
+ return ret;
+ }
+
+ sysbus_dev = SYS_BUS_DEVICE(&s->cuda);
+ sysbus_connect_irq(sysbus_dev, 0, os->irqs[0]);
+
+ ret = qdev_init(DEVICE(&os->nvram));
+ if (ret < 0) {
+ return ret;
+ }
+ sysbus_dev = SYS_BUS_DEVICE(&os->nvram);
+ memory_region_add_subregion(&s->bar, 0x60000,
+ sysbus_mmio_get_region(sysbus_dev, 0));
+ pmac_format_nvram_partition(&os->nvram, os->nvram.size);
+
+ if (s->pic_mem) {
+ /* Heathrow PIC */
+ memory_region_add_subregion(&s->bar, 0x00000, s->pic_mem);
+ }
+
+ sysbus_dev = SYS_BUS_DEVICE(&os->ide);
+ sysbus_connect_irq(sysbus_dev, 0, os->irqs[1]);
+ sysbus_connect_irq(sysbus_dev, 1, os->irqs[2]);
+ macio_ide_register_dma(&os->ide, s->dbdma, 0x16);
+ ret = qdev_init(DEVICE(&os->ide));
+ if (ret < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static void macio_oldworld_init(Object *obj)
+{
+ MacIOState *s = MACIO(obj);
+ OldWorldMacIOState *os = OLDWORLD_MACIO(obj);
+ DeviceState *dev;
+
+ qdev_init_gpio_out(DEVICE(obj), os->irqs, ARRAY_SIZE(os->irqs));
+
+ object_initialize(&os->nvram, TYPE_MACIO_NVRAM);
+ dev = DEVICE(&os->nvram);
+ qdev_prop_set_uint32(dev, "size", 0x2000);
+ qdev_prop_set_uint32(dev, "it_shift", 4);
+
+ object_initialize(&os->ide, TYPE_MACIO_IDE);
+ qdev_set_parent_bus(DEVICE(&os->ide), sysbus_get_default());
+ memory_region_add_subregion(&s->bar, 0x1f000 + (1 * 0x1000), &os->ide.mem);
+ object_property_add_child(obj, "ide", OBJECT(&os->ide), NULL);
+}
+
+static int macio_newworld_initfn(PCIDevice *d)
+{
+ MacIOState *s = MACIO(d);
+ NewWorldMacIOState *ns = NEWWORLD_MACIO(d);
+ SysBusDevice *sysbus_dev;
+ int ret = macio_common_initfn(d);
+ if (ret < 0) {
+ return ret;
+ }
+
+ sysbus_dev = SYS_BUS_DEVICE(&s->cuda);
+ sysbus_connect_irq(sysbus_dev, 0, ns->irqs[0]);
+
+ if (s->pic_mem) {
+ /* OpenPIC */
+ memory_region_add_subregion(&s->bar, 0x40000, s->pic_mem);
+ }
+
+ sysbus_dev = SYS_BUS_DEVICE(&ns->ide[0]);
+ sysbus_connect_irq(sysbus_dev, 0, ns->irqs[1]);
+ sysbus_connect_irq(sysbus_dev, 1, ns->irqs[2]);
+ macio_ide_register_dma(&ns->ide[0], s->dbdma, 0x16);
+ ret = qdev_init(DEVICE(&ns->ide[0]));
+ if (ret < 0) {
+ return ret;
+ }
+
+ sysbus_dev = SYS_BUS_DEVICE(&ns->ide[1]);
+ sysbus_connect_irq(sysbus_dev, 0, ns->irqs[3]);
+ sysbus_connect_irq(sysbus_dev, 1, ns->irqs[4]);
+ macio_ide_register_dma(&ns->ide[1], s->dbdma, 0x1a);
+ ret = qdev_init(DEVICE(&ns->ide[1]));
+ if (ret < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static void macio_newworld_init(Object *obj)
+{
+ MacIOState *s = MACIO(obj);
+ NewWorldMacIOState *ns = NEWWORLD_MACIO(obj);
+ int i;
+ gchar *name;
+
+ qdev_init_gpio_out(DEVICE(obj), ns->irqs, ARRAY_SIZE(ns->irqs));
+
+ for (i = 0; i < 2; i++) {
+ object_initialize(&ns->ide[i], TYPE_MACIO_IDE);
+ qdev_set_parent_bus(DEVICE(&ns->ide[i]), sysbus_get_default());
+ memory_region_add_subregion(&s->bar, 0x1f000 + ((i + 1) * 0x1000),
+ &ns->ide[i].mem);
+ name = g_strdup_printf("ide[%i]", i);
+ object_property_add_child(obj, name, OBJECT(&ns->ide[i]), NULL);
+ g_free(name);
+ }
+}
+
+static void macio_instance_init(Object *obj)
+{
+ MacIOState *s = MACIO(obj);
+ MemoryRegion *dbdma_mem;
+
+ memory_region_init(&s->bar, "macio", 0x80000);
+
+ object_initialize(&s->cuda, TYPE_CUDA);
+ qdev_set_parent_bus(DEVICE(&s->cuda), sysbus_get_default());
+ object_property_add_child(obj, "cuda", OBJECT(&s->cuda), NULL);
+
+ s->dbdma = DBDMA_init(&dbdma_mem);
+ memory_region_add_subregion(&s->bar, 0x08000, dbdma_mem);
+}
+
+static void macio_oldworld_class_init(ObjectClass *oc, void *data)
+{
+ PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc);
+
+ pdc->init = macio_oldworld_initfn;
+ pdc->device_id = PCI_DEVICE_ID_APPLE_343S1201;
+}
+
+static void macio_newworld_class_init(ObjectClass *oc, void *data)
+{
+ PCIDeviceClass *pdc = PCI_DEVICE_CLASS(oc);
+
+ pdc->init = macio_newworld_initfn;
+ pdc->device_id = PCI_DEVICE_ID_APPLE_UNI_N_KEYL;
+}
+
+static void macio_class_init(ObjectClass *klass, void *data)
+{
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ k->vendor_id = PCI_VENDOR_ID_APPLE;
+ k->class_id = PCI_CLASS_OTHERS << 8;
+}
+
+static const TypeInfo macio_oldworld_type_info = {
+ .name = TYPE_OLDWORLD_MACIO,
+ .parent = TYPE_MACIO,
+ .instance_size = sizeof(OldWorldMacIOState),
+ .instance_init = macio_oldworld_init,
+ .class_init = macio_oldworld_class_init,
+};
+
+static const TypeInfo macio_newworld_type_info = {
+ .name = TYPE_NEWWORLD_MACIO,
+ .parent = TYPE_MACIO,
+ .instance_size = sizeof(NewWorldMacIOState),
+ .instance_init = macio_newworld_init,
+ .class_init = macio_newworld_class_init,
+};
+
+static const TypeInfo macio_type_info = {
+ .name = TYPE_MACIO,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(MacIOState),
+ .instance_init = macio_instance_init,
+ .abstract = true,
+ .class_init = macio_class_init,
+};
+
+static void macio_register_types(void)
+{
+ type_register_static(&macio_type_info);
+ type_register_static(&macio_oldworld_type_info);
+ type_register_static(&macio_newworld_type_info);
+}
+
+type_init(macio_register_types)
+
+void macio_init(PCIDevice *d,
+ MemoryRegion *pic_mem,
+ MemoryRegion *escc_mem)
+{
+ MacIOState *macio_state = MACIO(d);
+
+ macio_state->pic_mem = pic_mem;
+ macio_state->escc_mem = escc_mem;
+ /* Note: this code is strongly inspired by the corresponding code
+ in PearPC */
+
+ qdev_init_nofail(DEVICE(d));
+}
diff --git a/hw/misc/max111x.c b/hw/misc/max111x.c
new file mode 100644
index 0000000..d477ecd
--- /dev/null
+++ b/hw/misc/max111x.c
@@ -0,0 +1,193 @@
+/*
+ * Maxim MAX1110/1111 ADC chip emulation.
+ *
+ * Copyright (c) 2006 Openedhand Ltd.
+ * Written by Andrzej Zaborowski <balrog@zabor.org>
+ *
+ * This code is licensed under the GNU GPLv2.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "hw/ssi.h"
+
+typedef struct {
+ SSISlave ssidev;
+ qemu_irq interrupt;
+ uint8_t tb1, rb2, rb3;
+ int cycle;
+
+ uint8_t input[8];
+ int inputs, com;
+} MAX111xState;
+
+/* Control-byte bitfields */
+#define CB_PD0 (1 << 0)
+#define CB_PD1 (1 << 1)
+#define CB_SGL (1 << 2)
+#define CB_UNI (1 << 3)
+#define CB_SEL0 (1 << 4)
+#define CB_SEL1 (1 << 5)
+#define CB_SEL2 (1 << 6)
+#define CB_START (1 << 7)
+
+#define CHANNEL_NUM(v, b0, b1, b2) \
+ ((((v) >> (2 + (b0))) & 4) | \
+ (((v) >> (3 + (b1))) & 2) | \
+ (((v) >> (4 + (b2))) & 1))
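+/*
+ * CHANNEL_NUM() rebuilds the selected channel number from the SEL0..SEL2
+ * bits of the control byte; the callers pass different bit positions
+ * because the MAX1110 and MAX1111 order these bits differently.
+ */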
+
+static uint32_t max111x_read(MAX111xState *s)
+{
+ if (!s->tb1)
+ return 0;
+
+ switch (s->cycle ++) {
+ case 1:
+ return s->rb2;
+ case 2:
+ return s->rb3;
+ }
+
+ return 0;
+}
+
+/* Interpret a control-byte */
+static void max111x_write(MAX111xState *s, uint32_t value)
+{
+ int measure, chan;
+
+ /* Ignore the value if START bit is zero */
+ if (!(value & CB_START))
+ return;
+
+ s->cycle = 0;
+
+ if (!(value & CB_PD1)) {
+ s->tb1 = 0;
+ return;
+ }
+
+ s->tb1 = value;
+
+ if (s->inputs == 8)
+ chan = CHANNEL_NUM(value, 1, 0, 2);
+ else
+ chan = CHANNEL_NUM(value & ~CB_SEL0, 0, 1, 2);
+
+ if (value & CB_SGL)
+ measure = s->input[chan] - s->com;
+ else
+ measure = s->input[chan] - s->input[chan ^ 1];
+
+ if (!(value & CB_UNI))
+ measure ^= 0x80;
+
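+ /* the 8-bit result is shifted out MSB-first over two bytes: the top
+ * 6 bits in rb2 and the remaining 2 bits in the top of rb3 */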
+ s->rb2 = (measure >> 2) & 0x3f;
+ s->rb3 = (measure << 6) & 0xc0;
+
+ /* FIXME: When should the IRQ be lowered? */
+ qemu_irq_raise(s->interrupt);
+}
+
+static uint32_t max111x_transfer(SSISlave *dev, uint32_t value)
+{
+ MAX111xState *s = FROM_SSI_SLAVE(MAX111xState, dev);
+ max111x_write(s, value);
+ return max111x_read(s);
+}
+
+static const VMStateDescription vmstate_max111x = {
+ .name = "max111x",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_SSI_SLAVE(ssidev, MAX111xState),
+ VMSTATE_UINT8(tb1, MAX111xState),
+ VMSTATE_UINT8(rb2, MAX111xState),
+ VMSTATE_UINT8(rb3, MAX111xState),
+ VMSTATE_INT32_EQUAL(inputs, MAX111xState),
+ VMSTATE_INT32(com, MAX111xState),
+ VMSTATE_ARRAY_INT32_UNSAFE(input, MAX111xState, inputs,
+ vmstate_info_uint8, uint8_t),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static int max111x_init(SSISlave *dev, int inputs)
+{
+ MAX111xState *s = FROM_SSI_SLAVE(MAX111xState, dev);
+
+ qdev_init_gpio_out(&dev->qdev, &s->interrupt, 1);
+
+ s->inputs = inputs;
+ /* TODO: add a user interface for setting these */
+ s->input[0] = 0xf0;
+ s->input[1] = 0xe0;
+ s->input[2] = 0xd0;
+ s->input[3] = 0xc0;
+ s->input[4] = 0xb0;
+ s->input[5] = 0xa0;
+ s->input[6] = 0x90;
+ s->input[7] = 0x80;
+ s->com = 0;
+
+ vmstate_register(&dev->qdev, -1, &vmstate_max111x, s);
+ return 0;
+}
+
+static int max1110_init(SSISlave *dev)
+{
+ return max111x_init(dev, 8);
+}
+
+static int max1111_init(SSISlave *dev)
+{
+ return max111x_init(dev, 4);
+}
+
+void max111x_set_input(DeviceState *dev, int line, uint8_t value)
+{
+ MAX111xState *s = FROM_SSI_SLAVE(MAX111xState, SSI_SLAVE_FROM_QDEV(dev));
+ assert(line >= 0 && line < s->inputs);
+ s->input[line] = value;
+}
+
+static void max1110_class_init(ObjectClass *klass, void *data)
+{
+ SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
+
+ k->init = max1110_init;
+ k->transfer = max111x_transfer;
+}
+
+static const TypeInfo max1110_info = {
+ .name = "max1110",
+ .parent = TYPE_SSI_SLAVE,
+ .instance_size = sizeof(MAX111xState),
+ .class_init = max1110_class_init,
+};
+
+static void max1111_class_init(ObjectClass *klass, void *data)
+{
+ SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
+
+ k->init = max1111_init;
+ k->transfer = max111x_transfer;
+}
+
+static const TypeInfo max1111_info = {
+ .name = "max1111",
+ .parent = TYPE_SSI_SLAVE,
+ .instance_size = sizeof(MAX111xState),
+ .class_init = max1111_class_init,
+};
+
+static void max111x_register_types(void)
+{
+ type_register_static(&max1110_info);
+ type_register_static(&max1111_info);
+}
+
+type_init(max111x_register_types)
diff --git a/hw/misc/milkymist-hpdmc.c b/hw/misc/milkymist-hpdmc.c
new file mode 100644
index 0000000..d922f6f
--- /dev/null
+++ b/hw/misc/milkymist-hpdmc.c
@@ -0,0 +1,170 @@
+/*
+ * QEMU model of the Milkymist High Performance Dynamic Memory Controller.
+ *
+ * Copyright (c) 2010 Michael Walle <michael@walle.cc>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Specification available at:
+ * http://www.milkymist.org/socdoc/hpdmc.pdf
+ */
+
+#include "hw/hw.h"
+#include "hw/sysbus.h"
+#include "trace.h"
+#include "qemu/error-report.h"
+
+enum {
+ R_SYSTEM = 0,
+ R_BYPASS,
+ R_TIMING,
+ R_IODELAY,
+ R_MAX
+};
+
+enum {
+ IODELAY_DQSDELAY_RDY = (1<<5),
+ IODELAY_PLL1_LOCKED = (1<<6),
+ IODELAY_PLL2_LOCKED = (1<<7),
+};
+
+struct MilkymistHpdmcState {
+ SysBusDevice busdev;
+ MemoryRegion regs_region;
+
+ uint32_t regs[R_MAX];
+};
+typedef struct MilkymistHpdmcState MilkymistHpdmcState;
+
+static uint64_t hpdmc_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ MilkymistHpdmcState *s = opaque;
+ uint32_t r = 0;
+
+ addr >>= 2;
+ switch (addr) {
+ case R_SYSTEM:
+ case R_BYPASS:
+ case R_TIMING:
+ case R_IODELAY:
+ r = s->regs[addr];
+ break;
+
+ default:
+ error_report("milkymist_hpdmc: read access to unknown register 0x"
+ TARGET_FMT_plx, addr << 2);
+ break;
+ }
+
+ trace_milkymist_hpdmc_memory_read(addr << 2, r);
+
+ return r;
+}
+
+static void hpdmc_write(void *opaque, hwaddr addr, uint64_t value,
+ unsigned size)
+{
+ MilkymistHpdmcState *s = opaque;
+
+ trace_milkymist_hpdmc_memory_write(addr, value);
+
+ addr >>= 2;
+ switch (addr) {
+ case R_SYSTEM:
+ case R_BYPASS:
+ case R_TIMING:
+ s->regs[addr] = value;
+ break;
+ case R_IODELAY:
+ /* ignore writes */
+ break;
+
+ default:
+ error_report("milkymist_hpdmc: write access to unknown register 0x"
+ TARGET_FMT_plx, addr << 2);
+ break;
+ }
+}
+
+static const MemoryRegionOps hpdmc_mmio_ops = {
+ .read = hpdmc_read,
+ .write = hpdmc_write,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void milkymist_hpdmc_reset(DeviceState *d)
+{
+ MilkymistHpdmcState *s = container_of(d, MilkymistHpdmcState, busdev.qdev);
+ int i;
+
+ for (i = 0; i < R_MAX; i++) {
+ s->regs[i] = 0;
+ }
+
+ /* defaults */
+ s->regs[R_IODELAY] = IODELAY_DQSDELAY_RDY | IODELAY_PLL1_LOCKED
+ | IODELAY_PLL2_LOCKED;
+}
+
+static int milkymist_hpdmc_init(SysBusDevice *dev)
+{
+ MilkymistHpdmcState *s = FROM_SYSBUS(typeof(*s), dev);
+
+ memory_region_init_io(&s->regs_region, &hpdmc_mmio_ops, s,
+ "milkymist-hpdmc", R_MAX * 4);
+ sysbus_init_mmio(dev, &s->regs_region);
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_milkymist_hpdmc = {
+ .name = "milkymist-hpdmc",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, MilkymistHpdmcState, R_MAX),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void milkymist_hpdmc_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+
+ k->init = milkymist_hpdmc_init;
+ dc->reset = milkymist_hpdmc_reset;
+ dc->vmsd = &vmstate_milkymist_hpdmc;
+}
+
+static const TypeInfo milkymist_hpdmc_info = {
+ .name = "milkymist-hpdmc",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(MilkymistHpdmcState),
+ .class_init = milkymist_hpdmc_class_init,
+};
+
+static void milkymist_hpdmc_register_types(void)
+{
+ type_register_static(&milkymist_hpdmc_info);
+}
+
+type_init(milkymist_hpdmc_register_types)
diff --git a/hw/misc/milkymist-pfpu.c b/hw/misc/milkymist-pfpu.c
new file mode 100644
index 0000000..ad44b4d
--- /dev/null
+++ b/hw/misc/milkymist-pfpu.c
@@ -0,0 +1,544 @@
+/*
+ * QEMU model of the Milkymist programmable FPU.
+ *
+ * Copyright (c) 2010 Michael Walle <michael@walle.cc>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Specification available at:
+ * http://www.milkymist.org/socdoc/pfpu.pdf
+ *
+ */
+
+#include "hw/hw.h"
+#include "hw/sysbus.h"
+#include "trace.h"
+#include "qemu/log.h"
+#include "qemu/error-report.h"
+#include <math.h>
+
+/* #define TRACE_EXEC */
+
+#ifdef TRACE_EXEC
+# define D_EXEC(x) x
+#else
+# define D_EXEC(x)
+#endif
+
+enum {
+ R_CTL = 0,
+ R_MESHBASE,
+ R_HMESHLAST,
+ R_VMESHLAST,
+ R_CODEPAGE,
+ R_VERTICES,
+ R_COLLISIONS,
+ R_STRAYWRITES,
+ R_LASTDMA,
+ R_PC,
+ R_DREGBASE,
+ R_CODEBASE,
+ R_MAX
+};
+
+enum {
+ CTL_START_BUSY = (1<<0),
+};
+
+enum {
+ OP_NOP = 0,
+ OP_FADD,
+ OP_FSUB,
+ OP_FMUL,
+ OP_FABS,
+ OP_F2I,
+ OP_I2F,
+ OP_VECTOUT,
+ OP_SIN,
+ OP_COS,
+ OP_ABOVE,
+ OP_EQUAL,
+ OP_COPY,
+ OP_IF,
+ OP_TSIGN,
+ OP_QUAKE,
+};
+
+enum {
+ GPR_X = 0,
+ GPR_Y = 1,
+ GPR_FLAGS = 2,
+};
+
+enum {
+ LATENCY_FADD = 5,
+ LATENCY_FSUB = 5,
+ LATENCY_FMUL = 7,
+ LATENCY_FABS = 2,
+ LATENCY_F2I = 2,
+ LATENCY_I2F = 3,
+ LATENCY_VECTOUT = 0,
+ LATENCY_SIN = 4,
+ LATENCY_COS = 4,
+ LATENCY_ABOVE = 2,
+ LATENCY_EQUAL = 2,
+ LATENCY_COPY = 2,
+ LATENCY_IF = 2,
+ LATENCY_TSIGN = 2,
+ LATENCY_QUAKE = 2,
+ MAX_LATENCY = 7
+};
+
+#define GPR_BEGIN 0x100
+#define GPR_END 0x17f
+#define MICROCODE_BEGIN 0x200
+#define MICROCODE_END 0x3ff
+#define MICROCODE_WORDS 2048
+
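+ /* Reinterpret the raw bits of a value as another type, without numeric conversion */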
+#define REINTERPRET_CAST(type, val) (*((type *)&(val)))
+
+#ifdef TRACE_EXEC
+static const char *opcode_to_str[] = {
+ "NOP", "FADD", "FSUB", "FMUL", "FABS", "F2I", "I2F", "VECTOUT",
+ "SIN", "COS", "ABOVE", "EQUAL", "COPY", "IF", "TSIGN", "QUAKE",
+};
+#endif
+
+struct MilkymistPFPUState {
+ SysBusDevice busdev;
+ MemoryRegion regs_region;
+ CharDriverState *chr;
+ qemu_irq irq;
+
+ uint32_t regs[R_MAX];
+ uint32_t gp_regs[128];
+ uint32_t microcode[MICROCODE_WORDS];
+
+ int output_queue_pos;
+ uint32_t output_queue[MAX_LATENCY];
+};
+typedef struct MilkymistPFPUState MilkymistPFPUState;
+
+static inline hwaddr
+get_dma_address(uint32_t base, uint32_t x, uint32_t y)
+{
+ return base + 8 * (128 * y + x);
+}
+
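+ /* The output queue models instruction latency: a result computed now is
+ * written back to its destination register 'latency' cycles later.
+ */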
+static inline void
+output_queue_insert(MilkymistPFPUState *s, uint32_t val, int pos)
+{
+ s->output_queue[(s->output_queue_pos + pos) % MAX_LATENCY] = val;
+}
+
+static inline uint32_t
+output_queue_remove(MilkymistPFPUState *s)
+{
+ return s->output_queue[s->output_queue_pos];
+}
+
+static inline void
+output_queue_advance(MilkymistPFPUState *s)
+{
+ s->output_queue[s->output_queue_pos] = 0;
+ s->output_queue_pos = (s->output_queue_pos + 1) % MAX_LATENCY;
+}
+
+static int pfpu_decode_insn(MilkymistPFPUState *s)
+{
+ uint32_t pc = s->regs[R_PC];
+ uint32_t insn = s->microcode[pc];
+ uint32_t reg_a = (insn >> 18) & 0x7f;
+ uint32_t reg_b = (insn >> 11) & 0x7f;
+ uint32_t op = (insn >> 7) & 0xf;
+ uint32_t reg_d = insn & 0x7f;
+ uint32_t r = 0;
+ int latency = 0;
+
+ switch (op) {
+ case OP_NOP:
+ break;
+ case OP_FADD:
+ {
+ float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]);
+ float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]);
+ float t = a + b;
+ r = REINTERPRET_CAST(uint32_t, t);
+ latency = LATENCY_FADD;
+ D_EXEC(qemu_log("ADD a=%f b=%f t=%f, r=%08x\n", a, b, t, r));
+ } break;
+ case OP_FSUB:
+ {
+ float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]);
+ float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]);
+ float t = a - b;
+ r = REINTERPRET_CAST(uint32_t, t);
+ latency = LATENCY_FSUB;
+ D_EXEC(qemu_log("SUB a=%f b=%f t=%f, r=%08x\n", a, b, t, r));
+ } break;
+ case OP_FMUL:
+ {
+ float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]);
+ float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]);
+ float t = a * b;
+ r = REINTERPRET_CAST(uint32_t, t);
+ latency = LATENCY_FMUL;
+ D_EXEC(qemu_log("MUL a=%f b=%f t=%f, r=%08x\n", a, b, t, r));
+ } break;
+ case OP_FABS:
+ {
+ float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]);
+ float t = fabsf(a);
+ r = REINTERPRET_CAST(uint32_t, t);
+ latency = LATENCY_FABS;
+ D_EXEC(qemu_log("ABS a=%f t=%f, r=%08x\n", a, t, r));
+ } break;
+ case OP_F2I:
+ {
+ float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]);
+ int32_t t = a;
+ r = REINTERPRET_CAST(uint32_t, t);
+ latency = LATENCY_F2I;
+ D_EXEC(qemu_log("F2I a=%f t=%d, r=%08x\n", a, t, r));
+ } break;
+ case OP_I2F:
+ {
+ int32_t a = REINTERPRET_CAST(int32_t, s->gp_regs[reg_a]);
+ float t = a;
+ r = REINTERPRET_CAST(uint32_t, t);
+ latency = LATENCY_I2F;
+ D_EXEC(qemu_log("I2F a=%08x t=%f, r=%08x\n", a, t, r));
+ } break;
+ case OP_VECTOUT:
+ {
+ uint32_t a = cpu_to_be32(s->gp_regs[reg_a]);
+ uint32_t b = cpu_to_be32(s->gp_regs[reg_b]);
+ hwaddr dma_ptr =
+ get_dma_address(s->regs[R_MESHBASE],
+ s->gp_regs[GPR_X], s->gp_regs[GPR_Y]);
+ cpu_physical_memory_write(dma_ptr, (uint8_t *)&a, 4);
+ cpu_physical_memory_write(dma_ptr + 4, (uint8_t *)&b, 4);
+ s->regs[R_LASTDMA] = dma_ptr + 4;
+ D_EXEC(qemu_log("VECTOUT a=%08x b=%08x dma=%08x\n", a, b, dma_ptr));
+ trace_milkymist_pfpu_vectout(a, b, dma_ptr);
+ } break;
+ case OP_SIN:
+ {
+ int32_t a = REINTERPRET_CAST(int32_t, s->gp_regs[reg_a]);
+ float t = sinf(a * (1.0f / (M_PI * 4096.0f)));
+ r = REINTERPRET_CAST(uint32_t, t);
+ latency = LATENCY_SIN;
+ D_EXEC(qemu_log("SIN a=%d t=%f, r=%08x\n", a, t, r));
+ } break;
+ case OP_COS:
+ {
+ int32_t a = REINTERPRET_CAST(int32_t, s->gp_regs[reg_a]);
+ float t = cosf(a * (1.0f / (M_PI * 4096.0f)));
+ r = REINTERPRET_CAST(uint32_t, t);
+ latency = LATENCY_COS;
+ D_EXEC(qemu_log("COS a=%d t=%f, r=%08x\n", a, t, r));
+ } break;
+ case OP_ABOVE:
+ {
+ float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]);
+ float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]);
+ float t = (a > b) ? 1.0f : 0.0f;
+ r = REINTERPRET_CAST(uint32_t, t);
+ latency = LATENCY_ABOVE;
+ D_EXEC(qemu_log("ABOVE a=%f b=%f t=%f, r=%08x\n", a, b, t, r));
+ } break;
+ case OP_EQUAL:
+ {
+ float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]);
+ float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]);
+ float t = (a == b) ? 1.0f : 0.0f;
+ r = REINTERPRET_CAST(uint32_t, t);
+ latency = LATENCY_EQUAL;
+ D_EXEC(qemu_log("EQUAL a=%f b=%f t=%f, r=%08x\n", a, b, t, r));
+ } break;
+ case OP_COPY:
+ {
+ r = s->gp_regs[reg_a];
+ latency = LATENCY_COPY;
+ D_EXEC(qemu_log("COPY"));
+ } break;
+ case OP_IF:
+ {
+ float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]);
+ float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]);
+ uint32_t f = s->gp_regs[GPR_FLAGS];
+ float t = (f != 0) ? a : b;
+ r = REINTERPRET_CAST(uint32_t, t);
+ latency = LATENCY_IF;
+ D_EXEC(qemu_log("IF f=%u a=%f b=%f t=%f, r=%08x\n", f, a, b, t, r));
+ } break;
+ case OP_TSIGN:
+ {
+ float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]);
+ float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]);
+ float t = (b < 0) ? -a : a;
+ r = REINTERPRET_CAST(uint32_t, t);
+ latency = LATENCY_TSIGN;
+ D_EXEC(qemu_log("TSIGN a=%f b=%f t=%f, r=%08x\n", a, b, t, r));
+ } break;
+ case OP_QUAKE:
+ {
+ uint32_t a = s->gp_regs[reg_a];
+ r = 0x5f3759df - (a >> 1);
+ latency = LATENCY_QUAKE;
+ D_EXEC(qemu_log("QUAKE a=%d r=%08x\n", a, r));
+ } break;
+
+ default:
+ error_report("milkymist_pfpu: unknown opcode %d", op);
+ break;
+ }
+
+ if (!reg_d) {
+ D_EXEC(qemu_log("%04d %8s R%03d, R%03d <L=%d, E=%04d>\n",
+ s->regs[R_PC], opcode_to_str[op], reg_a, reg_b, latency,
+ s->regs[R_PC] + latency));
+ } else {
+ D_EXEC(qemu_log("%04d %8s R%03d, R%03d <L=%d, E=%04d> -> R%03d\n",
+ s->regs[R_PC], opcode_to_str[op], reg_a, reg_b, latency,
+ s->regs[R_PC] + latency, reg_d));
+ }
+
+ if (op == OP_VECTOUT) {
+ return 0;
+ }
+
+ /* store output for this cycle */
+ if (reg_d) {
+ uint32_t val = output_queue_remove(s);
+ D_EXEC(qemu_log("R%03d <- 0x%08x\n", reg_d, val));
+ s->gp_regs[reg_d] = val;
+ }
+
+ output_queue_advance(s);
+
+ /* store op output */
+ if (op != OP_NOP) {
+ output_queue_insert(s, r, latency-1);
+ }
+
+ /* advance PC */
+ s->regs[R_PC]++;
+
+ return 1;
+}
+
+static void pfpu_start(MilkymistPFPUState *s)
+{
+ int x, y;
+ int i;
+
+ for (y = 0; y <= s->regs[R_VMESHLAST]; y++) {
+ for (x = 0; x <= s->regs[R_HMESHLAST]; x++) {
+ D_EXEC(qemu_log("\nprocessing x=%d y=%d\n", x, y));
+
+ /* set current position */
+ s->gp_regs[GPR_X] = x;
+ s->gp_regs[GPR_Y] = y;
+
+ /* run microcode on this position */
+ i = 0;
+ while (pfpu_decode_insn(s)) {
+ /* decode at most MICROCODE_WORDS instructions */
+ if (i++ >= MICROCODE_WORDS) {
+ error_report("milkymist_pfpu: too many instructions "
+ "executed in microcode. No VECTOUT?");
+ break;
+ }
+ }
+
+ /* reset pc for next run */
+ s->regs[R_PC] = 0;
+ }
+ }
+
+ s->regs[R_VERTICES] = x * y;
+
+ trace_milkymist_pfpu_pulse_irq();
+ qemu_irq_pulse(s->irq);
+}
+
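+ /* R_CODEPAGE selects which 512-word page of the microcode store is mapped */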
+static inline int get_microcode_address(MilkymistPFPUState *s, uint32_t addr)
+{
+ return (512 * s->regs[R_CODEPAGE]) + addr - MICROCODE_BEGIN;
+}
+
+static uint64_t pfpu_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ MilkymistPFPUState *s = opaque;
+ uint32_t r = 0;
+
+ addr >>= 2;
+ switch (addr) {
+ case R_CTL:
+ case R_MESHBASE:
+ case R_HMESHLAST:
+ case R_VMESHLAST:
+ case R_CODEPAGE:
+ case R_VERTICES:
+ case R_COLLISIONS:
+ case R_STRAYWRITES:
+ case R_LASTDMA:
+ case R_PC:
+ case R_DREGBASE:
+ case R_CODEBASE:
+ r = s->regs[addr];
+ break;
+ case GPR_BEGIN ... GPR_END:
+ r = s->gp_regs[addr - GPR_BEGIN];
+ break;
+ case MICROCODE_BEGIN ... MICROCODE_END:
+ r = s->microcode[get_microcode_address(s, addr)];
+ break;
+
+ default:
+ error_report("milkymist_pfpu: read access to unknown register 0x"
+ TARGET_FMT_plx, addr << 2);
+ break;
+ }
+
+ trace_milkymist_pfpu_memory_read(addr << 2, r);
+
+ return r;
+}
+
+static void pfpu_write(void *opaque, hwaddr addr, uint64_t value,
+ unsigned size)
+{
+ MilkymistPFPUState *s = opaque;
+
+ trace_milkymist_pfpu_memory_write(addr, value);
+
+ addr >>= 2;
+ switch (addr) {
+ case R_CTL:
+ if (value & CTL_START_BUSY) {
+ pfpu_start(s);
+ }
+ break;
+ case R_MESHBASE:
+ case R_HMESHLAST:
+ case R_VMESHLAST:
+ case R_CODEPAGE:
+ case R_VERTICES:
+ case R_COLLISIONS:
+ case R_STRAYWRITES:
+ case R_LASTDMA:
+ case R_PC:
+ case R_DREGBASE:
+ case R_CODEBASE:
+ s->regs[addr] = value;
+ break;
+ case GPR_BEGIN ... GPR_END:
+ s->gp_regs[addr - GPR_BEGIN] = value;
+ break;
+ case MICROCODE_BEGIN ... MICROCODE_END:
+ s->microcode[get_microcode_address(s, addr)] = value;
+ break;
+
+ default:
+ error_report("milkymist_pfpu: write access to unknown register 0x"
+ TARGET_FMT_plx, addr << 2);
+ break;
+ }
+}
+
+static const MemoryRegionOps pfpu_mmio_ops = {
+ .read = pfpu_read,
+ .write = pfpu_write,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void milkymist_pfpu_reset(DeviceState *d)
+{
+ MilkymistPFPUState *s = container_of(d, MilkymistPFPUState, busdev.qdev);
+ int i;
+
+ for (i = 0; i < R_MAX; i++) {
+ s->regs[i] = 0;
+ }
+ for (i = 0; i < 128; i++) {
+ s->gp_regs[i] = 0;
+ }
+ for (i = 0; i < MICROCODE_WORDS; i++) {
+ s->microcode[i] = 0;
+ }
+ s->output_queue_pos = 0;
+ for (i = 0; i < MAX_LATENCY; i++) {
+ s->output_queue[i] = 0;
+ }
+}
+
+static int milkymist_pfpu_init(SysBusDevice *dev)
+{
+ MilkymistPFPUState *s = FROM_SYSBUS(typeof(*s), dev);
+
+ sysbus_init_irq(dev, &s->irq);
+
+ memory_region_init_io(&s->regs_region, &pfpu_mmio_ops, s,
+ "milkymist-pfpu", MICROCODE_END * 4);
+ sysbus_init_mmio(dev, &s->regs_region);
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_milkymist_pfpu = {
+ .name = "milkymist-pfpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, MilkymistPFPUState, R_MAX),
+ VMSTATE_UINT32_ARRAY(gp_regs, MilkymistPFPUState, 128),
+ VMSTATE_UINT32_ARRAY(microcode, MilkymistPFPUState, MICROCODE_WORDS),
+ VMSTATE_INT32(output_queue_pos, MilkymistPFPUState),
+ VMSTATE_UINT32_ARRAY(output_queue, MilkymistPFPUState, MAX_LATENCY),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void milkymist_pfpu_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+
+ k->init = milkymist_pfpu_init;
+ dc->reset = milkymist_pfpu_reset;
+ dc->vmsd = &vmstate_milkymist_pfpu;
+}
+
+static const TypeInfo milkymist_pfpu_info = {
+ .name = "milkymist-pfpu",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(MilkymistPFPUState),
+ .class_init = milkymist_pfpu_class_init,
+};
+
+static void milkymist_pfpu_register_types(void)
+{
+ type_register_static(&milkymist_pfpu_info);
+}
+
+type_init(milkymist_pfpu_register_types)
diff --git a/hw/misc/mst_fpga.c b/hw/misc/mst_fpga.c
new file mode 100644
index 0000000..1dd1505
--- /dev/null
+++ b/hw/misc/mst_fpga.c
@@ -0,0 +1,263 @@
+/*
+ * PXA270-based Intel Mainstone platforms.
+ * FPGA driver
+ *
+ * Copyright (c) 2007 by Armin Kuster <akuster@kama-aina.net> or
+ * <akuster@mvista.com>
+ *
+ * This code is licensed under the GNU GPL v2.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+#include "hw/hw.h"
+#include "hw/sysbus.h"
+
+/* Mainstone FPGA for external irqs */
+#define FPGA_GPIO_PIN 0
+#define MST_NUM_IRQS 16
+#define MST_LEDDAT1 0x10
+#define MST_LEDDAT2 0x14
+#define MST_LEDCTRL 0x40
+#define MST_GPSWR 0x60
+#define MST_MSCWR1 0x80
+#define MST_MSCWR2 0x84
+#define MST_MSCWR3 0x88
+#define MST_MSCRD 0x90
+#define MST_INTMSKENA 0xc0
+#define MST_INTSETCLR 0xd0
+#define MST_PCMCIA0 0xe0
+#define MST_PCMCIA1 0xe4
+
+#define MST_PCMCIAx_READY (1 << 10)
+#define MST_PCMCIAx_nCD (1 << 5)
+
+#define MST_PCMCIA_CD0_IRQ 9
+#define MST_PCMCIA_CD1_IRQ 13
+
+typedef struct mst_irq_state{
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+
+ qemu_irq parent;
+
+ uint32_t prev_level;
+ uint32_t leddat1;
+ uint32_t leddat2;
+ uint32_t ledctrl;
+ uint32_t gpswr;
+ uint32_t mscwr1;
+ uint32_t mscwr2;
+ uint32_t mscwr3;
+ uint32_t mscrd;
+ uint32_t intmskena;
+ uint32_t intsetclr;
+ uint32_t pcmcia0;
+ uint32_t pcmcia1;
+}mst_irq_state;
+
+static void
+mst_fpga_set_irq(void *opaque, int irq, int level)
+{
+ mst_irq_state *s = (mst_irq_state *)opaque;
+ uint32_t oldint = s->intsetclr & s->intmskena;
+
+ if (level)
+ s->prev_level |= 1u << irq;
+ else
+ s->prev_level &= ~(1u << irq);
+
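+ /* Card detect is active low (nCD): a high level on the line marks a card as present */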
+ switch(irq) {
+ case MST_PCMCIA_CD0_IRQ:
+ if (level)
+ s->pcmcia0 &= ~MST_PCMCIAx_nCD;
+ else
+ s->pcmcia0 |= MST_PCMCIAx_nCD;
+ break;
+ case MST_PCMCIA_CD1_IRQ:
+ if (level)
+ s->pcmcia1 &= ~MST_PCMCIAx_nCD;
+ else
+ s->pcmcia1 |= MST_PCMCIAx_nCD;
+ break;
+ }
+
+ if ((s->intmskena & (1u << irq)) && level)
+ s->intsetclr |= 1u << irq;
+
+ if (oldint != (s->intsetclr & s->intmskena))
+ qemu_set_irq(s->parent, s->intsetclr & s->intmskena);
+}
+
+
+static uint64_t
+mst_fpga_readb(void *opaque, hwaddr addr, unsigned size)
+{
+ mst_irq_state *s = (mst_irq_state *) opaque;
+
+ switch (addr) {
+ case MST_LEDDAT1:
+ return s->leddat1;
+ case MST_LEDDAT2:
+ return s->leddat2;
+ case MST_LEDCTRL:
+ return s->ledctrl;
+ case MST_GPSWR:
+ return s->gpswr;
+ case MST_MSCWR1:
+ return s->mscwr1;
+ case MST_MSCWR2:
+ return s->mscwr2;
+ case MST_MSCWR3:
+ return s->mscwr3;
+ case MST_MSCRD:
+ return s->mscrd;
+ case MST_INTMSKENA:
+ return s->intmskena;
+ case MST_INTSETCLR:
+ return s->intsetclr;
+ case MST_PCMCIA0:
+ return s->pcmcia0;
+ case MST_PCMCIA1:
+ return s->pcmcia1;
+ default:
+ printf("Mainstone - mst_fpga_readb: Bad register offset "
+ "0x" TARGET_FMT_plx "\n", addr);
+ }
+ return 0;
+}
+
+static void
+mst_fpga_writeb(void *opaque, hwaddr addr, uint64_t value,
+ unsigned size)
+{
+ mst_irq_state *s = (mst_irq_state *) opaque;
+ value &= 0xffffffff;
+
+ switch (addr) {
+ case MST_LEDDAT1:
+ s->leddat1 = value;
+ break;
+ case MST_LEDDAT2:
+ s->leddat2 = value;
+ break;
+ case MST_LEDCTRL:
+ s->ledctrl = value;
+ break;
+ case MST_GPSWR:
+ s->gpswr = value;
+ break;
+ case MST_MSCWR1:
+ s->mscwr1 = value;
+ break;
+ case MST_MSCWR2:
+ s->mscwr2 = value;
+ break;
+ case MST_MSCWR3:
+ s->mscwr3 = value;
+ break;
+ case MST_MSCRD:
+ s->mscrd = value;
+ break;
+ case MST_INTMSKENA: /* Mask interrupt */
+ s->intmskena = (value & 0xFEEFF);
+ qemu_set_irq(s->parent, s->intsetclr & s->intmskena);
+ break;
+ case MST_INTSETCLR: /* clear or set interrupt */
+ s->intsetclr = (value & 0xFEEFF);
+ qemu_set_irq(s->parent, s->intsetclr & s->intmskena);
+ break;
+ /* For PCMCIAx, allow changing only the power and reset bits */
+ case MST_PCMCIA0:
+ s->pcmcia0 = (value & 0x1f) | (s->pcmcia0 & ~0x1f);
+ break;
+ case MST_PCMCIA1:
+ s->pcmcia1 = (value & 0x1f) | (s->pcmcia1 & ~0x1f);
+ break;
+ default:
+ printf("Mainstone - mst_fpga_writeb: Bad register offset "
+ "0x" TARGET_FMT_plx "\n", addr);
+ }
+}
+
+static const MemoryRegionOps mst_fpga_ops = {
+ .read = mst_fpga_readb,
+ .write = mst_fpga_writeb,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static int mst_fpga_post_load(void *opaque, int version_id)
+{
+ mst_irq_state *s = (mst_irq_state *) opaque;
+
+ qemu_set_irq(s->parent, s->intsetclr & s->intmskena);
+ return 0;
+}
+
+static int mst_fpga_init(SysBusDevice *dev)
+{
+ mst_irq_state *s;
+
+ s = FROM_SYSBUS(mst_irq_state, dev);
+
+ s->pcmcia0 = MST_PCMCIAx_READY | MST_PCMCIAx_nCD;
+ s->pcmcia1 = MST_PCMCIAx_READY | MST_PCMCIAx_nCD;
+
+ sysbus_init_irq(dev, &s->parent);
+
+ /* alloc the external 16 irqs */
+ qdev_init_gpio_in(&dev->qdev, mst_fpga_set_irq, MST_NUM_IRQS);
+
+ memory_region_init_io(&s->iomem, &mst_fpga_ops, s,
+ "fpga", 0x00100000);
+ sysbus_init_mmio(dev, &s->iomem);
+ return 0;
+}
+
+static VMStateDescription vmstate_mst_fpga_regs = {
+ .name = "mainstone_fpga",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .minimum_version_id_old = 0,
+ .post_load = mst_fpga_post_load,
+ .fields = (VMStateField []) {
+ VMSTATE_UINT32(prev_level, mst_irq_state),
+ VMSTATE_UINT32(leddat1, mst_irq_state),
+ VMSTATE_UINT32(leddat2, mst_irq_state),
+ VMSTATE_UINT32(ledctrl, mst_irq_state),
+ VMSTATE_UINT32(gpswr, mst_irq_state),
+ VMSTATE_UINT32(mscwr1, mst_irq_state),
+ VMSTATE_UINT32(mscwr2, mst_irq_state),
+ VMSTATE_UINT32(mscwr3, mst_irq_state),
+ VMSTATE_UINT32(mscrd, mst_irq_state),
+ VMSTATE_UINT32(intmskena, mst_irq_state),
+ VMSTATE_UINT32(intsetclr, mst_irq_state),
+ VMSTATE_UINT32(pcmcia0, mst_irq_state),
+ VMSTATE_UINT32(pcmcia1, mst_irq_state),
+ VMSTATE_END_OF_LIST(),
+ },
+};
+
+static void mst_fpga_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+
+ k->init = mst_fpga_init;
+ dc->desc = "Mainstone II FPGA";
+ dc->vmsd = &vmstate_mst_fpga_regs;
+}
+
+static const TypeInfo mst_fpga_info = {
+ .name = "mainstone-fpga",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(mst_irq_state),
+ .class_init = mst_fpga_class_init,
+};
+
+static void mst_fpga_register_types(void)
+{
+ type_register_static(&mst_fpga_info);
+}
+
+type_init(mst_fpga_register_types)
diff --git a/hw/misc/omap_clk.c b/hw/misc/omap_clk.c
new file mode 100644
index 0000000..80a3c50
--- /dev/null
+++ b/hw/misc/omap_clk.c
@@ -0,0 +1,1264 @@
+/*
+ * OMAP clocks.
+ *
+ * Copyright (C) 2006-2008 Andrzej Zaborowski <balrog@zabor.org>
+ *
+ * Clocks data comes in part from arch/arm/mach-omap1/clock.h in Linux.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "hw/hw.h"
+#include "hw/arm/omap.h"
+
+struct clk {
+ const char *name;
+ const char *alias;
+ struct clk *parent;
+ struct clk *child1;
+ struct clk *sibling;
+#define ALWAYS_ENABLED (1 << 0)
+#define CLOCK_IN_OMAP310 (1 << 10)
+#define CLOCK_IN_OMAP730 (1 << 11)
+#define CLOCK_IN_OMAP1510 (1 << 12)
+#define CLOCK_IN_OMAP16XX (1 << 13)
+#define CLOCK_IN_OMAP242X (1 << 14)
+#define CLOCK_IN_OMAP243X (1 << 15)
+#define CLOCK_IN_OMAP343X (1 << 16)
+ uint32_t flags;
+ int id;
+
+ int running; /* Is currently ticking */
+ int enabled; /* Is enabled, regardless of its input clk */
+ unsigned long rate; /* Current rate (if .running) */
+ unsigned int divisor; /* Rate relative to input (if .enabled) */
+ unsigned int multiplier; /* Rate relative to input (if .enabled) */
+ qemu_irq users[16]; /* Who to notify on change */
+ int usecount; /* Automatically idle when unused */
+};
+
+static struct clk xtal_osc12m = {
+ .name = "xtal_osc_12m",
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+};
+
+static struct clk xtal_osc32k = {
+ .name = "xtal_osc_32k",
+ .rate = 32768,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+};
+
+static struct clk ck_ref = {
+ .name = "ck_ref",
+ .alias = "clkin",
+ .parent = &xtal_osc12m,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ ALWAYS_ENABLED,
+};
+
+/* If a dpll is disabled it becomes a bypass, child clocks don't stop */
+static struct clk dpll1 = {
+ .name = "dpll1",
+ .parent = &ck_ref,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ ALWAYS_ENABLED,
+};
+
+static struct clk dpll2 = {
+ .name = "dpll2",
+ .parent = &ck_ref,
+ .flags = CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
+};
+
+static struct clk dpll3 = {
+ .name = "dpll3",
+ .parent = &ck_ref,
+ .flags = CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
+};
+
+static struct clk dpll4 = {
+ .name = "dpll4",
+ .parent = &ck_ref,
+ .multiplier = 4,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+};
+
+static struct clk apll = {
+ .name = "apll",
+ .parent = &ck_ref,
+ .multiplier = 48,
+ .divisor = 12,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+};
+
+static struct clk ck_48m = {
+ .name = "ck_48m",
+ .parent = &dpll4, /* either dpll4 or apll */
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+};
+
+static struct clk ck_dpll1out = {
+ .name = "ck_dpll1out",
+ .parent = &dpll1,
+ .flags = CLOCK_IN_OMAP16XX,
+};
+
+static struct clk sossi_ck = {
+ .name = "ck_sossi",
+ .parent = &ck_dpll1out,
+ .flags = CLOCK_IN_OMAP16XX,
+};
+
+static struct clk clkm1 = {
+ .name = "clkm1",
+ .alias = "ck_gen1",
+ .parent = &dpll1,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ ALWAYS_ENABLED,
+};
+
+static struct clk clkm2 = {
+ .name = "clkm2",
+ .alias = "ck_gen2",
+ .parent = &dpll1,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ ALWAYS_ENABLED,
+};
+
+static struct clk clkm3 = {
+ .name = "clkm3",
+ .alias = "ck_gen3",
+ .parent = &dpll1, /* either dpll1 or ck_ref */
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ ALWAYS_ENABLED,
+};
+
+static struct clk arm_ck = {
+ .name = "arm_ck",
+ .alias = "mpu_ck",
+ .parent = &clkm1,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ ALWAYS_ENABLED,
+};
+
+static struct clk armper_ck = {
+ .name = "armper_ck",
+ .alias = "mpuper_ck",
+ .parent = &clkm1,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+};
+
+static struct clk arm_gpio_ck = {
+ .name = "arm_gpio_ck",
+ .alias = "mpu_gpio_ck",
+ .parent = &clkm1,
+ .divisor = 1,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
+};
+
+static struct clk armxor_ck = {
+ .name = "armxor_ck",
+ .alias = "mpuxor_ck",
+ .parent = &ck_ref,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+};
+
+static struct clk armtim_ck = {
+ .name = "armtim_ck",
+ .alias = "mputim_ck",
+ .parent = &ck_ref, /* either CLKIN or DPLL1 */
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+};
+
+static struct clk armwdt_ck = {
+ .name = "armwdt_ck",
+ .alias = "mpuwd_ck",
+ .parent = &clkm1,
+ .divisor = 14,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ ALWAYS_ENABLED,
+};
+
+static struct clk arminth_ck16xx = {
+ .name = "arminth_ck",
+ .parent = &arm_ck,
+ .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
+ /* Note: On 16xx the frequency can be divided by 2 by programming
+ * ARM_CKCTL:ARM_INTHCK_SEL(14) to 1
+ *
+ * 1510 version is in TC clocks.
+ */
+};
+
+static struct clk dsp_ck = {
+ .name = "dsp_ck",
+ .parent = &clkm2,
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
+};
+
+static struct clk dspmmu_ck = {
+ .name = "dspmmu_ck",
+ .parent = &clkm2,
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ ALWAYS_ENABLED,
+};
+
+static struct clk dspper_ck = {
+ .name = "dspper_ck",
+ .parent = &clkm2,
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
+};
+
+static struct clk dspxor_ck = {
+ .name = "dspxor_ck",
+ .parent = &ck_ref,
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
+};
+
+static struct clk dsptim_ck = {
+ .name = "dsptim_ck",
+ .parent = &ck_ref,
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
+};
+
+static struct clk tc_ck = {
+ .name = "tc_ck",
+ .parent = &clkm3,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ CLOCK_IN_OMAP730 | CLOCK_IN_OMAP310 |
+ ALWAYS_ENABLED,
+};
+
+static struct clk arminth_ck15xx = {
+ .name = "arminth_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
+ /* Note: On 1510 the frequency follows TC_CK
+ *
+ * 16xx version is in MPU clocks.
+ */
+};
+
+static struct clk tipb_ck = {
+ /* No-idle controlled by "tc_ck" */
+ .name = "tipb_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
+};
+
+static struct clk l3_ocpi_ck = {
+ /* No-idle controlled by "tc_ck" */
+ .name = "l3_ocpi_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP16XX,
+};
+
+static struct clk tc1_ck = {
+ .name = "tc1_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP16XX,
+};
+
+static struct clk tc2_ck = {
+ .name = "tc2_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP16XX,
+};
+
+static struct clk dma_ck = {
+ /* No-idle controlled by "tc_ck" */
+ .name = "dma_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ ALWAYS_ENABLED,
+};
+
+static struct clk dma_lcdfree_ck = {
+ .name = "dma_lcdfree_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
+};
+
+static struct clk api_ck = {
+ .name = "api_ck",
+ .alias = "mpui_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+};
+
+static struct clk lb_ck = {
+ .name = "lb_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
+};
+
+static struct clk lbfree_ck = {
+ .name = "lbfree_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
+};
+
+static struct clk hsab_ck = {
+ .name = "hsab_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
+};
+
+static struct clk rhea1_ck = {
+ .name = "rhea1_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
+};
+
+static struct clk rhea2_ck = {
+ .name = "rhea2_ck",
+ .parent = &tc_ck,
+ .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
+};
+
+static struct clk lcd_ck_16xx = {
+ .name = "lcd_ck",
+ .parent = &clkm3,
+ .flags = CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP730,
+};
+
+static struct clk lcd_ck_1510 = {
+ .name = "lcd_ck",
+ .parent = &clkm3,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
+};
+
+static struct clk uart1_1510 = {
+ .name = "uart1_ck",
+ /* Direct from ULPD, no real parent */
+ .parent = &armper_ck, /* either armper_ck or dpll4 */
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
+};
+
+static struct clk uart1_16xx = {
+ .name = "uart1_ck",
+ /* Direct from ULPD, no real parent */
+ .parent = &armper_ck,
+ .rate = 48000000,
+ .flags = CLOCK_IN_OMAP16XX,
+};
+
+static struct clk uart2_ck = {
+ .name = "uart2_ck",
+ /* Direct from ULPD, no real parent */
+ .parent = &armper_ck, /* either armper_ck or dpll4 */
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310 |
+ ALWAYS_ENABLED,
+};
+
+static struct clk uart3_1510 = {
+ .name = "uart3_ck",
+ /* Direct from ULPD, no real parent */
+ .parent = &armper_ck, /* either armper_ck or dpll4 */
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310 | ALWAYS_ENABLED,
+};
+
+static struct clk uart3_16xx = {
+ .name = "uart3_ck",
+ /* Direct from ULPD, no real parent */
+ .parent = &armper_ck,
+ .rate = 48000000,
+ .flags = CLOCK_IN_OMAP16XX,
+};
+
+static struct clk usb_clk0 = { /* 6 MHz output on W4_USB_CLK0 */
+ .name = "usb_clk0",
+ .alias = "usb.clko",
+ /* Direct from ULPD, no parent */
+ .rate = 6000000,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+};
+
+static struct clk usb_hhc_ck1510 = {
+ .name = "usb_hhc_ck",
+ /* Direct from ULPD, no parent */
+ .rate = 48000000, /* Actually 2 clocks, 12MHz and 48MHz */
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP310,
+};
+
+static struct clk usb_hhc_ck16xx = {
+ .name = "usb_hhc_ck",
+ /* Direct from ULPD, no parent */
+ .rate = 48000000,
+ /* OTG_SYSCON_2.OTG_PADEN == 0 (not 1510-compatible) */
+ .flags = CLOCK_IN_OMAP16XX,
+};
+
+static struct clk usb_w2fc_mclk = {
+ .name = "usb_w2fc_mclk",
+ .alias = "usb_w2fc_ck",
+ .parent = &ck_48m,
+ .rate = 48000000,
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
+};
+
+static struct clk mclk_1510 = {
+ .name = "mclk",
+ /* Direct from ULPD, no parent. May be enabled by ext hardware. */
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP1510,
+};
+
+static struct clk bclk_310 = {
+ .name = "bt_mclk_out", /* Alias midi_mclk_out? */
+ .parent = &armper_ck,
+ .flags = CLOCK_IN_OMAP310,
+};
+
+static struct clk mclk_310 = {
+ .name = "com_mclk_out",
+ .parent = &armper_ck,
+ .flags = CLOCK_IN_OMAP310,
+};
+
+static struct clk mclk_16xx = {
+ .name = "mclk",
+ /* Direct from ULPD, no parent. May be enabled by ext hardware. */
+ .flags = CLOCK_IN_OMAP16XX,
+};
+
+static struct clk bclk_1510 = {
+ .name = "bclk",
+ /* Direct from ULPD, no parent. May be enabled by ext hardware. */
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP1510,
+};
+
+static struct clk bclk_16xx = {
+ .name = "bclk",
+ /* Direct from ULPD, no parent. May be enabled by ext hardware. */
+ .flags = CLOCK_IN_OMAP16XX,
+};
+
+static struct clk mmc1_ck = {
+ .name = "mmc_ck",
+ .id = 1,
+ /* Functional clock is direct from ULPD, interface clock is ARMPER */
+ .parent = &armper_ck, /* either armper_ck or dpll4 */
+ .rate = 48000000,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP310,
+};
+
+static struct clk mmc2_ck = {
+ .name = "mmc_ck",
+ .id = 2,
+ /* Functional clock is direct from ULPD, interface clock is ARMPER */
+ .parent = &armper_ck,
+ .rate = 48000000,
+ .flags = CLOCK_IN_OMAP16XX,
+};
+
+static struct clk cam_mclk = {
+ .name = "cam.mclk",
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
+ .rate = 12000000,
+};
+
+static struct clk cam_exclk = {
+ .name = "cam.exclk",
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
+ /* Either 12M from cam.mclk or 48M from dpll4 */
+ .parent = &cam_mclk,
+};
+
+static struct clk cam_lclk = {
+ .name = "cam.lclk",
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
+};
+
+static struct clk i2c_fck = {
+ .name = "i2c_fck",
+ .id = 1,
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ ALWAYS_ENABLED,
+ .parent = &armxor_ck,
+};
+
+static struct clk i2c_ick = {
+ .name = "i2c_ick",
+ .id = 1,
+ .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
+ .parent = &armper_ck,
+};
+
+static struct clk clk32k = {
+ .name = "clk32-kHz",
+ .flags = CLOCK_IN_OMAP310 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED,
+ .parent = &xtal_osc32k,
+};
+
+static struct clk ref_clk = {
+ .name = "ref_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED,
+ .rate = 12000000, /* 12 MHz or 13 MHz or 19.2 MHz */
+ /*.parent = sys.xtalin */
+};
+
+static struct clk apll_96m = {
+ .name = "apll_96m",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED,
+ .rate = 96000000,
+ /*.parent = ref_clk */
+};
+
+static struct clk apll_54m = {
+ .name = "apll_54m",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED,
+ .rate = 54000000,
+ /*.parent = ref_clk */
+};
+
+static struct clk sys_clk = {
+ .name = "sys_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED,
+ .rate = 32768,
+ /*.parent = sys.xtalin */
+};
+
+static struct clk sleep_clk = {
+ .name = "sleep_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED,
+ .rate = 32768,
+ /*.parent = sys.xtalin */
+};
+
+static struct clk dpll_ck = {
+ .name = "dpll",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED,
+ .parent = &ref_clk,
+};
+
+static struct clk dpll_x2_ck = {
+ .name = "dpll_x2",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED,
+ .parent = &ref_clk,
+};
+
+static struct clk wdt1_sys_clk = {
+ .name = "wdt1_sys_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | ALWAYS_ENABLED,
+ .rate = 32768,
+ /*.parent = sys.xtalin */
+};
+
+static struct clk func_96m_clk = {
+ .name = "func_96m_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .divisor = 1,
+ .parent = &apll_96m,
+};
+
+static struct clk func_48m_clk = {
+ .name = "func_48m_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .divisor = 2,
+ .parent = &apll_96m,
+};
+
+static struct clk func_12m_clk = {
+ .name = "func_12m_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .divisor = 8,
+ .parent = &apll_96m,
+};
+
+static struct clk func_54m_clk = {
+ .name = "func_54m_clk",
+ .flags = CLOCK_IN_OMAP242X,
+ .divisor = 1,
+ .parent = &apll_54m,
+};
+
+static struct clk sys_clkout = {
+ .name = "clkout",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &sys_clk,
+};
+
+static struct clk sys_clkout2 = {
+ .name = "clkout2",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &sys_clk,
+};
+
+static struct clk core_clk = {
+ .name = "core_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &dpll_x2_ck, /* Switchable between dpll_ck and clk32k */
+};
+
+static struct clk l3_clk = {
+ .name = "l3_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &core_clk,
+};
+
+static struct clk core_l4_iclk = {
+ .name = "core_l4_iclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &l3_clk,
+};
+
+static struct clk wu_l4_iclk = {
+ .name = "wu_l4_iclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &l3_clk,
+};
+
+static struct clk core_l3_iclk = {
+ .name = "core_l3_iclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &core_clk,
+};
+
+static struct clk core_l4_usb_clk = {
+ .name = "core_l4_usb_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &l3_clk,
+};
+
+static struct clk wu_gpt1_clk = {
+ .name = "wu_gpt1_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &sys_clk,
+};
+
+static struct clk wu_32k_clk = {
+ .name = "wu_32k_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &sys_clk,
+};
+
+static struct clk uart1_fclk = {
+ .name = "uart1_fclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &func_48m_clk,
+};
+
+static struct clk uart1_iclk = {
+ .name = "uart1_iclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &core_l4_iclk,
+};
+
+static struct clk uart2_fclk = {
+ .name = "uart2_fclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &func_48m_clk,
+};
+
+static struct clk uart2_iclk = {
+ .name = "uart2_iclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &core_l4_iclk,
+};
+
+static struct clk uart3_fclk = {
+ .name = "uart3_fclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &func_48m_clk,
+};
+
+static struct clk uart3_iclk = {
+ .name = "uart3_iclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &core_l4_iclk,
+};
+
+static struct clk mpu_fclk = {
+ .name = "mpu_fclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &core_clk,
+};
+
+static struct clk mpu_iclk = {
+ .name = "mpu_iclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &core_clk,
+};
+
+static struct clk int_m_fclk = {
+ .name = "int_m_fclk",
+ .alias = "mpu_intc_fclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &core_clk,
+};
+
+static struct clk int_m_iclk = {
+ .name = "int_m_iclk",
+ .alias = "mpu_intc_iclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &core_clk,
+};
+
+static struct clk core_gpt2_clk = {
+ .name = "core_gpt2_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &sys_clk,
+};
+
+static struct clk core_gpt3_clk = {
+ .name = "core_gpt3_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &sys_clk,
+};
+
+static struct clk core_gpt4_clk = {
+ .name = "core_gpt4_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &sys_clk,
+};
+
+static struct clk core_gpt5_clk = {
+ .name = "core_gpt5_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &sys_clk,
+};
+
+static struct clk core_gpt6_clk = {
+ .name = "core_gpt6_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &sys_clk,
+};
+
+static struct clk core_gpt7_clk = {
+ .name = "core_gpt7_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &sys_clk,
+};
+
+static struct clk core_gpt8_clk = {
+ .name = "core_gpt8_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &sys_clk,
+};
+
+static struct clk core_gpt9_clk = {
+ .name = "core_gpt9_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &sys_clk,
+};
+
+static struct clk core_gpt10_clk = {
+ .name = "core_gpt10_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &sys_clk,
+};
+
+static struct clk core_gpt11_clk = {
+ .name = "core_gpt11_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &sys_clk,
+};
+
+static struct clk core_gpt12_clk = {
+ .name = "core_gpt12_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &sys_clk,
+};
+
+static struct clk mcbsp1_clk = {
+ .name = "mcbsp1_cg",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .divisor = 2,
+ .parent = &func_96m_clk,
+};
+
+static struct clk mcbsp2_clk = {
+ .name = "mcbsp2_cg",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .divisor = 2,
+ .parent = &func_96m_clk,
+};
+
+static struct clk emul_clk = {
+ .name = "emul_ck",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &func_54m_clk,
+};
+
+static struct clk sdma_fclk = {
+ .name = "sdma_fclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &l3_clk,
+};
+
+static struct clk sdma_iclk = {
+ .name = "sdma_iclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &core_l3_iclk, /* core_l4_iclk for the configuration port */
+};
+
+static struct clk i2c1_fclk = {
+ .name = "i2c1.fclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &func_12m_clk,
+ .divisor = 1,
+};
+
+static struct clk i2c1_iclk = {
+ .name = "i2c1.iclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &core_l4_iclk,
+};
+
+static struct clk i2c2_fclk = {
+ .name = "i2c2.fclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &func_12m_clk,
+ .divisor = 1,
+};
+
+static struct clk i2c2_iclk = {
+ .name = "i2c2.iclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &core_l4_iclk,
+};
+
+static struct clk gpio_dbclk[5] = {
+ {
+ .name = "gpio1_dbclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &wu_32k_clk,
+ }, {
+ .name = "gpio2_dbclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &wu_32k_clk,
+ }, {
+ .name = "gpio3_dbclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &wu_32k_clk,
+ }, {
+ .name = "gpio4_dbclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &wu_32k_clk,
+ }, {
+ .name = "gpio5_dbclk",
+ .flags = CLOCK_IN_OMAP243X,
+ .parent = &wu_32k_clk,
+ },
+};
+
+static struct clk gpio_iclk = {
+ .name = "gpio_iclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &wu_l4_iclk,
+};
+
+static struct clk mmc_fck = {
+ .name = "mmc_fclk",
+ .flags = CLOCK_IN_OMAP242X,
+ .parent = &func_96m_clk,
+};
+
+static struct clk mmc_ick = {
+ .name = "mmc_iclk",
+ .flags = CLOCK_IN_OMAP242X,
+ .parent = &core_l4_iclk,
+};
+
+static struct clk spi_fclk[3] = {
+ {
+ .name = "spi1_fclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &func_48m_clk,
+ }, {
+ .name = "spi2_fclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &func_48m_clk,
+ }, {
+ .name = "spi3_fclk",
+ .flags = CLOCK_IN_OMAP243X,
+ .parent = &func_48m_clk,
+ },
+};
+
+static struct clk dss_clk[2] = {
+ {
+ .name = "dss_clk1",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &core_clk,
+ }, {
+ .name = "dss_clk2",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &sys_clk,
+ },
+};
+
+static struct clk dss_54m_clk = {
+ .name = "dss_54m_clk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &func_54m_clk,
+};
+
+static struct clk dss_l3_iclk = {
+ .name = "dss_l3_iclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &core_l3_iclk,
+};
+
+static struct clk dss_l4_iclk = {
+ .name = "dss_l4_iclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &core_l4_iclk,
+};
+
+static struct clk spi_iclk[3] = {
+ {
+ .name = "spi1_iclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &core_l4_iclk,
+ }, {
+ .name = "spi2_iclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .parent = &core_l4_iclk,
+ }, {
+ .name = "spi3_iclk",
+ .flags = CLOCK_IN_OMAP243X,
+ .parent = &core_l4_iclk,
+ },
+};
+
+static struct clk omapctrl_clk = {
+ .name = "omapctrl_iclk",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ /* XXX Should be in WKUP domain */
+ .parent = &core_l4_iclk,
+};
+
+static struct clk *onchip_clks[] = {
+ /* OMAP 1 */
+
+ /* non-ULPD clocks */
+ &xtal_osc12m,
+ &xtal_osc32k,
+ &ck_ref,
+ &dpll1,
+ &dpll2,
+ &dpll3,
+ &dpll4,
+ &apll,
+ &ck_48m,
+ /* CK_GEN1 clocks */
+ &clkm1,
+ &ck_dpll1out,
+ &sossi_ck,
+ &arm_ck,
+ &armper_ck,
+ &arm_gpio_ck,
+ &armxor_ck,
+ &armtim_ck,
+ &armwdt_ck,
+ &arminth_ck15xx, &arminth_ck16xx,
+ /* CK_GEN2 clocks */
+ &clkm2,
+ &dsp_ck,
+ &dspmmu_ck,
+ &dspper_ck,
+ &dspxor_ck,
+ &dsptim_ck,
+ /* CK_GEN3 clocks */
+ &clkm3,
+ &tc_ck,
+ &tipb_ck,
+ &l3_ocpi_ck,
+ &tc1_ck,
+ &tc2_ck,
+ &dma_ck,
+ &dma_lcdfree_ck,
+ &api_ck,
+ &lb_ck,
+ &lbfree_ck,
+ &hsab_ck,
+ &rhea1_ck,
+ &rhea2_ck,
+ &lcd_ck_16xx,
+ &lcd_ck_1510,
+ /* ULPD clocks */
+ &uart1_1510,
+ &uart1_16xx,
+ &uart2_ck,
+ &uart3_1510,
+ &uart3_16xx,
+ &usb_clk0,
+ &usb_hhc_ck1510, &usb_hhc_ck16xx,
+ &mclk_1510, &mclk_16xx, &mclk_310,
+ &bclk_1510, &bclk_16xx, &bclk_310,
+ &mmc1_ck,
+ &mmc2_ck,
+ &cam_mclk,
+ &cam_exclk,
+ &cam_lclk,
+ &clk32k,
+ &usb_w2fc_mclk,
+ /* Virtual clocks */
+ &i2c_fck,
+ &i2c_ick,
+
+ /* OMAP 2 */
+
+ &ref_clk,
+ &apll_96m,
+ &apll_54m,
+ &sys_clk,
+ &sleep_clk,
+ &dpll_ck,
+ &dpll_x2_ck,
+ &wdt1_sys_clk,
+ &func_96m_clk,
+ &func_48m_clk,
+ &func_12m_clk,
+ &func_54m_clk,
+ &sys_clkout,
+ &sys_clkout2,
+ &core_clk,
+ &l3_clk,
+ &core_l4_iclk,
+ &wu_l4_iclk,
+ &core_l3_iclk,
+ &core_l4_usb_clk,
+ &wu_gpt1_clk,
+ &wu_32k_clk,
+ &uart1_fclk,
+ &uart1_iclk,
+ &uart2_fclk,
+ &uart2_iclk,
+ &uart3_fclk,
+ &uart3_iclk,
+ &mpu_fclk,
+ &mpu_iclk,
+ &int_m_fclk,
+ &int_m_iclk,
+ &core_gpt2_clk,
+ &core_gpt3_clk,
+ &core_gpt4_clk,
+ &core_gpt5_clk,
+ &core_gpt6_clk,
+ &core_gpt7_clk,
+ &core_gpt8_clk,
+ &core_gpt9_clk,
+ &core_gpt10_clk,
+ &core_gpt11_clk,
+ &core_gpt12_clk,
+ &mcbsp1_clk,
+ &mcbsp2_clk,
+ &emul_clk,
+ &sdma_fclk,
+ &sdma_iclk,
+ &i2c1_fclk,
+ &i2c1_iclk,
+ &i2c2_fclk,
+ &i2c2_iclk,
+ &gpio_dbclk[0],
+ &gpio_dbclk[1],
+ &gpio_dbclk[2],
+ &gpio_dbclk[3],
+ &gpio_iclk,
+ &mmc_fck,
+ &mmc_ick,
+ &spi_fclk[0],
+ &spi_iclk[0],
+ &spi_fclk[1],
+ &spi_iclk[1],
+ &spi_fclk[2],
+ &spi_iclk[2],
+ &dss_clk[0],
+ &dss_clk[1],
+ &dss_54m_clk,
+ &dss_l3_iclk,
+ &dss_l4_iclk,
+ &omapctrl_clk,
+
+ NULL
+};
+
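+ /* Store the user in the first free (NULL) slot of the users array */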
+void omap_clk_adduser(struct clk *clk, qemu_irq user)
+{
+ qemu_irq *i;
+
+ for (i = clk->users; *i; i ++);
+ *i = user;
+}
+
+struct clk *omap_findclk(struct omap_mpu_state_s *mpu, const char *name)
+{
+ struct clk *i;
+
+ for (i = mpu->clks; i->name; i ++)
+ if (!strcmp(i->name, name) || (i->alias && !strcmp(i->alias, name)))
+ return i;
+ hw_error("%s: %s not found\n", __FUNCTION__, name);
+}
+
+void omap_clk_get(struct clk *clk)
+{
+ clk->usecount ++;
+}
+
+void omap_clk_put(struct clk *clk)
+{
+ if (!(clk->usecount --))
+ hw_error("%s: %s is not in use\n", __FUNCTION__, clk->name);
+}
+
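+ /* Recompute whether this clock is ticking, notify its users and recurse into child clocks */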
+static void omap_clk_update(struct clk *clk)
+{
+ int parent, running;
+ qemu_irq *user;
+ struct clk *i;
+
+ if (clk->parent)
+ parent = clk->parent->running;
+ else
+ parent = 1;
+
+ running = parent && (clk->enabled ||
+ ((clk->flags & ALWAYS_ENABLED) && clk->usecount));
+ if (clk->running != running) {
+ clk->running = running;
+ for (user = clk->users; *user; user ++)
+ qemu_set_irq(*user, running);
+ for (i = clk->child1; i; i = i->sibling)
+ omap_clk_update(i);
+ }
+}
+
+static void omap_clk_rate_update_full(struct clk *clk, unsigned long int rate,
+ unsigned long int div, unsigned long int mult)
+{
+ struct clk *i;
+ qemu_irq *user;
+
+ clk->rate = muldiv64(rate, mult, div);
+ if (clk->running)
+ for (user = clk->users; *user; user ++)
+ qemu_irq_raise(*user);
+ for (i = clk->child1; i; i = i->sibling)
+ omap_clk_rate_update_full(i, rate,
+ div * i->divisor, mult * i->multiplier);
+}
+
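+ /* Walk up to the root accumulating divisors and multipliers, then
+ * propagate the recomputed rates down this clock's subtree.
+ */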
+static void omap_clk_rate_update(struct clk *clk)
+{
+ struct clk *i;
+ unsigned long int div, mult = div = 1;
+
+ for (i = clk; i->parent; i = i->parent) {
+ div *= i->divisor;
+ mult *= i->multiplier;
+ }
+
+ omap_clk_rate_update_full(clk, i->rate, div, mult);
+}
+
+void omap_clk_reparent(struct clk *clk, struct clk *parent)
+{
+ struct clk **p;
+
+ if (clk->parent) {
+ for (p = &clk->parent->child1; *p != clk; p = &(*p)->sibling);
+ *p = clk->sibling;
+ }
+
+ clk->parent = parent;
+ if (parent) {
+ clk->sibling = parent->child1;
+ parent->child1 = clk;
+ omap_clk_update(clk);
+ omap_clk_rate_update(clk);
+ } else
+ clk->sibling = NULL;
+}
+
+void omap_clk_onoff(struct clk *clk, int on)
+{
+ clk->enabled = on;
+ omap_clk_update(clk);
+}
+
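+ /* A clock that may idle drops a reference; one that must stay active takes one */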
+void omap_clk_canidle(struct clk *clk, int can)
+{
+ if (can)
+ omap_clk_put(clk);
+ else
+ omap_clk_get(clk);
+}
+
+void omap_clk_setrate(struct clk *clk, int divide, int multiply)
+{
+ clk->divisor = divide;
+ clk->multiplier = multiply;
+ omap_clk_rate_update(clk);
+}
+
+int64_t omap_clk_getrate(omap_clk clk)
+{
+ return clk->rate;
+}
+
+void omap_clk_init(struct omap_mpu_state_s *mpu)
+{
+ struct clk **i, *j, *k;
+ int count;
+ int flag;
+
+ if (cpu_is_omap310(mpu))
+ flag = CLOCK_IN_OMAP310;
+ else if (cpu_is_omap1510(mpu))
+ flag = CLOCK_IN_OMAP1510;
+ else if (cpu_is_omap2410(mpu) || cpu_is_omap2420(mpu))
+ flag = CLOCK_IN_OMAP242X;
+ else if (cpu_is_omap2430(mpu))
+ flag = CLOCK_IN_OMAP243X;
+ else if (cpu_is_omap3430(mpu))
+ flag = CLOCK_IN_OMAP243X;
+ else
+ return;
+
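+ /* Copy the clock templates that apply to this chip into a private array
+ * and rebuild the parent/child links by matching clock names.
+ */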
+ for (i = onchip_clks, count = 0; *i; i ++)
+ if ((*i)->flags & flag)
+ count ++;
+ mpu->clks = (struct clk *) g_malloc0(sizeof(struct clk) * (count + 1));
+ for (i = onchip_clks, j = mpu->clks; *i; i ++)
+ if ((*i)->flags & flag) {
+ memcpy(j, *i, sizeof(struct clk));
+ for (k = mpu->clks; k < j; k ++)
+ if (j->parent && !strcmp(j->parent->name, k->name)) {
+ j->parent = k;
+ j->sibling = k->child1;
+ k->child1 = j;
+ } else if (k->parent && !strcmp(k->parent->name, j->name)) {
+ k->parent = j;
+ k->sibling = j->child1;
+ j->child1 = k;
+ }
+ j->divisor = j->divisor ?: 1;
+ j->multiplier = j->multiplier ?: 1;
+ j ++;
+ }
+ for (j = mpu->clks; count --; j ++) {
+ omap_clk_update(j);
+ omap_clk_rate_update(j);
+ }
+}
diff --git a/hw/misc/omap_gpmc.c b/hw/misc/omap_gpmc.c
new file mode 100644
index 0000000..91adb66
--- /dev/null
+++ b/hw/misc/omap_gpmc.c
@@ -0,0 +1,894 @@
+/*
+ * TI OMAP general purpose memory controller emulation.
+ *
+ * Copyright (C) 2007-2009 Nokia Corporation
+ * Original code written by Andrzej Zaborowski <andrew@openedhand.com>
+ * Enhancements for OMAP3 and NAND support written by Juha Riihimäki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) any later version of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "hw/hw.h"
+#include "hw/block/flash.h"
+#include "hw/arm/omap.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
+
+/* General-Purpose Memory Controller */
+struct omap_gpmc_s {
+ qemu_irq irq;
+ qemu_irq drq;
+ MemoryRegion iomem;
+ int accept_256;
+
+ uint8_t revision;
+ uint8_t sysconfig;
+ uint16_t irqst;
+ uint16_t irqen;
+ uint16_t lastirq;
+ uint16_t timeout;
+ uint16_t config;
+ struct omap_gpmc_cs_file_s {
+ uint32_t config[7];
+ MemoryRegion *iomem;
+ MemoryRegion container;
+ MemoryRegion nandiomem;
+ DeviceState *dev;
+ } cs_file[8];
+ int ecc_cs;
+ int ecc_ptr;
+ uint32_t ecc_cfg;
+ ECCState ecc[9];
+ struct prefetch {
+ uint32_t config1; /* GPMC_PREFETCH_CONFIG1 */
+ uint32_t transfercount; /* GPMC_PREFETCH_CONFIG2:TRANSFERCOUNT */
+ int startengine; /* GPMC_PREFETCH_CONTROL:STARTENGINE */
+ int fifopointer; /* GPMC_PREFETCH_STATUS:FIFOPOINTER */
+ int count; /* GPMC_PREFETCH_STATUS:COUNTVALUE */
+ MemoryRegion iomem;
+ uint8_t fifo[64];
+ } prefetch;
+};
+
+#define OMAP_GPMC_8BIT 0
+#define OMAP_GPMC_16BIT 1
+#define OMAP_GPMC_NOR 0
+#define OMAP_GPMC_NAND 2
+
+static int omap_gpmc_devtype(struct omap_gpmc_cs_file_s *f)
+{
+ return (f->config[0] >> 10) & 3;
+}
+
+static int omap_gpmc_devsize(struct omap_gpmc_cs_file_s *f)
+{
+ /* devsize field is really 2 bits but we ignore the high
+ * bit to ensure consistent behaviour if the guest sets
+ * it (values 2 and 3 are reserved in the TRM)
+ */
+ return (f->config[0] >> 12) & 1;
+}
+
+/* Extract the chip-select value from the prefetch config1 register */
+static int prefetch_cs(uint32_t config1)
+{
+ return (config1 >> 24) & 7;
+}
+
+static int prefetch_threshold(uint32_t config1)
+{
+ return (config1 >> 8) & 0x7f;
+}
+
+static void omap_gpmc_int_update(struct omap_gpmc_s *s)
+{
+ /* The TRM is a bit unclear, but it seems to say that
+ * the TERMINALCOUNTSTATUS bit is set only on the
+ * transition when the prefetch engine goes from
+ * active to inactive, whereas the FIFOEVENTSTATUS
+ * bit is held high as long as the fifo has at
+ * least THRESHOLD bytes available.
+ * So we do the latter here, but TERMINALCOUNTSTATUS
+ * is set elsewhere.
+ */
+ if (s->prefetch.fifopointer >= prefetch_threshold(s->prefetch.config1)) {
+ s->irqst |= 1;
+ }
+ if ((s->irqen & s->irqst) != s->lastirq) {
+ s->lastirq = s->irqen & s->irqst;
+ qemu_set_irq(s->irq, s->lastirq);
+ }
+}
+
+static void omap_gpmc_dma_update(struct omap_gpmc_s *s, int value)
+{
+ if (s->prefetch.config1 & 4) {
+ qemu_set_irq(s->drq, value);
+ }
+}
+
+/* Access functions for when a NAND-like device is mapped into memory:
+ * all addresses in the region behave like accesses to the relevant
+ * GPMC_NAND_DATA_i register (which is actually implemented to call these)
+ */
+static uint64_t omap_nand_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ struct omap_gpmc_cs_file_s *f = (struct omap_gpmc_cs_file_s *)opaque;
+ uint64_t v;
+ nand_setpins(f->dev, 0, 0, 0, 1, 0);
+ switch (omap_gpmc_devsize(f)) {
+ case OMAP_GPMC_8BIT:
+ v = nand_getio(f->dev);
+ if (size == 1) {
+ return v;
+ }
+ v |= (nand_getio(f->dev) << 8);
+ if (size == 2) {
+ return v;
+ }
+ v |= (nand_getio(f->dev) << 16);
+ v |= (nand_getio(f->dev) << 24);
+ return v;
+ case OMAP_GPMC_16BIT:
+ v = nand_getio(f->dev);
+ if (size == 1) {
+ /* 8 bit read from 16 bit device : probably a guest bug */
+ return v & 0xff;
+ }
+ if (size == 2) {
+ return v;
+ }
+ v |= (nand_getio(f->dev) << 16);
+ return v;
+ default:
+ abort();
+ }
+}
+
+static void omap_nand_setio(DeviceState *dev, uint64_t value,
+ int nandsize, int size)
+{
+ /* Write the specified value to the NAND device, respecting
+ * both size of the NAND device and size of the write access.
+ */
+ switch (nandsize) {
+ case OMAP_GPMC_8BIT:
+ switch (size) {
+ case 1:
+ nand_setio(dev, value & 0xff);
+ break;
+ case 2:
+ nand_setio(dev, value & 0xff);
+ nand_setio(dev, (value >> 8) & 0xff);
+ break;
+ case 4:
+ default:
+ nand_setio(dev, value & 0xff);
+ nand_setio(dev, (value >> 8) & 0xff);
+ nand_setio(dev, (value >> 16) & 0xff);
+ nand_setio(dev, (value >> 24) & 0xff);
+ break;
+ }
+ break;
+ case OMAP_GPMC_16BIT:
+ switch (size) {
+ case 1:
+ /* writing to a 16bit device with 8bit access is probably a guest
+ * bug; pass the value through anyway.
+ */
+ case 2:
+ nand_setio(dev, value & 0xffff);
+ break;
+ case 4:
+ default:
+ nand_setio(dev, value & 0xffff);
+ nand_setio(dev, (value >> 16) & 0xffff);
+ break;
+ }
+ break;
+ }
+}
+
+static void omap_nand_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ struct omap_gpmc_cs_file_s *f = (struct omap_gpmc_cs_file_s *)opaque;
+ nand_setpins(f->dev, 0, 0, 0, 1, 0);
+ omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
+}
+
+static const MemoryRegionOps omap_nand_ops = {
+ .read = omap_nand_read,
+ .write = omap_nand_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void fill_prefetch_fifo(struct omap_gpmc_s *s)
+{
+ /* Fill the prefetch FIFO by reading data from NAND.
+ * We do this synchronously, unlike the hardware which
+ * will do this asynchronously. We refill when the
+ * FIFO has THRESHOLD bytes free, and we always refill
+ * as much data as possible starting at the top end
+ * of the FIFO.
+ * (We have to refill at THRESHOLD rather than waiting
+ * for the FIFO to empty to allow for the case where
+ * the FIFO size isn't an exact multiple of THRESHOLD
+ * and we're doing DMA transfers.)
+ * This means we never need to handle wrap-around in
+ * the fifo-reading code, and the next byte of data
+ * to read is always fifo[63 - fifopointer].
+ */
+ int fptr;
+ int cs = prefetch_cs(s->prefetch.config1);
+ int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0);
+ int bytes;
+ /* Don't believe the bit of the OMAP TRM that says that COUNTVALUE
+ * and TRANSFERCOUNT are in units of 16 bit words for 16 bit NAND.
+ * Instead believe the bit that says it is always a byte count.
+ */
+ bytes = 64 - s->prefetch.fifopointer;
+ if (bytes > s->prefetch.count) {
+ bytes = s->prefetch.count;
+ }
+ s->prefetch.count -= bytes;
+ s->prefetch.fifopointer += bytes;
+ fptr = 64 - s->prefetch.fifopointer;
+ /* Move the existing data in the FIFO so it sits just
+ * before what we're about to read in
+ */
+ while (fptr < (64 - bytes)) {
+ s->prefetch.fifo[fptr] = s->prefetch.fifo[fptr + bytes];
+ fptr++;
+ }
+ while (fptr < 64) {
+ if (is16bit) {
+ uint32_t v = omap_nand_read(&s->cs_file[cs], 0, 2);
+ s->prefetch.fifo[fptr++] = v & 0xff;
+ s->prefetch.fifo[fptr++] = (v >> 8) & 0xff;
+ } else {
+ s->prefetch.fifo[fptr++] = omap_nand_read(&s->cs_file[cs], 0, 1);
+ }
+ }
+ if (s->prefetch.startengine && (s->prefetch.count == 0)) {
+ /* This was the final transfer: raise TERMINALCOUNTSTATUS */
+ s->irqst |= 2;
+ s->prefetch.startengine = 0;
+ }
+ /* If there are any bytes in the FIFO at this point then
+ * we must raise a DMA request (either this is a final part
+ * transfer, or we filled the FIFO in which case we certainly
+ * have THRESHOLD bytes available)
+ */
+ if (s->prefetch.fifopointer != 0) {
+ omap_gpmc_dma_update(s, 1);
+ }
+ omap_gpmc_int_update(s);
+}
+
+/* Access functions for a NAND-like device when the prefetch/postwrite
+ * engine is enabled -- all addresses in the region behave alike:
+ * data is read or written to the FIFO.
+ */
+static uint64_t omap_gpmc_prefetch_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
+ uint32_t data;
+ if (s->prefetch.config1 & 1) {
+ /* The TRM doesn't define the behaviour if you read from the
+ * FIFO when the prefetch engine is in write mode. We choose
+ * to always return zero.
+ */
+ return 0;
+ }
+ /* Note that trying to read an empty fifo repeats the last byte */
+ if (s->prefetch.fifopointer) {
+ s->prefetch.fifopointer--;
+ }
+ data = s->prefetch.fifo[63 - s->prefetch.fifopointer];
+ if (s->prefetch.fifopointer ==
+ (64 - prefetch_threshold(s->prefetch.config1))) {
+ /* We've drained THRESHOLD bytes now. So deassert the
+ * DMA request, then refill the FIFO (which will probably
+ * assert it again.)
+ */
+ omap_gpmc_dma_update(s, 0);
+ fill_prefetch_fifo(s);
+ }
+ omap_gpmc_int_update(s);
+ return data;
+}
+
+static void omap_gpmc_prefetch_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
+ int cs = prefetch_cs(s->prefetch.config1);
+ if ((s->prefetch.config1 & 1) == 0) {
+ /* The TRM doesn't define the behaviour of writing to the
+ * FIFO when the prefetch engine is in read mode. We
+ * choose to ignore the write.
+ */
+ return;
+ }
+ if (s->prefetch.count == 0) {
+ /* The TRM doesn't define the behaviour of writing to the
+ * FIFO if the transfer is complete. We choose to ignore.
+ */
+ return;
+ }
+ /* The only reason we do any data buffering in postwrite
+ * mode is if we are talking to a 16 bit NAND device, in
+ * which case we need to buffer the first byte of the
+ * 16 bit word until the other byte arrives.
+ */
+ int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0);
+ if (is16bit) {
+ /* fifopointer alternates between 64 (waiting for first
+ * byte of word) and 63 (waiting for second byte)
+ */
+ if (s->prefetch.fifopointer == 64) {
+ s->prefetch.fifo[0] = value;
+ s->prefetch.fifopointer--;
+ } else {
+ value = (value << 8) | s->prefetch.fifo[0];
+ omap_nand_write(&s->cs_file[cs], 0, value, 2);
+ s->prefetch.count--;
+ s->prefetch.fifopointer = 64;
+ }
+ } else {
+ /* Just write the byte : fifopointer remains 64 at all times */
+ omap_nand_write(&s->cs_file[cs], 0, value, 1);
+ s->prefetch.count--;
+ }
+ if (s->prefetch.count == 0) {
+ /* Final transfer: raise TERMINALCOUNTSTATUS */
+ s->irqst |= 2;
+ s->prefetch.startengine = 0;
+ }
+ omap_gpmc_int_update(s);
+}
+
+static const MemoryRegionOps omap_prefetch_ops = {
+ .read = omap_gpmc_prefetch_read,
+ .write = omap_gpmc_prefetch_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .impl.min_access_size = 1,
+ .impl.max_access_size = 1,
+};
+
+static MemoryRegion *omap_gpmc_cs_memregion(struct omap_gpmc_s *s, int cs)
+{
+ /* Return the MemoryRegion* to map/unmap for this chipselect */
+ struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
+ if (omap_gpmc_devtype(f) == OMAP_GPMC_NOR) {
+ return f->iomem;
+ }
+ if ((s->prefetch.config1 & 0x80) &&
+ (prefetch_cs(s->prefetch.config1) == cs)) {
+ /* The prefetch engine is enabled for this CS: map the FIFO */
+ return &s->prefetch.iomem;
+ }
+ return &f->nandiomem;
+}
+
+static void omap_gpmc_cs_map(struct omap_gpmc_s *s, int cs)
+{
+ struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
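+    /* GPMC_CONFIG7_i: BASEADDRESS in bits 5:0 (units of 16MB),
+     * MASKADDRESS in bits 11:8, CSVALID in bit 6
+     */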
+ uint32_t mask = (f->config[6] >> 8) & 0xf;
+ uint32_t base = f->config[6] & 0x3f;
+ uint32_t size;
+
+ if (!f->iomem && !f->dev) {
+ return;
+ }
+
+ if (!(f->config[6] & (1 << 6))) {
+ /* Do nothing unless CSVALID */
+ return;
+ }
+
+ /* TODO: check for overlapping regions and report access errors */
+ if (mask != 0x8 && mask != 0xc && mask != 0xe && mask != 0xf
+ && !(s->accept_256 && !mask)) {
+ fprintf(stderr, "%s: invalid chip-select mask address (0x%x)\n",
+ __func__, mask);
+ }
+
+ base <<= 24;
+ size = (0x0fffffff & ~(mask << 24)) + 1;
+ /* TODO: rather than setting the size of the mapping (which should be
+ * constant), the mask should cause wrapping of the address space, so
+ * that the same memory becomes accessible at every <i>size</i> bytes
+ * starting from <i>base</i>. */
+ memory_region_init(&f->container, "omap-gpmc-file", size);
+ memory_region_add_subregion(&f->container, 0,
+ omap_gpmc_cs_memregion(s, cs));
+ memory_region_add_subregion(get_system_memory(), base,
+ &f->container);
+}
+
+static void omap_gpmc_cs_unmap(struct omap_gpmc_s *s, int cs)
+{
+ struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
+ if (!(f->config[6] & (1 << 6))) {
+ /* Do nothing unless CSVALID */
+ return;
+ }
+ if (!f->iomem && !f->dev) {
+ return;
+ }
+ memory_region_del_subregion(get_system_memory(), &f->container);
+ memory_region_del_subregion(&f->container, omap_gpmc_cs_memregion(s, cs));
+ memory_region_destroy(&f->container);
+}
+
+void omap_gpmc_reset(struct omap_gpmc_s *s)
+{
+ int i;
+
+ s->sysconfig = 0;
+ s->irqst = 0;
+ s->irqen = 0;
+ omap_gpmc_int_update(s);
+ for (i = 0; i < 8; i++) {
+ /* This has to happen before we change any of the config
+ * used to determine which memory regions are mapped or unmapped.
+ */
+ omap_gpmc_cs_unmap(s, i);
+ }
+ s->timeout = 0;
+ s->config = 0xa00;
+ s->prefetch.config1 = 0x00004000;
+ s->prefetch.transfercount = 0x00000000;
+ s->prefetch.startengine = 0;
+ s->prefetch.fifopointer = 0;
+ s->prefetch.count = 0;
+ for (i = 0; i < 8; i ++) {
+ s->cs_file[i].config[1] = 0x101001;
+ s->cs_file[i].config[2] = 0x020201;
+ s->cs_file[i].config[3] = 0x10031003;
+ s->cs_file[i].config[4] = 0x10f1111;
+ s->cs_file[i].config[5] = 0;
+        s->cs_file[i].config[6] = 0xf00;
+ /* In theory we could probe attached devices for some CFG1
+ * bits here, but we just retain them across resets as they
+ * were set initially by omap_gpmc_attach().
+ */
+ if (i == 0) {
+ s->cs_file[i].config[0] &= 0x00433e00;
+ s->cs_file[i].config[6] |= 1 << 6; /* CSVALID */
+ omap_gpmc_cs_map(s, i);
+ } else {
+ s->cs_file[i].config[0] &= 0x00403c00;
+ }
+ }
+ s->ecc_cs = 0;
+ s->ecc_ptr = 0;
+ s->ecc_cfg = 0x3fcff000;
+ for (i = 0; i < 9; i ++)
+ ecc_reset(&s->ecc[i]);
+}
+
+static int gpmc_wordaccess_only(hwaddr addr)
+{
+ /* Return true if the register offset is to a register that
+ * only permits word width accesses.
+ * Non-word accesses are only OK for GPMC_NAND_DATA/ADDRESS/COMMAND
+ * for any chipselect.
+ */
+ if (addr >= 0x60 && addr <= 0x1d4) {
+ int cs = (addr - 0x60) / 0x30;
+ addr -= cs * 0x30;
+ if (addr >= 0x7c && addr < 0x88) {
+ /* GPMC_NAND_COMMAND, GPMC_NAND_ADDRESS, GPMC_NAND_DATA */
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static uint64_t omap_gpmc_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
+ int cs;
+ struct omap_gpmc_cs_file_s *f;
+
+ if (size != 4 && gpmc_wordaccess_only(addr)) {
+ return omap_badwidth_read32(opaque, addr);
+ }
+
+ switch (addr) {
+ case 0x000: /* GPMC_REVISION */
+ return s->revision;
+
+ case 0x010: /* GPMC_SYSCONFIG */
+ return s->sysconfig;
+
+ case 0x014: /* GPMC_SYSSTATUS */
+ return 1; /* RESETDONE */
+
+ case 0x018: /* GPMC_IRQSTATUS */
+ return s->irqst;
+
+ case 0x01c: /* GPMC_IRQENABLE */
+ return s->irqen;
+
+ case 0x040: /* GPMC_TIMEOUT_CONTROL */
+ return s->timeout;
+
+ case 0x044: /* GPMC_ERR_ADDRESS */
+ case 0x048: /* GPMC_ERR_TYPE */
+ return 0;
+
+ case 0x050: /* GPMC_CONFIG */
+ return s->config;
+
+ case 0x054: /* GPMC_STATUS */
+ return 0x001;
+
+ case 0x060 ... 0x1d4:
+ cs = (addr - 0x060) / 0x30;
+ addr -= cs * 0x30;
+ f = s->cs_file + cs;
+ switch (addr) {
+ case 0x60: /* GPMC_CONFIG1 */
+ return f->config[0];
+ case 0x64: /* GPMC_CONFIG2 */
+ return f->config[1];
+ case 0x68: /* GPMC_CONFIG3 */
+ return f->config[2];
+ case 0x6c: /* GPMC_CONFIG4 */
+ return f->config[3];
+ case 0x70: /* GPMC_CONFIG5 */
+ return f->config[4];
+ case 0x74: /* GPMC_CONFIG6 */
+ return f->config[5];
+ case 0x78: /* GPMC_CONFIG7 */
+ return f->config[6];
+ case 0x84 ... 0x87: /* GPMC_NAND_DATA */
+ if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
+ return omap_nand_read(f, 0, size);
+ }
+ return 0;
+ }
+ break;
+
+ case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */
+ return s->prefetch.config1;
+ case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */
+ return s->prefetch.transfercount;
+ case 0x1ec: /* GPMC_PREFETCH_CONTROL */
+ return s->prefetch.startengine;
+ case 0x1f0: /* GPMC_PREFETCH_STATUS */
+ /* NB: The OMAP3 TRM is inconsistent about whether the GPMC
+ * FIFOTHRESHOLDSTATUS bit should be set when
+ * FIFOPOINTER > FIFOTHRESHOLD or when it is >= FIFOTHRESHOLD.
+ * Apparently the underlying functional spec from which the TRM was
+ * created states that the behaviour is ">=", and this also
+ * makes more conceptual sense.
+ */
+ return (s->prefetch.fifopointer << 24) |
+ ((s->prefetch.fifopointer >=
+ ((s->prefetch.config1 >> 8) & 0x7f) ? 1 : 0) << 16) |
+ s->prefetch.count;
+
+ case 0x1f4: /* GPMC_ECC_CONFIG */
+ return s->ecc_cs;
+ case 0x1f8: /* GPMC_ECC_CONTROL */
+ return s->ecc_ptr;
+ case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */
+ return s->ecc_cfg;
+ case 0x200 ... 0x220: /* GPMC_ECC_RESULT */
+ cs = (addr & 0x1f) >> 2;
+ /* TODO: check correctness */
+ return
+ ((s->ecc[cs].cp & 0x07) << 0) |
+ ((s->ecc[cs].cp & 0x38) << 13) |
+ ((s->ecc[cs].lp[0] & 0x1ff) << 3) |
+ ((s->ecc[cs].lp[1] & 0x1ff) << 19);
+
+ case 0x230: /* GPMC_TESTMODE_CTRL */
+ return 0;
+ case 0x234: /* GPMC_PSA_LSB */
+ case 0x238: /* GPMC_PSA_MSB */
+ return 0x00000000;
+ }
+
+ OMAP_BAD_REG(addr);
+ return 0;
+}
+
+static void omap_gpmc_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
+ int cs;
+ struct omap_gpmc_cs_file_s *f;
+
+ if (size != 4 && gpmc_wordaccess_only(addr)) {
+ return omap_badwidth_write32(opaque, addr, value);
+ }
+
+ switch (addr) {
+ case 0x000: /* GPMC_REVISION */
+ case 0x014: /* GPMC_SYSSTATUS */
+ case 0x054: /* GPMC_STATUS */
+ case 0x1f0: /* GPMC_PREFETCH_STATUS */
+ case 0x200 ... 0x220: /* GPMC_ECC_RESULT */
+ case 0x234: /* GPMC_PSA_LSB */
+ case 0x238: /* GPMC_PSA_MSB */
+ OMAP_RO_REG(addr);
+ break;
+
+ case 0x010: /* GPMC_SYSCONFIG */
+ if ((value >> 3) == 0x3)
+ fprintf(stderr, "%s: bad SDRAM idle mode %"PRIi64"\n",
+ __FUNCTION__, value >> 3);
+ if (value & 2)
+ omap_gpmc_reset(s);
+ s->sysconfig = value & 0x19;
+ break;
+
+ case 0x018: /* GPMC_IRQSTATUS */
+ s->irqst &= ~value;
+ omap_gpmc_int_update(s);
+ break;
+
+ case 0x01c: /* GPMC_IRQENABLE */
+ s->irqen = value & 0xf03;
+ omap_gpmc_int_update(s);
+ break;
+
+ case 0x040: /* GPMC_TIMEOUT_CONTROL */
+ s->timeout = value & 0x1ff1;
+ break;
+
+ case 0x044: /* GPMC_ERR_ADDRESS */
+ case 0x048: /* GPMC_ERR_TYPE */
+ break;
+
+ case 0x050: /* GPMC_CONFIG */
+ s->config = value & 0xf13;
+ break;
+
+ case 0x060 ... 0x1d4:
+ cs = (addr - 0x060) / 0x30;
+ addr -= cs * 0x30;
+ f = s->cs_file + cs;
+ switch (addr) {
+ case 0x60: /* GPMC_CONFIG1 */
+ f->config[0] = value & 0xffef3e13;
+ break;
+ case 0x64: /* GPMC_CONFIG2 */
+ f->config[1] = value & 0x001f1f8f;
+ break;
+ case 0x68: /* GPMC_CONFIG3 */
+ f->config[2] = value & 0x001f1f8f;
+ break;
+ case 0x6c: /* GPMC_CONFIG4 */
+ f->config[3] = value & 0x1f8f1f8f;
+ break;
+ case 0x70: /* GPMC_CONFIG5 */
+ f->config[4] = value & 0x0f1f1f1f;
+ break;
+ case 0x74: /* GPMC_CONFIG6 */
+ f->config[5] = value & 0x00000fcf;
+ break;
+ case 0x78: /* GPMC_CONFIG7 */
+ if ((f->config[6] ^ value) & 0xf7f) {
+ omap_gpmc_cs_unmap(s, cs);
+ f->config[6] = value & 0x00000f7f;
+ omap_gpmc_cs_map(s, cs);
+ }
+ break;
+ case 0x7c ... 0x7f: /* GPMC_NAND_COMMAND */
+ if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
+ nand_setpins(f->dev, 1, 0, 0, 1, 0); /* CLE */
+ omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
+ }
+ break;
+ case 0x80 ... 0x83: /* GPMC_NAND_ADDRESS */
+ if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
+ nand_setpins(f->dev, 0, 1, 0, 1, 0); /* ALE */
+ omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
+ }
+ break;
+ case 0x84 ... 0x87: /* GPMC_NAND_DATA */
+ if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
+ omap_nand_write(f, 0, value, size);
+ }
+ break;
+ default:
+ goto bad_reg;
+ }
+ break;
+
+ case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */
+ if (!s->prefetch.startengine) {
+ uint32_t newconfig1 = value & 0x7f8f7fbf;
+ uint32_t changed;
+ changed = newconfig1 ^ s->prefetch.config1;
+ if (changed & (0x80 | 0x7000000)) {
+ /* Turning the engine on or off, or mapping it somewhere else.
+ * cs_map() and cs_unmap() check the prefetch config and
+ * overall CSVALID bits, so it is sufficient to unmap-and-map
+ * both the old cs and the new one. Note that we adhere to
+ * the "unmap/change config/map" order (and not unmap twice
+ * if newcs == oldcs), otherwise we'll try to delete the wrong
+ * memory region.
+ */
+ int oldcs = prefetch_cs(s->prefetch.config1);
+ int newcs = prefetch_cs(newconfig1);
+ omap_gpmc_cs_unmap(s, oldcs);
+ if (oldcs != newcs) {
+ omap_gpmc_cs_unmap(s, newcs);
+ }
+ s->prefetch.config1 = newconfig1;
+ omap_gpmc_cs_map(s, oldcs);
+ if (oldcs != newcs) {
+ omap_gpmc_cs_map(s, newcs);
+ }
+ } else {
+ s->prefetch.config1 = newconfig1;
+ }
+ }
+ break;
+
+ case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */
+ if (!s->prefetch.startengine) {
+ s->prefetch.transfercount = value & 0x3fff;
+ }
+ break;
+
+ case 0x1ec: /* GPMC_PREFETCH_CONTROL */
+ if (s->prefetch.startengine != (value & 1)) {
+ s->prefetch.startengine = value & 1;
+ if (s->prefetch.startengine) {
+ /* Prefetch engine start */
+ s->prefetch.count = s->prefetch.transfercount;
+ if (s->prefetch.config1 & 1) {
+ /* Write */
+ s->prefetch.fifopointer = 64;
+ } else {
+ /* Read */
+ s->prefetch.fifopointer = 0;
+ fill_prefetch_fifo(s);
+ }
+ } else {
+ /* Prefetch engine forcibly stopped. The TRM
+ * doesn't define the behaviour if you do this.
+ * We clear the prefetch count, which means that
+ * we permit no more writes, and don't read any
+ * more data from NAND. The CPU can still drain
+ * the FIFO of unread data.
+ */
+ s->prefetch.count = 0;
+ }
+ omap_gpmc_int_update(s);
+ }
+ break;
+
+ case 0x1f4: /* GPMC_ECC_CONFIG */
+ s->ecc_cs = 0x8f;
+ break;
+ case 0x1f8: /* GPMC_ECC_CONTROL */
+ if (value & (1 << 8))
+ for (cs = 0; cs < 9; cs ++)
+ ecc_reset(&s->ecc[cs]);
+ s->ecc_ptr = value & 0xf;
+ if (s->ecc_ptr == 0 || s->ecc_ptr > 9) {
+ s->ecc_ptr = 0;
+ s->ecc_cs &= ~1;
+ }
+ break;
+ case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */
+ s->ecc_cfg = value & 0x3fcff1ff;
+ break;
+ case 0x230: /* GPMC_TESTMODE_CTRL */
+ if (value & 7)
+ fprintf(stderr, "%s: test mode enable attempt\n", __FUNCTION__);
+ break;
+
+ default:
+ bad_reg:
+ OMAP_BAD_REG(addr);
+ return;
+ }
+}
+
+static const MemoryRegionOps omap_gpmc_ops = {
+ .read = omap_gpmc_read,
+ .write = omap_gpmc_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+struct omap_gpmc_s *omap_gpmc_init(struct omap_mpu_state_s *mpu,
+ hwaddr base,
+ qemu_irq irq, qemu_irq drq)
+{
+ int cs;
+ struct omap_gpmc_s *s = (struct omap_gpmc_s *)
+ g_malloc0(sizeof(struct omap_gpmc_s));
+
+ memory_region_init_io(&s->iomem, &omap_gpmc_ops, s, "omap-gpmc", 0x1000);
+ memory_region_add_subregion(get_system_memory(), base, &s->iomem);
+
+ s->irq = irq;
+ s->drq = drq;
+ s->accept_256 = cpu_is_omap3630(mpu);
+ s->revision = cpu_class_omap3(mpu) ? 0x50 : 0x20;
+ s->lastirq = 0;
+ omap_gpmc_reset(s);
+
+ /* We have to register a different IO memory handler for each
+ * chip select region in case a NAND device is mapped there. We
+ * make the region the worst-case size of 256MB and rely on the
+ * container memory region in cs_map to chop it down to the actual
+ * guest-requested size.
+ */
+ for (cs = 0; cs < 8; cs++) {
+ memory_region_init_io(&s->cs_file[cs].nandiomem,
+ &omap_nand_ops,
+ &s->cs_file[cs],
+ "omap-nand",
+ 256 * 1024 * 1024);
+ }
+
+ memory_region_init_io(&s->prefetch.iomem, &omap_prefetch_ops, s,
+ "omap-gpmc-prefetch", 256 * 1024 * 1024);
+ return s;
+}
+
+void omap_gpmc_attach(struct omap_gpmc_s *s, int cs, MemoryRegion *iomem)
+{
+ struct omap_gpmc_cs_file_s *f;
+ assert(iomem);
+
+ if (cs < 0 || cs >= 8) {
+ fprintf(stderr, "%s: bad chip-select %i\n", __FUNCTION__, cs);
+ exit(-1);
+ }
+ f = &s->cs_file[cs];
+
+ omap_gpmc_cs_unmap(s, cs);
+ f->config[0] &= ~(0xf << 10);
+ f->iomem = iomem;
+ omap_gpmc_cs_map(s, cs);
+}
+
+void omap_gpmc_attach_nand(struct omap_gpmc_s *s, int cs, DeviceState *nand)
+{
+ struct omap_gpmc_cs_file_s *f;
+ assert(nand);
+
+ if (cs < 0 || cs >= 8) {
+ fprintf(stderr, "%s: bad chip-select %i\n", __func__, cs);
+ exit(-1);
+ }
+ f = &s->cs_file[cs];
+
+ omap_gpmc_cs_unmap(s, cs);
+ f->config[0] &= ~(0xf << 10);
+ f->config[0] |= (OMAP_GPMC_NAND << 10);
+ f->dev = nand;
+ if (nand_getbuswidth(f->dev) == 16) {
+ f->config[0] |= OMAP_GPMC_16BIT << 12;
+ }
+ omap_gpmc_cs_map(s, cs);
+}
diff --git a/hw/misc/omap_l4.c b/hw/misc/omap_l4.c
new file mode 100644
index 0000000..ac8251f
--- /dev/null
+++ b/hw/misc/omap_l4.c
@@ -0,0 +1,162 @@
+/*
+ * TI OMAP L4 interconnect emulation.
+ *
+ * Copyright (C) 2007-2009 Nokia Corporation
+ * Written by Andrzej Zaborowski <andrew@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) any later version of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "hw/hw.h"
+#include "hw/arm/omap.h"
+
+struct omap_l4_s {
+ MemoryRegion *address_space;
+ hwaddr base;
+ int ta_num;
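+    /* ta_num target agent entries are allocated immediately after this
+     * structure by omap_l4_init()
+     */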
+ struct omap_target_agent_s ta[0];
+};
+
+struct omap_l4_s *omap_l4_init(MemoryRegion *address_space,
+ hwaddr base, int ta_num)
+{
+ struct omap_l4_s *bus = g_malloc0(
+ sizeof(*bus) + ta_num * sizeof(*bus->ta));
+
+ bus->address_space = address_space;
+ bus->ta_num = ta_num;
+ bus->base = base;
+
+ return bus;
+}
+
+hwaddr omap_l4_region_base(struct omap_target_agent_s *ta,
+ int region)
+{
+ return ta->bus->base + ta->start[region].offset;
+}
+
+hwaddr omap_l4_region_size(struct omap_target_agent_s *ta,
+ int region)
+{
+ return ta->start[region].size;
+}
+
+static uint64_t omap_l4ta_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ struct omap_target_agent_s *s = (struct omap_target_agent_s *) opaque;
+
+ if (size != 2) {
+ return omap_badwidth_read16(opaque, addr);
+ }
+
+ switch (addr) {
+ case 0x00: /* COMPONENT */
+ return s->component;
+
+ case 0x20: /* AGENT_CONTROL */
+ return s->control;
+
+ case 0x28: /* AGENT_STATUS */
+ return s->status;
+ }
+
+ OMAP_BAD_REG(addr);
+ return 0;
+}
+
+static void omap_l4ta_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ struct omap_target_agent_s *s = (struct omap_target_agent_s *) opaque;
+
+ if (size != 4) {
+ return omap_badwidth_write32(opaque, addr, value);
+ }
+
+ switch (addr) {
+ case 0x00: /* COMPONENT */
+ case 0x28: /* AGENT_STATUS */
+ OMAP_RO_REG(addr);
+ break;
+
+ case 0x20: /* AGENT_CONTROL */
+ s->control = value & 0x01000700;
+ if (value & 1) /* OCP_RESET */
+ s->status &= ~1; /* REQ_TIMEOUT */
+ break;
+
+ default:
+ OMAP_BAD_REG(addr);
+ }
+}
+
+static const MemoryRegionOps omap_l4ta_ops = {
+ .read = omap_l4ta_read,
+ .write = omap_l4ta_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+struct omap_target_agent_s *omap_l4ta_get(struct omap_l4_s *bus,
+ const struct omap_l4_region_s *regions,
+ const struct omap_l4_agent_info_s *agents,
+ int cs)
+{
+ int i;
+ struct omap_target_agent_s *ta = NULL;
+ const struct omap_l4_agent_info_s *info = NULL;
+
+ for (i = 0; i < bus->ta_num; i ++)
+ if (agents[i].ta == cs) {
+ ta = &bus->ta[i];
+ info = &agents[i];
+ break;
+ }
+ if (!ta) {
+ fprintf(stderr, "%s: bad target agent (%i)\n", __FUNCTION__, cs);
+ exit(-1);
+ }
+
+ ta->bus = bus;
+ ta->start = &regions[info->region];
+ ta->regions = info->regions;
+
+ ta->component = ('Q' << 24) | ('E' << 16) | ('M' << 8) | ('U' << 0);
+ ta->status = 0x00000000;
+ ta->control = 0x00000200; /* XXX 01000200 for L4TAO */
+
+ memory_region_init_io(&ta->iomem, &omap_l4ta_ops, ta, "omap.l4ta",
+ omap_l4_region_size(ta, info->ta_region));
+ omap_l4_attach(ta, info->ta_region, &ta->iomem);
+
+ return ta;
+}
+
+hwaddr omap_l4_attach(struct omap_target_agent_s *ta,
+ int region, MemoryRegion *mr)
+{
+ hwaddr base;
+
+ if (region < 0 || region >= ta->regions) {
+ fprintf(stderr, "%s: bad io region (%i)\n", __FUNCTION__, region);
+ exit(-1);
+ }
+
+ base = ta->bus->base + ta->start[region].offset;
+ if (mr) {
+ memory_region_add_subregion(ta->bus->address_space, base, mr);
+ }
+
+ return base;
+}
diff --git a/hw/misc/omap_sdrc.c b/hw/misc/omap_sdrc.c
new file mode 100644
index 0000000..e38b571
--- /dev/null
+++ b/hw/misc/omap_sdrc.c
@@ -0,0 +1,168 @@
+/*
+ * TI OMAP SDRAM controller emulation.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation
+ * Written by Andrzej Zaborowski <andrew@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) any later version of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "hw/hw.h"
+#include "hw/arm/omap.h"
+
+/* SDRAM Controller Subsystem */
+struct omap_sdrc_s {
+ MemoryRegion iomem;
+ uint8_t config;
+};
+
+void omap_sdrc_reset(struct omap_sdrc_s *s)
+{
+ s->config = 0x10;
+}
+
+static uint64_t omap_sdrc_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ struct omap_sdrc_s *s = (struct omap_sdrc_s *) opaque;
+
+ if (size != 4) {
+ return omap_badwidth_read32(opaque, addr);
+ }
+
+ switch (addr) {
+ case 0x00: /* SDRC_REVISION */
+ return 0x20;
+
+ case 0x10: /* SDRC_SYSCONFIG */
+ return s->config;
+
+ case 0x14: /* SDRC_SYSSTATUS */
+ return 1; /* RESETDONE */
+
+ case 0x40: /* SDRC_CS_CFG */
+ case 0x44: /* SDRC_SHARING */
+ case 0x48: /* SDRC_ERR_ADDR */
+ case 0x4c: /* SDRC_ERR_TYPE */
+ case 0x60: /* SDRC_DLLA_SCTRL */
+ case 0x64: /* SDRC_DLLA_STATUS */
+ case 0x68: /* SDRC_DLLB_CTRL */
+ case 0x6c: /* SDRC_DLLB_STATUS */
+ case 0x70: /* SDRC_POWER */
+ case 0x80: /* SDRC_MCFG_0 */
+ case 0x84: /* SDRC_MR_0 */
+ case 0x88: /* SDRC_EMR1_0 */
+ case 0x8c: /* SDRC_EMR2_0 */
+ case 0x90: /* SDRC_EMR3_0 */
+ case 0x94: /* SDRC_DCDL1_CTRL */
+ case 0x98: /* SDRC_DCDL2_CTRL */
+ case 0x9c: /* SDRC_ACTIM_CTRLA_0 */
+ case 0xa0: /* SDRC_ACTIM_CTRLB_0 */
+ case 0xa4: /* SDRC_RFR_CTRL_0 */
+ case 0xa8: /* SDRC_MANUAL_0 */
+ case 0xb0: /* SDRC_MCFG_1 */
+ case 0xb4: /* SDRC_MR_1 */
+ case 0xb8: /* SDRC_EMR1_1 */
+ case 0xbc: /* SDRC_EMR2_1 */
+ case 0xc0: /* SDRC_EMR3_1 */
+ case 0xc4: /* SDRC_ACTIM_CTRLA_1 */
+ case 0xc8: /* SDRC_ACTIM_CTRLB_1 */
+ case 0xd4: /* SDRC_RFR_CTRL_1 */
+ case 0xd8: /* SDRC_MANUAL_1 */
+ return 0x00;
+ }
+
+ OMAP_BAD_REG(addr);
+ return 0;
+}
+
+static void omap_sdrc_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ struct omap_sdrc_s *s = (struct omap_sdrc_s *) opaque;
+
+ if (size != 4) {
+ return omap_badwidth_write32(opaque, addr, value);
+ }
+
+ switch (addr) {
+ case 0x00: /* SDRC_REVISION */
+ case 0x14: /* SDRC_SYSSTATUS */
+ case 0x48: /* SDRC_ERR_ADDR */
+ case 0x64: /* SDRC_DLLA_STATUS */
+ case 0x6c: /* SDRC_DLLB_STATUS */
+ OMAP_RO_REG(addr);
+ return;
+
+ case 0x10: /* SDRC_SYSCONFIG */
+ if ((value >> 3) != 0x2)
+ fprintf(stderr, "%s: bad SDRAM idle mode %i\n",
+ __FUNCTION__, (unsigned)value >> 3);
+ if (value & 2)
+ omap_sdrc_reset(s);
+ s->config = value & 0x18;
+ break;
+
+ case 0x40: /* SDRC_CS_CFG */
+ case 0x44: /* SDRC_SHARING */
+ case 0x4c: /* SDRC_ERR_TYPE */
+ case 0x60: /* SDRC_DLLA_SCTRL */
+ case 0x68: /* SDRC_DLLB_CTRL */
+ case 0x70: /* SDRC_POWER */
+ case 0x80: /* SDRC_MCFG_0 */
+ case 0x84: /* SDRC_MR_0 */
+ case 0x88: /* SDRC_EMR1_0 */
+ case 0x8c: /* SDRC_EMR2_0 */
+ case 0x90: /* SDRC_EMR3_0 */
+ case 0x94: /* SDRC_DCDL1_CTRL */
+ case 0x98: /* SDRC_DCDL2_CTRL */
+ case 0x9c: /* SDRC_ACTIM_CTRLA_0 */
+ case 0xa0: /* SDRC_ACTIM_CTRLB_0 */
+ case 0xa4: /* SDRC_RFR_CTRL_0 */
+ case 0xa8: /* SDRC_MANUAL_0 */
+ case 0xb0: /* SDRC_MCFG_1 */
+ case 0xb4: /* SDRC_MR_1 */
+ case 0xb8: /* SDRC_EMR1_1 */
+ case 0xbc: /* SDRC_EMR2_1 */
+ case 0xc0: /* SDRC_EMR3_1 */
+ case 0xc4: /* SDRC_ACTIM_CTRLA_1 */
+ case 0xc8: /* SDRC_ACTIM_CTRLB_1 */
+ case 0xd4: /* SDRC_RFR_CTRL_1 */
+ case 0xd8: /* SDRC_MANUAL_1 */
+ break;
+
+ default:
+ OMAP_BAD_REG(addr);
+ return;
+ }
+}
+
+static const MemoryRegionOps omap_sdrc_ops = {
+ .read = omap_sdrc_read,
+ .write = omap_sdrc_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+struct omap_sdrc_s *omap_sdrc_init(MemoryRegion *sysmem,
+ hwaddr base)
+{
+ struct omap_sdrc_s *s = (struct omap_sdrc_s *)
+ g_malloc0(sizeof(struct omap_sdrc_s));
+
+ omap_sdrc_reset(s);
+
+ memory_region_init_io(&s->iomem, &omap_sdrc_ops, s, "omap.sdrc", 0x1000);
+ memory_region_add_subregion(sysmem, base, &s->iomem);
+
+ return s;
+}
diff --git a/hw/misc/omap_tap.c b/hw/misc/omap_tap.c
new file mode 100644
index 0000000..99b70d5
--- /dev/null
+++ b/hw/misc/omap_tap.c
@@ -0,0 +1,116 @@
+/*
+ * TI OMAP TEST-Chip-level TAP emulation.
+ *
+ * Copyright (C) 2007-2008 Nokia Corporation
+ * Written by Andrzej Zaborowski <andrew@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) any later version of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hw/hw.h"
+#include "hw/arm/omap.h"
+
+/* TEST-Chip-level TAP */
+static uint64_t omap_tap_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque;
+
+ if (size != 4) {
+ return omap_badwidth_read32(opaque, addr);
+ }
+
+ switch (addr) {
+ case 0x204: /* IDCODE_reg */
+ switch (s->mpu_model) {
+ case omap2420:
+ case omap2422:
+ case omap2423:
+ return 0x5b5d902f; /* ES 2.2 */
+ case omap2430:
+ return 0x5b68a02f; /* ES 2.2 */
+ case omap3430:
+ return 0x1b7ae02f; /* ES 2 */
+ default:
+ hw_error("%s: Bad mpu model\n", __FUNCTION__);
+ }
+
+ case 0x208: /* PRODUCTION_ID_reg for OMAP2 */
+ case 0x210: /* PRODUCTION_ID_reg for OMAP3 */
+ switch (s->mpu_model) {
+ case omap2420:
+ return 0x000254f0; /* POP ESHS2.1.1 in N91/93/95, ES2 in N800 */
+ case omap2422:
+ return 0x000400f0;
+ case omap2423:
+ return 0x000800f0;
+ case omap2430:
+ return 0x000000f0;
+ case omap3430:
+ return 0x000000f0;
+ default:
+ hw_error("%s: Bad mpu model\n", __FUNCTION__);
+ }
+
+ case 0x20c:
+ switch (s->mpu_model) {
+ case omap2420:
+ case omap2422:
+ case omap2423:
+ return 0xcafeb5d9; /* ES 2.2 */
+ case omap2430:
+ return 0xcafeb68a; /* ES 2.2 */
+ case omap3430:
+ return 0xcafeb7ae; /* ES 2 */
+ default:
+ hw_error("%s: Bad mpu model\n", __FUNCTION__);
+ }
+
+ case 0x218: /* DIE_ID_reg */
+ return ('Q' << 24) | ('E' << 16) | ('M' << 8) | ('U' << 0);
+ case 0x21c: /* DIE_ID_reg */
+ return 0x54 << 24;
+ case 0x220: /* DIE_ID_reg */
+ return ('Q' << 24) | ('E' << 16) | ('M' << 8) | ('U' << 0);
+ case 0x224: /* DIE_ID_reg */
+ return ('Q' << 24) | ('E' << 16) | ('M' << 8) | ('U' << 0);
+ }
+
+ OMAP_BAD_REG(addr);
+ return 0;
+}
+
+static void omap_tap_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ if (size != 4) {
+ return omap_badwidth_write32(opaque, addr, value);
+ }
+
+ OMAP_BAD_REG(addr);
+}
+
+static const MemoryRegionOps omap_tap_ops = {
+ .read = omap_tap_read,
+ .write = omap_tap_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+void omap_tap_init(struct omap_target_agent_s *ta,
+ struct omap_mpu_state_s *mpu)
+{
+ memory_region_init_io(&mpu->tap_iomem, &omap_tap_ops, mpu, "omap.tap",
+ omap_l4_region_size(ta, 0));
+ omap_l4_attach(ta, 0, &mpu->tap_iomem);
+}
diff --git a/hw/misc/pc-testdev.c b/hw/misc/pc-testdev.c
new file mode 100644
index 0000000..32df175
--- /dev/null
+++ b/hw/misc/pc-testdev.c
@@ -0,0 +1,187 @@
+/*
+ * QEMU x86 ISA testdev
+ *
+ * Copyright (c) 2012 Avi Kivity, Gerd Hoffmann, Marcelo Tosatti
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/*
+ * This device is used to test KVM features specific to the x86 port, such
+ * as emulation, power management, interrupt routing, among others. It's meant
+ * to be used like:
+ *
+ * qemu-system-x86_64 -device pc-testdev -serial stdio \
+ * -device isa-debug-exit,iobase=0xf4,iosize=0x4 \
+ * -kernel /home/lmr/Code/virt-test.git/kvm/unittests/msr.flat
+ *
+ * Where msr.flat is one of the KVM unittests, present on a separate repo,
+ * git://git.kernel.org/pub/scm/virt/kvm/kvm-unit-tests.git
+*/
+
+#include "config-host.h"
+#if defined(CONFIG_POSIX)
+#include <sys/mman.h>
+#endif
+#include "hw/hw.h"
+#include "hw/qdev.h"
+#include "hw/isa/isa.h"
+
+#define IOMEM_LEN 0x10000
+
+typedef struct PCTestdev {
+ ISADevice parent_obj;
+
+ MemoryRegion ioport;
+ MemoryRegion flush;
+ MemoryRegion irq;
+ MemoryRegion iomem;
+ uint32_t ioport_data;
+ char iomem_buf[IOMEM_LEN];
+} PCTestdev;
+
+#define TYPE_TESTDEV "pc-testdev"
+#define TESTDEV(obj) \
+ OBJECT_CHECK(PCTestdev, (obj), TYPE_TESTDEV)
+
+static void test_irq_line(void *opaque, hwaddr addr, uint64_t data,
+ unsigned len)
+{
+ PCTestdev *dev = opaque;
+ ISADevice *isa = ISA_DEVICE(dev);
+
+ qemu_set_irq(isa_get_irq(isa, addr), !!data);
+}
+
+static const MemoryRegionOps test_irq_ops = {
+ .write = test_irq_line,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 1,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void test_ioport_write(void *opaque, hwaddr addr, uint64_t data,
+ unsigned len)
+{
+ PCTestdev *dev = opaque;
+ dev->ioport_data = data;
+}
+
+static uint64_t test_ioport_read(void *opaque, hwaddr addr, unsigned len)
+{
+ PCTestdev *dev = opaque;
+ return dev->ioport_data;
+}
+
+static const MemoryRegionOps test_ioport_ops = {
+ .read = test_ioport_read,
+ .write = test_ioport_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void test_flush_page(void *opaque, hwaddr addr, uint64_t data,
+ unsigned len)
+{
+ hwaddr page = 4096;
+ void *a = cpu_physical_memory_map(data & ~0xffful, &page, 0);
+
+    /* We might not be able to get the full page, so only mprotect what we
+       actually have mapped */
+#if defined(CONFIG_POSIX)
+ mprotect(a, page, PROT_NONE);
+ mprotect(a, page, PROT_READ|PROT_WRITE);
+#endif
+ cpu_physical_memory_unmap(a, page, 0, 0);
+}
+
+static const MemoryRegionOps test_flush_ops = {
+ .write = test_flush_page,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static uint64_t test_iomem_read(void *opaque, hwaddr addr, unsigned len)
+{
+ PCTestdev *dev = opaque;
+ uint64_t ret = 0;
+ memcpy(&ret, &dev->iomem_buf[addr], len);
+ ret = le64_to_cpu(ret);
+
+ return ret;
+}
+
+static void test_iomem_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned len)
+{
+ PCTestdev *dev = opaque;
+ val = cpu_to_le64(val);
+    memcpy(&dev->iomem_buf[addr], &val, len);
+}
+
+static const MemoryRegionOps test_iomem_ops = {
+ .read = test_iomem_read,
+ .write = test_iomem_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static int init_test_device(ISADevice *isa)
+{
+ PCTestdev *dev = TESTDEV(isa);
+ MemoryRegion *mem = isa_address_space(isa);
+ MemoryRegion *io = isa_address_space_io(isa);
+
+ memory_region_init_io(&dev->ioport, &test_ioport_ops, dev,
+ "pc-testdev-ioport", 4);
+ memory_region_init_io(&dev->flush, &test_flush_ops, dev,
+ "pc-testdev-flush-page", 4);
+ memory_region_init_io(&dev->irq, &test_irq_ops, dev,
+ "pc-testdev-irq-line", 24);
+ memory_region_init_io(&dev->iomem, &test_iomem_ops, dev,
+ "pc-testdev-iomem", IOMEM_LEN);
+
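+    /* Layout: I/O port 0xe0 (read/write scratch value), 0xe4 (page flush
+     * trigger), 0x2000..0x2017 (one byte per ISA IRQ line), plus 64KB of
+     * RAM-backed MMIO at 0xff000000
+     */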
+ memory_region_add_subregion(io, 0xe0, &dev->ioport);
+ memory_region_add_subregion(io, 0xe4, &dev->flush);
+ memory_region_add_subregion(io, 0x2000, &dev->irq);
+ memory_region_add_subregion(mem, 0xff000000, &dev->iomem);
+
+ return 0;
+}
+
+static void testdev_class_init(ObjectClass *klass, void *data)
+{
+ ISADeviceClass *k = ISA_DEVICE_CLASS(klass);
+
+ k->init = init_test_device;
+}
+
+static const TypeInfo testdev_info = {
+ .name = TYPE_TESTDEV,
+ .parent = TYPE_ISA_DEVICE,
+ .instance_size = sizeof(PCTestdev),
+ .class_init = testdev_class_init,
+};
+
+static void testdev_register_types(void)
+{
+ type_register_static(&testdev_info);
+}
+
+type_init(testdev_register_types)
diff --git a/hw/misc/puv3_pm.c b/hw/misc/puv3_pm.c
new file mode 100644
index 0000000..0aacdc2
--- /dev/null
+++ b/hw/misc/puv3_pm.c
@@ -0,0 +1,149 @@
+/*
+ * Power Management device simulation in PKUnity SoC
+ *
+ * Copyright (C) 2010-2012 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation, or any later version.
+ * See the COPYING file in the top-level directory.
+ */
+#include "hw/hw.h"
+#include "hw/sysbus.h"
+
+#undef DEBUG_PUV3
+#include "hw/unicore32/puv3.h"
+
+typedef struct {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+
+ uint32_t reg_PMCR;
+ uint32_t reg_PCGR;
+ uint32_t reg_PLL_SYS_CFG;
+ uint32_t reg_PLL_DDR_CFG;
+ uint32_t reg_PLL_VGA_CFG;
+ uint32_t reg_DIVCFG;
+} PUV3PMState;
+
+static uint64_t puv3_pm_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ PUV3PMState *s = opaque;
+ uint32_t ret = 0;
+
+ switch (offset) {
+ case 0x14:
+ ret = s->reg_PCGR;
+ break;
+ case 0x18:
+ ret = s->reg_PLL_SYS_CFG;
+ break;
+ case 0x1c:
+ ret = s->reg_PLL_DDR_CFG;
+ break;
+ case 0x20:
+ ret = s->reg_PLL_VGA_CFG;
+ break;
+ case 0x24:
+ ret = s->reg_DIVCFG;
+ break;
+ case 0x28: /* PLL SYS STATUS */
+ ret = 0x00002401;
+ break;
+ case 0x2c: /* PLL DDR STATUS */
+ ret = 0x00100c00;
+ break;
+ case 0x30: /* PLL VGA STATUS */
+ ret = 0x00003801;
+ break;
+ case 0x34: /* DIV STATUS */
+ ret = 0x22f52015;
+ break;
+ case 0x38: /* SW RESET */
+ ret = 0x0;
+ break;
+ case 0x44: /* PLL DFC DONE */
+ ret = 0x7;
+ break;
+ default:
+ DPRINTF("Bad offset 0x%x\n", offset);
+ }
+ DPRINTF("offset 0x%x, value 0x%x\n", offset, ret);
+
+ return ret;
+}
+
+static void puv3_pm_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ PUV3PMState *s = opaque;
+
+ switch (offset) {
+ case 0x0:
+ s->reg_PMCR = value;
+ break;
+ case 0x14:
+ s->reg_PCGR = value;
+ break;
+ case 0x18:
+ s->reg_PLL_SYS_CFG = value;
+ break;
+ case 0x1c:
+ s->reg_PLL_DDR_CFG = value;
+ break;
+ case 0x20:
+ s->reg_PLL_VGA_CFG = value;
+ break;
+ case 0x24:
+ case 0x38:
+ break;
+ default:
+ DPRINTF("Bad offset 0x%x\n", offset);
+ }
+ DPRINTF("offset 0x%x, value 0x%x\n", offset, value);
+}
+
+static const MemoryRegionOps puv3_pm_ops = {
+ .read = puv3_pm_read,
+ .write = puv3_pm_write,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static int puv3_pm_init(SysBusDevice *dev)
+{
+ PUV3PMState *s = FROM_SYSBUS(PUV3PMState, dev);
+
+ s->reg_PCGR = 0x0;
+
+ memory_region_init_io(&s->iomem, &puv3_pm_ops, s, "puv3_pm",
+ PUV3_REGS_OFFSET);
+ sysbus_init_mmio(dev, &s->iomem);
+
+ return 0;
+}
+
+static void puv3_pm_class_init(ObjectClass *klass, void *data)
+{
+ SysBusDeviceClass *sdc = SYS_BUS_DEVICE_CLASS(klass);
+
+ sdc->init = puv3_pm_init;
+}
+
+static const TypeInfo puv3_pm_info = {
+ .name = "puv3_pm",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(PUV3PMState),
+ .class_init = puv3_pm_class_init,
+};
+
+static void puv3_pm_register_type(void)
+{
+ type_register_static(&puv3_pm_info);
+}
+
+type_init(puv3_pm_register_type)
diff --git a/hw/misc/pxa2xx_pcmcia.c b/hw/misc/pxa2xx_pcmcia.c
new file mode 100644
index 0000000..323d458
--- /dev/null
+++ b/hw/misc/pxa2xx_pcmcia.c
@@ -0,0 +1,207 @@
+/*
+ * Intel XScale PXA255/270 PC Card and CompactFlash Interface.
+ *
+ * Copyright (c) 2006 Openedhand Ltd.
+ * Written by Andrzej Zaborowski <balrog@zabor.org>
+ *
+ * This code is licensed under the GPLv2.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "hw/hw.h"
+#include "hw/pcmcia.h"
+#include "hw/arm/pxa.h"
+
+
+struct PXA2xxPCMCIAState {
+ PCMCIASocket slot;
+ PCMCIACardState *card;
+ MemoryRegion common_iomem;
+ MemoryRegion attr_iomem;
+ MemoryRegion iomem;
+
+ qemu_irq irq;
+ qemu_irq cd_irq;
+};
+
+static uint64_t pxa2xx_pcmcia_common_read(void *opaque,
+ hwaddr offset, unsigned size)
+{
+ PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
+
+ if (s->slot.attached) {
+ return s->card->common_read(s->card->state, offset);
+ }
+
+ return 0;
+}
+
+static void pxa2xx_pcmcia_common_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
+
+ if (s->slot.attached) {
+ s->card->common_write(s->card->state, offset, value);
+ }
+}
+
+static uint64_t pxa2xx_pcmcia_attr_read(void *opaque,
+ hwaddr offset, unsigned size)
+{
+ PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
+
+ if (s->slot.attached) {
+ return s->card->attr_read(s->card->state, offset);
+ }
+
+ return 0;
+}
+
+static void pxa2xx_pcmcia_attr_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
+
+ if (s->slot.attached) {
+ s->card->attr_write(s->card->state, offset, value);
+ }
+}
+
+static uint64_t pxa2xx_pcmcia_io_read(void *opaque,
+ hwaddr offset, unsigned size)
+{
+ PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
+
+ if (s->slot.attached) {
+ return s->card->io_read(s->card->state, offset);
+ }
+
+ return 0;
+}
+
+static void pxa2xx_pcmcia_io_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
+
+ if (s->slot.attached) {
+ s->card->io_write(s->card->state, offset, value);
+ }
+}
+
+static const MemoryRegionOps pxa2xx_pcmcia_common_ops = {
+ .read = pxa2xx_pcmcia_common_read,
+ .write = pxa2xx_pcmcia_common_write,
+ .endianness = DEVICE_NATIVE_ENDIAN
+};
+
+static const MemoryRegionOps pxa2xx_pcmcia_attr_ops = {
+ .read = pxa2xx_pcmcia_attr_read,
+ .write = pxa2xx_pcmcia_attr_write,
+ .endianness = DEVICE_NATIVE_ENDIAN
+};
+
+static const MemoryRegionOps pxa2xx_pcmcia_io_ops = {
+ .read = pxa2xx_pcmcia_io_read,
+ .write = pxa2xx_pcmcia_io_write,
+ .endianness = DEVICE_NATIVE_ENDIAN
+};
+
+static void pxa2xx_pcmcia_set_irq(void *opaque, int line, int level)
+{
+ PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
+ if (!s->irq)
+ return;
+
+ qemu_set_irq(s->irq, level);
+}
+
+PXA2xxPCMCIAState *pxa2xx_pcmcia_init(MemoryRegion *sysmem,
+ hwaddr base)
+{
+ PXA2xxPCMCIAState *s;
+
+ s = (PXA2xxPCMCIAState *)
+ g_malloc0(sizeof(PXA2xxPCMCIAState));
+
+ /* Socket I/O Memory Space */
+ memory_region_init_io(&s->iomem, &pxa2xx_pcmcia_io_ops, s,
+ "pxa2xx-pcmcia-io", 0x04000000);
+ memory_region_add_subregion(sysmem, base | 0x00000000,
+ &s->iomem);
+
+ /* Then next 64 MB is reserved */
+
+ /* Socket Attribute Memory Space */
+ memory_region_init_io(&s->attr_iomem, &pxa2xx_pcmcia_attr_ops, s,
+ "pxa2xx-pcmcia-attribute", 0x04000000);
+ memory_region_add_subregion(sysmem, base | 0x08000000,
+ &s->attr_iomem);
+
+ /* Socket Common Memory Space */
+ memory_region_init_io(&s->common_iomem, &pxa2xx_pcmcia_common_ops, s,
+ "pxa2xx-pcmcia-common", 0x04000000);
+ memory_region_add_subregion(sysmem, base | 0x0c000000,
+ &s->common_iomem);
+
+ if (base == 0x30000000)
+ s->slot.slot_string = "PXA PC Card Socket 1";
+ else
+ s->slot.slot_string = "PXA PC Card Socket 0";
+ s->slot.irq = qemu_allocate_irqs(pxa2xx_pcmcia_set_irq, s, 1)[0];
+ pcmcia_socket_register(&s->slot);
+
+ return s;
+}
+
+/* Insert a new card into a slot */
+int pxa2xx_pcmcia_attach(void *opaque, PCMCIACardState *card)
+{
+ PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
+ if (s->slot.attached)
+ return -EEXIST;
+
+ if (s->cd_irq) {
+ qemu_irq_raise(s->cd_irq);
+ }
+
+ s->card = card;
+
+ s->slot.attached = 1;
+ s->card->slot = &s->slot;
+ s->card->attach(s->card->state);
+
+ return 0;
+}
+
+/* Eject card from the slot */
+int pxa2xx_pcmcia_dettach(void *opaque)
+{
+ PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
+ if (!s->slot.attached)
+ return -ENOENT;
+
+ s->card->detach(s->card->state);
+ s->card->slot = NULL;
+ s->card = NULL;
+
+ s->slot.attached = 0;
+
+ if (s->irq)
+ qemu_irq_lower(s->irq);
+ if (s->cd_irq)
+ qemu_irq_lower(s->cd_irq);
+
+ return 0;
+}
+
+/* Who to notify on card events */
+void pxa2xx_pcmcia_set_irq_cb(void *opaque, qemu_irq irq, qemu_irq cd_irq)
+{
+ PXA2xxPCMCIAState *s = (PXA2xxPCMCIAState *) opaque;
+ s->irq = irq;
+ s->cd_irq = cd_irq;
+}
diff --git a/hw/misc/sga.c b/hw/misc/sga.c
new file mode 100644
index 0000000..5cf4b86
--- /dev/null
+++ b/hw/misc/sga.c
@@ -0,0 +1,63 @@
+/*
+ * QEMU dummy ISA device for loading sgabios option rom.
+ *
+ * Copyright (c) 2011 Glauber Costa, Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * sgabios code originally available at code.google.com/p/sgabios
+ *
+ */
+#include "hw/pci/pci.h"
+#include "hw/i386/pc.h"
+#include "hw/loader.h"
+#include "sysemu/sysemu.h"
+
+#define SGABIOS_FILENAME "sgabios.bin"
+
+typedef struct ISAGAState {
+ ISADevice dev;
+} ISASGAState;
+
+static int sga_initfn(ISADevice *dev)
+{
+ rom_add_vga(SGABIOS_FILENAME);
+ return 0;
+}
+static void sga_class_initfn(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ISADeviceClass *ic = ISA_DEVICE_CLASS(klass);
+ ic->init = sga_initfn;
+ dc->desc = "Serial Graphics Adapter";
+}
+
+static const TypeInfo sga_info = {
+ .name = "sga",
+ .parent = TYPE_ISA_DEVICE,
+ .instance_size = sizeof(ISASGAState),
+ .class_init = sga_class_initfn,
+};
+
+static void sga_register_types(void)
+{
+ type_register_static(&sga_info);
+}
+
+type_init(sga_register_types)
diff --git a/hw/misc/slavio_misc.c b/hw/misc/slavio_misc.c
new file mode 100644
index 0000000..a7a9368
--- /dev/null
+++ b/hw/misc/slavio_misc.c
@@ -0,0 +1,508 @@
+/*
+ * QEMU Sparc SLAVIO aux io port emulation
+ *
+ * Copyright (c) 2005 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "sysemu/sysemu.h"
+#include "hw/sysbus.h"
+#include "trace.h"
+
+/*
+ * This is the auxio port, chip control and system control part of
+ * chip STP2001 (Slave I/O), also produced as NCR89C105. See
+ * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C105.txt
+ *
+ * This also includes the PMC CPU idle controller.
+ */
+
+typedef struct MiscState {
+ SysBusDevice busdev;
+ MemoryRegion cfg_iomem;
+ MemoryRegion diag_iomem;
+ MemoryRegion mdm_iomem;
+ MemoryRegion led_iomem;
+ MemoryRegion sysctrl_iomem;
+ MemoryRegion aux1_iomem;
+ MemoryRegion aux2_iomem;
+ qemu_irq irq;
+ qemu_irq fdc_tc;
+ uint32_t dummy;
+ uint8_t config;
+ uint8_t aux1, aux2;
+ uint8_t diag, mctrl;
+ uint8_t sysctrl;
+ uint16_t leds;
+} MiscState;
+
+typedef struct APCState {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+ qemu_irq cpu_halt;
+} APCState;
+
+#define MISC_SIZE 1
+#define SYSCTRL_SIZE 4
+
+#define AUX1_TC 0x02
+
+#define AUX2_PWROFF 0x01
+#define AUX2_PWRINTCLR 0x02
+#define AUX2_PWRFAIL 0x20
+
+#define CFG_PWRINTEN 0x08
+
+#define SYS_RESET 0x01
+#define SYS_RESETSTAT 0x02
+
+static void slavio_misc_update_irq(void *opaque)
+{
+ MiscState *s = opaque;
+
+ if ((s->aux2 & AUX2_PWRFAIL) && (s->config & CFG_PWRINTEN)) {
+ trace_slavio_misc_update_irq_raise();
+ qemu_irq_raise(s->irq);
+ } else {
+ trace_slavio_misc_update_irq_lower();
+ qemu_irq_lower(s->irq);
+ }
+}
+
+static void slavio_misc_reset(DeviceState *d)
+{
+ MiscState *s = container_of(d, MiscState, busdev.qdev);
+
+ // Diagnostic and system control registers not cleared in reset
+ s->config = s->aux1 = s->aux2 = s->mctrl = 0;
+}
+
+static void slavio_set_power_fail(void *opaque, int irq, int power_failing)
+{
+ MiscState *s = opaque;
+
+ trace_slavio_set_power_fail(power_failing, s->config);
+ if (power_failing && (s->config & CFG_PWRINTEN)) {
+ s->aux2 |= AUX2_PWRFAIL;
+ } else {
+ s->aux2 &= ~AUX2_PWRFAIL;
+ }
+ slavio_misc_update_irq(s);
+}
+
+static void slavio_cfg_mem_writeb(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ MiscState *s = opaque;
+
+ trace_slavio_cfg_mem_writeb(val & 0xff);
+ s->config = val & 0xff;
+ slavio_misc_update_irq(s);
+}
+
+static uint64_t slavio_cfg_mem_readb(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ MiscState *s = opaque;
+ uint32_t ret = 0;
+
+ ret = s->config;
+ trace_slavio_cfg_mem_readb(ret);
+ return ret;
+}
+
+static const MemoryRegionOps slavio_cfg_mem_ops = {
+ .read = slavio_cfg_mem_readb,
+ .write = slavio_cfg_mem_writeb,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 1,
+ },
+};
+
+static void slavio_diag_mem_writeb(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ MiscState *s = opaque;
+
+ trace_slavio_diag_mem_writeb(val & 0xff);
+ s->diag = val & 0xff;
+}
+
+static uint64_t slavio_diag_mem_readb(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ MiscState *s = opaque;
+ uint32_t ret = 0;
+
+ ret = s->diag;
+ trace_slavio_diag_mem_readb(ret);
+ return ret;
+}
+
+static const MemoryRegionOps slavio_diag_mem_ops = {
+ .read = slavio_diag_mem_readb,
+ .write = slavio_diag_mem_writeb,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 1,
+ },
+};
+
+static void slavio_mdm_mem_writeb(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ MiscState *s = opaque;
+
+ trace_slavio_mdm_mem_writeb(val & 0xff);
+ s->mctrl = val & 0xff;
+}
+
+static uint64_t slavio_mdm_mem_readb(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ MiscState *s = opaque;
+ uint32_t ret = 0;
+
+ ret = s->mctrl;
+ trace_slavio_mdm_mem_readb(ret);
+ return ret;
+}
+
+static const MemoryRegionOps slavio_mdm_mem_ops = {
+ .read = slavio_mdm_mem_readb,
+ .write = slavio_mdm_mem_writeb,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 1,
+ },
+};
+
+static void slavio_aux1_mem_writeb(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ MiscState *s = opaque;
+
+ trace_slavio_aux1_mem_writeb(val & 0xff);
+ if (val & AUX1_TC) {
+ // Send a pulse to floppy terminal count line
+ if (s->fdc_tc) {
+ qemu_irq_raise(s->fdc_tc);
+ qemu_irq_lower(s->fdc_tc);
+ }
+ val &= ~AUX1_TC;
+ }
+ s->aux1 = val & 0xff;
+}
+
+static uint64_t slavio_aux1_mem_readb(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ MiscState *s = opaque;
+ uint32_t ret = 0;
+
+ ret = s->aux1;
+ trace_slavio_aux1_mem_readb(ret);
+ return ret;
+}
+
+static const MemoryRegionOps slavio_aux1_mem_ops = {
+ .read = slavio_aux1_mem_readb,
+ .write = slavio_aux1_mem_writeb,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 1,
+ },
+};
+
+static void slavio_aux2_mem_writeb(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ MiscState *s = opaque;
+
+ val &= AUX2_PWRINTCLR | AUX2_PWROFF;
+ trace_slavio_aux2_mem_writeb(val & 0xff);
+ val |= s->aux2 & AUX2_PWRFAIL;
+ if (val & AUX2_PWRINTCLR) // Clear Power Fail int
+ val &= AUX2_PWROFF;
+ s->aux2 = val;
+ if (val & AUX2_PWROFF)
+ qemu_system_shutdown_request();
+ slavio_misc_update_irq(s);
+}
+
+static uint64_t slavio_aux2_mem_readb(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ MiscState *s = opaque;
+ uint32_t ret = 0;
+
+ ret = s->aux2;
+ trace_slavio_aux2_mem_readb(ret);
+ return ret;
+}
+
+static const MemoryRegionOps slavio_aux2_mem_ops = {
+ .read = slavio_aux2_mem_readb,
+ .write = slavio_aux2_mem_writeb,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 1,
+ },
+};
+
+static void apc_mem_writeb(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ APCState *s = opaque;
+
+ trace_apc_mem_writeb(val & 0xff);
+ qemu_irq_raise(s->cpu_halt);
+}
+
+static uint64_t apc_mem_readb(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ uint32_t ret = 0;
+
+ trace_apc_mem_readb(ret);
+ return ret;
+}
+
+static const MemoryRegionOps apc_mem_ops = {
+ .read = apc_mem_readb,
+ .write = apc_mem_writeb,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 1,
+ }
+};
+
+static uint64_t slavio_sysctrl_mem_readl(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ MiscState *s = opaque;
+ uint32_t ret = 0;
+
+ switch (addr) {
+ case 0:
+ ret = s->sysctrl;
+ break;
+ default:
+ break;
+ }
+ trace_slavio_sysctrl_mem_readl(ret);
+ return ret;
+}
+
+static void slavio_sysctrl_mem_writel(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ MiscState *s = opaque;
+
+ trace_slavio_sysctrl_mem_writel(val);
+ switch (addr) {
+ case 0:
+ if (val & SYS_RESET) {
+ s->sysctrl = SYS_RESETSTAT;
+ qemu_system_reset_request();
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static const MemoryRegionOps slavio_sysctrl_mem_ops = {
+ .read = slavio_sysctrl_mem_readl,
+ .write = slavio_sysctrl_mem_writel,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static uint64_t slavio_led_mem_readw(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ MiscState *s = opaque;
+ uint32_t ret = 0;
+
+ switch (addr) {
+ case 0:
+ ret = s->leds;
+ break;
+ default:
+ break;
+ }
+ trace_slavio_led_mem_readw(ret);
+ return ret;
+}
+
+static void slavio_led_mem_writew(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ MiscState *s = opaque;
+
+ trace_slavio_led_mem_writew(val & 0xffff);
+ switch (addr) {
+ case 0:
+ s->leds = val;
+ break;
+ default:
+ break;
+ }
+}
+
+static const MemoryRegionOps slavio_led_mem_ops = {
+ .read = slavio_led_mem_readw,
+ .write = slavio_led_mem_writew,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 2,
+ .max_access_size = 2,
+ },
+};
+
+static const VMStateDescription vmstate_misc = {
+ .name = "slavio_misc",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField []) {
+ VMSTATE_UINT32(dummy, MiscState),
+ VMSTATE_UINT8(config, MiscState),
+ VMSTATE_UINT8(aux1, MiscState),
+ VMSTATE_UINT8(aux2, MiscState),
+ VMSTATE_UINT8(diag, MiscState),
+ VMSTATE_UINT8(mctrl, MiscState),
+ VMSTATE_UINT8(sysctrl, MiscState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static int apc_init1(SysBusDevice *dev)
+{
+ APCState *s = FROM_SYSBUS(APCState, dev);
+
+ sysbus_init_irq(dev, &s->cpu_halt);
+
+ /* Power management (APC) XXX: not a Slavio device */
+ memory_region_init_io(&s->iomem, &apc_mem_ops, s,
+ "apc", MISC_SIZE);
+ sysbus_init_mmio(dev, &s->iomem);
+ return 0;
+}
+
+static int slavio_misc_init1(SysBusDevice *dev)
+{
+ MiscState *s = FROM_SYSBUS(MiscState, dev);
+
+ sysbus_init_irq(dev, &s->irq);
+ sysbus_init_irq(dev, &s->fdc_tc);
+
+ /* 8 bit registers */
+ /* Slavio control */
+ memory_region_init_io(&s->cfg_iomem, &slavio_cfg_mem_ops, s,
+ "configuration", MISC_SIZE);
+ sysbus_init_mmio(dev, &s->cfg_iomem);
+
+ /* Diagnostics */
+ memory_region_init_io(&s->diag_iomem, &slavio_diag_mem_ops, s,
+ "diagnostic", MISC_SIZE);
+ sysbus_init_mmio(dev, &s->diag_iomem);
+
+ /* Modem control */
+ memory_region_init_io(&s->mdm_iomem, &slavio_mdm_mem_ops, s,
+ "modem", MISC_SIZE);
+ sysbus_init_mmio(dev, &s->mdm_iomem);
+
+ /* 16 bit registers */
+ /* ss600mp diag LEDs */
+ memory_region_init_io(&s->led_iomem, &slavio_led_mem_ops, s,
+ "leds", MISC_SIZE);
+ sysbus_init_mmio(dev, &s->led_iomem);
+
+ /* 32 bit registers */
+ /* System control */
+ memory_region_init_io(&s->sysctrl_iomem, &slavio_sysctrl_mem_ops, s,
+ "system-control", MISC_SIZE);
+ sysbus_init_mmio(dev, &s->sysctrl_iomem);
+
+ /* AUX 1 (Misc System Functions) */
+ memory_region_init_io(&s->aux1_iomem, &slavio_aux1_mem_ops, s,
+ "misc-system-functions", MISC_SIZE);
+ sysbus_init_mmio(dev, &s->aux1_iomem);
+
+ /* AUX 2 (Software Powerdown Control) */
+ memory_region_init_io(&s->aux2_iomem, &slavio_aux2_mem_ops, s,
+ "software-powerdown-control", MISC_SIZE);
+ sysbus_init_mmio(dev, &s->aux2_iomem);
+
+ qdev_init_gpio_in(&dev->qdev, slavio_set_power_fail, 1);
+
+ return 0;
+}
+
+static void slavio_misc_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+
+ k->init = slavio_misc_init1;
+ dc->reset = slavio_misc_reset;
+ dc->vmsd = &vmstate_misc;
+}
+
+static const TypeInfo slavio_misc_info = {
+ .name = "slavio_misc",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(MiscState),
+ .class_init = slavio_misc_class_init,
+};
+
+static void apc_class_init(ObjectClass *klass, void *data)
+{
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+
+ k->init = apc_init1;
+}
+
+static const TypeInfo apc_info = {
+ .name = "apc",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(APCState),
+ .class_init = apc_class_init,
+};
+
+static void slavio_misc_register_types(void)
+{
+ type_register_static(&slavio_misc_info);
+ type_register_static(&apc_info);
+}
+
+type_init(slavio_misc_register_types)
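+
+/*
+ * Board wiring sketch (illustrative only; the real sun4m wiring lives in
+ * the board code, and slavio_irq, fdc_tc and the *_base addresses below
+ * are placeholders).  The MMIO regions are exported in the order they are
+ * registered in slavio_misc_init1(): 0 config, 1 diag, 2 modem, 3 LEDs,
+ * 4 system control, 5 AUX1, 6 AUX2.
+ *
+ *     DeviceState *dev = qdev_create(NULL, "slavio_misc");
+ *     qdev_init_nofail(dev);
+ *     SysBusDevice *s = SYS_BUS_DEVICE(dev);
+ *     sysbus_connect_irq(s, 0, slavio_irq);
+ *     sysbus_connect_irq(s, 1, fdc_tc);
+ *     sysbus_mmio_map(s, 0, cfg_base);
+ *     sysbus_mmio_map(s, 4, sysctrl_base);
+ */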
diff --git a/hw/misc/tmp105.c b/hw/misc/tmp105.c
new file mode 100644
index 0000000..155e03d
--- /dev/null
+++ b/hw/misc/tmp105.c
@@ -0,0 +1,269 @@
+/*
+ * Texas Instruments TMP105 temperature sensor.
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Written by Andrzej Zaborowski <andrew@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hw/hw.h"
+#include "hw/i2c/i2c.h"
+#include "tmp105.h"
+#include "qapi/visitor.h"
+
+static void tmp105_interrupt_update(TMP105State *s)
+{
+ qemu_set_irq(s->pin, s->alarm ^ ((~s->config >> 2) & 1)); /* POL */
+}
+
+static void tmp105_alarm_update(TMP105State *s)
+{
+ if ((s->config >> 0) & 1) { /* SD */
+ if ((s->config >> 7) & 1) /* OS */
+ s->config &= ~(1 << 7); /* OS */
+ else
+ return;
+ }
+
+ if ((s->config >> 1) & 1) { /* TM */
+ if (s->temperature >= s->limit[1])
+ s->alarm = 1;
+ else if (s->temperature < s->limit[0])
+ s->alarm = 1;
+ } else {
+ if (s->temperature >= s->limit[1])
+ s->alarm = 1;
+ else if (s->temperature < s->limit[0])
+ s->alarm = 0;
+ }
+
+ tmp105_interrupt_update(s);
+}
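+
+/*
+ * Example of the two alert modes above (limit values are illustrative):
+ * with T_LOW = 25 C and T_HIGH = 30 C, comparator mode (TM=0) sets the
+ * alarm once the temperature reaches 30 C and clears it only when it
+ * drops below 25 C, while interrupt mode (TM=1) sets the alarm on either
+ * crossing and relies on a register read (see tmp105_read) to clear it.
+ */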
+
+static void tmp105_get_temperature(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ TMP105State *s = TMP105(obj);
+ int64_t value = s->temperature;
+
+ visit_type_int(v, &value, name, errp);
+}
+
+/* Units are 0.001 degrees C (millidegrees Celsius). */
+static void tmp105_set_temperature(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ TMP105State *s = TMP105(obj);
+ int64_t temp;
+
+ visit_type_int(v, &temp, name, errp);
+ if (error_is_set(errp)) {
+ return;
+ }
+ if (temp >= 128000 || temp < -128000) {
+ error_setg(errp, "value %" PRId64 ".%03" PRIu64 " °C is out of range",
+ temp / 1000, temp % 1000);
+ return;
+ }
+
+ s->temperature = ((int16_t) (temp * 0x800 / 128000)) << 4;
+
+ tmp105_alarm_update(s);
+}
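+
+/*
+ * Worked example of the conversion above: setting the property to 25000
+ * (25.000 C) gives 25000 * 0x800 / 128000 = 0x190, which shifted left by
+ * four becomes 0x1900; the register read path then returns 0x19 (25) in
+ * the high byte and the fractional bits in the top of the low byte,
+ * matching the TMP105 register format.
+ */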
+
+static const int tmp105_faultq[4] = { 1, 2, 4, 6 };
+
+static void tmp105_read(TMP105State *s)
+{
+ s->len = 0;
+
+ if ((s->config >> 1) & 1) { /* TM */
+ s->alarm = 0;
+ tmp105_interrupt_update(s);
+ }
+
+ switch (s->pointer & 3) {
+ case TMP105_REG_TEMPERATURE:
+ s->buf[s->len ++] = (((uint16_t) s->temperature) >> 8);
+ s->buf[s->len ++] = (((uint16_t) s->temperature) >> 0) &
+ (0xf0 << ((~s->config >> 5) & 3)); /* R */
+ break;
+
+ case TMP105_REG_CONFIG:
+ s->buf[s->len ++] = s->config;
+ break;
+
+ case TMP105_REG_T_LOW:
+ s->buf[s->len ++] = ((uint16_t) s->limit[0]) >> 8;
+ s->buf[s->len ++] = ((uint16_t) s->limit[0]) >> 0;
+ break;
+
+ case TMP105_REG_T_HIGH:
+ s->buf[s->len ++] = ((uint16_t) s->limit[1]) >> 8;
+ s->buf[s->len ++] = ((uint16_t) s->limit[1]) >> 0;
+ break;
+ }
+}
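+
+/*
+ * Example of the resolution mask above: with the R bits (config bits 6:5)
+ * at 00 (9-bit mode), (~config >> 5) & 3 is 3, so the 0xf0 mask becomes
+ * 0x780 and truncates to 0x80 in the byte buffer, leaving only one
+ * fractional bit; with R = 11 (12-bit mode) the mask stays 0xf0 and four
+ * fractional bits are reported.
+ */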
+
+static void tmp105_write(TMP105State *s)
+{
+ switch (s->pointer & 3) {
+ case TMP105_REG_TEMPERATURE:
+ break;
+
+ case TMP105_REG_CONFIG:
+ if (s->buf[0] & ~s->config & (1 << 0)) /* SD */
+ printf("%s: TMP105 shutdown\n", __FUNCTION__);
+ s->config = s->buf[0];
+ s->faults = tmp105_faultq[(s->config >> 3) & 3]; /* F */
+ tmp105_alarm_update(s);
+ break;
+
+ case TMP105_REG_T_LOW:
+ case TMP105_REG_T_HIGH:
+ if (s->len >= 3)
+ s->limit[s->pointer & 1] = (int16_t)
+ ((((uint16_t) s->buf[0]) << 8) | s->buf[1]);
+ tmp105_alarm_update(s);
+ break;
+ }
+}
+
+static int tmp105_rx(I2CSlave *i2c)
+{
+ TMP105State *s = TMP105(i2c);
+
+ if (s->len < 2) {
+ return s->buf[s->len ++];
+ } else {
+ return 0xff;
+ }
+}
+
+static int tmp105_tx(I2CSlave *i2c, uint8_t data)
+{
+ TMP105State *s = TMP105(i2c);
+
+ if (s->len == 0) {
+ s->pointer = data;
+ s->len++;
+ } else {
+ if (s->len <= 2) {
+ s->buf[s->len - 1] = data;
+ }
+ s->len++;
+ tmp105_write(s);
+ }
+
+ return 0;
+}
+
+static void tmp105_event(I2CSlave *i2c, enum i2c_event event)
+{
+ TMP105State *s = TMP105(i2c);
+
+ if (event == I2C_START_RECV) {
+ tmp105_read(s);
+ }
+
+ s->len = 0;
+}
+
+static int tmp105_post_load(void *opaque, int version_id)
+{
+ TMP105State *s = opaque;
+
+ s->faults = tmp105_faultq[(s->config >> 3) & 3]; /* F */
+
+ tmp105_interrupt_update(s);
+ return 0;
+}
+
+static const VMStateDescription vmstate_tmp105 = {
+ .name = "TMP105",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .minimum_version_id_old = 0,
+ .post_load = tmp105_post_load,
+ .fields = (VMStateField []) {
+ VMSTATE_UINT8(len, TMP105State),
+ VMSTATE_UINT8_ARRAY(buf, TMP105State, 2),
+ VMSTATE_UINT8(pointer, TMP105State),
+ VMSTATE_UINT8(config, TMP105State),
+ VMSTATE_INT16(temperature, TMP105State),
+ VMSTATE_INT16_ARRAY(limit, TMP105State, 2),
+ VMSTATE_UINT8(alarm, TMP105State),
+ VMSTATE_I2C_SLAVE(i2c, TMP105State),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void tmp105_reset(I2CSlave *i2c)
+{
+ TMP105State *s = TMP105(i2c);
+
+ s->temperature = 0;
+ s->pointer = 0;
+ s->config = 0;
+ s->faults = tmp105_faultq[(s->config >> 3) & 3];
+ s->alarm = 0;
+
+ tmp105_interrupt_update(s);
+}
+
+static int tmp105_init(I2CSlave *i2c)
+{
+ TMP105State *s = TMP105(i2c);
+
+ qdev_init_gpio_out(&i2c->qdev, &s->pin, 1);
+
+ tmp105_reset(&s->i2c);
+
+ return 0;
+}
+
+static void tmp105_initfn(Object *obj)
+{
+ object_property_add(obj, "temperature", "int",
+ tmp105_get_temperature,
+ tmp105_set_temperature, NULL, NULL, NULL);
+}
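+
+/*
+ * Instantiation sketch (illustrative; i2c_bus, alert_irq and the 0x48
+ * address are placeholders, not taken from any particular board):
+ *
+ *     DeviceState *dev = i2c_create_slave(i2c_bus, TYPE_TMP105, 0x48);
+ *     qdev_connect_gpio_out(dev, 0, alert_irq);
+ *     object_property_set_int(OBJECT(dev), 25000, "temperature", NULL);
+ */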
+
+static void tmp105_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
+
+ k->init = tmp105_init;
+ k->event = tmp105_event;
+ k->recv = tmp105_rx;
+ k->send = tmp105_tx;
+ dc->vmsd = &vmstate_tmp105;
+}
+
+static const TypeInfo tmp105_info = {
+ .name = TYPE_TMP105,
+ .parent = TYPE_I2C_SLAVE,
+ .instance_size = sizeof(TMP105State),
+ .instance_init = tmp105_initfn,
+ .class_init = tmp105_class_init,
+};
+
+static void tmp105_register_types(void)
+{
+ type_register_static(&tmp105_info);
+}
+
+type_init(tmp105_register_types)
diff --git a/hw/misc/tmp105.h b/hw/misc/tmp105.h
new file mode 100644
index 0000000..9ba05ec
--- /dev/null
+++ b/hw/misc/tmp105.h
@@ -0,0 +1,47 @@
+/*
+ * Texas Instruments TMP105 Temperature Sensor
+ *
+ * Browse the data sheet:
+ *
+ * http://www.ti.com/lit/gpn/tmp105
+ *
+ * Copyright (C) 2012 Alex Horn <alex.horn@cs.ox.ac.uk>
+ * Copyright (C) 2008-2012 Andrzej Zaborowski <balrogg@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_TMP105_H
+#define QEMU_TMP105_H
+
+#include "hw/i2c/i2c.h"
+#include "hw/misc/tmp105_regs.h"
+
+#define TYPE_TMP105 "tmp105"
+#define TMP105(obj) OBJECT_CHECK(TMP105State, (obj), TYPE_TMP105)
+
+/**
+ * TMP105State:
+ * @config: Bits 5 and 6 (value 32 and 64) determine the precision of the
+ * temperature. See Table 8 in the data sheet.
+ *
+ * @see_also: http://www.ti.com/lit/gpn/tmp105
+ */
+typedef struct TMP105State {
+ /*< private >*/
+ I2CSlave i2c;
+ /*< public >*/
+
+ uint8_t len;
+ uint8_t buf[2];
+ qemu_irq pin;
+
+ uint8_t pointer;
+ uint8_t config;
+ int16_t temperature;
+ int16_t limit[2];
+ int faults;
+ uint8_t alarm;
+} TMP105State;
+
+#endif
diff --git a/hw/misc/vfio.c b/hw/misc/vfio.c
new file mode 100644
index 0000000..693a9ff
--- /dev/null
+++ b/hw/misc/vfio.c
@@ -0,0 +1,3206 @@
+/*
+ * vfio based device assignment support
+ *
+ * Copyright Red Hat, Inc. 2012
+ *
+ * Authors:
+ * Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Based on qemu-kvm device-assignment:
+ * Adapted for KVM by Qumranet.
+ * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
+ * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
+ * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
+ * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
+ * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
+ */
+
+#include <dirent.h>
+#include <linux/vfio.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "config.h"
+#include "exec/address-spaces.h"
+#include "exec/memory.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/msix.h"
+#include "hw/pci/pci.h"
+#include "qemu-common.h"
+#include "qemu/error-report.h"
+#include "qemu/event_notifier.h"
+#include "qemu/queue.h"
+#include "qemu/range.h"
+#include "sysemu/kvm.h"
+#include "sysemu/sysemu.h"
+
+/* #define DEBUG_VFIO */
+#ifdef DEBUG_VFIO
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, "vfio: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+/* Extra debugging, trap acceleration paths for more logging */
+#define VFIO_ALLOW_MMAP 1
+#define VFIO_ALLOW_KVM_INTX 1
+
+struct VFIODevice;
+
+typedef struct VFIOQuirk {
+ MemoryRegion mem;
+ struct VFIODevice *vdev;
+ QLIST_ENTRY(VFIOQuirk) next;
+ uint32_t data;
+ uint32_t data2;
+} VFIOQuirk;
+
+typedef struct VFIOBAR {
+ off_t fd_offset; /* offset of BAR within device fd */
+ int fd; /* device fd, allows us to pass VFIOBAR as opaque data */
+ MemoryRegion mem; /* slow, read/write access */
+ MemoryRegion mmap_mem; /* direct mapped access */
+ void *mmap;
+ size_t size;
+ uint32_t flags; /* VFIO region flags (rd/wr/mmap) */
+ uint8_t nr; /* cache the BAR number for debug */
+ QLIST_HEAD(, VFIOQuirk) quirks;
+} VFIOBAR;
+
+typedef struct VFIOVGARegion {
+ MemoryRegion mem;
+ off_t offset;
+ int nr;
+ QLIST_HEAD(, VFIOQuirk) quirks;
+} VFIOVGARegion;
+
+typedef struct VFIOVGA {
+ off_t fd_offset;
+ int fd;
+ VFIOVGARegion region[QEMU_PCI_VGA_NUM_REGIONS];
+} VFIOVGA;
+
+typedef struct VFIOINTx {
+ bool pending; /* interrupt pending */
+ bool kvm_accel; /* set when QEMU bypass through KVM enabled */
+ uint8_t pin; /* which pin to pull for qemu_set_irq */
+ EventNotifier interrupt; /* eventfd triggered on interrupt */
+ EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
+ PCIINTxRoute route; /* routing info for QEMU bypass */
+ uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
+ QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
+} VFIOINTx;
+
+typedef struct VFIOMSIVector {
+ EventNotifier interrupt; /* eventfd triggered on interrupt */
+ struct VFIODevice *vdev; /* back pointer to device */
+ int virq; /* KVM irqchip route for QEMU bypass */
+ bool use;
+} VFIOMSIVector;
+
+enum {
+ VFIO_INT_NONE = 0,
+ VFIO_INT_INTx = 1,
+ VFIO_INT_MSI = 2,
+ VFIO_INT_MSIX = 3,
+};
+
+struct VFIOGroup;
+
+typedef struct VFIOContainer {
+ int fd; /* /dev/vfio/vfio, empowered by the attached groups */
+ struct {
+ /* enable abstraction to support various iommu backends */
+ union {
+ MemoryListener listener; /* Used by type1 iommu */
+ };
+ void (*release)(struct VFIOContainer *);
+ } iommu_data;
+ QLIST_HEAD(, VFIOGroup) group_list;
+ QLIST_ENTRY(VFIOContainer) next;
+} VFIOContainer;
+
+/* Cache of MSI-X setup plus extra mmap and memory region for split BAR map */
+typedef struct VFIOMSIXInfo {
+ uint8_t table_bar;
+ uint8_t pba_bar;
+ uint16_t entries;
+ uint32_t table_offset;
+ uint32_t pba_offset;
+ MemoryRegion mmap_mem;
+ void *mmap;
+} VFIOMSIXInfo;
+
+typedef struct VFIODevice {
+ PCIDevice pdev;
+ int fd;
+ VFIOINTx intx;
+ unsigned int config_size;
+ uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */
+ off_t config_offset; /* Offset of config space region within device fd */
+ unsigned int rom_size;
+ off_t rom_offset; /* Offset of ROM region within device fd */
+ int msi_cap_size;
+ VFIOMSIVector *msi_vectors;
+ VFIOMSIXInfo *msix;
+ int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
+ int interrupt; /* Current interrupt type */
+ VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
+ VFIOVGA vga; /* 0xa0000, 0x3b0, 0x3c0 */
+ PCIHostDeviceAddress host;
+ QLIST_ENTRY(VFIODevice) next;
+ struct VFIOGroup *group;
+ uint32_t features;
+#define VFIO_FEATURE_ENABLE_VGA_BIT 0
+#define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT)
+ int32_t bootindex;
+ uint8_t pm_cap;
+ bool reset_works;
+ bool has_vga;
+} VFIODevice;
+
+typedef struct VFIOGroup {
+ int fd;
+ int groupid;
+ VFIOContainer *container;
+ QLIST_HEAD(, VFIODevice) device_list;
+ QLIST_ENTRY(VFIOGroup) next;
+ QLIST_ENTRY(VFIOGroup) container_next;
+} VFIOGroup;
+
+#define MSIX_CAP_LENGTH 12
+
+static QLIST_HEAD(, VFIOContainer)
+ container_list = QLIST_HEAD_INITIALIZER(container_list);
+
+static QLIST_HEAD(, VFIOGroup)
+ group_list = QLIST_HEAD_INITIALIZER(group_list);
+
+static void vfio_disable_interrupts(VFIODevice *vdev);
+static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
+static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
+ uint32_t val, int len);
+static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled);
+
+/*
+ * Common VFIO interrupt disable
+ */
+static void vfio_disable_irqindex(VFIODevice *vdev, int index)
+{
+ struct vfio_irq_set irq_set = {
+ .argsz = sizeof(irq_set),
+ .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
+ .index = index,
+ .start = 0,
+ .count = 0,
+ };
+
+ ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
+}
+
+/*
+ * INTx
+ */
+static void vfio_unmask_intx(VFIODevice *vdev)
+{
+ struct vfio_irq_set irq_set = {
+ .argsz = sizeof(irq_set),
+ .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
+ .index = VFIO_PCI_INTX_IRQ_INDEX,
+ .start = 0,
+ .count = 1,
+ };
+
+ ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
+}
+
+#ifdef CONFIG_KVM /* Unused outside of CONFIG_KVM code */
+static void vfio_mask_intx(VFIODevice *vdev)
+{
+ struct vfio_irq_set irq_set = {
+ .argsz = sizeof(irq_set),
+ .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
+ .index = VFIO_PCI_INTX_IRQ_INDEX,
+ .start = 0,
+ .count = 1,
+ };
+
+ ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
+}
+#endif
+
+/*
+ * Disabling BAR mmapping can be slow, but toggling it around INTx can
+ * also be a huge overhead. We try to get the best of both worlds by
+ * waiting until an interrupt to disable mmaps (subsequent transitions
+ * to the same state are effectively no overhead). If the interrupt has
+ * been serviced and the time gap is long enough, we re-enable mmaps for
+ * performance. This works well for things like graphics cards, which
+ * may not use their interrupt at all and are penalized to an unusable
+ * level by read/write BAR traps. Other devices, like NICs, have more
+ * regular interrupts and see much better latency by staying in non-mmap
+ * mode. We therefore set the default mmap_timeout such that a ping
+ * is just enough to keep the mmap disabled. Users can experiment with
+ * other options with the x-intx-mmap-timeout-ms parameter (a value of
+ * zero disables the timer).
+ */
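+
+/*
+ * For instance, a device that does best staying in non-mmap mode can have
+ * the timer disabled entirely (illustrative command line; the host
+ * address is a placeholder):
+ *
+ *     -device vfio-pci,host=01:00.0,x-intx-mmap-timeout-ms=0
+ */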
+static void vfio_intx_mmap_enable(void *opaque)
+{
+ VFIODevice *vdev = opaque;
+
+ if (vdev->intx.pending) {
+ qemu_mod_timer(vdev->intx.mmap_timer,
+ qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
+ return;
+ }
+
+ vfio_mmap_set_enabled(vdev, true);
+}
+
+static void vfio_intx_interrupt(void *opaque)
+{
+ VFIODevice *vdev = opaque;
+
+ if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
+ return;
+ }
+
+ DPRINTF("%s(%04x:%02x:%02x.%x) Pin %c\n", __func__, vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function,
+ 'A' + vdev->intx.pin);
+
+ vdev->intx.pending = true;
+ qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 1);
+ vfio_mmap_set_enabled(vdev, false);
+ if (vdev->intx.mmap_timeout) {
+ qemu_mod_timer(vdev->intx.mmap_timer,
+ qemu_get_clock_ms(vm_clock) + vdev->intx.mmap_timeout);
+ }
+}
+
+static void vfio_eoi(VFIODevice *vdev)
+{
+ if (!vdev->intx.pending) {
+ return;
+ }
+
+ DPRINTF("%s(%04x:%02x:%02x.%x) EOI\n", __func__, vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function);
+
+ vdev->intx.pending = false;
+ qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
+ vfio_unmask_intx(vdev);
+}
+
+static void vfio_enable_intx_kvm(VFIODevice *vdev)
+{
+#ifdef CONFIG_KVM
+ struct kvm_irqfd irqfd = {
+ .fd = event_notifier_get_fd(&vdev->intx.interrupt),
+ .gsi = vdev->intx.route.irq,
+ .flags = KVM_IRQFD_FLAG_RESAMPLE,
+ };
+ struct vfio_irq_set *irq_set;
+ int ret, argsz;
+ int32_t *pfd;
+
+ if (!VFIO_ALLOW_KVM_INTX || !kvm_irqfds_enabled() ||
+ vdev->intx.route.mode != PCI_INTX_ENABLED ||
+ !kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
+ return;
+ }
+
+ /* Get to a known interrupt state */
+ qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
+ vfio_mask_intx(vdev);
+ vdev->intx.pending = false;
+ qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
+
+ /* Get an eventfd for resample/unmask */
+ if (event_notifier_init(&vdev->intx.unmask, 0)) {
+ error_report("vfio: Error: event_notifier_init failed eoi");
+ goto fail;
+ }
+
+ /* KVM triggers it, VFIO listens for it */
+ irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);
+
+ if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
+ error_report("vfio: Error: Failed to setup resample irqfd: %m");
+ goto fail_irqfd;
+ }
+
+ argsz = sizeof(*irq_set) + sizeof(*pfd);
+
+ irq_set = g_malloc0(argsz);
+ irq_set->argsz = argsz;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
+ irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
+ irq_set->start = 0;
+ irq_set->count = 1;
+ pfd = (int32_t *)&irq_set->data;
+
+ *pfd = irqfd.resamplefd;
+
+ ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ g_free(irq_set);
+ if (ret) {
+ error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
+ goto fail_vfio;
+ }
+
+ /* Let'em rip */
+ vfio_unmask_intx(vdev);
+
+ vdev->intx.kvm_accel = true;
+
+ DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel enabled\n",
+ __func__, vdev->host.domain, vdev->host.bus,
+ vdev->host.slot, vdev->host.function);
+
+ return;
+
+fail_vfio:
+ irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
+ kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
+fail_irqfd:
+ event_notifier_cleanup(&vdev->intx.unmask);
+fail:
+ qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
+ vfio_unmask_intx(vdev);
+#endif
+}
+
+static void vfio_disable_intx_kvm(VFIODevice *vdev)
+{
+#ifdef CONFIG_KVM
+ struct kvm_irqfd irqfd = {
+ .fd = event_notifier_get_fd(&vdev->intx.interrupt),
+ .gsi = vdev->intx.route.irq,
+ .flags = KVM_IRQFD_FLAG_DEASSIGN,
+ };
+
+ if (!vdev->intx.kvm_accel) {
+ return;
+ }
+
+ /*
+ * Get to a known state, hardware masked, QEMU ready to accept new
+ * interrupts, QEMU IRQ de-asserted.
+ */
+ vfio_mask_intx(vdev);
+ vdev->intx.pending = false;
+ qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
+
+ /* Tell KVM to stop listening for an INTx irqfd */
+ if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
+ error_report("vfio: Error: Failed to disable INTx irqfd: %m");
+ }
+
+ /* We only need to close the eventfd for VFIO to cleanup the kernel side */
+ event_notifier_cleanup(&vdev->intx.unmask);
+
+ /* QEMU starts listening for interrupt events. */
+ qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
+
+ vdev->intx.kvm_accel = false;
+
+ /* If we've missed an event, let it re-fire through QEMU */
+ vfio_unmask_intx(vdev);
+
+ DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel disabled\n",
+ __func__, vdev->host.domain, vdev->host.bus,
+ vdev->host.slot, vdev->host.function);
+#endif
+}
+
+static void vfio_update_irq(PCIDevice *pdev)
+{
+ VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
+ PCIINTxRoute route;
+
+ if (vdev->interrupt != VFIO_INT_INTx) {
+ return;
+ }
+
+ route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);
+
+ if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
+ return; /* Nothing changed */
+ }
+
+ DPRINTF("%s(%04x:%02x:%02x.%x) IRQ moved %d -> %d\n", __func__,
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function, vdev->intx.route.irq, route.irq);
+
+ vfio_disable_intx_kvm(vdev);
+
+ vdev->intx.route = route;
+
+ if (route.mode != PCI_INTX_ENABLED) {
+ return;
+ }
+
+ vfio_enable_intx_kvm(vdev);
+
+ /* Re-enable the interrupt in case we missed an EOI */
+ vfio_eoi(vdev);
+}
+
+static int vfio_enable_intx(VFIODevice *vdev)
+{
+ uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
+ int ret, argsz;
+ struct vfio_irq_set *irq_set;
+ int32_t *pfd;
+
+ if (!pin) {
+ return 0;
+ }
+
+ vfio_disable_interrupts(vdev);
+
+ vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
+
+#ifdef CONFIG_KVM
+ /*
+ * Only conditional to avoid generating error messages on platforms
+ * where we won't actually use the result anyway.
+ */
+ if (kvm_irqfds_enabled() &&
+ kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
+ vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
+ vdev->intx.pin);
+ }
+#endif
+
+ ret = event_notifier_init(&vdev->intx.interrupt, 0);
+ if (ret) {
+ error_report("vfio: Error: event_notifier_init failed");
+ return ret;
+ }
+
+ argsz = sizeof(*irq_set) + sizeof(*pfd);
+
+ irq_set = g_malloc0(argsz);
+ irq_set->argsz = argsz;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
+ irq_set->start = 0;
+ irq_set->count = 1;
+ pfd = (int32_t *)&irq_set->data;
+
+ *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
+ qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);
+
+ ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ g_free(irq_set);
+ if (ret) {
+ error_report("vfio: Error: Failed to setup INTx fd: %m");
+ qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
+ event_notifier_cleanup(&vdev->intx.interrupt);
+ return -errno;
+ }
+
+ vfio_enable_intx_kvm(vdev);
+
+ vdev->interrupt = VFIO_INT_INTx;
+
+ DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function);
+
+ return 0;
+}
+
+static void vfio_disable_intx(VFIODevice *vdev)
+{
+ int fd;
+
+ qemu_del_timer(vdev->intx.mmap_timer);
+ vfio_disable_intx_kvm(vdev);
+ vfio_disable_irqindex(vdev, VFIO_PCI_INTX_IRQ_INDEX);
+ vdev->intx.pending = false;
+ qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
+ vfio_mmap_set_enabled(vdev, true);
+
+ fd = event_notifier_get_fd(&vdev->intx.interrupt);
+ qemu_set_fd_handler(fd, NULL, NULL, vdev);
+ event_notifier_cleanup(&vdev->intx.interrupt);
+
+ vdev->interrupt = VFIO_INT_NONE;
+
+ DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function);
+}
+
+/*
+ * MSI/X
+ */
+static void vfio_msi_interrupt(void *opaque)
+{
+ VFIOMSIVector *vector = opaque;
+ VFIODevice *vdev = vector->vdev;
+ int nr = vector - vdev->msi_vectors;
+
+ if (!event_notifier_test_and_clear(&vector->interrupt)) {
+ return;
+ }
+
+ DPRINTF("%s(%04x:%02x:%02x.%x) vector %d\n", __func__,
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function, nr);
+
+ if (vdev->interrupt == VFIO_INT_MSIX) {
+ msix_notify(&vdev->pdev, nr);
+ } else if (vdev->interrupt == VFIO_INT_MSI) {
+ msi_notify(&vdev->pdev, nr);
+ } else {
+ error_report("vfio: MSI interrupt receieved, but not enabled?");
+ }
+}
+
+static int vfio_enable_vectors(VFIODevice *vdev, bool msix)
+{
+ struct vfio_irq_set *irq_set;
+ int ret = 0, i, argsz;
+ int32_t *fds;
+
+ argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));
+
+ irq_set = g_malloc0(argsz);
+ irq_set->argsz = argsz;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
+ irq_set->start = 0;
+ irq_set->count = vdev->nr_vectors;
+ fds = (int32_t *)&irq_set->data;
+
+ for (i = 0; i < vdev->nr_vectors; i++) {
+ if (!vdev->msi_vectors[i].use) {
+ fds[i] = -1;
+ continue;
+ }
+
+ fds[i] = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
+ }
+
+ ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
+
+ g_free(irq_set);
+
+ return ret;
+}
+
+static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
+ MSIMessage *msg, IOHandler *handler)
+{
+ VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
+ VFIOMSIVector *vector;
+ int ret;
+
+ DPRINTF("%s(%04x:%02x:%02x.%x) vector %d used\n", __func__,
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function, nr);
+
+ vector = &vdev->msi_vectors[nr];
+ vector->vdev = vdev;
+ vector->use = true;
+
+ msix_vector_use(pdev, nr);
+
+ if (event_notifier_init(&vector->interrupt, 0)) {
+ error_report("vfio: Error: event_notifier_init failed");
+ }
+
+ /*
+ * Attempt to enable route through KVM irqchip,
+ * default to userspace handling if unavailable.
+ */
+ vector->virq = msg ? kvm_irqchip_add_msi_route(kvm_state, *msg) : -1;
+ if (vector->virq < 0 ||
+ kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
+ vector->virq) < 0) {
+ if (vector->virq >= 0) {
+ kvm_irqchip_release_virq(kvm_state, vector->virq);
+ vector->virq = -1;
+ }
+ qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
+ handler, NULL, vector);
+ }
+
+ /*
+ * We don't want to have the host allocate all possible MSI vectors
+ * for a device if they're not in use, so we shut them all down and
+ * incrementally increase them as needed.
+ */
+ if (vdev->nr_vectors < nr + 1) {
+ vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
+ vdev->nr_vectors = nr + 1;
+ ret = vfio_enable_vectors(vdev, true);
+ if (ret) {
+ error_report("vfio: failed to enable vectors, %d", ret);
+ }
+ } else {
+ int argsz;
+ struct vfio_irq_set *irq_set;
+ int32_t *pfd;
+
+ argsz = sizeof(*irq_set) + sizeof(*pfd);
+
+ irq_set = g_malloc0(argsz);
+ irq_set->argsz = argsz;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+ VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+ irq_set->start = nr;
+ irq_set->count = 1;
+ pfd = (int32_t *)&irq_set->data;
+
+ *pfd = event_notifier_get_fd(&vector->interrupt);
+
+ ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ g_free(irq_set);
+ if (ret) {
+ error_report("vfio: failed to modify vector, %d", ret);
+ }
+ }
+
+ return 0;
+}
+
+static int vfio_msix_vector_use(PCIDevice *pdev,
+ unsigned int nr, MSIMessage msg)
+{
+ return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
+}
+
+static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
+{
+ VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
+ VFIOMSIVector *vector = &vdev->msi_vectors[nr];
+ int argsz;
+ struct vfio_irq_set *irq_set;
+ int32_t *pfd;
+
+ DPRINTF("%s(%04x:%02x:%02x.%x) vector %d released\n", __func__,
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function, nr);
+
+ /*
+ * XXX What's the right thing to do here? This turns off the interrupt
+ * completely, but do we really just want to switch the interrupt to
+ * bouncing through userspace and let msix.c drop it? Not sure.
+ */
+ msix_vector_unuse(pdev, nr);
+
+ argsz = sizeof(*irq_set) + sizeof(*pfd);
+
+ irq_set = g_malloc0(argsz);
+ irq_set->argsz = argsz;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+ VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+ irq_set->start = nr;
+ irq_set->count = 1;
+ pfd = (int32_t *)&irq_set->data;
+
+ *pfd = -1;
+
+ ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
+
+ g_free(irq_set);
+
+ if (vector->virq < 0) {
+ qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
+ NULL, NULL, NULL);
+ } else {
+ kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
+ vector->virq);
+ kvm_irqchip_release_virq(kvm_state, vector->virq);
+ vector->virq = -1;
+ }
+
+ event_notifier_cleanup(&vector->interrupt);
+ vector->use = false;
+}
+
+static void vfio_enable_msix(VFIODevice *vdev)
+{
+ vfio_disable_interrupts(vdev);
+
+ vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector));
+
+ vdev->interrupt = VFIO_INT_MSIX;
+
+ /*
+ * Some communication channels between VF & PF or PF & fw rely on the
+ * physical state of the device and expect that enabling MSI-X from the
+ * guest enables the same on the host. When our guest is Linux, the
+ * guest driver call to pci_enable_msix() sets the enabling bit in the
+ * MSI-X capability, but leaves the vector table masked. We therefore
+ * can't rely on a vector_use callback (from request_irq() in the guest)
+ * to switch the physical device into MSI-X mode because that may come a
+ * long time after pci_enable_msix(). This code enables vector 0 with
+ * triggering to userspace, then immediately releases the vector, leaving
+ * the physical device with no vectors enabled, but MSI-X enabled, just
+ * like the guest view.
+ */
+ vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
+ vfio_msix_vector_release(&vdev->pdev, 0);
+
+ if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
+ vfio_msix_vector_release, NULL)) {
+ error_report("vfio: msix_set_vector_notifiers failed");
+ }
+
+ DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function);
+}
+
+static void vfio_enable_msi(VFIODevice *vdev)
+{
+ int ret, i;
+
+ vfio_disable_interrupts(vdev);
+
+ vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
+retry:
+ vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));
+
+ for (i = 0; i < vdev->nr_vectors; i++) {
+ MSIMessage msg;
+ VFIOMSIVector *vector = &vdev->msi_vectors[i];
+
+ vector->vdev = vdev;
+ vector->use = true;
+
+ if (event_notifier_init(&vector->interrupt, 0)) {
+ error_report("vfio: Error: event_notifier_init failed");
+ }
+
+ msg = msi_get_message(&vdev->pdev, i);
+
+ /*
+ * Attempt to enable route through KVM irqchip,
+ * default to userspace handling if unavailable.
+ */
+ vector->virq = kvm_irqchip_add_msi_route(kvm_state, msg);
+ if (vector->virq < 0 ||
+ kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
+ vector->virq) < 0) {
+ qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
+ vfio_msi_interrupt, NULL, vector);
+ }
+ }
+
+ ret = vfio_enable_vectors(vdev, false);
+ if (ret) {
+ if (ret < 0) {
+ error_report("vfio: Error: Failed to setup MSI fds: %m");
+ } else if (ret != vdev->nr_vectors) {
+ error_report("vfio: Error: Failed to enable %d "
+ "MSI vectors, retry with %d", vdev->nr_vectors, ret);
+ }
+
+ for (i = 0; i < vdev->nr_vectors; i++) {
+ VFIOMSIVector *vector = &vdev->msi_vectors[i];
+ if (vector->virq >= 0) {
+ kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
+ vector->virq);
+ kvm_irqchip_release_virq(kvm_state, vector->virq);
+ vector->virq = -1;
+ } else {
+ qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
+ NULL, NULL, NULL);
+ }
+ event_notifier_cleanup(&vector->interrupt);
+ }
+
+ g_free(vdev->msi_vectors);
+
+ if (ret > 0 && ret != vdev->nr_vectors) {
+ vdev->nr_vectors = ret;
+ goto retry;
+ }
+ vdev->nr_vectors = 0;
+
+ return;
+ }
+
+ vdev->interrupt = VFIO_INT_MSI;
+
+ DPRINTF("%s(%04x:%02x:%02x.%x) Enabled %d MSI vectors\n", __func__,
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function, vdev->nr_vectors);
+}
+
+static void vfio_disable_msi_common(VFIODevice *vdev)
+{
+ g_free(vdev->msi_vectors);
+ vdev->msi_vectors = NULL;
+ vdev->nr_vectors = 0;
+ vdev->interrupt = VFIO_INT_NONE;
+
+ vfio_enable_intx(vdev);
+}
+
+static void vfio_disable_msix(VFIODevice *vdev)
+{
+ msix_unset_vector_notifiers(&vdev->pdev);
+
+ if (vdev->nr_vectors) {
+ vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
+ }
+
+ vfio_disable_msi_common(vdev);
+
+ DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function);
+}
+
+static void vfio_disable_msi(VFIODevice *vdev)
+{
+ int i;
+
+ vfio_disable_irqindex(vdev, VFIO_PCI_MSI_IRQ_INDEX);
+
+ for (i = 0; i < vdev->nr_vectors; i++) {
+ VFIOMSIVector *vector = &vdev->msi_vectors[i];
+
+ if (!vector->use) {
+ continue;
+ }
+
+ if (vector->virq >= 0) {
+ kvm_irqchip_remove_irqfd_notifier(kvm_state,
+ &vector->interrupt, vector->virq);
+ kvm_irqchip_release_virq(kvm_state, vector->virq);
+ vector->virq = -1;
+ } else {
+ qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
+ NULL, NULL, NULL);
+ }
+
+ event_notifier_cleanup(&vector->interrupt);
+ }
+
+ vfio_disable_msi_common(vdev);
+
+ DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function);
+}
+
+/*
+ * IO Port/MMIO - Beware of the endians, VFIO is always little endian
+ */
+static void vfio_bar_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIOBAR *bar = opaque;
+ union {
+ uint8_t byte;
+ uint16_t word;
+ uint32_t dword;
+ uint64_t qword;
+ } buf;
+
+ switch (size) {
+ case 1:
+ buf.byte = data;
+ break;
+ case 2:
+ buf.word = cpu_to_le16(data);
+ break;
+ case 4:
+ buf.dword = cpu_to_le32(data);
+ break;
+ default:
+ hw_error("vfio: unsupported write size, %d bytes\n", size);
+ break;
+ }
+
+ if (pwrite(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
+ error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
+ __func__, addr, data, size);
+ }
+
+#ifdef DEBUG_VFIO
+ {
+ VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]);
+
+ DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"PRIx64
+ ", %d)\n", __func__, vdev->host.domain, vdev->host.bus,
+ vdev->host.slot, vdev->host.function, bar->nr, addr,
+ data, size);
+ }
+#endif
+
+ /*
+ * A read or write to a BAR always signals an INTx EOI. This will
+ * do nothing if not pending (including not in INTx mode). We assume
+ * that a BAR access is in response to an interrupt and that BAR
+ * accesses will service the interrupt. Unfortunately, we don't know
+ * which access will service the interrupt, so we're potentially
+ * getting quite a few host interrupts per guest interrupt.
+ */
+ vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));
+}
+
+static uint64_t vfio_bar_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIOBAR *bar = opaque;
+ union {
+ uint8_t byte;
+ uint16_t word;
+ uint32_t dword;
+ uint64_t qword;
+ } buf;
+ uint64_t data = 0;
+
+ if (pread(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
+ error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
+ __func__, addr, size);
+ return (uint64_t)-1;
+ }
+
+ switch (size) {
+ case 1:
+ data = buf.byte;
+ break;
+ case 2:
+ data = le16_to_cpu(buf.word);
+ break;
+ case 4:
+ data = le32_to_cpu(buf.dword);
+ break;
+ default:
+ hw_error("vfio: unsupported read size, %d bytes\n", size);
+ break;
+ }
+
+#ifdef DEBUG_VFIO
+ {
+ VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]);
+
+ DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx
+ ", %d) = 0x%"PRIx64"\n", __func__, vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function,
+ bar->nr, addr, size, data);
+ }
+#endif
+
+ /* Same as write above */
+ vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));
+
+ return data;
+}
+
+static const MemoryRegionOps vfio_bar_ops = {
+ .read = vfio_bar_read,
+ .write = vfio_bar_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void vfio_vga_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIOVGARegion *region = opaque;
+ VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
+ union {
+ uint8_t byte;
+ uint16_t word;
+ uint32_t dword;
+ uint64_t qword;
+ } buf;
+ off_t offset = vga->fd_offset + region->offset + addr;
+
+ switch (size) {
+ case 1:
+ buf.byte = data;
+ break;
+ case 2:
+ buf.word = cpu_to_le16(data);
+ break;
+ case 4:
+ buf.dword = cpu_to_le32(data);
+ break;
+ default:
+ hw_error("vfio: unsupported write size, %d bytes\n", size);
+ break;
+ }
+
+ if (pwrite(vga->fd, &buf, size, offset) != size) {
+ error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
+ __func__, region->offset + addr, data, size);
+ }
+
+ DPRINTF("%s(0x%"HWADDR_PRIx", 0x%"PRIx64", %d)\n",
+ __func__, region->offset + addr, data, size);
+}
+
+static uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
+{
+ VFIOVGARegion *region = opaque;
+ VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
+ union {
+ uint8_t byte;
+ uint16_t word;
+ uint32_t dword;
+ uint64_t qword;
+ } buf;
+ uint64_t data = 0;
+ off_t offset = vga->fd_offset + region->offset + addr;
+
+ if (pread(vga->fd, &buf, size, offset) != size) {
+ error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
+ __func__, region->offset + addr, size);
+ return (uint64_t)-1;
+ }
+
+ switch (size) {
+ case 1:
+ data = buf.byte;
+ break;
+ case 2:
+ data = le16_to_cpu(buf.word);
+ break;
+ case 4:
+ data = le32_to_cpu(buf.dword);
+ break;
+ default:
+ hw_error("vfio: unsupported read size, %d bytes\n", size);
+ break;
+ }
+
+ DPRINTF("%s(0x%"HWADDR_PRIx", %d) = 0x%"PRIx64"\n",
+ __func__, region->offset + addr, size, data);
+
+ return data;
+}
+
+static const MemoryRegionOps vfio_vga_ops = {
+ .read = vfio_vga_read,
+ .write = vfio_vga_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+/*
+ * Device specific quirks
+ */
+
+#define PCI_VENDOR_ID_ATI 0x1002
+
+/*
+ * Device 1002:68f9 (Advanced Micro Devices [AMD] nee ATI Cedar PRO [Radeon
+ * HD 5450/6350]) reports the upper byte of the physical address of the
+ * I/O port BAR4 through VGA register 0x3c3. The BAR is 256 bytes, so the
+ * lower byte is known to be zero. Probing for this quirk reads 0xff from
+ * port 0x3c3 on some devices so we store the physical address and replace
+ * reads with the virtual address any time it matches. XXX Research when
+ * to enable quirk.
+ */
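+
+/*
+ * As a concrete illustration (the port address is hypothetical): if the
+ * host placed I/O BAR4 at port 0xee00, a guest read of VGA register 0x3c3
+ * would otherwise return 0xee; the quirk recognizes that value and
+ * substitutes the high byte of the guest-visible BAR4 instead.
+ */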
+static uint64_t vfio_ati_3c3_quirk_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIOQuirk *quirk = opaque;
+ VFIODevice *vdev = quirk->vdev;
+ PCIDevice *pdev = &vdev->pdev;
+ uint64_t data = vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
+ addr + 0x3, size);
+
+ if (data == quirk->data) {
+ data = pci_get_byte(pdev->config + PCI_BASE_ADDRESS_4 + 1);
+ DPRINTF("%s(0x3c3, 1) = 0x%"PRIx64"\n", __func__, data);
+ }
+
+ return data;
+}
+
+static const MemoryRegionOps vfio_ati_3c3_quirk = {
+ .read = vfio_ati_3c3_quirk_read,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void vfio_vga_probe_ati_3c3_quirk(VFIODevice *vdev)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ off_t physoffset = vdev->config_offset + PCI_BASE_ADDRESS_4;
+ uint32_t physbar;
+ VFIOQuirk *quirk;
+
+ if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI ||
+ vdev->bars[4].size < 256) {
+ return;
+ }
+
+ /* Get I/O port BAR physical address */
+ if (pread(vdev->fd, &physbar, 4, physoffset) != 4) {
+ error_report("vfio: probe failed for ATI/AMD 0x3c3 quirk on device "
+ "%04x:%02x:%02x.%x", vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function);
+ return;
+ }
+
+ quirk = g_malloc0(sizeof(*quirk));
+ quirk->vdev = vdev;
+ quirk->data = (physbar >> 8) & 0xff;
+
+ memory_region_init_io(&quirk->mem, &vfio_ati_3c3_quirk, quirk,
+ "vfio-ati-3c3-quirk", 1);
+ memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem, 3,
+ &quirk->mem);
+
+ QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
+ quirk, next);
+
+ DPRINTF("Enabled ATI/AMD quirk 0x3c3 for device %04x:%02x:%02x.%x\n",
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function);
+}
+
+/*
+ * Device 1002:68f9 (Advanced Micro Devices [AMD] nee ATI Cedar PRO [Radeon
+ * HD 5450/6350]) reports the physical address of MMIO BAR0 through a
+ * write/read operation on I/O port BAR4. When uint32_t 0x4010 is written
+ * to offset 0x0, the subsequent read from offset 0x4 returns the contents
+ * of BAR0. Test for this quirk on all ATI/AMD devices. XXX - Note that
+ * 0x10 is the offset of BAR0 in config space; is this a window to all of
+ * config space?
+ */
+static uint64_t vfio_ati_4010_quirk_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIOQuirk *quirk = opaque;
+ VFIODevice *vdev = quirk->vdev;
+ PCIDevice *pdev = &vdev->pdev;
+ uint64_t data = vfio_bar_read(&vdev->bars[4], addr, size);
+
+ if (addr == 4 && size == 4 && quirk->data) {
+ data = pci_get_long(pdev->config + PCI_BASE_ADDRESS_0);
+ DPRINTF("%s(BAR4+0x4) = 0x%"PRIx64"\n", __func__, data);
+ }
+
+ quirk->data = 0;
+
+ return data;
+}
+
+static void vfio_ati_4010_quirk_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIOQuirk *quirk = opaque;
+ VFIODevice *vdev = quirk->vdev;
+
+ vfio_bar_write(&vdev->bars[4], addr, data, size);
+
+ quirk->data = (addr == 0 && size == 4 && data == 0x4010) ? 1 : 0;
+}
+
+static const MemoryRegionOps vfio_ati_4010_quirk = {
+ .read = vfio_ati_4010_quirk_read,
+ .write = vfio_ati_4010_quirk_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void vfio_probe_ati_4010_quirk(VFIODevice *vdev, int nr)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ off_t physoffset = vdev->config_offset + PCI_BASE_ADDRESS_0;
+ uint32_t physbar0;
+ uint64_t data;
+ VFIOQuirk *quirk;
+
+ if (!vdev->has_vga || nr != 4 || !vdev->bars[0].size ||
+ pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
+ return;
+ }
+
+ /* Get I/O port BAR physical address */
+ if (pread(vdev->fd, &physbar0, 4, physoffset) != 4) {
+ error_report("vfio: probe failed for ATI/AMD 0x4010 quirk on device "
+ "%04x:%02x:%02x.%x", vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function);
+ return;
+ }
+
+ /* Write 0x4010 to I/O port BAR offset 0 */
+ vfio_bar_write(&vdev->bars[4], 0, 0x4010, 4);
+ /* Read back result */
+ data = vfio_bar_read(&vdev->bars[4], 4, 4);
+
+ /* If the register matches the physical address of BAR0, we need a quirk */
+ if (data != physbar0) {
+ return;
+ }
+
+ quirk = g_malloc0(sizeof(*quirk));
+ quirk->vdev = vdev;
+
+ memory_region_init_io(&quirk->mem, &vfio_ati_4010_quirk, quirk,
+ "vfio-ati-4010-quirk", 8);
+ memory_region_add_subregion_overlap(&vdev->bars[nr].mem, 0, &quirk->mem, 1);
+
+ QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
+
+ DPRINTF("Enabled ATI/AMD quirk 0x4010 for device %04x:%02x:%02x.%x\n",
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function);
+}
+
+/*
+ * Device 1002:5b63 (Advanced Micro Devices [AMD] nee ATI RV370 [Radeon X550])
+ * retrieves the upper half of the MMIO BAR0 physical address by writing
+ * 0xf10 to I/O port BAR1 offset 0 and reading the result from offset 6.
+ * XXX - 0x10 is the offset of BAR0 in PCI config space, this could provide
+ * full access to config space. Config space is little endian, so the data
+ * register probably starts at 0x4.
+ */
+static uint64_t vfio_ati_f10_quirk_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIOQuirk *quirk = opaque;
+ VFIODevice *vdev = quirk->vdev;
+ PCIDevice *pdev = &vdev->pdev;
+ uint64_t data = vfio_bar_read(&vdev->bars[1], addr, size);
+
+ if (addr == 6 && size == 2 && quirk->data) {
+ data = pci_get_word(pdev->config + PCI_BASE_ADDRESS_0 + 2);
+ DPRINTF("%s(BAR1+0x6) = 0x%"PRIx64"\n", __func__, data);
+ }
+
+ quirk->data = 0;
+
+ return data;
+}
+
+static void vfio_ati_f10_quirk_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIOQuirk *quirk = opaque;
+ VFIODevice *vdev = quirk->vdev;
+
+ vfio_bar_write(&vdev->bars[1], addr, data, size);
+
+ quirk->data = (addr == 0 && size == 4 && data == 0xf10) ? 1 : 0;
+}
+
+static const MemoryRegionOps vfio_ati_f10_quirk = {
+ .read = vfio_ati_f10_quirk_read,
+ .write = vfio_ati_f10_quirk_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void vfio_probe_ati_f10_quirk(VFIODevice *vdev, int nr)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ off_t physoffset = vdev->config_offset + PCI_BASE_ADDRESS_0;
+ uint32_t physbar0;
+ uint64_t data;
+ VFIOQuirk *quirk;
+
+ if (!vdev->has_vga || nr != 1 || !vdev->bars[0].size ||
+ pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
+ return;
+ }
+
+ /* Get I/O port BAR physical address */
+ if (pread(vdev->fd, &physbar0, 4, physoffset) != 4) {
+ error_report("vfio: probe failed for ATI/AMD 0xf10 quirk on device "
+ "%04x:%02x:%02x.%x", vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function);
+ return;
+ }
+
+ vfio_bar_write(&vdev->bars[1], 0, 0xf10, 4);
+ data = vfio_bar_read(&vdev->bars[1], 0x6, 2);
+
+ /* If the register matches the physical address of BAR0, we need a quirk */
+ if (data != (le32_to_cpu(physbar0) >> 16)) {
+ return;
+ }
+
+ quirk = g_malloc0(sizeof(*quirk));
+ quirk->vdev = vdev;
+
+ memory_region_init_io(&quirk->mem, &vfio_ati_f10_quirk, quirk,
+ "vfio-ati-f10-quirk", 8);
+ memory_region_add_subregion_overlap(&vdev->bars[nr].mem, 0, &quirk->mem, 1);
+
+ QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
+
+ DPRINTF("Enabled ATI/AMD quirk 0xf10 for device %04x:%02x:%02x.%x\n",
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function);
+}
+
+#define PCI_VENDOR_ID_NVIDIA 0x10de
+
+/*
+ * Nvidia has several different methods to get to config space; the
+ * nouveau project has several of these documented here:
+ * https://github.com/pathscale/envytools/tree/master/hwdocs
+ *
+ * The first quirk is actually not documented in envytools and is found
+ * on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]). This is an
+ * NV46 chipset. The backdoor uses the legacy VGA I/O ports to access
+ * the mirror of PCI config space found at BAR0 offset 0x1800. The access
+ * sequence first writes 0x338 to I/O port 0x3d4. The target offset is
+ * then written to 0x3d0. Finally, 0x538 (for a read) or 0x738 (for a
+ * write) is written to 0x3d4. The BAR0 offset is then accessible
+ * through 0x3d0. This quirk doesn't seem to be necessary on newer cards
+ * that use the I/O port BAR5 window but it doesn't hurt to leave it.
+ */
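+
+/*
+ * A concrete access sequence (config offset 0x44 is just an example): the
+ * guest writes 0x338 to 0x3d4, then 0x1844 to 0x3d0 to select offset 0x44,
+ * then 0x538 to 0x3d4, and finally reads 0x3d0 to fetch the value at
+ * config space offset 0x44; writing 0x738 instead of 0x538 turns the
+ * final 0x3d0 access into a config space write.
+ */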
+enum {
+ NV_3D0_NONE,
+ NV_3D0_SELECT,
+ NV_3D0_WINDOW,
+ NV_3D0_READ,
+ NV_3D0_WRITE,
+};
+
+static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIOQuirk *quirk = opaque;
+ VFIODevice *vdev = quirk->vdev;
+ PCIDevice *pdev = &vdev->pdev;
+ uint64_t data = vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
+ addr + 0x10, size);
+
+ if (quirk->data == NV_3D0_READ && addr == 0) {
+ data = vfio_pci_read_config(pdev, quirk->data2, size);
+ DPRINTF("%s(0x3d0, %d) = 0x%"PRIx64"\n", __func__, size, data);
+ }
+
+ quirk->data = NV_3D0_NONE;
+
+ return data;
+}
+
+static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIOQuirk *quirk = opaque;
+ VFIODevice *vdev = quirk->vdev;
+ PCIDevice *pdev = &vdev->pdev;
+
+ switch (quirk->data) {
+ case NV_3D0_NONE:
+ if (addr == 4 && data == 0x338) {
+ quirk->data = NV_3D0_SELECT;
+ }
+ break;
+ case NV_3D0_SELECT:
+ quirk->data = NV_3D0_NONE;
+ if (addr == 0 && (data & ~0xff) == 0x1800) {
+ quirk->data = NV_3D0_WINDOW;
+ quirk->data2 = data & 0xff;
+ }
+ break;
+ case NV_3D0_WINDOW:
+ quirk->data = NV_3D0_NONE;
+ if (addr == 4) {
+ if (data == 0x538) {
+ quirk->data = NV_3D0_READ;
+ } else if (data == 0x738) {
+ quirk->data = NV_3D0_WRITE;
+ }
+ }
+ break;
+ case NV_3D0_WRITE:
+ quirk->data = NV_3D0_NONE;
+ if (addr == 0) {
+ vfio_pci_write_config(pdev, quirk->data2, data, size);
+ DPRINTF("%s(0x3d0, 0x%"PRIx64", %d)\n", __func__, data, size);
+ return;
+ }
+ break;
+ default:
+ quirk->data = NV_3D0_NONE;
+ }
+
+ vfio_vga_write(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
+ addr + 0x10, data, size);
+}
+
+static const MemoryRegionOps vfio_nvidia_3d0_quirk = {
+ .read = vfio_nvidia_3d0_quirk_read,
+ .write = vfio_nvidia_3d0_quirk_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void vfio_vga_probe_nvidia_3d0_quirk(VFIODevice *vdev)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ VFIOQuirk *quirk;
+
+ if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA ||
+ !vdev->bars[1].size) {
+ return;
+ }
+
+ quirk = g_malloc0(sizeof(*quirk));
+ quirk->vdev = vdev;
+
+ memory_region_init_io(&quirk->mem, &vfio_nvidia_3d0_quirk, quirk,
+ "vfio-nvidia-3d0-quirk", 6);
+ memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
+ 0x10, &quirk->mem);
+
+ QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
+ quirk, next);
+
+ DPRINTF("Enabled NVIDIA VGA 0x3d0 quirk for device %04x:%02x:%02x.%x\n",
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function);
+}
+
+/*
+ * The second quirk is documented in envytools. The I/O port BAR5 is just
+ * a set of address/data ports to the MMIO BARs. The BAR we care about is
+ * again BAR0. This backdoor is apparently a bit newer than the one above,
+ * so we need to trap not only the 256 bytes @0x1800, but all of PCI config
+ * space, including extended space, which is available in the 4k window
+ * @0x88000.
+ */
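+
+/*
+ * A concrete sequence (offset 0x44 is again just an example): the guest
+ * writes 1 to BAR5 offsets 0x0 and 0x4 to enable the window, writes
+ * 0x88044 (or 0x1844) to offset 0x8 to select config offset 0x44, and
+ * then reads or writes the data register at offset 0xc.
+ */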
+enum {
+ NV_BAR5_ADDRESS = 0x1,
+ NV_BAR5_ENABLE = 0x2,
+ NV_BAR5_MASTER = 0x4,
+ NV_BAR5_VALID = 0x7,
+};
+
+static uint64_t vfio_nvidia_bar5_window_quirk_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIOQuirk *quirk = opaque;
+ VFIODevice *vdev = quirk->vdev;
+ uint64_t data = vfio_bar_read(&vdev->bars[5], addr, size);
+
+ if (addr == 0xc && quirk->data == NV_BAR5_VALID) {
+ data = vfio_pci_read_config(&vdev->pdev, quirk->data2, size);
+ DPRINTF("%s(%04x:%02x:%02x.%x:BAR5+0x%"HWADDR_PRIx", %d) = 0x%"
+ PRIx64"\n", __func__, vdev->host.domain, vdev->host.bus,
+ vdev->host.slot, vdev->host.function, addr, size, data);
+ }
+
+ return data;
+}
+
+static void vfio_nvidia_bar5_window_quirk_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIOQuirk *quirk = opaque;
+ VFIODevice *vdev = quirk->vdev;
+
+ /*
+ * Use quirk->data to track enables and quirk->data2 for the offset
+ */
+ switch (addr) {
+ case 0x0:
+ if (data & 0x1) {
+ quirk->data |= NV_BAR5_MASTER;
+ } else {
+ quirk->data &= ~NV_BAR5_MASTER;
+ }
+ break;
+ case 0x4:
+ if (data & 0x1) {
+ quirk->data |= NV_BAR5_ENABLE;
+ } else {
+ quirk->data &= ~NV_BAR5_ENABLE;
+ }
+ break;
+ case 0x8:
+ if (quirk->data & NV_BAR5_MASTER) {
+ if ((data & ~0xfff) == 0x88000) {
+ quirk->data |= NV_BAR5_ADDRESS;
+ quirk->data2 = data & 0xfff;
+ } else if ((data & ~0xff) == 0x1800) {
+ quirk->data |= NV_BAR5_ADDRESS;
+ quirk->data2 = data & 0xff;
+ } else {
+ quirk->data &= ~NV_BAR5_ADDRESS;
+ }
+ }
+ break;
+ case 0xc:
+ if (quirk->data == NV_BAR5_VALID) {
+ vfio_pci_write_config(&vdev->pdev, quirk->data2, data, size);
+ DPRINTF("%s(%04x:%02x:%02x.%x:BAR5+0x%"HWADDR_PRIx", 0x%"
+ PRIx64", %d)\n", __func__, vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function,
+ addr, data, size);
+ return;
+ }
+ }
+
+ vfio_bar_write(&vdev->bars[5], addr, data, size);
+}
+
+static const MemoryRegionOps vfio_nvidia_bar5_window_quirk = {
+ .read = vfio_nvidia_bar5_window_quirk_read,
+ .write = vfio_nvidia_bar5_window_quirk_write,
+ .valid.min_access_size = 4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void vfio_probe_nvidia_bar5_window_quirk(VFIODevice *vdev, int nr)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ VFIOQuirk *quirk;
+
+ if (!vdev->has_vga || nr != 5 ||
+ pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
+ return;
+ }
+
+ quirk = g_malloc0(sizeof(*quirk));
+ quirk->vdev = vdev;
+
+ memory_region_init_io(&quirk->mem, &vfio_nvidia_bar5_window_quirk, quirk,
+ "vfio-nvidia-bar5-window-quirk", 16);
+ memory_region_add_subregion_overlap(&vdev->bars[nr].mem, 0, &quirk->mem, 1);
+
+ QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
+
+ DPRINTF("Enabled NVIDIA BAR5 window quirk for device %04x:%02x:%02x.%x\n",
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function);
+}
+
+/*
+ * Finally, BAR0 itself. We want to redirect any accesses to either
+ * 0x1800 or 0x88000 through the PCI config space access functions.
+ *
+ * NB - quirks need to be at page granularity or else they don't seem to
+ * work when BARs are mmap'd
+ *
+ * Here's offset 0x88000...
+ */
+static uint64_t vfio_nvidia_bar0_88000_quirk_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIOQuirk *quirk = opaque;
+ VFIODevice *vdev = quirk->vdev;
+ hwaddr base = 0x88000 & TARGET_PAGE_MASK;
+ hwaddr offset = 0x88000 & ~TARGET_PAGE_MASK;
+ uint64_t data = vfio_bar_read(&vdev->bars[0], addr + base, size);
+
+ if (ranges_overlap(addr, size, offset, PCI_CONFIG_SPACE_SIZE)) {
+ data = vfio_pci_read_config(&vdev->pdev, addr - offset, size);
+
+ DPRINTF("%s(%04x:%02x:%02x.%x:BAR0+0x%"HWADDR_PRIx", %d) = 0x%"
+ PRIx64"\n", __func__, vdev->host.domain, vdev->host.bus,
+ vdev->host.slot, vdev->host.function, addr + base, size, data);
+ }
+
+ return data;
+}
+
+static void vfio_nvidia_bar0_88000_quirk_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIOQuirk *quirk = opaque;
+ VFIODevice *vdev = quirk->vdev;
+ hwaddr base = 0x88000 & TARGET_PAGE_MASK;
+ hwaddr offset = 0x88000 & ~TARGET_PAGE_MASK;
+
+ if (ranges_overlap(addr, size, offset, PCI_CONFIG_SPACE_SIZE)) {
+ vfio_pci_write_config(&vdev->pdev, addr - offset, data, size);
+
+ DPRINTF("%s(%04x:%02x:%02x.%x:BAR0+0x%"HWADDR_PRIx", 0x%"
+ PRIx64", %d)\n", __func__, vdev->host.domain, vdev->host.bus,
+ vdev->host.slot, vdev->host.function, addr + base, data, size);
+ } else {
+ vfio_bar_write(&vdev->bars[0], addr + base, data, size);
+ }
+}
+
+static const MemoryRegionOps vfio_nvidia_bar0_88000_quirk = {
+ .read = vfio_nvidia_bar0_88000_quirk_read,
+ .write = vfio_nvidia_bar0_88000_quirk_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void vfio_probe_nvidia_bar0_88000_quirk(VFIODevice *vdev, int nr)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ VFIOQuirk *quirk;
+
+ if (!vdev->has_vga || nr != 0 ||
+ pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
+ return;
+ }
+
+ quirk = g_malloc0(sizeof(*quirk));
+ quirk->vdev = vdev;
+
+ memory_region_init_io(&quirk->mem, &vfio_nvidia_bar0_88000_quirk, quirk,
+ "vfio-nvidia-bar0-88000-quirk",
+ TARGET_PAGE_ALIGN(PCIE_CONFIG_SPACE_SIZE));
+ memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
+ 0x88000 & TARGET_PAGE_MASK,
+ &quirk->mem, 1);
+
+ QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
+
+ DPRINTF("Enabled NVIDIA BAR0 0x88000 quirk for device %04x:%02x:%02x.%x\n",
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function);
+}
+
+/*
+ * And here's the same for BAR0 offset 0x1800...
+ */
+static uint64_t vfio_nvidia_bar0_1800_quirk_read(void *opaque,
+ hwaddr addr, unsigned size)
+{
+ VFIOQuirk *quirk = opaque;
+ VFIODevice *vdev = quirk->vdev;
+ hwaddr base = 0x1800 & TARGET_PAGE_MASK;
+ hwaddr offset = 0x1800 & ~TARGET_PAGE_MASK;
+ uint64_t data = vfio_bar_read(&vdev->bars[0], addr + base, size);
+
+ if (ranges_overlap(addr, size, offset, PCI_CONFIG_SPACE_SIZE)) {
+ data = vfio_pci_read_config(&vdev->pdev, addr - offset, size);
+
+ DPRINTF("%s(%04x:%02x:%02x.%x:BAR0+0x%"HWADDR_PRIx", %d) = 0x%"
+ PRIx64"\n", __func__, vdev->host.domain, vdev->host.bus,
+ vdev->host.slot, vdev->host.function, addr + base, size, data);
+ }
+
+ return data;
+}
+
+static void vfio_nvidia_bar0_1800_quirk_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ VFIOQuirk *quirk = opaque;
+ VFIODevice *vdev = quirk->vdev;
+ hwaddr base = 0x1800 & TARGET_PAGE_MASK;
+ hwaddr offset = 0x1800 & ~TARGET_PAGE_MASK;
+
+ if (ranges_overlap(addr, size, offset, PCI_CONFIG_SPACE_SIZE)) {
+ vfio_pci_write_config(&vdev->pdev, addr - offset, data, size);
+
+ DPRINTF("%s(%04x:%02x:%02x.%x:BAR0+0x%"HWADDR_PRIx", 0x%"
+ PRIx64", %d)\n", __func__, vdev->host.domain, vdev->host.bus,
+ vdev->host.slot, vdev->host.function, addr + base, data, size);
+ } else {
+ vfio_bar_write(&vdev->bars[0], addr + base, data, size);
+ }
+}
+
+static const MemoryRegionOps vfio_nvidia_bar0_1800_quirk = {
+ .read = vfio_nvidia_bar0_1800_quirk_read,
+ .write = vfio_nvidia_bar0_1800_quirk_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void vfio_probe_nvidia_bar0_1800_quirk(VFIODevice *vdev, int nr)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ VFIOQuirk *quirk;
+
+ if (!vdev->has_vga || nr != 0 ||
+ pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
+ return;
+ }
+
+ /* Log the chipset ID */
+ DPRINTF("Nvidia NV%02x\n",
+ (unsigned int)(vfio_bar_read(&vdev->bars[0], 0, 4) >> 20) & 0xff);
+
+ quirk = g_malloc0(sizeof(*quirk));
+ quirk->vdev = vdev;
+
+ memory_region_init_io(&quirk->mem, &vfio_nvidia_bar0_1800_quirk, quirk,
+ "vfio-nvidia-bar0-1800-quirk",
+ TARGET_PAGE_ALIGN(PCI_CONFIG_SPACE_SIZE));
+ memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
+ 0x1800 & TARGET_PAGE_MASK,
+ &quirk->mem, 1);
+
+ QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
+
+ DPRINTF("Enabled NVIDIA BAR0 0x1800 quirk for device %04x:%02x:%02x.%x\n",
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function);
+}
+
+/*
+ * TODO - Some Nvidia devices provide config access to their companion HDA
+ * device and even to their parent bridge via these config space mirrors.
+ * Add quirks for those regions.
+ */
+
+/*
+ * Common quirk probe entry points.
+ */
+static void vfio_vga_quirk_setup(VFIODevice *vdev)
+{
+ vfio_vga_probe_ati_3c3_quirk(vdev);
+ vfio_vga_probe_nvidia_3d0_quirk(vdev);
+}
+
+static void vfio_vga_quirk_teardown(VFIODevice *vdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) {
+ while (!QLIST_EMPTY(&vdev->vga.region[i].quirks)) {
+ VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga.region[i].quirks);
+ memory_region_del_subregion(&vdev->vga.region[i].mem, &quirk->mem);
+ QLIST_REMOVE(quirk, next);
+ g_free(quirk);
+ }
+ }
+}
+
+static void vfio_bar_quirk_setup(VFIODevice *vdev, int nr)
+{
+ vfio_probe_ati_4010_quirk(vdev, nr);
+ vfio_probe_ati_f10_quirk(vdev, nr);
+ vfio_probe_nvidia_bar5_window_quirk(vdev, nr);
+ vfio_probe_nvidia_bar0_88000_quirk(vdev, nr);
+ vfio_probe_nvidia_bar0_1800_quirk(vdev, nr);
+}
+
+static void vfio_bar_quirk_teardown(VFIODevice *vdev, int nr)
+{
+ VFIOBAR *bar = &vdev->bars[nr];
+
+ while (!QLIST_EMPTY(&bar->quirks)) {
+ VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks);
+ memory_region_del_subregion(&bar->mem, &quirk->mem);
+ QLIST_REMOVE(quirk, next);
+ g_free(quirk);
+ }
+}
+
+/*
+ * PCI config space
+ */
+static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
+{
+ VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
+ uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;
+
+ memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
+ emu_bits = le32_to_cpu(emu_bits);
+
+ if (emu_bits) {
+ emu_val = pci_default_read_config(pdev, addr, len);
+ }
+
+ if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
+ ssize_t ret;
+
+ ret = pread(vdev->fd, &phys_val, len, vdev->config_offset + addr);
+ if (ret != len) {
+ error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m",
+ __func__, vdev->host.domain, vdev->host.bus,
+ vdev->host.slot, vdev->host.function, addr, len);
+ return -errno;
+ }
+ phys_val = le32_to_cpu(phys_val);
+ }
+
+ val = (emu_val & emu_bits) | (phys_val & ~emu_bits);
+
+ DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, len=0x%x) %x\n", __func__,
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function, addr, len, val);
+
+ return val;
+}
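+/*
+ * Illustrative note (not part of the original change): for a 2-byte read
+ * where the emulated_config_bits bytes yield emu_bits == 0x00ff, the merge
+ * above amounts to
+ *
+ *     val = (emu_val & 0x00ff) | (phys_val & 0xff00);
+ *
+ * i.e. the low byte comes from QEMU's emulated config space and the high
+ * byte from the physical device, byte for byte as selected by
+ * vdev->emulated_config_bits.
+ */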
+
+static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
+ uint32_t val, int len)
+{
+ VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
+ uint32_t val_le = cpu_to_le32(val);
+
+ DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, 0x%x, len=0x%x)\n", __func__,
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function, addr, val, len);
+
+ /* Write everything to VFIO, let it filter out what we can't write */
+ if (pwrite(vdev->fd, &val_le, len, vdev->config_offset + addr) != len) {
+ error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m",
+ __func__, vdev->host.domain, vdev->host.bus,
+ vdev->host.slot, vdev->host.function, addr, val, len);
+ }
+
+ /* MSI/MSI-X Enabling/Disabling */
+ if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
+ ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
+ int is_enabled, was_enabled = msi_enabled(pdev);
+
+ pci_default_write_config(pdev, addr, val, len);
+
+ is_enabled = msi_enabled(pdev);
+
+ if (!was_enabled && is_enabled) {
+ vfio_enable_msi(vdev);
+ } else if (was_enabled && !is_enabled) {
+ vfio_disable_msi(vdev);
+ }
+ } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
+ ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
+ int is_enabled, was_enabled = msix_enabled(pdev);
+
+ pci_default_write_config(pdev, addr, val, len);
+
+ is_enabled = msix_enabled(pdev);
+
+ if (!was_enabled && is_enabled) {
+ vfio_enable_msix(vdev);
+ } else if (was_enabled && !is_enabled) {
+ vfio_disable_msix(vdev);
+ }
+ } else {
+ /* Write everything to QEMU to keep emulated bits correct */
+ pci_default_write_config(pdev, addr, val, len);
+ }
+}
+
+/*
+ * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
+ */
+static int vfio_dma_unmap(VFIOContainer *container,
+ hwaddr iova, ram_addr_t size)
+{
+ struct vfio_iommu_type1_dma_unmap unmap = {
+ .argsz = sizeof(unmap),
+ .flags = 0,
+ .iova = iova,
+ .size = size,
+ };
+
+ if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
+ DPRINTF("VFIO_UNMAP_DMA: %d\n", -errno);
+ return -errno;
+ }
+
+ return 0;
+}
+
+static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
+ ram_addr_t size, void *vaddr, bool readonly)
+{
+ struct vfio_iommu_type1_dma_map map = {
+ .argsz = sizeof(map),
+ .flags = VFIO_DMA_MAP_FLAG_READ,
+ .vaddr = (__u64)(uintptr_t)vaddr,
+ .iova = iova,
+ .size = size,
+ };
+
+ if (!readonly) {
+ map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
+ }
+
+ /*
+ * Try the mapping, if it fails with EBUSY, unmap the region and try
+ * again. This shouldn't be necessary, but we sometimes see it in
+ * the VGA ROM space.
+ */
+ if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
+ (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
+ ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
+ return 0;
+ }
+
+ DPRINTF("VFIO_MAP_DMA: %d\n", -errno);
+ return -errno;
+}
+
+static bool vfio_listener_skipped_section(MemoryRegionSection *section)
+{
+ return !memory_region_is_ram(section->mr);
+}
+
+static void vfio_listener_region_add(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ VFIOContainer *container = container_of(listener, VFIOContainer,
+ iommu_data.listener);
+ hwaddr iova, end;
+ void *vaddr;
+ int ret;
+
+ if (vfio_listener_skipped_section(section)) {
+ DPRINTF("SKIPPING region_add %"HWADDR_PRIx" - %"PRIx64"\n",
+ section->offset_within_address_space,
+ section->offset_within_address_space + section->size - 1);
+ return;
+ }
+
+ if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
+ (section->offset_within_region & ~TARGET_PAGE_MASK))) {
+ error_report("%s received unaligned region", __func__);
+ return;
+ }
+
+ iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
+ end = (section->offset_within_address_space + section->size) &
+ TARGET_PAGE_MASK;
+
+ if (iova >= end) {
+ return;
+ }
+
+ vaddr = memory_region_get_ram_ptr(section->mr) +
+ section->offset_within_region +
+ (iova - section->offset_within_address_space);
+
+ DPRINTF("region_add %"HWADDR_PRIx" - %"HWADDR_PRIx" [%p]\n",
+ iova, end - 1, vaddr);
+
+ ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly);
+ if (ret) {
+ error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
+ "0x%"HWADDR_PRIx", %p) = %d (%m)",
+ container, iova, end - iova, vaddr, ret);
+ }
+}
+
+static void vfio_listener_region_del(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ VFIOContainer *container = container_of(listener, VFIOContainer,
+ iommu_data.listener);
+ hwaddr iova, end;
+ int ret;
+
+ if (vfio_listener_skipped_section(section)) {
+ DPRINTF("SKIPPING region_del %"HWADDR_PRIx" - %"PRIx64"\n",
+ section->offset_within_address_space,
+ section->offset_within_address_space + section->size - 1);
+ return;
+ }
+
+ if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
+ (section->offset_within_region & ~TARGET_PAGE_MASK))) {
+ error_report("%s received unaligned region", __func__);
+ return;
+ }
+
+ iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
+ end = (section->offset_within_address_space + section->size) &
+ TARGET_PAGE_MASK;
+
+ if (iova >= end) {
+ return;
+ }
+
+ DPRINTF("region_del %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
+ iova, end - 1);
+
+ ret = vfio_dma_unmap(container, iova, end - iova);
+ if (ret) {
+ error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
+ "0x%"HWADDR_PRIx") = %d (%m)",
+ container, iova, end - iova, ret);
+ }
+}
+
+static MemoryListener vfio_memory_listener = {
+ .region_add = vfio_listener_region_add,
+ .region_del = vfio_listener_region_del,
+};
+
+static void vfio_listener_release(VFIOContainer *container)
+{
+ memory_listener_unregister(&container->iommu_data.listener);
+}
+
+/*
+ * Interrupt setup
+ */
+static void vfio_disable_interrupts(VFIODevice *vdev)
+{
+ switch (vdev->interrupt) {
+ case VFIO_INT_INTx:
+ vfio_disable_intx(vdev);
+ break;
+ case VFIO_INT_MSI:
+ vfio_disable_msi(vdev);
+ break;
+ case VFIO_INT_MSIX:
+ vfio_disable_msix(vdev);
+ break;
+ }
+}
+
+static int vfio_setup_msi(VFIODevice *vdev, int pos)
+{
+ uint16_t ctrl;
+ bool msi_64bit, msi_maskbit;
+ int ret, entries;
+
+ if (pread(vdev->fd, &ctrl, sizeof(ctrl),
+ vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
+ return -errno;
+ }
+ ctrl = le16_to_cpu(ctrl);
+
+ msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
+ msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
+ entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);
+
+ DPRINTF("%04x:%02x:%02x.%x PCI MSI CAP @0x%x\n", vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function, pos);
+
+ ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit);
+ if (ret < 0) {
+ if (ret == -ENOTSUP) {
+ return 0;
+ }
+ error_report("vfio: msi_init failed");
+ return ret;
+ }
+ vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);
+
+ return 0;
+}
+
+/*
+ * We don't have any control over how pci_add_capability() inserts
+ * capabilities into the chain. In order to set up MSI-X we need a
+ * MemoryRegion for the BAR. In order to set up the BAR and not
+ * attempt to mmap the MSI-X table area, which VFIO won't allow, we
+ * first need to find where the MSI-X table lives. So we
+ * unfortunately split MSI-X setup across two functions.
+ */
+static int vfio_early_setup_msix(VFIODevice *vdev)
+{
+ uint8_t pos;
+ uint16_t ctrl;
+ uint32_t table, pba;
+
+ pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
+ if (!pos) {
+ return 0;
+ }
+
+ if (pread(vdev->fd, &ctrl, sizeof(ctrl),
+ vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
+ return -errno;
+ }
+
+ if (pread(vdev->fd, &table, sizeof(table),
+ vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
+ return -errno;
+ }
+
+ if (pread(vdev->fd, &pba, sizeof(pba),
+ vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
+ return -errno;
+ }
+
+ ctrl = le16_to_cpu(ctrl);
+ table = le32_to_cpu(table);
+ pba = le32_to_cpu(pba);
+
+ vdev->msix = g_malloc0(sizeof(*(vdev->msix)));
+ vdev->msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
+ vdev->msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
+ vdev->msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
+ vdev->msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
+ vdev->msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
+
+ DPRINTF("%04x:%02x:%02x.%x "
+ "PCI MSI-X CAP @0x%x, BAR %d, offset 0x%x, entries %d\n",
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function, pos, vdev->msix->table_bar,
+ vdev->msix->table_offset, vdev->msix->entries);
+
+ return 0;
+}
+
+static int vfio_setup_msix(VFIODevice *vdev, int pos)
+{
+ int ret;
+
+ ret = msix_init(&vdev->pdev, vdev->msix->entries,
+ &vdev->bars[vdev->msix->table_bar].mem,
+ vdev->msix->table_bar, vdev->msix->table_offset,
+ &vdev->bars[vdev->msix->pba_bar].mem,
+ vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
+ if (ret < 0) {
+ if (ret == -ENOTSUP) {
+ return 0;
+ }
+ error_report("vfio: msix_init failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vfio_teardown_msi(VFIODevice *vdev)
+{
+ msi_uninit(&vdev->pdev);
+
+ if (vdev->msix) {
+ msix_uninit(&vdev->pdev, &vdev->bars[vdev->msix->table_bar].mem,
+ &vdev->bars[vdev->msix->pba_bar].mem);
+ }
+}
+
+/*
+ * Resource setup
+ */
+static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled)
+{
+ int i;
+
+ for (i = 0; i < PCI_ROM_SLOT; i++) {
+ VFIOBAR *bar = &vdev->bars[i];
+
+ if (!bar->size) {
+ continue;
+ }
+
+ memory_region_set_enabled(&bar->mmap_mem, enabled);
+ if (vdev->msix && vdev->msix->table_bar == i) {
+ memory_region_set_enabled(&vdev->msix->mmap_mem, enabled);
+ }
+ }
+}
+
+static void vfio_unmap_bar(VFIODevice *vdev, int nr)
+{
+ VFIOBAR *bar = &vdev->bars[nr];
+
+ if (!bar->size) {
+ return;
+ }
+
+ vfio_bar_quirk_teardown(vdev, nr);
+
+ memory_region_del_subregion(&bar->mem, &bar->mmap_mem);
+ munmap(bar->mmap, memory_region_size(&bar->mmap_mem));
+
+ if (vdev->msix && vdev->msix->table_bar == nr) {
+ memory_region_del_subregion(&bar->mem, &vdev->msix->mmap_mem);
+ munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem));
+ }
+
+ memory_region_destroy(&bar->mem);
+}
+
+static int vfio_mmap_bar(VFIOBAR *bar, MemoryRegion *mem, MemoryRegion *submem,
+ void **map, size_t size, off_t offset,
+ const char *name)
+{
+ int ret = 0;
+
+ if (VFIO_ALLOW_MMAP && size && bar->flags & VFIO_REGION_INFO_FLAG_MMAP) {
+ int prot = 0;
+
+ if (bar->flags & VFIO_REGION_INFO_FLAG_READ) {
+ prot |= PROT_READ;
+ }
+
+ if (bar->flags & VFIO_REGION_INFO_FLAG_WRITE) {
+ prot |= PROT_WRITE;
+ }
+
+ *map = mmap(NULL, size, prot, MAP_SHARED,
+ bar->fd, bar->fd_offset + offset);
+ if (*map == MAP_FAILED) {
+ *map = NULL;
+ ret = -errno;
+ goto empty_region;
+ }
+
+ memory_region_init_ram_ptr(submem, name, size, *map);
+ } else {
+empty_region:
+ /* Create a zero sized sub-region to make cleanup easy. */
+ memory_region_init(submem, name, 0);
+ }
+
+ memory_region_add_subregion(mem, offset, submem);
+
+ return ret;
+}
+
+static void vfio_map_bar(VFIODevice *vdev, int nr)
+{
+ VFIOBAR *bar = &vdev->bars[nr];
+ unsigned size = bar->size;
+ char name[64];
+ uint32_t pci_bar;
+ uint8_t type;
+ int ret;
+
+ /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
+ if (!size) {
+ return;
+ }
+
+ snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d",
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function, nr);
+
+ /* Determine what type of BAR this is for registration */
+ ret = pread(vdev->fd, &pci_bar, sizeof(pci_bar),
+ vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
+ if (ret != sizeof(pci_bar)) {
+ error_report("vfio: Failed to read BAR %d (%m)", nr);
+ return;
+ }
+
+ pci_bar = le32_to_cpu(pci_bar);
+ type = pci_bar & (pci_bar & PCI_BASE_ADDRESS_SPACE_IO ?
+ ~PCI_BASE_ADDRESS_IO_MASK : ~PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* A "slow" read/write mapping underlies all BARs */
+ memory_region_init_io(&bar->mem, &vfio_bar_ops, bar, name, size);
+ pci_register_bar(&vdev->pdev, nr, type, &bar->mem);
+
+ /*
+ * We can't mmap areas overlapping the MSIX vector table, so we
+ * potentially insert a direct-mapped subregion before and after it.
+ */
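+ /*
+ * Illustrative example (assumed values, not from the original change):
+ * for a 16K BAR with the MSI-X table at offset 0x2000 and 16 entries
+ * (16 * PCI_MSIX_ENTRY_SIZE = 256 bytes), with 4K pages this mmaps
+ * [0x0, 0x2000) here and a second "msix-hi" region starting at
+ * TARGET_PAGE_ALIGN(0x2000 + 0x100) = 0x3000 further below, leaving
+ * the table page itself on the slow read/write path.
+ */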
+ if (vdev->msix && vdev->msix->table_bar == nr) {
+ size = vdev->msix->table_offset & TARGET_PAGE_MASK;
+ }
+
+ strncat(name, " mmap", sizeof(name) - strlen(name) - 1);
+ if (vfio_mmap_bar(bar, &bar->mem,
+ &bar->mmap_mem, &bar->mmap, size, 0, name)) {
+ error_report("%s unsupported. Performance may be slow", name);
+ }
+
+ if (vdev->msix && vdev->msix->table_bar == nr) {
+ unsigned start;
+
+ start = TARGET_PAGE_ALIGN(vdev->msix->table_offset +
+ (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));
+
+ size = start < bar->size ? bar->size - start : 0;
+ strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1);
+ /* VFIOMSIXInfo contains another MemoryRegion for this mapping */
+ if (vfio_mmap_bar(bar, &bar->mem, &vdev->msix->mmap_mem,
+ &vdev->msix->mmap, size, start, name)) {
+ error_report("%s unsupported. Performance may be slow", name);
+ }
+ }
+
+ vfio_bar_quirk_setup(vdev, nr);
+}
+
+static void vfio_map_bars(VFIODevice *vdev)
+{
+ int i;
+
+ for (i = 0; i < PCI_ROM_SLOT; i++) {
+ vfio_map_bar(vdev, i);
+ }
+
+ if (vdev->has_vga) {
+ memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
+ &vfio_vga_ops,
+ &vdev->vga.region[QEMU_PCI_VGA_MEM],
+ "vfio-vga-mmio@0xa0000",
+ QEMU_PCI_VGA_MEM_SIZE);
+ memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
+ &vfio_vga_ops,
+ &vdev->vga.region[QEMU_PCI_VGA_IO_LO],
+ "vfio-vga-io@0x3b0",
+ QEMU_PCI_VGA_IO_LO_SIZE);
+ memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
+ &vfio_vga_ops,
+ &vdev->vga.region[QEMU_PCI_VGA_IO_HI],
+ "vfio-vga-io@0x3c0",
+ QEMU_PCI_VGA_IO_HI_SIZE);
+
+ pci_register_vga(&vdev->pdev, &vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
+ &vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
+ &vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);
+ vfio_vga_quirk_setup(vdev);
+ }
+}
+
+static void vfio_unmap_bars(VFIODevice *vdev)
+{
+ int i;
+
+ for (i = 0; i < PCI_ROM_SLOT; i++) {
+ vfio_unmap_bar(vdev, i);
+ }
+
+ if (vdev->has_vga) {
+ vfio_vga_quirk_teardown(vdev);
+ pci_unregister_vga(&vdev->pdev);
+ memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem);
+ memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem);
+ memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);
+ }
+}
+
+/*
+ * General setup
+ */
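+/*
+ * Illustrative note (not part of the original change): the capability list
+ * in config space is not necessarily ordered by offset, so the helper below
+ * scans the whole chain for the lowest capability offset above 'pos'. For
+ * example, with capabilities at 0x40, 0x60 and 0x50, a call with pos = 0x50
+ * finds next = 0x60 and returns 0x10; for the highest capability it returns
+ * 0xff - pos, i.e. the rest of standard config space.
+ */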
+static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
+{
+ uint8_t tmp, next = 0xff;
+
+ for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
+ tmp = pdev->config[tmp + 1]) {
+ if (tmp > pos && tmp < next) {
+ next = tmp;
+ }
+ }
+
+ return next - pos;
+}
+
+static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
+{
+ pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
+}
+
+static void vfio_add_emulated_word(VFIODevice *vdev, int pos,
+ uint16_t val, uint16_t mask)
+{
+ vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
+ vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
+ vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
+}
+
+static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
+{
+ pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
+}
+
+static void vfio_add_emulated_long(VFIODevice *vdev, int pos,
+ uint32_t val, uint32_t mask)
+{
+ vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
+ vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
+ vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
+}
+
+static int vfio_setup_pcie_cap(VFIODevice *vdev, int pos, uint8_t size)
+{
+ uint16_t flags;
+ uint8_t type;
+
+ flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
+ type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;
+
+ if (type != PCI_EXP_TYPE_ENDPOINT &&
+ type != PCI_EXP_TYPE_LEG_END &&
+ type != PCI_EXP_TYPE_RC_END) {
+
+ error_report("vfio: Assignment of PCIe type 0x%x "
+ "devices is not currently supported", type);
+ return -EINVAL;
+ }
+
+ if (!pci_bus_is_express(vdev->pdev.bus)) {
+ /*
+ * Use the express capability as-is on a PCI bus. It doesn't make much
+ * sense to even expose it, but some drivers (e.g. tg3) depend on it
+ * and guests don't seem to be particular about it. We'll need to
+ * revisit this, or force express devices onto express buses, if we
+ * ever expose an IOMMU to the guest.
+ */
+ } else if (pci_bus_is_root(vdev->pdev.bus)) {
+ /*
+ * On a Root Complex bus Endpoints become Root Complex Integrated
+ * Endpoints, which changes the type and clears the LNK & LNK2 fields.
+ */
+ if (type == PCI_EXP_TYPE_ENDPOINT) {
+ vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
+ PCI_EXP_TYPE_RC_END << 4,
+ PCI_EXP_FLAGS_TYPE);
+
+ /* Link Capabilities, Status, and Control go away */
+ if (size > PCI_EXP_LNKCTL) {
+ vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
+ vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
+ vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);
+
+#ifndef PCI_EXP_LNKCAP2
+#define PCI_EXP_LNKCAP2 44
+#endif
+#ifndef PCI_EXP_LNKSTA2
+#define PCI_EXP_LNKSTA2 50
+#endif
+ /* Link 2 Capabilities, Status, and Control go away */
+ if (size > PCI_EXP_LNKCAP2) {
+ vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
+ vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
+ vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
+ }
+ }
+
+ } else if (type == PCI_EXP_TYPE_LEG_END) {
+ /*
+ * Legacy endpoints don't belong on the root complex. Windows
+ * seems to be happier with devices if we skip the capability.
+ */
+ return 0;
+ }
+
+ } else {
+ /*
+ * Convert Root Complex Integrated Endpoints to regular endpoints.
+ * These devices don't support LNK/LNK2 capabilities, so make them up.
+ */
+ if (type == PCI_EXP_TYPE_RC_END) {
+ vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
+ PCI_EXP_TYPE_ENDPOINT << 4,
+ PCI_EXP_FLAGS_TYPE);
+ vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
+ PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
+ vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
+ }
+
+ /* Mark the Link Status bits as emulated to allow virtual negotiation */
+ vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
+ pci_get_word(vdev->pdev.config + pos +
+ PCI_EXP_LNKSTA),
+ PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
+ }
+
+ pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
+ if (pos >= 0) {
+ vdev->pdev.exp.exp_cap = pos;
+ }
+
+ return pos;
+}
+
+static int vfio_add_std_cap(VFIODevice *vdev, uint8_t pos)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ uint8_t cap_id, next, size;
+ int ret;
+
+ cap_id = pdev->config[pos];
+ next = pdev->config[pos + 1];
+
+ /*
+ * If it becomes important to configure capabilities to their actual
+ * size, use this as the default when it's something we don't recognize.
+ * Since QEMU doesn't actually handle many of the config accesses,
+ * getting the exact size right doesn't seem worthwhile.
+ */
+ size = vfio_std_cap_max_size(pdev, pos);
+
+ /*
+ * pci_add_capability always inserts the new capability at the head
+ * of the chain. Therefore, to end up with a chain that matches the
+ * physical device, we insert from the end by making this recursive.
+ * This is also why we pre-calculate size above: the cached config space
+ * will be changed as we unwind the stack.
+ */
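+ /*
+ * Worked example (illustrative, not part of the original change): if
+ * the physical chain is 0x40 -> 0x50 -> 0x60, the recursion below
+ * bottoms out at 0x60 and capabilities are added in the order 0x60,
+ * 0x50, 0x40 on the way back up, so pci_add_capability()'s head
+ * insertion reproduces 0x40 -> 0x50 -> 0x60 for the guest.
+ */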
+ if (next) {
+ ret = vfio_add_std_cap(vdev, next);
+ if (ret) {
+ return ret;
+ }
+ } else {
+ /* Begin the rebuild, use QEMU emulated list bits */
+ pdev->config[PCI_CAPABILITY_LIST] = 0;
+ vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
+ vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
+ }
+
+ /* Use emulated next pointer to allow dropping caps */
+ pci_set_byte(vdev->emulated_config_bits + pos + 1, 0xff);
+
+ switch (cap_id) {
+ case PCI_CAP_ID_MSI:
+ ret = vfio_setup_msi(vdev, pos);
+ break;
+ case PCI_CAP_ID_EXP:
+ ret = vfio_setup_pcie_cap(vdev, pos, size);
+ break;
+ case PCI_CAP_ID_MSIX:
+ ret = vfio_setup_msix(vdev, pos);
+ break;
+ case PCI_CAP_ID_PM:
+ vdev->pm_cap = pos;
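+ /* fall through to also add the capability to the chain */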
+ default:
+ ret = pci_add_capability(pdev, cap_id, pos, size);
+ break;
+ }
+
+ if (ret < 0) {
+ error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability "
+ "0x%x[0x%x]@0x%x: %d", vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function,
+ cap_id, size, pos, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vfio_add_capabilities(VFIODevice *vdev)
+{
+ PCIDevice *pdev = &vdev->pdev;
+
+ if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
+ !pdev->config[PCI_CAPABILITY_LIST]) {
+ return 0; /* Nothing to add */
+ }
+
+ return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
+}
+
+static int vfio_load_rom(VFIODevice *vdev)
+{
+ uint64_t size = vdev->rom_size;
+ char name[32];
+ off_t off = 0, voff = vdev->rom_offset;
+ ssize_t bytes;
+ void *ptr;
+
+ /* If loading ROM from file, pci handles it */
+ if (vdev->pdev.romfile || !vdev->pdev.rom_bar || !size) {
+ return 0;
+ }
+
+ DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function);
+
+ snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom",
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function);
+ memory_region_init_ram(&vdev->pdev.rom, name, size);
+ ptr = memory_region_get_ram_ptr(&vdev->pdev.rom);
+ memset(ptr, 0xff, size);
+
+ while (size) {
+ bytes = pread(vdev->fd, ptr + off, size, voff + off);
+ if (bytes == 0) {
+ break; /* we may read back less than the full ROM BAR */
+ } else if (bytes > 0) {
+ off += bytes;
+ size -= bytes;
+ } else {
+ if (errno == EINTR || errno == EAGAIN) {
+ continue;
+ }
+ error_report("vfio: Error reading device ROM: %m");
+ memory_region_destroy(&vdev->pdev.rom);
+ return -errno;
+ }
+ }
+
+ pci_register_bar(&vdev->pdev, PCI_ROM_SLOT, 0, &vdev->pdev.rom);
+ vdev->pdev.has_rom = true;
+ return 0;
+}
+
+static int vfio_connect_container(VFIOGroup *group)
+{
+ VFIOContainer *container;
+ int ret, fd;
+
+ if (group->container) {
+ return 0;
+ }
+
+ QLIST_FOREACH(container, &container_list, next) {
+ if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
+ group->container = container;
+ QLIST_INSERT_HEAD(&container->group_list, group, container_next);
+ return 0;
+ }
+ }
+
+ fd = qemu_open("/dev/vfio/vfio", O_RDWR);
+ if (fd < 0) {
+ error_report("vfio: failed to open /dev/vfio/vfio: %m");
+ return -errno;
+ }
+
+ ret = ioctl(fd, VFIO_GET_API_VERSION);
+ if (ret != VFIO_API_VERSION) {
+ error_report("vfio: supported vfio version: %d, "
+ "reported version: %d", VFIO_API_VERSION, ret);
+ close(fd);
+ return -EINVAL;
+ }
+
+ container = g_malloc0(sizeof(*container));
+ container->fd = fd;
+
+ if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
+ ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
+ if (ret) {
+ error_report("vfio: failed to set group container: %m");
+ g_free(container);
+ close(fd);
+ return -errno;
+ }
+
+ ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
+ if (ret) {
+ error_report("vfio: failed to set iommu for container: %m");
+ g_free(container);
+ close(fd);
+ return -errno;
+ }
+
+ container->iommu_data.listener = vfio_memory_listener;
+ container->iommu_data.release = vfio_listener_release;
+
+ memory_listener_register(&container->iommu_data.listener, &address_space_memory);
+ } else {
+ error_report("vfio: No available IOMMU models");
+ g_free(container);
+ close(fd);
+ return -EINVAL;
+ }
+
+ QLIST_INIT(&container->group_list);
+ QLIST_INSERT_HEAD(&container_list, container, next);
+
+ group->container = container;
+ QLIST_INSERT_HEAD(&container->group_list, group, container_next);
+
+ return 0;
+}
+
+static void vfio_disconnect_container(VFIOGroup *group)
+{
+ VFIOContainer *container = group->container;
+
+ if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
+ error_report("vfio: error disconnecting group %d from container",
+ group->groupid);
+ }
+
+ QLIST_REMOVE(group, container_next);
+ group->container = NULL;
+
+ if (QLIST_EMPTY(&container->group_list)) {
+ if (container->iommu_data.release) {
+ container->iommu_data.release(container);
+ }
+ QLIST_REMOVE(container, next);
+ DPRINTF("vfio_disconnect_container: close container->fd\n");
+ close(container->fd);
+ g_free(container);
+ }
+}
+
+static VFIOGroup *vfio_get_group(int groupid)
+{
+ VFIOGroup *group;
+ char path[32];
+ struct vfio_group_status status = { .argsz = sizeof(status) };
+
+ QLIST_FOREACH(group, &group_list, next) {
+ if (group->groupid == groupid) {
+ return group;
+ }
+ }
+
+ group = g_malloc0(sizeof(*group));
+
+ snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
+ group->fd = qemu_open(path, O_RDWR);
+ if (group->fd < 0) {
+ error_report("vfio: error opening %s: %m", path);
+ g_free(group);
+ return NULL;
+ }
+
+ if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
+ error_report("vfio: error getting group status: %m");
+ close(group->fd);
+ g_free(group);
+ return NULL;
+ }
+
+ if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
+ error_report("vfio: error, group %d is not viable, please ensure "
+ "all devices within the iommu_group are bound to their "
+ "vfio bus driver.", groupid);
+ close(group->fd);
+ g_free(group);
+ return NULL;
+ }
+
+ group->groupid = groupid;
+ QLIST_INIT(&group->device_list);
+
+ if (vfio_connect_container(group)) {
+ error_report("vfio: failed to setup container for group %d", groupid);
+ close(group->fd);
+ g_free(group);
+ return NULL;
+ }
+
+ QLIST_INSERT_HEAD(&group_list, group, next);
+
+ return group;
+}
+
+static void vfio_put_group(VFIOGroup *group)
+{
+ if (!QLIST_EMPTY(&group->device_list)) {
+ return;
+ }
+
+ vfio_disconnect_container(group);
+ QLIST_REMOVE(group, next);
+ DPRINTF("vfio_put_group: close group->fd\n");
+ close(group->fd);
+ g_free(group);
+}
+
+static int vfio_get_device(VFIOGroup *group, const char *name, VFIODevice *vdev)
+{
+ struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
+ struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
+ int ret, i;
+
+ ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
+ if (ret < 0) {
+ error_report("vfio: error getting device %s from group %d: %m",
+ name, group->groupid);
+ error_printf("Verify all devices in group %d are bound to vfio-pci "
+ "or pci-stub and not already in use\n", group->groupid);
+ return ret;
+ }
+
+ vdev->fd = ret;
+ vdev->group = group;
+ QLIST_INSERT_HEAD(&group->device_list, vdev, next);
+
+ /* Sanity check device */
+ ret = ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &dev_info);
+ if (ret) {
+ error_report("vfio: error getting device info: %m");
+ goto error;
+ }
+
+ DPRINTF("Device %s flags: %u, regions: %u, irqs: %u\n", name,
+ dev_info.flags, dev_info.num_regions, dev_info.num_irqs);
+
+ if (!(dev_info.flags & VFIO_DEVICE_FLAGS_PCI)) {
+ error_report("vfio: Um, this isn't a PCI device");
+ goto error;
+ }
+
+ vdev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
+ if (!vdev->reset_works) {
+ error_report("Warning, device %s does not support reset", name);
+ }
+
+ if (dev_info.num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
+ error_report("vfio: unexpected number of io regions %u",
+ dev_info.num_regions);
+ goto error;
+ }
+
+ if (dev_info.num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
+ error_report("vfio: unexpected number of irqs %u", dev_info.num_irqs);
+ goto error;
+ }
+
+ for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
+ reg_info.index = i;
+
+ ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
+ if (ret) {
+ error_report("vfio: Error getting region %d info: %m", i);
+ goto error;
+ }
+
+ DPRINTF("Device %s region %d:\n", name, i);
+ DPRINTF(" size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
+ (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
+ (unsigned long)reg_info.flags);
+
+ vdev->bars[i].flags = reg_info.flags;
+ vdev->bars[i].size = reg_info.size;
+ vdev->bars[i].fd_offset = reg_info.offset;
+ vdev->bars[i].fd = vdev->fd;
+ vdev->bars[i].nr = i;
+ QLIST_INIT(&vdev->bars[i].quirks);
+ }
+
+ reg_info.index = VFIO_PCI_ROM_REGION_INDEX;
+
+ ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
+ if (ret) {
+ error_report("vfio: Error getting ROM info: %m");
+ goto error;
+ }
+
+ DPRINTF("Device %s ROM:\n", name);
+ DPRINTF(" size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
+ (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
+ (unsigned long)reg_info.flags);
+
+ vdev->rom_size = reg_info.size;
+ vdev->rom_offset = reg_info.offset;
+
+ reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX;
+
+ ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
+ if (ret) {
+ error_report("vfio: Error getting config info: %m");
+ goto error;
+ }
+
+ DPRINTF("Device %s config:\n", name);
+ DPRINTF(" size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
+ (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
+ (unsigned long)reg_info.flags);
+
+ vdev->config_size = reg_info.size;
+ if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
+ vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
+ }
+ vdev->config_offset = reg_info.offset;
+
+ if ((vdev->features & VFIO_FEATURE_ENABLE_VGA) &&
+ dev_info.num_regions > VFIO_PCI_VGA_REGION_INDEX) {
+ struct vfio_region_info vga_info = {
+ .argsz = sizeof(vga_info),
+ .index = VFIO_PCI_VGA_REGION_INDEX,
+ };
+
+ ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &vga_info);
+ if (ret) {
+ error_report(
+ "vfio: Device does not support requested feature x-vga");
+ goto error;
+ }
+
+ if (!(vga_info.flags & VFIO_REGION_INFO_FLAG_READ) ||
+ !(vga_info.flags & VFIO_REGION_INFO_FLAG_WRITE) ||
+ vga_info.size < 0xbffff + 1) {
+ error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx",
+ (unsigned long)vga_info.flags,
+ (unsigned long)vga_info.size);
+ goto error;
+ }
+
+ vdev->vga.fd_offset = vga_info.offset;
+ vdev->vga.fd = vdev->fd;
+
+ vdev->vga.region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
+ vdev->vga.region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
+ QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_MEM].quirks);
+
+ vdev->vga.region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
+ vdev->vga.region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
+ QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].quirks);
+
+ vdev->vga.region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
+ vdev->vga.region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
+ QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks);
+
+ vdev->has_vga = true;
+ }
+
+error:
+ if (ret) {
+ QLIST_REMOVE(vdev, next);
+ vdev->group = NULL;
+ close(vdev->fd);
+ }
+ return ret;
+}
+
+static void vfio_put_device(VFIODevice *vdev)
+{
+ QLIST_REMOVE(vdev, next);
+ vdev->group = NULL;
+ DPRINTF("vfio_put_device: close vdev->fd\n");
+ close(vdev->fd);
+ if (vdev->msix) {
+ g_free(vdev->msix);
+ vdev->msix = NULL;
+ }
+}
+
+static int vfio_initfn(PCIDevice *pdev)
+{
+ VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
+ VFIOGroup *group;
+ char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
+ ssize_t len;
+ struct stat st;
+ int groupid;
+ int ret;
+
+ /* Check that the host device exists */
+ snprintf(path, sizeof(path),
+ "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function);
+ if (stat(path, &st) < 0) {
+ error_report("vfio: error: no such host device: %s", path);
+ return -errno;
+ }
+
+ strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1);
+
+ len = readlink(path, iommu_group_path, PATH_MAX);
+ if (len <= 0) {
+ error_report("vfio: error: no iommu_group for device");
+ return -errno;
+ }
+
+ iommu_group_path[len] = 0;
+ group_name = basename(iommu_group_path);
+
+ if (sscanf(group_name, "%d", &groupid) != 1) {
+ error_report("vfio: error reading %s: %m", path);
+ return -errno;
+ }
+
+ DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function, groupid);
+
+ group = vfio_get_group(groupid);
+ if (!group) {
+ error_report("vfio: failed to get group %d", groupid);
+ return -ENOENT;
+ }
+
+ snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x",
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function);
+
+ QLIST_FOREACH(pvdev, &group->device_list, next) {
+ if (pvdev->host.domain == vdev->host.domain &&
+ pvdev->host.bus == vdev->host.bus &&
+ pvdev->host.slot == vdev->host.slot &&
+ pvdev->host.function == vdev->host.function) {
+
+ error_report("vfio: error: device %s is already attached", path);
+ vfio_put_group(group);
+ return -EBUSY;
+ }
+ }
+
+ ret = vfio_get_device(group, path, vdev);
+ if (ret) {
+ error_report("vfio: failed to get device %s", path);
+ vfio_put_group(group);
+ return ret;
+ }
+
+ /* Get a copy of config space */
+ ret = pread(vdev->fd, vdev->pdev.config,
+ MIN(pci_config_size(&vdev->pdev), vdev->config_size),
+ vdev->config_offset);
+ if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
+ ret = ret < 0 ? -errno : -EFAULT;
+ error_report("vfio: Failed to read device config space");
+ goto out_put;
+ }
+
+ /* vfio emulates a lot for us, but some bits need extra love */
+ vdev->emulated_config_bits = g_malloc0(vdev->config_size);
+
+ /* QEMU can choose to expose the ROM or not */
+ memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
+
+ /* QEMU can change multi-function devices to single function, or reverse */
+ vdev->emulated_config_bits[PCI_HEADER_TYPE] =
+ PCI_HEADER_TYPE_MULTI_FUNCTION;
+
+ /*
+ * Clear host resource mapping info. If we choose not to register a
+ * BAR, such as might be the case with the option ROM, we can get
+ * confusing, unwritable, residual addresses from the host here.
+ */
+ memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
+ memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
+
+ vfio_load_rom(vdev);
+
+ ret = vfio_early_setup_msix(vdev);
+ if (ret) {
+ goto out_put;
+ }
+
+ vfio_map_bars(vdev);
+
+ ret = vfio_add_capabilities(vdev);
+ if (ret) {
+ goto out_teardown;
+ }
+
+ /* QEMU emulates all of MSI & MSIX */
+ if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
+ memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
+ MSIX_CAP_LENGTH);
+ }
+
+ if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
+ memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
+ vdev->msi_cap_size);
+ }
+
+ if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
+ vdev->intx.mmap_timer = qemu_new_timer_ms(vm_clock,
+ vfio_intx_mmap_enable, vdev);
+ pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq);
+ ret = vfio_enable_intx(vdev);
+ if (ret) {
+ goto out_teardown;
+ }
+ }
+
+ add_boot_device_path(vdev->bootindex, &pdev->qdev, NULL);
+
+ return 0;
+
+out_teardown:
+ pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
+ vfio_teardown_msi(vdev);
+ vfio_unmap_bars(vdev);
+out_put:
+ g_free(vdev->emulated_config_bits);
+ vfio_put_device(vdev);
+ vfio_put_group(group);
+ return ret;
+}
+
+static void vfio_exitfn(PCIDevice *pdev)
+{
+ VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
+ VFIOGroup *group = vdev->group;
+
+ pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
+ vfio_disable_interrupts(vdev);
+ if (vdev->intx.mmap_timer) {
+ qemu_free_timer(vdev->intx.mmap_timer);
+ }
+ vfio_teardown_msi(vdev);
+ vfio_unmap_bars(vdev);
+ g_free(vdev->emulated_config_bits);
+ vfio_put_device(vdev);
+ vfio_put_group(group);
+}
+
+static void vfio_pci_reset(DeviceState *dev)
+{
+ PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
+ VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
+ uint16_t cmd;
+
+ DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function);
+
+ vfio_disable_interrupts(vdev);
+
+ /* Make sure the device is in D0 */
+ if (vdev->pm_cap) {
+ uint16_t pmcsr;
+ uint8_t state;
+
+ pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
+ state = pmcsr & PCI_PM_CTRL_STATE_MASK;
+ if (state) {
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
+ /* vfio handles the necessary delay here */
+ pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
+ state = pmcsr & PCI_PM_CTRL_STATE_MASK;
+ if (state) {
+ error_report("vfio: Unable to power on device, stuck in D%d\n",
+ state);
+ }
+ }
+ }
+
+ /*
+ * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
+ * Also put INTx Disable into a known state.
+ */
+ cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
+ cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
+ PCI_COMMAND_INTX_DISABLE);
+ vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
+
+ if (vdev->reset_works) {
+ if (ioctl(vdev->fd, VFIO_DEVICE_RESET)) {
+ error_report("vfio: Error unable to reset physical device "
+ "(%04x:%02x:%02x.%x): %m", vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function);
+ }
+ }
+
+ vfio_enable_intx(vdev);
+}
+
+static Property vfio_pci_dev_properties[] = {
+ DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIODevice, host),
+ DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIODevice,
+ intx.mmap_timeout, 1100),
+ DEFINE_PROP_BIT("x-vga", VFIODevice, features,
+ VFIO_FEATURE_ENABLE_VGA_BIT, false),
+ DEFINE_PROP_INT32("bootindex", VFIODevice, bootindex, -1),
+ /*
+ * TODO - support passed fds... is this necessary?
+ * DEFINE_PROP_STRING("vfiofd", VFIODevice, vfiofd_name),
+ * DEFINE_PROP_STRING("vfiogroupfd", VFIODevice, vfiogroupfd_name),
+ */
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static const VMStateDescription vfio_pci_vmstate = {
+ .name = "vfio-pci",
+ .unmigratable = 1,
+};
+
+static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);
+
+ dc->reset = vfio_pci_reset;
+ dc->props = vfio_pci_dev_properties;
+ dc->vmsd = &vfio_pci_vmstate;
+ dc->desc = "VFIO-based PCI device assignment";
+ pdc->init = vfio_initfn;
+ pdc->exit = vfio_exitfn;
+ pdc->config_read = vfio_pci_read_config;
+ pdc->config_write = vfio_pci_write_config;
+ pdc->is_express = 1; /* We might be */
+}
+
+static const TypeInfo vfio_pci_dev_info = {
+ .name = "vfio-pci",
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(VFIODevice),
+ .class_init = vfio_pci_dev_class_init,
+};
+
+static void register_vfio_pci_dev_type(void)
+{
+ type_register_static(&vfio_pci_dev_info);
+}
+
+type_init(register_vfio_pci_dev_type)
diff --git a/hw/misc/vmport.c b/hw/misc/vmport.c
new file mode 100644
index 0000000..0d07ea1
--- /dev/null
+++ b/hw/misc/vmport.c
@@ -0,0 +1,170 @@
+/*
+ * QEMU VMPort emulation
+ *
+ * Copyright (C) 2007 Hervé Poussineau
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/isa/isa.h"
+#include "hw/i386/pc.h"
+#include "sysemu/kvm.h"
+#include "hw/qdev.h"
+
+//#define VMPORT_DEBUG
+
+#define VMPORT_CMD_GETVERSION 0x0a
+#define VMPORT_CMD_GETRAMSIZE 0x14
+
+#define VMPORT_ENTRIES 0x2c
+#define VMPORT_MAGIC 0x564D5868
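+
+/*
+ * Illustrative note (guest-side protocol, not part of this change): a guest
+ * triggers the backdoor by loading VMPORT_MAGIC (0x564D5868) into EAX and a
+ * command number (e.g. VMPORT_CMD_GETVERSION) into ECX, then executing a
+ * 32-bit "in" from I/O port 0x5658; vmport_ioport_read() below dispatches
+ * on ECX and returns the result in EAX.
+ */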
+
+typedef struct _VMPortState
+{
+ ISADevice dev;
+ MemoryRegion io;
+ IOPortReadFunc *func[VMPORT_ENTRIES];
+ void *opaque[VMPORT_ENTRIES];
+} VMPortState;
+
+static VMPortState *port_state;
+
+void vmport_register(unsigned char command, IOPortReadFunc *func, void *opaque)
+{
+ if (command >= VMPORT_ENTRIES)
+ return;
+
+ port_state->func[command] = func;
+ port_state->opaque[command] = opaque;
+}
+
+static uint64_t vmport_ioport_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ VMPortState *s = opaque;
+ CPUX86State *env = cpu_single_env;
+ unsigned char command;
+ uint32_t eax;
+
+ cpu_synchronize_state(env);
+
+ eax = env->regs[R_EAX];
+ if (eax != VMPORT_MAGIC)
+ return eax;
+
+ command = env->regs[R_ECX];
+ if (command >= VMPORT_ENTRIES)
+ return eax;
+ if (!s->func[command])
+ {
+#ifdef VMPORT_DEBUG
+ fprintf(stderr, "vmport: unknown command %x\n", command);
+#endif
+ return eax;
+ }
+
+ return s->func[command](s->opaque[command], addr);
+}
+
+static void vmport_ioport_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ CPUX86State *env = cpu_single_env;
+
+ env->regs[R_EAX] = vmport_ioport_read(opaque, addr, 4);
+}
+
+static uint32_t vmport_cmd_get_version(void *opaque, uint32_t addr)
+{
+ CPUX86State *env = cpu_single_env;
+ env->regs[R_EBX] = VMPORT_MAGIC;
+ return 6;
+}
+
+static uint32_t vmport_cmd_ram_size(void *opaque, uint32_t addr)
+{
+ CPUX86State *env = cpu_single_env;
+ env->regs[R_EBX] = 0x1177;
+ return ram_size;
+}
+
+/* vmmouse helpers */
+void vmmouse_get_data(uint32_t *data)
+{
+ CPUX86State *env = cpu_single_env;
+
+ data[0] = env->regs[R_EAX]; data[1] = env->regs[R_EBX];
+ data[2] = env->regs[R_ECX]; data[3] = env->regs[R_EDX];
+ data[4] = env->regs[R_ESI]; data[5] = env->regs[R_EDI];
+}
+
+void vmmouse_set_data(const uint32_t *data)
+{
+ CPUX86State *env = cpu_single_env;
+
+ env->regs[R_EAX] = data[0]; env->regs[R_EBX] = data[1];
+ env->regs[R_ECX] = data[2]; env->regs[R_EDX] = data[3];
+ env->regs[R_ESI] = data[4]; env->regs[R_EDI] = data[5];
+}
+
+static const MemoryRegionOps vmport_ops = {
+ .read = vmport_ioport_read,
+ .write = vmport_ioport_write,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static int vmport_initfn(ISADevice *dev)
+{
+ VMPortState *s = DO_UPCAST(VMPortState, dev, dev);
+
+ memory_region_init_io(&s->io, &vmport_ops, s, "vmport", 1);
+ isa_register_ioport(dev, &s->io, 0x5658);
+
+ port_state = s;
+ /* Register some generic port commands */
+ vmport_register(VMPORT_CMD_GETVERSION, vmport_cmd_get_version, NULL);
+ vmport_register(VMPORT_CMD_GETRAMSIZE, vmport_cmd_ram_size, NULL);
+ return 0;
+}
+
+static void vmport_class_initfn(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ISADeviceClass *ic = ISA_DEVICE_CLASS(klass);
+ ic->init = vmport_initfn;
+ dc->no_user = 1;
+}
+
+static const TypeInfo vmport_info = {
+ .name = "vmport",
+ .parent = TYPE_ISA_DEVICE,
+ .instance_size = sizeof(VMPortState),
+ .class_init = vmport_class_initfn,
+};
+
+static void vmport_register_types(void)
+{
+ type_register_static(&vmport_info);
+}
+
+type_init(vmport_register_types)
diff --git a/hw/misc/zynq_slcr.c b/hw/misc/zynq_slcr.c
new file mode 100644
index 0000000..8418327
--- /dev/null
+++ b/hw/misc/zynq_slcr.c
@@ -0,0 +1,536 @@
+/*
+ * Status and system control registers for Xilinx Zynq Platform
+ *
+ * Copyright (c) 2011 Michal Simek <monstr@monstr.eu>
+ * Copyright (c) 2012 PetaLogix Pty Ltd.
+ * Based on hw/arm_sysctl.c, written by Paul Brook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hw/hw.h"
+#include "qemu/timer.h"
+#include "hw/sysbus.h"
+#include "sysemu/sysemu.h"
+
+#ifdef ZYNQ_ARM_SLCR_ERR_DEBUG
+#define DB_PRINT(...) do { \
+ fprintf(stderr, ": %s: ", __func__); \
+ fprintf(stderr, ## __VA_ARGS__); \
+ } while (0);
+#else
+ #define DB_PRINT(...)
+#endif
+
+#define XILINX_LOCK_KEY 0x767b
+#define XILINX_UNLOCK_KEY 0xdf0d
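+
+/*
+ * Illustrative note (not part of the original change): most SLCR registers
+ * are write-protected while lockval is set. A guest typically unlocks the
+ * block before reprogramming it and re-locks it afterwards, e.g. (assuming
+ * an SLCR_BASE of 0xF8000000 and a generic writel() helper):
+ *
+ *     writel(0xdf0d, SLCR_BASE + 0x8);   // SLCR_UNLOCK
+ *     ... update clock/reset registers ...
+ *     writel(0x767b, SLCR_BASE + 0x4);   // SLCR_LOCK
+ *
+ * zynq_slcr_write() below implements this gating.
+ */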
+
+typedef enum {
+ ARM_PLL_CTRL,
+ DDR_PLL_CTRL,
+ IO_PLL_CTRL,
+ PLL_STATUS,
+ ARM_PPL_CFG,
+ DDR_PLL_CFG,
+ IO_PLL_CFG,
+ PLL_BG_CTRL,
+ PLL_MAX
+} PLLValues;
+
+typedef enum {
+ ARM_CLK_CTRL,
+ DDR_CLK_CTRL,
+ DCI_CLK_CTRL,
+ APER_CLK_CTRL,
+ USB0_CLK_CTRL,
+ USB1_CLK_CTRL,
+ GEM0_RCLK_CTRL,
+ GEM1_RCLK_CTRL,
+ GEM0_CLK_CTRL,
+ GEM1_CLK_CTRL,
+ SMC_CLK_CTRL,
+ LQSPI_CLK_CTRL,
+ SDIO_CLK_CTRL,
+ UART_CLK_CTRL,
+ SPI_CLK_CTRL,
+ CAN_CLK_CTRL,
+ CAN_MIOCLK_CTRL,
+ DBG_CLK_CTRL,
+ PCAP_CLK_CTRL,
+ TOPSW_CLK_CTRL,
+ CLK_MAX
+} ClkValues;
+
+typedef enum {
+ CLK_CTRL,
+ THR_CTRL,
+ THR_CNT,
+ THR_STA,
+ FPGA_MAX
+} FPGAValues;
+
+typedef enum {
+ SYNC_CTRL,
+ SYNC_STATUS,
+ BANDGAP_TRIP,
+ CC_TEST,
+ PLL_PREDIVISOR,
+ CLK_621_TRUE,
+ PICTURE_DBG,
+ PICTURE_DBG_UCNT,
+ PICTURE_DBG_LCNT,
+ MISC_MAX
+} MiscValues;
+
+typedef enum {
+ PSS,
+ DDDR,
+ DMAC = 3,
+ USB,
+ GEM,
+ SDIO,
+ SPI,
+ CAN,
+ I2C,
+ UART,
+ GPIO,
+ LQSPI,
+ SMC,
+ OCM,
+ DEVCI,
+ FPGA,
+ A9_CPU,
+ RS_AWDT,
+ RST_REASON,
+ RST_REASON_CLR,
+ REBOOT_STATUS,
+ BOOT_MODE,
+ RESET_MAX
+} ResetValues;
+
+typedef struct {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+
+ union {
+ struct {
+ uint16_t scl;
+ uint16_t lockval;
+ uint32_t pll[PLL_MAX]; /* 0x100 - 0x11C */
+ uint32_t clk[CLK_MAX]; /* 0x120 - 0x16C */
+ uint32_t fpga[4][FPGA_MAX]; /* 0x170 - 0x1AC */
+ uint32_t misc[MISC_MAX]; /* 0x1B0 - 0x1D8 */
+ uint32_t reset[RESET_MAX]; /* 0x200 - 0x25C */
+ uint32_t apu_ctrl; /* 0x300 */
+ uint32_t wdt_clk_sel; /* 0x304 */
+ uint32_t tz_ocm[3]; /* 0x400 - 0x408 */
+ uint32_t tz_ddr; /* 0x430 */
+ uint32_t tz_dma[3]; /* 0x440 - 0x448 */
+ uint32_t tz_misc[3]; /* 0x450 - 0x458 */
+ uint32_t tz_fpga[2]; /* 0x484 - 0x488 */
+ uint32_t dbg_ctrl; /* 0x500 */
+ uint32_t pss_idcode; /* 0x530 */
+ uint32_t ddr[8]; /* 0x600 - 0x620 (0x604 missing) */
+ uint32_t mio[54]; /* 0x700 - 0x7D4 */
+ uint32_t mio_func[4]; /* 0x800 - 0x810 */
+ uint32_t sd[2]; /* 0x830 - 0x834 */
+ uint32_t lvl_shftr_en; /* 0x900 */
+ uint32_t ocm_cfg; /* 0x910 */
+ uint32_t cpu_ram[8]; /* 0xA00 - 0xA1C */
+ uint32_t iou[7]; /* 0xA30 - 0xA48 */
+ uint32_t dmac_ram; /* 0xA50 */
+ uint32_t afi[4][3]; /* 0xA60 - 0xA8C */
+ uint32_t ocm[3]; /* 0xA90 - 0xA98 */
+ uint32_t devci_ram; /* 0xAA0 */
+ uint32_t csg_ram; /* 0xAB0 */
+ uint32_t gpiob[12]; /* 0xB00 - 0xB2C */
+ uint32_t ddriob[14]; /* 0xB40 - 0xB74 */
+ };
+ uint8_t data[0x1000];
+ };
+} ZynqSLCRState;
+
+static void zynq_slcr_reset(DeviceState *d)
+{
+ int i;
+ ZynqSLCRState *s =
+ FROM_SYSBUS(ZynqSLCRState, SYS_BUS_DEVICE(d));
+
+ DB_PRINT("RESET\n");
+
+ s->lockval = 1;
+ /* 0x100 - 0x11C */
+ s->pll[ARM_PLL_CTRL] = 0x0001A008;
+ s->pll[DDR_PLL_CTRL] = 0x0001A008;
+ s->pll[IO_PLL_CTRL] = 0x0001A008;
+ s->pll[PLL_STATUS] = 0x0000003F;
+ s->pll[ARM_PPL_CFG] = 0x00014000;
+ s->pll[DDR_PLL_CFG] = 0x00014000;
+ s->pll[IO_PLL_CFG] = 0x00014000;
+
+ /* 0x120 - 0x16C */
+ s->clk[ARM_CLK_CTRL] = 0x1F000400;
+ s->clk[DDR_CLK_CTRL] = 0x18400003;
+ s->clk[DCI_CLK_CTRL] = 0x01E03201;
+ s->clk[APER_CLK_CTRL] = 0x01FFCCCD;
+ s->clk[USB0_CLK_CTRL] = s->clk[USB1_CLK_CTRL] = 0x00101941;
+ s->clk[GEM0_RCLK_CTRL] = s->clk[GEM1_RCLK_CTRL] = 0x00000001;
+ s->clk[GEM0_CLK_CTRL] = s->clk[GEM1_CLK_CTRL] = 0x00003C01;
+ s->clk[SMC_CLK_CTRL] = 0x00003C01;
+ s->clk[LQSPI_CLK_CTRL] = 0x00002821;
+ s->clk[SDIO_CLK_CTRL] = 0x00001E03;
+ s->clk[UART_CLK_CTRL] = 0x00003F03;
+ s->clk[SPI_CLK_CTRL] = 0x00003F03;
+ s->clk[CAN_CLK_CTRL] = 0x00501903;
+ s->clk[DBG_CLK_CTRL] = 0x00000F03;
+ s->clk[PCAP_CLK_CTRL] = 0x00000F01;
+
+ /* 0x170 - 0x1AC */
+ s->fpga[0][CLK_CTRL] = s->fpga[1][CLK_CTRL] = s->fpga[2][CLK_CTRL] =
+ s->fpga[3][CLK_CTRL] = 0x00101800;
+ s->fpga[0][THR_STA] = s->fpga[1][THR_STA] = s->fpga[2][THR_STA] =
+ s->fpga[3][THR_STA] = 0x00010000;
+
+ /* 0x1B0 - 0x1D8 */
+ s->misc[BANDGAP_TRIP] = 0x0000001F;
+ s->misc[PLL_PREDIVISOR] = 0x00000001;
+ s->misc[CLK_621_TRUE] = 0x00000001;
+
+ /* 0x200 - 0x25C */
+ s->reset[FPGA] = 0x01F33F0F;
+ s->reset[RST_REASON] = 0x00000040;
+
+ /* 0x700 - 0x7D4 */
+ for (i = 0; i < 54; i++) {
+ s->mio[i] = 0x00001601;
+ }
+ for (i = 2; i <= 8; i++) {
+ s->mio[i] = 0x00000601;
+ }
+
+ /* MIO_MST_TRI0, MIO_MST_TRI1 */
+ s->mio_func[2] = s->mio_func[3] = 0xFFFFFFFF;
+
+ s->cpu_ram[0] = s->cpu_ram[1] = s->cpu_ram[3] =
+ s->cpu_ram[4] = s->cpu_ram[7] = 0x00010101;
+ s->cpu_ram[2] = s->cpu_ram[5] = 0x01010101;
+ s->cpu_ram[6] = 0x00000001;
+
+ s->iou[0] = s->iou[1] = s->iou[2] = s->iou[3] = 0x09090909;
+ s->iou[4] = s->iou[5] = 0x00090909;
+ s->iou[6] = 0x00000909;
+
+ s->dmac_ram = 0x00000009;
+
+ s->afi[0][0] = s->afi[0][1] = 0x09090909;
+ s->afi[1][0] = s->afi[1][1] = 0x09090909;
+ s->afi[2][0] = s->afi[2][1] = 0x09090909;
+ s->afi[3][0] = s->afi[3][1] = 0x09090909;
+ s->afi[0][2] = s->afi[1][2] = s->afi[2][2] = s->afi[3][2] = 0x00000909;
+
+ s->ocm[0] = 0x01010101;
+ s->ocm[1] = s->ocm[2] = 0x09090909;
+
+ s->devci_ram = 0x00000909;
+ s->csg_ram = 0x00000001;
+
+ s->ddriob[0] = s->ddriob[1] = s->ddriob[2] = s->ddriob[3] = 0x00000e00;
+ s->ddriob[4] = s->ddriob[5] = s->ddriob[6] = 0x00000e00;
+ s->ddriob[12] = 0x00000021;
+}
+
+static inline uint32_t zynq_slcr_read_imp(void *opaque,
+ hwaddr offset)
+{
+ ZynqSLCRState *s = (ZynqSLCRState *)opaque;
+
+ switch (offset) {
+ case 0x0: /* SCL */
+ return s->scl;
+ case 0x4: /* LOCK */
+ case 0x8: /* UNLOCK */
+ DB_PRINT("Reading SCLR_LOCK/UNLOCK is not enabled\n");
+ return 0;
+ case 0x0C: /* LOCKSTA */
+ return s->lockval;
+ case 0x100 ... 0x11C:
+ return s->pll[(offset - 0x100) / 4];
+ case 0x120 ... 0x16C:
+ return s->clk[(offset - 0x120) / 4];
+ case 0x170 ... 0x1AC:
+ return s->fpga[0][(offset - 0x170) / 4];
+ case 0x1B0 ... 0x1D8:
+ return s->misc[(offset - 0x1B0) / 4];
+ case 0x200 ... 0x258:
+ return s->reset[(offset - 0x200) / 4];
+ case 0x25c:
+ return 1;
+ case 0x300:
+ return s->apu_ctrl;
+ case 0x304:
+ return s->wdt_clk_sel;
+ case 0x400 ... 0x408:
+ return s->tz_ocm[(offset - 0x400) / 4];
+ case 0x430:
+ return s->tz_ddr;
+ case 0x440 ... 0x448:
+ return s->tz_dma[(offset - 0x440) / 4];
+ case 0x450 ... 0x458:
+ return s->tz_misc[(offset - 0x450) / 4];
+ case 0x484 ... 0x488:
+ return s->tz_fpga[(offset - 0x484) / 4];
+ case 0x500:
+ return s->dbg_ctrl;
+ case 0x530:
+ return s->pss_idcode;
+ case 0x600 ... 0x620:
+ if (offset == 0x604) {
+ goto bad_reg;
+ }
+ return s->ddr[(offset - 0x600) / 4];
+ case 0x700 ... 0x7D4:
+ return s->mio[(offset - 0x700) / 4];
+ case 0x800 ... 0x810:
+ return s->mio_func[(offset - 0x800) / 4];
+ case 0x830 ... 0x834:
+ return s->sd[(offset - 0x830) / 4];
+ case 0x900:
+ return s->lvl_shftr_en;
+ case 0x910:
+ return s->ocm_cfg;
+ case 0xA00 ... 0xA1C:
+ return s->cpu_ram[(offset - 0xA00) / 4];
+ case 0xA30 ... 0xA48:
+ return s->iou[(offset - 0xA30) / 4];
+ case 0xA50:
+ return s->dmac_ram;
+ case 0xA60 ... 0xA8C:
+ return s->afi[0][(offset - 0xA60) / 4];
+ case 0xA90 ... 0xA98:
+ return s->ocm[(offset - 0xA90) / 4];
+ case 0xAA0:
+ return s->devci_ram;
+ case 0xAB0:
+ return s->csg_ram;
+ case 0xB00 ... 0xB2C:
+ return s->gpiob[(offset - 0xB00) / 4];
+ case 0xB40 ... 0xB74:
+ return s->ddriob[(offset - 0xB40) / 4];
+ default:
+ bad_reg:
+ DB_PRINT("Bad register offset 0x%x\n", (int)offset);
+ return 0;
+ }
+}
+
+static uint64_t zynq_slcr_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ uint32_t ret = zynq_slcr_read_imp(opaque, offset);
+
+ DB_PRINT("addr: %08x data: %08x\n", (unsigned)offset, (unsigned)ret);
+ return ret;
+}
+
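+/*
+ * Handle a write to the SLCR.  SCL, LOCK, UNLOCK and LOCKSTA are decoded
+ * unconditionally; every other register is only writable once the SLCR has
+ * been unlocked with XILINX_UNLOCK_KEY.
+ */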
+static void zynq_slcr_write(void *opaque, hwaddr offset,
+ uint64_t val, unsigned size)
+{
+ ZynqSLCRState *s = (ZynqSLCRState *)opaque;
+
+ DB_PRINT("offset: %08x data: %08x\n", (unsigned)offset, (unsigned)val);
+
+ switch (offset) {
+ case 0x00: /* SCL */
+ s->scl = val & 0x1;
+ return;
+ case 0x4: /* SLCR_LOCK */
+ if ((val & 0xFFFF) == XILINX_LOCK_KEY) {
+ DB_PRINT("XILINX LOCK 0xF8000000 + 0x%x <= 0x%x\n", (int)offset,
+ (unsigned)val & 0xFFFF);
+ s->lockval = 1;
+ } else {
+ DB_PRINT("WRONG XILINX LOCK KEY 0xF8000000 + 0x%x <= 0x%x\n",
+ (int)offset, (unsigned)val & 0xFFFF);
+ }
+ return;
+ case 0x8: /* SLCR_UNLOCK */
+ if ((val & 0xFFFF) == XILINX_UNLOCK_KEY) {
+ DB_PRINT("XILINX UNLOCK 0xF8000000 + 0x%x <= 0x%x\n", (int)offset,
+ (unsigned)val & 0xFFFF);
+ s->lockval = 0;
+ } else {
+ DB_PRINT("WRONG XILINX UNLOCK KEY 0xF8000000 + 0x%x <= 0x%x\n",
+ (int)offset, (unsigned)val & 0xFFFF);
+ }
+ return;
+ case 0xc: /* LOCKSTA */
+ DB_PRINT("Writing SCLR_LOCKSTA is not enabled\n");
+ return;
+ }
+
+ if (!s->lockval) {
+ switch (offset) {
+ case 0x100 ... 0x11C:
+ if (offset == 0x10C) {
+ goto bad_reg;
+ }
+ s->pll[(offset - 0x100) / 4] = val;
+ break;
+ case 0x120 ... 0x16C:
+ s->clk[(offset - 0x120) / 4] = val;
+ break;
+ case 0x170 ... 0x1AC:
+ s->fpga[0][(offset - 0x170) / 4] = val;
+ break;
+ case 0x1B0 ... 0x1D8:
+ s->misc[(offset - 0x1B0) / 4] = val;
+ break;
+ case 0x200 ... 0x25C:
+ if (offset == 0x250) {
+ goto bad_reg;
+ }
+ s->reset[(offset - 0x200) / 4] = val;
+ break;
+ case 0x300:
+ s->apu_ctrl = val;
+ break;
+ case 0x304:
+ s->wdt_clk_sel = val;
+ break;
+ case 0x400 ... 0x408:
+ s->tz_ocm[(offset - 0x400) / 4] = val;
+ break;
+ case 0x430:
+ s->tz_ddr = val;
+ break;
+ case 0x440 ... 0x448:
+ s->tz_dma[(offset - 0x440) / 4] = val;
+ break;
+ case 0x450 ... 0x458:
+ s->tz_misc[(offset - 0x450) / 4] = val;
+ break;
+ case 0x484 ... 0x488:
+ s->tz_fpga[(offset - 0x484) / 4] = val;
+ break;
+ case 0x500:
+ s->dbg_ctrl = val;
+ break;
+ case 0x530:
+ s->pss_idcode = val;
+ break;
+ case 0x600 ... 0x620:
+ if (offset == 0x604) {
+ goto bad_reg;
+ }
+ s->ddr[(offset - 0x600) / 4] = val;
+ break;
+ case 0x700 ... 0x7D4:
+ s->mio[(offset - 0x700) / 4] = val;
+ break;
+ case 0x800 ... 0x810:
+ s->mio_func[(offset - 0x800) / 4] = val;
+ break;
+ case 0x830 ... 0x834:
+ s->sd[(offset - 0x830) / 4] = val;
+ break;
+ case 0x900:
+ s->lvl_shftr_en = val;
+ break;
+        case 0x910: /* OCM_CFG: write accepted but the value is not stored */
+            break;
+ case 0xA00 ... 0xA1C:
+ s->cpu_ram[(offset - 0xA00) / 4] = val;
+ break;
+ case 0xA30 ... 0xA48:
+ s->iou[(offset - 0xA30) / 4] = val;
+ break;
+ case 0xA50:
+ s->dmac_ram = val;
+ break;
+ case 0xA60 ... 0xA8C:
+ s->afi[0][(offset - 0xA60) / 4] = val;
+ break;
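+        /*
+         * Only OCM_RAM0 (0xA90) is handled here; writes to 0xA94/0xA98 fall
+         * through to bad_reg even though those offsets are readable above.
+         */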
+ case 0xA90:
+ s->ocm[0] = val;
+ break;
+ case 0xAA0:
+ s->devci_ram = val;
+ break;
+ case 0xAB0:
+ s->csg_ram = val;
+ break;
+ case 0xB00 ... 0xB2C:
+ if (offset == 0xB20 || offset == 0xB2C) {
+ goto bad_reg;
+ }
+ s->gpiob[(offset - 0xB00) / 4] = val;
+ break;
+ case 0xB40 ... 0xB74:
+ s->ddriob[(offset - 0xB40) / 4] = val;
+ break;
+ default:
+ bad_reg:
+ DB_PRINT("Bad register write %x <= %08x\n", (int)offset,
+ (unsigned)val);
+ }
+ } else {
+ DB_PRINT("SCLR registers are locked. Unlock them first\n");
+ }
+}
+
+static const MemoryRegionOps slcr_ops = {
+ .read = zynq_slcr_read,
+ .write = zynq_slcr_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
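+/* Create the 4 KiB "slcr" MMIO region and register it with the system bus */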
+static int zynq_slcr_init(SysBusDevice *dev)
+{
+ ZynqSLCRState *s = FROM_SYSBUS(ZynqSLCRState, dev);
+
+ memory_region_init_io(&s->iomem, &slcr_ops, s, "slcr", 0x1000);
+ sysbus_init_mmio(dev, &s->iomem);
+
+ return 0;
+}
+
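+/*
+ * Migration state: the whole 4 KiB register file is saved as the flat
+ * byte array 'data' in ZynqSLCRState rather than as individual fields.
+ */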
+static const VMStateDescription vmstate_zynq_slcr = {
+ .name = "zynq_slcr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(data, ZynqSLCRState, 0x1000),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void zynq_slcr_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SysBusDeviceClass *sdc = SYS_BUS_DEVICE_CLASS(klass);
+
+ sdc->init = zynq_slcr_init;
+ dc->vmsd = &vmstate_zynq_slcr;
+ dc->reset = zynq_slcr_reset;
+}
+
+static const TypeInfo zynq_slcr_info = {
+ .class_init = zynq_slcr_class_init,
+ .name = "xilinx,zynq_slcr",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(ZynqSLCRState),
+};
+
+static void zynq_slcr_register_types(void)
+{
+ type_register_static(&zynq_slcr_info);
+}
+
+type_init(zynq_slcr_register_types)
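+
+/*
+ * Usage sketch (illustrative, not part of this device model): a board model
+ * would typically instantiate the SLCR with something like
+ *
+ *     sysbus_create_simple("xilinx,zynq_slcr", 0xF8000000, NULL);
+ *
+ * The 0xF8000000 base address matches the DB_PRINT strings above, but the
+ * actual mapping is the board's responsibility, not this file's.
+ */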