-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  block/io.c | 6
-rw-r--r--  docs/conf.py | 2
-rw-r--r--  hw/acpi/cpu.c | 2
-rw-r--r--  hw/acpi/ich9.c | 3
-rw-r--r--  hw/acpi/memory_hotplug.c | 2
-rw-r--r--  hw/acpi/piix4.c | 2
-rw-r--r--  hw/acpi/tco.c | 1
-rw-r--r--  hw/arm/armv7m.c | 4
-rw-r--r--  hw/arm/aspeed_ast2600.c | 2
-rw-r--r--  hw/arm/omap1.c | 2
-rw-r--r--  hw/arm/pxa2xx.c | 2
-rw-r--r--  hw/arm/strongarm.c | 2
-rw-r--r--  hw/arm/xlnx-versal-virt.c | 25
-rw-r--r--  hw/arm/xlnx-versal.c | 190
-rw-r--r--  hw/audio/pcspk.c | 1
-rw-r--r--  hw/block/m25p80.c | 2
-rw-r--r--  hw/char/exynos4210_uart.c | 2
-rw-r--r--  hw/display/macfb.c | 1
-rw-r--r--  hw/dma/xlnx-zdma.c | 1
-rw-r--r--  hw/dma/xlnx_csu_dma.c | 18
-rw-r--r--  hw/gpio/imx_gpio.c | 1
-rw-r--r--  hw/intc/arm_gicv3.c | 1
-rw-r--r--  hw/intc/arm_gicv3_common.c | 9
-rw-r--r--  hw/intc/arm_gicv3_its.c | 258
-rw-r--r--  hw/intc/arm_gicv3_redist.c | 115
-rw-r--r--  hw/intc/gicv3_internal.h | 43
-rw-r--r--  hw/intc/pnv_xive.c | 22
-rw-r--r--  hw/intc/trace-events | 8
-rw-r--r--  hw/misc/bcm2835_mbox.c | 1
-rw-r--r--  hw/misc/mac_via.c | 2
-rw-r--r--  hw/misc/macio/cuda.c | 2
-rw-r--r--  hw/misc/macio/pmu.c | 2
-rw-r--r--  hw/misc/meson.build | 5
-rw-r--r--  hw/misc/xlnx-versal-pmc-iou-slcr.c | 1446
-rw-r--r--  hw/net/can/can_kvaser_pci.c | 1
-rw-r--r--  hw/net/can/can_mioe3680_pci.c | 1
-rw-r--r--  hw/net/can/can_pcm3680_pci.c | 1
-rw-r--r--  hw/net/can/can_sja1000.c | 2
-rw-r--r--  hw/net/can/ctucan_core.c | 2
-rw-r--r--  hw/net/can/ctucan_pci.c | 1
-rw-r--r--  hw/pci-host/pnv_phb3.c | 17
-rw-r--r--  hw/pci-host/pnv_phb4.c | 17
-rw-r--r--  hw/ppc/ppc.c | 1
-rw-r--r--  hw/ppc/spapr.c | 2
-rw-r--r--  hw/ppc/spapr_cpu_core.c | 5
-rw-r--r--  hw/ppc/spapr_rtc.c | 2
-rw-r--r--  hw/ppc/spapr_vof.c | 2
-rw-r--r--  hw/ppc/vof.c | 1
-rw-r--r--  hw/rtc/allwinner-rtc.c | 2
-rw-r--r--  hw/rtc/aspeed_rtc.c | 2
-rw-r--r--  hw/rtc/ds1338.c | 2
-rw-r--r--  hw/rtc/exynos4210_rtc.c | 2
-rw-r--r--  hw/rtc/goldfish_rtc.c | 2
-rw-r--r--  hw/rtc/m41t80.c | 2
-rw-r--r--  hw/rtc/m48t59.c | 2
-rw-r--r--  hw/rtc/mc146818rtc.c | 2
-rw-r--r--  hw/rtc/pl031.c | 2
-rw-r--r--  hw/rtc/twl92230.c | 2
-rw-r--r--  hw/rtc/xlnx-zynqmp-rtc.c | 2
-rw-r--r--  hw/s390x/tod-tcg.c | 2
-rw-r--r--  hw/scsi/megasas.c | 3
-rw-r--r--  hw/scsi/mptsas.c | 1
-rw-r--r--  hw/ssi/meson.build | 1
-rw-r--r--  hw/ssi/xlnx-versal-ospi.c | 1853
-rw-r--r--  hw/virtio/virtio-mmio.c | 1
-rw-r--r--  hw/virtio/virtio-pci.c | 1
-rw-r--r--  hw/virtio/virtio.c | 1
-rw-r--r--  include/hw/arm/xlnx-versal.h | 30
-rw-r--r--  include/hw/dma/xlnx_csu_dma.h | 24
-rw-r--r--  include/hw/intc/arm_gicv3_its_common.h | 1
-rw-r--r--  include/hw/misc/xlnx-versal-pmc-iou-slcr.h | 78
-rw-r--r--  include/hw/ppc/vof.h | 5
-rw-r--r--  include/hw/ssi/xlnx-versal-ospi.h | 111
-rw-r--r--  include/qemu-common.h | 5
-rw-r--r--  include/sysemu/rtc.h | 58
-rw-r--r--  migration/migration.c | 26
-rw-r--r--  migration/multifd-zlib.c | 61
-rw-r--r--  migration/multifd-zstd.c | 63
-rw-r--r--  migration/multifd.c | 148
-rw-r--r--  migration/multifd.h | 33
-rw-r--r--  migration/postcopy-ram.c | 96
-rw-r--r--  migration/ram.c | 293
-rw-r--r--  migration/ram.h | 4
-rw-r--r--  migration/savevm.c | 5
-rw-r--r--  migration/trace-events | 29
-rw-r--r--  monitor/hmp-cmds.c | 12
-rw-r--r--  nbd/server.c | 1
-rw-r--r--  net/dump.c | 2
-rw-r--r--  qapi/block-export.json | 3
-rw-r--r--  qapi/migration.json | 13
-rwxr-xr-x  scripts/update-linux-headers.sh | 16
-rw-r--r--  softmmu/rtc.c | 2
-rw-r--r--  target/arm/helper.c | 13
-rw-r--r--  target/arm/internals.h | 2
-rw-r--r--  target/arm/m_helper.c | 2
-rw-r--r--  target/openrisc/machine.c | 1
-rw-r--r--  target/ppc/cpu-models.c | 2
-rw-r--r--  target/ppc/cpu-models.h | 1
-rw-r--r--  target/ppc/cpu.h | 9
-rw-r--r--  target/ppc/cpu_init.c | 150
-rw-r--r--  target/ppc/excp_helper.c | 674
-rw-r--r--  target/ppc/helper.h | 2
-rw-r--r--  target/ppc/helper_regs.c | 12
-rw-r--r--  target/ppc/int_helper.c | 21
-rw-r--r--  target/ppc/machine.c | 2
-rw-r--r--  target/ppc/mfrom_table.c.inc | 78
-rw-r--r--  target/ppc/mfrom_table_gen.c | 34
-rw-r--r--  target/ppc/mmu_common.c | 18
-rw-r--r--  target/ppc/mmu_helper.c | 12
-rw-r--r--  target/ppc/translate.c | 32
-rw-r--r--  target/sparc/machine.c | 4
-rwxr-xr-x  tests/qemu-iotests/tests/block-status-cache | 139
-rw-r--r--  tests/qemu-iotests/tests/block-status-cache.out | 5
114 files changed, 5563 insertions, 901 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index b7487f9..9814580 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -818,7 +818,6 @@ F: docs/system/arm/palm.rst
Raspberry Pi
M: Peter Maydell <peter.maydell@linaro.org>
-R: Andrew Baumann <Andrew.Baumann@microsoft.com>
R: Philippe Mathieu-Daudé <f4bug@amsat.org>
L: qemu-arm@nongnu.org
S: Odd Fixes
@@ -958,6 +957,12 @@ F: hw/display/dpcd.c
F: include/hw/display/dpcd.h
F: docs/system/arm/xlnx-versal-virt.rst
+Xilinx Versal OSPI
+M: Francisco Iglesias <francisco.iglesias@xilinx.com>
+S: Maintained
+F: hw/ssi/xlnx-versal-ospi.c
+F: include/hw/ssi/xlnx-versal-ospi.h
+
ARM ACPI Subsystem
M: Shannon Zhao <shannon.zhaosl@gmail.com>
L: qemu-arm@nongnu.org
diff --git a/block/io.c b/block/io.c
index bb0a254..4e4cb55 100644
--- a/block/io.c
+++ b/block/io.c
@@ -2497,8 +2497,12 @@ static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
* non-protocol nodes, and then it is never used. However, filling
* the cache requires an RCU update, so double check here to avoid
* such an update if possible.
+ *
+ * Check want_zero, because we only want to update the cache when we
+ * have accurate information about what is zero and what is data.
*/
- if (ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
+ if (want_zero &&
+ ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
QLIST_EMPTY(&bs->children))
{
/*
diff --git a/docs/conf.py b/docs/conf.py
index e790159..49dab44 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -98,7 +98,7 @@ default_role = 'any'
# General information about the project.
project = u'QEMU'
-copyright = u'2021, The QEMU Project Developers'
+copyright = u'2022, The QEMU Project Developers'
author = u'The QEMU Project Developers'
# The version info for the project you're documenting, acts as replacement for
diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c
index b20903e..3646dbf 100644
--- a/hw/acpi/cpu.c
+++ b/hw/acpi/cpu.c
@@ -297,7 +297,6 @@ static const VMStateDescription vmstate_cpuhp_sts = {
.name = "CPU hotplug device state",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_BOOL(is_inserting, AcpiCpuStatus),
VMSTATE_BOOL(is_removing, AcpiCpuStatus),
@@ -311,7 +310,6 @@ const VMStateDescription vmstate_cpu_hotplug = {
.name = "CPU hotplug state",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT32(selector, CPUHotplugState),
VMSTATE_UINT8(command, CPUHotplugState),
diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c
index ebe08ed..bd9bbad 100644
--- a/hw/acpi/ich9.c
+++ b/hw/acpi/ich9.c
@@ -163,7 +163,6 @@ static const VMStateDescription vmstate_memhp_state = {
.name = "ich9_pm/memhp",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.needed = vmstate_test_use_memhp,
.fields = (VMStateField[]) {
VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, ICH9LPCPMRegs),
@@ -181,7 +180,6 @@ static const VMStateDescription vmstate_tco_io_state = {
.name = "ich9_pm/tco",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.needed = vmstate_test_use_tco,
.fields = (VMStateField[]) {
VMSTATE_STRUCT(tco_regs, ICH9LPCPMRegs, 1, vmstate_tco_io_sts,
@@ -208,7 +206,6 @@ static const VMStateDescription vmstate_cpuhp_state = {
.name = "ich9_pm/cpuhp",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.needed = vmstate_test_use_cpuhp,
.pre_load = vmstate_cpuhp_pre_load,
.fields = (VMStateField[]) {
diff --git a/hw/acpi/memory_hotplug.c b/hw/acpi/memory_hotplug.c
index d0fffcf..a581a21 100644
--- a/hw/acpi/memory_hotplug.c
+++ b/hw/acpi/memory_hotplug.c
@@ -318,7 +318,6 @@ static const VMStateDescription vmstate_memhp_sts = {
.name = "memory hotplug device state",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_BOOL(is_enabled, MemStatus),
VMSTATE_BOOL(is_inserting, MemStatus),
@@ -332,7 +331,6 @@ const VMStateDescription vmstate_memory_hotplug = {
.name = "memory hotplug state",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT32(selector, MemHotplugState),
VMSTATE_STRUCT_VARRAY_POINTER_UINT32(devs, MemHotplugState, dev_count,
diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c
index f0b5fac..cc37fa3 100644
--- a/hw/acpi/piix4.c
+++ b/hw/acpi/piix4.c
@@ -230,7 +230,6 @@ static const VMStateDescription vmstate_memhp_state = {
.name = "piix4_pm/memhp",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.needed = vmstate_test_use_memhp,
.fields = (VMStateField[]) {
VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, PIIX4PMState),
@@ -255,7 +254,6 @@ static const VMStateDescription vmstate_cpuhp_state = {
.name = "piix4_pm/cpuhp",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.needed = vmstate_test_use_cpuhp,
.pre_load = vmstate_cpuhp_pre_load,
.fields = (VMStateField[]) {
diff --git a/hw/acpi/tco.c b/hw/acpi/tco.c
index cf1e68a..4783721 100644
--- a/hw/acpi/tco.c
+++ b/hw/acpi/tco.c
@@ -239,7 +239,6 @@ const VMStateDescription vmstate_tco_io_sts = {
.name = "tco io device status",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT16(tco.rld, TCOIORegs),
VMSTATE_UINT8(tco.din, TCOIORegs),
diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c
index 8d08db8..ceb76df 100644
--- a/hw/arm/armv7m.c
+++ b/hw/arm/armv7m.c
@@ -520,8 +520,8 @@ static const VMStateDescription vmstate_armv7m = {
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
- VMSTATE_CLOCK(refclk, SysTickState),
- VMSTATE_CLOCK(cpuclk, SysTickState),
+ VMSTATE_CLOCK(refclk, ARMv7MState),
+ VMSTATE_CLOCK(cpuclk, ARMv7MState),
VMSTATE_END_OF_LIST()
}
};
diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c
index 8f37bdb..12f6edc 100644
--- a/hw/arm/aspeed_ast2600.c
+++ b/hw/arm/aspeed_ast2600.c
@@ -29,7 +29,7 @@ static const hwaddr aspeed_soc_ast2600_memmap[] = {
[ASPEED_DEV_PWM] = 0x1E610000,
[ASPEED_DEV_FMC] = 0x1E620000,
[ASPEED_DEV_SPI1] = 0x1E630000,
- [ASPEED_DEV_SPI2] = 0x1E641000,
+ [ASPEED_DEV_SPI2] = 0x1E631000,
[ASPEED_DEV_EHCI1] = 0x1E6A1000,
[ASPEED_DEV_EHCI2] = 0x1E6A3000,
[ASPEED_DEV_MII1] = 0x1E650000,
diff --git a/hw/arm/omap1.c b/hw/arm/omap1.c
index 180d378..9852c2a 100644
--- a/hw/arm/omap1.c
+++ b/hw/arm/omap1.c
@@ -21,7 +21,6 @@
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qapi/error.h"
-#include "qemu-common.h"
#include "cpu.h"
#include "exec/address-spaces.h"
#include "hw/hw.h"
@@ -35,6 +34,7 @@
#include "sysemu/qtest.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
+#include "sysemu/rtc.h"
#include "qemu/range.h"
#include "hw/sysbus.h"
#include "qemu/cutils.h"
diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c
index 15a247e..a6f938f 100644
--- a/hw/arm/pxa2xx.c
+++ b/hw/arm/pxa2xx.c
@@ -8,7 +8,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qapi/error.h"
@@ -27,6 +26,7 @@
#include "chardev/char-fe.h"
#include "sysemu/blockdev.h"
#include "sysemu/qtest.h"
+#include "sysemu/rtc.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qom/object.h"
diff --git a/hw/arm/strongarm.c b/hw/arm/strongarm.c
index 939a57d..39b8f01 100644
--- a/hw/arm/strongarm.c
+++ b/hw/arm/strongarm.c
@@ -28,7 +28,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "cpu.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
@@ -41,6 +40,7 @@
#include "chardev/char-fe.h"
#include "chardev/char-serial.h"
#include "sysemu/sysemu.h"
+#include "sysemu/rtc.h"
#include "hw/ssi/ssi.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c
index 0c5edc8..3f56ae2 100644
--- a/hw/arm/xlnx-versal-virt.c
+++ b/hw/arm/xlnx-versal-virt.c
@@ -25,6 +25,8 @@
#define TYPE_XLNX_VERSAL_VIRT_MACHINE MACHINE_TYPE_NAME("xlnx-versal-virt")
OBJECT_DECLARE_SIMPLE_TYPE(VersalVirt, XLNX_VERSAL_VIRT_MACHINE)
+#define XLNX_VERSAL_NUM_OSPI_FLASH 4
+
struct VersalVirt {
MachineState parent_obj;
@@ -365,7 +367,7 @@ static void fdt_add_bbram_node(VersalVirt *s)
qemu_fdt_add_subnode(s->fdt, name);
qemu_fdt_setprop_cells(s->fdt, name, "interrupts",
- GIC_FDT_IRQ_TYPE_SPI, VERSAL_BBRAM_APB_IRQ_0,
+ GIC_FDT_IRQ_TYPE_SPI, VERSAL_PMC_APB_IRQ,
GIC_FDT_IRQ_FLAGS_LEVEL_HI);
qemu_fdt_setprop(s->fdt, name, "interrupt-names",
interrupt_names, sizeof(interrupt_names));
@@ -691,6 +693,27 @@ static void versal_virt_init(MachineState *machine)
exit(EXIT_FAILURE);
}
}
+
+ for (i = 0; i < XLNX_VERSAL_NUM_OSPI_FLASH; i++) {
+ BusState *spi_bus;
+ DeviceState *flash_dev;
+ qemu_irq cs_line;
+ DriveInfo *dinfo = drive_get(IF_MTD, 0, i);
+
+ spi_bus = qdev_get_child_bus(DEVICE(&s->soc.pmc.iou.ospi), "spi0");
+
+ flash_dev = qdev_new("mt35xu01g");
+ if (dinfo) {
+ qdev_prop_set_drive_err(flash_dev, "drive",
+ blk_by_legacy_dinfo(dinfo), &error_fatal);
+ }
+ qdev_realize_and_unref(flash_dev, spi_bus, &error_fatal);
+
+ cs_line = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0);
+
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->soc.pmc.iou.ospi),
+ i + 1, cs_line);
+ }
}
static void versal_virt_machine_instance_init(Object *obj)
diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c
index b2705b6..ab58beb 100644
--- a/hw/arm/xlnx-versal.c
+++ b/hw/arm/xlnx-versal.c
@@ -21,10 +21,15 @@
#include "kvm_arm.h"
#include "hw/misc/unimp.h"
#include "hw/arm/xlnx-versal.h"
+#include "qemu/log.h"
+#include "hw/sysbus.h"
#define XLNX_VERSAL_ACPU_TYPE ARM_CPU_TYPE_NAME("cortex-a72")
#define GEM_REVISION 0x40070106
+#define VERSAL_NUM_PMC_APB_IRQS 3
+#define NUM_OSPI_IRQ_LINES 3
+
static void versal_create_apu_cpus(Versal *s)
{
int i;
@@ -260,6 +265,26 @@ static void versal_create_sds(Versal *s, qemu_irq *pic)
}
}
+static void versal_create_pmc_apb_irq_orgate(Versal *s, qemu_irq *pic)
+{
+ DeviceState *orgate;
+
+ /*
+ * The VERSAL_PMC_APB_IRQ is an 'or' of the interrupts from the following
+ * models:
+ * - RTC
+ * - BBRAM
+ * - PMC SLCR
+ */
+ object_initialize_child(OBJECT(s), "pmc-apb-irq-orgate",
+ &s->pmc.apb_irq_orgate, TYPE_OR_IRQ);
+ orgate = DEVICE(&s->pmc.apb_irq_orgate);
+ object_property_set_int(OBJECT(orgate),
+ "num-lines", VERSAL_NUM_PMC_APB_IRQS, &error_fatal);
+ qdev_realize(orgate, NULL, &error_fatal);
+ qdev_connect_gpio_out(orgate, 0, pic[VERSAL_PMC_APB_IRQ]);
+}
+
static void versal_create_rtc(Versal *s, qemu_irq *pic)
{
SysBusDevice *sbd;
@@ -277,7 +302,8 @@ static void versal_create_rtc(Versal *s, qemu_irq *pic)
* TODO: Connect the ALARM and SECONDS interrupts once our RTC model
* supports them.
*/
- sysbus_connect_irq(sbd, 1, pic[VERSAL_RTC_APB_ERR_IRQ]);
+ sysbus_connect_irq(sbd, 1,
+ qdev_get_gpio_in(DEVICE(&s->pmc.apb_irq_orgate), 0));
}
static void versal_create_xrams(Versal *s, qemu_irq *pic)
@@ -328,7 +354,8 @@ static void versal_create_bbram(Versal *s, qemu_irq *pic)
sysbus_realize(sbd, &error_fatal);
memory_region_add_subregion(&s->mr_ps, MM_PMC_BBRAM_CTRL,
sysbus_mmio_get_region(sbd, 0));
- sysbus_connect_irq(sbd, 0, pic[VERSAL_BBRAM_APB_IRQ_0]);
+ sysbus_connect_irq(sbd, 0,
+ qdev_get_gpio_in(DEVICE(&s->pmc.apb_irq_orgate), 1));
}
static void versal_realize_efuse_part(Versal *s, Object *dev, hwaddr base)
@@ -369,6 +396,114 @@ static void versal_create_efuse(Versal *s, qemu_irq *pic)
sysbus_connect_irq(SYS_BUS_DEVICE(ctrl), 0, pic[VERSAL_EFUSE_IRQ]);
}
+static void versal_create_pmc_iou_slcr(Versal *s, qemu_irq *pic)
+{
+ SysBusDevice *sbd;
+
+ object_initialize_child(OBJECT(s), "versal-pmc-iou-slcr", &s->pmc.iou.slcr,
+ TYPE_XILINX_VERSAL_PMC_IOU_SLCR);
+
+ sbd = SYS_BUS_DEVICE(&s->pmc.iou.slcr);
+ sysbus_realize(sbd, &error_fatal);
+
+ memory_region_add_subregion(&s->mr_ps, MM_PMC_PMC_IOU_SLCR,
+ sysbus_mmio_get_region(sbd, 0));
+
+ sysbus_connect_irq(sbd, 0,
+ qdev_get_gpio_in(DEVICE(&s->pmc.apb_irq_orgate), 2));
+}
+
+static void versal_create_ospi(Versal *s, qemu_irq *pic)
+{
+ SysBusDevice *sbd;
+ MemoryRegion *mr_dac;
+ qemu_irq ospi_mux_sel;
+ DeviceState *orgate;
+
+ memory_region_init(&s->pmc.iou.ospi.linear_mr, OBJECT(s),
+ "versal-ospi-linear-mr" , MM_PMC_OSPI_DAC_SIZE);
+
+ object_initialize_child(OBJECT(s), "versal-ospi", &s->pmc.iou.ospi.ospi,
+ TYPE_XILINX_VERSAL_OSPI);
+
+ mr_dac = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->pmc.iou.ospi.ospi), 1);
+ memory_region_add_subregion(&s->pmc.iou.ospi.linear_mr, 0x0, mr_dac);
+
+ /* Create the OSPI destination DMA */
+ object_initialize_child(OBJECT(s), "versal-ospi-dma-dst",
+ &s->pmc.iou.ospi.dma_dst,
+ TYPE_XLNX_CSU_DMA);
+
+ object_property_set_link(OBJECT(&s->pmc.iou.ospi.dma_dst),
+ "dma", OBJECT(get_system_memory()),
+ &error_abort);
+
+ sbd = SYS_BUS_DEVICE(&s->pmc.iou.ospi.dma_dst);
+ sysbus_realize(sbd, &error_fatal);
+
+ memory_region_add_subregion(&s->mr_ps, MM_PMC_OSPI_DMA_DST,
+ sysbus_mmio_get_region(sbd, 0));
+
+ /* Create the OSPI source DMA */
+ object_initialize_child(OBJECT(s), "versal-ospi-dma-src",
+ &s->pmc.iou.ospi.dma_src,
+ TYPE_XLNX_CSU_DMA);
+
+ object_property_set_bool(OBJECT(&s->pmc.iou.ospi.dma_src), "is-dst",
+ false, &error_abort);
+
+ object_property_set_link(OBJECT(&s->pmc.iou.ospi.dma_src),
+ "dma", OBJECT(mr_dac), &error_abort);
+
+ object_property_set_link(OBJECT(&s->pmc.iou.ospi.dma_src),
+ "stream-connected-dma",
+ OBJECT(&s->pmc.iou.ospi.dma_dst),
+ &error_abort);
+
+ sbd = SYS_BUS_DEVICE(&s->pmc.iou.ospi.dma_src);
+ sysbus_realize(sbd, &error_fatal);
+
+ memory_region_add_subregion(&s->mr_ps, MM_PMC_OSPI_DMA_SRC,
+ sysbus_mmio_get_region(sbd, 0));
+
+ /* Realize the OSPI */
+ object_property_set_link(OBJECT(&s->pmc.iou.ospi.ospi), "dma-src",
+ OBJECT(&s->pmc.iou.ospi.dma_src), &error_abort);
+
+ sbd = SYS_BUS_DEVICE(&s->pmc.iou.ospi.ospi);
+ sysbus_realize(sbd, &error_fatal);
+
+ memory_region_add_subregion(&s->mr_ps, MM_PMC_OSPI,
+ sysbus_mmio_get_region(sbd, 0));
+
+ memory_region_add_subregion(&s->mr_ps, MM_PMC_OSPI_DAC,
+ &s->pmc.iou.ospi.linear_mr);
+
+ /* ospi_mux_sel */
+ ospi_mux_sel = qdev_get_gpio_in_named(DEVICE(&s->pmc.iou.ospi.ospi),
+ "ospi-mux-sel", 0);
+ qdev_connect_gpio_out_named(DEVICE(&s->pmc.iou.slcr), "ospi-mux-sel", 0,
+ ospi_mux_sel);
+
+ /* OSPI irq */
+ object_initialize_child(OBJECT(s), "ospi-irq-orgate",
+ &s->pmc.iou.ospi.irq_orgate, TYPE_OR_IRQ);
+ object_property_set_int(OBJECT(&s->pmc.iou.ospi.irq_orgate),
+ "num-lines", NUM_OSPI_IRQ_LINES, &error_fatal);
+
+ orgate = DEVICE(&s->pmc.iou.ospi.irq_orgate);
+ qdev_realize(orgate, NULL, &error_fatal);
+
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->pmc.iou.ospi.ospi), 0,
+ qdev_get_gpio_in(orgate, 0));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->pmc.iou.ospi.dma_src), 0,
+ qdev_get_gpio_in(orgate, 1));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->pmc.iou.ospi.dma_dst), 0,
+ qdev_get_gpio_in(orgate, 2));
+
+ qdev_connect_gpio_out(orgate, 0, pic[VERSAL_OSPI_IRQ]);
+}
+
/* This takes the board allocated linear DDR memory and creates aliases
* for each split DDR range/aperture on the Versal address map.
*/
@@ -425,8 +560,31 @@ static void versal_unimp_area(Versal *s, const char *name,
memory_region_add_subregion(mr, base, mr_dev);
}
+static void versal_unimp_sd_emmc_sel(void *opaque, int n, int level)
+{
+ qemu_log_mask(LOG_UNIMP,
+ "Selecting between enabling SD mode or eMMC mode on "
+ "controller %d is not yet implemented\n", n);
+}
+
+static void versal_unimp_qspi_ospi_mux_sel(void *opaque, int n, int level)
+{
+ qemu_log_mask(LOG_UNIMP,
+ "Selecting between enabling the QSPI or OSPI linear address "
+ "region is not yet implemented\n");
+}
+
+static void versal_unimp_irq_parity_imr(void *opaque, int n, int level)
+{
+ qemu_log_mask(LOG_UNIMP,
+ "PMC SLCR parity interrupt behaviour "
+ "is not yet implemented\n");
+}
+
static void versal_unimp(Versal *s)
{
+ qemu_irq gpio_in;
+
versal_unimp_area(s, "psm", &s->mr_ps,
MM_PSM_START, MM_PSM_END - MM_PSM_START);
versal_unimp_area(s, "crl", &s->mr_ps,
@@ -441,6 +599,31 @@ static void versal_unimp(Versal *s)
MM_IOU_SCNTR, MM_IOU_SCNTR_SIZE);
versal_unimp_area(s, "iou-scntr-seucre", &s->mr_ps,
MM_IOU_SCNTRS, MM_IOU_SCNTRS_SIZE);
+
+ qdev_init_gpio_in_named(DEVICE(s), versal_unimp_sd_emmc_sel,
+ "sd-emmc-sel-dummy", 2);
+ qdev_init_gpio_in_named(DEVICE(s), versal_unimp_qspi_ospi_mux_sel,
+ "qspi-ospi-mux-sel-dummy", 1);
+ qdev_init_gpio_in_named(DEVICE(s), versal_unimp_irq_parity_imr,
+ "irq-parity-imr-dummy", 1);
+
+ gpio_in = qdev_get_gpio_in_named(DEVICE(s), "sd-emmc-sel-dummy", 0);
+ qdev_connect_gpio_out_named(DEVICE(&s->pmc.iou.slcr), "sd-emmc-sel", 0,
+ gpio_in);
+
+ gpio_in = qdev_get_gpio_in_named(DEVICE(s), "sd-emmc-sel-dummy", 1);
+ qdev_connect_gpio_out_named(DEVICE(&s->pmc.iou.slcr), "sd-emmc-sel", 1,
+ gpio_in);
+
+ gpio_in = qdev_get_gpio_in_named(DEVICE(s), "qspi-ospi-mux-sel-dummy", 0);
+ qdev_connect_gpio_out_named(DEVICE(&s->pmc.iou.slcr),
+ "qspi-ospi-mux-sel", 0,
+ gpio_in);
+
+ gpio_in = qdev_get_gpio_in_named(DEVICE(s), "irq-parity-imr-dummy", 0);
+ qdev_connect_gpio_out_named(DEVICE(&s->pmc.iou.slcr),
+ SYSBUS_DEVICE_GPIO_IRQ, 0,
+ gpio_in);
}
static void versal_realize(DeviceState *dev, Error **errp)
@@ -455,10 +638,13 @@ static void versal_realize(DeviceState *dev, Error **errp)
versal_create_gems(s, pic);
versal_create_admas(s, pic);
versal_create_sds(s, pic);
+ versal_create_pmc_apb_irq_orgate(s, pic);
versal_create_rtc(s, pic);
versal_create_xrams(s, pic);
versal_create_bbram(s, pic);
versal_create_efuse(s, pic);
+ versal_create_pmc_iou_slcr(s, pic);
+ versal_create_ospi(s, pic);
versal_map_ddr(s);
versal_unimp(s);
diff --git a/hw/audio/pcspk.c b/hw/audio/pcspk.c
index b056c05..dfc7ebc 100644
--- a/hw/audio/pcspk.c
+++ b/hw/audio/pcspk.c
@@ -209,7 +209,6 @@ static const VMStateDescription vmstate_spk = {
.name = "pcspk",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.needed = migrate_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT8(data_on, PCSpkState),
diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c
index b77503d..c6bf3c6 100644
--- a/hw/block/m25p80.c
+++ b/hw/block/m25p80.c
@@ -255,6 +255,8 @@ static const FlashPartInfo known_devices[] = {
{ INFO("n25q512a", 0x20ba20, 0, 64 << 10, 1024, ER_4K) },
{ INFO("n25q512ax3", 0x20ba20, 0x1000, 64 << 10, 1024, ER_4K) },
{ INFO("mt25ql512ab", 0x20ba20, 0x1044, 64 << 10, 1024, ER_4K | ER_32K) },
+ { INFO_STACKED("mt35xu01g", 0x2c5b1b, 0x104100, 128 << 10, 1024,
+ ER_4K | ER_32K, 2) },
{ INFO_STACKED("n25q00", 0x20ba21, 0x1000, 64 << 10, 2048, ER_4K, 4) },
{ INFO_STACKED("n25q00a", 0x20bb21, 0x1000, 64 << 10, 2048, ER_4K, 4) },
{ INFO_STACKED("mt25ql01g", 0x20ba21, 0x1040, 64 << 10, 2048, ER_4K, 2) },
diff --git a/hw/char/exynos4210_uart.c b/hw/char/exynos4210_uart.c
index 80d401a..addcd59 100644
--- a/hw/char/exynos4210_uart.c
+++ b/hw/char/exynos4210_uart.c
@@ -628,7 +628,6 @@ static const VMStateDescription vmstate_exynos4210_uart_fifo = {
.name = "exynos4210.uart.fifo",
.version_id = 1,
.minimum_version_id = 1,
- .post_load = exynos4210_uart_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINT32(sp, Exynos4210UartFIFO),
VMSTATE_UINT32(rp, Exynos4210UartFIFO),
@@ -641,6 +640,7 @@ static const VMStateDescription vmstate_exynos4210_uart = {
.name = "exynos4210.uart",
.version_id = 1,
.minimum_version_id = 1,
+ .post_load = exynos4210_uart_post_load,
.fields = (VMStateField[]) {
VMSTATE_STRUCT(rx, Exynos4210UartState, 1,
vmstate_exynos4210_uart_fifo, Exynos4210UartFIFO),
diff --git a/hw/display/macfb.c b/hw/display/macfb.c
index 4bd7c3a..2eeb80c 100644
--- a/hw/display/macfb.c
+++ b/hw/display/macfb.c
@@ -616,7 +616,6 @@ static const VMStateDescription vmstate_macfb = {
.name = "macfb",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.post_load = macfb_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINT8_ARRAY(color_palette, MacfbState, 256 * 3),
diff --git a/hw/dma/xlnx-zdma.c b/hw/dma/xlnx-zdma.c
index a5a92b4..4eb7f66 100644
--- a/hw/dma/xlnx-zdma.c
+++ b/hw/dma/xlnx-zdma.c
@@ -806,7 +806,6 @@ static const VMStateDescription vmstate_zdma = {
.name = TYPE_XLNX_ZDMA,
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT32_ARRAY(regs, XlnxZDMA, ZDMA_R_MAX),
VMSTATE_UINT32(state, XlnxZDMA),
diff --git a/hw/dma/xlnx_csu_dma.c b/hw/dma/xlnx_csu_dma.c
index 896bb35..84f782f 100644
--- a/hw/dma/xlnx_csu_dma.c
+++ b/hw/dma/xlnx_csu_dma.c
@@ -472,6 +472,20 @@ static uint64_t addr_msb_pre_write(RegisterInfo *reg, uint64_t val)
return val & R_ADDR_MSB_ADDR_MSB_MASK;
}
+static MemTxResult xlnx_csu_dma_class_read(XlnxCSUDMA *s, hwaddr addr,
+ uint32_t len)
+{
+ RegisterInfo *reg = &s->regs_info[R_SIZE];
+ uint64_t we = MAKE_64BIT_MASK(0, 4 * 8);
+
+ s->regs[R_ADDR] = addr;
+ s->regs[R_ADDR_MSB] = (uint64_t)addr >> 32;
+
+ register_write(reg, len, we, object_get_typename(OBJECT(s)), false);
+
+ return (s->regs[R_SIZE] == 0) ? MEMTX_OK : MEMTX_ERROR;
+}
+
static const RegisterAccessInfo *xlnx_csu_dma_regs_info[] = {
#define DMACH_REGINFO(NAME, snd) \
(const RegisterAccessInfo []) { \
@@ -663,7 +677,6 @@ static const VMStateDescription vmstate_xlnx_csu_dma = {
.name = TYPE_XLNX_CSU_DMA,
.version_id = 0,
.minimum_version_id = 0,
- .minimum_version_id_old = 0,
.fields = (VMStateField[]) {
VMSTATE_PTIMER(src_timer, XlnxCSUDMA),
VMSTATE_UINT16(width, XlnxCSUDMA),
@@ -696,6 +709,7 @@ static void xlnx_csu_dma_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
StreamSinkClass *ssc = STREAM_SINK_CLASS(klass);
+ XlnxCSUDMAClass *xcdc = XLNX_CSU_DMA_CLASS(klass);
dc->reset = xlnx_csu_dma_reset;
dc->realize = xlnx_csu_dma_realize;
@@ -704,6 +718,8 @@ static void xlnx_csu_dma_class_init(ObjectClass *klass, void *data)
ssc->push = xlnx_csu_dma_stream_push;
ssc->can_push = xlnx_csu_dma_stream_can_push;
+
+ xcdc->read = xlnx_csu_dma_class_read;
}
static void xlnx_csu_dma_init(Object *obj)
diff --git a/hw/gpio/imx_gpio.c b/hw/gpio/imx_gpio.c
index 7a59180..c7f98b7 100644
--- a/hw/gpio/imx_gpio.c
+++ b/hw/gpio/imx_gpio.c
@@ -277,7 +277,6 @@ static const VMStateDescription vmstate_imx_gpio = {
.name = TYPE_IMX_GPIO,
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT32(dr, IMXGPIOState),
VMSTATE_UINT32(gdir, IMXGPIOState),
diff --git a/hw/intc/arm_gicv3.c b/hw/intc/arm_gicv3.c
index 715df54..6d3c8ee 100644
--- a/hw/intc/arm_gicv3.c
+++ b/hw/intc/arm_gicv3.c
@@ -166,6 +166,7 @@ static void gicv3_redist_update_noirqset(GICv3CPUState *cs)
}
if ((cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) && cs->gic->lpi_enable &&
+ (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) &&
(cs->hpplpi.prio != 0xff)) {
if (irqbetter(cs, cs->hpplpi.irq, cs->hpplpi.prio)) {
cs->hppi.irq = cs->hpplpi.irq;
diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c
index 9884d2e..4ca5ae9 100644
--- a/hw/intc/arm_gicv3_common.c
+++ b/hw/intc/arm_gicv3_common.c
@@ -357,6 +357,11 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
return;
}
+ if (s->lpi_enable) {
+ address_space_init(&s->dma_as, s->dma,
+ "gicv3-its-sysmem");
+ }
+
s->cpu = g_new0(GICv3CPUState, s->num_cpu);
for (i = 0; i < s->num_cpu; i++) {
@@ -424,6 +429,10 @@ static void arm_gicv3_common_reset(DeviceState *dev)
cs->level = 0;
cs->gicr_ctlr = 0;
+ if (s->lpi_enable) {
+ /* Our implementation supports clearing GICR_CTLR.EnableLPIs */
+ cs->gicr_ctlr |= GICR_CTLR_CES;
+ }
cs->gicr_statusr[GICV3_S] = 0;
cs->gicr_statusr[GICV3_NS] = 0;
cs->gicr_waker = GICR_WAKER_ProcessorSleep | GICR_WAKER_ChildrenAsleep;
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
index b2f6a8c..51d9be4 100644
--- a/hw/intc/arm_gicv3_its.c
+++ b/hw/intc/arm_gicv3_its.c
@@ -13,6 +13,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
+#include "trace.h"
#include "hw/qdev-properties.h"
#include "hw/intc/arm_gicv3_its_common.h"
#include "gicv3_internal.h"
@@ -255,10 +256,10 @@ static ItsCmdResult process_its_cmd(GICv3ITSState *s, uint64_t value,
eventid = (value & EVENTID_MASK);
- if (devid >= s->dt.num_ids) {
+ if (devid >= s->dt.num_entries) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid command attributes: devid %d>=%d",
- __func__, devid, s->dt.num_ids);
+ __func__, devid, s->dt.num_entries);
return CMD_CONTINUE;
}
@@ -299,7 +300,7 @@ static ItsCmdResult process_its_cmd(GICv3ITSState *s, uint64_t value,
return CMD_CONTINUE;
}
- if (icid >= s->ct.num_ids) {
+ if (icid >= s->ct.num_entries) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
__func__, icid);
@@ -383,10 +384,10 @@ static ItsCmdResult process_mapti(GICv3ITSState *s, uint64_t value,
icid = value & ICID_MASK;
- if (devid >= s->dt.num_ids) {
+ if (devid >= s->dt.num_entries) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid command attributes: devid %d>=%d",
- __func__, devid, s->dt.num_ids);
+ __func__, devid, s->dt.num_entries);
return CMD_CONTINUE;
}
@@ -399,7 +400,7 @@ static ItsCmdResult process_mapti(GICv3ITSState *s, uint64_t value,
num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
num_intids = 1ULL << (GICD_TYPER_IDBITS + 1);
- if ((icid >= s->ct.num_ids)
+ if ((icid >= s->ct.num_entries)
|| !dte_valid || (eventid >= num_eventids) ||
(((pIntid < GICV3_LPI_INTID_START) || (pIntid >= num_intids)) &&
(pIntid != INTID_SPURIOUS))) {
@@ -484,7 +485,7 @@ static ItsCmdResult process_mapc(GICv3ITSState *s, uint32_t offset)
valid = (value & CMD_FIELD_VALID_MASK);
- if ((icid >= s->ct.num_ids) || (rdbase >= s->gicv3->num_cpu)) {
+ if ((icid >= s->ct.num_entries) || (rdbase >= s->gicv3->num_cpu)) {
qemu_log_mask(LOG_GUEST_ERROR,
"ITS MAPC: invalid collection table attributes "
"icid %d rdbase %" PRIu64 "\n", icid, rdbase);
@@ -565,7 +566,7 @@ static ItsCmdResult process_mapd(GICv3ITSState *s, uint64_t value,
valid = (value & CMD_FIELD_VALID_MASK);
- if ((devid >= s->dt.num_ids) ||
+ if ((devid >= s->dt.num_entries) ||
(size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
qemu_log_mask(LOG_GUEST_ERROR,
"ITS MAPD: invalid device table attributes "
@@ -581,6 +582,201 @@ static ItsCmdResult process_mapd(GICv3ITSState *s, uint64_t value,
return update_dte(s, devid, valid, size, itt_addr) ? CMD_CONTINUE : CMD_STALL;
}
+static ItsCmdResult process_movall(GICv3ITSState *s, uint64_t value,
+ uint32_t offset)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ MemTxResult res = MEMTX_OK;
+ uint64_t rd1, rd2;
+
+ /* No fields in dwords 0 or 1 */
+ offset += NUM_BYTES_IN_DW;
+ offset += NUM_BYTES_IN_DW;
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+ if (res != MEMTX_OK) {
+ return CMD_STALL;
+ }
+
+ rd1 = FIELD_EX64(value, MOVALL_2, RDBASE1);
+ if (rd1 >= s->gicv3->num_cpu) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: RDBASE1 %" PRId64
+ " out of range (must be less than %d)\n",
+ __func__, rd1, s->gicv3->num_cpu);
+ return CMD_CONTINUE;
+ }
+
+ offset += NUM_BYTES_IN_DW;
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+ if (res != MEMTX_OK) {
+ return CMD_STALL;
+ }
+
+ rd2 = FIELD_EX64(value, MOVALL_3, RDBASE2);
+ if (rd2 >= s->gicv3->num_cpu) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: RDBASE2 %" PRId64
+ " out of range (must be less than %d)\n",
+ __func__, rd2, s->gicv3->num_cpu);
+ return CMD_CONTINUE;
+ }
+
+ if (rd1 == rd2) {
+ /* Move to same target must succeed as a no-op */
+ return CMD_CONTINUE;
+ }
+
+ /* Move all pending LPIs from redistributor 1 to redistributor 2 */
+ gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);
+
+ return CMD_CONTINUE;
+}
+
+static ItsCmdResult process_movi(GICv3ITSState *s, uint64_t value,
+ uint32_t offset)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ MemTxResult res = MEMTX_OK;
+ uint32_t devid, eventid, intid;
+ uint16_t old_icid, new_icid;
+ uint64_t old_cte, new_cte;
+ uint64_t old_rdbase, new_rdbase;
+ uint64_t dte;
+ bool dte_valid, ite_valid, cte_valid;
+ uint64_t num_eventids;
+ IteEntry ite = {};
+
+ devid = FIELD_EX64(value, MOVI_0, DEVICEID);
+
+ offset += NUM_BYTES_IN_DW;
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+ if (res != MEMTX_OK) {
+ return CMD_STALL;
+ }
+ eventid = FIELD_EX64(value, MOVI_1, EVENTID);
+
+ offset += NUM_BYTES_IN_DW;
+ value = address_space_ldq_le(as, s->cq.base_addr + offset,
+ MEMTXATTRS_UNSPECIFIED, &res);
+ if (res != MEMTX_OK) {
+ return CMD_STALL;
+ }
+ new_icid = FIELD_EX64(value, MOVI_2, ICID);
+
+ if (devid >= s->dt.num_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: devid %d>=%d",
+ __func__, devid, s->dt.num_entries);
+ return CMD_CONTINUE;
+ }
+ dte = get_dte(s, devid, &res);
+ if (res != MEMTX_OK) {
+ return CMD_STALL;
+ }
+
+ dte_valid = FIELD_EX64(dte, DTE, VALID);
+ if (!dte_valid) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: "
+ "invalid dte: %"PRIx64" for %d\n",
+ __func__, dte, devid);
+ return CMD_CONTINUE;
+ }
+
+ num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
+ if (eventid >= num_eventids) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: eventid %d >= %"
+ PRId64 "\n",
+ __func__, eventid, num_eventids);
+ return CMD_CONTINUE;
+ }
+
+ ite_valid = get_ite(s, eventid, dte, &old_icid, &intid, &res);
+ if (res != MEMTX_OK) {
+ return CMD_STALL;
+ }
+
+ if (!ite_valid) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: invalid ITE\n",
+ __func__);
+ return CMD_CONTINUE;
+ }
+
+ if (old_icid >= s->ct.num_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
+ __func__, old_icid);
+ return CMD_CONTINUE;
+ }
+
+ if (new_icid >= s->ct.num_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: ICID 0x%x\n",
+ __func__, new_icid);
+ return CMD_CONTINUE;
+ }
+
+ cte_valid = get_cte(s, old_icid, &old_cte, &res);
+ if (res != MEMTX_OK) {
+ return CMD_STALL;
+ }
+ if (!cte_valid) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: "
+ "invalid cte: %"PRIx64"\n",
+ __func__, old_cte);
+ return CMD_CONTINUE;
+ }
+
+ cte_valid = get_cte(s, new_icid, &new_cte, &res);
+ if (res != MEMTX_OK) {
+ return CMD_STALL;
+ }
+ if (!cte_valid) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: "
+ "invalid cte: %"PRIx64"\n",
+ __func__, new_cte);
+ return CMD_CONTINUE;
+ }
+
+ old_rdbase = FIELD_EX64(old_cte, CTE, RDBASE);
+ if (old_rdbase >= s->gicv3->num_cpu) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: CTE has invalid rdbase 0x%"PRIx64"\n",
+ __func__, old_rdbase);
+ return CMD_CONTINUE;
+ }
+
+ new_rdbase = FIELD_EX64(new_cte, CTE, RDBASE);
+ if (new_rdbase >= s->gicv3->num_cpu) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: CTE has invalid rdbase 0x%"PRIx64"\n",
+ __func__, new_rdbase);
+ return CMD_CONTINUE;
+ }
+
+ if (old_rdbase != new_rdbase) {
+ /* Move the LPI from the old redistributor to the new one */
+ gicv3_redist_mov_lpi(&s->gicv3->cpu[old_rdbase],
+ &s->gicv3->cpu[new_rdbase],
+ intid);
+ }
+
+ /* Update the ICID field in the interrupt translation table entry */
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, 1);
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, intid);
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
+ ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, new_icid);
+ return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
+}
+
/*
* Current implementation blocks until all
* commands are processed
@@ -634,6 +830,8 @@ static void process_cmdq(GICv3ITSState *s)
cmd = (data & CMD_MASK);
+ trace_gicv3_its_process_command(rd_offset, cmd);
+
switch (cmd) {
case GITS_CMD_INT:
result = process_its_cmd(s, data, cq_offset, INTERRUPT);
@@ -676,6 +874,12 @@ static void process_cmdq(GICv3ITSState *s)
gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
}
break;
+ case GITS_CMD_MOVI:
+ result = process_movi(s, data, cq_offset);
+ break;
+ case GITS_CMD_MOVALL:
+ result = process_movall(s, data, cq_offset);
+ break;
default:
break;
}
@@ -788,7 +992,7 @@ static void extract_table_params(GICv3ITSState *s)
L1TABLE_ENTRY_SIZE) *
(page_sz / td->entry_sz));
}
- td->num_ids = 1ULL << idbits;
+ td->num_entries = MIN(td->num_entries, 1ULL << idbits);
}
}
@@ -810,6 +1014,18 @@ static void extract_cmdq_params(GICv3ITSState *s)
}
}
+static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
+ uint64_t *data, unsigned size,
+ MemTxAttrs attrs)
+{
+ /*
+ * GITS_TRANSLATER is write-only, and all other addresses
+ * in the interrupt translation space frame are RES0.
+ */
+ *data = 0;
+ return MEMTX_OK;
+}
+
static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
uint64_t data, unsigned size,
MemTxAttrs attrs)
@@ -818,6 +1034,8 @@ static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
bool result = true;
uint32_t devid = 0;
+ trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);
+
switch (offset) {
case GITS_TRANSLATER:
if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
@@ -848,7 +1066,6 @@ static bool its_writel(GICv3ITSState *s, hwaddr offset,
s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
extract_table_params(s);
extract_cmdq_params(s);
- s->creadr = 0;
process_cmdq(s);
} else {
s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
@@ -862,7 +1079,6 @@ static bool its_writel(GICv3ITSState *s, hwaddr offset,
if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
s->cbaser = deposit64(s->cbaser, 0, 32, value);
s->creadr = 0;
- s->cwriter = s->creadr;
}
break;
case GITS_CBASER + 4:
@@ -873,7 +1089,6 @@ static bool its_writel(GICv3ITSState *s, hwaddr offset,
if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
s->cbaser = deposit64(s->cbaser, 32, 32, value);
s->creadr = 0;
- s->cwriter = s->creadr;
}
break;
case GITS_CWRITER:
@@ -915,6 +1130,10 @@ static bool its_writel(GICv3ITSState *s, hwaddr offset,
if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
index = (offset - GITS_BASER) / 8;
+ if (s->baser[index] == 0) {
+ /* Unimplemented GITS_BASERn: RAZ/WI */
+ break;
+ }
if (offset & 7) {
value <<= 32;
value &= ~GITS_BASER_RO_MASK;
@@ -1011,6 +1230,10 @@ static bool its_writell(GICv3ITSState *s, hwaddr offset,
*/
if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
index = (offset - GITS_BASER) / 8;
+ if (s->baser[index] == 0) {
+ /* Unimplemented GITS_BASERn: RAZ/WI */
+ break;
+ }
s->baser[index] &= GITS_BASER_RO_MASK;
s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
}
@@ -1023,7 +1246,6 @@ static bool its_writell(GICv3ITSState *s, hwaddr offset,
if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
s->cbaser = value;
s->creadr = 0;
- s->cwriter = s->creadr;
}
break;
case GITS_CWRITER:
@@ -1107,6 +1329,7 @@ static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid guest read at offset " TARGET_FMT_plx
"size %u\n", __func__, offset, size);
+ trace_gicv3_its_badread(offset, size);
/*
* The spec requires that reserved registers are RAZ/WI;
* so use false returns from leaf functions as a way to
@@ -1114,6 +1337,8 @@ static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
* the caller, or we'll cause a spurious guest data abort.
*/
*data = 0;
+ } else {
+ trace_gicv3_its_read(offset, *data, size);
}
return MEMTX_OK;
}
@@ -1140,12 +1365,15 @@ static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid guest write at offset " TARGET_FMT_plx
"size %u\n", __func__, offset, size);
+ trace_gicv3_its_badwrite(offset, data, size);
/*
* The spec requires that reserved registers are RAZ/WI;
* so use false returns from leaf functions as a way to
* trigger the guest-error logging but don't return it to
* the caller, or we'll cause a spurious guest data abort.
*/
+ } else {
+ trace_gicv3_its_write(offset, data, size);
}
return MEMTX_OK;
}
@@ -1161,6 +1389,7 @@ static const MemoryRegionOps gicv3_its_control_ops = {
};
static const MemoryRegionOps gicv3_its_translation_ops = {
+ .read_with_attrs = gicv3_its_translation_read,
.write_with_attrs = gicv3_its_translation_write,
.valid.min_access_size = 2,
.valid.max_access_size = 4,
@@ -1183,9 +1412,6 @@ static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
- address_space_init(&s->gicv3->dma_as, s->gicv3->dma,
- "gicv3-its-sysmem");
-
/* set the ITS default features supported */
s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
diff --git a/hw/intc/arm_gicv3_redist.c b/hw/intc/arm_gicv3_redist.c
index 99b11ca..412a04f 100644
--- a/hw/intc/arm_gicv3_redist.c
+++ b/hw/intc/arm_gicv3_redist.c
@@ -591,8 +591,7 @@ void gicv3_redist_update_lpi_only(GICv3CPUState *cs)
idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
GICD_TYPER_IDBITS);
- if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) || !cs->gicr_propbaser ||
- !cs->gicr_pendbaser) {
+ if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
return;
}
@@ -673,9 +672,8 @@ void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level)
idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
GICD_TYPER_IDBITS);
- if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) || !cs->gicr_propbaser ||
- !cs->gicr_pendbaser || (irq > (1ULL << (idbits + 1)) - 1) ||
- irq < GICV3_LPI_INTID_START) {
+ if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
+ (irq > (1ULL << (idbits + 1)) - 1) || irq < GICV3_LPI_INTID_START) {
return;
}
@@ -683,6 +681,113 @@ void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level)
gicv3_redist_lpi_pending(cs, irq, level);
}
+void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq)
+{
+ /*
+ * Move the specified LPI's pending state from the source redistributor
+ * to the destination.
+ *
+ * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
+ * we choose to NOP. If LPIs are disabled on source there's nothing
+ * to be transferred anyway.
+ */
+ AddressSpace *as = &src->gic->dma_as;
+ uint64_t idbits;
+ uint32_t pendt_size;
+ uint64_t src_baddr;
+ uint8_t src_pend;
+
+ if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
+ !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
+ return;
+ }
+
+ idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
+ GICD_TYPER_IDBITS);
+ idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
+ idbits);
+
+ pendt_size = 1ULL << (idbits + 1);
+ if ((irq / 8) >= pendt_size) {
+ return;
+ }
+
+ src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
+
+ address_space_read(as, src_baddr + (irq / 8),
+ MEMTXATTRS_UNSPECIFIED, &src_pend, sizeof(src_pend));
+ if (!extract32(src_pend, irq % 8, 1)) {
+ /* Not pending on source, nothing to do */
+ return;
+ }
+ src_pend &= ~(1 << (irq % 8));
+ address_space_write(as, src_baddr + (irq / 8),
+ MEMTXATTRS_UNSPECIFIED, &src_pend, sizeof(src_pend));
+ if (irq == src->hpplpi.irq) {
+ /*
+ * We just made this LPI not-pending so only need to update
+ * if it was previously the highest priority pending LPI
+ */
+ gicv3_redist_update_lpi(src);
+ }
+ /* Mark it pending on the destination */
+ gicv3_redist_lpi_pending(dest, irq, 1);
+}
+
+void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest)
+{
+ /*
+ * We must move all pending LPIs from the source redistributor
+ * to the destination. That is, for every pending LPI X on
+ * src, we must set it not-pending on src and pending on dest.
+ * LPIs that are already pending on dest are not cleared.
+ *
+ * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
+ * we choose to NOP. If LPIs are disabled on source there's nothing
+ * to be transferred anyway.
+ */
+ AddressSpace *as = &src->gic->dma_as;
+ uint64_t idbits;
+ uint32_t pendt_size;
+ uint64_t src_baddr, dest_baddr;
+ int i;
+
+ if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
+ !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
+ return;
+ }
+
+ idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
+ GICD_TYPER_IDBITS);
+ idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
+ idbits);
+
+ pendt_size = 1ULL << (idbits + 1);
+ src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
+ dest_baddr = dest->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
+
+ for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
+ uint8_t src_pend, dest_pend;
+
+ address_space_read(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
+ &src_pend, sizeof(src_pend));
+ if (!src_pend) {
+ continue;
+ }
+ address_space_read(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
+ &dest_pend, sizeof(dest_pend));
+ dest_pend |= src_pend;
+ src_pend = 0;
+ address_space_write(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
+ &src_pend, sizeof(src_pend));
+ address_space_write(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
+ &dest_pend, sizeof(dest_pend));
+ }
+
+ gicv3_redist_update_lpi(src);
+ gicv3_redist_update_lpi(dest);
+}
+
void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level)
{
/* Update redistributor state for a change in an external PPI input line */
diff --git a/hw/intc/gicv3_internal.h b/hw/intc/gicv3_internal.h
index 1eeb990..b1af26d 100644
--- a/hw/intc/gicv3_internal.h
+++ b/hw/intc/gicv3_internal.h
@@ -110,6 +110,7 @@
#define GICR_NSACR (GICR_SGI_OFFSET + 0x0E00)
#define GICR_CTLR_ENABLE_LPIS (1U << 0)
+#define GICR_CTLR_CES (1U << 1)
#define GICR_CTLR_RWP (1U << 3)
#define GICR_CTLR_DPG0 (1U << 24)
#define GICR_CTLR_DPG1NS (1U << 25)
@@ -314,16 +315,18 @@ FIELD(GITS_TYPER, CIL, 36, 1)
#define CMD_MASK 0xff
/* ITS Commands */
-#define GITS_CMD_CLEAR 0x04
-#define GITS_CMD_DISCARD 0x0F
+#define GITS_CMD_MOVI 0x01
#define GITS_CMD_INT 0x03
-#define GITS_CMD_MAPC 0x09
+#define GITS_CMD_CLEAR 0x04
+#define GITS_CMD_SYNC 0x05
#define GITS_CMD_MAPD 0x08
-#define GITS_CMD_MAPI 0x0B
+#define GITS_CMD_MAPC 0x09
#define GITS_CMD_MAPTI 0x0A
+#define GITS_CMD_MAPI 0x0B
#define GITS_CMD_INV 0x0C
#define GITS_CMD_INVALL 0x0D
-#define GITS_CMD_SYNC 0x05
+#define GITS_CMD_MOVALL 0x0E
+#define GITS_CMD_DISCARD 0x0F
/* MAPC command fields */
#define ICID_LENGTH 16
@@ -354,6 +357,15 @@ FIELD(MAPC, RDBASE, 16, 32)
#define L2_TABLE_VALID_MASK CMD_FIELD_VALID_MASK
#define TABLE_ENTRY_VALID_MASK (1ULL << 0)
+/* MOVALL command fields */
+FIELD(MOVALL_2, RDBASE1, 16, 36)
+FIELD(MOVALL_3, RDBASE2, 16, 36)
+
+/* MOVI command fields */
+FIELD(MOVI_0, DEVICEID, 32, 32)
+FIELD(MOVI_1, EVENTID, 0, 32)
+FIELD(MOVI_2, ICID, 0, 16)
+
/*
* 12 bytes Interrupt translation Table Entry size
* as per Table 5.3 in GICv3 spec
@@ -496,6 +508,27 @@ void gicv3_redist_update_lpi(GICv3CPUState *cs);
* an incoming migration has loaded new state.
*/
void gicv3_redist_update_lpi_only(GICv3CPUState *cs);
+/**
+ * gicv3_redist_mov_lpi:
+ * @src: source redistributor
+ * @dest: destination redistributor
+ * @irq: LPI to update
+ *
+ * Move the pending state of the specified LPI from @src to @dest,
+ * as required by the ITS MOVI command.
+ */
+void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq);
+/**
+ * gicv3_redist_movall_lpis:
+ * @src: source redistributor
+ * @dest: destination redistributor
+ *
+ * Scan the LPI pending table for @src, and for each pending LPI there
+ * mark it as not-pending for @src and pending for @dest, as required
+ * by the ITS MOVALL command.
+ */
+void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest);
+
void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns);
void gicv3_init_cpuif(GICv3State *s);
diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c
index bb20751..621b20a 100644
--- a/hw/intc/pnv_xive.c
+++ b/hw/intc/pnv_xive.c
@@ -172,7 +172,12 @@ static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
/* Get the page size of the indirect table. */
vsd_addr = vsd & VSD_ADDRESS_MASK;
- ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);
+ if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
+ MEMTXATTRS_UNSPECIFIED)) {
+ xive_error(xive, "VST: failed to access %s entry %x @0x%" PRIx64,
+ info->name, idx, vsd_addr);
+ return 0;
+ }
if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
@@ -195,8 +200,12 @@ static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
/* Load the VSD we are looking for, if not already done */
if (vsd_idx) {
vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
- ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
- MEMTXATTRS_UNSPECIFIED);
+ if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
+ MEMTXATTRS_UNSPECIFIED)) {
+ xive_error(xive, "VST: failed to access %s entry %x @0x%"
+ PRIx64, info->name, vsd_idx, vsd_addr);
+ return 0;
+ }
if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
@@ -543,7 +552,12 @@ static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type)
/* Get the page size of the indirect table. */
vsd_addr = vsd & VSD_ADDRESS_MASK;
- ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);
+ if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
+ MEMTXATTRS_UNSPECIFIED)) {
+ xive_error(xive, "VST: failed to access %s entry @0x%" PRIx64,
+ info->name, vsd_addr);
+ return 0;
+ }
if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
index 9aba7e3..b28cda4 100644
--- a/hw/intc/trace-events
+++ b/hw/intc/trace-events
@@ -169,6 +169,14 @@ gicv3_redist_badwrite(uint32_t cpu, uint64_t offset, uint64_t data, unsigned siz
gicv3_redist_set_irq(uint32_t cpu, int irq, int level) "GICv3 redistributor 0x%x interrupt %d level changed to %d"
gicv3_redist_send_sgi(uint32_t cpu, int irq) "GICv3 redistributor 0x%x pending SGI %d"
+# arm_gicv3_its.c
+gicv3_its_read(uint64_t offset, uint64_t data, unsigned size) "GICv3 ITS read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+gicv3_its_badread(uint64_t offset, unsigned size) "GICv3 ITS read: offset 0x%" PRIx64 " size %u: error"
+gicv3_its_write(uint64_t offset, uint64_t data, unsigned size) "GICv3 ITS write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
+gicv3_its_badwrite(uint64_t offset, uint64_t data, unsigned size) "GICv3 ITS write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u: error"
+gicv3_its_translation_write(uint64_t offset, uint64_t data, unsigned size, uint32_t requester_id) "GICv3 ITS TRANSLATER write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u requester_id 0x%x"
+gicv3_its_process_command(uint32_t rd_offset, uint8_t cmd) "GICv3 ITS: processing command at offset 0x%x: 0x%x"
+
# armv7m_nvic.c
nvic_recompute_state(int vectpending, int vectpending_prio, int exception_prio) "NVIC state recomputed: vectpending %d vectpending_prio %d exception_prio %d"
nvic_recompute_state_secure(int vectpending, bool vectpending_is_s_banked, int vectpending_prio, int exception_prio) "NVIC state recomputed: vectpending %d is_s_banked %d vectpending_prio %d exception_prio %d"
diff --git a/hw/misc/bcm2835_mbox.c b/hw/misc/bcm2835_mbox.c
index 9f73cbd..04e53c9 100644
--- a/hw/misc/bcm2835_mbox.c
+++ b/hw/misc/bcm2835_mbox.c
@@ -271,7 +271,6 @@ static const VMStateDescription vmstate_bcm2835_mbox = {
.name = TYPE_BCM2835_MBOX,
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_BOOL_ARRAY(available, BCM2835MboxState, MBOX_CHAN_COUNT),
VMSTATE_STRUCT_ARRAY(mbox, BCM2835MboxState, 2, 1,
diff --git a/hw/misc/mac_via.c b/hw/misc/mac_via.c
index b378e6b..71b74c3 100644
--- a/hw/misc/mac_via.c
+++ b/hw/misc/mac_via.c
@@ -16,7 +16,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "migration/vmstate.h"
#include "hw/sysbus.h"
#include "hw/irq.h"
@@ -30,6 +29,7 @@
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "sysemu/block-backend.h"
+#include "sysemu/rtc.h"
#include "trace.h"
#include "qemu/log.h"
diff --git a/hw/misc/macio/cuda.c b/hw/misc/macio/cuda.c
index e917a6a..233daf1 100644
--- a/hw/misc/macio/cuda.c
+++ b/hw/misc/macio/cuda.c
@@ -24,7 +24,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "hw/ppc/mac.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
@@ -34,6 +33,7 @@
#include "qapi/error.h"
#include "qemu/timer.h"
#include "sysemu/runstate.h"
+#include "sysemu/rtc.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
diff --git a/hw/misc/macio/pmu.c b/hw/misc/macio/pmu.c
index eb39c64..76c608e 100644
--- a/hw/misc/macio/pmu.c
+++ b/hw/misc/macio/pmu.c
@@ -29,7 +29,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "hw/ppc/mac.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
@@ -41,6 +40,7 @@
#include "qapi/error.h"
#include "qemu/timer.h"
#include "sysemu/runstate.h"
+#include "sysemu/rtc.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
index d1a1169..6dcbe04 100644
--- a/hw/misc/meson.build
+++ b/hw/misc/meson.build
@@ -84,7 +84,10 @@ softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files(
))
softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_misc.c'))
softmmu_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq_slcr.c'))
-softmmu_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-xramc.c'))
+softmmu_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files(
+ 'xlnx-versal-xramc.c',
+ 'xlnx-versal-pmc-iou-slcr.c',
+))
softmmu_ss.add(when: 'CONFIG_STM32F2XX_SYSCFG', if_true: files('stm32f2xx_syscfg.c'))
softmmu_ss.add(when: 'CONFIG_STM32F4XX_SYSCFG', if_true: files('stm32f4xx_syscfg.c'))
softmmu_ss.add(when: 'CONFIG_STM32F4XX_EXTI', if_true: files('stm32f4xx_exti.c'))
diff --git a/hw/misc/xlnx-versal-pmc-iou-slcr.c b/hw/misc/xlnx-versal-pmc-iou-slcr.c
new file mode 100644
index 0000000..07b7ebc
--- /dev/null
+++ b/hw/misc/xlnx-versal-pmc-iou-slcr.c
@@ -0,0 +1,1446 @@
+/*
+ * QEMU model of Versal's PMC IOU SLCR (system level control registers)
+ *
+ * Copyright (c) 2021 Xilinx Inc.
+ * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "hw/register.h"
+#include "hw/irq.h"
+#include "qemu/bitops.h"
+#include "qemu/log.h"
+#include "migration/vmstate.h"
+#include "hw/qdev-properties.h"
+#include "hw/misc/xlnx-versal-pmc-iou-slcr.h"
+
+#ifndef XILINX_VERSAL_PMC_IOU_SLCR_ERR_DEBUG
+#define XILINX_VERSAL_PMC_IOU_SLCR_ERR_DEBUG 0
+#endif
+
+REG32(MIO_PIN_0, 0x0)
+ FIELD(MIO_PIN_0, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_0, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_0, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_0, L0_SEL, 1, 2)
+REG32(MIO_PIN_1, 0x4)
+ FIELD(MIO_PIN_1, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_1, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_1, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_1, L0_SEL, 1, 2)
+REG32(MIO_PIN_2, 0x8)
+ FIELD(MIO_PIN_2, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_2, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_2, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_2, L0_SEL, 1, 2)
+REG32(MIO_PIN_3, 0xc)
+ FIELD(MIO_PIN_3, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_3, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_3, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_3, L0_SEL, 1, 2)
+REG32(MIO_PIN_4, 0x10)
+ FIELD(MIO_PIN_4, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_4, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_4, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_4, L0_SEL, 1, 2)
+REG32(MIO_PIN_5, 0x14)
+ FIELD(MIO_PIN_5, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_5, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_5, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_5, L0_SEL, 1, 2)
+REG32(MIO_PIN_6, 0x18)
+ FIELD(MIO_PIN_6, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_6, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_6, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_6, L0_SEL, 1, 2)
+REG32(MIO_PIN_7, 0x1c)
+ FIELD(MIO_PIN_7, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_7, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_7, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_7, L0_SEL, 1, 2)
+REG32(MIO_PIN_8, 0x20)
+ FIELD(MIO_PIN_8, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_8, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_8, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_8, L0_SEL, 1, 2)
+REG32(MIO_PIN_9, 0x24)
+ FIELD(MIO_PIN_9, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_9, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_9, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_9, L0_SEL, 1, 2)
+REG32(MIO_PIN_10, 0x28)
+ FIELD(MIO_PIN_10, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_10, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_10, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_10, L0_SEL, 1, 2)
+REG32(MIO_PIN_11, 0x2c)
+ FIELD(MIO_PIN_11, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_11, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_11, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_11, L0_SEL, 1, 2)
+REG32(MIO_PIN_12, 0x30)
+ FIELD(MIO_PIN_12, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_12, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_12, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_12, L0_SEL, 1, 2)
+REG32(MIO_PIN_13, 0x34)
+ FIELD(MIO_PIN_13, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_13, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_13, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_13, L0_SEL, 1, 2)
+REG32(MIO_PIN_14, 0x38)
+ FIELD(MIO_PIN_14, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_14, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_14, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_14, L0_SEL, 1, 2)
+REG32(MIO_PIN_15, 0x3c)
+ FIELD(MIO_PIN_15, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_15, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_15, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_15, L0_SEL, 1, 2)
+REG32(MIO_PIN_16, 0x40)
+ FIELD(MIO_PIN_16, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_16, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_16, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_16, L0_SEL, 1, 2)
+REG32(MIO_PIN_17, 0x44)
+ FIELD(MIO_PIN_17, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_17, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_17, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_17, L0_SEL, 1, 2)
+REG32(MIO_PIN_18, 0x48)
+ FIELD(MIO_PIN_18, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_18, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_18, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_18, L0_SEL, 1, 2)
+REG32(MIO_PIN_19, 0x4c)
+ FIELD(MIO_PIN_19, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_19, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_19, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_19, L0_SEL, 1, 2)
+REG32(MIO_PIN_20, 0x50)
+ FIELD(MIO_PIN_20, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_20, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_20, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_20, L0_SEL, 1, 2)
+REG32(MIO_PIN_21, 0x54)
+ FIELD(MIO_PIN_21, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_21, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_21, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_21, L0_SEL, 1, 2)
+REG32(MIO_PIN_22, 0x58)
+ FIELD(MIO_PIN_22, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_22, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_22, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_22, L0_SEL, 1, 2)
+REG32(MIO_PIN_23, 0x5c)
+ FIELD(MIO_PIN_23, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_23, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_23, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_23, L0_SEL, 1, 2)
+REG32(MIO_PIN_24, 0x60)
+ FIELD(MIO_PIN_24, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_24, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_24, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_24, L0_SEL, 1, 2)
+REG32(MIO_PIN_25, 0x64)
+ FIELD(MIO_PIN_25, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_25, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_25, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_25, L0_SEL, 1, 2)
+REG32(MIO_PIN_26, 0x68)
+ FIELD(MIO_PIN_26, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_26, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_26, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_26, L0_SEL, 1, 2)
+REG32(MIO_PIN_27, 0x6c)
+ FIELD(MIO_PIN_27, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_27, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_27, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_27, L0_SEL, 1, 2)
+REG32(MIO_PIN_28, 0x70)
+ FIELD(MIO_PIN_28, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_28, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_28, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_28, L0_SEL, 1, 2)
+REG32(MIO_PIN_29, 0x74)
+ FIELD(MIO_PIN_29, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_29, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_29, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_29, L0_SEL, 1, 2)
+REG32(MIO_PIN_30, 0x78)
+ FIELD(MIO_PIN_30, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_30, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_30, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_30, L0_SEL, 1, 2)
+REG32(MIO_PIN_31, 0x7c)
+ FIELD(MIO_PIN_31, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_31, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_31, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_31, L0_SEL, 1, 2)
+REG32(MIO_PIN_32, 0x80)
+ FIELD(MIO_PIN_32, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_32, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_32, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_32, L0_SEL, 1, 2)
+REG32(MIO_PIN_33, 0x84)
+ FIELD(MIO_PIN_33, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_33, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_33, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_33, L0_SEL, 1, 2)
+REG32(MIO_PIN_34, 0x88)
+ FIELD(MIO_PIN_34, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_34, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_34, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_34, L0_SEL, 1, 2)
+REG32(MIO_PIN_35, 0x8c)
+ FIELD(MIO_PIN_35, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_35, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_35, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_35, L0_SEL, 1, 2)
+REG32(MIO_PIN_36, 0x90)
+ FIELD(MIO_PIN_36, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_36, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_36, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_36, L0_SEL, 1, 2)
+REG32(MIO_PIN_37, 0x94)
+ FIELD(MIO_PIN_37, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_37, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_37, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_37, L0_SEL, 1, 2)
+REG32(MIO_PIN_38, 0x98)
+ FIELD(MIO_PIN_38, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_38, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_38, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_38, L0_SEL, 1, 2)
+REG32(MIO_PIN_39, 0x9c)
+ FIELD(MIO_PIN_39, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_39, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_39, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_39, L0_SEL, 1, 2)
+REG32(MIO_PIN_40, 0xa0)
+ FIELD(MIO_PIN_40, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_40, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_40, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_40, L0_SEL, 1, 2)
+REG32(MIO_PIN_41, 0xa4)
+ FIELD(MIO_PIN_41, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_41, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_41, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_41, L0_SEL, 1, 2)
+REG32(MIO_PIN_42, 0xa8)
+ FIELD(MIO_PIN_42, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_42, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_42, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_42, L0_SEL, 1, 2)
+REG32(MIO_PIN_43, 0xac)
+ FIELD(MIO_PIN_43, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_43, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_43, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_43, L0_SEL, 1, 2)
+REG32(MIO_PIN_44, 0xb0)
+ FIELD(MIO_PIN_44, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_44, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_44, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_44, L0_SEL, 1, 2)
+REG32(MIO_PIN_45, 0xb4)
+ FIELD(MIO_PIN_45, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_45, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_45, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_45, L0_SEL, 1, 2)
+REG32(MIO_PIN_46, 0xb8)
+ FIELD(MIO_PIN_46, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_46, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_46, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_46, L0_SEL, 1, 2)
+REG32(MIO_PIN_47, 0xbc)
+ FIELD(MIO_PIN_47, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_47, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_47, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_47, L0_SEL, 1, 2)
+REG32(MIO_PIN_48, 0xc0)
+ FIELD(MIO_PIN_48, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_48, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_48, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_48, L0_SEL, 1, 2)
+REG32(MIO_PIN_49, 0xc4)
+ FIELD(MIO_PIN_49, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_49, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_49, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_49, L0_SEL, 1, 2)
+REG32(MIO_PIN_50, 0xc8)
+ FIELD(MIO_PIN_50, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_50, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_50, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_50, L0_SEL, 1, 2)
+REG32(MIO_PIN_51, 0xcc)
+ FIELD(MIO_PIN_51, L3_SEL, 7, 3)
+ FIELD(MIO_PIN_51, L2_SEL, 5, 2)
+ FIELD(MIO_PIN_51, L1_SEL, 3, 2)
+ FIELD(MIO_PIN_51, L0_SEL, 1, 2)
+REG32(BNK0_EN_RX, 0x100)
+ FIELD(BNK0_EN_RX, BNK0_EN_RX, 0, 26)
+REG32(BNK0_SEL_RX0, 0x104)
+REG32(BNK0_SEL_RX1, 0x108)
+ FIELD(BNK0_SEL_RX1, BNK0_SEL_RX, 0, 20)
+REG32(BNK0_EN_RX_SCHMITT_HYST, 0x10c)
+ FIELD(BNK0_EN_RX_SCHMITT_HYST, BNK0_EN_RX_SCHMITT_HYST, 0, 26)
+REG32(BNK0_EN_WK_PD, 0x110)
+ FIELD(BNK0_EN_WK_PD, BNK0_EN_WK_PD, 0, 26)
+REG32(BNK0_EN_WK_PU, 0x114)
+ FIELD(BNK0_EN_WK_PU, BNK0_EN_WK_PU, 0, 26)
+REG32(BNK0_SEL_DRV0, 0x118)
+REG32(BNK0_SEL_DRV1, 0x11c)
+ FIELD(BNK0_SEL_DRV1, BNK0_SEL_DRV, 0, 20)
+REG32(BNK0_SEL_SLEW, 0x120)
+ FIELD(BNK0_SEL_SLEW, BNK0_SEL_SLEW, 0, 26)
+REG32(BNK0_EN_DFT_OPT_INV, 0x124)
+ FIELD(BNK0_EN_DFT_OPT_INV, BNK0_EN_DFT_OPT_INV, 0, 26)
+REG32(BNK0_EN_PAD2PAD_LOOPBACK, 0x128)
+ FIELD(BNK0_EN_PAD2PAD_LOOPBACK, BNK0_EN_PAD2PAD_LOOPBACK, 0, 13)
+REG32(BNK0_RX_SPARE0, 0x12c)
+REG32(BNK0_RX_SPARE1, 0x130)
+ FIELD(BNK0_RX_SPARE1, BNK0_RX_SPARE, 0, 20)
+REG32(BNK0_TX_SPARE0, 0x134)
+REG32(BNK0_TX_SPARE1, 0x138)
+ FIELD(BNK0_TX_SPARE1, BNK0_TX_SPARE, 0, 20)
+REG32(BNK0_SEL_EN1P8, 0x13c)
+ FIELD(BNK0_SEL_EN1P8, BNK0_SEL_EN1P8, 0, 1)
+REG32(BNK0_EN_B_POR_DETECT, 0x140)
+ FIELD(BNK0_EN_B_POR_DETECT, BNK0_EN_B_POR_DETECT, 0, 1)
+REG32(BNK0_LPF_BYP_POR_DETECT, 0x144)
+ FIELD(BNK0_LPF_BYP_POR_DETECT, BNK0_LPF_BYP_POR_DETECT, 0, 1)
+REG32(BNK0_EN_LATCH, 0x148)
+ FIELD(BNK0_EN_LATCH, BNK0_EN_LATCH, 0, 1)
+REG32(BNK0_VBG_LPF_BYP_B, 0x14c)
+ FIELD(BNK0_VBG_LPF_BYP_B, BNK0_VBG_LPF_BYP_B, 0, 1)
+REG32(BNK0_EN_AMP_B, 0x150)
+ FIELD(BNK0_EN_AMP_B, BNK0_EN_AMP_B, 0, 2)
+REG32(BNK0_SPARE_BIAS, 0x154)
+ FIELD(BNK0_SPARE_BIAS, BNK0_SPARE_BIAS, 0, 4)
+REG32(BNK0_DRIVER_BIAS, 0x158)
+ FIELD(BNK0_DRIVER_BIAS, BNK0_DRIVER_BIAS, 0, 15)
+REG32(BNK0_VMODE, 0x15c)
+ FIELD(BNK0_VMODE, BNK0_VMODE, 0, 1)
+REG32(BNK0_SEL_AUX_IO_RX, 0x160)
+ FIELD(BNK0_SEL_AUX_IO_RX, BNK0_SEL_AUX_IO_RX, 0, 26)
+REG32(BNK0_EN_TX_HS_MODE, 0x164)
+ FIELD(BNK0_EN_TX_HS_MODE, BNK0_EN_TX_HS_MODE, 0, 26)
+REG32(MIO_MST_TRI0, 0x200)
+ FIELD(MIO_MST_TRI0, PIN_25_TRI, 25, 1)
+ FIELD(MIO_MST_TRI0, PIN_24_TRI, 24, 1)
+ FIELD(MIO_MST_TRI0, PIN_23_TRI, 23, 1)
+ FIELD(MIO_MST_TRI0, PIN_22_TRI, 22, 1)
+ FIELD(MIO_MST_TRI0, PIN_21_TRI, 21, 1)
+ FIELD(MIO_MST_TRI0, PIN_20_TRI, 20, 1)
+ FIELD(MIO_MST_TRI0, PIN_19_TRI, 19, 1)
+ FIELD(MIO_MST_TRI0, PIN_18_TRI, 18, 1)
+ FIELD(MIO_MST_TRI0, PIN_17_TRI, 17, 1)
+ FIELD(MIO_MST_TRI0, PIN_16_TRI, 16, 1)
+ FIELD(MIO_MST_TRI0, PIN_15_TRI, 15, 1)
+ FIELD(MIO_MST_TRI0, PIN_14_TRI, 14, 1)
+ FIELD(MIO_MST_TRI0, PIN_13_TRI, 13, 1)
+ FIELD(MIO_MST_TRI0, PIN_12_TRI, 12, 1)
+ FIELD(MIO_MST_TRI0, PIN_11_TRI, 11, 1)
+ FIELD(MIO_MST_TRI0, PIN_10_TRI, 10, 1)
+ FIELD(MIO_MST_TRI0, PIN_09_TRI, 9, 1)
+ FIELD(MIO_MST_TRI0, PIN_08_TRI, 8, 1)
+ FIELD(MIO_MST_TRI0, PIN_07_TRI, 7, 1)
+ FIELD(MIO_MST_TRI0, PIN_06_TRI, 6, 1)
+ FIELD(MIO_MST_TRI0, PIN_05_TRI, 5, 1)
+ FIELD(MIO_MST_TRI0, PIN_04_TRI, 4, 1)
+ FIELD(MIO_MST_TRI0, PIN_03_TRI, 3, 1)
+ FIELD(MIO_MST_TRI0, PIN_02_TRI, 2, 1)
+ FIELD(MIO_MST_TRI0, PIN_01_TRI, 1, 1)
+ FIELD(MIO_MST_TRI0, PIN_00_TRI, 0, 1)
+REG32(MIO_MST_TRI1, 0x204)
+ FIELD(MIO_MST_TRI1, PIN_51_TRI, 25, 1)
+ FIELD(MIO_MST_TRI1, PIN_50_TRI, 24, 1)
+ FIELD(MIO_MST_TRI1, PIN_49_TRI, 23, 1)
+ FIELD(MIO_MST_TRI1, PIN_48_TRI, 22, 1)
+ FIELD(MIO_MST_TRI1, PIN_47_TRI, 21, 1)
+ FIELD(MIO_MST_TRI1, PIN_46_TRI, 20, 1)
+ FIELD(MIO_MST_TRI1, PIN_45_TRI, 19, 1)
+ FIELD(MIO_MST_TRI1, PIN_44_TRI, 18, 1)
+ FIELD(MIO_MST_TRI1, PIN_43_TRI, 17, 1)
+ FIELD(MIO_MST_TRI1, PIN_42_TRI, 16, 1)
+ FIELD(MIO_MST_TRI1, PIN_41_TRI, 15, 1)
+ FIELD(MIO_MST_TRI1, PIN_40_TRI, 14, 1)
+ FIELD(MIO_MST_TRI1, PIN_39_TRI, 13, 1)
+ FIELD(MIO_MST_TRI1, PIN_38_TRI, 12, 1)
+ FIELD(MIO_MST_TRI1, PIN_37_TRI, 11, 1)
+ FIELD(MIO_MST_TRI1, PIN_36_TRI, 10, 1)
+ FIELD(MIO_MST_TRI1, PIN_35_TRI, 9, 1)
+ FIELD(MIO_MST_TRI1, PIN_34_TRI, 8, 1)
+ FIELD(MIO_MST_TRI1, PIN_33_TRI, 7, 1)
+ FIELD(MIO_MST_TRI1, PIN_32_TRI, 6, 1)
+ FIELD(MIO_MST_TRI1, PIN_31_TRI, 5, 1)
+ FIELD(MIO_MST_TRI1, PIN_30_TRI, 4, 1)
+ FIELD(MIO_MST_TRI1, PIN_29_TRI, 3, 1)
+ FIELD(MIO_MST_TRI1, PIN_28_TRI, 2, 1)
+ FIELD(MIO_MST_TRI1, PIN_27_TRI, 1, 1)
+ FIELD(MIO_MST_TRI1, PIN_26_TRI, 0, 1)
+REG32(BNK1_EN_RX, 0x300)
+ FIELD(BNK1_EN_RX, BNK1_EN_RX, 0, 26)
+REG32(BNK1_SEL_RX0, 0x304)
+REG32(BNK1_SEL_RX1, 0x308)
+ FIELD(BNK1_SEL_RX1, BNK1_SEL_RX, 0, 20)
+REG32(BNK1_EN_RX_SCHMITT_HYST, 0x30c)
+ FIELD(BNK1_EN_RX_SCHMITT_HYST, BNK1_EN_RX_SCHMITT_HYST, 0, 26)
+REG32(BNK1_EN_WK_PD, 0x310)
+ FIELD(BNK1_EN_WK_PD, BNK1_EN_WK_PD, 0, 26)
+REG32(BNK1_EN_WK_PU, 0x314)
+ FIELD(BNK1_EN_WK_PU, BNK1_EN_WK_PU, 0, 26)
+REG32(BNK1_SEL_DRV0, 0x318)
+REG32(BNK1_SEL_DRV1, 0x31c)
+ FIELD(BNK1_SEL_DRV1, BNK1_SEL_DRV, 0, 20)
+REG32(BNK1_SEL_SLEW, 0x320)
+ FIELD(BNK1_SEL_SLEW, BNK1_SEL_SLEW, 0, 26)
+REG32(BNK1_EN_DFT_OPT_INV, 0x324)
+ FIELD(BNK1_EN_DFT_OPT_INV, BNK1_EN_DFT_OPT_INV, 0, 26)
+REG32(BNK1_EN_PAD2PAD_LOOPBACK, 0x328)
+ FIELD(BNK1_EN_PAD2PAD_LOOPBACK, BNK1_EN_PAD2PAD_LOOPBACK, 0, 13)
+REG32(BNK1_RX_SPARE0, 0x32c)
+REG32(BNK1_RX_SPARE1, 0x330)
+ FIELD(BNK1_RX_SPARE1, BNK1_RX_SPARE, 0, 20)
+REG32(BNK1_TX_SPARE0, 0x334)
+REG32(BNK1_TX_SPARE1, 0x338)
+ FIELD(BNK1_TX_SPARE1, BNK1_TX_SPARE, 0, 20)
+REG32(BNK1_SEL_EN1P8, 0x33c)
+ FIELD(BNK1_SEL_EN1P8, BNK1_SEL_EN1P8, 0, 1)
+REG32(BNK1_EN_B_POR_DETECT, 0x340)
+ FIELD(BNK1_EN_B_POR_DETECT, BNK1_EN_B_POR_DETECT, 0, 1)
+REG32(BNK1_LPF_BYP_POR_DETECT, 0x344)
+ FIELD(BNK1_LPF_BYP_POR_DETECT, BNK1_LPF_BYP_POR_DETECT, 0, 1)
+REG32(BNK1_EN_LATCH, 0x348)
+ FIELD(BNK1_EN_LATCH, BNK1_EN_LATCH, 0, 1)
+REG32(BNK1_VBG_LPF_BYP_B, 0x34c)
+ FIELD(BNK1_VBG_LPF_BYP_B, BNK1_VBG_LPF_BYP_B, 0, 1)
+REG32(BNK1_EN_AMP_B, 0x350)
+ FIELD(BNK1_EN_AMP_B, BNK1_EN_AMP_B, 0, 2)
+REG32(BNK1_SPARE_BIAS, 0x354)
+ FIELD(BNK1_SPARE_BIAS, BNK1_SPARE_BIAS, 0, 4)
+REG32(BNK1_DRIVER_BIAS, 0x358)
+ FIELD(BNK1_DRIVER_BIAS, BNK1_DRIVER_BIAS, 0, 15)
+REG32(BNK1_VMODE, 0x35c)
+ FIELD(BNK1_VMODE, BNK1_VMODE, 0, 1)
+REG32(BNK1_SEL_AUX_IO_RX, 0x360)
+ FIELD(BNK1_SEL_AUX_IO_RX, BNK1_SEL_AUX_IO_RX, 0, 26)
+REG32(BNK1_EN_TX_HS_MODE, 0x364)
+ FIELD(BNK1_EN_TX_HS_MODE, BNK1_EN_TX_HS_MODE, 0, 26)
+REG32(SD0_CLK_CTRL, 0x400)
+ FIELD(SD0_CLK_CTRL, SDIO0_FBCLK_SEL, 2, 1)
+ FIELD(SD0_CLK_CTRL, SDIO0_RX_SRC_SEL, 0, 2)
+REG32(SD0_CTRL_REG, 0x404)
+ FIELD(SD0_CTRL_REG, SD0_EMMC_SEL, 0, 1)
+REG32(SD0_CONFIG_REG1, 0x410)
+ FIELD(SD0_CONFIG_REG1, SD0_BASECLK, 7, 8)
+ FIELD(SD0_CONFIG_REG1, SD0_TUNIGCOUNT, 1, 6)
+ FIELD(SD0_CONFIG_REG1, SD0_ASYNCWKPENA, 0, 1)
+REG32(SD0_CONFIG_REG2, 0x414)
+ FIELD(SD0_CONFIG_REG2, SD0_SLOTTYPE, 12, 2)
+ FIELD(SD0_CONFIG_REG2, SD0_ASYCINTR, 11, 1)
+ FIELD(SD0_CONFIG_REG2, SD0_64BIT, 10, 1)
+ FIELD(SD0_CONFIG_REG2, SD0_1P8V, 9, 1)
+ FIELD(SD0_CONFIG_REG2, SD0_3P0V, 8, 1)
+ FIELD(SD0_CONFIG_REG2, SD0_3P3V, 7, 1)
+ FIELD(SD0_CONFIG_REG2, SD0_SUSPRES, 6, 1)
+ FIELD(SD0_CONFIG_REG2, SD0_SDMA, 5, 1)
+ FIELD(SD0_CONFIG_REG2, SD0_HIGHSPEED, 4, 1)
+ FIELD(SD0_CONFIG_REG2, SD0_ADMA2, 3, 1)
+ FIELD(SD0_CONFIG_REG2, SD0_8BIT, 2, 1)
+ FIELD(SD0_CONFIG_REG2, SD0_MAXBLK, 0, 2)
+REG32(SD0_CONFIG_REG3, 0x418)
+ FIELD(SD0_CONFIG_REG3, SD0_TUNINGSDR50, 10, 1)
+ FIELD(SD0_CONFIG_REG3, SD0_RETUNETMR, 6, 4)
+ FIELD(SD0_CONFIG_REG3, SD0_DDRIVER, 5, 1)
+ FIELD(SD0_CONFIG_REG3, SD0_CDRIVER, 4, 1)
+ FIELD(SD0_CONFIG_REG3, SD0_ADRIVER, 3, 1)
+ FIELD(SD0_CONFIG_REG3, SD0_DDR50, 2, 1)
+ FIELD(SD0_CONFIG_REG3, SD0_SDR104, 1, 1)
+ FIELD(SD0_CONFIG_REG3, SD0_SDR50, 0, 1)
+REG32(SD0_INITPRESET, 0x41c)
+ FIELD(SD0_INITPRESET, SD0_INITPRESET, 0, 13)
+REG32(SD0_DSPPRESET, 0x420)
+ FIELD(SD0_DSPPRESET, SD0_DSPPRESET, 0, 13)
+REG32(SD0_HSPDPRESET, 0x424)
+ FIELD(SD0_HSPDPRESET, SD0_HSPDPRESET, 0, 13)
+REG32(SD0_SDR12PRESET, 0x428)
+ FIELD(SD0_SDR12PRESET, SD0_SDR12PRESET, 0, 13)
+REG32(SD0_SDR25PRESET, 0x42c)
+ FIELD(SD0_SDR25PRESET, SD0_SDR25PRESET, 0, 13)
+REG32(SD0_SDR50PRSET, 0x430)
+ FIELD(SD0_SDR50PRSET, SD0_SDR50PRESET, 0, 13)
+REG32(SD0_SDR104PRST, 0x434)
+ FIELD(SD0_SDR104PRST, SD0_SDR104PRESET, 0, 13)
+REG32(SD0_DDR50PRESET, 0x438)
+ FIELD(SD0_DDR50PRESET, SD0_DDR50PRESET, 0, 13)
+REG32(SD0_MAXCUR1P8, 0x43c)
+ FIELD(SD0_MAXCUR1P8, SD0_MAXCUR1P8, 0, 8)
+REG32(SD0_MAXCUR3P0, 0x440)
+ FIELD(SD0_MAXCUR3P0, SD0_MAXCUR3P0, 0, 8)
+REG32(SD0_MAXCUR3P3, 0x444)
+ FIELD(SD0_MAXCUR3P3, SD0_MAXCUR3P3, 0, 8)
+REG32(SD0_DLL_CTRL, 0x448)
+ FIELD(SD0_DLL_CTRL, SD0_CLKSTABLE_CFG, 9, 1)
+ FIELD(SD0_DLL_CTRL, SD0_DLL_CFG, 5, 4)
+ FIELD(SD0_DLL_CTRL, SD0_DLL_PSDONE, 4, 1)
+ FIELD(SD0_DLL_CTRL, SD0_DLL_OVF, 3, 1)
+ FIELD(SD0_DLL_CTRL, SD0_DLL_RST, 2, 1)
+ FIELD(SD0_DLL_CTRL, SD0_DLL_TESTMODE, 1, 1)
+ FIELD(SD0_DLL_CTRL, SD0_DLL_LOCK, 0, 1)
+REG32(SD0_CDN_CTRL, 0x44c)
+ FIELD(SD0_CDN_CTRL, SD0_CDN_CTRL, 0, 1)
+REG32(SD0_DLL_TEST, 0x450)
+ FIELD(SD0_DLL_TEST, DLL_DIV, 16, 8)
+ FIELD(SD0_DLL_TEST, DLL_TX_SEL, 9, 7)
+ FIELD(SD0_DLL_TEST, DLL_RX_SEL, 0, 9)
+REG32(SD0_RX_TUNING_SEL, 0x454)
+ FIELD(SD0_RX_TUNING_SEL, SD0_RX_SEL, 0, 9)
+REG32(SD0_DLL_DIV_MAP0, 0x458)
+ FIELD(SD0_DLL_DIV_MAP0, DIV_3, 24, 8)
+ FIELD(SD0_DLL_DIV_MAP0, DIV_2, 16, 8)
+ FIELD(SD0_DLL_DIV_MAP0, DIV_1, 8, 8)
+ FIELD(SD0_DLL_DIV_MAP0, DIV_0, 0, 8)
+REG32(SD0_DLL_DIV_MAP1, 0x45c)
+ FIELD(SD0_DLL_DIV_MAP1, DIV_7, 24, 8)
+ FIELD(SD0_DLL_DIV_MAP1, DIV_6, 16, 8)
+ FIELD(SD0_DLL_DIV_MAP1, DIV_5, 8, 8)
+ FIELD(SD0_DLL_DIV_MAP1, DIV_4, 0, 8)
+REG32(SD0_IOU_COHERENT_CTRL, 0x460)
+ FIELD(SD0_IOU_COHERENT_CTRL, SD0_AXI_COH, 0, 4)
+REG32(SD0_IOU_INTERCONNECT_ROUTE, 0x464)
+ FIELD(SD0_IOU_INTERCONNECT_ROUTE, SD0, 0, 1)
+REG32(SD0_IOU_RAM, 0x468)
+ FIELD(SD0_IOU_RAM, EMASA0, 6, 1)
+ FIELD(SD0_IOU_RAM, EMAB0, 3, 3)
+ FIELD(SD0_IOU_RAM, EMAA0, 0, 3)
+REG32(SD0_IOU_INTERCONNECT_QOS, 0x46c)
+ FIELD(SD0_IOU_INTERCONNECT_QOS, SD0_QOS, 0, 4)
+REG32(SD1_CLK_CTRL, 0x480)
+ FIELD(SD1_CLK_CTRL, SDIO1_FBCLK_SEL, 1, 1)
+ FIELD(SD1_CLK_CTRL, SDIO1_RX_SRC_SEL, 0, 1)
+REG32(SD1_CTRL_REG, 0x484)
+ FIELD(SD1_CTRL_REG, SD1_EMMC_SEL, 0, 1)
+REG32(SD1_CONFIG_REG1, 0x490)
+ FIELD(SD1_CONFIG_REG1, SD1_BASECLK, 7, 8)
+ FIELD(SD1_CONFIG_REG1, SD1_TUNIGCOUNT, 1, 6)
+ FIELD(SD1_CONFIG_REG1, SD1_ASYNCWKPENA, 0, 1)
+REG32(SD1_CONFIG_REG2, 0x494)
+ FIELD(SD1_CONFIG_REG2, SD1_SLOTTYPE, 12, 2)
+ FIELD(SD1_CONFIG_REG2, SD1_ASYCINTR, 11, 1)
+ FIELD(SD1_CONFIG_REG2, SD1_64BIT, 10, 1)
+ FIELD(SD1_CONFIG_REG2, SD1_1P8V, 9, 1)
+ FIELD(SD1_CONFIG_REG2, SD1_3P0V, 8, 1)
+ FIELD(SD1_CONFIG_REG2, SD1_3P3V, 7, 1)
+ FIELD(SD1_CONFIG_REG2, SD1_SUSPRES, 6, 1)
+ FIELD(SD1_CONFIG_REG2, SD1_SDMA, 5, 1)
+ FIELD(SD1_CONFIG_REG2, SD1_HIGHSPEED, 4, 1)
+ FIELD(SD1_CONFIG_REG2, SD1_ADMA2, 3, 1)
+ FIELD(SD1_CONFIG_REG2, SD1_8BIT, 2, 1)
+ FIELD(SD1_CONFIG_REG2, SD1_MAXBLK, 0, 2)
+REG32(SD1_CONFIG_REG3, 0x498)
+ FIELD(SD1_CONFIG_REG3, SD1_TUNINGSDR50, 10, 1)
+ FIELD(SD1_CONFIG_REG3, SD1_RETUNETMR, 6, 4)
+ FIELD(SD1_CONFIG_REG3, SD1_DDRIVER, 5, 1)
+ FIELD(SD1_CONFIG_REG3, SD1_CDRIVER, 4, 1)
+ FIELD(SD1_CONFIG_REG3, SD1_ADRIVER, 3, 1)
+ FIELD(SD1_CONFIG_REG3, SD1_DDR50, 2, 1)
+ FIELD(SD1_CONFIG_REG3, SD1_SDR104, 1, 1)
+ FIELD(SD1_CONFIG_REG3, SD1_SDR50, 0, 1)
+REG32(SD1_INITPRESET, 0x49c)
+ FIELD(SD1_INITPRESET, SD1_INITPRESET, 0, 13)
+REG32(SD1_DSPPRESET, 0x4a0)
+ FIELD(SD1_DSPPRESET, SD1_DSPPRESET, 0, 13)
+REG32(SD1_HSPDPRESET, 0x4a4)
+ FIELD(SD1_HSPDPRESET, SD1_HSPDPRESET, 0, 13)
+REG32(SD1_SDR12PRESET, 0x4a8)
+ FIELD(SD1_SDR12PRESET, SD1_SDR12PRESET, 0, 13)
+REG32(SD1_SDR25PRESET, 0x4ac)
+ FIELD(SD1_SDR25PRESET, SD1_SDR25PRESET, 0, 13)
+REG32(SD1_SDR50PRSET, 0x4b0)
+ FIELD(SD1_SDR50PRSET, SD1_SDR50PRESET, 0, 13)
+REG32(SD1_SDR104PRST, 0x4b4)
+ FIELD(SD1_SDR104PRST, SD1_SDR104PRESET, 0, 13)
+REG32(SD1_DDR50PRESET, 0x4b8)
+ FIELD(SD1_DDR50PRESET, SD1_DDR50PRESET, 0, 13)
+REG32(SD1_MAXCUR1P8, 0x4bc)
+ FIELD(SD1_MAXCUR1P8, SD1_MAXCUR1P8, 0, 8)
+REG32(SD1_MAXCUR3P0, 0x4c0)
+ FIELD(SD1_MAXCUR3P0, SD1_MAXCUR3P0, 0, 8)
+REG32(SD1_MAXCUR3P3, 0x4c4)
+ FIELD(SD1_MAXCUR3P3, SD1_MAXCUR3P3, 0, 8)
+REG32(SD1_DLL_CTRL, 0x4c8)
+ FIELD(SD1_DLL_CTRL, SD1_CLKSTABLE_CFG, 9, 1)
+ FIELD(SD1_DLL_CTRL, SD1_DLL_CFG, 5, 4)
+ FIELD(SD1_DLL_CTRL, SD1_DLL_PSDONE, 4, 1)
+ FIELD(SD1_DLL_CTRL, SD1_DLL_OVF, 3, 1)
+ FIELD(SD1_DLL_CTRL, SD1_DLL_RST, 2, 1)
+ FIELD(SD1_DLL_CTRL, SD1_DLL_TESTMODE, 1, 1)
+ FIELD(SD1_DLL_CTRL, SD1_DLL_LOCK, 0, 1)
+REG32(SD1_CDN_CTRL, 0x4cc)
+ FIELD(SD1_CDN_CTRL, SD1_CDN_CTRL, 0, 1)
+REG32(SD1_DLL_TEST, 0x4d0)
+ FIELD(SD1_DLL_TEST, DLL_DIV, 16, 8)
+ FIELD(SD1_DLL_TEST, DLL_TX_SEL, 9, 7)
+ FIELD(SD1_DLL_TEST, DLL_RX_SEL, 0, 9)
+REG32(SD1_RX_TUNING_SEL, 0x4d4)
+ FIELD(SD1_RX_TUNING_SEL, SD1_RX_SEL, 0, 9)
+REG32(SD1_DLL_DIV_MAP0, 0x4d8)
+ FIELD(SD1_DLL_DIV_MAP0, DIV_3, 24, 8)
+ FIELD(SD1_DLL_DIV_MAP0, DIV_2, 16, 8)
+ FIELD(SD1_DLL_DIV_MAP0, DIV_1, 8, 8)
+ FIELD(SD1_DLL_DIV_MAP0, DIV_0, 0, 8)
+REG32(SD1_DLL_DIV_MAP1, 0x4dc)
+ FIELD(SD1_DLL_DIV_MAP1, DIV_7, 24, 8)
+ FIELD(SD1_DLL_DIV_MAP1, DIV_6, 16, 8)
+ FIELD(SD1_DLL_DIV_MAP1, DIV_5, 8, 8)
+ FIELD(SD1_DLL_DIV_MAP1, DIV_4, 0, 8)
+REG32(SD1_IOU_COHERENT_CTRL, 0x4e0)
+ FIELD(SD1_IOU_COHERENT_CTRL, SD1_AXI_COH, 0, 4)
+REG32(SD1_IOU_INTERCONNECT_ROUTE, 0x4e4)
+ FIELD(SD1_IOU_INTERCONNECT_ROUTE, SD1, 0, 1)
+REG32(SD1_IOU_RAM, 0x4e8)
+ FIELD(SD1_IOU_RAM, EMASA0, 6, 1)
+ FIELD(SD1_IOU_RAM, EMAB0, 3, 3)
+ FIELD(SD1_IOU_RAM, EMAA0, 0, 3)
+REG32(SD1_IOU_INTERCONNECT_QOS, 0x4ec)
+ FIELD(SD1_IOU_INTERCONNECT_QOS, SD1_QOS, 0, 4)
+REG32(OSPI_QSPI_IOU_AXI_MUX_SEL, 0x504)
+ FIELD(OSPI_QSPI_IOU_AXI_MUX_SEL, OSPI_MUX_SEL, 1, 1)
+ FIELD(OSPI_QSPI_IOU_AXI_MUX_SEL, QSPI_OSPI_MUX_SEL, 0, 1)
+REG32(QSPI_IOU_COHERENT_CTRL, 0x508)
+ FIELD(QSPI_IOU_COHERENT_CTRL, QSPI_AXI_COH, 0, 4)
+REG32(QSPI_IOU_INTERCONNECT_ROUTE, 0x50c)
+ FIELD(QSPI_IOU_INTERCONNECT_ROUTE, QSPI, 0, 1)
+REG32(QSPI_IOU_RAM, 0x510)
+ FIELD(QSPI_IOU_RAM, EMASA1, 13, 1)
+ FIELD(QSPI_IOU_RAM, EMAB1, 10, 3)
+ FIELD(QSPI_IOU_RAM, EMAA1, 7, 3)
+ FIELD(QSPI_IOU_RAM, EMASA0, 6, 1)
+ FIELD(QSPI_IOU_RAM, EMAB0, 3, 3)
+ FIELD(QSPI_IOU_RAM, EMAA0, 0, 3)
+REG32(QSPI_IOU_INTERCONNECT_QOS, 0x514)
+ FIELD(QSPI_IOU_INTERCONNECT_QOS, QSPI_QOS, 0, 4)
+REG32(OSPI_IOU_COHERENT_CTRL, 0x530)
+ FIELD(OSPI_IOU_COHERENT_CTRL, OSPI_AXI_COH, 0, 4)
+REG32(OSPI_IOU_INTERCONNECT_ROUTE, 0x534)
+ FIELD(OSPI_IOU_INTERCONNECT_ROUTE, OSPI, 0, 1)
+REG32(OSPI_IOU_RAM, 0x538)
+ FIELD(OSPI_IOU_RAM, EMAS0, 5, 1)
+ FIELD(OSPI_IOU_RAM, EMAW0, 3, 2)
+ FIELD(OSPI_IOU_RAM, EMA0, 0, 3)
+REG32(OSPI_IOU_INTERCONNECT_QOS, 0x53c)
+ FIELD(OSPI_IOU_INTERCONNECT_QOS, OSPI_QOS, 0, 4)
+REG32(OSPI_REFCLK_DLY_CTRL, 0x540)
+ FIELD(OSPI_REFCLK_DLY_CTRL, DLY1, 3, 2)
+ FIELD(OSPI_REFCLK_DLY_CTRL, DLY0, 0, 3)
+REG32(CUR_PWR_ST, 0x600)
+ FIELD(CUR_PWR_ST, U2PMU, 0, 2)
+REG32(CONNECT_ST, 0x604)
+ FIELD(CONNECT_ST, U2PMU, 0, 1)
+REG32(PW_STATE_REQ, 0x608)
+ FIELD(PW_STATE_REQ, BIT_1_0, 0, 2)
+REG32(HOST_U2_PORT_DISABLE, 0x60c)
+ FIELD(HOST_U2_PORT_DISABLE, BIT_0, 0, 1)
+REG32(DBG_U2PMU, 0x610)
+REG32(DBG_U2PMU_EXT1, 0x614)
+REG32(DBG_U2PMU_EXT2, 0x618)
+ FIELD(DBG_U2PMU_EXT2, BIT_67_64, 0, 4)
+REG32(PME_GEN_U2PMU, 0x61c)
+ FIELD(PME_GEN_U2PMU, BIT_0, 0, 1)
+REG32(PWR_CONFIG_USB2, 0x620)
+ FIELD(PWR_CONFIG_USB2, STRAP, 0, 30)
+REG32(PHY_HUB, 0x624)
+ FIELD(PHY_HUB, VBUS_CTRL, 1, 1)
+ FIELD(PHY_HUB, OVER_CURRENT, 0, 1)
+REG32(CTRL, 0x700)
+ FIELD(CTRL, SLVERR_ENABLE, 0, 1)
+REG32(ISR, 0x800)
+ FIELD(ISR, ADDR_DECODE_ERR, 0, 1)
+REG32(IMR, 0x804)
+ FIELD(IMR, ADDR_DECODE_ERR, 0, 1)
+REG32(IER, 0x808)
+ FIELD(IER, ADDR_DECODE_ERR, 0, 1)
+REG32(IDR, 0x80c)
+ FIELD(IDR, ADDR_DECODE_ERR, 0, 1)
+REG32(ITR, 0x810)
+ FIELD(ITR, ADDR_DECODE_ERR, 0, 1)
+REG32(PARITY_ISR, 0x814)
+ FIELD(PARITY_ISR, PERR_AXI_SD1_IOU, 12, 1)
+ FIELD(PARITY_ISR, PERR_AXI_SD0_IOU, 11, 1)
+ FIELD(PARITY_ISR, PERR_AXI_QSPI_IOU, 10, 1)
+ FIELD(PARITY_ISR, PERR_AXI_OSPI_IOU, 9, 1)
+ FIELD(PARITY_ISR, PERR_IOU_SD1, 8, 1)
+ FIELD(PARITY_ISR, PERR_IOU_SD0, 7, 1)
+ FIELD(PARITY_ISR, PERR_IOU_QSPI1, 6, 1)
+ FIELD(PARITY_ISR, PERR_IOUSLCR_SECURE_APB, 5, 1)
+ FIELD(PARITY_ISR, PERR_IOUSLCR_APB, 4, 1)
+ FIELD(PARITY_ISR, PERR_QSPI0_APB, 3, 1)
+ FIELD(PARITY_ISR, PERR_OSPI_APB, 2, 1)
+ FIELD(PARITY_ISR, PERR_I2C_APB, 1, 1)
+ FIELD(PARITY_ISR, PERR_GPIO_APB, 0, 1)
+REG32(PARITY_IMR, 0x818)
+ FIELD(PARITY_IMR, PERR_AXI_SD1_IOU, 12, 1)
+ FIELD(PARITY_IMR, PERR_AXI_SD0_IOU, 11, 1)
+ FIELD(PARITY_IMR, PERR_AXI_QSPI_IOU, 10, 1)
+ FIELD(PARITY_IMR, PERR_AXI_OSPI_IOU, 9, 1)
+ FIELD(PARITY_IMR, PERR_IOU_SD1, 8, 1)
+ FIELD(PARITY_IMR, PERR_IOU_SD0, 7, 1)
+ FIELD(PARITY_IMR, PERR_IOU_QSPI1, 6, 1)
+ FIELD(PARITY_IMR, PERR_IOUSLCR_SECURE_APB, 5, 1)
+ FIELD(PARITY_IMR, PERR_IOUSLCR_APB, 4, 1)
+ FIELD(PARITY_IMR, PERR_QSPI0_APB, 3, 1)
+ FIELD(PARITY_IMR, PERR_OSPI_APB, 2, 1)
+ FIELD(PARITY_IMR, PERR_I2C_APB, 1, 1)
+ FIELD(PARITY_IMR, PERR_GPIO_APB, 0, 1)
+REG32(PARITY_IER, 0x81c)
+ FIELD(PARITY_IER, PERR_AXI_SD1_IOU, 12, 1)
+ FIELD(PARITY_IER, PERR_AXI_SD0_IOU, 11, 1)
+ FIELD(PARITY_IER, PERR_AXI_QSPI_IOU, 10, 1)
+ FIELD(PARITY_IER, PERR_AXI_OSPI_IOU, 9, 1)
+ FIELD(PARITY_IER, PERR_IOU_SD1, 8, 1)
+ FIELD(PARITY_IER, PERR_IOU_SD0, 7, 1)
+ FIELD(PARITY_IER, PERR_IOU_QSPI1, 6, 1)
+ FIELD(PARITY_IER, PERR_IOUSLCR_SECURE_APB, 5, 1)
+ FIELD(PARITY_IER, PERR_IOUSLCR_APB, 4, 1)
+ FIELD(PARITY_IER, PERR_QSPI0_APB, 3, 1)
+ FIELD(PARITY_IER, PERR_OSPI_APB, 2, 1)
+ FIELD(PARITY_IER, PERR_I2C_APB, 1, 1)
+ FIELD(PARITY_IER, PERR_GPIO_APB, 0, 1)
+REG32(PARITY_IDR, 0x820)
+ FIELD(PARITY_IDR, PERR_AXI_SD1_IOU, 12, 1)
+ FIELD(PARITY_IDR, PERR_AXI_SD0_IOU, 11, 1)
+ FIELD(PARITY_IDR, PERR_AXI_QSPI_IOU, 10, 1)
+ FIELD(PARITY_IDR, PERR_AXI_OSPI_IOU, 9, 1)
+ FIELD(PARITY_IDR, PERR_IOU_SD1, 8, 1)
+ FIELD(PARITY_IDR, PERR_IOU_SD0, 7, 1)
+ FIELD(PARITY_IDR, PERR_IOU_QSPI1, 6, 1)
+ FIELD(PARITY_IDR, PERR_IOUSLCR_SECURE_APB, 5, 1)
+ FIELD(PARITY_IDR, PERR_IOUSLCR_APB, 4, 1)
+ FIELD(PARITY_IDR, PERR_QSPI0_APB, 3, 1)
+ FIELD(PARITY_IDR, PERR_OSPI_APB, 2, 1)
+ FIELD(PARITY_IDR, PERR_I2C_APB, 1, 1)
+ FIELD(PARITY_IDR, PERR_GPIO_APB, 0, 1)
+REG32(PARITY_ITR, 0x824)
+ FIELD(PARITY_ITR, PERR_AXI_SD1_IOU, 12, 1)
+ FIELD(PARITY_ITR, PERR_AXI_SD0_IOU, 11, 1)
+ FIELD(PARITY_ITR, PERR_AXI_QSPI_IOU, 10, 1)
+ FIELD(PARITY_ITR, PERR_AXI_OSPI_IOU, 9, 1)
+ FIELD(PARITY_ITR, PERR_IOU_SD1, 8, 1)
+ FIELD(PARITY_ITR, PERR_IOU_SD0, 7, 1)
+ FIELD(PARITY_ITR, PERR_IOU_QSPI1, 6, 1)
+ FIELD(PARITY_ITR, PERR_IOUSLCR_SECURE_APB, 5, 1)
+ FIELD(PARITY_ITR, PERR_IOUSLCR_APB, 4, 1)
+ FIELD(PARITY_ITR, PERR_QSPI0_APB, 3, 1)
+ FIELD(PARITY_ITR, PERR_OSPI_APB, 2, 1)
+ FIELD(PARITY_ITR, PERR_I2C_APB, 1, 1)
+ FIELD(PARITY_ITR, PERR_GPIO_APB, 0, 1)
+REG32(WPROT0, 0x828)
+ FIELD(WPROT0, ACTIVE, 0, 1)
+
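+/*
+ * Interrupt registers follow the usual ISR/IMR/IER/IDR/ITR scheme: ISR
+ * latches status (write one to clear), IMR is the read-only mask, writes
+ * to IER/IDR clear/set mask bits and ITR forces status bits for testing.
+ * The hooks below keep IMR/ISR and the outgoing IRQ lines in sync.
+ */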
+static void parity_imr_update_irq(XlnxVersalPmcIouSlcr *s)
+{
+ bool pending = s->regs[R_PARITY_ISR] & ~s->regs[R_PARITY_IMR];
+ qemu_set_irq(s->irq_parity_imr, pending);
+}
+
+static void parity_isr_postw(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque);
+ parity_imr_update_irq(s);
+}
+
+static uint64_t parity_ier_prew(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque);
+ uint32_t val = val64;
+
+ s->regs[R_PARITY_IMR] &= ~val;
+ parity_imr_update_irq(s);
+ return 0;
+}
+
+static uint64_t parity_idr_prew(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque);
+ uint32_t val = val64;
+
+ s->regs[R_PARITY_IMR] |= val;
+ parity_imr_update_irq(s);
+ return 0;
+}
+
+static uint64_t parity_itr_prew(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque);
+ uint32_t val = val64;
+
+ s->regs[R_PARITY_ISR] |= val;
+ parity_imr_update_irq(s);
+ return 0;
+}
+
+static void imr_update_irq(XlnxVersalPmcIouSlcr *s)
+{
+ bool pending = s->regs[R_ISR] & ~s->regs[R_IMR];
+ qemu_set_irq(s->irq_imr, pending);
+}
+
+static void isr_postw(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque);
+ imr_update_irq(s);
+}
+
+static uint64_t ier_prew(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque);
+ uint32_t val = val64;
+
+ s->regs[R_IMR] &= ~val;
+ imr_update_irq(s);
+ return 0;
+}
+
+static uint64_t idr_prew(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque);
+ uint32_t val = val64;
+
+ s->regs[R_IMR] |= val;
+ imr_update_irq(s);
+ return 0;
+}
+
+static uint64_t itr_prew(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque);
+ uint32_t val = val64;
+
+ s->regs[R_ISR] |= val;
+ imr_update_irq(s);
+ return 0;
+}
+
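+/*
+ * The SD pre-write hooks below drive the "sd-emmc-sel" GPIO outputs whenever
+ * the SD/eMMC select bit changes, so whatever is wired to those lines can
+ * react to the new selection.
+ */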
+static uint64_t sd0_ctrl_reg_prew(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque);
+ uint32_t prev = ARRAY_FIELD_EX32(s->regs, SD0_CTRL_REG, SD0_EMMC_SEL);
+
+ if (prev != (val64 & R_SD0_CTRL_REG_SD0_EMMC_SEL_MASK)) {
+ qemu_set_irq(s->sd_emmc_sel[0], !!val64);
+ }
+
+ return val64;
+}
+
+static uint64_t sd1_ctrl_reg_prew(RegisterInfo *reg, uint64_t val64)
+{
+ XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque);
+ uint32_t prev = ARRAY_FIELD_EX32(s->regs, SD1_CTRL_REG, SD1_EMMC_SEL);
+
+ if (prev != (val64 & R_SD1_CTRL_REG_SD1_EMMC_SEL_MASK)) {
+ qemu_set_irq(s->sd_emmc_sel[1], !!val64);
+ }
+
+ return val64;
+}
+
+static uint64_t ospi_qspi_iou_axi_mux_sel_prew(RegisterInfo *reg,
+ uint64_t val64)
+{
+ XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(reg->opaque);
+ uint32_t val32 = (uint32_t) val64;
+ uint8_t ospi_mux_sel = FIELD_EX32(val32, OSPI_QSPI_IOU_AXI_MUX_SEL,
+ OSPI_MUX_SEL);
+ uint8_t qspi_ospi_mux_sel = FIELD_EX32(val32, OSPI_QSPI_IOU_AXI_MUX_SEL,
+ QSPI_OSPI_MUX_SEL);
+
+ if (ospi_mux_sel !=
+ ARRAY_FIELD_EX32(s->regs, OSPI_QSPI_IOU_AXI_MUX_SEL, OSPI_MUX_SEL)) {
+ qemu_set_irq(s->ospi_mux_sel, !!ospi_mux_sel);
+ }
+
+ if (qspi_ospi_mux_sel !=
+ ARRAY_FIELD_EX32(s->regs, OSPI_QSPI_IOU_AXI_MUX_SEL,
+ QSPI_OSPI_MUX_SEL)) {
+ qemu_set_irq(s->qspi_ospi_mux_sel, !!qspi_ospi_mux_sel);
+ }
+
+ return val64;
+}
+
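+/*
+ * Per-register access descriptors: .reset is the power-on value, .rsvd
+ * marks reserved bits, .ro marks read-only bits, .w1c marks bits with
+ * write-one-to-clear semantics, and pre_write/post_write point at the
+ * hooks defined above.
+ */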
+static RegisterAccessInfo pmc_iou_slcr_regs_info[] = {
+ { .name = "MIO_PIN_0", .addr = A_MIO_PIN_0,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_1", .addr = A_MIO_PIN_1,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_2", .addr = A_MIO_PIN_2,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_3", .addr = A_MIO_PIN_3,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_4", .addr = A_MIO_PIN_4,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_5", .addr = A_MIO_PIN_5,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_6", .addr = A_MIO_PIN_6,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_7", .addr = A_MIO_PIN_7,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_8", .addr = A_MIO_PIN_8,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_9", .addr = A_MIO_PIN_9,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_10", .addr = A_MIO_PIN_10,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_11", .addr = A_MIO_PIN_11,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_12", .addr = A_MIO_PIN_12,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_13", .addr = A_MIO_PIN_13,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_14", .addr = A_MIO_PIN_14,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_15", .addr = A_MIO_PIN_15,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_16", .addr = A_MIO_PIN_16,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_17", .addr = A_MIO_PIN_17,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_18", .addr = A_MIO_PIN_18,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_19", .addr = A_MIO_PIN_19,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_20", .addr = A_MIO_PIN_20,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_21", .addr = A_MIO_PIN_21,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_22", .addr = A_MIO_PIN_22,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_23", .addr = A_MIO_PIN_23,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_24", .addr = A_MIO_PIN_24,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_25", .addr = A_MIO_PIN_25,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_26", .addr = A_MIO_PIN_26,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_27", .addr = A_MIO_PIN_27,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_28", .addr = A_MIO_PIN_28,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_29", .addr = A_MIO_PIN_29,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_30", .addr = A_MIO_PIN_30,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_31", .addr = A_MIO_PIN_31,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_32", .addr = A_MIO_PIN_32,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_33", .addr = A_MIO_PIN_33,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_34", .addr = A_MIO_PIN_34,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_35", .addr = A_MIO_PIN_35,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_36", .addr = A_MIO_PIN_36,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_37", .addr = A_MIO_PIN_37,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_38", .addr = A_MIO_PIN_38,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_39", .addr = A_MIO_PIN_39,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_40", .addr = A_MIO_PIN_40,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_41", .addr = A_MIO_PIN_41,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_42", .addr = A_MIO_PIN_42,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_43", .addr = A_MIO_PIN_43,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_44", .addr = A_MIO_PIN_44,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_45", .addr = A_MIO_PIN_45,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_46", .addr = A_MIO_PIN_46,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_47", .addr = A_MIO_PIN_47,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_48", .addr = A_MIO_PIN_48,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_49", .addr = A_MIO_PIN_49,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_50", .addr = A_MIO_PIN_50,
+ .rsvd = 0xfffffc01,
+ },{ .name = "MIO_PIN_51", .addr = A_MIO_PIN_51,
+ .rsvd = 0xfffffc01,
+ },{ .name = "BNK0_EN_RX", .addr = A_BNK0_EN_RX,
+ .reset = 0x3ffffff,
+ .rsvd = 0xfc000000,
+ },{ .name = "BNK0_SEL_RX0", .addr = A_BNK0_SEL_RX0,
+ .reset = 0xffffffff,
+ },{ .name = "BNK0_SEL_RX1", .addr = A_BNK0_SEL_RX1,
+ .reset = 0xfffff,
+ .rsvd = 0xfff00000,
+ },{ .name = "BNK0_EN_RX_SCHMITT_HYST", .addr = A_BNK0_EN_RX_SCHMITT_HYST,
+ .rsvd = 0xfc000000,
+ },{ .name = "BNK0_EN_WK_PD", .addr = A_BNK0_EN_WK_PD,
+ .rsvd = 0xfc000000,
+ },{ .name = "BNK0_EN_WK_PU", .addr = A_BNK0_EN_WK_PU,
+ .reset = 0x3ffffff,
+ .rsvd = 0xfc000000,
+ },{ .name = "BNK0_SEL_DRV0", .addr = A_BNK0_SEL_DRV0,
+ .reset = 0xffffffff,
+ },{ .name = "BNK0_SEL_DRV1", .addr = A_BNK0_SEL_DRV1,
+ .reset = 0xfffff,
+ .rsvd = 0xfff00000,
+ },{ .name = "BNK0_SEL_SLEW", .addr = A_BNK0_SEL_SLEW,
+ .rsvd = 0xfc000000,
+ },{ .name = "BNK0_EN_DFT_OPT_INV", .addr = A_BNK0_EN_DFT_OPT_INV,
+ .rsvd = 0xfc000000,
+ },{ .name = "BNK0_EN_PAD2PAD_LOOPBACK",
+ .addr = A_BNK0_EN_PAD2PAD_LOOPBACK,
+ .rsvd = 0xffffe000,
+ },{ .name = "BNK0_RX_SPARE0", .addr = A_BNK0_RX_SPARE0,
+ },{ .name = "BNK0_RX_SPARE1", .addr = A_BNK0_RX_SPARE1,
+ .rsvd = 0xfff00000,
+ },{ .name = "BNK0_TX_SPARE0", .addr = A_BNK0_TX_SPARE0,
+ },{ .name = "BNK0_TX_SPARE1", .addr = A_BNK0_TX_SPARE1,
+ .rsvd = 0xfff00000,
+ },{ .name = "BNK0_SEL_EN1P8", .addr = A_BNK0_SEL_EN1P8,
+ .rsvd = 0xfffffffe,
+ },{ .name = "BNK0_EN_B_POR_DETECT", .addr = A_BNK0_EN_B_POR_DETECT,
+ .rsvd = 0xfffffffe,
+ },{ .name = "BNK0_LPF_BYP_POR_DETECT", .addr = A_BNK0_LPF_BYP_POR_DETECT,
+ .reset = 0x1,
+ .rsvd = 0xfffffffe,
+ },{ .name = "BNK0_EN_LATCH", .addr = A_BNK0_EN_LATCH,
+ .rsvd = 0xfffffffe,
+ },{ .name = "BNK0_VBG_LPF_BYP_B", .addr = A_BNK0_VBG_LPF_BYP_B,
+ .reset = 0x1,
+ .rsvd = 0xfffffffe,
+ },{ .name = "BNK0_EN_AMP_B", .addr = A_BNK0_EN_AMP_B,
+ .rsvd = 0xfffffffc,
+ },{ .name = "BNK0_SPARE_BIAS", .addr = A_BNK0_SPARE_BIAS,
+ .rsvd = 0xfffffff0,
+ },{ .name = "BNK0_DRIVER_BIAS", .addr = A_BNK0_DRIVER_BIAS,
+ .rsvd = 0xffff8000,
+ },{ .name = "BNK0_VMODE", .addr = A_BNK0_VMODE,
+ .rsvd = 0xfffffffe,
+ .ro = 0x1,
+ },{ .name = "BNK0_SEL_AUX_IO_RX", .addr = A_BNK0_SEL_AUX_IO_RX,
+ .rsvd = 0xfc000000,
+ },{ .name = "BNK0_EN_TX_HS_MODE", .addr = A_BNK0_EN_TX_HS_MODE,
+ .rsvd = 0xfc000000,
+ },{ .name = "MIO_MST_TRI0", .addr = A_MIO_MST_TRI0,
+ .reset = 0x3ffffff,
+ .rsvd = 0xfc000000,
+ },{ .name = "MIO_MST_TRI1", .addr = A_MIO_MST_TRI1,
+ .reset = 0x3ffffff,
+ .rsvd = 0xfc000000,
+ },{ .name = "BNK1_EN_RX", .addr = A_BNK1_EN_RX,
+ .reset = 0x3ffffff,
+ .rsvd = 0xfc000000,
+ },{ .name = "BNK1_SEL_RX0", .addr = A_BNK1_SEL_RX0,
+ .reset = 0xffffffff,
+ },{ .name = "BNK1_SEL_RX1", .addr = A_BNK1_SEL_RX1,
+ .reset = 0xfffff,
+ .rsvd = 0xfff00000,
+ },{ .name = "BNK1_EN_RX_SCHMITT_HYST", .addr = A_BNK1_EN_RX_SCHMITT_HYST,
+ .rsvd = 0xfc000000,
+ },{ .name = "BNK1_EN_WK_PD", .addr = A_BNK1_EN_WK_PD,
+ .rsvd = 0xfc000000,
+ },{ .name = "BNK1_EN_WK_PU", .addr = A_BNK1_EN_WK_PU,
+ .reset = 0x3ffffff,
+ .rsvd = 0xfc000000,
+ },{ .name = "BNK1_SEL_DRV0", .addr = A_BNK1_SEL_DRV0,
+ .reset = 0xffffffff,
+ },{ .name = "BNK1_SEL_DRV1", .addr = A_BNK1_SEL_DRV1,
+ .reset = 0xfffff,
+ .rsvd = 0xfff00000,
+ },{ .name = "BNK1_SEL_SLEW", .addr = A_BNK1_SEL_SLEW,
+ .rsvd = 0xfc000000,
+ },{ .name = "BNK1_EN_DFT_OPT_INV", .addr = A_BNK1_EN_DFT_OPT_INV,
+ .rsvd = 0xfc000000,
+ },{ .name = "BNK1_EN_PAD2PAD_LOOPBACK",
+ .addr = A_BNK1_EN_PAD2PAD_LOOPBACK,
+ .rsvd = 0xffffe000,
+ },{ .name = "BNK1_RX_SPARE0", .addr = A_BNK1_RX_SPARE0,
+ },{ .name = "BNK1_RX_SPARE1", .addr = A_BNK1_RX_SPARE1,
+ .rsvd = 0xfff00000,
+ },{ .name = "BNK1_TX_SPARE0", .addr = A_BNK1_TX_SPARE0,
+ },{ .name = "BNK1_TX_SPARE1", .addr = A_BNK1_TX_SPARE1,
+ .rsvd = 0xfff00000,
+ },{ .name = "BNK1_SEL_EN1P8", .addr = A_BNK1_SEL_EN1P8,
+ .rsvd = 0xfffffffe,
+ },{ .name = "BNK1_EN_B_POR_DETECT", .addr = A_BNK1_EN_B_POR_DETECT,
+ .rsvd = 0xfffffffe,
+ },{ .name = "BNK1_LPF_BYP_POR_DETECT", .addr = A_BNK1_LPF_BYP_POR_DETECT,
+ .reset = 0x1,
+ .rsvd = 0xfffffffe,
+ },{ .name = "BNK1_EN_LATCH", .addr = A_BNK1_EN_LATCH,
+ .rsvd = 0xfffffffe,
+ },{ .name = "BNK1_VBG_LPF_BYP_B", .addr = A_BNK1_VBG_LPF_BYP_B,
+ .reset = 0x1,
+ .rsvd = 0xfffffffe,
+ },{ .name = "BNK1_EN_AMP_B", .addr = A_BNK1_EN_AMP_B,
+ .rsvd = 0xfffffffc,
+ },{ .name = "BNK1_SPARE_BIAS", .addr = A_BNK1_SPARE_BIAS,
+ .rsvd = 0xfffffff0,
+ },{ .name = "BNK1_DRIVER_BIAS", .addr = A_BNK1_DRIVER_BIAS,
+ .rsvd = 0xffff8000,
+ },{ .name = "BNK1_VMODE", .addr = A_BNK1_VMODE,
+ .rsvd = 0xfffffffe,
+ .ro = 0x1,
+ },{ .name = "BNK1_SEL_AUX_IO_RX", .addr = A_BNK1_SEL_AUX_IO_RX,
+ .rsvd = 0xfc000000,
+ },{ .name = "BNK1_EN_TX_HS_MODE", .addr = A_BNK1_EN_TX_HS_MODE,
+ .rsvd = 0xfc000000,
+ },{ .name = "SD0_CLK_CTRL", .addr = A_SD0_CLK_CTRL,
+ .rsvd = 0xfffffff8,
+ },{ .name = "SD0_CTRL_REG", .addr = A_SD0_CTRL_REG,
+ .rsvd = 0xfffffffe,
+ .pre_write = sd0_ctrl_reg_prew,
+ },{ .name = "SD0_CONFIG_REG1", .addr = A_SD0_CONFIG_REG1,
+ .reset = 0x3250,
+ .rsvd = 0xffff8000,
+ },{ .name = "SD0_CONFIG_REG2", .addr = A_SD0_CONFIG_REG2,
+ .reset = 0xffc,
+ .rsvd = 0xffffc000,
+ },{ .name = "SD0_CONFIG_REG3", .addr = A_SD0_CONFIG_REG3,
+ .reset = 0x407,
+ .rsvd = 0xfffff800,
+ },{ .name = "SD0_INITPRESET", .addr = A_SD0_INITPRESET,
+ .reset = 0x100,
+ .rsvd = 0xffffe000,
+ },{ .name = "SD0_DSPPRESET", .addr = A_SD0_DSPPRESET,
+ .reset = 0x4,
+ .rsvd = 0xffffe000,
+ },{ .name = "SD0_HSPDPRESET", .addr = A_SD0_HSPDPRESET,
+ .reset = 0x2,
+ .rsvd = 0xffffe000,
+ },{ .name = "SD0_SDR12PRESET", .addr = A_SD0_SDR12PRESET,
+ .reset = 0x4,
+ .rsvd = 0xffffe000,
+ },{ .name = "SD0_SDR25PRESET", .addr = A_SD0_SDR25PRESET,
+ .reset = 0x2,
+ .rsvd = 0xffffe000,
+ },{ .name = "SD0_SDR50PRSET", .addr = A_SD0_SDR50PRSET,
+ .reset = 0x1,
+ .rsvd = 0xffffe000,
+ },{ .name = "SD0_SDR104PRST", .addr = A_SD0_SDR104PRST,
+ .rsvd = 0xffffe000,
+ },{ .name = "SD0_DDR50PRESET", .addr = A_SD0_DDR50PRESET,
+ .reset = 0x2,
+ .rsvd = 0xffffe000,
+ },{ .name = "SD0_MAXCUR1P8", .addr = A_SD0_MAXCUR1P8,
+ .rsvd = 0xffffff00,
+ },{ .name = "SD0_MAXCUR3P0", .addr = A_SD0_MAXCUR3P0,
+ .rsvd = 0xffffff00,
+ },{ .name = "SD0_MAXCUR3P3", .addr = A_SD0_MAXCUR3P3,
+ .rsvd = 0xffffff00,
+ },{ .name = "SD0_DLL_CTRL", .addr = A_SD0_DLL_CTRL,
+ .reset = 0x1,
+ .rsvd = 0xfffffc00,
+ .ro = 0x19,
+ },{ .name = "SD0_CDN_CTRL", .addr = A_SD0_CDN_CTRL,
+ .rsvd = 0xfffffffe,
+ },{ .name = "SD0_DLL_TEST", .addr = A_SD0_DLL_TEST,
+ .rsvd = 0xff000000,
+ },{ .name = "SD0_RX_TUNING_SEL", .addr = A_SD0_RX_TUNING_SEL,
+ .rsvd = 0xfffffe00,
+ .ro = 0x1ff,
+ },{ .name = "SD0_DLL_DIV_MAP0", .addr = A_SD0_DLL_DIV_MAP0,
+ .reset = 0x50505050,
+ },{ .name = "SD0_DLL_DIV_MAP1", .addr = A_SD0_DLL_DIV_MAP1,
+ .reset = 0x50505050,
+ },{ .name = "SD0_IOU_COHERENT_CTRL", .addr = A_SD0_IOU_COHERENT_CTRL,
+ .rsvd = 0xfffffff0,
+ },{ .name = "SD0_IOU_INTERCONNECT_ROUTE",
+ .addr = A_SD0_IOU_INTERCONNECT_ROUTE,
+ .rsvd = 0xfffffffe,
+ },{ .name = "SD0_IOU_RAM", .addr = A_SD0_IOU_RAM,
+ .reset = 0x24,
+ .rsvd = 0xffffff80,
+ },{ .name = "SD0_IOU_INTERCONNECT_QOS",
+ .addr = A_SD0_IOU_INTERCONNECT_QOS,
+ .rsvd = 0xfffffff0,
+ },{ .name = "SD1_CLK_CTRL", .addr = A_SD1_CLK_CTRL,
+ .rsvd = 0xfffffffc,
+ },{ .name = "SD1_CTRL_REG", .addr = A_SD1_CTRL_REG,
+ .rsvd = 0xfffffffe,
+ .pre_write = sd1_ctrl_reg_prew,
+ },{ .name = "SD1_CONFIG_REG1", .addr = A_SD1_CONFIG_REG1,
+ .reset = 0x3250,
+ .rsvd = 0xffff8000,
+ },{ .name = "SD1_CONFIG_REG2", .addr = A_SD1_CONFIG_REG2,
+ .reset = 0xffc,
+ .rsvd = 0xffffc000,
+ },{ .name = "SD1_CONFIG_REG3", .addr = A_SD1_CONFIG_REG3,
+ .reset = 0x407,
+ .rsvd = 0xfffff800,
+ },{ .name = "SD1_INITPRESET", .addr = A_SD1_INITPRESET,
+ .reset = 0x100,
+ .rsvd = 0xffffe000,
+ },{ .name = "SD1_DSPPRESET", .addr = A_SD1_DSPPRESET,
+ .reset = 0x4,
+ .rsvd = 0xffffe000,
+ },{ .name = "SD1_HSPDPRESET", .addr = A_SD1_HSPDPRESET,
+ .reset = 0x2,
+ .rsvd = 0xffffe000,
+ },{ .name = "SD1_SDR12PRESET", .addr = A_SD1_SDR12PRESET,
+ .reset = 0x4,
+ .rsvd = 0xffffe000,
+ },{ .name = "SD1_SDR25PRESET", .addr = A_SD1_SDR25PRESET,
+ .reset = 0x2,
+ .rsvd = 0xffffe000,
+ },{ .name = "SD1_SDR50PRSET", .addr = A_SD1_SDR50PRSET,
+ .reset = 0x1,
+ .rsvd = 0xffffe000,
+ },{ .name = "SD1_SDR104PRST", .addr = A_SD1_SDR104PRST,
+ .rsvd = 0xffffe000,
+ },{ .name = "SD1_DDR50PRESET", .addr = A_SD1_DDR50PRESET,
+ .reset = 0x2,
+ .rsvd = 0xffffe000,
+ },{ .name = "SD1_MAXCUR1P8", .addr = A_SD1_MAXCUR1P8,
+ .rsvd = 0xffffff00,
+ },{ .name = "SD1_MAXCUR3P0", .addr = A_SD1_MAXCUR3P0,
+ .rsvd = 0xffffff00,
+ },{ .name = "SD1_MAXCUR3P3", .addr = A_SD1_MAXCUR3P3,
+ .rsvd = 0xffffff00,
+ },{ .name = "SD1_DLL_CTRL", .addr = A_SD1_DLL_CTRL,
+ .reset = 0x1,
+ .rsvd = 0xfffffc00,
+ .ro = 0x19,
+ },{ .name = "SD1_CDN_CTRL", .addr = A_SD1_CDN_CTRL,
+ .rsvd = 0xfffffffe,
+ },{ .name = "SD1_DLL_TEST", .addr = A_SD1_DLL_TEST,
+ .rsvd = 0xff000000,
+ },{ .name = "SD1_RX_TUNING_SEL", .addr = A_SD1_RX_TUNING_SEL,
+ .rsvd = 0xfffffe00,
+ .ro = 0x1ff,
+ },{ .name = "SD1_DLL_DIV_MAP0", .addr = A_SD1_DLL_DIV_MAP0,
+ .reset = 0x50505050,
+ },{ .name = "SD1_DLL_DIV_MAP1", .addr = A_SD1_DLL_DIV_MAP1,
+ .reset = 0x50505050,
+ },{ .name = "SD1_IOU_COHERENT_CTRL", .addr = A_SD1_IOU_COHERENT_CTRL,
+ .rsvd = 0xfffffff0,
+ },{ .name = "SD1_IOU_INTERCONNECT_ROUTE",
+ .addr = A_SD1_IOU_INTERCONNECT_ROUTE,
+ .rsvd = 0xfffffffe,
+ },{ .name = "SD1_IOU_RAM", .addr = A_SD1_IOU_RAM,
+ .reset = 0x24,
+ .rsvd = 0xffffff80,
+ },{ .name = "SD1_IOU_INTERCONNECT_QOS",
+ .addr = A_SD1_IOU_INTERCONNECT_QOS,
+ .rsvd = 0xfffffff0,
+ },{ .name = "OSPI_QSPI_IOU_AXI_MUX_SEL",
+ .addr = A_OSPI_QSPI_IOU_AXI_MUX_SEL,
+ .reset = 0x1,
+ .rsvd = 0xfffffffc,
+ .pre_write = ospi_qspi_iou_axi_mux_sel_prew,
+ },{ .name = "QSPI_IOU_COHERENT_CTRL", .addr = A_QSPI_IOU_COHERENT_CTRL,
+ .rsvd = 0xfffffff0,
+ },{ .name = "QSPI_IOU_INTERCONNECT_ROUTE",
+ .addr = A_QSPI_IOU_INTERCONNECT_ROUTE,
+ .rsvd = 0xfffffffe,
+ },{ .name = "QSPI_IOU_RAM", .addr = A_QSPI_IOU_RAM,
+ .reset = 0x1224,
+ .rsvd = 0xffffc000,
+ },{ .name = "QSPI_IOU_INTERCONNECT_QOS",
+ .addr = A_QSPI_IOU_INTERCONNECT_QOS,
+ .rsvd = 0xfffffff0,
+ },{ .name = "OSPI_IOU_COHERENT_CTRL", .addr = A_OSPI_IOU_COHERENT_CTRL,
+ .rsvd = 0xfffffff0,
+ },{ .name = "OSPI_IOU_INTERCONNECT_ROUTE",
+ .addr = A_OSPI_IOU_INTERCONNECT_ROUTE,
+ .rsvd = 0xfffffffe,
+ },{ .name = "OSPI_IOU_RAM", .addr = A_OSPI_IOU_RAM,
+ .reset = 0xa,
+ .rsvd = 0xffffffc0,
+ },{ .name = "OSPI_IOU_INTERCONNECT_QOS",
+ .addr = A_OSPI_IOU_INTERCONNECT_QOS,
+ .rsvd = 0xfffffff0,
+ },{ .name = "OSPI_REFCLK_DLY_CTRL", .addr = A_OSPI_REFCLK_DLY_CTRL,
+ .reset = 0x13,
+ .rsvd = 0xffffffe0,
+ },{ .name = "CUR_PWR_ST", .addr = A_CUR_PWR_ST,
+ .rsvd = 0xfffffffc,
+ .ro = 0x3,
+ },{ .name = "CONNECT_ST", .addr = A_CONNECT_ST,
+ .rsvd = 0xfffffffe,
+ .ro = 0x1,
+ },{ .name = "PW_STATE_REQ", .addr = A_PW_STATE_REQ,
+ .rsvd = 0xfffffffc,
+ },{ .name = "HOST_U2_PORT_DISABLE", .addr = A_HOST_U2_PORT_DISABLE,
+ .rsvd = 0xfffffffe,
+ },{ .name = "DBG_U2PMU", .addr = A_DBG_U2PMU,
+ .ro = 0xffffffff,
+ },{ .name = "DBG_U2PMU_EXT1", .addr = A_DBG_U2PMU_EXT1,
+ .ro = 0xffffffff,
+ },{ .name = "DBG_U2PMU_EXT2", .addr = A_DBG_U2PMU_EXT2,
+ .rsvd = 0xfffffff0,
+ .ro = 0xf,
+ },{ .name = "PME_GEN_U2PMU", .addr = A_PME_GEN_U2PMU,
+ .rsvd = 0xfffffffe,
+ .ro = 0x1,
+ },{ .name = "PWR_CONFIG_USB2", .addr = A_PWR_CONFIG_USB2,
+ .rsvd = 0xc0000000,
+ },{ .name = "PHY_HUB", .addr = A_PHY_HUB,
+ .rsvd = 0xfffffffc,
+ .ro = 0x2,
+ },{ .name = "CTRL", .addr = A_CTRL,
+ },{ .name = "ISR", .addr = A_ISR,
+ .w1c = 0x1,
+ .post_write = isr_postw,
+ },{ .name = "IMR", .addr = A_IMR,
+ .reset = 0x1,
+ .ro = 0x1,
+ },{ .name = "IER", .addr = A_IER,
+ .pre_write = ier_prew,
+ },{ .name = "IDR", .addr = A_IDR,
+ .pre_write = idr_prew,
+ },{ .name = "ITR", .addr = A_ITR,
+ .pre_write = itr_prew,
+ },{ .name = "PARITY_ISR", .addr = A_PARITY_ISR,
+ .w1c = 0x1fff,
+ .post_write = parity_isr_postw,
+ },{ .name = "PARITY_IMR", .addr = A_PARITY_IMR,
+ .reset = 0x1fff,
+ .ro = 0x1fff,
+ },{ .name = "PARITY_IER", .addr = A_PARITY_IER,
+ .pre_write = parity_ier_prew,
+ },{ .name = "PARITY_IDR", .addr = A_PARITY_IDR,
+ .pre_write = parity_idr_prew,
+ },{ .name = "PARITY_ITR", .addr = A_PARITY_ITR,
+ .pre_write = parity_itr_prew,
+ },{ .name = "WPROT0", .addr = A_WPROT0,
+ .reset = 0x1,
+ }
+};
+
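+/*
+ * Reset is split in two phases: the enter phase restores every register to
+ * the reset value declared above, the hold phase then re-evaluates the IRQ
+ * lines and drives the OSPI/QSPI mux GPIOs to their default routing.
+ */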
+static void xlnx_versal_pmc_iou_slcr_reset_init(Object *obj, ResetType type)
+{
+ XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(obj);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
+ register_reset(&s->regs_info[i]);
+ }
+}
+
+static void xlnx_versal_pmc_iou_slcr_reset_hold(Object *obj)
+{
+ XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(obj);
+
+ parity_imr_update_irq(s);
+ imr_update_irq(s);
+
+    /*
+     * Set up the OSPI/QSPI mux.
+     * By default the AXI slave interface is enabled for the OSPI DMA.
+     */
+ qemu_set_irq(s->ospi_mux_sel, 0);
+ qemu_set_irq(s->qspi_ospi_mux_sel, 1);
+}
+
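+/* 32-bit only accesses, dispatched through the generic register API. */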
+static const MemoryRegionOps pmc_iou_slcr_ops = {
+ .read = register_read_memory,
+ .write = register_write_memory,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static void xlnx_versal_pmc_iou_slcr_realize(DeviceState *dev, Error **errp)
+{
+ XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(dev);
+
+ qdev_init_gpio_out_named(dev, s->sd_emmc_sel, "sd-emmc-sel", 2);
+ qdev_init_gpio_out_named(dev, &s->qspi_ospi_mux_sel,
+ "qspi-ospi-mux-sel", 1);
+ qdev_init_gpio_out_named(dev, &s->ospi_mux_sel, "ospi-mux-sel", 1);
+}
+
+static void xlnx_versal_pmc_iou_slcr_init(Object *obj)
+{
+ XlnxVersalPmcIouSlcr *s = XILINX_VERSAL_PMC_IOU_SLCR(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ RegisterInfoArray *reg_array;
+
+ memory_region_init(&s->iomem, obj, TYPE_XILINX_VERSAL_PMC_IOU_SLCR,
+ XILINX_VERSAL_PMC_IOU_SLCR_R_MAX * 4);
+ reg_array =
+ register_init_block32(DEVICE(obj), pmc_iou_slcr_regs_info,
+ ARRAY_SIZE(pmc_iou_slcr_regs_info),
+ s->regs_info, s->regs,
+ &pmc_iou_slcr_ops,
+ XILINX_VERSAL_PMC_IOU_SLCR_ERR_DEBUG,
+ XILINX_VERSAL_PMC_IOU_SLCR_R_MAX * 4);
+ memory_region_add_subregion(&s->iomem,
+ 0x0,
+ &reg_array->mem);
+ sysbus_init_mmio(sbd, &s->iomem);
+ sysbus_init_irq(sbd, &s->irq_parity_imr);
+ sysbus_init_irq(sbd, &s->irq_imr);
+}
+
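+/* Migration just saves/restores the raw register array. */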
+static const VMStateDescription vmstate_pmc_iou_slcr = {
+ .name = TYPE_XILINX_VERSAL_PMC_IOU_SLCR,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, XlnxVersalPmcIouSlcr,
+ XILINX_VERSAL_PMC_IOU_SLCR_R_MAX),
+ VMSTATE_END_OF_LIST(),
+ }
+};
+
+static void xlnx_versal_pmc_iou_slcr_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ dc->realize = xlnx_versal_pmc_iou_slcr_realize;
+ dc->vmsd = &vmstate_pmc_iou_slcr;
+ rc->phases.enter = xlnx_versal_pmc_iou_slcr_reset_init;
+ rc->phases.hold = xlnx_versal_pmc_iou_slcr_reset_hold;
+}
+
+static const TypeInfo xlnx_versal_pmc_iou_slcr_info = {
+ .name = TYPE_XILINX_VERSAL_PMC_IOU_SLCR,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(XlnxVersalPmcIouSlcr),
+ .class_init = xlnx_versal_pmc_iou_slcr_class_init,
+ .instance_init = xlnx_versal_pmc_iou_slcr_init,
+};
+
+static void xlnx_versal_pmc_iou_slcr_register_types(void)
+{
+ type_register_static(&xlnx_versal_pmc_iou_slcr_info);
+}
+
+type_init(xlnx_versal_pmc_iou_slcr_register_types)
diff --git a/hw/net/can/can_kvaser_pci.c b/hw/net/can/can_kvaser_pci.c
index 168b3a6..94b3a53 100644
--- a/hw/net/can/can_kvaser_pci.c
+++ b/hw/net/can/can_kvaser_pci.c
@@ -266,7 +266,6 @@ static const VMStateDescription vmstate_kvaser_pci = {
.name = "kvaser_pci",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_PCI_DEVICE(dev, KvaserPCIState),
/* Load this before sja_state. */
diff --git a/hw/net/can/can_mioe3680_pci.c b/hw/net/can/can_mioe3680_pci.c
index 7a79e26..29dc696 100644
--- a/hw/net/can/can_mioe3680_pci.c
+++ b/hw/net/can/can_mioe3680_pci.c
@@ -203,7 +203,6 @@ static const VMStateDescription vmstate_mioe3680_pci = {
.name = "mioe3680_pci",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_PCI_DEVICE(dev, Mioe3680PCIState),
VMSTATE_STRUCT(sja_state[0], Mioe3680PCIState, 0, vmstate_can_sja,
diff --git a/hw/net/can/can_pcm3680_pci.c b/hw/net/can/can_pcm3680_pci.c
index 8ef4e74..e8e57f4 100644
--- a/hw/net/can/can_pcm3680_pci.c
+++ b/hw/net/can/can_pcm3680_pci.c
@@ -204,7 +204,6 @@ static const VMStateDescription vmstate_pcm3680i_pci = {
.name = "pcm3680i_pci",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_PCI_DEVICE(dev, Pcm3680iPCIState),
VMSTATE_STRUCT(sja_state[0], Pcm3680iPCIState, 0,
diff --git a/hw/net/can/can_sja1000.c b/hw/net/can/can_sja1000.c
index 34eea68..3ba803e 100644
--- a/hw/net/can/can_sja1000.c
+++ b/hw/net/can/can_sja1000.c
@@ -928,7 +928,6 @@ const VMStateDescription vmstate_qemu_can_filter = {
.name = "qemu_can_filter",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT32(can_id, qemu_can_filter),
VMSTATE_UINT32(can_mask, qemu_can_filter),
@@ -952,7 +951,6 @@ const VMStateDescription vmstate_can_sja = {
.name = "can_sja",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.post_load = can_sja_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINT8(mode, CanSJA1000State),
diff --git a/hw/net/can/ctucan_core.c b/hw/net/can/ctucan_core.c
index d171c37..f2c3b6a 100644
--- a/hw/net/can/ctucan_core.c
+++ b/hw/net/can/ctucan_core.c
@@ -617,7 +617,6 @@ const VMStateDescription vmstate_qemu_ctucan_tx_buffer = {
.name = "qemu_ctucan_tx_buffer",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT8_ARRAY(data, CtuCanCoreMsgBuffer, CTUCAN_CORE_MSG_MAX_LEN),
VMSTATE_END_OF_LIST()
@@ -636,7 +635,6 @@ const VMStateDescription vmstate_ctucan = {
.name = "ctucan",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.post_load = ctucan_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINT32(mode_settings.u32, CtuCanCoreState),
diff --git a/hw/net/can/ctucan_pci.c b/hw/net/can/ctucan_pci.c
index f1c86cd..50f4ea6 100644
--- a/hw/net/can/ctucan_pci.c
+++ b/hw/net/can/ctucan_pci.c
@@ -215,7 +215,6 @@ static const VMStateDescription vmstate_ctucan_pci = {
.name = "ctucan_pci",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_PCI_DEVICE(dev, CtuCanPCIState),
VMSTATE_STRUCT(ctucan_state[0], CtuCanPCIState, 0, vmstate_ctucan,
diff --git a/hw/pci-host/pnv_phb3.c b/hw/pci-host/pnv_phb3.c
index 7fb35dc..aafd46b 100644
--- a/hw/pci-host/pnv_phb3.c
+++ b/hw/pci-host/pnv_phb3.c
@@ -792,7 +792,9 @@ static void pnv_phb3_translate_tve(PnvPhb3DMASpace *ds, hwaddr addr,
sh = tbl_shift * lev + tce_shift;
/* TODO: Multi-level untested */
- while ((lev--) >= 0) {
+ do {
+ lev--;
+
/* Grab the TCE address */
taddr = base | (((addr >> sh) & ((1ul << tbl_shift) - 1)) << 3);
if (dma_memory_read(&address_space_memory, taddr, &tce,
@@ -813,21 +815,22 @@ static void pnv_phb3_translate_tve(PnvPhb3DMASpace *ds, hwaddr addr,
}
sh -= tbl_shift;
base = tce & ~0xfffull;
- }
+ } while (lev >= 0);
/* We exit the loop with TCE being the final TCE */
- tce_mask = ~((1ull << tce_shift) - 1);
- tlb->iova = addr & tce_mask;
- tlb->translated_addr = tce & tce_mask;
- tlb->addr_mask = ~tce_mask;
- tlb->perm = tce & 3;
if ((is_write & !(tce & 2)) || ((!is_write) && !(tce & 1))) {
phb3_error(phb, "TCE access fault at 0x%"PRIx64, taddr);
phb3_error(phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr,
is_write ? 'W' : 'R', tve);
phb3_error(phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d",
tta, lev, tts, tps);
+ return;
}
+ tce_mask = ~((1ull << tce_shift) - 1);
+ tlb->iova = addr & tce_mask;
+ tlb->translated_addr = tce & tce_mask;
+ tlb->addr_mask = ~tce_mask;
+ tlb->perm = tce & 3;
}
}
diff --git a/hw/pci-host/pnv_phb4.c b/hw/pci-host/pnv_phb4.c
index a78add7..e91249e 100644
--- a/hw/pci-host/pnv_phb4.c
+++ b/hw/pci-host/pnv_phb4.c
@@ -1267,7 +1267,9 @@ static void pnv_phb4_translate_tve(PnvPhb4DMASpace *ds, hwaddr addr,
/* TODO: Limit to support IO page sizes */
/* TODO: Multi-level untested */
- while ((lev--) >= 0) {
+ do {
+ lev--;
+
/* Grab the TCE address */
taddr = base | (((addr >> sh) & ((1ul << tbl_shift) - 1)) << 3);
if (dma_memory_read(&address_space_memory, taddr, &tce,
@@ -1288,21 +1290,22 @@ static void pnv_phb4_translate_tve(PnvPhb4DMASpace *ds, hwaddr addr,
}
sh -= tbl_shift;
base = tce & ~0xfffull;
- }
+ } while (lev >= 0);
/* We exit the loop with TCE being the final TCE */
- tce_mask = ~((1ull << tce_shift) - 1);
- tlb->iova = addr & tce_mask;
- tlb->translated_addr = tce & tce_mask;
- tlb->addr_mask = ~tce_mask;
- tlb->perm = tce & 3;
if ((is_write & !(tce & 2)) || ((!is_write) && !(tce & 1))) {
phb_error(ds->phb, "TCE access fault at 0x%"PRIx64, taddr);
phb_error(ds->phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr,
is_write ? 'W' : 'R', tve);
phb_error(ds->phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d",
tta, lev, tts, tps);
+ return;
}
+ tce_mask = ~((1ull << tce_shift) - 1);
+ tlb->iova = addr & tce_mask;
+ tlb->translated_addr = tce & tce_mask;
+ tlb->addr_mask = ~tce_mask;
+ tlb->perm = tce & 3;
}
}
diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c
index bb5bee9..462c87d 100644
--- a/hw/ppc/ppc.c
+++ b/hw/ppc/ppc.c
@@ -1049,7 +1049,6 @@ const VMStateDescription vmstate_ppc_timebase = {
.name = "timebase",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.pre_save = timebase_pre_save,
.fields = (VMStateField []) {
VMSTATE_UINT64(guest_timebase, PPCTimebase),
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 72f5dce..3d6ec30 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -3053,7 +3053,7 @@ static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON);
PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
- if (d) {
+ if (d && bus) {
void *spapr = CAST(void, bus->parent, "spapr-vscsi");
VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);
diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
index a57ba70..a781e97 100644
--- a/hw/ppc/spapr_cpu_core.c
+++ b/hw/ppc/spapr_cpu_core.c
@@ -37,6 +37,11 @@ static void spapr_reset_vcpu(PowerPCCPU *cpu)
cpu_reset(cs);
+ /*
+ * "PowerPC Processor binding to IEEE 1275" defines the initial MSR state
+ * as 32bit (MSR_SF=0) in "8.2.1. Initial Register Values".
+ */
+ env->msr &= ~(1ULL << MSR_SF);
env->spr[SPR_HIOR] = 0;
lpcr = env->spr[SPR_LPCR];
diff --git a/hw/ppc/spapr_rtc.c b/hw/ppc/spapr_rtc.c
index fba4dfc..94a5510 100644
--- a/hw/ppc/spapr_rtc.c
+++ b/hw/ppc/spapr_rtc.c
@@ -26,9 +26,9 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
+#include "sysemu/rtc.h"
#include "hw/ppc/spapr.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
diff --git a/hw/ppc/spapr_vof.c b/hw/ppc/spapr_vof.c
index 40ce8fe..a33f940 100644
--- a/hw/ppc/spapr_vof.c
+++ b/hw/ppc/spapr_vof.c
@@ -88,8 +88,6 @@ void spapr_vof_reset(SpaprMachineState *spapr, void *fdt, Error **errp)
spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT,
stack_ptr, spapr->initrd_base,
spapr->initrd_size);
- /* VOF is 32bit BE so enforce MSR here */
- first_ppc_cpu->env.msr &= ~((1ULL << MSR_SF) | (1ULL << MSR_LE));
/*
* At this point the expected allocation map is:
diff --git a/hw/ppc/vof.c b/hw/ppc/vof.c
index 73adc44..2b63a62 100644
--- a/hw/ppc/vof.c
+++ b/hw/ppc/vof.c
@@ -16,7 +16,6 @@
#include "qemu/units.h"
#include "qemu/log.h"
#include "qapi/error.h"
-#include "exec/ram_addr.h"
#include "exec/address-spaces.h"
#include "hw/ppc/vof.h"
#include "hw/ppc/fdt.h"
diff --git a/hw/rtc/allwinner-rtc.c b/hw/rtc/allwinner-rtc.c
index 5606a51..7e493f0 100644
--- a/hw/rtc/allwinner-rtc.c
+++ b/hw/rtc/allwinner-rtc.c
@@ -23,9 +23,9 @@
#include "migration/vmstate.h"
#include "qemu/log.h"
#include "qemu/module.h"
-#include "qemu-common.h"
#include "hw/qdev-properties.h"
#include "hw/rtc/allwinner-rtc.h"
+#include "sysemu/rtc.h"
#include "trace.h"
/* RTC registers */
diff --git a/hw/rtc/aspeed_rtc.c b/hw/rtc/aspeed_rtc.c
index 3ca1183..f6da7b6 100644
--- a/hw/rtc/aspeed_rtc.c
+++ b/hw/rtc/aspeed_rtc.c
@@ -7,11 +7,11 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "hw/rtc/aspeed_rtc.h"
#include "migration/vmstate.h"
#include "qemu/log.h"
#include "qemu/timer.h"
+#include "sysemu/rtc.h"
#include "trace.h"
diff --git a/hw/rtc/ds1338.c b/hw/rtc/ds1338.c
index bc5ce1a..36d8121 100644
--- a/hw/rtc/ds1338.c
+++ b/hw/rtc/ds1338.c
@@ -11,12 +11,12 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "hw/i2c/i2c.h"
#include "migration/vmstate.h"
#include "qemu/bcd.h"
#include "qemu/module.h"
#include "qom/object.h"
+#include "sysemu/rtc.h"
/* Size of NVRAM including both the user-accessible area and the
* secondary register area.
diff --git a/hw/rtc/exynos4210_rtc.c b/hw/rtc/exynos4210_rtc.c
index 45c0a95..ae67641 100644
--- a/hw/rtc/exynos4210_rtc.c
+++ b/hw/rtc/exynos4210_rtc.c
@@ -26,7 +26,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/sysbus.h"
@@ -39,6 +38,7 @@
#include "hw/arm/exynos4210.h"
#include "qom/object.h"
+#include "sysemu/rtc.h"
#define DEBUG_RTC 0
diff --git a/hw/rtc/goldfish_rtc.c b/hw/rtc/goldfish_rtc.c
index e07ff01..35e493b 100644
--- a/hw/rtc/goldfish_rtc.c
+++ b/hw/rtc/goldfish_rtc.c
@@ -20,7 +20,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "hw/rtc/goldfish_rtc.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
@@ -29,6 +28,7 @@
#include "qemu/bitops.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
+#include "sysemu/rtc.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
diff --git a/hw/rtc/m41t80.c b/hw/rtc/m41t80.c
index 396d110..a00971a 100644
--- a/hw/rtc/m41t80.c
+++ b/hw/rtc/m41t80.c
@@ -8,13 +8,13 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/timer.h"
#include "qemu/bcd.h"
#include "hw/i2c/i2c.h"
#include "qom/object.h"
+#include "sysemu/rtc.h"
#define TYPE_M41T80 "m41t80"
OBJECT_DECLARE_SIMPLE_TYPE(M41t80State, M41T80)
diff --git a/hw/rtc/m48t59.c b/hw/rtc/m48t59.c
index 690f4e0..74345d9 100644
--- a/hw/rtc/m48t59.c
+++ b/hw/rtc/m48t59.c
@@ -24,12 +24,12 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/rtc/m48t59.h"
#include "qemu/timer.h"
#include "sysemu/runstate.h"
+#include "sysemu/rtc.h"
#include "sysemu/sysemu.h"
#include "hw/sysbus.h"
#include "qapi/error.h"
diff --git a/hw/rtc/mc146818rtc.c b/hw/rtc/mc146818rtc.c
index 4fbafdd..e61a0cc 100644
--- a/hw/rtc/mc146818rtc.c
+++ b/hw/rtc/mc146818rtc.c
@@ -23,7 +23,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu/cutils.h"
#include "qemu/module.h"
#include "qemu/bcd.h"
@@ -36,6 +35,7 @@
#include "sysemu/replay.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
+#include "sysemu/rtc.h"
#include "hw/rtc/mc146818rtc.h"
#include "hw/rtc/mc146818rtc_regs.h"
#include "migration/vmstate.h"
diff --git a/hw/rtc/pl031.c b/hw/rtc/pl031.c
index e7ced90..38d9d3c 100644
--- a/hw/rtc/pl031.c
+++ b/hw/rtc/pl031.c
@@ -12,7 +12,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "hw/rtc/pl031.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
@@ -20,6 +19,7 @@
#include "hw/sysbus.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
+#include "sysemu/rtc.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/module.h"
diff --git a/hw/rtc/twl92230.c b/hw/rtc/twl92230.c
index 0922df5..e8d5eda 100644
--- a/hw/rtc/twl92230.c
+++ b/hw/rtc/twl92230.c
@@ -20,13 +20,13 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu/timer.h"
#include "hw/i2c/i2c.h"
#include "hw/irq.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "sysemu/sysemu.h"
+#include "sysemu/rtc.h"
#include "qemu/bcd.h"
#include "qemu/module.h"
#include "qom/object.h"
diff --git a/hw/rtc/xlnx-zynqmp-rtc.c b/hw/rtc/xlnx-zynqmp-rtc.c
index 2bcd14d..3e7d61a 100644
--- a/hw/rtc/xlnx-zynqmp-rtc.c
+++ b/hw/rtc/xlnx-zynqmp-rtc.c
@@ -25,7 +25,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "hw/sysbus.h"
#include "hw/register.h"
#include "qemu/bitops.h"
@@ -34,6 +33,7 @@
#include "hw/irq.h"
#include "qemu/cutils.h"
#include "sysemu/sysemu.h"
+#include "sysemu/rtc.h"
#include "trace.h"
#include "hw/rtc/xlnx-zynqmp-rtc.h"
#include "migration/vmstate.h"
diff --git a/hw/s390x/tod-tcg.c b/hw/s390x/tod-tcg.c
index 9bb94ff..7646b4a 100644
--- a/hw/s390x/tod-tcg.c
+++ b/hw/s390x/tod-tcg.c
@@ -9,7 +9,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qapi/error.h"
#include "hw/s390x/tod.h"
#include "qemu/timer.h"
@@ -17,6 +16,7 @@
#include "qemu/module.h"
#include "cpu.h"
#include "tcg/tcg_s390x.h"
+#include "sysemu/rtc.h"
static void qemu_s390_tod_get(const S390TODState *td, S390TOD *tod,
Error **errp)
diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c
index c9da5ce..cd43945 100644
--- a/hw/scsi/megasas.c
+++ b/hw/scsi/megasas.c
@@ -19,11 +19,11 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "sysemu/dma.h"
#include "sysemu/block-backend.h"
+#include "sysemu/rtc.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "qemu/iov.h"
@@ -2315,7 +2315,6 @@ static const VMStateDescription vmstate_megasas_gen2 = {
.name = "megasas-gen2",
.version_id = 0,
.minimum_version_id = 0,
- .minimum_version_id_old = 0,
.fields = (VMStateField[]) {
VMSTATE_PCI_DEVICE(parent_obj, MegasasState),
VMSTATE_MSIX(parent_obj, MegasasState),
diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c
index 5181b0c..706cf0d 100644
--- a/hw/scsi/mptsas.c
+++ b/hw/scsi/mptsas.c
@@ -1363,7 +1363,6 @@ static const VMStateDescription vmstate_mptsas = {
.name = "mptsas",
.version_id = 0,
.minimum_version_id = 0,
- .minimum_version_id_old = 0,
.post_load = mptsas_post_load,
.fields = (VMStateField[]) {
VMSTATE_PCI_DEVICE(dev, MPTSASState),
diff --git a/hw/ssi/meson.build b/hw/ssi/meson.build
index 3d6bc82..0ded9cd 100644
--- a/hw/ssi/meson.build
+++ b/hw/ssi/meson.build
@@ -7,5 +7,6 @@ softmmu_ss.add(when: 'CONFIG_SSI', if_true: files('ssi.c'))
softmmu_ss.add(when: 'CONFIG_STM32F2XX_SPI', if_true: files('stm32f2xx_spi.c'))
softmmu_ss.add(when: 'CONFIG_XILINX_SPI', if_true: files('xilinx_spi.c'))
softmmu_ss.add(when: 'CONFIG_XILINX_SPIPS', if_true: files('xilinx_spips.c'))
+softmmu_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-ospi.c'))
softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_spi.c'))
softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_spi.c'))
diff --git a/hw/ssi/xlnx-versal-ospi.c b/hw/ssi/xlnx-versal-ospi.c
new file mode 100644
index 0000000..7ecd148
--- /dev/null
+++ b/hw/ssi/xlnx-versal-ospi.c
@@ -0,0 +1,1853 @@
+/*
+ * QEMU model of Xilinx Versal's OSPI controller.
+ *
+ * Copyright (c) 2021 Xilinx Inc.
+ * Written by Francisco Iglesias <francisco.iglesias@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "migration/vmstate.h"
+#include "hw/qdev-properties.h"
+#include "qemu/bitops.h"
+#include "qemu/log.h"
+#include "hw/irq.h"
+#include "hw/ssi/xlnx-versal-ospi.h"
+
+#ifndef XILINX_VERSAL_OSPI_ERR_DEBUG
+#define XILINX_VERSAL_OSPI_ERR_DEBUG 0
+#endif
+
+REG32(CONFIG_REG, 0x0)
+ FIELD(CONFIG_REG, IDLE_FLD, 31, 1)
+ FIELD(CONFIG_REG, DUAL_BYTE_OPCODE_EN_FLD, 30, 1)
+ FIELD(CONFIG_REG, CRC_ENABLE_FLD, 29, 1)
+ FIELD(CONFIG_REG, CONFIG_RESV2_FLD, 26, 3)
+ FIELD(CONFIG_REG, PIPELINE_PHY_FLD, 25, 1)
+ FIELD(CONFIG_REG, ENABLE_DTR_PROTOCOL_FLD, 24, 1)
+ FIELD(CONFIG_REG, ENABLE_AHB_DECODER_FLD, 23, 1)
+ FIELD(CONFIG_REG, MSTR_BAUD_DIV_FLD, 19, 4)
+ FIELD(CONFIG_REG, ENTER_XIP_MODE_IMM_FLD, 18, 1)
+ FIELD(CONFIG_REG, ENTER_XIP_MODE_FLD, 17, 1)
+ FIELD(CONFIG_REG, ENB_AHB_ADDR_REMAP_FLD, 16, 1)
+ FIELD(CONFIG_REG, ENB_DMA_IF_FLD, 15, 1)
+ FIELD(CONFIG_REG, WR_PROT_FLASH_FLD, 14, 1)
+ FIELD(CONFIG_REG, PERIPH_CS_LINES_FLD, 10, 4)
+ FIELD(CONFIG_REG, PERIPH_SEL_DEC_FLD, 9, 1)
+ FIELD(CONFIG_REG, ENB_LEGACY_IP_MODE_FLD, 8, 1)
+ FIELD(CONFIG_REG, ENB_DIR_ACC_CTLR_FLD, 7, 1)
+ FIELD(CONFIG_REG, RESET_CFG_FLD, 6, 1)
+ FIELD(CONFIG_REG, RESET_PIN_FLD, 5, 1)
+ FIELD(CONFIG_REG, HOLD_PIN_FLD, 4, 1)
+ FIELD(CONFIG_REG, PHY_MODE_ENABLE_FLD, 3, 1)
+ FIELD(CONFIG_REG, SEL_CLK_PHASE_FLD, 2, 1)
+ FIELD(CONFIG_REG, SEL_CLK_POL_FLD, 1, 1)
+ FIELD(CONFIG_REG, ENB_SPI_FLD, 0, 1)
+REG32(DEV_INSTR_RD_CONFIG_REG, 0x4)
+ FIELD(DEV_INSTR_RD_CONFIG_REG, RD_INSTR_RESV5_FLD, 29, 3)
+ FIELD(DEV_INSTR_RD_CONFIG_REG, DUMMY_RD_CLK_CYCLES_FLD, 24, 5)
+ FIELD(DEV_INSTR_RD_CONFIG_REG, RD_INSTR_RESV4_FLD, 21, 3)
+ FIELD(DEV_INSTR_RD_CONFIG_REG, MODE_BIT_ENABLE_FLD, 20, 1)
+ FIELD(DEV_INSTR_RD_CONFIG_REG, RD_INSTR_RESV3_FLD, 18, 2)
+ FIELD(DEV_INSTR_RD_CONFIG_REG, DATA_XFER_TYPE_EXT_MODE_FLD, 16, 2)
+ FIELD(DEV_INSTR_RD_CONFIG_REG, RD_INSTR_RESV2_FLD, 14, 2)
+ FIELD(DEV_INSTR_RD_CONFIG_REG, ADDR_XFER_TYPE_STD_MODE_FLD, 12, 2)
+ FIELD(DEV_INSTR_RD_CONFIG_REG, PRED_DIS_FLD, 11, 1)
+ FIELD(DEV_INSTR_RD_CONFIG_REG, DDR_EN_FLD, 10, 1)
+ FIELD(DEV_INSTR_RD_CONFIG_REG, INSTR_TYPE_FLD, 8, 2)
+ FIELD(DEV_INSTR_RD_CONFIG_REG, RD_OPCODE_NON_XIP_FLD, 0, 8)
+REG32(DEV_INSTR_WR_CONFIG_REG, 0x8)
+ FIELD(DEV_INSTR_WR_CONFIG_REG, WR_INSTR_RESV4_FLD, 29, 3)
+ FIELD(DEV_INSTR_WR_CONFIG_REG, DUMMY_WR_CLK_CYCLES_FLD, 24, 5)
+ FIELD(DEV_INSTR_WR_CONFIG_REG, WR_INSTR_RESV3_FLD, 18, 6)
+ FIELD(DEV_INSTR_WR_CONFIG_REG, DATA_XFER_TYPE_EXT_MODE_FLD, 16, 2)
+ FIELD(DEV_INSTR_WR_CONFIG_REG, WR_INSTR_RESV2_FLD, 14, 2)
+ FIELD(DEV_INSTR_WR_CONFIG_REG, ADDR_XFER_TYPE_STD_MODE_FLD, 12, 2)
+ FIELD(DEV_INSTR_WR_CONFIG_REG, WR_INSTR_RESV1_FLD, 9, 3)
+ FIELD(DEV_INSTR_WR_CONFIG_REG, WEL_DIS_FLD, 8, 1)
+ FIELD(DEV_INSTR_WR_CONFIG_REG, WR_OPCODE_FLD, 0, 8)
+REG32(DEV_DELAY_REG, 0xc)
+ FIELD(DEV_DELAY_REG, D_NSS_FLD, 24, 8)
+ FIELD(DEV_DELAY_REG, D_BTWN_FLD, 16, 8)
+ FIELD(DEV_DELAY_REG, D_AFTER_FLD, 8, 8)
+ FIELD(DEV_DELAY_REG, D_INIT_FLD, 0, 8)
+REG32(RD_DATA_CAPTURE_REG, 0x10)
+ FIELD(RD_DATA_CAPTURE_REG, RD_DATA_RESV3_FLD, 20, 12)
+ FIELD(RD_DATA_CAPTURE_REG, DDR_READ_DELAY_FLD, 16, 4)
+ FIELD(RD_DATA_CAPTURE_REG, RD_DATA_RESV2_FLD, 9, 7)
+ FIELD(RD_DATA_CAPTURE_REG, DQS_ENABLE_FLD, 8, 1)
+ FIELD(RD_DATA_CAPTURE_REG, RD_DATA_RESV1_FLD, 6, 2)
+ FIELD(RD_DATA_CAPTURE_REG, SAMPLE_EDGE_SEL_FLD, 5, 1)
+ FIELD(RD_DATA_CAPTURE_REG, DELAY_FLD, 1, 4)
+ FIELD(RD_DATA_CAPTURE_REG, BYPASS_FLD, 0, 1)
+REG32(DEV_SIZE_CONFIG_REG, 0x14)
+ FIELD(DEV_SIZE_CONFIG_REG, DEV_SIZE_RESV_FLD, 29, 3)
+ FIELD(DEV_SIZE_CONFIG_REG, MEM_SIZE_ON_CS3_FLD, 27, 2)
+ FIELD(DEV_SIZE_CONFIG_REG, MEM_SIZE_ON_CS2_FLD, 25, 2)
+ FIELD(DEV_SIZE_CONFIG_REG, MEM_SIZE_ON_CS1_FLD, 23, 2)
+ FIELD(DEV_SIZE_CONFIG_REG, MEM_SIZE_ON_CS0_FLD, 21, 2)
+ FIELD(DEV_SIZE_CONFIG_REG, BYTES_PER_SUBSECTOR_FLD, 16, 5)
+ FIELD(DEV_SIZE_CONFIG_REG, BYTES_PER_DEVICE_PAGE_FLD, 4, 12)
+ FIELD(DEV_SIZE_CONFIG_REG, NUM_ADDR_BYTES_FLD, 0, 4)
+REG32(SRAM_PARTITION_CFG_REG, 0x18)
+ FIELD(SRAM_PARTITION_CFG_REG, SRAM_PARTITION_RESV_FLD, 8, 24)
+ FIELD(SRAM_PARTITION_CFG_REG, ADDR_FLD, 0, 8)
+REG32(IND_AHB_ADDR_TRIGGER_REG, 0x1c)
+REG32(DMA_PERIPH_CONFIG_REG, 0x20)
+ FIELD(DMA_PERIPH_CONFIG_REG, DMA_PERIPH_RESV2_FLD, 12, 20)
+ FIELD(DMA_PERIPH_CONFIG_REG, NUM_BURST_REQ_BYTES_FLD, 8, 4)
+ FIELD(DMA_PERIPH_CONFIG_REG, DMA_PERIPH_RESV1_FLD, 4, 4)
+ FIELD(DMA_PERIPH_CONFIG_REG, NUM_SINGLE_REQ_BYTES_FLD, 0, 4)
+REG32(REMAP_ADDR_REG, 0x24)
+REG32(MODE_BIT_CONFIG_REG, 0x28)
+ FIELD(MODE_BIT_CONFIG_REG, RX_CRC_DATA_LOW_FLD, 24, 8)
+ FIELD(MODE_BIT_CONFIG_REG, RX_CRC_DATA_UP_FLD, 16, 8)
+ FIELD(MODE_BIT_CONFIG_REG, CRC_OUT_ENABLE_FLD, 15, 1)
+ FIELD(MODE_BIT_CONFIG_REG, MODE_BIT_RESV1_FLD, 11, 4)
+ FIELD(MODE_BIT_CONFIG_REG, CHUNK_SIZE_FLD, 8, 3)
+ FIELD(MODE_BIT_CONFIG_REG, MODE_FLD, 0, 8)
+REG32(SRAM_FILL_REG, 0x2c)
+ FIELD(SRAM_FILL_REG, SRAM_FILL_INDAC_WRITE_FLD, 16, 16)
+ FIELD(SRAM_FILL_REG, SRAM_FILL_INDAC_READ_FLD, 0, 16)
+REG32(TX_THRESH_REG, 0x30)
+ FIELD(TX_THRESH_REG, TX_THRESH_RESV_FLD, 5, 27)
+ FIELD(TX_THRESH_REG, LEVEL_FLD, 0, 5)
+REG32(RX_THRESH_REG, 0x34)
+ FIELD(RX_THRESH_REG, RX_THRESH_RESV_FLD, 5, 27)
+ FIELD(RX_THRESH_REG, LEVEL_FLD, 0, 5)
+REG32(WRITE_COMPLETION_CTRL_REG, 0x38)
+ FIELD(WRITE_COMPLETION_CTRL_REG, POLL_REP_DELAY_FLD, 24, 8)
+ FIELD(WRITE_COMPLETION_CTRL_REG, POLL_COUNT_FLD, 16, 8)
+ FIELD(WRITE_COMPLETION_CTRL_REG, ENABLE_POLLING_EXP_FLD, 15, 1)
+ FIELD(WRITE_COMPLETION_CTRL_REG, DISABLE_POLLING_FLD, 14, 1)
+ FIELD(WRITE_COMPLETION_CTRL_REG, POLLING_POLARITY_FLD, 13, 1)
+ FIELD(WRITE_COMPLETION_CTRL_REG, WR_COMP_CTRL_RESV1_FLD, 12, 1)
+ FIELD(WRITE_COMPLETION_CTRL_REG, POLLING_ADDR_EN_FLD, 11, 1)
+ FIELD(WRITE_COMPLETION_CTRL_REG, POLLING_BIT_INDEX_FLD, 8, 3)
+ FIELD(WRITE_COMPLETION_CTRL_REG, OPCODE_FLD, 0, 8)
+REG32(NO_OF_POLLS_BEF_EXP_REG, 0x3c)
+REG32(IRQ_STATUS_REG, 0x40)
+ FIELD(IRQ_STATUS_REG, IRQ_STAT_RESV_FLD, 20, 12)
+ FIELD(IRQ_STATUS_REG, ECC_FAIL_FLD, 19, 1)
+ FIELD(IRQ_STATUS_REG, TX_CRC_CHUNK_BRK_FLD, 18, 1)
+ FIELD(IRQ_STATUS_REG, RX_CRC_DATA_VAL_FLD, 17, 1)
+ FIELD(IRQ_STATUS_REG, RX_CRC_DATA_ERR_FLD, 16, 1)
+ FIELD(IRQ_STATUS_REG, IRQ_STAT_RESV1_FLD, 15, 1)
+ FIELD(IRQ_STATUS_REG, STIG_REQ_INT_FLD, 14, 1)
+ FIELD(IRQ_STATUS_REG, POLL_EXP_INT_FLD, 13, 1)
+ FIELD(IRQ_STATUS_REG, INDRD_SRAM_FULL_FLD, 12, 1)
+ FIELD(IRQ_STATUS_REG, RX_FIFO_FULL_FLD, 11, 1)
+ FIELD(IRQ_STATUS_REG, RX_FIFO_NOT_EMPTY_FLD, 10, 1)
+ FIELD(IRQ_STATUS_REG, TX_FIFO_FULL_FLD, 9, 1)
+ FIELD(IRQ_STATUS_REG, TX_FIFO_NOT_FULL_FLD, 8, 1)
+ FIELD(IRQ_STATUS_REG, RECV_OVERFLOW_FLD, 7, 1)
+ FIELD(IRQ_STATUS_REG, INDIRECT_XFER_LEVEL_BREACH_FLD, 6, 1)
+ FIELD(IRQ_STATUS_REG, ILLEGAL_ACCESS_DET_FLD, 5, 1)
+ FIELD(IRQ_STATUS_REG, PROT_WR_ATTEMPT_FLD, 4, 1)
+ FIELD(IRQ_STATUS_REG, INDIRECT_TRANSFER_REJECT_FLD, 3, 1)
+ FIELD(IRQ_STATUS_REG, INDIRECT_OP_DONE_FLD, 2, 1)
+ FIELD(IRQ_STATUS_REG, UNDERFLOW_DET_FLD, 1, 1)
+ FIELD(IRQ_STATUS_REG, MODE_M_FAIL_FLD, 0, 1)
+REG32(IRQ_MASK_REG, 0x44)
+ FIELD(IRQ_MASK_REG, IRQ_MASK_RESV_FLD, 20, 12)
+ FIELD(IRQ_MASK_REG, ECC_FAIL_MASK_FLD, 19, 1)
+ FIELD(IRQ_MASK_REG, TX_CRC_CHUNK_BRK_MASK_FLD, 18, 1)
+ FIELD(IRQ_MASK_REG, RX_CRC_DATA_VAL_MASK_FLD, 17, 1)
+ FIELD(IRQ_MASK_REG, RX_CRC_DATA_ERR_MASK_FLD, 16, 1)
+ FIELD(IRQ_MASK_REG, IRQ_MASK_RESV1_FLD, 15, 1)
+ FIELD(IRQ_MASK_REG, STIG_REQ_MASK_FLD, 14, 1)
+ FIELD(IRQ_MASK_REG, POLL_EXP_INT_MASK_FLD, 13, 1)
+ FIELD(IRQ_MASK_REG, INDRD_SRAM_FULL_MASK_FLD, 12, 1)
+ FIELD(IRQ_MASK_REG, RX_FIFO_FULL_MASK_FLD, 11, 1)
+ FIELD(IRQ_MASK_REG, RX_FIFO_NOT_EMPTY_MASK_FLD, 10, 1)
+ FIELD(IRQ_MASK_REG, TX_FIFO_FULL_MASK_FLD, 9, 1)
+ FIELD(IRQ_MASK_REG, TX_FIFO_NOT_FULL_MASK_FLD, 8, 1)
+ FIELD(IRQ_MASK_REG, RECV_OVERFLOW_MASK_FLD, 7, 1)
+ FIELD(IRQ_MASK_REG, INDIRECT_XFER_LEVEL_BREACH_MASK_FLD, 6, 1)
+ FIELD(IRQ_MASK_REG, ILLEGAL_ACCESS_DET_MASK_FLD, 5, 1)
+ FIELD(IRQ_MASK_REG, PROT_WR_ATTEMPT_MASK_FLD, 4, 1)
+ FIELD(IRQ_MASK_REG, INDIRECT_TRANSFER_REJECT_MASK_FLD, 3, 1)
+ FIELD(IRQ_MASK_REG, INDIRECT_OP_DONE_MASK_FLD, 2, 1)
+ FIELD(IRQ_MASK_REG, UNDERFLOW_DET_MASK_FLD, 1, 1)
+ FIELD(IRQ_MASK_REG, MODE_M_FAIL_MASK_FLD, 0, 1)
+REG32(LOWER_WR_PROT_REG, 0x50)
+REG32(UPPER_WR_PROT_REG, 0x54)
+REG32(WR_PROT_CTRL_REG, 0x58)
+ FIELD(WR_PROT_CTRL_REG, WR_PROT_CTRL_RESV_FLD, 2, 30)
+ FIELD(WR_PROT_CTRL_REG, ENB_FLD, 1, 1)
+ FIELD(WR_PROT_CTRL_REG, INV_FLD, 0, 1)
+REG32(INDIRECT_READ_XFER_CTRL_REG, 0x60)
+ FIELD(INDIRECT_READ_XFER_CTRL_REG, INDIR_RD_XFER_RESV_FLD, 8, 24)
+ FIELD(INDIRECT_READ_XFER_CTRL_REG, NUM_IND_OPS_DONE_FLD, 6, 2)
+ FIELD(INDIRECT_READ_XFER_CTRL_REG, IND_OPS_DONE_STATUS_FLD, 5, 1)
+ FIELD(INDIRECT_READ_XFER_CTRL_REG, RD_QUEUED_FLD, 4, 1)
+ FIELD(INDIRECT_READ_XFER_CTRL_REG, SRAM_FULL_FLD, 3, 1)
+ FIELD(INDIRECT_READ_XFER_CTRL_REG, RD_STATUS_FLD, 2, 1)
+ FIELD(INDIRECT_READ_XFER_CTRL_REG, CANCEL_FLD, 1, 1)
+ FIELD(INDIRECT_READ_XFER_CTRL_REG, START_FLD, 0, 1)
+REG32(INDIRECT_READ_XFER_WATERMARK_REG, 0x64)
+REG32(INDIRECT_READ_XFER_START_REG, 0x68)
+REG32(INDIRECT_READ_XFER_NUM_BYTES_REG, 0x6c)
+REG32(INDIRECT_WRITE_XFER_CTRL_REG, 0x70)
+ FIELD(INDIRECT_WRITE_XFER_CTRL_REG, INDIR_WR_XFER_RESV2_FLD, 8, 24)
+ FIELD(INDIRECT_WRITE_XFER_CTRL_REG, NUM_IND_OPS_DONE_FLD, 6, 2)
+ FIELD(INDIRECT_WRITE_XFER_CTRL_REG, IND_OPS_DONE_STATUS_FLD, 5, 1)
+ FIELD(INDIRECT_WRITE_XFER_CTRL_REG, WR_QUEUED_FLD, 4, 1)
+ FIELD(INDIRECT_WRITE_XFER_CTRL_REG, INDIR_WR_XFER_RESV1_FLD, 3, 1)
+ FIELD(INDIRECT_WRITE_XFER_CTRL_REG, WR_STATUS_FLD, 2, 1)
+ FIELD(INDIRECT_WRITE_XFER_CTRL_REG, CANCEL_FLD, 1, 1)
+ FIELD(INDIRECT_WRITE_XFER_CTRL_REG, START_FLD, 0, 1)
+REG32(INDIRECT_WRITE_XFER_WATERMARK_REG, 0x74)
+REG32(INDIRECT_WRITE_XFER_START_REG, 0x78)
+REG32(INDIRECT_WRITE_XFER_NUM_BYTES_REG, 0x7c)
+REG32(INDIRECT_TRIGGER_ADDR_RANGE_REG, 0x80)
+ FIELD(INDIRECT_TRIGGER_ADDR_RANGE_REG, IND_RANGE_RESV1_FLD, 4, 28)
+ FIELD(INDIRECT_TRIGGER_ADDR_RANGE_REG, IND_RANGE_WIDTH_FLD, 0, 4)
+REG32(FLASH_COMMAND_CTRL_MEM_REG, 0x8c)
+ FIELD(FLASH_COMMAND_CTRL_MEM_REG, FLASH_COMMAND_CTRL_MEM_RESV1_FLD, 29, 3)
+ FIELD(FLASH_COMMAND_CTRL_MEM_REG, MEM_BANK_ADDR_FLD, 20, 9)
+ FIELD(FLASH_COMMAND_CTRL_MEM_REG, FLASH_COMMAND_CTRL_MEM_RESV2_FLD, 19, 1)
+ FIELD(FLASH_COMMAND_CTRL_MEM_REG, NB_OF_STIG_READ_BYTES_FLD, 16, 3)
+ FIELD(FLASH_COMMAND_CTRL_MEM_REG, MEM_BANK_READ_DATA_FLD, 8, 8)
+ FIELD(FLASH_COMMAND_CTRL_MEM_REG, FLASH_COMMAND_CTRL_MEM_RESV3_FLD, 2, 6)
+ FIELD(FLASH_COMMAND_CTRL_MEM_REG, MEM_BANK_REQ_IN_PROGRESS_FLD, 1, 1)
+ FIELD(FLASH_COMMAND_CTRL_MEM_REG, TRIGGER_MEM_BANK_REQ_FLD, 0, 1)
+REG32(FLASH_CMD_CTRL_REG, 0x90)
+ FIELD(FLASH_CMD_CTRL_REG, CMD_OPCODE_FLD, 24, 8)
+ FIELD(FLASH_CMD_CTRL_REG, ENB_READ_DATA_FLD, 23, 1)
+ FIELD(FLASH_CMD_CTRL_REG, NUM_RD_DATA_BYTES_FLD, 20, 3)
+ FIELD(FLASH_CMD_CTRL_REG, ENB_COMD_ADDR_FLD, 19, 1)
+ FIELD(FLASH_CMD_CTRL_REG, ENB_MODE_BIT_FLD, 18, 1)
+ FIELD(FLASH_CMD_CTRL_REG, NUM_ADDR_BYTES_FLD, 16, 2)
+ FIELD(FLASH_CMD_CTRL_REG, ENB_WRITE_DATA_FLD, 15, 1)
+ FIELD(FLASH_CMD_CTRL_REG, NUM_WR_DATA_BYTES_FLD, 12, 3)
+ FIELD(FLASH_CMD_CTRL_REG, NUM_DUMMY_CYCLES_FLD, 7, 5)
+ FIELD(FLASH_CMD_CTRL_REG, FLASH_CMD_CTRL_RESV1_FLD, 3, 4)
+ FIELD(FLASH_CMD_CTRL_REG, STIG_MEM_BANK_EN_FLD, 2, 1)
+ FIELD(FLASH_CMD_CTRL_REG, CMD_EXEC_STATUS_FLD, 1, 1)
+ FIELD(FLASH_CMD_CTRL_REG, CMD_EXEC_FLD, 0, 1)
+REG32(FLASH_CMD_ADDR_REG, 0x94)
+REG32(FLASH_RD_DATA_LOWER_REG, 0xa0)
+REG32(FLASH_RD_DATA_UPPER_REG, 0xa4)
+REG32(FLASH_WR_DATA_LOWER_REG, 0xa8)
+REG32(FLASH_WR_DATA_UPPER_REG, 0xac)
+REG32(POLLING_FLASH_STATUS_REG, 0xb0)
+ FIELD(POLLING_FLASH_STATUS_REG, DEVICE_STATUS_RSVD_FLD2, 21, 11)
+ FIELD(POLLING_FLASH_STATUS_REG, DEVICE_STATUS_NB_DUMMY, 16, 5)
+ FIELD(POLLING_FLASH_STATUS_REG, DEVICE_STATUS_RSVD_FLD1, 9, 7)
+ FIELD(POLLING_FLASH_STATUS_REG, DEVICE_STATUS_VALID_FLD, 8, 1)
+ FIELD(POLLING_FLASH_STATUS_REG, DEVICE_STATUS_FLD, 0, 8)
+REG32(PHY_CONFIGURATION_REG, 0xb4)
+ FIELD(PHY_CONFIGURATION_REG, PHY_CONFIG_RESYNC_FLD, 31, 1)
+ FIELD(PHY_CONFIGURATION_REG, PHY_CONFIG_RESET_FLD, 30, 1)
+ FIELD(PHY_CONFIGURATION_REG, PHY_CONFIG_RX_DLL_BYPASS_FLD, 29, 1)
+ FIELD(PHY_CONFIGURATION_REG, PHY_CONFIG_RESV2_FLD, 23, 6)
+ FIELD(PHY_CONFIGURATION_REG, PHY_CONFIG_TX_DLL_DELAY_FLD, 16, 7)
+ FIELD(PHY_CONFIGURATION_REG, PHY_CONFIG_RESV1_FLD, 7, 9)
+ FIELD(PHY_CONFIGURATION_REG, PHY_CONFIG_RX_DLL_DELAY_FLD, 0, 7)
+REG32(PHY_MASTER_CONTROL_REG, 0xb8)
+ FIELD(PHY_MASTER_CONTROL_REG, PHY_MASTER_CONTROL_RESV3_FLD, 25, 7)
+ FIELD(PHY_MASTER_CONTROL_REG, PHY_MASTER_LOCK_MODE_FLD, 24, 1)
+ FIELD(PHY_MASTER_CONTROL_REG, PHY_MASTER_BYPASS_MODE_FLD, 23, 1)
+ FIELD(PHY_MASTER_CONTROL_REG, PHY_MASTER_PHASE_DETECT_SELECTOR_FLD, 20, 3)
+ FIELD(PHY_MASTER_CONTROL_REG, PHY_MASTER_CONTROL_RESV2_FLD, 19, 1)
+ FIELD(PHY_MASTER_CONTROL_REG, PHY_MASTER_NB_INDICATIONS_FLD, 16, 3)
+ FIELD(PHY_MASTER_CONTROL_REG, PHY_MASTER_CONTROL_RESV1_FLD, 7, 9)
+ FIELD(PHY_MASTER_CONTROL_REG, PHY_MASTER_INITIAL_DELAY_FLD, 0, 7)
+REG32(DLL_OBSERVABLE_LOWER_REG, 0xbc)
+ FIELD(DLL_OBSERVABLE_LOWER_REG,
+ DLL_OBSERVABLE_LOWER_DLL_LOCK_INC_FLD, 24, 8)
+ FIELD(DLL_OBSERVABLE_LOWER_REG,
+ DLL_OBSERVABLE_LOWER_DLL_LOCK_DEC_FLD, 16, 8)
+ FIELD(DLL_OBSERVABLE_LOWER_REG,
+ DLL_OBSERVABLE_LOWER_LOOPBACK_LOCK_FLD, 15, 1)
+ FIELD(DLL_OBSERVABLE_LOWER_REG,
+ DLL_OBSERVABLE_LOWER_LOCK_VALUE_FLD, 8, 7)
+ FIELD(DLL_OBSERVABLE_LOWER_REG,
+ DLL_OBSERVABLE_LOWER_UNLOCK_COUNTER_FLD, 3, 5)
+ FIELD(DLL_OBSERVABLE_LOWER_REG,
+ DLL_OBSERVABLE_LOWER_LOCK_MODE_FLD, 1, 2)
+ FIELD(DLL_OBSERVABLE_LOWER_REG,
+ DLL_OBSERVABLE_LOWER_DLL_LOCK_FLD, 0, 1)
+REG32(DLL_OBSERVABLE_UPPER_REG, 0xc0)
+ FIELD(DLL_OBSERVABLE_UPPER_REG,
+ DLL_OBSERVABLE_UPPER_RESV2_FLD, 23, 9)
+ FIELD(DLL_OBSERVABLE_UPPER_REG,
+ DLL_OBSERVABLE_UPPER_TX_DECODER_OUTPUT_FLD, 16, 7)
+ FIELD(DLL_OBSERVABLE_UPPER_REG,
+ DLL_OBSERVABLE_UPPER_RESV1_FLD, 7, 9)
+ FIELD(DLL_OBSERVABLE_UPPER_REG,
+ DLL_OBSERVABLE__UPPER_RX_DECODER_OUTPUT_FLD, 0, 7)
+REG32(OPCODE_EXT_LOWER_REG, 0xe0)
+ FIELD(OPCODE_EXT_LOWER_REG, EXT_READ_OPCODE_FLD, 24, 8)
+ FIELD(OPCODE_EXT_LOWER_REG, EXT_WRITE_OPCODE_FLD, 16, 8)
+ FIELD(OPCODE_EXT_LOWER_REG, EXT_POLL_OPCODE_FLD, 8, 8)
+ FIELD(OPCODE_EXT_LOWER_REG, EXT_STIG_OPCODE_FLD, 0, 8)
+REG32(OPCODE_EXT_UPPER_REG, 0xe4)
+ FIELD(OPCODE_EXT_UPPER_REG, WEL_OPCODE_FLD, 24, 8)
+ FIELD(OPCODE_EXT_UPPER_REG, EXT_WEL_OPCODE_FLD, 16, 8)
+ FIELD(OPCODE_EXT_UPPER_REG, OPCODE_EXT_UPPER_RESV1_FLD, 0, 16)
+REG32(MODULE_ID_REG, 0xfc)
+ FIELD(MODULE_ID_REG, FIX_PATCH_FLD, 24, 8)
+ FIELD(MODULE_ID_REG, MODULE_ID_FLD, 8, 16)
+ FIELD(MODULE_ID_REG, MODULE_ID_RESV_FLD, 2, 6)
+ FIELD(MODULE_ID_REG, CONF_FLD, 0, 2)
+
+#define RXFF_SZ 1024
+#define TXFF_SZ 1024
+
+#define MAX_RX_DEC_OUT 8
+
+#define SZ_512MBIT (512 * 1024 * 1024)
+#define SZ_1GBIT (1024 * 1024 * 1024)
+#define SZ_2GBIT (2ULL * SZ_1GBIT)
+#define SZ_4GBIT (4ULL * SZ_1GBIT)
+
+#define IS_IND_DMA_START(op) ((op)->done_bytes == 0)
+/*
+ * Bit field size of R_INDIRECT_WRITE_XFER_CTRL_REG_NUM_IND_OPS_DONE_FLD
+ * is 2 bits, which can record max of 3 indac operations.
+ */
+#define IND_OPS_DONE_MAX 3
+
+typedef enum {
+ WREN = 0x6,
+} FlashCMD;
+
+static unsigned int ospi_stig_addr_len(XlnxVersalOspi *s)
+{
+ /* Num address bytes is NUM_ADDR_BYTES_FLD + 1 */
+ return ARRAY_FIELD_EX32(s->regs,
+ FLASH_CMD_CTRL_REG, NUM_ADDR_BYTES_FLD) + 1;
+}
+
+static unsigned int ospi_stig_wr_data_len(XlnxVersalOspi *s)
+{
+ /* Num write data bytes is NUM_WR_DATA_BYTES_FLD + 1 */
+ return ARRAY_FIELD_EX32(s->regs,
+ FLASH_CMD_CTRL_REG, NUM_WR_DATA_BYTES_FLD) + 1;
+}
+
+static unsigned int ospi_stig_rd_data_len(XlnxVersalOspi *s)
+{
+ /* Num read data bytes is NUM_RD_DATA_BYTES_FLD + 1 */
+ return ARRAY_FIELD_EX32(s->regs,
+ FLASH_CMD_CTRL_REG, NUM_RD_DATA_BYTES_FLD) + 1;
+}
+
+/*
+ * Status bits in R_IRQ_STATUS_REG are set when the event occurs and the
+ * interrupt is enabled in the mask register ([1] Section 2.3.17)
+ */
+static void set_irq(XlnxVersalOspi *s, uint32_t set_mask)
+{
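+ /*
+ * Illustration: if the guest has enabled the INDIRECT_OP_DONE bit in
+ * IRQ_MASK_REG, set_irq(s, R_IRQ_STATUS_REG_INDIRECT_OP_DONE_FLD_MASK)
+ * latches the corresponding status bit; with the mask bit clear the
+ * event is dropped.
+ */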
+ s->regs[R_IRQ_STATUS_REG] |= s->regs[R_IRQ_MASK_REG] & set_mask;
+}
+
+static void ospi_update_irq_line(XlnxVersalOspi *s)
+{
+ qemu_set_irq(s->irq, !!(s->regs[R_IRQ_STATUS_REG] &
+ s->regs[R_IRQ_MASK_REG]));
+}
+
+static uint8_t ospi_get_wr_opcode(XlnxVersalOspi *s)
+{
+ return ARRAY_FIELD_EX32(s->regs,
+ DEV_INSTR_WR_CONFIG_REG, WR_OPCODE_FLD);
+}
+
+static uint8_t ospi_get_rd_opcode(XlnxVersalOspi *s)
+{
+ return ARRAY_FIELD_EX32(s->regs,
+ DEV_INSTR_RD_CONFIG_REG, RD_OPCODE_NON_XIP_FLD);
+}
+
+static uint32_t ospi_get_num_addr_bytes(XlnxVersalOspi *s)
+{
+ /* Num address bytes is NUM_ADDR_BYTES_FLD + 1 */
+ return ARRAY_FIELD_EX32(s->regs,
+ DEV_SIZE_CONFIG_REG, NUM_ADDR_BYTES_FLD) + 1;
+}
+
+static void ospi_stig_membank_req(XlnxVersalOspi *s)
+{
+ int idx = ARRAY_FIELD_EX32(s->regs,
+ FLASH_COMMAND_CTRL_MEM_REG, MEM_BANK_ADDR_FLD);
+
+ ARRAY_FIELD_DP32(s->regs, FLASH_COMMAND_CTRL_MEM_REG,
+ MEM_BANK_READ_DATA_FLD, s->stig_membank[idx]);
+}
+
+static int ospi_stig_membank_rd_bytes(XlnxVersalOspi *s)
+{
+ int rd_data_fld = ARRAY_FIELD_EX32(s->regs, FLASH_COMMAND_CTRL_MEM_REG,
+ NB_OF_STIG_READ_BYTES_FLD);
+ static const int sizes[6] = { 16, 32, 64, 128, 256, 512 };
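+ /*
+ * NB_OF_STIG_READ_BYTES_FLD values 0..5 select 16..512 bytes (e.g. a
+ * field value of 2 selects 64 bytes); out-of-range values return 0.
+ */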
+ return (rd_data_fld < 6) ? sizes[rd_data_fld] : 0;
+}
+
+static uint32_t ospi_get_page_sz(XlnxVersalOspi *s)
+{
+ return ARRAY_FIELD_EX32(s->regs,
+ DEV_SIZE_CONFIG_REG, BYTES_PER_DEVICE_PAGE_FLD);
+}
+
+static bool ospi_ind_rd_watermark_enabled(XlnxVersalOspi *s)
+{
+ return s->regs[R_INDIRECT_READ_XFER_WATERMARK_REG];
+}
+
+static void ind_op_advance(IndOp *op, unsigned int len)
+{
+ op->done_bytes += len;
+ assert(op->done_bytes <= op->num_bytes);
+ if (op->done_bytes == op->num_bytes) {
+ op->completed = true;
+ }
+}
+
+static uint32_t ind_op_next_byte(IndOp *op)
+{
+ return op->flash_addr + op->done_bytes;
+}
+
+static uint32_t ind_op_end_byte(IndOp *op)
+{
+ return op->flash_addr + op->num_bytes;
+}
+
+static void ospi_ind_op_next(IndOp *op)
+{
+ op[0] = op[1];
+ op[1].completed = true;
+}
+
+static void ind_op_setup(IndOp *op, uint32_t flash_addr, uint32_t num_bytes)
+{
+ if (num_bytes & 0x3) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "OSPI indirect op num bytes not word aligned\n");
+ }
+ op->flash_addr = flash_addr;
+ op->num_bytes = num_bytes;
+ op->done_bytes = 0;
+ op->completed = false;
+}
+
+static bool ospi_ind_op_completed(IndOp *op)
+{
+ return op->completed;
+}
+
+static bool ospi_ind_op_all_completed(XlnxVersalOspi *s)
+{
+ return s->rd_ind_op[0].completed && s->wr_ind_op[0].completed;
+}
+
+static void ospi_ind_op_cancel(IndOp *op)
+{
+ op[0].completed = true;
+ op[1].completed = true;
+}
+
+static bool ospi_ind_op_add(IndOp *op, Fifo8 *fifo,
+ uint32_t flash_addr, uint32_t num_bytes)
+{
+ /* Check if first indirect op has been completed */
+ if (op->completed) {
+ fifo8_reset(fifo);
+ ind_op_setup(op, flash_addr, num_bytes);
+ return false;
+ }
+
+ /* Check if second indirect op has been completed */
+ op++;
+ if (op->completed) {
+ ind_op_setup(op, flash_addr, num_bytes);
+ return false;
+ }
+ return true;
+}
+
+static void ospi_ind_op_queue_up_rd(XlnxVersalOspi *s)
+{
+ uint32_t num_bytes = s->regs[R_INDIRECT_READ_XFER_NUM_BYTES_REG];
+ uint32_t flash_addr = s->regs[R_INDIRECT_READ_XFER_START_REG];
+ bool failed;
+
+ failed = ospi_ind_op_add(s->rd_ind_op, &s->rx_sram, flash_addr, num_bytes);
+ /* If two already queued set rd reject interrupt */
+ if (failed) {
+ set_irq(s, R_IRQ_STATUS_REG_INDIRECT_TRANSFER_REJECT_FLD_MASK);
+ }
+}
+
+static void ospi_ind_op_queue_up_wr(XlnxVersalOspi *s)
+{
+ uint32_t num_bytes = s->regs[R_INDIRECT_WRITE_XFER_NUM_BYTES_REG];
+ uint32_t flash_addr = s->regs[R_INDIRECT_WRITE_XFER_START_REG];
+ bool failed;
+
+ failed = ospi_ind_op_add(s->wr_ind_op, &s->tx_sram, flash_addr, num_bytes);
+ /* If two already queued set wr reject interrupt */
+ if (failed) {
+ set_irq(s, R_IRQ_STATUS_REG_INDIRECT_TRANSFER_REJECT_FLD_MASK);
+ }
+}
+
+static uint64_t flash_sz(XlnxVersalOspi *s, unsigned int cs)
+{
+ /* Flash sizes in bytes */
+ static const uint64_t sizes[4] = { SZ_512MBIT / 8, SZ_1GBIT / 8,
+ SZ_2GBIT / 8, SZ_4GBIT / 8 };
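+ /*
+ * Illustration: a MEM_SIZE_ON_CSn field value of 0 selects a 512 Mbit
+ * (64 MiB) flash, while 3 selects a 4 Gbit (512 MiB) one.
+ */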
+ uint32_t v = s->regs[R_DEV_SIZE_CONFIG_REG];
+
+ v >>= cs * R_DEV_SIZE_CONFIG_REG_MEM_SIZE_ON_CS0_FLD_LENGTH;
+ return sizes[FIELD_EX32(v, DEV_SIZE_CONFIG_REG, MEM_SIZE_ON_CS0_FLD)];
+}
+
+static unsigned int ospi_get_block_sz(XlnxVersalOspi *s)
+{
+ unsigned int block_fld = ARRAY_FIELD_EX32(s->regs,
+ DEV_SIZE_CONFIG_REG,
+ BYTES_PER_SUBSECTOR_FLD);
+ return 1 << block_fld;
+}
+
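+/*
+ * Illustration: with the DEV_SIZE_CONFIG_REG reset value the subsector
+ * field is 16 (64 KiB blocks), so a 512 Mbit (64 MiB) flash on a chip
+ * select amounts to 1024 blocks.
+ */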
+static unsigned int flash_blocks(XlnxVersalOspi *s, unsigned int cs)
+{
+ unsigned int b_sz = ospi_get_block_sz(s);
+ unsigned int f_sz = flash_sz(s, cs);
+
+ return f_sz / b_sz;
+}
+
+static int ospi_ahb_decoder_cs(XlnxVersalOspi *s, hwaddr addr)
+{
+ uint64_t end_addr = 0;
+ int cs;
+
+ for (cs = 0; cs < s->num_cs; cs++) {
+ end_addr += flash_sz(s, cs);
+ if (addr < end_addr) {
+ break;
+ }
+ }
+
+ if (cs == s->num_cs) {
+ /* Address is out of range */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "OSPI flash address does not fit in configuration\n");
+ return -1;
+ }
+ return cs;
+}
+
+static void ospi_ahb_decoder_enable_cs(XlnxVersalOspi *s, hwaddr addr)
+{
+ int cs = ospi_ahb_decoder_cs(s, addr);
+
+ if (cs >= 0) {
+ for (int i = 0; i < s->num_cs; i++) {
+ qemu_set_irq(s->cs_lines[i], cs != i);
+ }
+ }
+}
+
+static unsigned int single_cs(XlnxVersalOspi *s)
+{
+ unsigned int field = ARRAY_FIELD_EX32(s->regs,
+ CONFIG_REG, PERIPH_CS_LINES_FLD);
+
+ /*
+ * The one-liner below finds the rightmost zero bit and turns all other
+ * bits to 1. It is a variant of the 'Isolate the rightmost 0-bit' trick
+ * described at the link below (at the time of writing):
+ *
+ * https://emre.me/computer-science/bit-manipulation-tricks/
+ *
+ * 4'bXXX0 -> 4'b1110
+ * 4'bXX01 -> 4'b1101
+ * 4'bX011 -> 4'b1011
+ * 4'b0111 -> 4'b0111
+ * 4'b1111 -> 4'b1111
+ */
+ return (field | ~(field + 1)) & 0xf;
+}
+
+static void ospi_update_cs_lines(XlnxVersalOspi *s)
+{
+ unsigned int all_cs;
+ int i;
+
+ if (ARRAY_FIELD_EX32(s->regs, CONFIG_REG, PERIPH_SEL_DEC_FLD)) {
+ all_cs = ARRAY_FIELD_EX32(s->regs, CONFIG_REG, PERIPH_CS_LINES_FLD);
+ } else {
+ all_cs = single_cs(s);
+ }
+
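+ /*
+ * The cs lines are active low: a 0 bit in all_cs selects the line,
+ * matching ospi_disable_cs() driving every line to 1.
+ */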
+ for (i = 0; i < s->num_cs; i++) {
+ bool cs = (all_cs >> i) & 1;
+
+ qemu_set_irq(s->cs_lines[i], cs);
+ }
+}
+
+static void ospi_dac_cs(XlnxVersalOspi *s, hwaddr addr)
+{
+ if (ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENABLE_AHB_DECODER_FLD)) {
+ ospi_ahb_decoder_enable_cs(s, addr);
+ } else {
+ ospi_update_cs_lines(s);
+ }
+}
+
+static void ospi_disable_cs(XlnxVersalOspi *s)
+{
+ int i;
+
+ for (i = 0; i < s->num_cs; i++) {
+ qemu_set_irq(s->cs_lines[i], 1);
+ }
+}
+
+static void ospi_flush_txfifo(XlnxVersalOspi *s)
+{
+ while (!fifo8_is_empty(&s->tx_fifo)) {
+ uint32_t tx_rx = fifo8_pop(&s->tx_fifo);
+
+ tx_rx = ssi_transfer(s->spi, tx_rx);
+ fifo8_push(&s->rx_fifo, tx_rx);
+ }
+}
+
+static void ospi_tx_fifo_push_address_raw(XlnxVersalOspi *s,
+ uint32_t flash_addr,
+ unsigned int addr_bytes)
+{
+ /* Push the flash address (MSB first) */
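+ /*
+ * Illustration: addr_bytes == 3 with flash_addr == 0x123456 pushes
+ * 0x12, 0x34 and then 0x56 (example values only).
+ */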
+ if (addr_bytes == 4) {
+ fifo8_push(&s->tx_fifo, flash_addr >> 24);
+ }
+ if (addr_bytes >= 3) {
+ fifo8_push(&s->tx_fifo, flash_addr >> 16);
+ }
+ if (addr_bytes >= 2) {
+ fifo8_push(&s->tx_fifo, flash_addr >> 8);
+ }
+ fifo8_push(&s->tx_fifo, flash_addr);
+}
+
+static void ospi_tx_fifo_push_address(XlnxVersalOspi *s, uint32_t flash_addr)
+{
+ /* Push the flash address */
+ int addr_bytes = ospi_get_num_addr_bytes(s);
+
+ ospi_tx_fifo_push_address_raw(s, flash_addr, addr_bytes);
+}
+
+static void ospi_tx_fifo_push_stig_addr(XlnxVersalOspi *s)
+{
+ uint32_t flash_addr = s->regs[R_FLASH_CMD_ADDR_REG];
+ unsigned int addr_bytes = ospi_stig_addr_len(s);
+
+ ospi_tx_fifo_push_address_raw(s, flash_addr, addr_bytes);
+}
+
+static void ospi_tx_fifo_push_rd_op_addr(XlnxVersalOspi *s, uint32_t flash_addr)
+{
+ uint8_t inst_code = ospi_get_rd_opcode(s);
+
+ fifo8_reset(&s->tx_fifo);
+
+ /* Push read opcode */
+ fifo8_push(&s->tx_fifo, inst_code);
+
+ /* Push read address */
+ ospi_tx_fifo_push_address(s, flash_addr);
+}
+
+static void ospi_tx_fifo_push_stig_wr_data(XlnxVersalOspi *s)
+{
+ uint64_t data = s->regs[R_FLASH_WR_DATA_LOWER_REG];
+ int wr_data_len = ospi_stig_wr_data_len(s);
+ int i;
+
+ data |= (uint64_t) s->regs[R_FLASH_WR_DATA_UPPER_REG] << 32;
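+ /*
+ * Bytes are pushed least significant first, i.e. bits [7:0] of
+ * FLASH_WR_DATA_LOWER_REG are shifted out before the higher bytes.
+ */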
+ for (i = 0; i < wr_data_len; i++) {
+ int shift = i * 8;
+ fifo8_push(&s->tx_fifo, data >> shift);
+ }
+}
+
+static void ospi_tx_fifo_push_stig_rd_data(XlnxVersalOspi *s)
+{
+ int rd_data_len;
+ int i;
+
+ if (ARRAY_FIELD_EX32(s->regs, FLASH_CMD_CTRL_REG, STIG_MEM_BANK_EN_FLD)) {
+ rd_data_len = ospi_stig_membank_rd_bytes(s);
+ } else {
+ rd_data_len = ospi_stig_rd_data_len(s);
+ }
+
+ /* Push dummy bytes for the second part (data) */
+ for (i = 0; i < rd_data_len; ++i) {
+ fifo8_push(&s->tx_fifo, 0);
+ }
+}
+
+static void ospi_rx_fifo_pop_stig_rd_data(XlnxVersalOspi *s)
+{
+ int size = ospi_stig_rd_data_len(s);
+ uint8_t bytes[8] = {};
+ int i;
+
+ size = MIN(fifo8_num_used(&s->rx_fifo), size);
+
+ assert(size <= 8);
+
+ for (i = 0; i < size; i++) {
+ bytes[i] = fifo8_pop(&s->rx_fifo);
+ }
+
+ s->regs[R_FLASH_RD_DATA_LOWER_REG] = ldl_le_p(bytes);
+ s->regs[R_FLASH_RD_DATA_UPPER_REG] = ldl_le_p(bytes + 4);
+}
+
+static void ospi_ind_read(XlnxVersalOspi *s, uint32_t flash_addr, uint32_t len)
+{
+ int i;
+
+ /* Create first section of read cmd */
+ ospi_tx_fifo_push_rd_op_addr(s, flash_addr);
+
+ /* transmit first part */
+ ospi_update_cs_lines(s);
+ ospi_flush_txfifo(s);
+
+ fifo8_reset(&s->rx_fifo);
+
+ /* transmit second part (data) */
+ for (i = 0; i < len; ++i) {
+ fifo8_push(&s->tx_fifo, 0);
+ }
+ ospi_flush_txfifo(s);
+
+ for (i = 0; i < len; ++i) {
+ fifo8_push(&s->rx_sram, fifo8_pop(&s->rx_fifo));
+ }
+
+ /* done */
+ ospi_disable_cs(s);
+}
+
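+/*
+ * Both DMA request sizes below are powers of two; e.g. a
+ * NUM_BURST_REQ_BYTES_FLD value of 3 selects 8 byte bursts (1 << 3).
+ */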
+static unsigned int ospi_dma_burst_size(XlnxVersalOspi *s)
+{
+ return 1 << ARRAY_FIELD_EX32(s->regs,
+ DMA_PERIPH_CONFIG_REG,
+ NUM_BURST_REQ_BYTES_FLD);
+}
+
+static unsigned int ospi_dma_single_size(XlnxVersalOspi *s)
+{
+ return 1 << ARRAY_FIELD_EX32(s->regs,
+ DMA_PERIPH_CONFIG_REG,
+ NUM_SINGLE_REQ_BYTES_FLD);
+}
+
+static void ind_rd_inc_num_done(XlnxVersalOspi *s)
+{
+ unsigned int done = ARRAY_FIELD_EX32(s->regs,
+ INDIRECT_READ_XFER_CTRL_REG,
+ NUM_IND_OPS_DONE_FLD);
+ if (done < IND_OPS_DONE_MAX) {
+ done++;
+ }
+ done &= 0x3;
+ ARRAY_FIELD_DP32(s->regs, INDIRECT_READ_XFER_CTRL_REG,
+ NUM_IND_OPS_DONE_FLD, done);
+}
+
+static void ospi_ind_rd_completed(XlnxVersalOspi *s)
+{
+ ARRAY_FIELD_DP32(s->regs, INDIRECT_READ_XFER_CTRL_REG,
+ IND_OPS_DONE_STATUS_FLD, 1);
+
+ ind_rd_inc_num_done(s);
+ ospi_ind_op_next(s->rd_ind_op);
+ if (ospi_ind_op_all_completed(s)) {
+ set_irq(s, R_IRQ_STATUS_REG_INDIRECT_OP_DONE_FLD_MASK);
+ }
+}
+
+static void ospi_dma_read(XlnxVersalOspi *s)
+{
+ IndOp *op = s->rd_ind_op;
+ uint32_t dma_len = op->num_bytes;
+ uint32_t burst_sz = ospi_dma_burst_size(s);
+ uint32_t single_sz = ospi_dma_single_size(s);
+ uint32_t ind_trig_range;
+ uint32_t remainder;
+ XlnxCSUDMAClass *xcdc = XLNX_CSU_DMA_GET_CLASS(s->dma_src);
+
+ ind_trig_range = (1 << ARRAY_FIELD_EX32(s->regs,
+ INDIRECT_TRIGGER_ADDR_RANGE_REG,
+ IND_RANGE_WIDTH_FLD));
+ remainder = dma_len % burst_sz;
+ remainder = remainder % single_sz;
+ if (burst_sz > ind_trig_range || single_sz > ind_trig_range ||
+ remainder != 0) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "OSPI DMA burst size / single size config error\n");
+ }
+
+ s->src_dma_inprog = true;
+ if (xcdc->read(s->dma_src, 0, dma_len) != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR, "OSPI DMA configuration error\n");
+ }
+ s->src_dma_inprog = false;
+}
+
+static void ospi_do_ind_read(XlnxVersalOspi *s)
+{
+ IndOp *op = s->rd_ind_op;
+ uint32_t next_b;
+ uint32_t end_b;
+ uint32_t len;
+ bool start_dma = IS_IND_DMA_START(op) && !s->src_dma_inprog;
+
+ /* Continue to read flash until we run out of space in sram */
+ while (!ospi_ind_op_completed(op) &&
+ !fifo8_is_full(&s->rx_sram)) {
+ /* Read requested number of bytes, limited to the size of the sram */
+ next_b = ind_op_next_byte(op);
+ end_b = next_b + fifo8_num_free(&s->rx_sram);
+ end_b = MIN(end_b, ind_op_end_byte(op));
+
+ len = end_b - next_b;
+ ospi_ind_read(s, next_b, len);
+ ind_op_advance(op, len);
+
+ if (ospi_ind_rd_watermark_enabled(s)) {
+ ARRAY_FIELD_DP32(s->regs, IRQ_STATUS_REG,
+ INDIRECT_XFER_LEVEL_BREACH_FLD, 1);
+ set_irq(s,
+ R_IRQ_STATUS_REG_INDIRECT_XFER_LEVEL_BREACH_FLD_MASK);
+ }
+
+ if (!s->src_dma_inprog &&
+ ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENB_DMA_IF_FLD)) {
+ ospi_dma_read(s);
+ }
+ }
+
+ /* Set sram full */
+ if (fifo8_num_used(&s->rx_sram) == RXFF_SZ) {
+ ARRAY_FIELD_DP32(s->regs,
+ INDIRECT_READ_XFER_CTRL_REG, SRAM_FULL_FLD, 1);
+ set_irq(s, R_IRQ_STATUS_REG_INDRD_SRAM_FULL_FLD_MASK);
+ }
+
+ /* Signal completion if done, unless inside recursion via ospi_dma_read */
+ if (!ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENB_DMA_IF_FLD) || start_dma) {
+ if (ospi_ind_op_completed(op)) {
+ ospi_ind_rd_completed(s);
+ }
+ }
+}
+
+/* Transmit write enable instruction */
+static void ospi_transmit_wel(XlnxVersalOspi *s, bool ahb_decoder_cs,
+ hwaddr addr)
+{
+ fifo8_reset(&s->tx_fifo);
+ fifo8_push(&s->tx_fifo, WREN);
+
+ if (ahb_decoder_cs) {
+ ospi_ahb_decoder_enable_cs(s, addr);
+ } else {
+ ospi_update_cs_lines(s);
+ }
+
+ ospi_flush_txfifo(s);
+ ospi_disable_cs(s);
+
+ fifo8_reset(&s->rx_fifo);
+}
+
+static void ospi_ind_write(XlnxVersalOspi *s, uint32_t flash_addr, uint32_t len)
+{
+ bool ahb_decoder_cs = false;
+ uint8_t inst_code;
+ int i;
+
+ assert(fifo8_num_used(&s->tx_sram) >= len);
+
+ if (!ARRAY_FIELD_EX32(s->regs, DEV_INSTR_WR_CONFIG_REG, WEL_DIS_FLD)) {
+ ospi_transmit_wel(s, ahb_decoder_cs, 0);
+ }
+
+ /* reset fifos */
+ fifo8_reset(&s->tx_fifo);
+ fifo8_reset(&s->rx_fifo);
+
+ /* Push write opcode */
+ inst_code = ospi_get_wr_opcode(s);
+ fifo8_push(&s->tx_fifo, inst_code);
+
+ /* Push write address */
+ ospi_tx_fifo_push_address(s, flash_addr);
+
+ /* data */
+ for (i = 0; i < len; i++) {
+ fifo8_push(&s->tx_fifo, fifo8_pop(&s->tx_sram));
+ }
+
+ /* transmit */
+ ospi_update_cs_lines(s);
+ ospi_flush_txfifo(s);
+
+ /* done */
+ ospi_disable_cs(s);
+ fifo8_reset(&s->rx_fifo);
+}
+
+static void ind_wr_inc_num_done(XlnxVersalOspi *s)
+{
+ unsigned int done = ARRAY_FIELD_EX32(s->regs, INDIRECT_WRITE_XFER_CTRL_REG,
+ NUM_IND_OPS_DONE_FLD);
+ if (done < IND_OPS_DONE_MAX) {
+ done++;
+ }
+ done &= 0x3;
+ ARRAY_FIELD_DP32(s->regs, INDIRECT_WRITE_XFER_CTRL_REG,
+ NUM_IND_OPS_DONE_FLD, done);
+}
+
+static void ospi_ind_wr_completed(XlnxVersalOspi *s)
+{
+ ARRAY_FIELD_DP32(s->regs, INDIRECT_WRITE_XFER_CTRL_REG,
+ IND_OPS_DONE_STATUS_FLD, 1);
+ ind_wr_inc_num_done(s);
+ ospi_ind_op_next(s->wr_ind_op);
+ /* Set indirect op done interrupt if enabled */
+ if (ospi_ind_op_all_completed(s)) {
+ set_irq(s, R_IRQ_STATUS_REG_INDIRECT_OP_DONE_FLD_MASK);
+ }
+}
+
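+/*
+ * Illustration of the page boundary handling below, assuming a 256 byte
+ * page: with next_b == 0x1f0 and 0x40 bytes queued in tx_sram, end_b is
+ * initially 0x230 and is clamped to the page boundary at 0x200, so only
+ * 0x10 bytes are written in that loop iteration.
+ */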
+static void ospi_do_indirect_write(XlnxVersalOspi *s)
+{
+ uint32_t write_watermark = s->regs[R_INDIRECT_WRITE_XFER_WATERMARK_REG];
+ uint32_t pagesz = ospi_get_page_sz(s);
+ uint32_t page_mask = ~(pagesz - 1);
+ IndOp *op = s->wr_ind_op;
+ uint32_t next_b;
+ uint32_t end_b;
+ uint32_t len;
+
+ /* Write out tx_fifo in maximum page sz chunks */
+ while (!ospi_ind_op_completed(op) && fifo8_num_used(&s->tx_sram) > 0) {
+ next_b = ind_op_next_byte(op);
+ end_b = next_b + MIN(fifo8_num_used(&s->tx_sram), pagesz);
+
+ /* Don't cross a page boundary */
+ if ((end_b & page_mask) > next_b) {
+ end_b &= page_mask;
+ }
+
+ len = end_b - next_b;
+ len = MIN(len, op->num_bytes - op->done_bytes);
+ ospi_ind_write(s, next_b, len);
+ ind_op_advance(op, len);
+ }
+
+ /*
+ * Always set the indirect transfer level breached interrupt if enabled
+ * (write watermark > 0), since the tx_sram will always be emptied
+ */
+ if (write_watermark > 0) {
+ set_irq(s, R_IRQ_STATUS_REG_INDIRECT_XFER_LEVEL_BREACH_FLD_MASK);
+ }
+
+ /* Signal completion if done */
+ if (ospi_ind_op_completed(op)) {
+ ospi_ind_wr_completed(s);
+ }
+}
+
+static void ospi_stig_fill_membank(XlnxVersalOspi *s)
+{
+ int num_rd_bytes = ospi_stig_membank_rd_bytes(s);
+ int idx = num_rd_bytes - 8; /* first of last 8 */
+ int i;
+
+ for (i = 0; i < num_rd_bytes; i++) {
+ s->stig_membank[i] = fifo8_pop(&s->rx_fifo);
+ }
+
+ g_assert((idx + 4) < ARRAY_SIZE(s->stig_membank));
+
+ /* Fill in the lower and upper regs */
+ s->regs[R_FLASH_RD_DATA_LOWER_REG] = ldl_le_p(&s->stig_membank[idx]);
+ s->regs[R_FLASH_RD_DATA_UPPER_REG] = ldl_le_p(&s->stig_membank[idx + 4]);
+}
+
+static void ospi_stig_cmd_exec(XlnxVersalOspi *s)
+{
+ uint8_t inst_code;
+
+ /* Reset fifos */
+ fifo8_reset(&s->tx_fifo);
+ fifo8_reset(&s->rx_fifo);
+
+ /* Push write opcode */
+ inst_code = ARRAY_FIELD_EX32(s->regs, FLASH_CMD_CTRL_REG, CMD_OPCODE_FLD);
+ fifo8_push(&s->tx_fifo, inst_code);
+
+ /* Push address if enabled */
+ if (ARRAY_FIELD_EX32(s->regs, FLASH_CMD_CTRL_REG, ENB_COMD_ADDR_FLD)) {
+ ospi_tx_fifo_push_stig_addr(s);
+ }
+
+ /* Enable cs */
+ ospi_update_cs_lines(s);
+
+ /* Data */
+ if (ARRAY_FIELD_EX32(s->regs, FLASH_CMD_CTRL_REG, ENB_WRITE_DATA_FLD)) {
+ ospi_tx_fifo_push_stig_wr_data(s);
+ } else if (ARRAY_FIELD_EX32(s->regs,
+ FLASH_CMD_CTRL_REG, ENB_READ_DATA_FLD)) {
+ /* transmit first part */
+ ospi_flush_txfifo(s);
+ fifo8_reset(&s->rx_fifo);
+ ospi_tx_fifo_push_stig_rd_data(s);
+ }
+
+ /* Transmit */
+ ospi_flush_txfifo(s);
+ ospi_disable_cs(s);
+
+ if (ARRAY_FIELD_EX32(s->regs, FLASH_CMD_CTRL_REG, ENB_READ_DATA_FLD)) {
+ if (ARRAY_FIELD_EX32(s->regs,
+ FLASH_CMD_CTRL_REG, STIG_MEM_BANK_EN_FLD)) {
+ ospi_stig_fill_membank(s);
+ } else {
+ ospi_rx_fifo_pop_stig_rd_data(s);
+ }
+ }
+}
+
+static uint32_t ospi_block_address(XlnxVersalOspi *s, unsigned int block)
+{
+ unsigned int block_sz = ospi_get_block_sz(s);
+ unsigned int cs = 0;
+ uint32_t addr = 0;
+
+ while (cs < s->num_cs && block >= flash_blocks(s, cs)) {
+ block -= flash_blocks(s, cs);
+ addr += flash_sz(s, cs);
+ cs++;
+ }
+ addr += block * block_sz;
+ return addr;
+}
+
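+/*
+ * LOWER_WR_PROT_REG and UPPER_WR_PROT_REG hold block numbers; e.g. a lower
+ * value of 0 and an upper value of 3 protect blocks 0..3, since the upper
+ * bound below is the address of the first block outside the range.
+ */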
+static uint32_t ospi_get_wr_prot_addr_low(XlnxVersalOspi *s)
+{
+ unsigned int block = s->regs[R_LOWER_WR_PROT_REG];
+
+ return ospi_block_address(s, block);
+}
+
+static uint32_t ospi_get_wr_prot_addr_upper(XlnxVersalOspi *s)
+{
+ unsigned int block = s->regs[R_UPPER_WR_PROT_REG];
+
+ /* Get address of first block out of defined range */
+ return ospi_block_address(s, block + 1);
+}
+
+static bool ospi_is_write_protected(XlnxVersalOspi *s, hwaddr addr)
+{
+ uint32_t wr_prot_addr_upper = ospi_get_wr_prot_addr_upper(s);
+ uint32_t wr_prot_addr_low = ospi_get_wr_prot_addr_low(s);
+ bool in_range = false;
+
+ if (addr >= wr_prot_addr_low && addr < wr_prot_addr_upper) {
+ in_range = true;
+ }
+
+ if (ARRAY_FIELD_EX32(s->regs, WR_PROT_CTRL_REG, INV_FLD)) {
+ in_range = !in_range;
+ }
+ return in_range;
+}
+
+static uint64_t ospi_rx_sram_read(XlnxVersalOspi *s, unsigned int size)
+{
+ uint8_t bytes[8] = {};
+ int i;
+
+ if (size < 4 && fifo8_num_used(&s->rx_sram) >= 4) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "OSPI only last read of internal "
+ "sram is allowed to be < 32 bits\n");
+ }
+
+ size = MIN(fifo8_num_used(&s->rx_sram), size);
+
+ assert(size <= 8);
+
+ for (i = 0; i < size; i++) {
+ bytes[i] = fifo8_pop(&s->rx_sram);
+ }
+
+ return ldq_le_p(bytes);
+}
+
+static void ospi_tx_sram_write(XlnxVersalOspi *s, uint64_t value,
+ unsigned int size)
+{
+ int i;
+ for (i = 0; i < size && !fifo8_is_full(&s->tx_sram); i++) {
+ fifo8_push(&s->tx_sram, value >> 8 * i);
+ }
+}
+
+static uint64_t ospi_do_dac_read(void *opaque, hwaddr addr, unsigned int size)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(opaque);
+ uint8_t bytes[8] = {};
+ int i;
+
+ /* Create first section of read cmd */
+ ospi_tx_fifo_push_rd_op_addr(s, (uint32_t) addr);
+
+ /* Enable cs and transmit first part */
+ ospi_dac_cs(s, addr);
+ ospi_flush_txfifo(s);
+
+ fifo8_reset(&s->rx_fifo);
+
+ /* transmit second part (data) */
+ for (i = 0; i < size; ++i) {
+ fifo8_push(&s->tx_fifo, 0);
+ }
+ ospi_flush_txfifo(s);
+
+ /* fill in result */
+ size = MIN(fifo8_num_used(&s->rx_fifo), size);
+
+ assert(size <= 8);
+
+ for (i = 0; i < size; i++) {
+ bytes[i] = fifo8_pop(&s->rx_fifo);
+ }
+
+ /* done */
+ ospi_disable_cs(s);
+
+ return ldq_le_p(bytes);
+}
+
+static void ospi_do_dac_write(void *opaque,
+ hwaddr addr,
+ uint64_t value,
+ unsigned int size)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(opaque);
+ bool ahb_decoder_cs = ARRAY_FIELD_EX32(s->regs, CONFIG_REG,
+ ENABLE_AHB_DECODER_FLD);
+ uint8_t inst_code;
+ unsigned int i;
+
+ if (!ARRAY_FIELD_EX32(s->regs, DEV_INSTR_WR_CONFIG_REG, WEL_DIS_FLD)) {
+ ospi_transmit_wel(s, ahb_decoder_cs, addr);
+ }
+
+ /* reset fifos */
+ fifo8_reset(&s->tx_fifo);
+ fifo8_reset(&s->rx_fifo);
+
+ /* Push write opcode */
+ inst_code = ospi_get_wr_opcode(s);
+ fifo8_push(&s->tx_fifo, inst_code);
+
+ /* Push write address */
+ ospi_tx_fifo_push_address(s, addr);
+
+ /* data */
+ for (i = 0; i < size; i++) {
+ fifo8_push(&s->tx_fifo, value >> 8 * i);
+ }
+
+ /* Enable cs and transmit */
+ ospi_dac_cs(s, addr);
+ ospi_flush_txfifo(s);
+ ospi_disable_cs(s);
+
+ fifo8_reset(&s->rx_fifo);
+}
+
+static void flash_cmd_ctrl_mem_reg_post_write(RegisterInfo *reg,
+ uint64_t val)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque);
+ if (ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENB_SPI_FLD)) {
+ if (ARRAY_FIELD_EX32(s->regs,
+ FLASH_COMMAND_CTRL_MEM_REG,
+ TRIGGER_MEM_BANK_REQ_FLD)) {
+ ospi_stig_membank_req(s);
+ ARRAY_FIELD_DP32(s->regs, FLASH_COMMAND_CTRL_MEM_REG,
+ TRIGGER_MEM_BANK_REQ_FLD, 0);
+ }
+ }
+}
+
+static void flash_cmd_ctrl_reg_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque);
+
+ if (ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENB_SPI_FLD) &&
+ ARRAY_FIELD_EX32(s->regs, FLASH_CMD_CTRL_REG, CMD_EXEC_FLD)) {
+ ospi_stig_cmd_exec(s);
+ set_irq(s, R_IRQ_STATUS_REG_STIG_REQ_INT_FLD_MASK);
+ ARRAY_FIELD_DP32(s->regs, FLASH_CMD_CTRL_REG, CMD_EXEC_FLD, 0);
+ }
+}
+
+static uint64_t ind_wr_dec_num_done(XlnxVersalOspi *s, uint64_t val)
+{
+ unsigned int done = ARRAY_FIELD_EX32(s->regs, INDIRECT_WRITE_XFER_CTRL_REG,
+ NUM_IND_OPS_DONE_FLD);
+ done--;
+ done &= 0x3;
+ val = FIELD_DP32(val, INDIRECT_WRITE_XFER_CTRL_REG,
+ NUM_IND_OPS_DONE_FLD, done);
+ return val;
+}
+
+static bool ind_wr_clearing_op_done(XlnxVersalOspi *s, uint64_t new_val)
+{
+ bool set_in_reg = ARRAY_FIELD_EX32(s->regs, INDIRECT_WRITE_XFER_CTRL_REG,
+ IND_OPS_DONE_STATUS_FLD);
+ bool set_in_new_val = FIELD_EX32(new_val, INDIRECT_WRITE_XFER_CTRL_REG,
+ IND_OPS_DONE_STATUS_FLD);
+ /* return true if clearing bit */
+ return set_in_reg && !set_in_new_val;
+}
+
+static uint64_t ind_wr_xfer_ctrl_reg_pre_write(RegisterInfo *reg,
+ uint64_t val)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque);
+
+ if (ind_wr_clearing_op_done(s, val)) {
+ val = ind_wr_dec_num_done(s, val);
+ }
+ return val;
+}
+
+static void ind_wr_xfer_ctrl_reg_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque);
+
+ if (s->ind_write_disabled) {
+ return;
+ }
+
+ if (ARRAY_FIELD_EX32(s->regs, INDIRECT_WRITE_XFER_CTRL_REG, START_FLD)) {
+ ospi_ind_op_queue_up_wr(s);
+ ospi_do_indirect_write(s);
+ ARRAY_FIELD_DP32(s->regs, INDIRECT_WRITE_XFER_CTRL_REG, START_FLD, 0);
+ }
+
+ if (ARRAY_FIELD_EX32(s->regs, INDIRECT_WRITE_XFER_CTRL_REG, CANCEL_FLD)) {
+ ospi_ind_op_cancel(s->wr_ind_op);
+ fifo8_reset(&s->tx_sram);
+ ARRAY_FIELD_DP32(s->regs, INDIRECT_WRITE_XFER_CTRL_REG, CANCEL_FLD, 0);
+ }
+}
+
+static uint64_t ind_wr_xfer_ctrl_reg_post_read(RegisterInfo *reg,
+ uint64_t val)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque);
+ IndOp *op = s->wr_ind_op;
+
+ /* Check if an ind op is ongoing */
+ if (!ospi_ind_op_completed(&op[0])) {
+ /* Check if two ind ops are queued */
+ if (!ospi_ind_op_completed(&op[1])) {
+ val = FIELD_DP32(val, INDIRECT_WRITE_XFER_CTRL_REG,
+ WR_QUEUED_FLD, 1);
+ }
+ val = FIELD_DP32(val, INDIRECT_WRITE_XFER_CTRL_REG, WR_STATUS_FLD, 1);
+ }
+ return val;
+}
+
+static uint64_t ind_rd_dec_num_done(XlnxVersalOspi *s, uint64_t val)
+{
+ unsigned int done = ARRAY_FIELD_EX32(s->regs, INDIRECT_READ_XFER_CTRL_REG,
+ NUM_IND_OPS_DONE_FLD);
+ done--;
+ done &= 0x3;
+ val = FIELD_DP32(val, INDIRECT_READ_XFER_CTRL_REG,
+ NUM_IND_OPS_DONE_FLD, done);
+ return val;
+}
+
+static uint64_t ind_rd_xfer_ctrl_reg_pre_write(RegisterInfo *reg,
+ uint64_t val)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque);
+
+ if (FIELD_EX32(val, INDIRECT_READ_XFER_CTRL_REG,
+ IND_OPS_DONE_STATUS_FLD)) {
+ val = ind_rd_dec_num_done(s, val);
+ val &= ~R_INDIRECT_READ_XFER_CTRL_REG_IND_OPS_DONE_STATUS_FLD_MASK;
+ }
+ return val;
+}
+
+static void ind_rd_xfer_ctrl_reg_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque);
+
+ if (ARRAY_FIELD_EX32(s->regs, INDIRECT_READ_XFER_CTRL_REG, START_FLD)) {
+ ospi_ind_op_queue_up_rd(s);
+ ospi_do_ind_read(s);
+ ARRAY_FIELD_DP32(s->regs, INDIRECT_READ_XFER_CTRL_REG, START_FLD, 0);
+ }
+
+ if (ARRAY_FIELD_EX32(s->regs, INDIRECT_READ_XFER_CTRL_REG, CANCEL_FLD)) {
+ ospi_ind_op_cancel(s->rd_ind_op);
+ fifo8_reset(&s->rx_sram);
+ ARRAY_FIELD_DP32(s->regs, INDIRECT_READ_XFER_CTRL_REG, CANCEL_FLD, 0);
+ }
+}
+
+static uint64_t ind_rd_xfer_ctrl_reg_post_read(RegisterInfo *reg,
+ uint64_t val)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque);
+ IndOp *op = s->rd_ind_op;
+
+ /* Check if an ind op is ongoing */
+ if (!ospi_ind_op_completed(&op[0])) {
+ /* Check if two ind ops are queued */
+ if (!ospi_ind_op_completed(&op[1])) {
+ val = FIELD_DP32(val, INDIRECT_READ_XFER_CTRL_REG,
+ RD_QUEUED_FLD, 1);
+ }
+ val = FIELD_DP32(val, INDIRECT_READ_XFER_CTRL_REG, RD_STATUS_FLD, 1);
+ }
+ return val;
+}
+
+static uint64_t sram_fill_reg_post_read(RegisterInfo *reg, uint64_t val)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque);
+ val = ((fifo8_num_used(&s->tx_sram) & 0xFFFF) << 16) |
+ (fifo8_num_used(&s->rx_sram) & 0xFFFF);
+ return val;
+}
+
+static uint64_t dll_obs_upper_reg_post_read(RegisterInfo *reg, uint64_t val)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(reg->opaque);
+ uint32_t rx_dec_out;
+
+ rx_dec_out = FIELD_EX32(val, DLL_OBSERVABLE_UPPER_REG,
+ DLL_OBSERVABLE__UPPER_RX_DECODER_OUTPUT_FLD);
+
+ if (rx_dec_out < MAX_RX_DEC_OUT) {
+ ARRAY_FIELD_DP32(s->regs, DLL_OBSERVABLE_UPPER_REG,
+ DLL_OBSERVABLE__UPPER_RX_DECODER_OUTPUT_FLD,
+ rx_dec_out + 1);
+ }
+
+ return val;
+}
+
+
+static void xlnx_versal_ospi_reset(DeviceState *dev)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(dev);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
+ register_reset(&s->regs_info[i]);
+ }
+
+ fifo8_reset(&s->rx_fifo);
+ fifo8_reset(&s->tx_fifo);
+ fifo8_reset(&s->rx_sram);
+ fifo8_reset(&s->tx_sram);
+
+ s->rd_ind_op[0].completed = true;
+ s->rd_ind_op[1].completed = true;
+ s->wr_ind_op[0].completed = true;
+ s->wr_ind_op[1].completed = true;
+ ARRAY_FIELD_DP32(s->regs, DLL_OBSERVABLE_LOWER_REG,
+ DLL_OBSERVABLE_LOWER_DLL_LOCK_FLD, 1);
+ ARRAY_FIELD_DP32(s->regs, DLL_OBSERVABLE_LOWER_REG,
+ DLL_OBSERVABLE_LOWER_LOOPBACK_LOCK_FLD, 1);
+}
+
+static RegisterAccessInfo ospi_regs_info[] = {
+ { .name = "CONFIG_REG",
+ .addr = A_CONFIG_REG,
+ .reset = 0x80780081,
+ .ro = 0x9c000000,
+ },{ .name = "DEV_INSTR_RD_CONFIG_REG",
+ .addr = A_DEV_INSTR_RD_CONFIG_REG,
+ .reset = 0x3,
+ .ro = 0xe0ecc800,
+ },{ .name = "DEV_INSTR_WR_CONFIG_REG",
+ .addr = A_DEV_INSTR_WR_CONFIG_REG,
+ .reset = 0x2,
+ .ro = 0xe0fcce00,
+ },{ .name = "DEV_DELAY_REG",
+ .addr = A_DEV_DELAY_REG,
+ },{ .name = "RD_DATA_CAPTURE_REG",
+ .addr = A_RD_DATA_CAPTURE_REG,
+ .reset = 0x1,
+ .ro = 0xfff0fec0,
+ },{ .name = "DEV_SIZE_CONFIG_REG",
+ .addr = A_DEV_SIZE_CONFIG_REG,
+ .reset = 0x101002,
+ .ro = 0xe0000000,
+ },{ .name = "SRAM_PARTITION_CFG_REG",
+ .addr = A_SRAM_PARTITION_CFG_REG,
+ .reset = 0x80,
+ .ro = 0xffffff00,
+ },{ .name = "IND_AHB_ADDR_TRIGGER_REG",
+ .addr = A_IND_AHB_ADDR_TRIGGER_REG,
+ },{ .name = "DMA_PERIPH_CONFIG_REG",
+ .addr = A_DMA_PERIPH_CONFIG_REG,
+ .ro = 0xfffff0f0,
+ },{ .name = "REMAP_ADDR_REG",
+ .addr = A_REMAP_ADDR_REG,
+ },{ .name = "MODE_BIT_CONFIG_REG",
+ .addr = A_MODE_BIT_CONFIG_REG,
+ .reset = 0x200,
+ .ro = 0xffff7800,
+ },{ .name = "SRAM_FILL_REG",
+ .addr = A_SRAM_FILL_REG,
+ .ro = 0xffffffff,
+ .post_read = sram_fill_reg_post_read,
+ },{ .name = "TX_THRESH_REG",
+ .addr = A_TX_THRESH_REG,
+ .reset = 0x1,
+ .ro = 0xffffffe0,
+ },{ .name = "RX_THRESH_REG",
+ .addr = A_RX_THRESH_REG,
+ .reset = 0x1,
+ .ro = 0xffffffe0,
+ },{ .name = "WRITE_COMPLETION_CTRL_REG",
+ .addr = A_WRITE_COMPLETION_CTRL_REG,
+ .reset = 0x10005,
+ .ro = 0x1800,
+ },{ .name = "NO_OF_POLLS_BEF_EXP_REG",
+ .addr = A_NO_OF_POLLS_BEF_EXP_REG,
+ .reset = 0xffffffff,
+ },{ .name = "IRQ_STATUS_REG",
+ .addr = A_IRQ_STATUS_REG,
+ .ro = 0xfff08000,
+ .w1c = 0xf7fff,
+ },{ .name = "IRQ_MASK_REG",
+ .addr = A_IRQ_MASK_REG,
+ .ro = 0xfff08000,
+ },{ .name = "LOWER_WR_PROT_REG",
+ .addr = A_LOWER_WR_PROT_REG,
+ },{ .name = "UPPER_WR_PROT_REG",
+ .addr = A_UPPER_WR_PROT_REG,
+ },{ .name = "WR_PROT_CTRL_REG",
+ .addr = A_WR_PROT_CTRL_REG,
+ .ro = 0xfffffffc,
+ },{ .name = "INDIRECT_READ_XFER_CTRL_REG",
+ .addr = A_INDIRECT_READ_XFER_CTRL_REG,
+ .ro = 0xffffffd4,
+ .w1c = 0x08,
+ .pre_write = ind_rd_xfer_ctrl_reg_pre_write,
+ .post_write = ind_rd_xfer_ctrl_reg_post_write,
+ .post_read = ind_rd_xfer_ctrl_reg_post_read,
+ },{ .name = "INDIRECT_READ_XFER_WATERMARK_REG",
+ .addr = A_INDIRECT_READ_XFER_WATERMARK_REG,
+ },{ .name = "INDIRECT_READ_XFER_START_REG",
+ .addr = A_INDIRECT_READ_XFER_START_REG,
+ },{ .name = "INDIRECT_READ_XFER_NUM_BYTES_REG",
+ .addr = A_INDIRECT_READ_XFER_NUM_BYTES_REG,
+ },{ .name = "INDIRECT_WRITE_XFER_CTRL_REG",
+ .addr = A_INDIRECT_WRITE_XFER_CTRL_REG,
+ .ro = 0xffffffdc,
+ .w1c = 0x20,
+ .pre_write = ind_wr_xfer_ctrl_reg_pre_write,
+ .post_write = ind_wr_xfer_ctrl_reg_post_write,
+ .post_read = ind_wr_xfer_ctrl_reg_post_read,
+ },{ .name = "INDIRECT_WRITE_XFER_WATERMARK_REG",
+ .addr = A_INDIRECT_WRITE_XFER_WATERMARK_REG,
+ .reset = 0xffffffff,
+ },{ .name = "INDIRECT_WRITE_XFER_START_REG",
+ .addr = A_INDIRECT_WRITE_XFER_START_REG,
+ },{ .name = "INDIRECT_WRITE_XFER_NUM_BYTES_REG",
+ .addr = A_INDIRECT_WRITE_XFER_NUM_BYTES_REG,
+ },{ .name = "INDIRECT_TRIGGER_ADDR_RANGE_REG",
+ .addr = A_INDIRECT_TRIGGER_ADDR_RANGE_REG,
+ .reset = 0x4,
+ .ro = 0xfffffff0,
+ },{ .name = "FLASH_COMMAND_CTRL_MEM_REG",
+ .addr = A_FLASH_COMMAND_CTRL_MEM_REG,
+ .ro = 0xe008fffe,
+ .post_write = flash_cmd_ctrl_mem_reg_post_write,
+ },{ .name = "FLASH_CMD_CTRL_REG",
+ .addr = A_FLASH_CMD_CTRL_REG,
+ .ro = 0x7a,
+ .post_write = flash_cmd_ctrl_reg_post_write,
+ },{ .name = "FLASH_CMD_ADDR_REG",
+ .addr = A_FLASH_CMD_ADDR_REG,
+ },{ .name = "FLASH_RD_DATA_LOWER_REG",
+ .addr = A_FLASH_RD_DATA_LOWER_REG,
+ .ro = 0xffffffff,
+ },{ .name = "FLASH_RD_DATA_UPPER_REG",
+ .addr = A_FLASH_RD_DATA_UPPER_REG,
+ .ro = 0xffffffff,
+ },{ .name = "FLASH_WR_DATA_LOWER_REG",
+ .addr = A_FLASH_WR_DATA_LOWER_REG,
+ },{ .name = "FLASH_WR_DATA_UPPER_REG",
+ .addr = A_FLASH_WR_DATA_UPPER_REG,
+ },{ .name = "POLLING_FLASH_STATUS_REG",
+ .addr = A_POLLING_FLASH_STATUS_REG,
+ .ro = 0xfff0ffff,
+ },{ .name = "PHY_CONFIGURATION_REG",
+ .addr = A_PHY_CONFIGURATION_REG,
+ .reset = 0x40000000,
+ .ro = 0x1f80ff80,
+ },{ .name = "PHY_MASTER_CONTROL_REG",
+ .addr = A_PHY_MASTER_CONTROL_REG,
+ .reset = 0x800000,
+ .ro = 0xfe08ff80,
+ },{ .name = "DLL_OBSERVABLE_LOWER_REG",
+ .addr = A_DLL_OBSERVABLE_LOWER_REG,
+ .ro = 0xffffffff,
+ },{ .name = "DLL_OBSERVABLE_UPPER_REG",
+ .addr = A_DLL_OBSERVABLE_UPPER_REG,
+ .ro = 0xffffffff,
+ .post_read = dll_obs_upper_reg_post_read,
+ },{ .name = "OPCODE_EXT_LOWER_REG",
+ .addr = A_OPCODE_EXT_LOWER_REG,
+ .reset = 0x13edfa00,
+ },{ .name = "OPCODE_EXT_UPPER_REG",
+ .addr = A_OPCODE_EXT_UPPER_REG,
+ .reset = 0x6f90000,
+ .ro = 0xffff,
+ },{ .name = "MODULE_ID_REG",
+ .addr = A_MODULE_ID_REG,
+ .reset = 0x300,
+ .ro = 0xffffffff,
+ }
+};
+
+/* Return dev-obj from reg-region created by register_init_block32 */
+static XlnxVersalOspi *xilinx_ospi_of_mr(void *mr_accessor)
+{
+ RegisterInfoArray *reg_array = mr_accessor;
+ Object *dev;
+
+ dev = reg_array->mem.owner;
+ assert(dev);
+
+ return XILINX_VERSAL_OSPI(dev);
+}
+
+static void ospi_write(void *opaque, hwaddr addr, uint64_t value,
+ unsigned int size)
+{
+ XlnxVersalOspi *s = xilinx_ospi_of_mr(opaque);
+
+ register_write_memory(opaque, addr, value, size);
+ ospi_update_irq_line(s);
+}
+
+static const MemoryRegionOps ospi_ops = {
+ .read = register_read_memory,
+ .write = ospi_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static uint64_t ospi_indac_read(void *opaque, unsigned int size)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(opaque);
+ uint64_t ret = ospi_rx_sram_read(s, size);
+
+ if (!ospi_ind_op_completed(s->rd_ind_op)) {
+ ospi_do_ind_read(s);
+ }
+ return ret;
+}
+
+static void ospi_indac_write(void *opaque, uint64_t value, unsigned int size)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(opaque);
+
+ g_assert(!s->ind_write_disabled);
+
+ if (!ospi_ind_op_completed(s->wr_ind_op)) {
+ ospi_tx_sram_write(s, value, size);
+ ospi_do_indirect_write(s);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "OSPI wr into indac area while no ongoing indac wr\n");
+ }
+}
+
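+/*
+ * Illustration (example register values): with IND_AHB_ADDR_TRIGGER_REG
+ * set to 0xc0000000 and IND_RANGE_WIDTH_FLD at its reset value of 4,
+ * accesses to offsets 0x0..0xf of the trigger region are treated as
+ * indac accesses.
+ */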
+static bool is_inside_indac_range(XlnxVersalOspi *s, hwaddr addr)
+{
+ uint32_t range_start;
+ uint32_t range_end;
+
+ if (ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENB_DMA_IF_FLD)) {
+ return true;
+ }
+
+ range_start = s->regs[R_IND_AHB_ADDR_TRIGGER_REG];
+ range_end = range_start +
+ (1 << ARRAY_FIELD_EX32(s->regs,
+ INDIRECT_TRIGGER_ADDR_RANGE_REG,
+ IND_RANGE_WIDTH_FLD));
+
+ addr += s->regs[R_IND_AHB_ADDR_TRIGGER_REG] & 0xF0000000;
+
+ return addr >= range_start && addr < range_end;
+}
+
+static bool ospi_is_indac_active(XlnxVersalOspi *s)
+{
+ /*
+ * Indirect access is considered active either when DAC and INDAC are
+ * allowed to be active simultaneously, or when DAC is disabled.
+ */
+ return s->dac_with_indac || !s->dac_enable;
+}
+
+static uint64_t ospi_dac_read(void *opaque, hwaddr addr, unsigned int size)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(opaque);
+
+ if (ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENB_SPI_FLD)) {
+ if (ospi_is_indac_active(s) &&
+ is_inside_indac_range(s, addr)) {
+ return ospi_indac_read(s, size);
+ }
+ if (ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENB_DIR_ACC_CTLR_FLD)
+ && s->dac_enable) {
+ if (ARRAY_FIELD_EX32(s->regs,
+ CONFIG_REG, ENB_AHB_ADDR_REMAP_FLD)) {
+ addr += s->regs[R_REMAP_ADDR_REG];
+ }
+ return ospi_do_dac_read(opaque, addr, size);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "OSPI AHB rd while DAC disabled\n");
+ }
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "OSPI AHB rd while OSPI disabled\n");
+ }
+
+ return 0;
+}
+
+static void ospi_dac_write(void *opaque, hwaddr addr, uint64_t value,
+ unsigned int size)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(opaque);
+
+ if (ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENB_SPI_FLD)) {
+ if (ospi_is_indac_active(s) &&
+ !s->ind_write_disabled &&
+ is_inside_indac_range(s, addr)) {
+ return ospi_indac_write(s, value, size);
+ }
+ if (ARRAY_FIELD_EX32(s->regs, CONFIG_REG, ENB_DIR_ACC_CTLR_FLD) &&
+ s->dac_enable) {
+ if (ARRAY_FIELD_EX32(s->regs,
+ CONFIG_REG, ENB_AHB_ADDR_REMAP_FLD)) {
+ addr += s->regs[R_REMAP_ADDR_REG];
+ }
+ /* Check if addr is write protected */
+ if (ARRAY_FIELD_EX32(s->regs, WR_PROT_CTRL_REG, ENB_FLD) &&
+ ospi_is_write_protected(s, addr)) {
+ set_irq(s, R_IRQ_STATUS_REG_PROT_WR_ATTEMPT_FLD_MASK);
+ ospi_update_irq_line(s);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "OSPI writing into write protected area\n");
+ return;
+ }
+ ospi_do_dac_write(opaque, addr, value, size);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "OSPI AHB wr while DAC disabled\n");
+ }
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "OSPI AHB wr while OSPI disabled\n");
+ }
+}
+
+static const MemoryRegionOps ospi_dac_ops = {
+ .read = ospi_dac_read,
+ .write = ospi_dac_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static void ospi_update_dac_status(void *opaque, int n, int level)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(opaque);
+
+ s->dac_enable = level;
+}
+
+static void xlnx_versal_ospi_realize(DeviceState *dev, Error **errp)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+
+ s->num_cs = 4;
+ s->spi = ssi_create_bus(dev, "spi0");
+ s->cs_lines = g_new0(qemu_irq, s->num_cs);
+ for (int i = 0; i < s->num_cs; ++i) {
+ sysbus_init_irq(sbd, &s->cs_lines[i]);
+ }
+
+ fifo8_create(&s->rx_fifo, RXFF_SZ);
+ fifo8_create(&s->tx_fifo, TXFF_SZ);
+ fifo8_create(&s->rx_sram, RXFF_SZ);
+ fifo8_create(&s->tx_sram, TXFF_SZ);
+}
+
+static void xlnx_versal_ospi_init(Object *obj)
+{
+ XlnxVersalOspi *s = XILINX_VERSAL_OSPI(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ DeviceState *dev = DEVICE(obj);
+ RegisterInfoArray *reg_array;
+
+ memory_region_init(&s->iomem, obj, TYPE_XILINX_VERSAL_OSPI,
+ XILINX_VERSAL_OSPI_R_MAX * 4);
+ reg_array =
+ register_init_block32(DEVICE(obj), ospi_regs_info,
+ ARRAY_SIZE(ospi_regs_info),
+ s->regs_info, s->regs,
+ &ospi_ops,
+ XILINX_VERSAL_OSPI_ERR_DEBUG,
+ XILINX_VERSAL_OSPI_R_MAX * 4);
+ memory_region_add_subregion(&s->iomem, 0x0, &reg_array->mem);
+ sysbus_init_mmio(sbd, &s->iomem);
+
+ memory_region_init_io(&s->iomem_dac, obj, &ospi_dac_ops, s,
+ TYPE_XILINX_VERSAL_OSPI "-dac", 0x20000000);
+ sysbus_init_mmio(sbd, &s->iomem_dac);
+
+ sysbus_init_irq(sbd, &s->irq);
+
+ object_property_add_link(obj, "dma-src", TYPE_XLNX_CSU_DMA,
+ (Object **)&s->dma_src,
+ object_property_allow_set_link,
+ OBJ_PROP_LINK_STRONG);
+
+ qdev_init_gpio_in_named(dev, ospi_update_dac_status, "ospi-mux-sel", 1);
+}
+
+static const VMStateDescription vmstate_ind_op = {
+ .name = "OSPIIndOp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(flash_addr, IndOp),
+ VMSTATE_UINT32(num_bytes, IndOp),
+ VMSTATE_UINT32(done_bytes, IndOp),
+ VMSTATE_BOOL(completed, IndOp),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_xlnx_versal_ospi = {
+ .name = TYPE_XILINX_VERSAL_OSPI,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_FIFO8(rx_fifo, XlnxVersalOspi),
+ VMSTATE_FIFO8(tx_fifo, XlnxVersalOspi),
+ VMSTATE_FIFO8(rx_sram, XlnxVersalOspi),
+ VMSTATE_FIFO8(tx_sram, XlnxVersalOspi),
+ VMSTATE_BOOL(ind_write_disabled, XlnxVersalOspi),
+ VMSTATE_BOOL(dac_with_indac, XlnxVersalOspi),
+ VMSTATE_BOOL(dac_enable, XlnxVersalOspi),
+ VMSTATE_BOOL(src_dma_inprog, XlnxVersalOspi),
+ VMSTATE_STRUCT_ARRAY(rd_ind_op, XlnxVersalOspi, 2, 1,
+ vmstate_ind_op, IndOp),
+ VMSTATE_STRUCT_ARRAY(wr_ind_op, XlnxVersalOspi, 2, 1,
+ vmstate_ind_op, IndOp),
+ VMSTATE_UINT32_ARRAY(regs, XlnxVersalOspi, XILINX_VERSAL_OSPI_R_MAX),
+ VMSTATE_UINT8_ARRAY(stig_membank, XlnxVersalOspi, 512),
+ VMSTATE_END_OF_LIST(),
+ }
+};
+
+static Property xlnx_versal_ospi_properties[] = {
+ DEFINE_PROP_BOOL("dac-with-indac", XlnxVersalOspi, dac_with_indac, false),
+ DEFINE_PROP_BOOL("indac-write-disabled", XlnxVersalOspi,
+ ind_write_disabled, false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void xlnx_versal_ospi_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = xlnx_versal_ospi_reset;
+ dc->realize = xlnx_versal_ospi_realize;
+ dc->vmsd = &vmstate_xlnx_versal_ospi;
+ device_class_set_props(dc, xlnx_versal_ospi_properties);
+}
+
+static const TypeInfo xlnx_versal_ospi_info = {
+ .name = TYPE_XILINX_VERSAL_OSPI,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(XlnxVersalOspi),
+ .class_init = xlnx_versal_ospi_class_init,
+ .instance_init = xlnx_versal_ospi_init,
+};
+
+static void xlnx_versal_ospi_register_types(void)
+{
+ type_register_static(&xlnx_versal_ospi_info);
+}
+
+type_init(xlnx_versal_ospi_register_types)
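
For illustration, a minimal sketch of how a machine model might instantiate and
wire this controller (not taken from this series; the Versal container fields,
the mt35xu01g flash model and the helper name are assumptions). Note that
instance_init registers the device interrupt before realize() creates the chip
selects, so the interrupt is sysbus IRQ 0 and the CS lines are sysbus IRQs 1..4.

    static void example_versal_create_ospi(Versal *s, qemu_irq pic[])
    {
        SysBusDevice *sbd;
        DeviceState *flash;
        BusState *spi_bus;

        object_initialize_child(OBJECT(s), "ospi", &s->pmc.iou.ospi.ospi,
                                TYPE_XILINX_VERSAL_OSPI);
        sbd = SYS_BUS_DEVICE(&s->pmc.iou.ospi.ospi);
        sysbus_realize(sbd, &error_fatal);

        /* Map the register block and the linear (DAC) window */
        memory_region_add_subregion(&s->mr_ps, MM_PMC_OSPI,
                                    sysbus_mmio_get_region(sbd, 0));
        memory_region_add_subregion(&s->mr_ps, MM_PMC_OSPI_DAC,
                                    sysbus_mmio_get_region(sbd, 1));

        /* Device interrupt */
        sysbus_connect_irq(sbd, 0, pic[VERSAL_OSPI_IRQ]);

        /* Attach one flash on the controller's SSI bus and wire CS 0 */
        spi_bus = qdev_get_child_bus(DEVICE(sbd), "spi0");
        flash = qdev_new("mt35xu01g");
        qdev_realize_and_unref(flash, spi_bus, &error_fatal);
        sysbus_connect_irq(sbd, 1,
                           qdev_get_gpio_in_named(flash, SSI_GPIO_CS, 0));
    }
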
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index 72da12f..688eccd 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -592,7 +592,6 @@ static const VMStateDescription vmstate_virtio_mmio = {
.name = "virtio_mmio",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_END_OF_LIST()
},
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 750aa47..f9cf959 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -131,7 +131,6 @@ static const VMStateDescription vmstate_virtio_pci = {
.name = "virtio_pci",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_END_OF_LIST()
},
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index aae72fb..9e8f51d 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -2808,7 +2808,6 @@ static const VMStateDescription vmstate_virtio = {
.name = "virtio",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_END_OF_LIST()
},
diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h
index 895ba12..1b5ad4d 100644
--- a/include/hw/arm/xlnx-versal.h
+++ b/include/hw/arm/xlnx-versal.h
@@ -26,6 +26,9 @@
#include "hw/misc/xlnx-versal-xramc.h"
#include "hw/nvram/xlnx-bbram.h"
#include "hw/nvram/xlnx-versal-efuse.h"
+#include "hw/ssi/xlnx-versal-ospi.h"
+#include "hw/dma/xlnx_csu_dma.h"
+#include "hw/misc/xlnx-versal-pmc-iou-slcr.h"
#define TYPE_XLNX_VERSAL "xlnx-versal"
OBJECT_DECLARE_SIMPLE_TYPE(Versal, XLNX_VERSAL)
@@ -78,6 +81,15 @@ struct Versal {
struct {
struct {
SDHCIState sd[XLNX_VERSAL_NR_SDS];
+ XlnxVersalPmcIouSlcr slcr;
+
+ struct {
+ XlnxVersalOspi ospi;
+ XlnxCSUDMA dma_src;
+ XlnxCSUDMA dma_dst;
+ MemoryRegion linear_mr;
+ qemu_or_irq irq_orgate;
+ } ospi;
} iou;
XlnxZynqMPRTC rtc;
@@ -85,6 +97,8 @@ struct Versal {
XlnxEFuse efuse;
XlnxVersalEFuseCtrl efuse_ctrl;
XlnxVersalEFuseCache efuse_cache;
+
+ qemu_or_irq apb_irq_orgate;
} pmc;
struct {
@@ -111,8 +125,8 @@ struct Versal {
#define VERSAL_GEM1_WAKE_IRQ_0 59
#define VERSAL_ADMA_IRQ_0 60
#define VERSAL_XRAM_IRQ_0 79
-#define VERSAL_BBRAM_APB_IRQ_0 121
-#define VERSAL_RTC_APB_ERR_IRQ 121
+#define VERSAL_PMC_APB_IRQ 121
+#define VERSAL_OSPI_IRQ 124
#define VERSAL_SD0_IRQ_0 126
#define VERSAL_EFUSE_IRQ 139
#define VERSAL_RTC_ALARM_IRQ 142
@@ -178,6 +192,18 @@ struct Versal {
#define MM_FPD_FPD_APU 0xfd5c0000
#define MM_FPD_FPD_APU_SIZE 0x100
+#define MM_PMC_PMC_IOU_SLCR 0xf1060000
+#define MM_PMC_PMC_IOU_SLCR_SIZE 0x10000
+
+#define MM_PMC_OSPI 0xf1010000
+#define MM_PMC_OSPI_SIZE 0x10000
+
+#define MM_PMC_OSPI_DAC 0xc0000000
+#define MM_PMC_OSPI_DAC_SIZE 0x20000000
+
+#define MM_PMC_OSPI_DMA_DST 0xf1011800
+#define MM_PMC_OSPI_DMA_SRC 0xf1011000
+
#define MM_PMC_SD0 0xf1040000U
#define MM_PMC_SD0_SIZE 0x10000
#define MM_PMC_BBRAM_CTRL 0xf11f0000
diff --git a/include/hw/dma/xlnx_csu_dma.h b/include/hw/dma/xlnx_csu_dma.h
index 9e9dc55..922ab80 100644
--- a/include/hw/dma/xlnx_csu_dma.h
+++ b/include/hw/dma/xlnx_csu_dma.h
@@ -21,6 +21,11 @@
#ifndef XLNX_CSU_DMA_H
#define XLNX_CSU_DMA_H
+#include "hw/sysbus.h"
+#include "hw/register.h"
+#include "hw/ptimer.h"
+#include "hw/stream.h"
+
#define TYPE_XLNX_CSU_DMA "xlnx.csu_dma"
#define XLNX_CSU_DMA_R_MAX (0x2c / 4)
@@ -46,7 +51,22 @@ typedef struct XlnxCSUDMA {
RegisterInfo regs_info[XLNX_CSU_DMA_R_MAX];
} XlnxCSUDMA;
-#define XLNX_CSU_DMA(obj) \
- OBJECT_CHECK(XlnxCSUDMA, (obj), TYPE_XLNX_CSU_DMA)
+OBJECT_DECLARE_TYPE(XlnxCSUDMA, XlnxCSUDMAClass, XLNX_CSU_DMA)
+
+struct XlnxCSUDMAClass {
+ SysBusDeviceClass parent_class;
+
+ /*
+ * read: Start a read transfer on a Xilinx CSU DMA engine
+ *
+ * @s: the Xilinx CSU DMA engine to start the transfer on
+ * @addr: the address to read
+ * @len: the number of bytes to read at 'addr'
+ *
+ * @return a MemTxResult indicating whether the operation succeeded ('len'
+ * bytes were read) or failed.
+ */
+ MemTxResult (*read)(XlnxCSUDMA *s, hwaddr addr, uint32_t len);
+};
#endif
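
As an aside, the new class hook gives a peripheral a direct way to ask the
source channel to pull data from it. A minimal sketch of a caller (the wrapper
function and its argument names are illustrative, not from this series):

    static void example_start_src_dma(XlnxCSUDMA *dma, hwaddr flash_addr,
                                      uint32_t num_bytes)
    {
        XlnxCSUDMAClass *xcdc = XLNX_CSU_DMA_GET_CLASS(dma);

        /* Start a read of 'num_bytes' bytes at 'flash_addr' */
        if (xcdc->read(dma, flash_addr, num_bytes) != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR, "CSU DMA source read failed\n");
        }
    }
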
diff --git a/include/hw/intc/arm_gicv3_its_common.h b/include/hw/intc/arm_gicv3_its_common.h
index b32c697..3e2ad2d 100644
--- a/include/hw/intc/arm_gicv3_its_common.h
+++ b/include/hw/intc/arm_gicv3_its_common.h
@@ -47,7 +47,6 @@ typedef struct {
uint16_t entry_sz;
uint32_t page_sz;
uint32_t num_entries;
- uint32_t num_ids;
uint64_t base_addr;
} TableDesc;
diff --git a/include/hw/misc/xlnx-versal-pmc-iou-slcr.h b/include/hw/misc/xlnx-versal-pmc-iou-slcr.h
new file mode 100644
index 0000000..ab4e4b4
--- /dev/null
+++ b/include/hw/misc/xlnx-versal-pmc-iou-slcr.h
@@ -0,0 +1,78 @@
+/*
+ * Header file for the Xilinx Versal's PMC IOU SLCR
+ *
+ * Copyright (C) 2021 Xilinx Inc
+ * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/*
+ * This is a model of Xilinx Versal's PMC I/O Peripheral Control and Status
+ * module documented in Versal's Technical Reference manual [1] and the Versal
+ * ACAP Register reference [2].
+ *
+ * References:
+ *
+ * [1] Versal ACAP Technical Reference Manual,
+ * https://www.xilinx.com/support/documentation/architecture-manuals/am011-versal-acap-trm.pdf
+ *
+ * [2] Versal ACAP Register Reference,
+ * https://www.xilinx.com/html_docs/registers/am012/am012-versal-register-reference.html#mod___pmc_iop_slcr.html
+ *
+ * QEMU interface:
+ * + sysbus MMIO region 0: MemoryRegion for the device's registers
+ * + sysbus IRQ 0: PMC (AXI and APB) parity error interrupt detected by the PMC
+ * I/O peripherals.
+ * + sysbus IRQ 1: Device interrupt.
+ * + Named GPIO output "sd-emmc-sel[0]": Selects SD mode (0) or eMMC mode (1)
+ * on SD/eMMC controller 0.
+ * + Named GPIO output "sd-emmc-sel[1]": Selects SD mode (0) or eMMC mode (1)
+ * on SD/eMMC controller 1.
+ * + Named GPIO output "qspi-ospi-mux-sel": Selects the QSPI linear region (0)
+ * or the OSPI linear region (1).
+ * + Named GPIO output "ospi-mux-sel": Selects OSPI indirect access mode (0)
+ * or OSPI direct access mode (1).
+ */
+
+#ifndef XILINX_VERSAL_PMC_IOU_SLCR_H
+#define XILINX_VERSAL_PMC_IOU_SLCR_H
+
+#include "hw/register.h"
+
+#define TYPE_XILINX_VERSAL_PMC_IOU_SLCR "xlnx.versal-pmc-iou-slcr"
+
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalPmcIouSlcr, XILINX_VERSAL_PMC_IOU_SLCR)
+
+#define XILINX_VERSAL_PMC_IOU_SLCR_R_MAX (0x828 / 4 + 1)
+
+struct XlnxVersalPmcIouSlcr {
+ SysBusDevice parent_obj;
+ MemoryRegion iomem;
+ qemu_irq irq_parity_imr;
+ qemu_irq irq_imr;
+ qemu_irq sd_emmc_sel[2];
+ qemu_irq qspi_ospi_mux_sel;
+ qemu_irq ospi_mux_sel;
+
+ uint32_t regs[XILINX_VERSAL_PMC_IOU_SLCR_R_MAX];
+ RegisterInfo regs_info[XILINX_VERSAL_PMC_IOU_SLCR_R_MAX];
+};
+
+#endif /* XILINX_VERSAL_PMC_IOU_SLCR_H */
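
The named GPIO outputs listed above are meant to be wired up by the machine
model. A small sketch of how the "ospi-mux-sel" output could be connected to
the OSPI controller's input of the same name (the container fields used here
are assumptions, not code from this series):

    DeviceState *slcr = DEVICE(&s->pmc.iou.slcr);

    qdev_connect_gpio_out_named(slcr, "ospi-mux-sel", 0,
            qdev_get_gpio_in_named(DEVICE(&s->pmc.iou.ospi.ospi),
                                   "ospi-mux-sel", 0));
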
diff --git a/include/hw/ppc/vof.h b/include/hw/ppc/vof.h
index 97fdef7..f8c0eff 100644
--- a/include/hw/ppc/vof.h
+++ b/include/hw/ppc/vof.h
@@ -6,6 +6,11 @@
#ifndef HW_VOF_H
#define HW_VOF_H
+#include "qom/object.h"
+#include "exec/address-spaces.h"
+#include "exec/memory.h"
+#include "cpu.h"
+
typedef struct Vof {
uint64_t top_addr; /* copied from rma_size */
GArray *claimed; /* array of SpaprOfClaimed */
diff --git a/include/hw/ssi/xlnx-versal-ospi.h b/include/hw/ssi/xlnx-versal-ospi.h
new file mode 100644
index 0000000..14d1263
--- /dev/null
+++ b/include/hw/ssi/xlnx-versal-ospi.h
@@ -0,0 +1,111 @@
+/*
+ * Header file for the Xilinx Versal's OSPI controller
+ *
+ * Copyright (C) 2021 Xilinx Inc
+ * Written by Francisco Iglesias <francisco.iglesias@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/*
+ * This is a model of Xilinx Versal's Octal SPI flash memory controller
+ * documented in Versal's Technical Reference manual [1] and the Versal ACAP
+ * Register reference [2].
+ *
+ * References:
+ *
+ * [1] Versal ACAP Technical Reference Manual,
+ * https://www.xilinx.com/support/documentation/architecture-manuals/am011-versal-acap-trm.pdf
+ *
+ * [2] Versal ACAP Register Reference,
+ * https://www.xilinx.com/html_docs/registers/am012/am012-versal-register-reference.html#mod___ospi.html
+ *
+ *
+ * QEMU interface:
+ * + sysbus MMIO region 0: MemoryRegion for the device's registers
+ * + sysbus MMIO region 1: MemoryRegion for flash memory linear address space
+ * (data transfer).
+ * + sysbus IRQ 0: Device interrupt.
+ * + Named GPIO input "ospi-mux-sel": Selects indirect access mode (0) or
+ * direct access mode (1).
+ * + Property "dac-with-indac": Allow both direct accesses and indirect
+ * accesses simultaneously.
+ * + Property "indac-write-disabled": Disable indirect access writes.
+ */
+
+#ifndef XILINX_VERSAL_OSPI_H
+#define XILINX_VERSAL_OSPI_H
+
+#include "hw/register.h"
+#include "hw/ssi/ssi.h"
+#include "qemu/fifo8.h"
+#include "hw/dma/xlnx_csu_dma.h"
+
+#define TYPE_XILINX_VERSAL_OSPI "xlnx.versal-ospi"
+
+OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalOspi, XILINX_VERSAL_OSPI)
+
+#define XILINX_VERSAL_OSPI_R_MAX (0xfc / 4 + 1)
+
+/*
+ * Indirect operations
+ */
+typedef struct IndOp {
+ uint32_t flash_addr;
+ uint32_t num_bytes;
+ uint32_t done_bytes;
+ bool completed;
+} IndOp;
+
+struct XlnxVersalOspi {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ MemoryRegion iomem_dac;
+
+ uint8_t num_cs;
+ qemu_irq *cs_lines;
+
+ SSIBus *spi;
+
+ Fifo8 rx_fifo;
+ Fifo8 tx_fifo;
+
+ Fifo8 rx_sram;
+ Fifo8 tx_sram;
+
+ qemu_irq irq;
+
+ XlnxCSUDMA *dma_src;
+ bool ind_write_disabled;
+ bool dac_with_indac;
+ bool dac_enable;
+ bool src_dma_inprog;
+
+ IndOp rd_ind_op[2];
+ IndOp wr_ind_op[2];
+
+ uint32_t regs[XILINX_VERSAL_OSPI_R_MAX];
+ RegisterInfo regs_info[XILINX_VERSAL_OSPI_R_MAX];
+
+ /* Maximum inferred membank size is 512 bytes */
+ uint8_t stig_membank[512];
+};
+
+#endif /* XILINX_VERSAL_OSPI_H */
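
For reference, a sketch of how the two properties and the "dma-src" link
documented above might be configured before the device is realized (the
container fields are assumptions, not code from this series):

    Object *ospi = OBJECT(&s->pmc.iou.ospi.ospi);

    object_property_set_bool(ospi, "dac-with-indac", true, &error_abort);
    object_property_set_bool(ospi, "indac-write-disabled", false, &error_abort);
    object_property_set_link(ospi, "dma-src",
                             OBJECT(&s->pmc.iou.ospi.dma_src), &error_abort);
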
diff --git a/include/qemu-common.h b/include/qemu-common.h
index 73bcf76..68b2e3b 100644
--- a/include/qemu-common.h
+++ b/include/qemu-common.h
@@ -13,7 +13,7 @@
#define TFR(expr) do { if ((expr) != -1) break; } while (errno == EINTR)
/* Copyright string for -version arguments, About dialogs, etc */
-#define QEMU_COPYRIGHT "Copyright (c) 2003-2021 " \
+#define QEMU_COPYRIGHT "Copyright (c) 2003-2022 " \
"Fabrice Bellard and the QEMU Project developers"
/* Bug reporting information for --help arguments, About dialogs, etc */
@@ -26,9 +26,6 @@
int qemu_main(int argc, char **argv, char **envp);
#endif
-void qemu_get_timedate(struct tm *tm, int offset);
-int qemu_timedate_diff(struct tm *tm);
-
void *qemu_oom_check(void *ptr);
ssize_t qemu_write_full(int fd, const void *buf, size_t count)
diff --git a/include/sysemu/rtc.h b/include/sysemu/rtc.h
new file mode 100644
index 0000000..159702b
--- /dev/null
+++ b/include/sysemu/rtc.h
@@ -0,0 +1,58 @@
+/*
+ * RTC configuration and clock read
+ *
+ * Copyright (c) 2003-2021 QEMU contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef SYSEMU_RTC_H
+#define SYSEMU_RTC_H
+
+/**
+ * qemu_get_timedate: Get the current RTC time
+ * @tm: struct tm to fill in with RTC time
+ * @offset: offset in seconds to adjust the RTC time by before
+ * converting to struct tm format.
+ *
+ * This function fills in @tm with the current RTC time, as adjusted
+ * by @offset (for example, if @offset is 3600 then the returned time/date
+ * will be one hour further ahead than the current RTC time).
+ *
+ * The usual use is by RTC device models, which should call this function
+ * to find the time/date value that they should return to the guest
+ * when it reads the RTC registers.
+ *
+ * The behaviour of the clock whose value this function returns will
+ * depend on the -rtc command line option passed by the user.
+ */
+void qemu_get_timedate(struct tm *tm, int offset);
+
+/**
+ * qemu_timedate_diff: Return difference between a struct tm and the RTC
+ * @tm: struct tm containing the date/time to compare against
+ *
+ * Returns the difference in seconds between the RTC clock time
+ * and the date/time specified in @tm. For example, if @tm specifies
+ * a timestamp one hour further ahead than the current RTC time
+ * then this function will return 3600.
+ */
+int qemu_timedate_diff(struct tm *tm);
+
+#endif
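
The typical usage pattern in an RTC device model, following the documentation
above, is to remember an offset when the guest sets the clock and to reapply
it on reads. A minimal sketch (ExampleRTCState and its tick_offset field are
illustrative, not an existing device):

    #include "qemu/osdep.h"
    #include "sysemu/rtc.h"

    typedef struct ExampleRTCState {
        int tick_offset;   /* seconds the guest clock differs from the RTC */
    } ExampleRTCState;

    static void example_rtc_set_time(ExampleRTCState *s, struct tm *guest_tm)
    {
        /* Guest wrote a new time: store the delta against the RTC clock */
        s->tick_offset = qemu_timedate_diff(guest_tm);
    }

    static void example_rtc_get_time(ExampleRTCState *s, struct tm *out)
    {
        /* Guest reads the time: return the RTC clock plus the stored delta */
        qemu_get_timedate(out, s->tick_offset);
    }
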
diff --git a/migration/migration.c b/migration/migration.c
index 0652165..bcc385b 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1014,6 +1014,9 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
info->ram->page_size = page_size;
info->ram->multifd_bytes = ram_counters.multifd_bytes;
info->ram->pages_per_second = s->pages_per_second;
+ info->ram->precopy_bytes = ram_counters.precopy_bytes;
+ info->ram->downtime_bytes = ram_counters.downtime_bytes;
+ info->ram->postcopy_bytes = ram_counters.postcopy_bytes;
if (migrate_use_xbzrle()) {
info->has_xbzrle_cache = true;
@@ -2991,10 +2994,7 @@ static int postcopy_start(MigrationState *ms)
* that are dirty
*/
if (migrate_postcopy_ram()) {
- if (ram_postcopy_send_discard_bitmap(ms)) {
- error_report("postcopy send discard bitmap failed");
- goto fail;
- }
+ ram_postcopy_send_discard_bitmap(ms);
}
/*
@@ -3205,7 +3205,7 @@ static void migration_completion(MigrationState *s)
qemu_mutex_unlock_iothread();
trace_migration_completion_postcopy_end_after_complete();
- } else if (s->state == MIGRATION_STATUS_CANCELLING) {
+ } else {
goto fail;
}
@@ -3230,7 +3230,11 @@ static void migration_completion(MigrationState *s)
goto fail_invalidate;
}
- if (!migrate_colo_enabled()) {
+ if (migrate_colo_enabled() && s->state == MIGRATION_STATUS_ACTIVE) {
+ /* COLO does not support postcopy */
+ migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
+ MIGRATION_STATUS_COLO);
+ } else {
migrate_set_state(&s->state, current_active_state,
MIGRATION_STATUS_COMPLETED);
}
@@ -3621,16 +3625,6 @@ static void migration_iteration_finish(MigrationState *s)
"COLO enabled", __func__);
}
migrate_start_colo_process(s);
- /*
- * Fixme: we will run VM in COLO no matter its old running state.
- * After exited COLO, we will keep running.
- */
- /* Fallthrough */
- case MIGRATION_STATUS_ACTIVE:
- /*
- * We should really assert here, but since it's during
- * migration, let's try to reduce the usage of assertions.
- */
s->vm_was_running = true;
/* Fallthrough */
case MIGRATION_STATUS_FAILED:
diff --git a/migration/multifd-zlib.c b/migration/multifd-zlib.c
index da62017..aba1c88 100644
--- a/migration/multifd-zlib.c
+++ b/migration/multifd-zlib.c
@@ -51,16 +51,16 @@ static int zlib_send_setup(MultiFDSendParams *p, Error **errp)
zs->opaque = Z_NULL;
if (deflateInit(zs, migrate_multifd_zlib_level()) != Z_OK) {
g_free(z);
- error_setg(errp, "multifd %d: deflate init failed", p->id);
+ error_setg(errp, "multifd %u: deflate init failed", p->id);
return -1;
}
- /* To be safe, we reserve twice the size of the packet */
- z->zbuff_len = MULTIFD_PACKET_SIZE * 2;
+ /* This is the maximum size of the compressed buffer */
+ z->zbuff_len = compressBound(MULTIFD_PACKET_SIZE);
z->zbuff = g_try_malloc(z->zbuff_len);
if (!z->zbuff) {
deflateEnd(&z->zs);
g_free(z);
- error_setg(errp, "multifd %d: out of memory for zbuff", p->id);
+ error_setg(errp, "multifd %u: out of memory for zbuff", p->id);
return -1;
}
p->data = z;
@@ -106,16 +106,16 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp)
int ret;
uint32_t i;
- for (i = 0; i < p->pages->num; i++) {
+ for (i = 0; i < p->normal_num; i++) {
uint32_t available = z->zbuff_len - out_size;
int flush = Z_NO_FLUSH;
- if (i == p->pages->num - 1) {
+ if (i == p->normal_num - 1) {
flush = Z_SYNC_FLUSH;
}
zs->avail_in = page_size;
- zs->next_in = p->pages->block->host + p->pages->offset[i];
+ zs->next_in = p->pages->block->host + p->normal[i];
zs->avail_out = available;
zs->next_out = z->zbuff + out_size;
@@ -132,17 +132,20 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp)
ret = deflate(zs, flush);
} while (ret == Z_OK && zs->avail_in && zs->avail_out);
if (ret == Z_OK && zs->avail_in) {
- error_setg(errp, "multifd %d: deflate failed to compress all input",
+ error_setg(errp, "multifd %u: deflate failed to compress all input",
p->id);
return -1;
}
if (ret != Z_OK) {
- error_setg(errp, "multifd %d: deflate returned %d instead of Z_OK",
+ error_setg(errp, "multifd %u: deflate returned %d instead of Z_OK",
p->id, ret);
return -1;
}
out_size += available - zs->avail_out;
}
+ p->iov[p->iovs_num].iov_base = z->zbuff;
+ p->iov[p->iovs_num].iov_len = out_size;
+ p->iovs_num++;
p->next_packet_size = out_size;
p->flags |= MULTIFD_FLAG_ZLIB;
@@ -150,25 +153,6 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp)
}
/**
- * zlib_send_write: do the actual write of the data
- *
- * Do the actual write of the comprresed buffer.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @used: number of pages used
- * @errp: pointer to an error
- */
-static int zlib_send_write(MultiFDSendParams *p, uint32_t used, Error **errp)
-{
- struct zlib_data *z = p->data;
-
- return qio_channel_write_all(p->c, (void *)z->zbuff, p->next_packet_size,
- errp);
-}
-
-/**
* zlib_recv_setup: setup receive side
*
* Create the compressed channel and buffer.
@@ -190,7 +174,7 @@ static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp)
zs->avail_in = 0;
zs->next_in = Z_NULL;
if (inflateInit(zs) != Z_OK) {
- error_setg(errp, "multifd %d: inflate init failed", p->id);
+ error_setg(errp, "multifd %u: inflate init failed", p->id);
return -1;
}
/* To be safe, we reserve twice the size of the packet */
@@ -198,7 +182,7 @@ static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp)
z->zbuff = g_try_malloc(z->zbuff_len);
if (!z->zbuff) {
inflateEnd(zs);
- error_setg(errp, "multifd %d: out of memory for zbuff", p->id);
+ error_setg(errp, "multifd %u: out of memory for zbuff", p->id);
return -1;
}
return 0;
@@ -241,13 +225,13 @@ static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp)
uint32_t in_size = p->next_packet_size;
/* we measure the change of total_out */
uint32_t out_size = zs->total_out;
- uint32_t expected_size = p->pages->num * qemu_target_page_size();
+ uint32_t expected_size = p->normal_num * page_size;
uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
int ret;
int i;
if (flags != MULTIFD_FLAG_ZLIB) {
- error_setg(errp, "multifd %d: flags received %x flags expected %x",
+ error_setg(errp, "multifd %u: flags received %x flags expected %x",
p->id, flags, MULTIFD_FLAG_ZLIB);
return -1;
}
@@ -260,16 +244,16 @@ static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp)
zs->avail_in = in_size;
zs->next_in = z->zbuff;
- for (i = 0; i < p->pages->num; i++) {
+ for (i = 0; i < p->normal_num; i++) {
int flush = Z_NO_FLUSH;
unsigned long start = zs->total_out;
- if (i == p->pages->num - 1) {
+ if (i == p->normal_num - 1) {
flush = Z_SYNC_FLUSH;
}
zs->avail_out = page_size;
- zs->next_out = p->pages->block->host + p->pages->offset[i];
+ zs->next_out = p->host + p->normal[i];
/*
* Welcome to inflate semantics
@@ -284,19 +268,19 @@ static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp)
} while (ret == Z_OK && zs->avail_in
&& (zs->total_out - start) < page_size);
if (ret == Z_OK && (zs->total_out - start) < page_size) {
- error_setg(errp, "multifd %d: inflate generated too few output",
+ error_setg(errp, "multifd %u: inflate generated too few output",
p->id);
return -1;
}
if (ret != Z_OK) {
- error_setg(errp, "multifd %d: inflate returned %d instead of Z_OK",
+ error_setg(errp, "multifd %u: inflate returned %d instead of Z_OK",
p->id, ret);
return -1;
}
}
out_size = zs->total_out - out_size;
if (out_size != expected_size) {
- error_setg(errp, "multifd %d: packet size received %d size expected %d",
+ error_setg(errp, "multifd %u: packet size received %u size expected %u",
p->id, out_size, expected_size);
return -1;
}
@@ -307,7 +291,6 @@ static MultiFDMethods multifd_zlib_ops = {
.send_setup = zlib_send_setup,
.send_cleanup = zlib_send_cleanup,
.send_prepare = zlib_send_prepare,
- .send_write = zlib_send_write,
.recv_setup = zlib_recv_setup,
.recv_cleanup = zlib_recv_cleanup,
.recv_pages = zlib_recv_pages
diff --git a/migration/multifd-zstd.c b/migration/multifd-zstd.c
index 2d5b611..d788d30 100644
--- a/migration/multifd-zstd.c
+++ b/migration/multifd-zstd.c
@@ -55,7 +55,7 @@ static int zstd_send_setup(MultiFDSendParams *p, Error **errp)
z->zcs = ZSTD_createCStream();
if (!z->zcs) {
g_free(z);
- error_setg(errp, "multifd %d: zstd createCStream failed", p->id);
+ error_setg(errp, "multifd %u: zstd createCStream failed", p->id);
return -1;
}
@@ -63,17 +63,17 @@ static int zstd_send_setup(MultiFDSendParams *p, Error **errp)
if (ZSTD_isError(res)) {
ZSTD_freeCStream(z->zcs);
g_free(z);
- error_setg(errp, "multifd %d: initCStream failed with error %s",
+ error_setg(errp, "multifd %u: initCStream failed with error %s",
p->id, ZSTD_getErrorName(res));
return -1;
}
- /* To be safe, we reserve twice the size of the packet */
- z->zbuff_len = MULTIFD_PACKET_SIZE * 2;
+ /* This is the maximum size of the compressed buffer */
+ z->zbuff_len = ZSTD_compressBound(MULTIFD_PACKET_SIZE);
z->zbuff = g_try_malloc(z->zbuff_len);
if (!z->zbuff) {
ZSTD_freeCStream(z->zcs);
g_free(z);
- error_setg(errp, "multifd %d: out of memory for zbuff", p->id);
+ error_setg(errp, "multifd %u: out of memory for zbuff", p->id);
return -1;
}
return 0;
@@ -121,13 +121,13 @@ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp)
z->out.size = z->zbuff_len;
z->out.pos = 0;
- for (i = 0; i < p->pages->num; i++) {
+ for (i = 0; i < p->normal_num; i++) {
ZSTD_EndDirective flush = ZSTD_e_continue;
- if (i == p->pages->num - 1) {
+ if (i == p->normal_num - 1) {
flush = ZSTD_e_flush;
}
- z->in.src = p->pages->block->host + p->pages->offset[i];
+ z->in.src = p->pages->block->host + p->normal[i];
z->in.size = page_size;
z->in.pos = 0;
@@ -144,16 +144,19 @@ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp)
} while (ret > 0 && (z->in.size - z->in.pos > 0)
&& (z->out.size - z->out.pos > 0));
if (ret > 0 && (z->in.size - z->in.pos > 0)) {
- error_setg(errp, "multifd %d: compressStream buffer too small",
+ error_setg(errp, "multifd %u: compressStream buffer too small",
p->id);
return -1;
}
if (ZSTD_isError(ret)) {
- error_setg(errp, "multifd %d: compressStream error %s",
+ error_setg(errp, "multifd %u: compressStream error %s",
p->id, ZSTD_getErrorName(ret));
return -1;
}
}
+ p->iov[p->iovs_num].iov_base = z->zbuff;
+ p->iov[p->iovs_num].iov_len = z->out.pos;
+ p->iovs_num++;
p->next_packet_size = z->out.pos;
p->flags |= MULTIFD_FLAG_ZSTD;
@@ -161,25 +164,6 @@ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp)
}
/**
- * zstd_send_write: do the actual write of the data
- *
- * Do the actual write of the comprresed buffer.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @used: number of pages used
- * @errp: pointer to an error
- */
-static int zstd_send_write(MultiFDSendParams *p, uint32_t used, Error **errp)
-{
- struct zstd_data *z = p->data;
-
- return qio_channel_write_all(p->c, (void *)z->zbuff, p->next_packet_size,
- errp);
-}
-
-/**
* zstd_recv_setup: setup receive side
*
* Create the compressed channel and buffer.
@@ -198,7 +182,7 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp)
z->zds = ZSTD_createDStream();
if (!z->zds) {
g_free(z);
- error_setg(errp, "multifd %d: zstd createDStream failed", p->id);
+ error_setg(errp, "multifd %u: zstd createDStream failed", p->id);
return -1;
}
@@ -206,7 +190,7 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp)
if (ZSTD_isError(ret)) {
ZSTD_freeDStream(z->zds);
g_free(z);
- error_setg(errp, "multifd %d: initDStream failed with error %s",
+ error_setg(errp, "multifd %u: initDStream failed with error %s",
p->id, ZSTD_getErrorName(ret));
return -1;
}
@@ -217,7 +201,7 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp)
if (!z->zbuff) {
ZSTD_freeDStream(z->zds);
g_free(z);
- error_setg(errp, "multifd %d: out of memory for zbuff", p->id);
+ error_setg(errp, "multifd %u: out of memory for zbuff", p->id);
return -1;
}
return 0;
@@ -258,14 +242,14 @@ static int zstd_recv_pages(MultiFDRecvParams *p, Error **errp)
uint32_t in_size = p->next_packet_size;
uint32_t out_size = 0;
size_t page_size = qemu_target_page_size();
- uint32_t expected_size = p->pages->num * page_size;
+ uint32_t expected_size = p->normal_num * page_size;
uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
struct zstd_data *z = p->data;
int ret;
int i;
if (flags != MULTIFD_FLAG_ZSTD) {
- error_setg(errp, "multifd %d: flags received %x flags expected %x",
+ error_setg(errp, "multifd %u: flags received %x flags expected %x",
p->id, flags, MULTIFD_FLAG_ZSTD);
return -1;
}
@@ -279,8 +263,8 @@ static int zstd_recv_pages(MultiFDRecvParams *p, Error **errp)
z->in.size = in_size;
z->in.pos = 0;
- for (i = 0; i < p->pages->num; i++) {
- z->out.dst = p->pages->block->host + p->pages->offset[i];
+ for (i = 0; i < p->normal_num; i++) {
+ z->out.dst = p->host + p->normal[i];
z->out.size = page_size;
z->out.pos = 0;
@@ -297,19 +281,19 @@ static int zstd_recv_pages(MultiFDRecvParams *p, Error **errp)
} while (ret > 0 && (z->in.size - z->in.pos > 0)
&& (z->out.pos < page_size));
if (ret > 0 && (z->out.pos < page_size)) {
- error_setg(errp, "multifd %d: decompressStream buffer too small",
+ error_setg(errp, "multifd %u: decompressStream buffer too small",
p->id);
return -1;
}
if (ZSTD_isError(ret)) {
- error_setg(errp, "multifd %d: decompressStream returned %s",
+ error_setg(errp, "multifd %u: decompressStream returned %s",
p->id, ZSTD_getErrorName(ret));
return ret;
}
out_size += z->out.pos;
}
if (out_size != expected_size) {
- error_setg(errp, "multifd %d: packet size received %d size expected %d",
+ error_setg(errp, "multifd %u: packet size received %u size expected %u",
p->id, out_size, expected_size);
return -1;
}
@@ -320,7 +304,6 @@ static MultiFDMethods multifd_zstd_ops = {
.send_setup = zstd_send_setup,
.send_cleanup = zstd_send_cleanup,
.send_prepare = zstd_send_prepare,
- .send_write = zstd_send_write,
.recv_setup = zstd_recv_setup,
.recv_cleanup = zstd_recv_cleanup,
.recv_pages = zstd_recv_pages
diff --git a/migration/multifd.c b/migration/multifd.c
index 3242f68..76b57a7 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -86,28 +86,21 @@ static void nocomp_send_cleanup(MultiFDSendParams *p, Error **errp)
*/
static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp)
{
- p->next_packet_size = p->pages->num * qemu_target_page_size();
+ MultiFDPages_t *pages = p->pages;
+ size_t page_size = qemu_target_page_size();
+
+ for (int i = 0; i < p->normal_num; i++) {
+ p->iov[p->iovs_num].iov_base = pages->block->host + p->normal[i];
+ p->iov[p->iovs_num].iov_len = page_size;
+ p->iovs_num++;
+ }
+
+ p->next_packet_size = p->normal_num * page_size;
p->flags |= MULTIFD_FLAG_NOCOMP;
return 0;
}
/**
- * nocomp_send_write: do the actual write of the data
- *
- * For no compression we just have to write the data.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @used: number of pages used
- * @errp: pointer to an error
- */
-static int nocomp_send_write(MultiFDSendParams *p, uint32_t used, Error **errp)
-{
- return qio_channel_writev_all(p->c, p->pages->iov, used, errp);
-}
-
-/**
* nocomp_recv_setup: setup receive side
*
* For no compression this function does nothing.
@@ -146,20 +139,24 @@ static void nocomp_recv_cleanup(MultiFDRecvParams *p)
static int nocomp_recv_pages(MultiFDRecvParams *p, Error **errp)
{
uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
+ size_t page_size = qemu_target_page_size();
if (flags != MULTIFD_FLAG_NOCOMP) {
- error_setg(errp, "multifd %d: flags received %x flags expected %x",
+ error_setg(errp, "multifd %u: flags received %x flags expected %x",
p->id, flags, MULTIFD_FLAG_NOCOMP);
return -1;
}
- return qio_channel_readv_all(p->c, p->pages->iov, p->pages->num, errp);
+ for (int i = 0; i < p->normal_num; i++) {
+ p->iov[i].iov_base = p->host + p->normal[i];
+ p->iov[i].iov_len = page_size;
+ }
+ return qio_channel_readv_all(p->c, p->iov, p->normal_num, errp);
}
static MultiFDMethods multifd_nocomp_ops = {
.send_setup = nocomp_send_setup,
.send_cleanup = nocomp_send_cleanup,
.send_prepare = nocomp_send_prepare,
- .send_write = nocomp_send_write,
.recv_setup = nocomp_recv_setup,
.recv_cleanup = nocomp_recv_cleanup,
.recv_pages = nocomp_recv_pages
@@ -212,8 +209,8 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
}
if (msg.version != MULTIFD_VERSION) {
- error_setg(errp, "multifd: received packet version %d "
- "expected %d", msg.version, MULTIFD_VERSION);
+ error_setg(errp, "multifd: received packet version %u "
+ "expected %u", msg.version, MULTIFD_VERSION);
return -1;
}
@@ -229,8 +226,8 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
}
if (msg.id > migrate_multifd_channels()) {
- error_setg(errp, "multifd: received channel version %d "
- "expected %d", msg.version, MULTIFD_VERSION);
+ error_setg(errp, "multifd: received channel version %u "
+ "expected %u", msg.version, MULTIFD_VERSION);
return -1;
}
@@ -242,7 +239,6 @@ static MultiFDPages_t *multifd_pages_init(size_t size)
MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);
pages->allocated = size;
- pages->iov = g_new0(struct iovec, size);
pages->offset = g_new0(ram_addr_t, size);
return pages;
@@ -254,8 +250,6 @@ static void multifd_pages_clear(MultiFDPages_t *pages)
pages->allocated = 0;
pages->packet_num = 0;
pages->block = NULL;
- g_free(pages->iov);
- pages->iov = NULL;
g_free(pages->offset);
pages->offset = NULL;
g_free(pages);
@@ -268,7 +262,7 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)
packet->flags = cpu_to_be32(p->flags);
packet->pages_alloc = cpu_to_be32(p->pages->allocated);
- packet->pages_used = cpu_to_be32(p->pages->num);
+ packet->normal_pages = cpu_to_be32(p->normal_num);
packet->next_packet_size = cpu_to_be32(p->next_packet_size);
packet->packet_num = cpu_to_be64(p->packet_num);
@@ -276,9 +270,9 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)
strncpy(packet->ramblock, p->pages->block->idstr, 256);
}
- for (i = 0; i < p->pages->num; i++) {
+ for (i = 0; i < p->normal_num; i++) {
/* there are architectures where ram_addr_t is 32 bit */
- uint64_t temp = p->pages->offset[i];
+ uint64_t temp = p->normal[i];
packet->offset[i] = cpu_to_be64(temp);
}
@@ -288,7 +282,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
{
MultiFDPacket_t *packet = p->packet;
size_t page_size = qemu_target_page_size();
- uint32_t pages_max = MULTIFD_PACKET_SIZE / page_size;
+ uint32_t page_count = MULTIFD_PACKET_SIZE / page_size;
RAMBlock *block;
int i;
@@ -303,7 +297,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
packet->version = be32_to_cpu(packet->version);
if (packet->version != MULTIFD_VERSION) {
error_setg(errp, "multifd: received packet "
- "version %d and expected version %d",
+ "version %u and expected version %u",
packet->version, MULTIFD_VERSION);
return -1;
}
@@ -315,33 +309,25 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
- * If we received a packet that is 100 times bigger than expected
- * just stop migration. It is a magic number.
+ * If we received a packet claiming more pages than we would ever
+ * allocate, something is wrong: just stop migration.
*/
- if (packet->pages_alloc > pages_max * 100) {
+ if (packet->pages_alloc > page_count) {
error_setg(errp, "multifd: received packet "
- "with size %d and expected a maximum size of %d",
- packet->pages_alloc, pages_max * 100) ;
+ "with size %u and expected a size of %u",
+ packet->pages_alloc, page_count) ;
return -1;
}
- /*
- * We received a packet that is bigger than expected but inside
- * reasonable limits (see previous comment). Just reallocate.
- */
- if (packet->pages_alloc > p->pages->allocated) {
- multifd_pages_clear(p->pages);
- p->pages = multifd_pages_init(packet->pages_alloc);
- }
- p->pages->num = be32_to_cpu(packet->pages_used);
- if (p->pages->num > packet->pages_alloc) {
+ p->normal_num = be32_to_cpu(packet->normal_pages);
+ if (p->normal_num > packet->pages_alloc) {
error_setg(errp, "multifd: received packet "
- "with %d pages and expected maximum pages are %d",
- p->pages->num, packet->pages_alloc) ;
+ "with %u pages and expected maximum pages are %u",
+ p->normal_num, packet->pages_alloc) ;
return -1;
}
p->next_packet_size = be32_to_cpu(packet->next_packet_size);
p->packet_num = be64_to_cpu(packet->packet_num);
- if (p->pages->num == 0) {
+ if (p->normal_num == 0) {
return 0;
}
@@ -354,8 +340,8 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
return -1;
}
- p->pages->block = block;
- for (i = 0; i < p->pages->num; i++) {
+ p->host = block->host;
+ for (i = 0; i < p->normal_num; i++) {
uint64_t offset = be64_to_cpu(packet->offset[i]);
if (offset > (block->used_length - page_size)) {
@@ -364,9 +350,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
offset, block->used_length);
return -1;
}
- p->pages->offset[i] = offset;
- p->pages->iov[i].iov_base = block->host + offset;
- p->pages->iov[i].iov_len = page_size;
+ p->normal[i] = offset;
}
return 0;
@@ -470,8 +454,6 @@ int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
if (pages->block == block) {
pages->offset[pages->num] = offset;
- pages->iov[pages->num].iov_base = block->host + offset;
- pages->iov[pages->num].iov_len = qemu_target_page_size();
pages->num++;
if (pages->num < pages->allocated) {
@@ -567,6 +549,10 @@ void multifd_save_cleanup(void)
p->packet_len = 0;
g_free(p->packet);
p->packet = NULL;
+ g_free(p->iov);
+ p->iov = NULL;
+ g_free(p->normal);
+ p->normal = NULL;
multifd_send_state->ops->send_cleanup(p, &local_err);
if (local_err) {
migrate_set_error(migrate_get_current(), local_err);
@@ -651,11 +637,17 @@ static void *multifd_send_thread(void *opaque)
qemu_mutex_lock(&p->mutex);
if (p->pending_job) {
- uint32_t used = p->pages->num;
uint64_t packet_num = p->packet_num;
uint32_t flags = p->flags;
+ p->iovs_num = 1;
+ p->normal_num = 0;
+
+ for (int i = 0; i < p->pages->num; i++) {
+ p->normal[p->normal_num] = p->pages->offset[i];
+ p->normal_num++;
+ }
- if (used) {
+ if (p->normal_num) {
ret = multifd_send_state->ops->send_prepare(p, &local_err);
if (ret != 0) {
qemu_mutex_unlock(&p->mutex);
@@ -665,27 +657,23 @@ static void *multifd_send_thread(void *opaque)
multifd_send_fill_packet(p);
p->flags = 0;
p->num_packets++;
- p->num_pages += used;
+ p->total_normal_pages += p->normal_num;
p->pages->num = 0;
p->pages->block = NULL;
qemu_mutex_unlock(&p->mutex);
- trace_multifd_send(p->id, packet_num, used, flags,
+ trace_multifd_send(p->id, packet_num, p->normal_num, flags,
p->next_packet_size);
- ret = qio_channel_write_all(p->c, (void *)p->packet,
- p->packet_len, &local_err);
+ p->iov[0].iov_len = p->packet_len;
+ p->iov[0].iov_base = p->packet;
+
+ ret = qio_channel_writev_all(p->c, p->iov, p->iovs_num,
+ &local_err);
if (ret != 0) {
break;
}
- if (used) {
- ret = multifd_send_state->ops->send_write(p, used, &local_err);
- if (ret != 0) {
- break;
- }
- }
-
qemu_mutex_lock(&p->mutex);
p->pending_job--;
qemu_mutex_unlock(&p->mutex);
@@ -724,7 +712,7 @@ out:
qemu_mutex_unlock(&p->mutex);
rcu_unregister_thread();
- trace_multifd_send_thread_end(p->id, p->num_packets, p->num_pages);
+ trace_multifd_send_thread_end(p->id, p->num_packets, p->total_normal_pages);
return NULL;
}
@@ -922,6 +910,9 @@ int multifd_save_setup(Error **errp)
p->packet->version = cpu_to_be32(MULTIFD_VERSION);
p->name = g_strdup_printf("multifdsend_%d", i);
p->tls_hostname = g_strdup(s->hostname);
+ /* We need one extra place for the packet header */
+ p->iov = g_new0(struct iovec, page_count + 1);
+ p->normal = g_new0(ram_addr_t, page_count);
socket_send_channel_create(multifd_new_send_channel_async, p);
}
@@ -1016,11 +1007,13 @@ int multifd_load_cleanup(Error **errp)
qemu_sem_destroy(&p->sem_sync);
g_free(p->name);
p->name = NULL;
- multifd_pages_clear(p->pages);
- p->pages = NULL;
p->packet_len = 0;
g_free(p->packet);
p->packet = NULL;
+ g_free(p->iov);
+ p->iov = NULL;
+ g_free(p->normal);
+ p->normal = NULL;
multifd_recv_state->ops->recv_cleanup(p);
}
qemu_sem_destroy(&multifd_recv_state->sem_sync);
@@ -1069,7 +1062,6 @@ static void *multifd_recv_thread(void *opaque)
rcu_register_thread();
while (true) {
- uint32_t used;
uint32_t flags;
if (p->quit) {
@@ -1092,17 +1084,16 @@ static void *multifd_recv_thread(void *opaque)
break;
}
- used = p->pages->num;
flags = p->flags;
/* recv methods don't know how to handle the SYNC flag */
p->flags &= ~MULTIFD_FLAG_SYNC;
- trace_multifd_recv(p->id, p->packet_num, used, flags,
+ trace_multifd_recv(p->id, p->packet_num, p->normal_num, flags,
p->next_packet_size);
p->num_packets++;
- p->num_pages += used;
+ p->total_normal_pages += p->normal_num;
qemu_mutex_unlock(&p->mutex);
- if (used) {
+ if (p->normal_num) {
ret = multifd_recv_state->ops->recv_pages(p, &local_err);
if (ret != 0) {
break;
@@ -1124,7 +1115,7 @@ static void *multifd_recv_thread(void *opaque)
qemu_mutex_unlock(&p->mutex);
rcu_unregister_thread();
- trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages);
+ trace_multifd_recv_thread_end(p->id, p->num_packets, p->total_normal_pages);
return NULL;
}
@@ -1156,11 +1147,12 @@ int multifd_load_setup(Error **errp)
qemu_sem_init(&p->sem_sync, 0);
p->quit = false;
p->id = i;
- p->pages = multifd_pages_init(page_count);
p->packet_len = sizeof(MultiFDPacket_t)
+ sizeof(uint64_t) * page_count;
p->packet = g_malloc0(p->packet_len);
p->name = g_strdup_printf("multifdrecv_%d", i);
+ p->iov = g_new0(struct iovec, page_count);
+ p->normal = g_new0(ram_addr_t, page_count);
}
for (i = 0; i < thread_count; i++) {
diff --git a/migration/multifd.h b/migration/multifd.h
index e57adc7..4dda900 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -44,7 +44,8 @@ typedef struct {
uint32_t flags;
/* maximum number of allocated pages */
uint32_t pages_alloc;
- uint32_t pages_used;
+ /* non-zero pages */
+ uint32_t normal_pages;
/* size of the next packet that contains pages */
uint32_t next_packet_size;
uint64_t packet_num;
@@ -62,8 +63,6 @@ typedef struct {
uint64_t packet_num;
/* offset of each page */
ram_addr_t *offset;
- /* pointer to each page */
- struct iovec *iov;
RAMBlock *block;
} MultiFDPages_t;
@@ -106,10 +105,18 @@ typedef struct {
/* thread local variables */
/* packets sent through this channel */
uint64_t num_packets;
- /* pages sent through this channel */
- uint64_t num_pages;
+ /* non-zero pages sent through this channel */
+ uint64_t total_normal_pages;
/* syncs main thread and channels */
QemuSemaphore sem_sync;
+ /* buffers to send */
+ struct iovec *iov;
+ /* number of iovs used */
+ uint32_t iovs_num;
+ /* Pages that are not zero */
+ ram_addr_t *normal;
+ /* number of non-zero pages */
+ uint32_t normal_num;
/* used for compression methods */
void *data;
} MultiFDSendParams;
@@ -130,8 +137,8 @@ typedef struct {
bool running;
/* should this thread finish */
bool quit;
- /* array of pages to receive */
- MultiFDPages_t *pages;
+ /* ramblock host address */
+ uint8_t *host;
/* packet allocated len */
uint32_t packet_len;
/* pointer to the packet */
@@ -145,10 +152,16 @@ typedef struct {
uint32_t next_packet_size;
/* packets sent through this channel */
uint64_t num_packets;
- /* pages sent through this channel */
- uint64_t num_pages;
+ /* non-zero pages received through this channel */
+ uint64_t total_normal_pages;
/* syncs main thread and channels */
QemuSemaphore sem_sync;
+ /* buffers to recv */
+ struct iovec *iov;
+ /* Pages that are not zero */
+ ram_addr_t *normal;
+ /* number of non-zero pages */
+ uint32_t normal_num;
/* used for de-compression methods */
void *data;
} MultiFDRecvParams;
@@ -160,8 +173,6 @@ typedef struct {
void (*send_cleanup)(MultiFDSendParams *p, Error **errp);
/* Prepare the send packet */
int (*send_prepare)(MultiFDSendParams *p, Error **errp);
- /* Write the send packet */
- int (*send_write)(MultiFDSendParams *p, uint32_t used, Error **errp);
/* Setup for receiving side */
int (*recv_setup)(MultiFDRecvParams *p, Error **errp);
/* Cleanup for receiving side */
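
With send_write gone, a compression backend only has to queue its output in
p->iov from send_prepare; the generic send thread then writes the packet
header plus those buffers with a single qio_channel_writev_all() call. A
sketch of what a backend's ops table looks like after this change (the
example_* hooks are placeholders, not real functions):

    static MultiFDMethods multifd_example_ops = {
        .send_setup = example_send_setup,
        .send_cleanup = example_send_cleanup,
        /* fills p->iov[]/p->iovs_num and p->next_packet_size */
        .send_prepare = example_send_prepare,
        .recv_setup = example_recv_setup,
        .recv_cleanup = example_recv_cleanup,
        /* reads into p->host + p->normal[i] for p->normal_num pages */
        .recv_pages = example_recv_pages
    };
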
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index d18b5d0..e662dd0 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -283,15 +283,13 @@ static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
}
#ifdef UFFD_FEATURE_THREAD_ID
- if (migrate_postcopy_blocktime() && mis &&
- UFFD_FEATURE_THREAD_ID & supported_features) {
- /* kernel supports that feature */
- /* don't create blocktime_context if it exists */
- if (!mis->blocktime_ctx) {
- mis->blocktime_ctx = blocktime_context_new();
- }
-
+ if (UFFD_FEATURE_THREAD_ID & supported_features) {
asked_features |= UFFD_FEATURE_THREAD_ID;
+ if (migrate_postcopy_blocktime()) {
+ if (!mis->blocktime_ctx) {
+ mis->blocktime_ctx = blocktime_context_new();
+ }
+ }
}
#endif
@@ -525,6 +523,19 @@ int postcopy_ram_incoming_init(MigrationIncomingState *mis)
return 0;
}
+static void postcopy_temp_pages_cleanup(MigrationIncomingState *mis)
+{
+ if (mis->postcopy_tmp_page) {
+ munmap(mis->postcopy_tmp_page, mis->largest_page_size);
+ mis->postcopy_tmp_page = NULL;
+ }
+
+ if (mis->postcopy_tmp_zero_page) {
+ munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
+ mis->postcopy_tmp_zero_page = NULL;
+ }
+}
+
/*
* At the end of a migration where postcopy_ram_incoming_init was called.
*/
@@ -566,14 +577,8 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
}
}
- if (mis->postcopy_tmp_page) {
- munmap(mis->postcopy_tmp_page, mis->largest_page_size);
- mis->postcopy_tmp_page = NULL;
- }
- if (mis->postcopy_tmp_zero_page) {
- munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
- mis->postcopy_tmp_zero_page = NULL;
- }
+ postcopy_temp_pages_cleanup(mis);
+
trace_postcopy_ram_incoming_cleanup_blocktime(
get_postcopy_total_blocktime());
@@ -1084,6 +1089,40 @@ retry:
return NULL;
}
+static int postcopy_temp_pages_setup(MigrationIncomingState *mis)
+{
+ int err;
+
+ mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (mis->postcopy_tmp_page == MAP_FAILED) {
+ err = errno;
+ mis->postcopy_tmp_page = NULL;
+ error_report("%s: Failed to map postcopy_tmp_page %s",
+ __func__, strerror(err));
+ return -err;
+ }
+
+ /*
+ * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages
+ */
+ mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
+ err = errno;
+ mis->postcopy_tmp_zero_page = NULL;
+ error_report("%s: Failed to map large zero page %s",
+ __func__, strerror(err));
+ return -err;
+ }
+
+ memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
+
+ return 0;
+}
+
int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
{
/* Open the fd for the kernel to give us userfaults */
@@ -1124,32 +1163,11 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
return -1;
}
- mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
- PROT_READ | PROT_WRITE, MAP_PRIVATE |
- MAP_ANONYMOUS, -1, 0);
- if (mis->postcopy_tmp_page == MAP_FAILED) {
- mis->postcopy_tmp_page = NULL;
- error_report("%s: Failed to map postcopy_tmp_page %s",
- __func__, strerror(errno));
+ if (postcopy_temp_pages_setup(mis)) {
+ /* Error dumped in the sub-function */
return -1;
}
- /*
- * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages
- */
- mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS,
- -1, 0);
- if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
- int e = errno;
- mis->postcopy_tmp_zero_page = NULL;
- error_report("%s: Failed to map large zero page %s",
- __func__, strerror(e));
- return -e;
- }
- memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
-
trace_postcopy_ram_enable_notify();
return 0;
diff --git a/migration/ram.c b/migration/ram.c
index 57efa67..91ca743 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -325,7 +325,8 @@ struct RAMState {
uint64_t xbzrle_bytes_prev;
/* Start using XBZRLE (e.g., after the first round). */
bool xbzrle_enabled;
-
+ /* Are we on the last stage of migration */
+ bool last_stage;
/* compression statistics since the beginning of the period */
/* amount of count that no free thread to compress data */
uint64_t compress_thread_busy_prev;
@@ -354,6 +355,12 @@ static RAMState *ram_state;
static NotifierWithReturnList precopy_notifier_list;
+/* Whether postcopy has queued requests? */
+static bool postcopy_has_request(RAMState *rs)
+{
+ return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests);
+}
+
void precopy_infrastructure_init(void)
{
notifier_with_return_list_init(&precopy_notifier_list);
@@ -386,6 +393,18 @@ uint64_t ram_bytes_remaining(void)
MigrationStats ram_counters;
+static void ram_transferred_add(uint64_t bytes)
+{
+ if (runstate_is_running()) {
+ ram_counters.precopy_bytes += bytes;
+ } else if (migration_in_postcopy()) {
+ ram_counters.postcopy_bytes += bytes;
+ } else {
+ ram_counters.downtime_bytes += bytes;
+ }
+ ram_counters.transferred += bytes;
+}
+
/* used by the search for pages to send */
struct PageSearchStatus {
/* Current block being searched */
@@ -683,11 +702,10 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
* @current_addr: addr of the page
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
- * @last_stage: if we are at the completion stage
*/
static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
ram_addr_t current_addr, RAMBlock *block,
- ram_addr_t offset, bool last_stage)
+ ram_addr_t offset)
{
int encoded_len = 0, bytes_xbzrle;
uint8_t *prev_cached_page;
@@ -695,7 +713,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
if (!cache_is_cached(XBZRLE.cache, current_addr,
ram_counters.dirty_sync_count)) {
xbzrle_counters.cache_miss++;
- if (!last_stage) {
+ if (!rs->last_stage) {
if (cache_insert(XBZRLE.cache, current_addr, *current_data,
ram_counters.dirty_sync_count) == -1) {
return -1;
@@ -734,7 +752,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
* Update the cache contents, so that it corresponds to the data
* sent, in all cases except where we skip the page.
*/
- if (!last_stage && encoded_len != 0) {
+ if (!rs->last_stage && encoded_len != 0) {
memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
/*
* In the case where we couldn't compress, ensure that the caller
@@ -767,7 +785,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
* RAM_SAVE_FLAG_CONTINUE.
*/
xbzrle_counters.bytes += bytes_xbzrle - 8;
- ram_counters.transferred += bytes_xbzrle;
+ ram_transferred_add(bytes_xbzrle);
return 1;
}
@@ -1158,6 +1176,15 @@ static void migration_bitmap_sync_precopy(RAMState *rs)
}
}
+static void ram_release_page(const char *rbname, uint64_t offset)
+{
+ if (!migrate_release_ram() || !migration_in_postcopy()) {
+ return;
+ }
+
+ ram_discard_range(rbname, offset, TARGET_PAGE_SIZE);
+}
+
/**
* save_zero_page_to_file: send the zero page to the file
*
@@ -1179,6 +1206,7 @@ static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
qemu_put_byte(file, 0);
len += 1;
+ ram_release_page(block->idstr, offset);
}
return len;
}
@@ -1198,21 +1226,12 @@ static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
if (len) {
ram_counters.duplicate++;
- ram_counters.transferred += len;
+ ram_transferred_add(len);
return 1;
}
return -1;
}
-static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
-{
- if (!migrate_release_ram() || !migration_in_postcopy()) {
- return;
- }
-
- ram_discard_range(rbname, offset, ((ram_addr_t)pages) << TARGET_PAGE_BITS);
-}
-
/*
* @pages: the number of pages written by the control path,
* < 0 - error
@@ -1234,7 +1253,7 @@ static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
}
if (bytes_xmit) {
- ram_counters.transferred += bytes_xmit;
+ ram_transferred_add(bytes_xmit);
*pages = 1;
}
@@ -1265,8 +1284,8 @@ static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
uint8_t *buf, bool async)
{
- ram_counters.transferred += save_page_header(rs, rs->f, block,
- offset | RAM_SAVE_FLAG_PAGE);
+ ram_transferred_add(save_page_header(rs, rs->f, block,
+ offset | RAM_SAVE_FLAG_PAGE));
if (async) {
qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
migrate_release_ram() &
@@ -1274,7 +1293,7 @@ static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
} else {
qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
}
- ram_counters.transferred += TARGET_PAGE_SIZE;
+ ram_transferred_add(TARGET_PAGE_SIZE);
ram_counters.normal++;
return 1;
}
@@ -1290,9 +1309,8 @@ static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
* @rs: current RAM state
* @block: block that contains the page we want to send
* @offset: offset inside the block for the page
- * @last_stage: if we are at the completion stage
*/
-static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
+static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
{
int pages = -1;
uint8_t *p;
@@ -1307,8 +1325,8 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
XBZRLE_cache_lock();
if (rs->xbzrle_enabled && !migration_in_postcopy()) {
pages = save_xbzrle_page(rs, &p, current_addr, block,
- offset, last_stage);
- if (!last_stage) {
+ offset);
+ if (!rs->last_stage) {
/* Can't send this cached data async, since the cache page
* might get updated before it gets to the wire
*/
@@ -1341,13 +1359,11 @@ static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
ram_addr_t offset, uint8_t *source_buf)
{
RAMState *rs = ram_state;
- uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
- bool zero_page = false;
+ uint8_t *p = block->host + offset;
int ret;
if (save_zero_page_to_file(rs, f, block, offset)) {
- zero_page = true;
- goto exit;
+ return true;
}
save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
@@ -1362,18 +1378,14 @@ static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
if (ret < 0) {
qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
error_report("compressed data failed!");
- return false;
}
-
-exit:
- ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
- return zero_page;
+ return false;
}
static void
update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
{
- ram_counters.transferred += bytes_xmit;
+ ram_transferred_add(bytes_xmit);
if (param->zero_page) {
ram_counters.duplicate++;
@@ -1533,30 +1545,42 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
*/
static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
{
+ struct RAMSrcPageRequest *entry;
RAMBlock *block = NULL;
+ size_t page_size;
- if (QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests)) {
+ if (!postcopy_has_request(rs)) {
return NULL;
}
QEMU_LOCK_GUARD(&rs->src_page_req_mutex);
- if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
- struct RAMSrcPageRequest *entry =
- QSIMPLEQ_FIRST(&rs->src_page_requests);
- block = entry->rb;
- *offset = entry->offset;
-
- if (entry->len > TARGET_PAGE_SIZE) {
- entry->len -= TARGET_PAGE_SIZE;
- entry->offset += TARGET_PAGE_SIZE;
- } else {
- memory_region_unref(block->mr);
- QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
- g_free(entry);
- migration_consume_urgent_request();
- }
+
+ /*
+ * This should _never_ change even after we take the lock, because no one
+ * should be taking anything off the request list other than us.
+ */
+ assert(postcopy_has_request(rs));
+
+ entry = QSIMPLEQ_FIRST(&rs->src_page_requests);
+ block = entry->rb;
+ *offset = entry->offset;
+ page_size = qemu_ram_pagesize(block);
+ /* Each page request should be a multiple of the ramblock's page size */
+ assert((entry->len % page_size) == 0);
+
+ if (entry->len > page_size) {
+ entry->len -= page_size;
+ entry->offset += page_size;
+ } else {
+ memory_region_unref(block->mr);
+ QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
+ g_free(entry);
+ migration_consume_urgent_request();
}
+ trace_unqueue_page(block->idstr, *offset,
+ test_bit((*offset >> TARGET_PAGE_BITS), block->bmap));
+
return block;
}
@@ -1611,7 +1635,7 @@ static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
/* Check if page is from UFFD-managed region. */
if (pss->block->flags & RAM_UF_WRITEPROTECT) {
void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS);
- uint64_t run_length = (pss->page - start_page + 1) << TARGET_PAGE_BITS;
+ uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS;
/* Flush async buffers before un-protect. */
qemu_fflush(rs->f);
@@ -1931,30 +1955,8 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
{
RAMBlock *block;
ram_addr_t offset;
- bool dirty;
-
- do {
- block = unqueue_page(rs, &offset);
- /*
- * We're sending this page, and since it's postcopy nothing else
- * will dirty it, and we must make sure it doesn't get sent again
- * even if this queue request was received after the background
- * search already sent it.
- */
- if (block) {
- unsigned long page;
- page = offset >> TARGET_PAGE_BITS;
- dirty = test_bit(page, block->bmap);
- if (!dirty) {
- trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
- page);
- } else {
- trace_get_queued_page(block->idstr, (uint64_t)offset, page);
- }
- }
-
- } while (block && !dirty);
+ block = unqueue_page(rs, &offset);
if (!block) {
/*
@@ -2129,10 +2131,8 @@ static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
*
* @rs: current RAM state
* @pss: data about the page we want to send
- * @last_stage: if we are at the completion stage
*/
-static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
- bool last_stage)
+static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
{
RAMBlock *block = pss->block;
ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
@@ -2156,7 +2156,6 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
xbzrle_cache_zero_page(rs, block->offset + offset);
XBZRLE_cache_unlock();
}
- ram_release_pages(block->idstr, offset, res);
return res;
}
@@ -2171,7 +2170,7 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
return ram_save_multifd_page(rs, block, offset);
}
- return ram_save_page(rs, pss, last_stage);
+ return ram_save_page(rs, pss);
}
/**
@@ -2188,12 +2187,9 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
* Returns the number of pages written or negative on error
*
* @rs: current RAM state
- * @ms: current migration state
* @pss: data about the page we want to send
- * @last_stage: if we are at the completion stage
*/
-static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
- bool last_stage)
+static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
{
int tmppages, pages = 0;
size_t pagesize_bits =
@@ -2211,7 +2207,7 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
do {
/* Check the pages is dirty and if it is send it */
if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
- tmppages = ram_save_target_page(rs, pss, last_stage);
+ tmppages = ram_save_target_page(rs, pss);
if (tmppages < 0) {
return tmppages;
}
@@ -2230,7 +2226,7 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
offset_in_ramblock(pss->block,
((ram_addr_t)pss->page) << TARGET_PAGE_BITS));
/* The offset we leave with is the min boundary of host page and block */
- pss->page = MIN(pss->page, hostpage_boundary) - 1;
+ pss->page = MIN(pss->page, hostpage_boundary);
res = ram_save_release_protection(rs, pss, start_page);
return (res < 0 ? res : pages);
@@ -2245,13 +2241,11 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
* or negative on error
*
* @rs: current RAM state
- * @last_stage: if we are at the completion stage
*
* On systems where host-page-size > target-page-size it will send all the
* pages in a host page that are dirty.
*/
-
-static int ram_find_and_save_block(RAMState *rs, bool last_stage)
+static int ram_find_and_save_block(RAMState *rs)
{
PageSearchStatus pss;
int pages = 0;
@@ -2280,7 +2274,7 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage)
}
if (found) {
- pages = ram_save_host_page(rs, &pss, last_stage);
+ pages = ram_save_host_page(rs, &pss);
}
} while (!pages && again);
@@ -2298,7 +2292,7 @@ void acct_update_position(QEMUFile *f, size_t size, bool zero)
ram_counters.duplicate += pages;
} else {
ram_counters.normal += pages;
- ram_counters.transferred += size;
+ ram_transferred_add(size);
qemu_update_position(f, size);
}
}
@@ -2408,40 +2402,6 @@ static void ram_state_reset(RAMState *rs)
#define MAX_WAIT 50 /* ms, half buffered_file limit */
-/*
- * 'expected' is the value you expect the bitmap mostly to be full
- * of; it won't bother printing lines that are all this value.
- * If 'todump' is null the migration bitmap is dumped.
- */
-void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
- unsigned long pages)
-{
- int64_t cur;
- int64_t linelen = 128;
- char linebuf[129];
-
- for (cur = 0; cur < pages; cur += linelen) {
- int64_t curb;
- bool found = false;
- /*
- * Last line; catch the case where the line length
- * is longer than remaining ram
- */
- if (cur + linelen > pages) {
- linelen = pages - cur;
- }
- for (curb = 0; curb < linelen; curb++) {
- bool thisbit = test_bit(cur + curb, todump);
- linebuf[curb] = thisbit ? '1' : '.';
- found = found || (thisbit != expected);
- }
- if (found) {
- linebuf[curb] = '\0';
- fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
- }
- }
-}
-
/* **** functions for postcopy ***** */
void ram_postcopy_migrated_memory_release(MigrationState *ms)
@@ -2467,14 +2427,12 @@ void ram_postcopy_migrated_memory_release(MigrationState *ms)
/**
* postcopy_send_discard_bm_ram: discard a RAMBlock
*
- * Returns zero on success
- *
* Callback from postcopy_each_ram_send_discard for each RAMBlock
*
* @ms: current migration state
* @block: RAMBlock to discard
*/
-static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
+static void postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
{
unsigned long end = block->used_length >> TARGET_PAGE_BITS;
unsigned long current;
@@ -2498,15 +2456,13 @@ static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
postcopy_discard_send_range(ms, one, discard_length);
current = one + discard_length;
}
-
- return 0;
}
+static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block);
+
/**
* postcopy_each_ram_send_discard: discard all RAMBlocks
*
- * Returns 0 for success or negative for error
- *
* Utility for the outgoing postcopy code.
* Calls postcopy_send_discard_bm_ram for each RAMBlock
* passing it bitmap indexes and name.
@@ -2515,27 +2471,29 @@ static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
*
* @ms: current migration state
*/
-static int postcopy_each_ram_send_discard(MigrationState *ms)
+static void postcopy_each_ram_send_discard(MigrationState *ms)
{
struct RAMBlock *block;
- int ret;
RAMBLOCK_FOREACH_NOT_IGNORED(block) {
postcopy_discard_send_init(ms, block->idstr);
/*
+ * Deal with TPS != HPS and huge pages. It discards any partially sent
+ * host-page sized chunks and marks any partially dirty host-page sized
+ * chunks as fully dirty. In this case the host page is the host page
+ * for the particular RAMBlock, i.e. it might be a huge page.
+ */
+ postcopy_chunk_hostpages_pass(ms, block);
+
+ /*
* Postcopy sends chunks of bitmap over the wire, but it
* just needs indexes at this point, avoids it having
* target page specific code.
*/
- ret = postcopy_send_discard_bm_ram(ms, block);
+ postcopy_send_discard_bm_ram(ms, block);
postcopy_discard_send_finish(ms);
- if (ret) {
- return ret;
- }
}
-
- return 0;
}
/**
@@ -2606,37 +2564,8 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block)
}
/**
- * postcopy_chunk_hostpages: discard any partially sent host page
- *
- * Utility for the outgoing postcopy code.
- *
- * Discard any partially sent host-page size chunks, mark any partially
- * dirty host-page size chunks as all dirty. In this case the host-page
- * is the host-page for the particular RAMBlock, i.e. it might be a huge page
- *
- * Returns zero on success
- *
- * @ms: current migration state
- * @block: block we want to work with
- */
-static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
-{
- postcopy_discard_send_init(ms, block->idstr);
-
- /*
- * Ensure that all partially dirty host pages are made fully dirty.
- */
- postcopy_chunk_hostpages_pass(ms, block);
-
- postcopy_discard_send_finish(ms);
- return 0;
-}
-
-/**
* ram_postcopy_send_discard_bitmap: transmit the discard bitmap
*
- * Returns zero on success
- *
* Transmit the set of pages to be discarded after precopy to the target
* these are pages that:
* a) Have been previously transmitted but are now dirty again
@@ -2647,11 +2576,9 @@ static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
*
* @ms: current migration state
*/
-int ram_postcopy_send_discard_bitmap(MigrationState *ms)
+void ram_postcopy_send_discard_bitmap(MigrationState *ms)
{
RAMState *rs = ram_state;
- RAMBlock *block;
- int ret;
RCU_READ_LOCK_GUARD();
@@ -2663,21 +2590,9 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
rs->last_sent_block = NULL;
rs->last_page = 0;
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- /* Deal with TPS != HPS and huge pages */
- ret = postcopy_chunk_hostpages(ms, block);
- if (ret) {
- return ret;
- }
+ postcopy_each_ram_send_discard(ms);
-#ifdef DEBUG_POSTCOPY
- ram_debug_dump_bitmap(block->bmap, true,
- block->used_length >> TARGET_PAGE_BITS);
-#endif
- }
trace_ram_postcopy_send_discard_bitmap();
-
- return postcopy_each_ram_send_discard(ms);
}
/**
@@ -3073,14 +2988,14 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
i = 0;
while ((ret = qemu_file_rate_limit(f)) == 0 ||
- !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
+ postcopy_has_request(rs)) {
int pages;
if (qemu_file_get_error(f)) {
break;
}
- pages = ram_find_and_save_block(rs, false);
+ pages = ram_find_and_save_block(rs);
/* no more pages to sent */
if (pages == 0) {
done = 1;
@@ -3133,7 +3048,7 @@ out:
multifd_send_sync_main(rs->f);
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
qemu_fflush(f);
- ram_counters.transferred += 8;
+ ram_transferred_add(8);
ret = qemu_file_get_error(f);
}
@@ -3160,6 +3075,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
RAMState *rs = *temp;
int ret = 0;
+ rs->last_stage = !migration_in_colo_state();
+
WITH_RCU_READ_LOCK_GUARD() {
if (!migration_in_postcopy()) {
migration_bitmap_sync_precopy(rs);
@@ -3173,7 +3090,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
while (true) {
int pages;
- pages = ram_find_and_save_block(rs, !migration_in_colo_state());
+ pages = ram_find_and_save_block(rs);
/* no more blocks to sent */
if (pages == 0) {
break;
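
The ram.c hunks above funnel what used to be scattered `ram_counters.transferred += ...` updates through the new ram_transferred_add() helper, which also attributes each byte to the migration phase it was sent in. A minimal, self-contained sketch of that bucketing pattern follows; it uses stand-in types and phase flags only, not QEMU's actual MigrationStats, runstate_is_running() or migration_in_postcopy() APIs:

    #include <stdint.h>
    #include <stdbool.h>

    typedef struct {
        uint64_t precopy_bytes;
        uint64_t downtime_bytes;
        uint64_t postcopy_bytes;
        uint64_t transferred;
    } PhaseCounters;

    /* Hypothetical phase probes standing in for the real runstate checks. */
    static bool guest_running;
    static bool in_postcopy;

    static void phase_transferred_add(PhaseCounters *c, uint64_t bytes)
    {
        if (guest_running) {
            c->precopy_bytes += bytes;   /* guest still runs on the source */
        } else if (in_postcopy) {
            c->postcopy_bytes += bytes;  /* guest already runs on the destination */
        } else {
            c->downtime_bytes += bytes;  /* guest is paused for the final stage */
        }
        c->transferred += bytes;         /* the overall total always advances */
    }

Routing every call site through one helper is what lets the per-phase counters added to MigrationStats further down be collected without touching each sender separately.
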
diff --git a/migration/ram.h b/migration/ram.h
index c515396..2c6dc36 100644
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -55,11 +55,9 @@ void mig_throttle_counter_reset(void);
uint64_t ram_pagesize_summary(void);
int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len);
void acct_update_position(QEMUFile *f, size_t size, bool zero);
-void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
- unsigned long pages);
void ram_postcopy_migrated_memory_release(MigrationState *ms);
/* For outgoing discard bitmap */
-int ram_postcopy_send_discard_bitmap(MigrationState *ms);
+void ram_postcopy_send_discard_bitmap(MigrationState *ms);
/* For incoming postcopy discard */
int ram_discard_range(const char *block_name, uint64_t start, size_t length);
int ram_postcopy_incoming_init(MigrationIncomingState *mis);
diff --git a/migration/savevm.c b/migration/savevm.c
index 0bef031..1599b02 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1298,8 +1298,9 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
save_section_footer(f, se);
if (ret < 0) {
- error_report("failed to save SaveStateEntry with id(name): %d(%s)",
- se->section_id, se->idstr);
+ error_report("failed to save SaveStateEntry with id(name): "
+ "%d(%s): %d",
+ se->section_id, se->idstr, ret);
qemu_file_set_error(f, ret);
}
if (ret <= 0) {
diff --git a/migration/trace-events b/migration/trace-events
index b48d873..48aa7b1 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -86,8 +86,6 @@ put_qlist_end(const char *field_name, const char *vmsd_name) "%s(%s)"
qemu_file_fclose(void) ""
# ram.c
-get_queued_page(const char *block_name, uint64_t tmp_offset, unsigned long page_abs) "%s/0x%" PRIx64 " page_abs=0x%lx"
-get_queued_page_not_dirty(const char *block_name, uint64_t tmp_offset, unsigned long page_abs) "%s/0x%" PRIx64 " page_abs=0x%lx"
migration_bitmap_sync_start(void) ""
migration_bitmap_sync_end(uint64_t dirty_pages) "dirty_pages %" PRIu64
migration_bitmap_clear_dirty(char *str, uint64_t start, uint64_t size, unsigned long page) "rb %s start 0x%"PRIx64" size 0x%"PRIx64" page 0x%lx"
@@ -113,25 +111,26 @@ ram_save_iterate_big_wait(uint64_t milliconds, int iterations) "big wait: %" PRI
ram_load_complete(int ret, uint64_t seq_iter) "exit_code %d seq iteration %" PRIu64
ram_write_tracking_ramblock_start(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu"
ram_write_tracking_ramblock_stop(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu"
+unqueue_page(char *block, uint64_t offset, bool dirty) "ramblock '%s' offset 0x%"PRIx64" dirty %d"
# multifd.c
-multifd_new_send_channel_async(uint8_t id) "channel %d"
-multifd_recv(uint8_t id, uint64_t packet_num, uint32_t used, uint32_t flags, uint32_t next_packet_size) "channel %d packet_num %" PRIu64 " pages %d flags 0x%x next packet size %d"
-multifd_recv_new_channel(uint8_t id) "channel %d"
+multifd_new_send_channel_async(uint8_t id) "channel %u"
+multifd_recv(uint8_t id, uint64_t packet_num, uint32_t used, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " pages %u flags 0x%x next packet size %u"
+multifd_recv_new_channel(uint8_t id) "channel %u"
multifd_recv_sync_main(long packet_num) "packet num %ld"
-multifd_recv_sync_main_signal(uint8_t id) "channel %d"
-multifd_recv_sync_main_wait(uint8_t id) "channel %d"
+multifd_recv_sync_main_signal(uint8_t id) "channel %u"
+multifd_recv_sync_main_wait(uint8_t id) "channel %u"
multifd_recv_terminate_threads(bool error) "error %d"
-multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t pages) "channel %d packets %" PRIu64 " pages %" PRIu64
-multifd_recv_thread_start(uint8_t id) "%d"
-multifd_send(uint8_t id, uint64_t packet_num, uint32_t used, uint32_t flags, uint32_t next_packet_size) "channel %d packet_num %" PRIu64 " pages %d flags 0x%x next packet size %d"
-multifd_send_error(uint8_t id) "channel %d"
+multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t pages) "channel %u packets %" PRIu64 " pages %" PRIu64
+multifd_recv_thread_start(uint8_t id) "%u"
+multifd_send(uint8_t id, uint64_t packet_num, uint32_t normal, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " normal pages %u flags 0x%x next packet size %u"
+multifd_send_error(uint8_t id) "channel %u"
multifd_send_sync_main(long packet_num) "packet num %ld"
-multifd_send_sync_main_signal(uint8_t id) "channel %d"
-multifd_send_sync_main_wait(uint8_t id) "channel %d"
+multifd_send_sync_main_signal(uint8_t id) "channel %u"
+multifd_send_sync_main_wait(uint8_t id) "channel %u"
multifd_send_terminate_threads(bool error) "error %d"
-multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t pages) "channel %d packets %" PRIu64 " pages %" PRIu64
-multifd_send_thread_start(uint8_t id) "%d"
+multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages) "channel %u packets %" PRIu64 " normal pages %" PRIu64
+multifd_send_thread_start(uint8_t id) "%u"
multifd_tls_outgoing_handshake_start(void *ioc, void *tioc, const char *hostname) "ioc=%p tioc=%p hostname=%s"
multifd_tls_outgoing_handshake_error(void *ioc, const char *err) "ioc=%p err=%s"
multifd_tls_outgoing_handshake_complete(void *ioc) "ioc=%p"
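
The multifd trace lines above only swap %d for %u: the channel id argument is a uint8_t, which C's default argument promotions widen to int before it reaches the trace backend, so either specifier reads the same value and %u merely documents the unsigned intent. A standalone illustration (hypothetical values, not generated tracetool code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t id = 7;   /* a multifd channel id is declared as uint8_t */

        /* The uint8_t is promoted to int when passed variadically; its value
         * is always non-negative, so printing it with %u matches the intent. */
        printf("channel %u\n", id);
        return 0;
    }
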
diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c
index 2669156..8c384dc 100644
--- a/monitor/hmp-cmds.c
+++ b/monitor/hmp-cmds.c
@@ -293,6 +293,18 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
monitor_printf(mon, "postcopy request count: %" PRIu64 "\n",
info->ram->postcopy_requests);
}
+ if (info->ram->precopy_bytes) {
+ monitor_printf(mon, "precopy ram: %" PRIu64 " kbytes\n",
+ info->ram->precopy_bytes >> 10);
+ }
+ if (info->ram->downtime_bytes) {
+ monitor_printf(mon, "downtime ram: %" PRIu64 " kbytes\n",
+ info->ram->downtime_bytes >> 10);
+ }
+ if (info->ram->postcopy_bytes) {
+ monitor_printf(mon, "postcopy ram: %" PRIu64 " kbytes\n",
+ info->ram->postcopy_bytes >> 10);
+ }
}
if (info->has_disk) {
diff --git a/nbd/server.c b/nbd/server.c
index 4630dd7..9fb2f26 100644
--- a/nbd/server.c
+++ b/nbd/server.c
@@ -77,7 +77,6 @@ static int system_errno_to_nbd_errno(int err)
typedef struct NBDRequestData NBDRequestData;
struct NBDRequestData {
- QSIMPLEQ_ENTRY(NBDRequestData) entry;
NBDClient *client;
uint8_t *data;
bool complete;
diff --git a/net/dump.c b/net/dump.c
index a07ba62..6a63b15 100644
--- a/net/dump.c
+++ b/net/dump.c
@@ -23,7 +23,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "clients.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
@@ -33,6 +32,7 @@
#include "qapi/visitor.h"
#include "net/filter.h"
#include "qom/object.h"
+#include "sysemu/rtc.h"
typedef struct DumpState {
int64_t start_ts;
diff --git a/qapi/block-export.json b/qapi/block-export.json
index f9ce79a..f183522 100644
--- a/qapi/block-export.json
+++ b/qapi/block-export.json
@@ -278,7 +278,8 @@
##
{ 'enum': 'BlockExportType',
'data': [ 'nbd',
- { 'name': 'vhost-user-blk', 'if': 'CONFIG_VHOST_USER_BLK_SERVER' },
+ { 'name': 'vhost-user-blk',
+ 'if': 'CONFIG_VHOST_USER_BLK_SERVER' },
{ 'name': 'fuse', 'if': 'CONFIG_FUSE' } ] }
##
diff --git a/qapi/migration.json b/qapi/migration.json
index bbfd48c..5975a0e 100644
--- a/qapi/migration.json
+++ b/qapi/migration.json
@@ -46,6 +46,15 @@
# @pages-per-second: the number of memory pages transferred per second
# (Since 4.0)
#
+# @precopy-bytes: The number of bytes sent in the pre-copy phase
+# (since 7.0).
+#
+# @downtime-bytes: The number of bytes sent while the guest is paused
+# (since 7.0).
+#
+# @postcopy-bytes: The number of bytes sent during the post-copy phase
+# (since 7.0).
+#
# Since: 0.14
##
{ 'struct': 'MigrationStats',
@@ -54,7 +63,9 @@
'normal-bytes': 'int', 'dirty-pages-rate' : 'int',
'mbps' : 'number', 'dirty-sync-count' : 'int',
'postcopy-requests' : 'int', 'page-size' : 'int',
- 'multifd-bytes' : 'uint64', 'pages-per-second' : 'uint64' } }
+ 'multifd-bytes' : 'uint64', 'pages-per-second' : 'uint64',
+ 'precopy-bytes' : 'uint64', 'downtime-bytes' : 'uint64',
+ 'postcopy-bytes' : 'uint64' } }
##
# @XBZRLECacheStats:
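
The schema addition above is what lets QMP query-migrate (and the HMP lines added earlier) break the transferred bytes down by phase. As a rough sketch of reading those fields together, assuming the usual QAPI dash-to-underscore C mapping; the struct below is a hand-written stand-in, not the generated MigrationStats type:

    #include <stdint.h>

    /* Hypothetical stand-in for the QAPI-generated statistics struct. */
    typedef struct {
        uint64_t transferred;
        uint64_t precopy_bytes;
        uint64_t downtime_bytes;
        uint64_t postcopy_bytes;
        /* ...other MigrationStats fields elided... */
    } MigrationStatsSketch;

    /* Fraction of the phase-accounted bytes that were sent while the guest
     * was paused -- one quick way to combine the three new counters. */
    static double downtime_byte_ratio(const MigrationStatsSketch *s)
    {
        uint64_t phase_total = s->precopy_bytes + s->downtime_bytes
                               + s->postcopy_bytes;
        return phase_total ? (double)s->downtime_bytes / phase_total : 0.0;
    }
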
diff --git a/scripts/update-linux-headers.sh b/scripts/update-linux-headers.sh
index fea4d6e..fe85076 100755
--- a/scripts/update-linux-headers.sh
+++ b/scripts/update-linux-headers.sh
@@ -9,6 +9,22 @@
#
# This work is licensed under the terms of the GNU GPL version 2.
# See the COPYING file in the top-level directory.
+#
+# The script will copy the headers into two target folders:
+#
+# - linux-headers/ for files that are required for compiling for a
+# Linux host. Generally we have these so we can use kernel structs
+# and defines that are more recent than the headers that might be
+# installed on the host system. Usually this script can do simple
+# file copies for these headers.
+#
+# - include/standard-headers/ for files that are used for guest
+# device emulation and are required on all hosts. For instance, we
+# get our definitions of the virtio structures from the Linux
+# kernel headers, but we need those definitions regardless of which
+# host OS we are building for. This script has to be careful to
+# sanitize the headers to remove any use of Linux-specifics such as
+# types like "__u64". This work is done in the cp_portable function.
tmpdir=$(mktemp -d)
linux="$1"
diff --git a/softmmu/rtc.c b/softmmu/rtc.c
index 5632684..7e2956f 100644
--- a/softmmu/rtc.c
+++ b/softmmu/rtc.c
@@ -23,7 +23,6 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
@@ -33,6 +32,7 @@
#include "qom/object.h"
#include "sysemu/replay.h"
#include "sysemu/sysemu.h"
+#include "sysemu/rtc.h"
static enum {
RTC_BASE_UTC,
diff --git a/target/arm/helper.c b/target/arm/helper.c
index cfca0f5..6dd241f 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -9317,8 +9317,10 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
return target_el;
}
-void arm_log_exception(int idx)
+void arm_log_exception(CPUState *cs)
{
+ int idx = cs->exception_index;
+
if (qemu_loglevel_mask(CPU_LOG_INT)) {
const char *exc = NULL;
static const char * const excnames[] = {
@@ -9352,7 +9354,8 @@ void arm_log_exception(int idx)
if (!exc) {
exc = "unknown";
}
- qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
+ qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n",
+ idx, exc, cs->cpu_index);
}
}
@@ -9655,7 +9658,7 @@ static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
* separately here.
*
* The vector table entry used is always the 0x14 Hyp mode entry point,
- * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
+ * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp.
* The offset applied to the preferred return address is always zero
* (see DDI0487C.a section G1.12.3).
* PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
@@ -9669,7 +9672,7 @@ static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
addr = 0x04;
break;
case EXCP_SWI:
- addr = 0x14;
+ addr = 0x08;
break;
case EXCP_BKPT:
/* Fall through to prefetch abort. */
@@ -10185,7 +10188,7 @@ void arm_cpu_do_interrupt(CPUState *cs)
assert(!arm_feature(env, ARM_FEATURE_M));
- arm_log_exception(cs->exception_index);
+ arm_log_exception(cs);
qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
new_el);
if (qemu_loglevel_mask(CPU_LOG_INT)
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 89f7610..3f05748 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1130,7 +1130,7 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
__attribute__((nonnull));
-void arm_log_exception(int idx);
+void arm_log_exception(CPUState *cs);
#endif /* !CONFIG_USER_ONLY */
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index 2c9922d..b11e927 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -2206,7 +2206,7 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
uint32_t lr;
bool ignore_stackfaults;
- arm_log_exception(cs->exception_index);
+ arm_log_exception(cs);
/*
* For exceptions we just mark as pending on the NVIC, and let that
diff --git a/target/openrisc/machine.c b/target/openrisc/machine.c
index 6239725..b7d7388 100644
--- a/target/openrisc/machine.c
+++ b/target/openrisc/machine.c
@@ -25,7 +25,6 @@ static const VMStateDescription vmstate_tlb_entry = {
.name = "tlb_entry",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_UINTTL(mr, OpenRISCTLBEntry),
VMSTATE_UINTTL(tr, OpenRISCTLBEntry),
diff --git a/target/ppc/cpu-models.c b/target/ppc/cpu-models.c
index 764afe5..a2c720c 100644
--- a/target/ppc/cpu-models.c
+++ b/target/ppc/cpu-models.c
@@ -428,8 +428,6 @@
"PowerPC 601v1")
POWERPC_DEF("601_v2", CPU_POWERPC_601_v2, 601v,
"PowerPC 601v2")
- POWERPC_DEF("602", CPU_POWERPC_602, 602,
- "PowerPC 602")
POWERPC_DEF("603", CPU_POWERPC_603, 603,
"PowerPC 603")
POWERPC_DEF("603e_v1.1", CPU_POWERPC_603E_v11, 603E,
diff --git a/target/ppc/cpu-models.h b/target/ppc/cpu-models.h
index bf1dc7e..612978a 100644
--- a/target/ppc/cpu-models.h
+++ b/target/ppc/cpu-models.h
@@ -208,7 +208,6 @@ enum {
CPU_POWERPC_601_v0 = 0x00010001,
CPU_POWERPC_601_v1 = 0x00010001,
CPU_POWERPC_601_v2 = 0x00010002,
- CPU_POWERPC_602 = 0x00050100,
CPU_POWERPC_603 = 0x00030100,
CPU_POWERPC_603E_v11 = 0x00060101,
CPU_POWERPC_603E_v12 = 0x00060102,
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 2560b70..dcd83b5 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -321,12 +321,11 @@ typedef enum {
#define MSR_UCLE 26 /* User-mode cache lock enable for BookE */
#define MSR_VR 25 /* altivec available x hflags */
#define MSR_SPE 25 /* SPE enable for BookE x hflags */
-#define MSR_AP 23 /* Access privilege state on 602 hflags */
#define MSR_VSX 23 /* Vector Scalar Extension (ISA 2.06 and later) x hflags */
-#define MSR_SA 22 /* Supervisor access mode on 602 hflags */
#define MSR_S 22 /* Secure state */
#define MSR_KEY 19 /* key bit on 603e */
#define MSR_POW 18 /* Power management */
+#define MSR_WE 18 /* Wait State Enable on 405 */
#define MSR_TGPR 17 /* TGPR usage on 602/603 x */
#define MSR_CE 17 /* Critical interrupt enable on embedded PowerPC x */
#define MSR_ILE 16 /* Interrupt little-endian mode */
@@ -476,9 +475,7 @@ typedef enum {
#define msr_ucle ((env->msr >> MSR_UCLE) & 1)
#define msr_vr ((env->msr >> MSR_VR) & 1)
#define msr_spe ((env->msr >> MSR_SPE) & 1)
-#define msr_ap ((env->msr >> MSR_AP) & 1)
#define msr_vsx ((env->msr >> MSR_VSX) & 1)
-#define msr_sa ((env->msr >> MSR_SA) & 1)
#define msr_key ((env->msr >> MSR_KEY) & 1)
#define msr_pow ((env->msr >> MSR_POW) & 1)
#define msr_tgpr ((env->msr >> MSR_TGPR) & 1)
@@ -2141,8 +2138,6 @@ enum {
PPC_MFTB = 0x0000000000000200ULL,
/* Fixed-point unit extensions */
- /* PowerPC 602 specific */
- PPC_602_SPEC = 0x0000000000000400ULL,
/* isel instruction */
PPC_ISEL = 0x0000000000000800ULL,
/* popcntb instruction */
@@ -2244,7 +2239,7 @@ enum {
#define PPC_TCG_INSNS (PPC_INSNS_BASE | PPC_POWER | PPC_POWER2 \
| PPC_POWER_RTC | PPC_POWER_BR | PPC_64B \
| PPC_64BX | PPC_64H | PPC_WAIT | PPC_MFTB \
- | PPC_602_SPEC | PPC_ISEL | PPC_POPCNTB \
+ | PPC_ISEL | PPC_POPCNTB \
| PPC_STRING | PPC_FLOAT | PPC_FLOAT_EXT \
| PPC_FLOAT_FSQRT | PPC_FLOAT_FRES \
| PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES \
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index e30e86f..bf60529 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -749,54 +749,6 @@ static void register_G2_sprs(CPUPPCState *env)
0x00000000);
}
-/* SPR specific to PowerPC 602 implementation */
-static void register_602_sprs(CPUPPCState *env)
-{
- /* ESA registers */
- /* XXX : not implemented */
- spr_register(env, SPR_SER, "SER",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_SEBR, "SEBR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_ESASRR, "ESASRR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Floating point status */
- /* XXX : not implemented */
- spr_register(env, SPR_SP, "SP",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_LT, "LT",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Watchdog timer */
- /* XXX : not implemented */
- spr_register(env, SPR_TCR, "TCR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Interrupt base */
- spr_register(env, SPR_IBR, "IBR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_IABR, "IABR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
-}
-
/* SPR specific to PowerPC 601 implementation */
static void register_601_sprs(CPUPPCState *env)
{
@@ -2128,33 +2080,6 @@ static void init_excp_601(CPUPPCState *env)
#endif
}
-static void init_excp_602(CPUPPCState *env)
-{
-#if !defined(CONFIG_USER_ONLY)
- /* XXX: exception prefix has a special behavior on 602 */
- env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
- env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
- env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
- env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
- env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
- env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
- env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
- env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
- env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
- env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
- env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
- env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
- env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
- env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
- env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
- env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
- env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001500;
- env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001600;
- /* Hardware reset vector */
- env->hreset_vector = 0x00000100UL;
-#endif
-}
-
static void init_excp_603(CPUPPCState *env)
{
#if !defined(CONFIG_USER_ONLY)
@@ -2535,11 +2460,12 @@ POWERPC_FAMILY(405)(ObjectClass *oc, void *data)
PPC_MEM_SYNC | PPC_MEM_EIEIO |
PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC |
PPC_4xx_COMMON | PPC_405_MAC | PPC_40x_EXCP;
- pcc->msr_mask = (1ull << MSR_POW) |
+ pcc->msr_mask = (1ull << MSR_WE) |
(1ull << MSR_CE) |
(1ull << MSR_EE) |
(1ull << MSR_PR) |
(1ull << MSR_FP) |
+ (1ull << MSR_ME) |
(1ull << MSR_DWE) |
(1ull << MSR_DE) |
(1ull << MSR_IR) |
@@ -4080,76 +4006,6 @@ POWERPC_FAMILY(601v)(ObjectClass *oc, void *data)
pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK | POWERPC_FLAG_HID0_LE;
}
-static void init_proc_602(CPUPPCState *env)
-{
- register_ne_601_sprs(env);
- register_sdr1_sprs(env);
- register_602_sprs(env);
- /* Time base */
- register_tbl(env);
- /* hardware implementation registers */
- /* XXX : not implemented */
- spr_register(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID1, "HID1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Memory management */
- register_low_BATs(env);
- register_6xx_7xx_soft_tlb(env, 64, 2);
- init_excp_602(env);
- env->dcache_line_size = 32;
- env->icache_line_size = 32;
- /* Allocate hardware IRQ controller */
- ppc6xx_irq_init(env_archcpu(env));
-}
-
-POWERPC_FAMILY(602)(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
- PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
-
- dc->desc = "PowerPC 602";
- pcc->init_proc = init_proc_602;
- pcc->check_pow = check_pow_hid0;
- pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
- PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
- PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
- PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
- PPC_MEM_SYNC | PPC_MEM_EIEIO |
- PPC_MEM_TLBIE | PPC_6xx_TLB | PPC_MEM_TLBSYNC |
- PPC_SEGMENT | PPC_602_SPEC;
- pcc->msr_mask = (1ull << MSR_VSX) |
- (1ull << MSR_SA) |
- (1ull << MSR_POW) |
- (1ull << MSR_TGPR) |
- (1ull << MSR_ILE) |
- (1ull << MSR_EE) |
- (1ull << MSR_PR) |
- (1ull << MSR_FP) |
- (1ull << MSR_ME) |
- (1ull << MSR_FE0) |
- (1ull << MSR_SE) |
- (1ull << MSR_DE) |
- (1ull << MSR_FE1) |
- (1ull << MSR_EP) |
- (1ull << MSR_IR) |
- (1ull << MSR_DR) |
- (1ull << MSR_RI) |
- (1ull << MSR_LE);
- /* XXX: 602 MMU is quite specific. Should add a special case */
- pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
- pcc->excp_model = POWERPC_EXCP_602;
- pcc->bus_model = PPC_FLAGS_INPUT_6xx;
- pcc->bfd_mach = bfd_mach_ppc_602;
- pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
- POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
-}
-
static void init_proc_603(CPUPPCState *env)
{
register_ne_601_sprs(env);
@@ -8270,8 +8126,6 @@ static void ppc_cpu_reset(DeviceState *dev)
msr = (target_ulong)0;
msr |= (target_ulong)MSR_HVB;
- msr |= (target_ulong)0 << MSR_AP; /* TO BE CHECKED */
- msr |= (target_ulong)0 << MSR_SA; /* TO BE CHECKED */
msr |= (target_ulong)1 << MSR_EP;
#if defined(DO_SINGLE_STEP) && 0
/* Single step trace mode */
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index bc646c6..c107953 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -392,6 +392,660 @@ static void powerpc_set_excp_state(PowerPCCPU *cpu,
check_tlb_flush(env, false);
}
+static void powerpc_excp_40x(PowerPCCPU *cpu, int excp)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ target_ulong msr, new_msr, vector;
+ int srr0, srr1;
+
+ if (excp <= POWERPC_EXCP_NONE || excp >= POWERPC_EXCP_NB) {
+ cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
+ }
+
+ qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
+ " => %s (%d) error=%02x\n", env->nip, powerpc_excp_name(excp),
+ excp, env->error_code);
+
+ /* new srr1 value excluding must-be-zero bits */
+ msr = env->msr & ~0x783f0000ULL;
+
+ /*
+ * new interrupt handler msr preserves existing ME unless
+ * explicitly overridden.
+ */
+ new_msr = env->msr & (((target_ulong)1 << MSR_ME));
+
+ /* target registers */
+ srr0 = SPR_SRR0;
+ srr1 = SPR_SRR1;
+
+ /*
+ * Hypervisor emulation assistance interrupt only exists on server
+ * arch 2.05 or later.
+ */
+ if (excp == POWERPC_EXCP_HV_EMU) {
+ excp = POWERPC_EXCP_PROGRAM;
+ }
+
+ vector = env->excp_vectors[excp];
+ if (vector == (target_ulong)-1ULL) {
+ cpu_abort(cs, "Raised an exception without defined vector %d\n",
+ excp);
+ }
+
+ vector |= env->excp_prefix;
+
+ switch (excp) {
+ case POWERPC_EXCP_CRITICAL: /* Critical input */
+ srr0 = SPR_40x_SRR2;
+ srr1 = SPR_40x_SRR3;
+ break;
+ case POWERPC_EXCP_MCHECK: /* Machine check exception */
+ if (msr_me == 0) {
+ /*
+ * Machine check exception is not enabled. Enter
+ * checkstop state.
+ */
+ fprintf(stderr, "Machine check while not allowed. "
+ "Entering checkstop state\n");
+ if (qemu_log_separate()) {
+ qemu_log("Machine check while not allowed. "
+ "Entering checkstop state\n");
+ }
+ cs->halted = 1;
+ cpu_interrupt_exittb(cs);
+ }
+
+ /* machine check exceptions don't have ME set */
+ new_msr &= ~((target_ulong)1 << MSR_ME);
+
+ srr0 = SPR_40x_SRR2;
+ srr1 = SPR_40x_SRR3;
+ break;
+ case POWERPC_EXCP_DSI: /* Data storage exception */
+ trace_ppc_excp_dsi(env->spr[SPR_40x_ESR], env->spr[SPR_40x_DEAR]);
+ break;
+ case POWERPC_EXCP_ISI: /* Instruction storage exception */
+ trace_ppc_excp_isi(msr, env->nip);
+ break;
+ case POWERPC_EXCP_EXTERNAL: /* External input */
+ break;
+ case POWERPC_EXCP_ALIGN: /* Alignment exception */
+ break;
+ case POWERPC_EXCP_PROGRAM: /* Program exception */
+ switch (env->error_code & ~0xF) {
+ case POWERPC_EXCP_FP:
+ if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
+ trace_ppc_excp_fp_ignore();
+ cs->exception_index = POWERPC_EXCP_NONE;
+ env->error_code = 0;
+ return;
+ }
+ env->spr[SPR_40x_ESR] = ESR_FP;
+ break;
+ case POWERPC_EXCP_INVAL:
+ trace_ppc_excp_inval(env->nip);
+ env->spr[SPR_40x_ESR] = ESR_PIL;
+ break;
+ case POWERPC_EXCP_PRIV:
+ env->spr[SPR_40x_ESR] = ESR_PPR;
+ break;
+ case POWERPC_EXCP_TRAP:
+ env->spr[SPR_40x_ESR] = ESR_PTR;
+ break;
+ default:
+ cpu_abort(cs, "Invalid program exception %d. Aborting\n",
+ env->error_code);
+ break;
+ }
+ break;
+ case POWERPC_EXCP_SYSCALL: /* System call exception */
+ dump_syscall(env);
+
+ /*
+ * We need to correct the NIP which in this case is supposed
+ * to point to the next instruction
+ */
+ env->nip += 4;
+ break;
+ case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
+ trace_ppc_excp_print("FIT");
+ break;
+ case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
+ trace_ppc_excp_print("WDT");
+ break;
+ case POWERPC_EXCP_DTLB: /* Data TLB error */
+ case POWERPC_EXCP_ITLB: /* Instruction TLB error */
+ break;
+ case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */
+ trace_ppc_excp_print("PIT");
+ break;
+ case POWERPC_EXCP_DEBUG: /* Debug interrupt */
+ cpu_abort(cs, "%s exception not implemented\n",
+ powerpc_excp_name(excp));
+ break;
+ default:
+ cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
+ break;
+ }
+
+ /* Sanity check */
+ if (!(env->msr_mask & MSR_HVB)) {
+ if (new_msr & MSR_HVB) {
+ cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
+ "no HV support\n", excp);
+ }
+ if (srr0 == SPR_HSRR0) {
+ cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
+ "no HV support\n", excp);
+ }
+ }
+
+ /* Save PC */
+ env->spr[srr0] = env->nip;
+
+ /* Save MSR */
+ env->spr[srr1] = msr;
+
+ powerpc_set_excp_state(cpu, vector, new_msr);
+}
+
+static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ target_ulong msr, new_msr, vector;
+
+ if (excp <= POWERPC_EXCP_NONE || excp >= POWERPC_EXCP_NB) {
+ cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
+ }
+
+ qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
+ " => %s (%d) error=%02x\n", env->nip, powerpc_excp_name(excp),
+ excp, env->error_code);
+
+ /* new srr1 value excluding must-be-zero bits */
+ msr = env->msr & ~0x783f0000ULL;
+
+ /*
+ * new interrupt handler msr preserves existing ME unless
+ * explicitly overridden
+ */
+ new_msr = env->msr & ((target_ulong)1 << MSR_ME);
+
+ /*
+ * Hypervisor emulation assistance interrupt only exists on server
+ * arch 2.05 or later.
+ */
+ if (excp == POWERPC_EXCP_HV_EMU) {
+ excp = POWERPC_EXCP_PROGRAM;
+ }
+
+ vector = env->excp_vectors[excp];
+ if (vector == (target_ulong)-1ULL) {
+ cpu_abort(cs, "Raised an exception without defined vector %d\n",
+ excp);
+ }
+
+ vector |= env->excp_prefix;
+
+ switch (excp) {
+ case POWERPC_EXCP_MCHECK: /* Machine check exception */
+ if (msr_me == 0) {
+ /*
+ * Machine check exception is not enabled. Enter
+ * checkstop state.
+ */
+ fprintf(stderr, "Machine check while not allowed. "
+ "Entering checkstop state\n");
+ if (qemu_log_separate()) {
+ qemu_log("Machine check while not allowed. "
+ "Entering checkstop state\n");
+ }
+ cs->halted = 1;
+ cpu_interrupt_exittb(cs);
+ }
+
+ /* machine check exceptions don't have ME set */
+ new_msr &= ~((target_ulong)1 << MSR_ME);
+
+ break;
+ case POWERPC_EXCP_DSI: /* Data storage exception */
+ trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
+ break;
+ case POWERPC_EXCP_ISI: /* Instruction storage exception */
+ trace_ppc_excp_isi(msr, env->nip);
+ msr |= env->error_code;
+ break;
+ case POWERPC_EXCP_EXTERNAL: /* External input */
+ break;
+ case POWERPC_EXCP_ALIGN: /* Alignment exception */
+ /* Get rS/rD and rA from faulting opcode */
+ /*
+ * Note: the opcode fields will not be set properly for a
+ * direct store load/store, but nobody cares as nobody
+ * actually uses direct store segments.
+ */
+ env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
+ break;
+ case POWERPC_EXCP_PROGRAM: /* Program exception */
+ switch (env->error_code & ~0xF) {
+ case POWERPC_EXCP_FP:
+ if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
+ trace_ppc_excp_fp_ignore();
+ cs->exception_index = POWERPC_EXCP_NONE;
+ env->error_code = 0;
+ return;
+ }
+
+ /*
+ * FP exceptions always have NIP pointing to the faulting
+ * instruction, so always use store_next and claim we are
+ * precise in the MSR.
+ */
+ msr |= 0x00100000;
+ break;
+ case POWERPC_EXCP_INVAL:
+ trace_ppc_excp_inval(env->nip);
+ msr |= 0x00080000;
+ break;
+ case POWERPC_EXCP_PRIV:
+ msr |= 0x00040000;
+ break;
+ case POWERPC_EXCP_TRAP:
+ msr |= 0x00020000;
+ break;
+ default:
+ /* Should never occur */
+ cpu_abort(cs, "Invalid program exception %d. Aborting\n",
+ env->error_code);
+ break;
+ }
+ break;
+ case POWERPC_EXCP_SYSCALL: /* System call exception */
+ {
+ int lev = env->error_code;
+
+ if ((lev == 1) && cpu->vhyp) {
+ dump_hcall(env);
+ } else {
+ dump_syscall(env);
+ }
+
+ /*
+ * We need to correct the NIP which in this case is supposed
+ * to point to the next instruction
+ */
+ env->nip += 4;
+
+ /*
+ * The Virtual Open Firmware (VOF) relies on the 'sc 1'
+ * instruction to communicate with QEMU. The pegasos2 machine
+ * uses VOF and the 74xx CPUs, so although the 74xx don't have
+ * HV mode, we need to keep hypercall support.
+ */
+ if ((lev == 1) && cpu->vhyp) {
+ PPCVirtualHypervisorClass *vhc =
+ PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ vhc->hypercall(cpu->vhyp, cpu);
+ return;
+ }
+
+ break;
+ }
+ case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
+ case POWERPC_EXCP_DECR: /* Decrementer exception */
+ break;
+ case POWERPC_EXCP_RESET: /* System reset exception */
+ if (msr_pow) {
+ cpu_abort(cs, "Trying to deliver power-saving system reset "
+ "exception %d with no HV support\n", excp);
+ }
+ break;
+ case POWERPC_EXCP_TRACE: /* Trace exception */
+ break;
+ case POWERPC_EXCP_VPU: /* Vector unavailable exception */
+ break;
+ case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
+ case POWERPC_EXCP_SMI: /* System management interrupt */
+ case POWERPC_EXCP_THERM: /* Thermal interrupt */
+ case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
+ case POWERPC_EXCP_VPUA: /* Vector assist exception */
+ cpu_abort(cs, "%s exception not implemented\n",
+ powerpc_excp_name(excp));
+ break;
+ default:
+ cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
+ break;
+ }
+
+ /* Sanity check */
+ if (!(env->msr_mask & MSR_HVB)) {
+ if (new_msr & MSR_HVB) {
+ cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
+ "no HV support\n", excp);
+ }
+ }
+
+ /*
+ * Sort out the endianness of the interrupt; this differs depending
+ * on the CPU, the HV mode, etc.
+ */
+ if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+
+ /* Save PC */
+ env->spr[SPR_SRR0] = env->nip;
+
+ /* Save MSR */
+ env->spr[SPR_SRR1] = msr;
+
+ powerpc_set_excp_state(cpu, vector, new_msr);
+}
+
+#ifdef TARGET_PPC64
+static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ int excp_model = env->excp_model;
+ target_ulong msr, new_msr, vector;
+ int srr0, srr1, lev = -1;
+
+ if (excp <= POWERPC_EXCP_NONE || excp >= POWERPC_EXCP_NB) {
+ cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
+ }
+
+ qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
+ " => %s (%d) error=%02x\n", env->nip, powerpc_excp_name(excp),
+ excp, env->error_code);
+
+ /* new srr1 value excluding must-be-zero bits */
+ msr = env->msr & ~0x783f0000ULL;
+
+ /*
+ * new interrupt handler msr preserves existing HV and ME unless
+ * explicitly overridden
+ */
+ new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);
+
+ /* target registers */
+ srr0 = SPR_SRR0;
+ srr1 = SPR_SRR1;
+
+ /*
+ * check for special resume at 0x100 from doze/nap/sleep/winkle on
+ * P7/P8/P9
+ */
+ if (env->resume_as_sreset) {
+ excp = powerpc_reset_wakeup(cs, env, excp, &msr);
+ }
+
+ /*
+ * We don't want to generate a Hypervisor Emulation Assistance
+ * Interrupt if we don't have HVB in msr_mask (PAPR mode).
+ */
+ if (excp == POWERPC_EXCP_HV_EMU && !(env->msr_mask & MSR_HVB)) {
+ excp = POWERPC_EXCP_PROGRAM;
+ }
+
+ vector = env->excp_vectors[excp];
+ if (vector == (target_ulong)-1ULL) {
+ cpu_abort(cs, "Raised an exception without defined vector %d\n",
+ excp);
+ }
+
+ vector |= env->excp_prefix;
+
+ switch (excp) {
+ case POWERPC_EXCP_MCHECK: /* Machine check exception */
+ if (msr_me == 0) {
+ /*
+ * Machine check exception is not enabled. Enter
+ * checkstop state.
+ */
+ fprintf(stderr, "Machine check while not allowed. "
+ "Entering checkstop state\n");
+ if (qemu_log_separate()) {
+ qemu_log("Machine check while not allowed. "
+ "Entering checkstop state\n");
+ }
+ cs->halted = 1;
+ cpu_interrupt_exittb(cs);
+ }
+ if (env->msr_mask & MSR_HVB) {
+ /*
+ * ISA specifies HV, but can be delivered to guest with HV
+ * clear (e.g., see FWNMI in PAPR).
+ */
+ new_msr |= (target_ulong)MSR_HVB;
+ }
+
+ /* machine check exceptions don't have ME set */
+ new_msr &= ~((target_ulong)1 << MSR_ME);
+
+ break;
+ case POWERPC_EXCP_DSI: /* Data storage exception */
+ trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
+ break;
+ case POWERPC_EXCP_ISI: /* Instruction storage exception */
+ trace_ppc_excp_isi(msr, env->nip);
+ msr |= env->error_code;
+ break;
+ case POWERPC_EXCP_EXTERNAL: /* External input */
+ {
+ bool lpes0;
+
+ /*
+ * LPES0 is only taken into consideration if we support HV
+ * mode for this CPU.
+ */
+ if (!env->has_hv_mode) {
+ break;
+ }
+
+ lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
+
+ if (!lpes0) {
+ new_msr |= (target_ulong)MSR_HVB;
+ new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+ srr0 = SPR_HSRR0;
+ srr1 = SPR_HSRR1;
+ }
+
+ break;
+ }
+ case POWERPC_EXCP_ALIGN: /* Alignment exception */
+ /* Get rS/rD and rA from faulting opcode */
+ /*
+ * Note: the opcode fields will not be set properly for a
+ * direct store load/store, but nobody cares as nobody
+ * actually uses direct store segments.
+ */
+ env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
+ break;
+ case POWERPC_EXCP_PROGRAM: /* Program exception */
+ switch (env->error_code & ~0xF) {
+ case POWERPC_EXCP_FP:
+ if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
+ trace_ppc_excp_fp_ignore();
+ cs->exception_index = POWERPC_EXCP_NONE;
+ env->error_code = 0;
+ return;
+ }
+
+ /*
+ * FP exceptions always have NIP pointing to the faulting
+ * instruction, so always use store_next and claim we are
+ * precise in the MSR.
+ */
+ msr |= 0x00100000;
+ break;
+ case POWERPC_EXCP_INVAL:
+ trace_ppc_excp_inval(env->nip);
+ msr |= 0x00080000;
+ break;
+ case POWERPC_EXCP_PRIV:
+ msr |= 0x00040000;
+ break;
+ case POWERPC_EXCP_TRAP:
+ msr |= 0x00020000;
+ break;
+ default:
+ /* Should never occur */
+ cpu_abort(cs, "Invalid program exception %d. Aborting\n",
+ env->error_code);
+ break;
+ }
+ break;
+ case POWERPC_EXCP_SYSCALL: /* System call exception */
+ lev = env->error_code;
+
+ if ((lev == 1) && cpu->vhyp) {
+ dump_hcall(env);
+ } else {
+ dump_syscall(env);
+ }
+
+ /*
+ * We need to correct the NIP which in this case is supposed
+ * to point to the next instruction
+ */
+ env->nip += 4;
+
+ /* "PAPR mode" built-in hypercall emulation */
+ if ((lev == 1) && cpu->vhyp) {
+ PPCVirtualHypervisorClass *vhc =
+ PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ vhc->hypercall(cpu->vhyp, cpu);
+ return;
+ }
+ if (lev == 1) {
+ new_msr |= (target_ulong)MSR_HVB;
+ }
+ break;
+ case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception */
+ lev = env->error_code;
+ dump_syscall(env);
+ env->nip += 4;
+ new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
+ new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+
+ vector += lev * 0x20;
+
+ env->lr = env->nip;
+ env->ctr = msr;
+ break;
+ case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
+ case POWERPC_EXCP_DECR: /* Decrementer exception */
+ break;
+ case POWERPC_EXCP_RESET: /* System reset exception */
+ /* A power-saving exception sets ME, otherwise it is unchanged */
+ if (msr_pow) {
+ /* indicate that we resumed from power save mode */
+ msr |= 0x10000;
+ new_msr |= ((target_ulong)1 << MSR_ME);
+ }
+ if (env->msr_mask & MSR_HVB) {
+ /*
+ * ISA specifies HV, but can be delivered to guest with HV
+ * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
+ */
+ new_msr |= (target_ulong)MSR_HVB;
+ } else {
+ if (msr_pow) {
+ cpu_abort(cs, "Trying to deliver power-saving system reset "
+ "exception %d with no HV support\n", excp);
+ }
+ }
+ break;
+ case POWERPC_EXCP_DSEG: /* Data segment exception */
+ case POWERPC_EXCP_ISEG: /* Instruction segment exception */
+ case POWERPC_EXCP_TRACE: /* Trace exception */
+ break;
+ case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */
+ msr |= env->error_code;
+ /* fall through */
+ case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
+ case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
+ case POWERPC_EXCP_SDOOR_HV: /* Hypervisor Doorbell interrupt */
+ case POWERPC_EXCP_HV_EMU:
+ case POWERPC_EXCP_HVIRT: /* Hypervisor virtualization */
+ srr0 = SPR_HSRR0;
+ srr1 = SPR_HSRR1;
+ new_msr |= (target_ulong)MSR_HVB;
+ new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+ break;
+ case POWERPC_EXCP_VPU: /* Vector unavailable exception */
+ case POWERPC_EXCP_VSXU: /* VSX unavailable exception */
+ case POWERPC_EXCP_FU: /* Facility unavailable exception */
+ env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
+ break;
+ case POWERPC_EXCP_HV_FU: /* Hypervisor Facility Unavailable Exception */
+ env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
+ srr0 = SPR_HSRR0;
+ srr1 = SPR_HSRR1;
+ new_msr |= (target_ulong)MSR_HVB;
+ new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+ break;
+ case POWERPC_EXCP_THERM: /* Thermal interrupt */
+ case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
+ case POWERPC_EXCP_VPUA: /* Vector assist exception */
+ case POWERPC_EXCP_MAINT: /* Maintenance exception */
+ case POWERPC_EXCP_SDOOR: /* Doorbell interrupt */
+ case POWERPC_EXCP_HV_MAINT: /* Hypervisor Maintenance exception */
+ cpu_abort(cs, "%s exception not implemented\n",
+ powerpc_excp_name(excp));
+ break;
+ default:
+ cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
+ break;
+ }
+
+ /* Sanity check */
+ if (!(env->msr_mask & MSR_HVB)) {
+ if (new_msr & MSR_HVB) {
+ cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
+ "no HV support\n", excp);
+ }
+ if (srr0 == SPR_HSRR0) {
+ cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
+ "no HV support\n", excp);
+ }
+ }
+
+ /*
+ * Sort out the endianness of the interrupt; this differs depending
+ * on the CPU, the HV mode, etc.
+ */
+ if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+
+ new_msr |= (target_ulong)1 << MSR_SF;
+
+ if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
+ /* Save PC */
+ env->spr[srr0] = env->nip;
+
+ /* Save MSR */
+ env->spr[srr1] = msr;
+ }
+
+ /* This can update new_msr and vector if AIL applies */
+ ppc_excp_apply_ail(cpu, excp_model, excp, msr, &new_msr, &vector);
+
+ powerpc_set_excp_state(cpu, vector, new_msr);
+}
+#else
+static inline void powerpc_excp_books(PowerPCCPU *cpu, int excp)
+{
+ g_assert_not_reached();
+}
+#endif
+
/*
* Note that this function should be greatly optimized when called
* with a constant excp, from ppc_hw_interrupt
@@ -768,7 +1422,6 @@ static inline void powerpc_excp_legacy(PowerPCCPU *cpu, int excp)
case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
switch (excp_model) {
- case POWERPC_EXCP_602:
case POWERPC_EXCP_603:
case POWERPC_EXCP_G2:
/* Swap temporary saved registers with GPRs */
@@ -872,6 +1525,19 @@ static void powerpc_excp(PowerPCCPU *cpu, int excp)
CPUPPCState *env = &cpu->env;
switch (env->excp_model) {
+ case POWERPC_EXCP_40x:
+ powerpc_excp_40x(cpu, excp);
+ break;
+ case POWERPC_EXCP_74xx:
+ powerpc_excp_74xx(cpu, excp);
+ break;
+ case POWERPC_EXCP_970:
+ case POWERPC_EXCP_POWER7:
+ case POWERPC_EXCP_POWER8:
+ case POWERPC_EXCP_POWER9:
+ case POWERPC_EXCP_POWER10:
+ powerpc_excp_books(cpu, excp);
+ break;
default:
powerpc_excp_legacy(cpu, excp);
}
@@ -1155,7 +1821,6 @@ void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
(env->spr[SPR_PSSCR] & PSSCR_EC);
}
#endif /* defined(TARGET_PPC64) */
-#endif /* CONFIG_TCG */
static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
@@ -1164,6 +1829,10 @@ static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
/* MSR:POW cannot be set by any form of rfi */
msr &= ~(1ULL << MSR_POW);
+ /* MSR:TGPR cannot be set by any form of rfi */
+ if (env->flags & POWERPC_FLAG_TGPR) {
+ msr &= ~(1ULL << MSR_TGPR);
+ }
+
#if defined(TARGET_PPC64)
/* Switching to 32-bit ? Crop the nip */
if (!msr_is_64bit(env, msr)) {
@@ -1188,7 +1857,6 @@ static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
check_tlb_flush(env, false);
}
-#ifdef CONFIG_TCG
void helper_rfi(CPUPPCState *env)
{
do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
diff --git a/target/ppc/helper.h b/target/ppc/helper.h
index d318837..f2e5060 100644
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -646,7 +646,6 @@ DEF_HELPER_FLAGS_2(slbieg, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_2(load_sr, TCG_CALL_NO_RWG, tl, env, tl)
DEF_HELPER_FLAGS_3(store_sr, TCG_CALL_NO_RWG, void, env, tl, tl)
-DEF_HELPER_FLAGS_1(602_mfrom, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_1(msgsnd, void, tl)
DEF_HELPER_2(msgclr, void, env, tl)
DEF_HELPER_1(book3s_msgsnd, void, tl)
@@ -707,6 +706,7 @@ DEF_HELPER_FLAGS_1(load_40x_pit, TCG_CALL_NO_RWG, tl, env)
DEF_HELPER_FLAGS_2(store_40x_pit, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_2(store_40x_tcr, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_2(store_40x_tsr, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_2(store_40x_pid, void, env, tl)
DEF_HELPER_2(store_40x_dbcr0, void, env, tl)
DEF_HELPER_2(store_40x_sler, void, env, tl)
DEF_HELPER_FLAGS_2(store_booke_tcr, TCG_CALL_NO_RWG, void, env, tl)
diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c
index 8671b7b..5b12cb0 100644
--- a/target/ppc/helper_regs.c
+++ b/target/ppc/helper_regs.c
@@ -156,7 +156,8 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
*/
unsigned immu_idx, dmmu_idx;
dmmu_idx = msr & (1 << MSR_PR) ? 0 : 1;
- if (env->mmu_model & POWERPC_MMU_BOOKE) {
+ if (env->mmu_model == POWERPC_MMU_BOOKE ||
+ env->mmu_model == POWERPC_MMU_BOOKE206) {
dmmu_idx |= msr & (1 << MSR_GS) ? 4 : 0;
immu_idx = dmmu_idx;
immu_idx |= msr & (1 << MSR_IS) ? 2 : 0;
@@ -201,7 +202,11 @@ void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
void cpu_interrupt_exittb(CPUState *cs)
{
- if (!kvm_enabled()) {
+ /*
+ * We don't need to worry about translation blocks
+ * when running with KVM.
+ */
+ if (kvm_enabled()) {
return;
}
@@ -233,7 +238,8 @@ int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv)
((value >> MSR_DR) & 1) != msr_dr) {
cpu_interrupt_exittb(cs);
}
- if ((env->mmu_model & POWERPC_MMU_BOOKE) &&
+ if ((env->mmu_model == POWERPC_MMU_BOOKE ||
+ env->mmu_model == POWERPC_MMU_BOOKE206) &&
((value >> MSR_GS) & 1) != msr_gs) {
cpu_interrupt_exittb(cs);
}
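
Note that the BookE check above is no longer a bitmask test: the same two-way comparison against POWERPC_MMU_BOOKE and POWERPC_MMU_BOOKE206 recurs here and again in the mmu_common.c hunk further down. A small predicate along the following lines could factor it out; this is only an illustrative sketch, and mmu_is_booke() is a hypothetical name, not something this patch adds:

    /* Illustrative only: mmu_is_booke() is a hypothetical helper, not part of this patch. */
    static inline bool mmu_is_booke(const CPUPPCState *env)
    {
        /* True for both the classic BookE and the BookE 2.06 (e500) MMU models. */
        return env->mmu_model == POWERPC_MMU_BOOKE ||
               env->mmu_model == POWERPC_MMU_BOOKE206;
    }

Callers would then read, e.g., if (mmu_is_booke(env) && ((value >> MSR_GS) & 1) != msr_gs).
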
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index 9bc327b..d7765fd 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -489,27 +489,6 @@ target_ulong helper_divso(CPUPPCState *env, target_ulong arg1,
}
/*****************************************************************************/
-/* 602 specific instructions */
-/* mfrom is the most crazy instruction ever seen, imho ! */
-/* Real implementation uses a ROM table. Do the same */
-/*
- * Extremely decomposed:
- * return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
- */
-#if !defined(CONFIG_USER_ONLY)
-target_ulong helper_602_mfrom(target_ulong arg)
-{
- if (likely(arg < 602)) {
-#include "mfrom_table.c.inc"
- return mfrom_ROM_table[arg];
- } else {
- return 0;
- }
-}
-#endif
-
-/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element) \
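
For reference, the removed helper_602_mfrom() above simply indexed a 602-entry ROM table whose values follow 256 * log10(10^(-arg / 256) + 1.0) + 0.5, the same formula used by the mfrom_table_gen.c generator deleted further down. A minimal stand-alone sketch, equivalent in spirit to that generator (not part of this patch), that reproduces individual entries:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Reproduce one entry of the removed mfrom ROM table:
     * entry(i) = 256 * log10(10^(-i / 256) + 1.0) + 0.5, truncated to uint8_t.
     */
    static uint8_t mfrom_entry(int i)
    {
        double d = 256.0 * log10(pow(10.0, -i / 256.0) + 1.0) + 0.5;
        return (uint8_t)d;
    }

    int main(void)
    {
        /* Prints "77 0", matching the first and last values of the deleted table. */
        printf("%d %d\n", mfrom_entry(0), mfrom_entry(601));
        return 0;
    }
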
diff --git a/target/ppc/machine.c b/target/ppc/machine.c
index 733a22d..a503e00 100644
--- a/target/ppc/machine.c
+++ b/target/ppc/machine.c
@@ -421,7 +421,6 @@ static const VMStateDescription vmstate_tm = {
.name = "cpu/tm",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.needed = tm_needed,
.fields = (VMStateField []) {
VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
@@ -672,7 +671,6 @@ const VMStateDescription vmstate_ppc_cpu = {
.name = "cpu",
.version_id = 5,
.minimum_version_id = 5,
- .minimum_version_id_old = 4,
.pre_save = cpu_pre_save,
.post_load = cpu_post_load,
.fields = (VMStateField[]) {
diff --git a/target/ppc/mfrom_table.c.inc b/target/ppc/mfrom_table.c.inc
deleted file mode 100644
index 1653b97..0000000
--- a/target/ppc/mfrom_table.c.inc
+++ /dev/null
@@ -1,78 +0,0 @@
-static const uint8_t mfrom_ROM_table[602] = {
- 77, 77, 76, 76, 75, 75, 74, 74,
- 73, 73, 72, 72, 71, 71, 70, 70,
- 69, 69, 68, 68, 68, 67, 67, 66,
- 66, 65, 65, 64, 64, 64, 63, 63,
- 62, 62, 61, 61, 61, 60, 60, 59,
- 59, 58, 58, 58, 57, 57, 56, 56,
- 56, 55, 55, 54, 54, 54, 53, 53,
- 53, 52, 52, 51, 51, 51, 50, 50,
- 50, 49, 49, 49, 48, 48, 47, 47,
- 47, 46, 46, 46, 45, 45, 45, 44,
- 44, 44, 43, 43, 43, 42, 42, 42,
- 42, 41, 41, 41, 40, 40, 40, 39,
- 39, 39, 39, 38, 38, 38, 37, 37,
- 37, 37, 36, 36, 36, 35, 35, 35,
- 35, 34, 34, 34, 34, 33, 33, 33,
- 33, 32, 32, 32, 32, 31, 31, 31,
- 31, 30, 30, 30, 30, 29, 29, 29,
- 29, 28, 28, 28, 28, 28, 27, 27,
- 27, 27, 26, 26, 26, 26, 26, 25,
- 25, 25, 25, 25, 24, 24, 24, 24,
- 24, 23, 23, 23, 23, 23, 23, 22,
- 22, 22, 22, 22, 21, 21, 21, 21,
- 21, 21, 20, 20, 20, 20, 20, 20,
- 19, 19, 19, 19, 19, 19, 19, 18,
- 18, 18, 18, 18, 18, 17, 17, 17,
- 17, 17, 17, 17, 16, 16, 16, 16,
- 16, 16, 16, 16, 15, 15, 15, 15,
- 15, 15, 15, 15, 14, 14, 14, 14,
- 14, 14, 14, 14, 13, 13, 13, 13,
- 13, 13, 13, 13, 13, 12, 12, 12,
- 12, 12, 12, 12, 12, 12, 12, 11,
- 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 10, 10, 10, 10, 10, 10,
- 10, 10, 10, 10, 10, 9, 9, 9,
- 9, 9, 9, 9, 9, 9, 9, 9,
- 9, 9, 8, 8, 8, 8, 8, 8,
- 8, 8, 8, 8, 8, 8, 8, 8,
- 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 6, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 5, 5, 5, 5,
- 5, 5, 5, 5, 5, 5, 5, 5,
- 5, 5, 5, 5, 5, 5, 5, 5,
- 5, 5, 5, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 4, 4, 3,
- 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 0,
-};
diff --git a/target/ppc/mfrom_table_gen.c b/target/ppc/mfrom_table_gen.c
deleted file mode 100644
index f96c426..0000000
--- a/target/ppc/mfrom_table_gen.c
+++ /dev/null
@@ -1,34 +0,0 @@
-#define _GNU_SOURCE
-#include "qemu/osdep.h"
-#include <math.h>
-
-int main(void)
-{
- double d;
- uint8_t n;
- int i;
-
- printf("static const uint8_t mfrom_ROM_table[602] =\n{\n ");
- for (i = 0; i < 602; i++) {
- /*
- * Extremely decomposed:
- * T0 = 256 * log10(10^(-T0 / 256) + 1.0) + 0.5
- */
- d = -i;
- d /= 256.0;
- d = exp10(d);
- d += 1.0;
- d = log10(d);
- d *= 256;
- d += 0.5;
- n = d;
- printf("%3d, ", n);
- if ((i & 7) == 7) {
- printf("\n ");
- }
- }
- printf("\n};\n");
-
- return 0;
-}
diff --git a/target/ppc/mmu_common.c b/target/ppc/mmu_common.c
index 91270c1..6512ee0 100644
--- a/target/ppc/mmu_common.c
+++ b/target/ppc/mmu_common.c
@@ -1367,22 +1367,34 @@ static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr,
case -2:
/* Access rights violation */
cs->exception_index = POWERPC_EXCP_ISI;
- env->error_code = 0x08000000;
+ if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
+ (env->mmu_model == POWERPC_MMU_BOOKE206)) {
+ env->error_code = 0;
+ } else {
+ env->error_code = 0x08000000;
+ }
break;
case -3:
/* No execute protection violation */
if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
(env->mmu_model == POWERPC_MMU_BOOKE206)) {
env->spr[SPR_BOOKE_ESR] = 0x00000000;
+ env->error_code = 0;
+ } else {
+ env->error_code = 0x10000000;
}
cs->exception_index = POWERPC_EXCP_ISI;
- env->error_code = 0x10000000;
break;
case -4:
/* Direct store exception */
/* No code fetch is allowed in direct-store areas */
cs->exception_index = POWERPC_EXCP_ISI;
- env->error_code = 0x10000000;
+ if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
+ (env->mmu_model == POWERPC_MMU_BOOKE206)) {
+ env->error_code = 0;
+ } else {
+ env->error_code = 0x10000000;
+ }
break;
}
} else {
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
index 59df695..a2a52a1 100644
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -664,6 +664,14 @@ static inline int booke_page_size_to_tlb(target_ulong page_size)
#define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
#define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
+void helper_store_40x_pid(CPUPPCState *env, target_ulong val)
+{
+ if (env->spr[SPR_40x_PID] != val) {
+ env->spr[SPR_40x_PID] = val;
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
+ }
+}
+
target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
{
ppcemb_tlb_t *tlb;
@@ -681,7 +689,7 @@ target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
size = PPC4XX_TLBHI_SIZE_DEFAULT;
}
ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
- env->spr[SPR_40x_PID] = tlb->PID;
+ helper_store_40x_pid(env, tlb->PID);
return ret;
}
@@ -794,6 +802,8 @@ void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
tlb->prot & PAGE_WRITE ? 'w' : '-',
tlb->prot & PAGE_EXEC ? 'x' : '-',
tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
+
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
}
target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
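
The flush in helper_store_40x_pid() above is needed because a 40x translation is only valid for a given PID: each emulated TLB entry carries a PID/TID field that is matched against SPR_40x_PID during lookup (zero matching any process), while QEMU's softmmu TLB may still hold translations established under the old PID value. A simplified sketch of that match rule, modelled on the existing ppcemb lookup and not new code in this patch:

    /*
     * Simplified sketch of the 40x/ppcemb TLB match rule (illustrative only):
     * an entry applies when it is valid, its PID is zero (global) or equal to
     * the current PID, and the effective address falls inside the entry.
     */
    static bool tlb_entry_matches(const ppcemb_tlb_t *tlb, target_ulong ea,
                                  uint32_t pid)
    {
        target_ulong mask = ~(target_ulong)(tlb->size - 1);

        if (!(tlb->prot & PAGE_VALID)) {
            return false;
        }
        if (tlb->PID != 0 && tlb->PID != pid) {
            return false;   /* entry belongs to another address space */
        }
        return (ea & mask) == (tlb->EPN & mask);
    }

With this rule, changing the PID can silently change which entries apply to an address, so any cached translation must be discarded; setting TLB_NEED_LOCAL_FLUSH requests exactly that.
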
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 9d2adc0..c2f436f 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -894,7 +894,7 @@ void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn)
{
TCGv t0 = tcg_temp_new();
tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xFF);
- gen_store_spr(SPR_40x_PID, t0);
+ gen_helper_store_40x_pid(cpu_env, t0);
tcg_temp_free(t0);
}
@@ -6272,33 +6272,6 @@ static void gen_srq(DisasContext *ctx)
}
}
-/* PowerPC 602 specific instructions */
-
-/* dsa */
-static void gen_dsa(DisasContext *ctx)
-{
- /* XXX: TODO */
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
-}
-
-/* esa */
-static void gen_esa(DisasContext *ctx)
-{
- /* XXX: TODO */
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
-}
-
-/* mfrom */
-static void gen_mfrom(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- CHK_SV;
- gen_helper_602_mfrom(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
/* 602 - 603 - G2 TLB management */
/* tlbld */
@@ -7779,9 +7752,6 @@ GEN_HANDLER(sriq, 0x1F, 0x18, 0x15, 0x00000000, PPC_POWER_BR),
GEN_HANDLER(srliq, 0x1F, 0x18, 0x17, 0x00000000, PPC_POWER_BR),
GEN_HANDLER(srlq, 0x1F, 0x18, 0x16, 0x00000000, PPC_POWER_BR),
GEN_HANDLER(srq, 0x1F, 0x18, 0x14, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(dsa, 0x1F, 0x14, 0x13, 0x03FFF801, PPC_602_SPEC),
-GEN_HANDLER(esa, 0x1F, 0x14, 0x12, 0x03FFF801, PPC_602_SPEC),
-GEN_HANDLER(mfrom, 0x1F, 0x09, 0x08, 0x03E0F801, PPC_602_SPEC),
GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB),
GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB),
GEN_HANDLER(clf, 0x1F, 0x16, 0x03, 0x03E00000, PPC_POWER),
diff --git a/target/sparc/machine.c b/target/sparc/machine.c
index 917375c..44b9e7d 100644
--- a/target/sparc/machine.c
+++ b/target/sparc/machine.c
@@ -10,7 +10,6 @@ static const VMStateDescription vmstate_cpu_timer = {
.name = "cpu_timer",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT32(frequency, CPUTimer),
VMSTATE_UINT32(disabled, CPUTimer),
@@ -30,7 +29,6 @@ static const VMStateDescription vmstate_trap_state = {
.name = "trap_state",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT64(tpc, trap_state),
VMSTATE_UINT64(tnpc, trap_state),
@@ -44,7 +42,6 @@ static const VMStateDescription vmstate_tlb_entry = {
.name = "tlb_entry",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_UINT64(tag, SparcTLBEntry),
VMSTATE_UINT64(tte, SparcTLBEntry),
@@ -113,7 +110,6 @@ const VMStateDescription vmstate_sparc_cpu = {
.name = "cpu",
.version_id = SPARC_VMSTATE_VER,
.minimum_version_id = SPARC_VMSTATE_VER,
- .minimum_version_id_old = SPARC_VMSTATE_VER,
.pre_save = cpu_pre_save,
.fields = (VMStateField[]) {
VMSTATE_UINTTL_ARRAY(env.gregs, SPARCCPU, 8),
diff --git a/tests/qemu-iotests/tests/block-status-cache b/tests/qemu-iotests/tests/block-status-cache
new file mode 100755
index 0000000..6fa10bb
--- /dev/null
+++ b/tests/qemu-iotests/tests/block-status-cache
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+# group: rw quick
+#
+# Test cases for the block-status cache.
+#
+# Copyright (C) 2022 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import signal
+import iotests
+from iotests import qemu_img_create, qemu_img_pipe, qemu_nbd
+
+
+image_size = 1 * 1024 * 1024
+test_img = os.path.join(iotests.test_dir, 'test.img')
+
+nbd_pidfile = os.path.join(iotests.test_dir, 'nbd.pid')
+nbd_sock = os.path.join(iotests.sock_dir, 'nbd.sock')
+
+
+class TestBscWithNbd(iotests.QMPTestCase):
+ def setUp(self) -> None:
+ """Just create an empty image with a read-only NBD server on it"""
+ assert qemu_img_create('-f', iotests.imgfmt, test_img,
+ str(image_size)) == 0
+
+ # Pass --allocation-depth to enable the qemu:allocation-depth context,
+ # which we are going to query to provoke a block-status inquiry with
+ # want_zero=false.
+ assert qemu_nbd(f'--socket={nbd_sock}',
+ f'--format={iotests.imgfmt}',
+ '--persistent',
+ '--allocation-depth',
+ '--read-only',
+ f'--pid-file={nbd_pidfile}',
+ test_img) \
+ == 0
+
+ def tearDown(self) -> None:
+ with open(nbd_pidfile, encoding='utf-8') as f:
+ pid = int(f.read())
+ os.kill(pid, signal.SIGTERM)
+ os.remove(nbd_pidfile)
+ os.remove(test_img)
+
+ def test_with_zero_bug(self) -> None:
+ """
+ Verify that the block-status cache is not corrupted by a
+ want_zero=false call.
+ We can provoke a want_zero=false call with `qemu-img map` over NBD with
+ x-dirty-bitmap=qemu:allocation-depth, so we first run a normal `map`
+ (which results in want_zero=true), then a map using said
+ qemu:allocation-depth context, and finally another normal `map` to
+ verify that the cache has not been corrupted.
+ """
+
+ nbd_img_opts = f'driver=nbd,server.type=unix,server.path={nbd_sock}'
+ nbd_img_opts_alloc_depth = nbd_img_opts + \
+ ',x-dirty-bitmap=qemu:allocation-depth'
+
+ # Normal map, results in want_zero=true.
+ # This will probably detect an allocated data sector first (qemu likes
+ # to allocate the first sector to facilitate alignment probing), and
+ # then the rest to be zero. The BSC will thus contain (if anything)
+ # one range covering the first sector.
+ map_pre = qemu_img_pipe('map', '--output=json', '--image-opts',
+ nbd_img_opts)
+
+ # qemu:allocation-depth maps for want_zero=false.
+ # want_zero=false should (with the file driver, which the server is
+ # using) report everything as data. While this is sufficient for
+ # want_zero=false, this is nothing that should end up in the
+ # block-status cache.
+ # Due to a bug, this information did end up in the cache, though, and
+ # this would lead to wrong information being returned on subsequent
+ # want_zero=true calls.
+ #
+ # We need to run this map twice: On the first call, we probably still
+ # have the first sector in the cache, and so this will be served from
+ # the cache; and only the subsequent range will be queried from the
+ # block driver. This subsequent range will then be entered into the
+ # cache.
+ # If we did a want_zero=true call at this point, we would thus get
+ # correct information: The first sector is not covered by the cache, so
+ # we would get fresh block-status information from the driver, which
+ # would return a data range, and this would then go into the cache,
+ # evicting the wrong range from the want_zero=false call before.
+ #
+ # Therefore, we need a second want_zero=false map to reproduce:
+ # Since the first sector is not in the cache, the query for its status
+ # will go to the driver, which will return a result that reports the
+ # whole image to be a single data area. This result will then go into
+ # the cache, and so the cache will then report the whole image to
+ # contain data.
+ #
+ # Note that once the cache reports the whole image to contain data, any
+ # subsequent map operation will be served from the cache, and so we can
+ # never loop too many times here.
+ for _ in range(2):
+ # (Ignore the result, this is just to contaminate the cache)
+ qemu_img_pipe('map', '--output=json', '--image-opts',
+ nbd_img_opts_alloc_depth)
+
+ # Now let's see whether the cache reports everything as data, or
+ # whether we get correct information (i.e. the same as we got on our
+ # first attempt).
+ map_post = qemu_img_pipe('map', '--output=json', '--image-opts',
+ nbd_img_opts)
+
+ if map_pre != map_post:
+ print('ERROR: Map information differs before and after querying ' +
+ 'qemu:allocation-depth')
+ print('Before:')
+ print(map_pre)
+ print('After:')
+ print(map_post)
+
+ self.fail("Map information differs")
+
+
+if __name__ == '__main__':
+ # The block-status cache only works on the protocol layer, so to test it,
+ # we can only use the raw format
+ iotests.main(supported_fmts=['raw'],
+ supported_protocols=['file'])
diff --git a/tests/qemu-iotests/tests/block-status-cache.out b/tests/qemu-iotests/tests/block-status-cache.out
new file mode 100644
index 0000000..ae1213e
--- /dev/null
+++ b/tests/qemu-iotests/tests/block-status-cache.out
@@ -0,0 +1,5 @@
+.
+----------------------------------------------------------------------
+Ran 1 tests
+
+OK