-rw-r--r--  MAINTAINERS | 1
-rw-r--r--  docs/devel/migration/main.rst | 4
-rw-r--r--  docs/devel/migration/postcopy.rst | 36
-rw-r--r--  docs/devel/migration/vfio.rst | 16
-rw-r--r--  docs/system/arm/max78000.rst | 37
-rw-r--r--  docs/system/arm/virt.rst | 9
-rw-r--r--  docs/system/devices/cxl.rst | 11
-rw-r--r--  docs/system/target-arm.rst | 1
-rw-r--r--  hw/acpi/cxl.c | 76
-rw-r--r--  hw/arm/Kconfig | 15
-rw-r--r--  hw/arm/fsl-imx8mp.c | 4
-rw-r--r--  hw/arm/max78000_soc.c | 232
-rw-r--r--  hw/arm/max78000fthr.c | 50
-rw-r--r--  hw/arm/meson.build | 2
-rw-r--r--  hw/arm/virt-acpi-build.c | 40
-rw-r--r--  hw/arm/virt.c | 52
-rw-r--r--  hw/char/Kconfig | 3
-rw-r--r--  hw/char/max78000_uart.c | 285
-rw-r--r--  hw/char/meson.build | 1
-rw-r--r--  hw/cxl/cxl-host-stubs.c | 7
-rw-r--r--  hw/cxl/cxl-host.c | 174
-rw-r--r--  hw/i386/pc.c | 50
-rw-r--r--  hw/intc/arm_gicv3_common.c | 1
-rw-r--r--  hw/intc/arm_gicv3_kvm.c | 29
-rw-r--r--  hw/intc/armv7m_nvic.c | 4
-rw-r--r--  hw/intc/loongarch_extioi.c | 9
-rw-r--r--  hw/intc/loongarch_extioi_common.c | 9
-rw-r--r--  hw/misc/Kconfig | 12
-rw-r--r--  hw/misc/max78000_aes.c | 223
-rw-r--r--  hw/misc/max78000_gcr.c | 351
-rw-r--r--  hw/misc/max78000_icc.c | 120
-rw-r--r--  hw/misc/max78000_trng.c | 139
-rw-r--r--  hw/misc/meson.build | 4
-rw-r--r--  hw/ppc/spapr.c | 2
-rw-r--r--  hw/s390x/s390-pci-bus.c | 26
-rw-r--r--  hw/s390x/s390-stattrib.c | 2
-rw-r--r--  hw/vfio/migration-multifd.c | 4
-rw-r--r--  hw/vfio/migration-multifd.h | 2
-rw-r--r--  hw/vfio/migration.c | 4
-rw-r--r--  include/hw/arm/max78000_soc.h | 50
-rw-r--r--  include/hw/arm/virt.h | 4
-rw-r--r--  include/hw/char/max78000_uart.h | 78
-rw-r--r--  include/hw/cxl/cxl.h | 5
-rw-r--r--  include/hw/cxl/cxl_host.h | 5
-rw-r--r--  include/hw/intc/arm_gicv3_common.h | 1
-rw-r--r--  include/hw/intc/loongarch_extioi.h | 1
-rw-r--r--  include/hw/intc/loongarch_extioi_common.h | 1
-rw-r--r--  include/hw/misc/max78000_aes.h | 68
-rw-r--r--  include/hw/misc/max78000_gcr.h | 131
-rw-r--r--  include/hw/misc/max78000_icc.h | 33
-rw-r--r--  include/hw/misc/max78000_trng.h | 35
-rw-r--r--  include/migration/misc.h | 8
-rw-r--r--  include/migration/register.h | 34
-rw-r--r--  include/qemu/typedefs.h | 6
-rw-r--r--  migration/block-dirty-bitmap.c | 3
-rw-r--r--  migration/migration-hmp-cmds.c | 159
-rw-r--r--  migration/migration.c | 87
-rw-r--r--  migration/migration.h | 2
-rw-r--r--  migration/multifd-device-state.c | 10
-rw-r--r--  migration/options.c | 2
-rw-r--r--  migration/postcopy-ram.c | 563
-rw-r--r--  migration/postcopy-ram.h | 2
-rw-r--r--  migration/ram.c | 32
-rw-r--r--  migration/savevm.c | 89
-rw-r--r--  migration/trace-events | 9
-rw-r--r--  pc-bios/s390-ccw.img | bin 96000 -> 87824 bytes
-rw-r--r--  pc-bios/s390-ccw/Makefile | 2
-rw-r--r--  pc-bios/s390-ccw/menu.c | 6
-rw-r--r--  pc-bios/s390-ccw/netmain.c | 66
-rw-r--r--  pc-bios/s390-ccw/s390-ccw.h | 1
-rw-r--r--  qapi/migration.json | 38
-rw-r--r--  system/qdev-monitor.c | 9
-rw-r--r--  target/arm/cpregs-pmu.c | 1309
-rw-r--r--  target/arm/cpregs.h | 3
-rw-r--r--  target/arm/cpu-sysregs.h.inc | 4
-rw-r--r--  target/arm/cpu.h | 6
-rw-r--r--  target/arm/cpu64.c | 8
-rw-r--r--  target/arm/helper.c | 1813
-rw-r--r--  target/arm/internals.h | 5
-rw-r--r--  target/arm/kvm-stub.c | 5
-rw-r--r--  target/arm/kvm.c | 24
-rw-r--r--  target/arm/kvm_arm.h | 7
-rw-r--r--  target/arm/meson.build | 2
-rw-r--r--  target/arm/tcg-stubs.c | 5
-rw-r--r--  target/arm/tcg/cpregs-at.c | 519
-rw-r--r--  target/arm/tcg/cpu-v7m.c | 16
-rw-r--r--  target/arm/tcg/cpu32.c | 34
-rw-r--r--  target/arm/tcg/cpu64.c | 68
-rw-r--r--  target/arm/tcg/helper-a64.c | 20
-rw-r--r--  target/arm/tcg/helper.h | 1
-rw-r--r--  target/arm/tcg/meson.build | 1
-rw-r--r--  target/arm/tcg/vec_helper.c | 1
-rw-r--r--  target/loongarch/tcg/csr_helper.c | 8
-rw-r--r--  target/loongarch/tcg/tlb_helper.c | 27
-rw-r--r--  target/s390x/cpu-system.c | 6
-rw-r--r--  target/s390x/helper.c | 4
-rw-r--r--  target/s390x/kvm/kvm.c | 4
-rw-r--r--  target/s390x/s390x-internal.h | 13
-rw-r--r--  target/s390x/tcg/mem_helper.c | 10
-rw-r--r--  tests/functional/meson.build | 4
-rwxr-xr-x  tests/functional/test_arm_max78000fthr.py | 48
-rwxr-xr-x  tests/functional/test_ppc_bamboo.py | 34
-rwxr-xr-x  tests/functional/test_s390x_pxelinux.py | 119
-rw-r--r--  tests/qtest/cxl-test.c | 58
-rw-r--r--  tests/qtest/meson.build | 1
-rw-r--r--  tests/qtest/migration/migration-qmp.c | 5
106 files changed, 5351 insertions, 2428 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 1842c3d..e88ed2c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1805,6 +1805,7 @@ F: hw/s390x/ipl.*
F: pc-bios/s390-ccw/
F: pc-bios/s390-ccw.img
F: docs/devel/s390-dasd-ipl.rst
+F: tests/functional/test_s390x_pxelinux.py
T: git https://github.com/borntraeger/qemu.git s390-next
L: qemu-s390x@nongnu.org
diff --git a/docs/devel/migration/main.rst b/docs/devel/migration/main.rst
index cdd4f4a..6493c1d 100644
--- a/docs/devel/migration/main.rst
+++ b/docs/devel/migration/main.rst
@@ -508,8 +508,8 @@ An iterative device must provide:
the point that stream bandwidth limits tell it to stop. Each call
generates one section.
- - A ``save_live_complete_precopy`` function that must transmit the
- last section for the device containing any remaining data.
+ - A ``save_complete`` function that must transmit the last section for
+ the device containing any remaining data.
- A ``load_state`` function used to load sections generated by
any of the save functions that generate sections.
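
[Editor's note: for orientation, a minimal sketch of the renamed hook from a
device's point of view — a hypothetical device, assuming the rename keeps the
old ``save_live_complete_precopy`` signature:]

.. code-block:: c

   static int demo_save_complete(QEMUFile *f, void *opaque)
   {
       /* Transmit the last section, containing any remaining data. */
       return 0;
   }

   static const SaveVMHandlers demo_savevm_handlers = {
       .save_complete = demo_save_complete,
       /* .save_live_iterate, .load_state, ... elided */
   };
   /* Registered e.g. via
    * register_savevm_live("demo", 0, 1, &demo_savevm_handlers, s); */
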
diff --git a/docs/devel/migration/postcopy.rst b/docs/devel/migration/postcopy.rst
index 82e7a84..e319388 100644
--- a/docs/devel/migration/postcopy.rst
+++ b/docs/devel/migration/postcopy.rst
@@ -33,25 +33,6 @@ will now cause the transition from precopy to postcopy.
It can be issued immediately after migration is started or any
time later on. Issuing it after the end of a migration is harmless.
-Blocktime is a postcopy live migration metric, intended to show how
-long the vCPU was in state of interruptible sleep due to pagefault.
-That metric is calculated both for all vCPUs as overlapped value, and
-separately for each vCPU. These values are calculated on destination
-side. To enable postcopy blocktime calculation, enter following
-command on destination monitor:
-
-``migrate_set_capability postcopy-blocktime on``
-
-Postcopy blocktime can be retrieved by query-migrate qmp command.
-postcopy-blocktime value of qmp command will show overlapped blocking
-time for all vCPU, postcopy-vcpu-blocktime will show list of blocking
-time per vCPU.
-
-.. note::
- During the postcopy phase, the bandwidth limits set using
- ``migrate_set_parameter`` is ignored (to avoid delaying requested pages that
- the destination is waiting for).
-
Postcopy internals
==================
@@ -312,3 +293,20 @@ explicitly) to be sent in a separate preempt channel, rather than queued in
the background migration channel. Anyone who cares about latencies of page
faults during a postcopy migration should enable this feature. By default,
it's not enabled.
+
+Postcopy blocktime statistics
+-----------------------------
+
+Blocktime is a postcopy live migration metric, intended to show how
+long a vCPU was in a state of interruptible sleep due to page faults.
+The metric is calculated both as an overlapped value across all vCPUs
+and separately for each vCPU. These values are calculated on the
+destination side. To enable postcopy blocktime calculation, enter the
+following command on the destination monitor:
+
+``migrate_set_capability postcopy-blocktime on``
+
+Postcopy blocktime can be retrieved with the query-migrate QMP
+command: the postcopy-blocktime value shows the overlapped blocking
+time across all vCPUs, while postcopy-vcpu-blocktime shows a list of
+blocking times per vCPU.
diff --git a/docs/devel/migration/vfio.rst b/docs/devel/migration/vfio.rst
index 673e354..2d8e5ca 100644
--- a/docs/devel/migration/vfio.rst
+++ b/docs/devel/migration/vfio.rst
@@ -75,12 +75,12 @@ VFIO implements the device hooks for the iterative approach as follows:
in the non-multifd mode.
  In the multifd mode it just emits a dummy EOS marker.
-* A ``save_live_complete_precopy`` function that sets the VFIO device in
- _STOP_COPY state and iteratively copies the data for the VFIO device until
- the vendor driver indicates that no data remains.
- In the multifd mode it just emits a dummy EOS marker.
+* A ``save_complete`` function that sets the VFIO device in _STOP_COPY
+ state and iteratively copies the data for the VFIO device until the
+ vendor driver indicates that no data remains. In the multifd mode it
+ just emits a dummy EOS marker.
-* A ``save_live_complete_precopy_thread`` function that in the multifd mode
+* A ``save_complete_precopy_thread`` function that in the multifd mode
provides thread handler performing multifd device state transfer.
It sets the VFIO device to _STOP_COPY state, iteratively reads the data
from the VFIO device and queues it for multifd transmission until the vendor
@@ -195,12 +195,12 @@ Live migration save path
|
Then the VFIO device is put in _STOP_COPY state
(FINISH_MIGRATE, _ACTIVE, _STOP_COPY)
- .save_live_complete_precopy() is called for each active device
+ .save_complete() is called for each active device
For the VFIO device: in the non-multifd mode iterate in
- .save_live_complete_precopy() until
+ .save_complete() until
pending data is 0
In the multifd mode this iteration is done in
- .save_live_complete_precopy_thread() instead.
+ .save_complete_precopy_thread() instead.
|
(POSTMIGRATE, _COMPLETED, _STOP_COPY)
      Migration thread schedules cleanup bottom half and exits
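
[Editor's note: as an illustration of the loop shape described above — a
sketch only; the helper names are hypothetical stand-ins, not the real VFIO
API, which lives in hw/vfio/migration.c:]

.. code-block:: c

   /* Hypothetical stand-ins for the VFIO migration machinery. */
   static void device_set_stop_copy(void *dev);
   static ssize_t device_read_state(void *dev, uint8_t *buf, size_t len);

   static int sketch_save_complete(QEMUFile *f, void *opaque)
   {
       uint8_t buf[4096];
       ssize_t len;

       device_set_stop_copy(opaque);            /* enter _STOP_COPY */
       while ((len = device_read_state(opaque, buf, sizeof(buf))) > 0) {
           qemu_put_buffer(f, buf, len);        /* stream device state */
       }
       return len < 0 ? -1 : 0;                 /* 0: no data remains */
   }
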
diff --git a/docs/system/arm/max78000.rst b/docs/system/arm/max78000.rst
new file mode 100644
index 0000000..3d95011
--- /dev/null
+++ b/docs/system/arm/max78000.rst
@@ -0,0 +1,37 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+Analog Devices max78000 board (``max78000fthr``)
+================================================
+
+The max78000 is a Cortex-M4 based SoC with a RISC-V coprocessor. The
+RISC-V coprocessor is not supported.
+
+Supported devices
+-----------------
+
+ * Instruction Cache Controller
+ * UART
+ * Global Control Register
+ * True Random Number Generator
+ * AES
+
+Notable unsupported devices
+---------------------------
+
+ * I2C
+ * CNN
+ * CRC
+ * SPI
+
+Boot options
+------------
+
+The max78000 can be started using the ``-kernel`` option to load
+firmware at address 0 as the ROM. As the ROM normally jumps to software
+loaded from the internal flash at address 0x10000000, loading your
+program there is generally advisable. If you don't have a copy of the
+ROM, the interrupt vector table from the user firmware will do.
+Example:
+
+.. code-block:: bash
+
+ $ qemu-system-arm -machine max78000fthr -kernel max78000.bin -device loader,file=max78000.bin,addr=0x10000000
diff --git a/docs/system/arm/virt.rst b/docs/system/arm/virt.rst
index 6a719b9..10cbffc 100644
--- a/docs/system/arm/virt.rst
+++ b/docs/system/arm/virt.rst
@@ -31,6 +31,7 @@ Supported devices
The virt board supports:
- PCI/PCIe devices
+- CXL Fixed memory windows, root bridges and devices.
- Flash memory
- Either one or two PL011 UARTs for the NonSecure World
- An RTC
@@ -189,6 +190,14 @@ ras
acpi
Set ``on``/``off``/``auto`` to enable/disable ACPI.
+cxl
+ Set ``on``/``off`` to enable/disable CXL. More details in
+ :doc:`../devices/cxl`. The default is off.
+
+cxl-fmw
+ Array of CXL fixed memory windows describing fixed address routing to
+ target CXL host bridges. See :doc:`../devices/cxl`.
+
dtb-randomness
Set ``on``/``off`` to pass random seeds via the guest DTB
rng-seed and kaslr-seed nodes (in both "/chosen" and
diff --git a/docs/system/devices/cxl.rst b/docs/system/devices/cxl.rst
index e307caf..ca15a0d 100644
--- a/docs/system/devices/cxl.rst
+++ b/docs/system/devices/cxl.rst
@@ -384,6 +384,17 @@ An example of 4 devices below a switch suitable for 1, 2 or 4 way interleave::
-device cxl-type3,bus=swport3,persistent-memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem3,sn=0x4 \
-M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=4k
+A simple arm/virt example featuring a single direct connected CXL Type 3
+Volatile Memory device::
+
+ qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8g,slots=4 -cpu max -smp 4 \
+ ...
+ -object memory-backend-ram,id=vmem0,share=on,size=256M \
+ -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \
+ -device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2 \
+ -device cxl-type3,bus=root_port13,volatile-memdev=vmem0,id=cxl-vmem0 \
+ -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G
+
Deprecations
------------
diff --git a/docs/system/target-arm.rst b/docs/system/target-arm.rst
index b96a05a..a96d186 100644
--- a/docs/system/target-arm.rst
+++ b/docs/system/target-arm.rst
@@ -71,6 +71,7 @@ Board-specific documentation
.. toctree::
:maxdepth: 1
+ arm/max78000
arm/integratorcp
arm/mps2
arm/musca
diff --git a/hw/acpi/cxl.c b/hw/acpi/cxl.c
index 9cd7905..75d5b30 100644
--- a/hw/acpi/cxl.c
+++ b/hw/acpi/cxl.c
@@ -22,6 +22,7 @@
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pci_host.h"
#include "hw/cxl/cxl.h"
+#include "hw/cxl/cxl_host.h"
#include "hw/mem/memory-device.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/aml-build.h"
@@ -135,55 +136,52 @@ static void cedt_build_chbs(GArray *table_data, PXBCXLDev *cxl)
* Interleave ways encoding in CXL 2.0 ECN: 3, 6, 12 and 16-way memory
* interleaving.
*/
-static void cedt_build_cfmws(GArray *table_data, CXLState *cxls)
+static void cedt_build_cfmws(CXLFixedWindow *fw, Aml *cedt)
{
- GList *it;
+ GArray *table_data = cedt->buf;
+ int i;
- for (it = cxls->fixed_windows; it; it = it->next) {
- CXLFixedWindow *fw = it->data;
- int i;
-
- /* Type */
- build_append_int_noprefix(table_data, 1, 1);
+ /* Type */
+ build_append_int_noprefix(table_data, 1, 1);
- /* Reserved */
- build_append_int_noprefix(table_data, 0, 1);
+ /* Reserved */
+ build_append_int_noprefix(table_data, 0, 1);
- /* Record Length */
- build_append_int_noprefix(table_data, 36 + 4 * fw->num_targets, 2);
+ /* Record Length */
+ build_append_int_noprefix(table_data, 36 + 4 * fw->num_targets, 2);
- /* Reserved */
- build_append_int_noprefix(table_data, 0, 4);
+ /* Reserved */
+ build_append_int_noprefix(table_data, 0, 4);
- /* Base HPA */
- build_append_int_noprefix(table_data, fw->mr.addr, 8);
+ /* Base HPA */
+ build_append_int_noprefix(table_data, fw->mr.addr, 8);
- /* Window Size */
- build_append_int_noprefix(table_data, fw->size, 8);
+ /* Window Size */
+ build_append_int_noprefix(table_data, fw->size, 8);
- /* Host Bridge Interleave Ways */
- build_append_int_noprefix(table_data, fw->enc_int_ways, 1);
+ /* Host Bridge Interleave Ways */
+ build_append_int_noprefix(table_data, fw->enc_int_ways, 1);
- /* Host Bridge Interleave Arithmetic */
- build_append_int_noprefix(table_data, 0, 1);
+ /* Host Bridge Interleave Arithmetic */
+ build_append_int_noprefix(table_data, 0, 1);
- /* Reserved */
- build_append_int_noprefix(table_data, 0, 2);
+ /* Reserved */
+ build_append_int_noprefix(table_data, 0, 2);
- /* Host Bridge Interleave Granularity */
- build_append_int_noprefix(table_data, fw->enc_int_gran, 4);
+ /* Host Bridge Interleave Granularity */
+ build_append_int_noprefix(table_data, fw->enc_int_gran, 4);
- /* Window Restrictions */
- build_append_int_noprefix(table_data, 0x0f, 2); /* No restrictions */
+ /* Window Restrictions */
+ build_append_int_noprefix(table_data, 0x0f, 2);
- /* QTG ID */
- build_append_int_noprefix(table_data, 0, 2);
+ /* QTG ID */
+ build_append_int_noprefix(table_data, 0, 2);
- /* Host Bridge List (list of UIDs - currently bus_nr) */
- for (i = 0; i < fw->num_targets; i++) {
- g_assert(fw->target_hbs[i]);
- build_append_int_noprefix(table_data, PXB_DEV(fw->target_hbs[i])->bus_nr, 4);
- }
+ /* Host Bridge List (list of UIDs - currently bus_nr) */
+ for (i = 0; i < fw->num_targets; i++) {
+ g_assert(fw->target_hbs[i]);
+ build_append_int_noprefix(table_data,
+ PXB_DEV(fw->target_hbs[i])->bus_nr, 4);
}
}
@@ -202,6 +200,7 @@ void cxl_build_cedt(GArray *table_offsets, GArray *table_data,
BIOSLinker *linker, const char *oem_id,
const char *oem_table_id, CXLState *cxl_state)
{
+ GSList *cfmws_list, *iter;
Aml *cedt;
AcpiTable table = { .sig = "CEDT", .rev = 1, .oem_id = oem_id,
.oem_table_id = oem_table_id };
@@ -213,7 +212,12 @@ void cxl_build_cedt(GArray *table_offsets, GArray *table_data,
/* reserve space for CEDT header */
object_child_foreach_recursive(object_get_root(), cxl_foreach_pxb_hb, cedt);
- cedt_build_cfmws(cedt->buf, cxl_state);
+
+ cfmws_list = cxl_fmws_get_all_sorted();
+ for (iter = cfmws_list; iter; iter = iter->next) {
+ cedt_build_cfmws(CXL_FMW(iter->data), cedt);
+ }
+ g_slist_free(cfmws_list);
/* copy AML table into ACPI tables blob and patch header there */
g_array_append_vals(table_data, cedt->buf->data, cedt->buf->len);
diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig
index 6ea8653..1634e26 100644
--- a/hw/arm/Kconfig
+++ b/hw/arm/Kconfig
@@ -95,6 +95,12 @@ config INTEGRATOR
select PL181 # display
select SMC91C111
+config MAX78000FTHR
+ bool
+ default y
+ depends on TCG && ARM
+ select MAX78000_SOC
+
config MPS3R
bool
default y
@@ -357,6 +363,15 @@ config ALLWINNER_R40
select USB_EHCI_SYSBUS
select SD
+config MAX78000_SOC
+ bool
+ select ARM_V7M
+ select MAX78000_ICC
+ select MAX78000_UART
+ select MAX78000_GCR
+ select MAX78000_TRNG
+ select MAX78000_AES
+
config RASPI
bool
default y
diff --git a/hw/arm/fsl-imx8mp.c b/hw/arm/fsl-imx8mp.c
index 23e662c..866f4d1 100644
--- a/hw/arm/fsl-imx8mp.c
+++ b/hw/arm/fsl-imx8mp.c
@@ -356,6 +356,10 @@ static void fsl_imx8mp_realize(DeviceState *dev, Error **errp)
qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
sysbus_connect_irq(gicsbd, i + ms->smp.cpus,
qdev_get_gpio_in(cpudev, ARM_CPU_FIQ));
+ sysbus_connect_irq(gicsbd, i + 2 * ms->smp.cpus,
+ qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ));
+ sysbus_connect_irq(gicsbd, i + 3 * ms->smp.cpus,
+ qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ));
}
}
diff --git a/hw/arm/max78000_soc.c b/hw/arm/max78000_soc.c
new file mode 100644
index 0000000..7f1856f
--- /dev/null
+++ b/hw/arm/max78000_soc.c
@@ -0,0 +1,232 @@
+/*
+ * MAX78000 SOC
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Implementation based on stm32f205 and Max78000 user guide at
+ * https://www.analog.com/media/en/technical-documentation/user-guides/max78000-user-guide.pdf
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "system/address-spaces.h"
+#include "system/system.h"
+#include "hw/arm/max78000_soc.h"
+#include "hw/qdev-clock.h"
+#include "hw/misc/unimp.h"
+
+static const uint32_t max78000_icc_addr[] = {0x4002a000, 0x4002a800};
+static const uint32_t max78000_uart_addr[] = {0x40042000, 0x40043000,
+ 0x40044000};
+
+static const int max78000_uart_irq[] = {14, 15, 34};
+
+static void max78000_soc_initfn(Object *obj)
+{
+ MAX78000State *s = MAX78000_SOC(obj);
+ int i;
+
+ object_initialize_child(obj, "armv7m", &s->armv7m, TYPE_ARMV7M);
+
+ object_initialize_child(obj, "gcr", &s->gcr, TYPE_MAX78000_GCR);
+
+ for (i = 0; i < MAX78000_NUM_ICC; i++) {
+ g_autofree char *name = g_strdup_printf("icc%d", i);
+ object_initialize_child(obj, name, &s->icc[i], TYPE_MAX78000_ICC);
+ }
+
+ for (i = 0; i < MAX78000_NUM_UART; i++) {
+ g_autofree char *name = g_strdup_printf("uart%d", i);
+ object_initialize_child(obj, name, &s->uart[i],
+ TYPE_MAX78000_UART);
+ }
+
+ object_initialize_child(obj, "trng", &s->trng, TYPE_MAX78000_TRNG);
+
+ object_initialize_child(obj, "aes", &s->aes, TYPE_MAX78000_AES);
+
+ s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0);
+}
+
+static void max78000_soc_realize(DeviceState *dev_soc, Error **errp)
+{
+ MAX78000State *s = MAX78000_SOC(dev_soc);
+ MemoryRegion *system_memory = get_system_memory();
+ DeviceState *dev, *gcrdev, *armv7m;
+ SysBusDevice *busdev;
+ Error *err = NULL;
+ int i;
+
+ if (!clock_has_source(s->sysclk)) {
+ error_setg(errp, "sysclk clock must be wired up by the board code");
+ return;
+ }
+
+ memory_region_init_rom(&s->flash, OBJECT(dev_soc), "MAX78000.flash",
+ FLASH_SIZE, &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ memory_region_add_subregion(system_memory, FLASH_BASE_ADDRESS, &s->flash);
+
+ memory_region_init_ram(&s->sram, NULL, "MAX78000.sram", SRAM_SIZE,
+ &err);
+
+ gcrdev = DEVICE(&s->gcr);
+ object_property_set_link(OBJECT(gcrdev), "sram", OBJECT(&s->sram),
+ &err);
+
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
+ }
+ memory_region_add_subregion(system_memory, SRAM_BASE_ADDRESS, &s->sram);
+
+ armv7m = DEVICE(&s->armv7m);
+
+    /*
+     * The text of the MAX78000 user guide's Interrupt Vector Table
+     * section suggests that there are 120 IRQs, while table 5-1 lists
+     * only 104. Implement the more generous of the two.
+     * This has not been tested on hardware.
+     */
+ qdev_prop_set_uint32(armv7m, "num-irq", 120);
+ qdev_prop_set_uint8(armv7m, "num-prio-bits", 3);
+ qdev_prop_set_string(armv7m, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m4"));
+ qdev_prop_set_bit(armv7m, "enable-bitband", true);
+ qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk);
+ object_property_set_link(OBJECT(&s->armv7m), "memory",
+ OBJECT(system_memory), &error_abort);
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->armv7m), errp)) {
+ return;
+ }
+
+ for (i = 0; i < MAX78000_NUM_ICC; i++) {
+ dev = DEVICE(&(s->icc[i]));
+ sysbus_realize(SYS_BUS_DEVICE(dev), errp);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, max78000_icc_addr[i]);
+ }
+
+ for (i = 0; i < MAX78000_NUM_UART; i++) {
+ g_autofree char *link = g_strdup_printf("uart%d", i);
+ dev = DEVICE(&(s->uart[i]));
+ qdev_prop_set_chr(dev, "chardev", serial_hd(i));
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->uart[i]), errp)) {
+ return;
+ }
+
+ object_property_set_link(OBJECT(gcrdev), link, OBJECT(dev),
+ &err);
+
+ busdev = SYS_BUS_DEVICE(dev);
+ sysbus_mmio_map(busdev, 0, max78000_uart_addr[i]);
+ sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(armv7m,
+ max78000_uart_irq[i]));
+ }
+
+ dev = DEVICE(&s->trng);
+ sysbus_realize(SYS_BUS_DEVICE(dev), errp);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x4004d000);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(armv7m, 4));
+
+ object_property_set_link(OBJECT(gcrdev), "trng", OBJECT(dev), &err);
+
+ dev = DEVICE(&s->aes);
+ sysbus_realize(SYS_BUS_DEVICE(dev), errp);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x40007400);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(armv7m, 5));
+
+ object_property_set_link(OBJECT(gcrdev), "aes", OBJECT(dev), &err);
+
+ dev = DEVICE(&s->gcr);
+ sysbus_realize(SYS_BUS_DEVICE(dev), errp);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x40000000);
+
+ create_unimplemented_device("systemInterface", 0x40000400, 0x400);
+ create_unimplemented_device("functionControl", 0x40000800, 0x400);
+ create_unimplemented_device("watchdogTimer0", 0x40003000, 0x400);
+ create_unimplemented_device("dynamicVoltScale", 0x40003c00, 0x40);
+ create_unimplemented_device("SIMO", 0x40004400, 0x400);
+ create_unimplemented_device("trimSystemInit", 0x40005400, 0x400);
+ create_unimplemented_device("generalCtrlFunc", 0x40005800, 0x400);
+ create_unimplemented_device("wakeupTimer", 0x40006400, 0x400);
+ create_unimplemented_device("powerSequencer", 0x40006800, 0x400);
+ create_unimplemented_device("miscControl", 0x40006c00, 0x400);
+
+ create_unimplemented_device("gpio0", 0x40008000, 0x1000);
+ create_unimplemented_device("gpio1", 0x40009000, 0x1000);
+
+ create_unimplemented_device("parallelCamInterface", 0x4000e000, 0x1000);
+ create_unimplemented_device("CRC", 0x4000f000, 0x1000);
+
+ create_unimplemented_device("timer0", 0x40010000, 0x1000);
+ create_unimplemented_device("timer1", 0x40011000, 0x1000);
+ create_unimplemented_device("timer2", 0x40012000, 0x1000);
+ create_unimplemented_device("timer3", 0x40013000, 0x1000);
+
+ create_unimplemented_device("i2c0", 0x4001d000, 0x1000);
+ create_unimplemented_device("i2c1", 0x4001e000, 0x1000);
+ create_unimplemented_device("i2c2", 0x4001f000, 0x1000);
+
+ create_unimplemented_device("standardDMA", 0x40028000, 0x1000);
+ create_unimplemented_device("flashController0", 0x40029000, 0x400);
+
+ create_unimplemented_device("adc", 0x40034000, 0x1000);
+ create_unimplemented_device("pulseTrainEngine", 0x4003c000, 0xa0);
+ create_unimplemented_device("oneWireMaster", 0x4003d000, 0x1000);
+ create_unimplemented_device("semaphore", 0x4003e000, 0x1000);
+
+ create_unimplemented_device("spi1", 0x40046000, 0x2000);
+ create_unimplemented_device("i2s", 0x40060000, 0x1000);
+ create_unimplemented_device("lowPowerControl", 0x40080000, 0x400);
+ create_unimplemented_device("gpio2", 0x40080400, 0x200);
+ create_unimplemented_device("lowPowerWatchdogTimer", 0x40080800, 0x400);
+ create_unimplemented_device("lowPowerTimer4", 0x40080c00, 0x400);
+
+ create_unimplemented_device("lowPowerTimer5", 0x40081000, 0x400);
+ create_unimplemented_device("lowPowerUART0", 0x40081400, 0x400);
+ create_unimplemented_device("lowPowerComparator", 0x40088000, 0x400);
+
+ create_unimplemented_device("spi0", 0x400be000, 0x400);
+
+ /*
+ * The MAX78000 user guide's base address map lists the CNN TX FIFO as
+ * beginning at 0x400c0400 and ending at 0x400c0400. Given that CNN_FIFO
+ * is listed as having data accessible up to offset 0x1000, the user
+ * guide is likely incorrect.
+ */
+ create_unimplemented_device("cnnTxFIFO", 0x400c0400, 0x2000);
+
+ create_unimplemented_device("cnnGlobalControl", 0x50000000, 0x10000);
+ create_unimplemented_device("cnnx16quad0", 0x50100000, 0x40000);
+ create_unimplemented_device("cnnx16quad1", 0x50500000, 0x40000);
+ create_unimplemented_device("cnnx16quad2", 0x50900000, 0x40000);
+ create_unimplemented_device("cnnx16quad3", 0x50d00000, 0x40000);
+
+}
+
+static void max78000_soc_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = max78000_soc_realize;
+}
+
+static const TypeInfo max78000_soc_info = {
+ .name = TYPE_MAX78000_SOC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(MAX78000State),
+ .instance_init = max78000_soc_initfn,
+ .class_init = max78000_soc_class_init,
+};
+
+static void max78000_soc_types(void)
+{
+ type_register_static(&max78000_soc_info);
+}
+
+type_init(max78000_soc_types)
diff --git a/hw/arm/max78000fthr.c b/hw/arm/max78000fthr.c
new file mode 100644
index 0000000..c4f6b5b
--- /dev/null
+++ b/hw/arm/max78000fthr.c
@@ -0,0 +1,50 @@
+/*
+ * MAX78000FTHR Evaluation Board
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/boards.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-clock.h"
+#include "qemu/error-report.h"
+#include "hw/arm/max78000_soc.h"
+#include "hw/arm/boot.h"
+
+/* 60MHz is the default, but other clocks can be selected. */
+#define SYSCLK_FRQ 60000000ULL
+
+static void max78000_init(MachineState *machine)
+{
+ DeviceState *dev;
+ Clock *sysclk;
+
+ sysclk = clock_new(OBJECT(machine), "SYSCLK");
+ clock_set_hz(sysclk, SYSCLK_FRQ);
+
+ dev = qdev_new(TYPE_MAX78000_SOC);
+ object_property_add_child(OBJECT(machine), "soc", OBJECT(dev));
+ qdev_connect_clock_in(dev, "sysclk", sysclk);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+
+ armv7m_load_kernel(ARM_CPU(first_cpu),
+ machine->kernel_filename,
+ 0x00000000, FLASH_SIZE);
+}
+
+static void max78000_machine_init(MachineClass *mc)
+{
+ static const char * const valid_cpu_types[] = {
+ ARM_CPU_TYPE_NAME("cortex-m4"),
+ NULL
+ };
+
+ mc->desc = "MAX78000FTHR Board (Cortex-M4 / (Unimplemented) RISC-V)";
+ mc->init = max78000_init;
+ mc->valid_cpu_types = valid_cpu_types;
+}
+
+DEFINE_MACHINE("max78000fthr", max78000_machine_init)
diff --git a/hw/arm/meson.build b/hw/arm/meson.build
index d90be8f..dc683913 100644
--- a/hw/arm/meson.build
+++ b/hw/arm/meson.build
@@ -27,6 +27,7 @@ arm_common_ss.add(when: 'CONFIG_OMAP', if_true: files('omap1.c'))
arm_common_ss.add(when: 'CONFIG_ALLWINNER_A10', if_true: files('allwinner-a10.c', 'cubieboard.c'))
arm_common_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-h3.c', 'orangepi.c'))
arm_common_ss.add(when: 'CONFIG_ALLWINNER_R40', if_true: files('allwinner-r40.c', 'bananapi_m2u.c'))
+arm_common_ss.add(when: 'CONFIG_MAX78000_SOC', if_true: files('max78000_soc.c'))
arm_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2836.c', 'raspi.c'))
arm_common_ss.add(when: ['CONFIG_RASPI', 'TARGET_AARCH64'], if_true: files('bcm2838.c', 'raspi4b.c'))
arm_common_ss.add(when: 'CONFIG_STM32F100_SOC', if_true: files('stm32f100_soc.c'))
@@ -71,6 +72,7 @@ arm_ss.add(when: 'CONFIG_XEN', if_true: files(
arm_common_ss.add(when: 'CONFIG_ARM_SMMUV3', if_true: files('smmu-common.c'))
arm_common_ss.add(when: 'CONFIG_COLLIE', if_true: files('collie.c'))
arm_common_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4_boards.c'))
+arm_common_ss.add(when: 'CONFIG_MAX78000FTHR', if_true: files('max78000fthr.c'))
arm_common_ss.add(when: 'CONFIG_NETDUINO2', if_true: files('netduino2.c'))
arm_common_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_peripherals.c'))
arm_common_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2838_peripherals.c'))
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index cd90c47..0dfb8ec 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -39,10 +39,12 @@
#include "hw/acpi/aml-build.h"
#include "hw/acpi/utils.h"
#include "hw/acpi/pci.h"
+#include "hw/acpi/cxl.h"
#include "hw/acpi/memory_hotplug.h"
#include "hw/acpi/generic_event_device.h"
#include "hw/acpi/tpm.h"
#include "hw/acpi/hmat.h"
+#include "hw/cxl/cxl.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
@@ -119,10 +121,29 @@ static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap)
aml_append(scope, dev);
}
+static void build_acpi0017(Aml *table)
+{
+ Aml *dev, *scope, *method;
+
+ scope = aml_scope("_SB");
+ dev = aml_device("CXLM");
+ aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0017")));
+
+ method = aml_method("_STA", 0, AML_NOTSERIALIZED);
+ aml_append(method, aml_return(aml_int(0x0B)));
+ aml_append(dev, method);
+ build_cxl_dsm_method(dev);
+
+ aml_append(scope, dev);
+ aml_append(table, scope);
+}
+
static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
uint32_t irq, VirtMachineState *vms)
{
int ecam_id = VIRT_ECAM_ID(vms->highmem_ecam);
+ bool cxl_present = false;
+ PCIBus *bus = vms->bus;
struct GPEXConfig cfg = {
.mmio32 = memmap[VIRT_PCIE_MMIO],
.pio = memmap[VIRT_PCIE_PIO],
@@ -136,6 +157,14 @@ static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
}
acpi_dsdt_add_gpex(scope, &cfg);
+ QLIST_FOREACH(bus, &vms->bus->child, sibling) {
+ if (pci_bus_is_cxl(bus)) {
+ cxl_present = true;
+ }
+ }
+ if (cxl_present) {
+ build_acpi0017(scope);
+ }
}
static void acpi_dsdt_add_gpio(Aml *scope, const MemMapEntry *gpio_memmap,
@@ -329,12 +358,6 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
/* Sort the smmu idmap by input_base */
g_array_sort(rc_smmu_idmaps, iort_idmap_compare);
- /*
- * Knowing the ID ranges from the RC to the SMMU, it's possible to
- * determine the ID ranges from RC that are directed to the ITS.
- */
- create_rc_its_idmaps(rc_its_idmaps, rc_smmu_idmaps);
-
nb_nodes = 2; /* RC and SMMUv3 */
rc_mapping_count = rc_smmu_idmaps->len;
@@ -1027,6 +1050,11 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
}
}
+ if (vms->cxl_devices_state.is_enabled) {
+ cxl_build_cedt(table_offsets, tables_blob, tables->linker,
+ vms->oem_id, vms->oem_table_id, &vms->cxl_devices_state);
+ }
+
if (ms->nvdimms_state->is_enabled) {
nvdimm_build_acpi(table_offsets, tables_blob, tables->linker,
ms->nvdimms_state, ms->ram_slots, vms->oem_id,
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 3bcdf92..8070ff7 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -57,6 +57,7 @@
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/pci-host/gpex.h"
+#include "hw/pci-bridge/pci_expander_bridge.h"
#include "hw/virtio/virtio-pci.h"
#include "hw/core/sysbus-fdt.h"
#include "hw/platform-bus.h"
@@ -86,6 +87,8 @@
#include "hw/virtio/virtio-md-pci.h"
#include "hw/virtio/virtio-iommu.h"
#include "hw/char/pl011.h"
+#include "hw/cxl/cxl.h"
+#include "hw/cxl/cxl_host.h"
#include "qemu/guest-random.h"
static GlobalProperty arm_virt_compat[] = {
@@ -220,9 +223,11 @@ static const MemMapEntry base_memmap[] = {
static MemMapEntry extended_memmap[] = {
/* Additional 64 MB redist region (can contain up to 512 redistributors) */
[VIRT_HIGH_GIC_REDIST2] = { 0x0, 64 * MiB },
+ [VIRT_CXL_HOST] = { 0x0, 64 * KiB * 16 }, /* 16 UID */
[VIRT_HIGH_PCIE_ECAM] = { 0x0, 256 * MiB },
/* Second PCIe window */
[VIRT_HIGH_PCIE_MMIO] = { 0x0, DEFAULT_HIGH_PCIE_MMIO_SIZE },
+ /* Any CXL Fixed memory windows come here */
};
static const int a15irqmap[] = {
@@ -792,6 +797,13 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
default:
g_assert_not_reached();
}
+
+ if (kvm_enabled() && vms->virt &&
+ (revision != 3 || !kvm_irqchip_in_kernel())) {
+ error_report("KVM EL2 is only supported with in-kernel GICv3");
+ exit(1);
+ }
+
vms->gic = qdev_new(gictype);
qdev_prop_set_uint32(vms->gic, "revision", revision);
qdev_prop_set_uint32(vms->gic, "num-cpu", smp_cpus);
@@ -828,6 +840,9 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
OBJECT(mem), &error_fatal);
qdev_prop_set_bit(vms->gic, "has-lpi", true);
}
+ } else if (vms->virt) {
+ qdev_prop_set_uint32(vms->gic, "maintenance-interrupt-id",
+ ARCH_GIC_MAINT_IRQ);
}
} else {
if (!kvm_irqchip_in_kernel()) {
@@ -1623,6 +1638,17 @@ static void create_pcie(VirtMachineState *vms)
}
}
+static void create_cxl_host_reg_region(VirtMachineState *vms)
+{
+ MemoryRegion *sysmem = get_system_memory();
+ MemoryRegion *mr = &vms->cxl_devices_state.host_mr;
+
+ memory_region_init(mr, OBJECT(vms), "cxl_host_reg",
+ vms->memmap[VIRT_CXL_HOST].size);
+ memory_region_add_subregion(sysmem, vms->memmap[VIRT_CXL_HOST].base, mr);
+ vms->highmem_cxl = true;
+}
+
static void create_platform_bus(VirtMachineState *vms)
{
DeviceState *dev;
@@ -1739,6 +1765,12 @@ void virt_machine_done(Notifier *notifier, void *data)
struct arm_boot_info *info = &vms->bootinfo;
AddressSpace *as = arm_boot_address_space(cpu, info);
+ cxl_hook_up_pxb_registers(vms->bus, &vms->cxl_devices_state,
+ &error_fatal);
+
+ if (vms->cxl_devices_state.is_enabled) {
+ cxl_fmws_link_targets(&error_fatal);
+ }
/*
* If the user provided a dtb, we assume the dynamic sysbus nodes
* already are integrated there. This corresponds to a use case where
@@ -1785,6 +1817,7 @@ static inline bool *virt_get_high_memmap_enabled(VirtMachineState *vms,
{
bool *enabled_array[] = {
&vms->highmem_redists,
+ &vms->highmem_cxl,
&vms->highmem_ecam,
&vms->highmem_mmio,
};
@@ -1892,6 +1925,9 @@ static void virt_set_memmap(VirtMachineState *vms, int pa_bits)
if (device_memory_size > 0) {
machine_memory_devices_init(ms, device_memory_base, device_memory_size);
}
+ vms->highest_gpa = cxl_fmws_set_memmap(ROUND_UP(vms->highest_gpa + 1,
+ 256 * MiB),
+ BIT_ULL(pa_bits)) - 1;
}
static VirtGICType finalize_gic_version_do(const char *accel_name,
@@ -2063,6 +2099,10 @@ static void virt_post_cpus_gic_realized(VirtMachineState *vms,
memory_region_init_ram(pvtime, NULL, "pvtime", pvtime_size, NULL);
memory_region_add_subregion(sysmem, pvtime_reg_base, pvtime);
}
+ if (!aarch64 && vms->virt) {
+ error_report("KVM does not support EL2 on an AArch32 vCPU");
+ exit(1);
+ }
CPU_FOREACH(cpu) {
if (pmu) {
@@ -2208,7 +2248,13 @@ static void machvirt_init(MachineState *machine)
exit(1);
}
- if (vms->virt && !tcg_enabled() && !qtest_enabled()) {
+ if (vms->virt && kvm_enabled() && !kvm_arm_el2_supported()) {
+ error_report("mach-virt: host kernel KVM does not support providing "
+ "Virtualization extensions to the guest CPU");
+ exit(1);
+ }
+
+ if (vms->virt && !kvm_enabled() && !tcg_enabled() && !qtest_enabled()) {
error_report("mach-virt: %s does not support providing "
"Virtualization extensions to the guest CPU",
current_accel_name());
@@ -2343,6 +2389,8 @@ static void machvirt_init(MachineState *machine)
memory_region_add_subregion(sysmem, vms->memmap[VIRT_MEM].base,
machine->ram);
+ cxl_fmws_update_mmio();
+
virt_flash_fdt(vms, sysmem, secure_sysmem ?: sysmem);
create_gic(vms, sysmem);
@@ -2398,6 +2446,7 @@ static void machvirt_init(MachineState *machine)
create_rtc(vms);
create_pcie(vms);
+ create_cxl_host_reg_region(vms);
if (has_ged && aarch64 && firmware_loaded && virt_is_acpi_enabled(vms)) {
vms->acpi_dev = create_acpi_ged(vms);
@@ -3364,6 +3413,7 @@ static void virt_instance_init(Object *obj)
vms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6);
vms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8);
+ cxl_machine_init(obj, &vms->cxl_devices_state);
}
static const TypeInfo virt_machine_info = {
diff --git a/hw/char/Kconfig b/hw/char/Kconfig
index 9d517f3..020c0a8 100644
--- a/hw/char/Kconfig
+++ b/hw/char/Kconfig
@@ -48,6 +48,9 @@ config VIRTIO_SERIAL
default y
depends on VIRTIO
+config MAX78000_UART
+ bool
+
config STM32F2XX_USART
bool
diff --git a/hw/char/max78000_uart.c b/hw/char/max78000_uart.c
new file mode 100644
index 0000000..19506d5
--- /dev/null
+++ b/hw/char/max78000_uart.c
@@ -0,0 +1,285 @@
+/*
+ * MAX78000 UART
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "hw/char/max78000_uart.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "migration/vmstate.h"
+#include "trace.h"
+
+
+static int max78000_uart_can_receive(void *opaque)
+{
+ Max78000UartState *s = opaque;
+ if (!(s->ctrl & UART_BCLKEN)) {
+ return 0;
+ }
+ return fifo8_num_free(&s->rx_fifo);
+}
+
+static void max78000_update_irq(Max78000UartState *s)
+{
+ int interrupt_level;
+
+ interrupt_level = s->int_fl & s->int_en;
+ qemu_set_irq(s->irq, interrupt_level);
+}
+
+static void max78000_uart_receive(void *opaque, const uint8_t *buf, int size)
+{
+ Max78000UartState *s = opaque;
+
+ assert(size <= fifo8_num_free(&s->rx_fifo));
+
+ fifo8_push_all(&s->rx_fifo, buf, size);
+
+ uint32_t rx_threshold = s->ctrl & 0xf;
+
+ if (fifo8_num_used(&s->rx_fifo) >= rx_threshold) {
+ s->int_fl |= UART_RX_THD;
+ }
+
+ max78000_update_irq(s);
+}
+
+static void max78000_uart_reset_hold(Object *obj, ResetType type)
+{
+ Max78000UartState *s = MAX78000_UART(obj);
+
+ s->ctrl = 0;
+ s->status = UART_TX_EM | UART_RX_EM;
+ s->int_en = 0;
+ s->int_fl = 0;
+ s->osr = 0;
+ s->txpeek = 0;
+ s->pnr = UART_RTS;
+ s->fifo = 0;
+ s->dma = 0;
+ s->wken = 0;
+ s->wkfl = 0;
+ fifo8_reset(&s->rx_fifo);
+}
+
+static uint64_t max78000_uart_read(void *opaque, hwaddr addr,
+ unsigned int size)
+{
+ Max78000UartState *s = opaque;
+ uint64_t retvalue = 0;
+ switch (addr) {
+ case UART_CTRL:
+ retvalue = s->ctrl;
+ break;
+ case UART_STATUS:
+ retvalue = (fifo8_num_used(&s->rx_fifo) << UART_RX_LVL) |
+ UART_TX_EM |
+ (fifo8_is_empty(&s->rx_fifo) ? UART_RX_EM : 0);
+ break;
+ case UART_INT_EN:
+ retvalue = s->int_en;
+ break;
+ case UART_INT_FL:
+ retvalue = s->int_fl;
+ break;
+ case UART_CLKDIV:
+ retvalue = s->clkdiv;
+ break;
+ case UART_OSR:
+ retvalue = s->osr;
+ break;
+ case UART_TXPEEK:
+ if (!fifo8_is_empty(&s->rx_fifo)) {
+ retvalue = fifo8_peek(&s->rx_fifo);
+ }
+ break;
+ case UART_PNR:
+ retvalue = s->pnr;
+ break;
+ case UART_FIFO:
+ if (!fifo8_is_empty(&s->rx_fifo)) {
+ retvalue = fifo8_pop(&s->rx_fifo);
+ max78000_update_irq(s);
+ }
+ break;
+ case UART_DMA:
+ /* DMA not implemented */
+ retvalue = s->dma;
+ break;
+ case UART_WKEN:
+ retvalue = s->wken;
+ break;
+ case UART_WKFL:
+ retvalue = s->wkfl;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr);
+ break;
+ }
+
+ return retvalue;
+}
+
+static void max78000_uart_write(void *opaque, hwaddr addr,
+ uint64_t val64, unsigned int size)
+{
+ Max78000UartState *s = opaque;
+
+ uint32_t value = val64;
+ uint8_t data;
+
+ switch (addr) {
+ case UART_CTRL:
+ if (value & UART_FLUSH_RX) {
+ fifo8_reset(&s->rx_fifo);
+ }
+ if (value & UART_BCLKEN) {
+ value = value | UART_BCLKRDY;
+ }
+ s->ctrl = value & ~(UART_FLUSH_RX | UART_FLUSH_TX);
+
+ /*
+ * Software can manage UART flow control manually by setting hfc_en
+ * in UART_CTRL. This would require emulating uart at a lower level,
+ * and is currently unimplemented.
+ */
+
+ return;
+ case UART_STATUS:
+ /* UART_STATUS is read only */
+ return;
+ case UART_INT_EN:
+ s->int_en = value;
+ return;
+ case UART_INT_FL:
+ s->int_fl = s->int_fl & ~(value);
+ max78000_update_irq(s);
+ return;
+ case UART_CLKDIV:
+ s->clkdiv = value;
+ return;
+ case UART_OSR:
+ s->osr = value;
+ return;
+ case UART_PNR:
+ s->pnr = value;
+ return;
+ case UART_FIFO:
+ data = value & 0xff;
+ /*
+ * XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks
+ */
+ qemu_chr_fe_write_all(&s->chr, &data, 1);
+
+ /* TX is always empty */
+ s->int_fl |= UART_TX_HE;
+ max78000_update_irq(s);
+
+ return;
+ case UART_DMA:
+ /* DMA not implemented */
+ s->dma = value;
+ return;
+ case UART_WKEN:
+ s->wken = value;
+ return;
+ case UART_WKFL:
+ s->wkfl = value;
+ return;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"
+ HWADDR_PRIx "\n", __func__, addr);
+ }
+}
+
+static const MemoryRegionOps max78000_uart_ops = {
+ .read = max78000_uart_read,
+ .write = max78000_uart_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+};
+
+static const Property max78000_uart_properties[] = {
+ DEFINE_PROP_CHR("chardev", Max78000UartState, chr),
+};
+
+static const VMStateDescription max78000_uart_vmstate = {
+ .name = TYPE_MAX78000_UART,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(ctrl, Max78000UartState),
+ VMSTATE_UINT32(status, Max78000UartState),
+ VMSTATE_UINT32(int_en, Max78000UartState),
+ VMSTATE_UINT32(int_fl, Max78000UartState),
+ VMSTATE_UINT32(clkdiv, Max78000UartState),
+ VMSTATE_UINT32(osr, Max78000UartState),
+ VMSTATE_UINT32(txpeek, Max78000UartState),
+ VMSTATE_UINT32(pnr, Max78000UartState),
+ VMSTATE_UINT32(fifo, Max78000UartState),
+ VMSTATE_UINT32(dma, Max78000UartState),
+ VMSTATE_UINT32(wken, Max78000UartState),
+ VMSTATE_UINT32(wkfl, Max78000UartState),
+ VMSTATE_FIFO8(rx_fifo, Max78000UartState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void max78000_uart_init(Object *obj)
+{
+ Max78000UartState *s = MAX78000_UART(obj);
+ fifo8_create(&s->rx_fifo, 8);
+
+ sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq);
+
+ memory_region_init_io(&s->mmio, obj, &max78000_uart_ops, s,
+ TYPE_MAX78000_UART, 0x400);
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
+}
+
+static void max78000_uart_realize(DeviceState *dev, Error **errp)
+{
+ Max78000UartState *s = MAX78000_UART(dev);
+
+ qemu_chr_fe_set_handlers(&s->chr, max78000_uart_can_receive,
+ max78000_uart_receive, NULL, NULL,
+ s, NULL, true);
+}
+
+static void max78000_uart_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ rc->phases.hold = max78000_uart_reset_hold;
+
+ device_class_set_props(dc, max78000_uart_properties);
+ dc->realize = max78000_uart_realize;
+
+ dc->vmsd = &max78000_uart_vmstate;
+}
+
+static const TypeInfo max78000_uart_info = {
+ .name = TYPE_MAX78000_UART,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(Max78000UartState),
+ .instance_init = max78000_uart_init,
+ .class_init = max78000_uart_class_init,
+};
+
+static void max78000_uart_register_types(void)
+{
+ type_register_static(&max78000_uart_info);
+}
+
+type_init(max78000_uart_register_types)
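
[Editor's note: for quick experiments, a bare-metal guest-side sketch of
driving this model. The FIFO offset is an assumption based on the MAX78000
user guide; the authoritative values live in
include/hw/char/max78000_uart.h:]

.. code-block:: c

   #include <stdint.h>

   #define UART0_BASE        0x40042000u  /* from max78000_uart_addr[] */
   #define UART_FIFO_OFFSET  0x30u        /* assumed register offset */

   void uart0_putc(char c)
   {
       /*
        * In this model a FIFO write is forwarded straight to the
        * chardev backend, so the transmit side never blocks.
        */
       *(volatile uint32_t *)(uintptr_t)(UART0_BASE + UART_FIFO_OFFSET) =
           (uint8_t)c;
   }
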
diff --git a/hw/char/meson.build b/hw/char/meson.build
index 4e439da..a9e1dc2 100644
--- a/hw/char/meson.build
+++ b/hw/char/meson.build
@@ -26,6 +26,7 @@ system_ss.add(when: 'CONFIG_AVR_USART', if_true: files('avr_usart.c'))
system_ss.add(when: 'CONFIG_COLDFIRE', if_true: files('mcf_uart.c'))
system_ss.add(when: 'CONFIG_DIGIC', if_true: files('digic-uart.c'))
system_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_uart.c'))
+system_ss.add(when: 'CONFIG_MAX78000_UART', if_true: files('max78000_uart.c'))
system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_uart.c'))
system_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_aux.c'))
system_ss.add(when: 'CONFIG_RENESAS_SCI', if_true: files('renesas_sci.c'))
diff --git a/hw/cxl/cxl-host-stubs.c b/hw/cxl/cxl-host-stubs.c
index cae4afc..c015baa 100644
--- a/hw/cxl/cxl-host-stubs.c
+++ b/hw/cxl/cxl-host-stubs.c
@@ -8,8 +8,13 @@
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_host.h"
-void cxl_fmws_link_targets(CXLState *stat, Error **errp) {};
+void cxl_fmws_link_targets(Error **errp) {};
void cxl_machine_init(Object *obj, CXLState *state) {};
void cxl_hook_up_pxb_registers(PCIBus *bus, CXLState *state, Error **errp) {};
+hwaddr cxl_fmws_set_memmap(hwaddr base, hwaddr max_addr)
+{
+ return base;
+};
+void cxl_fmws_update_mmio(void) {};
const MemoryRegionOps cfmws_ops;
diff --git a/hw/cxl/cxl-host.c b/hw/cxl/cxl-host.c
index e010163..5c2ce25 100644
--- a/hw/cxl/cxl-host.c
+++ b/hw/cxl/cxl-host.c
@@ -22,15 +22,17 @@
#include "hw/pci/pcie_port.h"
#include "hw/pci-bridge/pci_expander_bridge.h"
-static void cxl_fixed_memory_window_config(CXLState *cxl_state,
- CXLFixedMemoryWindowOptions *object,
- Error **errp)
+static void cxl_fixed_memory_window_config(CXLFixedMemoryWindowOptions *object,
+ int index, Error **errp)
{
ERRP_GUARD();
- g_autofree CXLFixedWindow *fw = g_malloc0(sizeof(*fw));
+ DeviceState *dev = qdev_new(TYPE_CXL_FMW);
+ CXLFixedWindow *fw = CXL_FMW(dev);
strList *target;
int i;
+ fw->index = index;
+
for (target = object->targets; target; target = target->next) {
fw->num_targets++;
}
@@ -65,35 +67,39 @@ static void cxl_fixed_memory_window_config(CXLState *cxl_state,
fw->targets[i] = g_strdup(target->value);
}
- cxl_state->fixed_windows = g_list_append(cxl_state->fixed_windows,
- g_steal_pointer(&fw));
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), errp);
}
-void cxl_fmws_link_targets(CXLState *cxl_state, Error **errp)
+static int cxl_fmws_link(Object *obj, void *opaque)
{
- if (cxl_state && cxl_state->fixed_windows) {
- GList *it;
-
- for (it = cxl_state->fixed_windows; it; it = it->next) {
- CXLFixedWindow *fw = it->data;
- int i;
-
- for (i = 0; i < fw->num_targets; i++) {
- Object *o;
- bool ambig;
-
- o = object_resolve_path_type(fw->targets[i],
- TYPE_PXB_CXL_DEV,
- &ambig);
- if (!o) {
- error_setg(errp, "Could not resolve CXLFM target %s",
- fw->targets[i]);
- return;
- }
- fw->target_hbs[i] = PXB_CXL_DEV(o);
- }
+ struct CXLFixedWindow *fw;
+ int i;
+
+ if (!object_dynamic_cast(obj, TYPE_CXL_FMW)) {
+ return 0;
+ }
+ fw = CXL_FMW(obj);
+
+ for (i = 0; i < fw->num_targets; i++) {
+ Object *o;
+ bool ambig;
+
+ o = object_resolve_path_type(fw->targets[i], TYPE_PXB_CXL_DEV,
+ &ambig);
+ if (!o) {
+ error_setg(&error_fatal, "Could not resolve CXLFM target %s",
+ fw->targets[i]);
+ return 1;
}
+ fw->target_hbs[i] = PXB_CXL_DEV(o);
}
+ return 0;
+}
+
+void cxl_fmws_link_targets(Error **errp)
+{
+    /* Order doesn't matter for this, so no need to build a list */
+ object_child_foreach_recursive(object_get_root(), cxl_fmws_link, NULL);
}
static bool cxl_hdm_find_target(uint32_t *cache_mem, hwaddr addr,
@@ -325,14 +331,15 @@ static void machine_set_cfmw(Object *obj, Visitor *v, const char *name,
CXLState *state = opaque;
CXLFixedMemoryWindowOptionsList *cfmw_list = NULL;
CXLFixedMemoryWindowOptionsList *it;
+ int index;
visit_type_CXLFixedMemoryWindowOptionsList(v, name, &cfmw_list, errp);
if (!cfmw_list) {
return;
}
- for (it = cfmw_list; it; it = it->next) {
- cxl_fixed_memory_window_config(state, it->value, errp);
+ for (it = cfmw_list, index = 0; it; it = it->next, index++) {
+ cxl_fixed_memory_window_config(it->value, index, errp);
}
state->cfmw_list = cfmw_list;
}
@@ -370,3 +377,110 @@ void cxl_hook_up_pxb_registers(PCIBus *bus, CXLState *state, Error **errp)
}
}
}
+
+static int cxl_fmws_find(Object *obj, void *opaque)
+{
+ GSList **list = opaque;
+
+ if (!object_dynamic_cast(obj, TYPE_CXL_FMW)) {
+ return 0;
+ }
+ *list = g_slist_prepend(*list, obj);
+
+ return 0;
+}
+
+static GSList *cxl_fmws_get_all(void)
+{
+ GSList *list = NULL;
+
+ object_child_foreach_recursive(object_get_root(), cxl_fmws_find, &list);
+
+ return list;
+}
+
+static gint cfmws_cmp(gconstpointer a, gconstpointer b, gpointer d)
+{
+ const struct CXLFixedWindow *ap = a;
+ const struct CXLFixedWindow *bp = b;
+
+ return ap->index > bp->index;
+}
+
+GSList *cxl_fmws_get_all_sorted(void)
+{
+ return g_slist_sort_with_data(cxl_fmws_get_all(), cfmws_cmp, NULL);
+}
+
+static int cxl_fmws_mmio_map(Object *obj, void *opaque)
+{
+ struct CXLFixedWindow *fw;
+
+ if (!object_dynamic_cast(obj, TYPE_CXL_FMW)) {
+ return 0;
+ }
+ fw = CXL_FMW(obj);
+ sysbus_mmio_map(SYS_BUS_DEVICE(fw), 0, fw->base);
+
+ return 0;
+}
+
+void cxl_fmws_update_mmio(void)
+{
+ /* Ordering is not required for this */
+ object_child_foreach_recursive(object_get_root(), cxl_fmws_mmio_map, NULL);
+}
+
+hwaddr cxl_fmws_set_memmap(hwaddr base, hwaddr max_addr)
+{
+ GSList *cfmws_list, *iter;
+ CXLFixedWindow *fw;
+
+ cfmws_list = cxl_fmws_get_all_sorted();
+ for (iter = cfmws_list; iter; iter = iter->next) {
+ fw = CXL_FMW(iter->data);
+ if (base + fw->size <= max_addr) {
+ fw->base = base;
+ base += fw->size;
+ }
+ }
+ g_slist_free(cfmws_list);
+
+ return base;
+}
+
+static void cxl_fmw_realize(DeviceState *dev, Error **errp)
+{
+ CXLFixedWindow *fw = CXL_FMW(dev);
+
+ memory_region_init_io(&fw->mr, OBJECT(dev), &cfmws_ops, fw,
+ "cxl-fixed-memory-region", fw->size);
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &fw->mr);
+}
+
+/*
+ * Note: Fixed memory windows represent fixed address decoders on the host and
+ * as such have no dynamic state to reset or migrate
+ */
+static void cxl_fmw_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->desc = "CXL Fixed Memory Window";
+ dc->realize = cxl_fmw_realize;
+ /* Reason - created by machines as tightly coupled to machine memory map */
+ dc->user_creatable = false;
+}
+
+static const TypeInfo cxl_fmw_info = {
+ .name = TYPE_CXL_FMW,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(CXLFixedWindow),
+ .class_init = cxl_fmw_class_init,
+};
+
+static void cxl_host_register_types(void)
+{
+ type_register_static(&cxl_fmw_info);
+}
+type_init(cxl_host_register_types)
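
[Editor's note: a self-contained sketch of the placement policy
cxl_fmws_set_memmap() implements above — windows are visited in index order,
each is placed at the running base if it fits below max_addr, and the first
free address is returned. The values here are illustrative, not the QEMU
API:]

.. code-block:: c

   #include <stdint.h>
   #include <stdio.h>

   typedef struct { int index; uint64_t size, base; } Window;

   /* First-fit in index order, mirroring cxl_fmws_set_memmap(). */
   static uint64_t place_windows(Window *w, int n, uint64_t base,
                                 uint64_t max)
   {
       for (int i = 0; i < n; i++) {
           if (base + w[i].size <= max) {
               w[i].base = base;
               base += w[i].size;
           }
       }
       return base;
   }

   int main(void)
   {
       Window w[] = { { 0, 4ULL << 30, 0 }, { 1, 256ULL << 20, 0 } };
       uint64_t end = place_windows(w, 2, 0x900000000ULL, 1ULL << 40);

       /* 0x900000000 + 4G + 256M = 0xa10000000 */
       printf("next free GPA: 0x%llx\n", (unsigned long long)end);
       return 0;
   }
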
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index b211633..860346d 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -609,7 +609,7 @@ void pc_machine_done(Notifier *notifier, void *data)
&error_fatal);
if (pcms->cxl_devices_state.is_enabled) {
- cxl_fmws_link_targets(&pcms->cxl_devices_state, &error_fatal);
+ cxl_fmws_link_targets(&error_fatal);
}
/* set the number of CPUs */
@@ -718,20 +718,28 @@ static uint64_t pc_get_cxl_range_start(PCMachineState *pcms)
return cxl_base;
}
-static uint64_t pc_get_cxl_range_end(PCMachineState *pcms)
+static int cxl_get_fmw_end(Object *obj, void *opaque)
{
- uint64_t start = pc_get_cxl_range_start(pcms) + MiB;
+ struct CXLFixedWindow *fw;
+ uint64_t *start = opaque;
- if (pcms->cxl_devices_state.fixed_windows) {
- GList *it;
-
- start = ROUND_UP(start, 256 * MiB);
- for (it = pcms->cxl_devices_state.fixed_windows; it; it = it->next) {
- CXLFixedWindow *fw = it->data;
- start += fw->size;
- }
+ if (!object_dynamic_cast(obj, TYPE_CXL_FMW)) {
+ return 0;
}
+ fw = CXL_FMW(obj);
+
+ *start += fw->size;
+ return 0;
+}
+
+static uint64_t pc_get_cxl_range_end(PCMachineState *pcms)
+{
+ uint64_t start = pc_get_cxl_range_start(pcms) + MiB;
+
+ /* Ordering doesn't matter so no need to build a sorted list */
+ object_child_foreach_recursive(object_get_root(), cxl_get_fmw_end,
+ &start);
return start;
}
@@ -933,23 +941,9 @@ void pc_memory_init(PCMachineState *pcms,
cxl_base = pc_get_cxl_range_start(pcms);
memory_region_init(mr, OBJECT(machine), "cxl_host_reg", cxl_size);
memory_region_add_subregion(system_memory, cxl_base, mr);
- cxl_resv_end = cxl_base + cxl_size;
- if (pcms->cxl_devices_state.fixed_windows) {
- hwaddr cxl_fmw_base;
- GList *it;
-
- cxl_fmw_base = ROUND_UP(cxl_base + cxl_size, 256 * MiB);
- for (it = pcms->cxl_devices_state.fixed_windows; it; it = it->next) {
- CXLFixedWindow *fw = it->data;
-
- fw->base = cxl_fmw_base;
- memory_region_init_io(&fw->mr, OBJECT(machine), &cfmws_ops, fw,
- "cxl-fixed-memory-region", fw->size);
- memory_region_add_subregion(system_memory, fw->base, &fw->mr);
- cxl_fmw_base += fw->size;
- cxl_resv_end = cxl_fmw_base;
- }
- }
+ cxl_base = ROUND_UP(cxl_base + cxl_size, 256 * MiB);
+ cxl_resv_end = cxl_fmws_set_memmap(cxl_base, maxphysaddr);
+ cxl_fmws_update_mmio();
}
/* Initialize PC system firmware */
diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c
index 1cee681..e438d8c 100644
--- a/hw/intc/arm_gicv3_common.c
+++ b/hw/intc/arm_gicv3_common.c
@@ -612,6 +612,7 @@ static const Property arm_gicv3_common_properties[] = {
DEFINE_PROP_BOOL("has-lpi", GICv3State, lpi_enable, 0),
DEFINE_PROP_BOOL("has-nmi", GICv3State, nmi_support, 0),
DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
+ DEFINE_PROP_UINT32("maintenance-interrupt-id", GICv3State, maint_irq, 0),
/*
* Compatibility property: force 8 bits of physical priority, even
* if the CPU being emulated should have fewer.
diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
index 3be3bf6..8ed88e7 100644
--- a/hw/intc/arm_gicv3_kvm.c
+++ b/hw/intc/arm_gicv3_kvm.c
@@ -22,6 +22,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/intc/arm_gicv3_common.h"
+#include "hw/arm/virt.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "system/kvm.h"
@@ -825,6 +826,34 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
return;
}
+ if (s->maint_irq) {
+ Error *kvm_nv_migration_blocker = NULL;
+ int ret;
+
+ error_setg(&kvm_nv_migration_blocker,
+ "Live migration disabled because KVM nested virt is enabled");
+ if (migrate_add_blocker(&kvm_nv_migration_blocker, errp)) {
+ error_free(kvm_nv_migration_blocker);
+ return;
+ }
+
+ ret = kvm_device_check_attr(s->dev_fd,
+ KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ, 0);
+ if (!ret) {
+ error_setg_errno(errp, errno,
+ "VGICv3 setting maintenance IRQ is not "
+ "supported by this host kernel");
+ return;
+ }
+
+ ret = kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ, 0,
+ &s->maint_irq, true, errp);
+ if (ret) {
+ error_setg_errno(errp, errno, "Failed to set VGIC maintenance IRQ");
+ return;
+ }
+ }
+
multiple_redist_region_allowed =
kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION);
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index 6d85720..7c78961 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -1279,7 +1279,7 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
goto bad_offset;
}
- return cpu->id_afr0;
+ return GET_IDREG(isar, ID_AFR0);
case 0xd50: /* MMFR0. */
if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
goto bad_offset;
@@ -1331,7 +1331,7 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
}
return GET_IDREG(&cpu->isar, ID_ISAR5);
case 0xd78: /* CLIDR */
- return cpu->clidr;
+ return GET_IDREG(&cpu->isar, CLIDR);
case 0xd7c: /* CTR */
return cpu->ctr;
case 0xd80: /* CSSIDR */
diff --git a/hw/intc/loongarch_extioi.c b/hw/intc/loongarch_extioi.c
index 8b8ac6b..3e9c88d 100644
--- a/hw/intc/loongarch_extioi.c
+++ b/hw/intc/loongarch_extioi.c
@@ -377,13 +377,6 @@ static void loongarch_extioi_realize(DeviceState *dev, Error **errp)
}
}
-static void loongarch_extioi_unrealize(DeviceState *dev)
-{
- LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(dev);
-
- g_free(s->cpu);
-}
-
static void loongarch_extioi_reset_hold(Object *obj, ResetType type)
{
LoongArchExtIOIClass *lec = LOONGARCH_EXTIOI_GET_CLASS(obj);
@@ -436,8 +429,6 @@ static void loongarch_extioi_class_init(ObjectClass *klass, const void *data)
device_class_set_parent_realize(dc, loongarch_extioi_realize,
&lec->parent_realize);
- device_class_set_parent_unrealize(dc, loongarch_extioi_unrealize,
- &lec->parent_unrealize);
resettable_class_set_parent_phases(rc, NULL, loongarch_extioi_reset_hold,
NULL, &lec->parent_phases);
lecc->pre_save = vmstate_extioi_pre_save;
diff --git a/hw/intc/loongarch_extioi_common.c b/hw/intc/loongarch_extioi_common.c
index 4a904b3..ba03383 100644
--- a/hw/intc/loongarch_extioi_common.c
+++ b/hw/intc/loongarch_extioi_common.c
@@ -108,6 +108,13 @@ static void loongarch_extioi_common_realize(DeviceState *dev, Error **errp)
}
}
+static void loongarch_extioi_common_unrealize(DeviceState *dev)
+{
+ LoongArchExtIOICommonState *s = LOONGARCH_EXTIOI_COMMON(dev);
+
+ g_free(s->cpu);
+}
+
static void loongarch_extioi_common_reset_hold(Object *obj, ResetType type)
{
LoongArchExtIOICommonClass *lecc = LOONGARCH_EXTIOI_COMMON_GET_CLASS(obj);
@@ -221,6 +228,8 @@ static void loongarch_extioi_common_class_init(ObjectClass *klass,
device_class_set_parent_realize(dc, loongarch_extioi_common_realize,
&lecc->parent_realize);
+ device_class_set_parent_unrealize(dc, loongarch_extioi_common_unrealize,
+ &lecc->parent_unrealize);
resettable_class_set_parent_phases(rc, NULL,
loongarch_extioi_common_reset_hold,
NULL, &lecc->parent_phases);
diff --git a/hw/misc/Kconfig b/hw/misc/Kconfig
index ec0fa5a..c27285b 100644
--- a/hw/misc/Kconfig
+++ b/hw/misc/Kconfig
@@ -47,6 +47,18 @@ config A9SCU
config ARM11SCU
bool
+config MAX78000_AES
+ bool
+
+config MAX78000_GCR
+ bool
+
+config MAX78000_ICC
+ bool
+
+config MAX78000_TRNG
+ bool
+
config MOS6522
bool
diff --git a/hw/misc/max78000_aes.c b/hw/misc/max78000_aes.c
new file mode 100644
index 0000000..0bfb2f0
--- /dev/null
+++ b/hw/misc/max78000_aes.c
@@ -0,0 +1,223 @@
+/*
+ * MAX78000 AES
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "trace.h"
+#include "hw/irq.h"
+#include "migration/vmstate.h"
+#include "hw/misc/max78000_aes.h"
+#include "crypto/aes.h"
+
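+/*
+ * Recompute the STATUS flags from the FIFO fill levels; the input and
+ * output FIFOs each hold 16 bytes (four 32-bit words).
+ */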
+static void max78000_aes_set_status(Max78000AesState *s)
+{
+ s->status = 0;
+ if (s->result_index >= 16) {
+ s->status |= OUTPUT_FULL;
+ }
+ if (s->result_index == 0) {
+ s->status |= OUTPUT_EMPTY;
+ }
+ if (s->data_index >= 16) {
+ s->status |= INPUT_FULL;
+ }
+ if (s->data_index == 0) {
+ s->status |= INPUT_EMPTY;
+ }
+}
+
+static uint64_t max78000_aes_read(void *opaque, hwaddr addr,
+ unsigned int size)
+{
+ Max78000AesState *s = opaque;
+ switch (addr) {
+ case CTRL:
+ return s->ctrl;
+
+ case STATUS:
+ return s->status;
+
+ case INTFL:
+ return s->intfl;
+
+ case INTEN:
+ return s->inten;
+
+ case FIFO:
+ if (s->result_index >= 4) {
+ s->intfl &= ~DONE;
+ s->result_index -= 4;
+ max78000_aes_set_status(s);
+ return ldl_be_p(&s->result[s->result_index]);
+        } else {
+ return 0;
+ }
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"
+ HWADDR_PRIx "\n", __func__, addr);
+ break;
+
+ }
+ return 0;
+}
+
+static void max78000_aes_do_crypto(Max78000AesState *s)
+{
+ int keylen = 256;
+ uint8_t *keydata = s->key;
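+    /*
+     * KEY_SIZE selects a 128/192/256-bit key; shorter keys occupy the
+     * upper bytes of the 32-byte key buffer, so skip the unused prefix.
+     */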
+ if ((s->ctrl & KEY_SIZE) == 0) {
+ keylen = 128;
+ keydata += 16;
+ } else if ((s->ctrl & KEY_SIZE) == 1 << 6) {
+ keylen = 192;
+ keydata += 8;
+ }
+
+ AES_KEY key;
+ if ((s->ctrl & TYPE) == 0) {
+ AES_set_encrypt_key(keydata, keylen, &key);
+ AES_set_decrypt_key(keydata, keylen, &s->internal_key);
+ AES_encrypt(s->data, s->result, &key);
+ s->result_index = 16;
+ } else if ((s->ctrl & TYPE) == 1 << 8) {
+ AES_set_decrypt_key(keydata, keylen, &key);
+ AES_set_decrypt_key(keydata, keylen, &s->internal_key);
+ AES_decrypt(s->data, s->result, &key);
+ s->result_index = 16;
+    } else {
+ AES_decrypt(s->data, s->result, &s->internal_key);
+ s->result_index = 16;
+ }
+ s->intfl |= DONE;
+}
+
+static void max78000_aes_write(void *opaque, hwaddr addr,
+ uint64_t val64, unsigned int size)
+{
+ Max78000AesState *s = opaque;
+ uint32_t val = val64;
+ switch (addr) {
+ case CTRL:
+ if (val & OUTPUT_FLUSH) {
+ s->result_index = 0;
+ val &= ~OUTPUT_FLUSH;
+ }
+ if (val & INPUT_FLUSH) {
+ s->data_index = 0;
+ val &= ~INPUT_FLUSH;
+ }
+ if (val & START) {
+ max78000_aes_do_crypto(s);
+ }
+
+ /* Hardware appears to stay enabled even if 0 written */
+ s->ctrl = val | (s->ctrl & AES_EN);
+ break;
+
+ case FIFO:
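+        /*
+         * The input FIFO gathers four 32-bit words, filled from the end
+         * of the buffer; once 16 bytes are queued, the engine runs
+         * automatically.
+         */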
+ assert(s->data_index <= 12);
+ stl_be_p(&s->data[12 - s->data_index], val);
+ s->data_index += 4;
+ if (s->data_index >= 16) {
+ s->data_index = 0;
+ max78000_aes_do_crypto(s);
+ }
+ break;
+
+ case KEY_BASE ... KEY_END - 4:
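+        /*
+         * Key words are stored big-endian at mirrored offsets, so s->key
+         * ends up holding the key in the byte order AES_set_*_key expects.
+         */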
+ stl_be_p(&s->key[(KEY_END - KEY_BASE - 4) - (addr - KEY_BASE)], val);
+ break;
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"
+ HWADDR_PRIx "\n", __func__, addr);
+ break;
+
+ }
+ max78000_aes_set_status(s);
+}
+
+static void max78000_aes_reset_hold(Object *obj, ResetType type)
+{
+ Max78000AesState *s = MAX78000_AES(obj);
+ s->ctrl = 0;
+ s->status = 0;
+ s->intfl = 0;
+ s->inten = 0;
+
+ s->data_index = 0;
+ s->result_index = 0;
+
+ memset(s->data, 0, sizeof(s->data));
+ memset(s->key, 0, sizeof(s->key));
+ memset(s->result, 0, sizeof(s->result));
+ memset(&s->internal_key, 0, sizeof(s->internal_key));
+}
+
+static const MemoryRegionOps max78000_aes_ops = {
+ .read = max78000_aes_read,
+ .write = max78000_aes_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+};
+
+static const VMStateDescription vmstate_max78000_aes = {
+ .name = TYPE_MAX78000_AES,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32(ctrl, Max78000AesState),
+ VMSTATE_UINT32(status, Max78000AesState),
+ VMSTATE_UINT32(intfl, Max78000AesState),
+ VMSTATE_UINT32(inten, Max78000AesState),
+ VMSTATE_UINT8_ARRAY(data, Max78000AesState, 16),
+ VMSTATE_UINT8_ARRAY(key, Max78000AesState, 32),
+ VMSTATE_UINT8_ARRAY(result, Max78000AesState, 16),
+ VMSTATE_UINT32_ARRAY(internal_key.rd_key, Max78000AesState, 60),
+ VMSTATE_INT32(internal_key.rounds, Max78000AesState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void max78000_aes_init(Object *obj)
+{
+ Max78000AesState *s = MAX78000_AES(obj);
+ sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq);
+
+ memory_region_init_io(&s->mmio, obj, &max78000_aes_ops, s,
+ TYPE_MAX78000_AES, 0xc00);
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
+
+}
+
+static void max78000_aes_class_init(ObjectClass *klass, const void *data)
+{
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ rc->phases.hold = max78000_aes_reset_hold;
+ dc->vmsd = &vmstate_max78000_aes;
+
+}
+
+static const TypeInfo max78000_aes_info = {
+ .name = TYPE_MAX78000_AES,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(Max78000AesState),
+ .instance_init = max78000_aes_init,
+ .class_init = max78000_aes_class_init,
+};
+
+static void max78000_aes_register_types(void)
+{
+ type_register_static(&max78000_aes_info);
+}
+
+type_init(max78000_aes_register_types)
diff --git a/hw/misc/max78000_gcr.c b/hw/misc/max78000_gcr.c
new file mode 100644
index 0000000..fbbc92c
--- /dev/null
+++ b/hw/misc/max78000_gcr.c
@@ -0,0 +1,351 @@
+/*
+ * MAX78000 Global Control Registers
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "trace.h"
+#include "hw/irq.h"
+#include "system/runstate.h"
+#include "migration/vmstate.h"
+#include "hw/qdev-properties.h"
+#include "hw/char/max78000_uart.h"
+#include "hw/misc/max78000_trng.h"
+#include "hw/misc/max78000_aes.h"
+#include "hw/misc/max78000_gcr.h"
+
+
+static void max78000_gcr_reset_hold(Object *obj, ResetType type)
+{
+ DeviceState *dev = DEVICE(obj);
+ Max78000GcrState *s = MAX78000_GCR(dev);
+ s->sysctrl = 0x21002;
+ s->rst0 = 0;
+ /* All clocks are always ready */
+ s->clkctrl = 0x3e140008;
+ s->pm = 0x3f000;
+ s->pclkdiv = 0;
+ s->pclkdis0 = 0xffffffff;
+ s->memctrl = 0x5;
+ s->memz = 0;
+ s->sysst = 0;
+ s->rst1 = 0;
+ s->pckdis1 = 0xffffffff;
+ s->eventen = 0;
+ s->revision = 0xa1;
+ s->sysie = 0;
+ s->eccerr = 0;
+ s->ecced = 0;
+ s->eccie = 0;
+ s->eccaddr = 0;
+}
+
+static uint64_t max78000_gcr_read(void *opaque, hwaddr addr,
+ unsigned int size)
+{
+ Max78000GcrState *s = opaque;
+
+ switch (addr) {
+ case SYSCTRL:
+ return s->sysctrl;
+
+ case RST0:
+ return s->rst0;
+
+ case CLKCTRL:
+ return s->clkctrl;
+
+ case PM:
+ return s->pm;
+
+ case PCLKDIV:
+ return s->pclkdiv;
+
+ case PCLKDIS0:
+ return s->pclkdis0;
+
+ case MEMCTRL:
+ return s->memctrl;
+
+ case MEMZ:
+ return s->memz;
+
+ case SYSST:
+ return s->sysst;
+
+ case RST1:
+ return s->rst1;
+
+ case PCKDIS1:
+ return s->pckdis1;
+
+ case EVENTEN:
+ return s->eventen;
+
+ case REVISION:
+ return s->revision;
+
+ case SYSIE:
+ return s->sysie;
+
+ case ECCERR:
+ return s->eccerr;
+
+ case ECCED:
+ return s->ecced;
+
+ case ECCIE:
+ return s->eccie;
+
+ case ECCADDR:
+ return s->eccaddr;
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"
+ HWADDR_PRIx "\n", __func__, addr);
+ return 0;
+
+ }
+}
+
+static void max78000_gcr_write(void *opaque, hwaddr addr,
+ uint64_t val64, unsigned int size)
+{
+ Max78000GcrState *s = opaque;
+ uint32_t val = val64;
+ uint8_t zero[0xc000] = {0};
+ switch (addr) {
+ case SYSCTRL:
+ /* Checksum calculations always pass immediately */
+ s->sysctrl = (val & 0x30000) | 0x1002;
+ break;
+
+ case RST0:
+ if (val & SYSTEM_RESET) {
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ }
+ if (val & PERIPHERAL_RESET) {
+ /*
+ * Peripheral reset resets all peripherals. The CPU
+ * retains its state. The GPIO, watchdog timers, AoD,
+ * RAM retention, and general control registers (GCR),
+ * including the clock configuration, are unaffected.
+ */
+ val = UART2_RESET | UART1_RESET | UART0_RESET |
+ ADC_RESET | CNN_RESET | TRNG_RESET |
+ RTC_RESET | I2C0_RESET | SPI1_RESET |
+ TMR3_RESET | TMR2_RESET | TMR1_RESET |
+ TMR0_RESET | WDT0_RESET | DMA_RESET;
+ }
+ if (val & SOFT_RESET) {
+ /* Soft reset also resets GPIO */
+ val = UART2_RESET | UART1_RESET | UART0_RESET |
+ ADC_RESET | CNN_RESET | TRNG_RESET |
+ RTC_RESET | I2C0_RESET | SPI1_RESET |
+ TMR3_RESET | TMR2_RESET | TMR1_RESET |
+ TMR0_RESET | GPIO1_RESET | GPIO0_RESET |
+ DMA_RESET;
+ }
+ if (val & UART2_RESET) {
+ device_cold_reset(s->uart2);
+ }
+ if (val & UART1_RESET) {
+ device_cold_reset(s->uart1);
+ }
+ if (val & UART0_RESET) {
+ device_cold_reset(s->uart0);
+ }
+ if (val & TRNG_RESET) {
+ device_cold_reset(s->trng);
+ }
+ if (val & AES_RESET) {
+ device_cold_reset(s->aes);
+ }
+ /* TODO: As other devices are implemented, add them here */
+ break;
+
+ case CLKCTRL:
+ s->clkctrl = val | SYSCLK_RDY;
+ break;
+
+ case PM:
+ s->pm = val;
+ break;
+
+ case PCLKDIV:
+ s->pclkdiv = val;
+ break;
+
+ case PCLKDIS0:
+ s->pclkdis0 = val;
+ break;
+
+ case MEMCTRL:
+ s->memctrl = val;
+ break;
+
+ case MEMZ:
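+        /*
+         * Each ramN bit zeroes the matching SYSRAM region via the sram
+         * address space (32K + 32K + 48K + 16K, mirroring the memory map).
+         */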
+ if (val & ram0) {
+ address_space_write(&s->sram_as, SYSRAM0_START,
+ MEMTXATTRS_UNSPECIFIED, zero, 0x8000);
+ }
+ if (val & ram1) {
+ address_space_write(&s->sram_as, SYSRAM1_START,
+ MEMTXATTRS_UNSPECIFIED, zero, 0x8000);
+ }
+ if (val & ram2) {
+ address_space_write(&s->sram_as, SYSRAM2_START,
+ MEMTXATTRS_UNSPECIFIED, zero, 0xC000);
+ }
+ if (val & ram3) {
+ address_space_write(&s->sram_as, SYSRAM3_START,
+ MEMTXATTRS_UNSPECIFIED, zero, 0x4000);
+ }
+ break;
+
+ case SYSST:
+ s->sysst = val;
+ break;
+
+ case RST1:
+ /* TODO: As other devices are implemented, add them here */
+ s->rst1 = val;
+ break;
+
+ case PCKDIS1:
+ s->pckdis1 = val;
+ break;
+
+ case EVENTEN:
+ s->eventen = val;
+ break;
+
+ case REVISION:
+ s->revision = val;
+ break;
+
+ case SYSIE:
+ s->sysie = val;
+ break;
+
+ case ECCERR:
+ s->eccerr = val;
+ break;
+
+ case ECCED:
+ s->ecced = val;
+ break;
+
+ case ECCIE:
+ s->eccie = val;
+ break;
+
+ case ECCADDR:
+ s->eccaddr = val;
+ break;
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
+ break;
+
+ }
+}
+
+static const Property max78000_gcr_properties[] = {
+ DEFINE_PROP_LINK("sram", Max78000GcrState, sram,
+ TYPE_MEMORY_REGION, MemoryRegion*),
+ DEFINE_PROP_LINK("uart0", Max78000GcrState, uart0,
+ TYPE_MAX78000_UART, DeviceState*),
+ DEFINE_PROP_LINK("uart1", Max78000GcrState, uart1,
+ TYPE_MAX78000_UART, DeviceState*),
+ DEFINE_PROP_LINK("uart2", Max78000GcrState, uart2,
+ TYPE_MAX78000_UART, DeviceState*),
+ DEFINE_PROP_LINK("trng", Max78000GcrState, trng,
+ TYPE_MAX78000_TRNG, DeviceState*),
+ DEFINE_PROP_LINK("aes", Max78000GcrState, aes,
+ TYPE_MAX78000_AES, DeviceState*),
+};
+
+static const MemoryRegionOps max78000_gcr_ops = {
+ .read = max78000_gcr_read,
+ .write = max78000_gcr_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+};
+
+static const VMStateDescription vmstate_max78000_gcr = {
+ .name = TYPE_MAX78000_GCR,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32(sysctrl, Max78000GcrState),
+ VMSTATE_UINT32(rst0, Max78000GcrState),
+ VMSTATE_UINT32(clkctrl, Max78000GcrState),
+ VMSTATE_UINT32(pm, Max78000GcrState),
+ VMSTATE_UINT32(pclkdiv, Max78000GcrState),
+ VMSTATE_UINT32(pclkdis0, Max78000GcrState),
+ VMSTATE_UINT32(memctrl, Max78000GcrState),
+ VMSTATE_UINT32(memz, Max78000GcrState),
+ VMSTATE_UINT32(sysst, Max78000GcrState),
+ VMSTATE_UINT32(rst1, Max78000GcrState),
+ VMSTATE_UINT32(pckdis1, Max78000GcrState),
+ VMSTATE_UINT32(eventen, Max78000GcrState),
+ VMSTATE_UINT32(revision, Max78000GcrState),
+ VMSTATE_UINT32(sysie, Max78000GcrState),
+ VMSTATE_UINT32(eccerr, Max78000GcrState),
+ VMSTATE_UINT32(ecced, Max78000GcrState),
+ VMSTATE_UINT32(eccie, Max78000GcrState),
+ VMSTATE_UINT32(eccaddr, Max78000GcrState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void max78000_gcr_init(Object *obj)
+{
+ Max78000GcrState *s = MAX78000_GCR(obj);
+
+ memory_region_init_io(&s->mmio, obj, &max78000_gcr_ops, s,
+ TYPE_MAX78000_GCR, 0x400);
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
+
+}
+
+static void max78000_gcr_realize(DeviceState *dev, Error **errp)
+{
+ Max78000GcrState *s = MAX78000_GCR(dev);
+
+ address_space_init(&s->sram_as, s->sram, "sram");
+}
+
+static void max78000_gcr_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ device_class_set_props(dc, max78000_gcr_properties);
+
+ dc->realize = max78000_gcr_realize;
+ dc->vmsd = &vmstate_max78000_gcr;
+ rc->phases.hold = max78000_gcr_reset_hold;
+}
+
+static const TypeInfo max78000_gcr_info = {
+ .name = TYPE_MAX78000_GCR,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(Max78000GcrState),
+ .instance_init = max78000_gcr_init,
+ .class_init = max78000_gcr_class_init,
+};
+
+static void max78000_gcr_register_types(void)
+{
+ type_register_static(&max78000_gcr_info);
+}
+
+type_init(max78000_gcr_register_types)
diff --git a/hw/misc/max78000_icc.c b/hw/misc/max78000_icc.c
new file mode 100644
index 0000000..6f7d2b2
--- /dev/null
+++ b/hw/misc/max78000_icc.c
@@ -0,0 +1,120 @@
+/*
+ * MAX78000 Instruction Cache
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "trace.h"
+#include "hw/irq.h"
+#include "migration/vmstate.h"
+#include "hw/misc/max78000_icc.h"
+
+
+static uint64_t max78000_icc_read(void *opaque, hwaddr addr,
+ unsigned int size)
+{
+ Max78000IccState *s = opaque;
+ switch (addr) {
+ case ICC_INFO:
+ return s->info;
+
+ case ICC_SZ:
+ return s->sz;
+
+ case ICC_CTRL:
+ return s->ctrl;
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Bad offset 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
+ return 0;
+
+ }
+}
+
+static void max78000_icc_write(void *opaque, hwaddr addr,
+ uint64_t val64, unsigned int size)
+{
+ Max78000IccState *s = opaque;
+
+ switch (addr) {
+ case ICC_CTRL:
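+        /*
+         * Bit 16 (presumably the cache-ready flag) always reads as set;
+         * only the enable bit is guest-writable.
+         */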
+ s->ctrl = 0x10000 | (val64 & 1);
+ break;
+
+ case ICC_INVALIDATE:
+ break;
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Bad offset 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
+ break;
+ }
+}
+
+static const MemoryRegionOps max78000_icc_ops = {
+ .read = max78000_icc_read,
+ .write = max78000_icc_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+};
+
+static const VMStateDescription max78000_icc_vmstate = {
+ .name = TYPE_MAX78000_ICC,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32(info, Max78000IccState),
+ VMSTATE_UINT32(sz, Max78000IccState),
+ VMSTATE_UINT32(ctrl, Max78000IccState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void max78000_icc_reset_hold(Object *obj, ResetType type)
+{
+ Max78000IccState *s = MAX78000_ICC(obj);
+ s->info = 0;
+ s->sz = 0x10000010;
+ s->ctrl = 0x10000;
+}
+
+static void max78000_icc_init(Object *obj)
+{
+ Max78000IccState *s = MAX78000_ICC(obj);
+
+ memory_region_init_io(&s->mmio, obj, &max78000_icc_ops, s,
+ TYPE_MAX78000_ICC, 0x800);
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
+}
+
+static void max78000_icc_class_init(ObjectClass *klass, const void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+
+ rc->phases.hold = max78000_icc_reset_hold;
+ dc->vmsd = &max78000_icc_vmstate;
+}
+
+static const TypeInfo max78000_icc_info = {
+ .name = TYPE_MAX78000_ICC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(Max78000IccState),
+ .instance_init = max78000_icc_init,
+ .class_init = max78000_icc_class_init,
+};
+
+static void max78000_icc_register_types(void)
+{
+ type_register_static(&max78000_icc_info);
+}
+
+type_init(max78000_icc_register_types)
diff --git a/hw/misc/max78000_trng.c b/hw/misc/max78000_trng.c
new file mode 100644
index 0000000..ecdaef5
--- /dev/null
+++ b/hw/misc/max78000_trng.c
@@ -0,0 +1,139 @@
+/*
+ * MAX78000 True Random Number Generator
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "trace.h"
+#include "hw/irq.h"
+#include "migration/vmstate.h"
+#include "hw/misc/max78000_trng.h"
+#include "qemu/guest-random.h"
+
+static uint64_t max78000_trng_read(void *opaque, hwaddr addr,
+ unsigned int size)
+{
+ uint32_t data;
+
+ Max78000TrngState *s = opaque;
+ switch (addr) {
+ case CTRL:
+ return s->ctrl;
+
+ case STATUS:
+ return 1;
+
+ case DATA:
+ /*
+ * When interrupts are enabled, reading random data should cause a
+         * new interrupt to be generated; since there's always a random number
+         * available, we could qemu_set_irq(s->irq, s->ctrl & RND_IE). Because
+         * of how trng_write is set up, that would always be a no-op, so we
+         * don't bother.
+ */
+ qemu_guest_getrandom_nofail(&data, sizeof(data));
+ return data;
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"
+ HWADDR_PRIx "\n", __func__, addr);
+ break;
+ }
+ return 0;
+}
+
+static void max78000_trng_write(void *opaque, hwaddr addr,
+ uint64_t val64, unsigned int size)
+{
+ Max78000TrngState *s = opaque;
+ uint32_t val = val64;
+ switch (addr) {
+ case CTRL:
+ /* TODO: implement AES keygen */
+ s->ctrl = val;
+
+ /*
+ * This device models random number generation as taking 0 time.
+ * A new random number is always available, so the condition for the
+ * RND interrupt is always fulfilled; we can just set irq to 1.
+ */
+ if (val & RND_IE) {
+ qemu_set_irq(s->irq, 1);
+        } else {
+ qemu_set_irq(s->irq, 0);
+ }
+ break;
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%"
+ HWADDR_PRIx "\n", __func__, addr);
+ break;
+ }
+}
+
+static void max78000_trng_reset_hold(Object *obj, ResetType type)
+{
+ Max78000TrngState *s = MAX78000_TRNG(obj);
+ s->ctrl = 0;
+ s->status = 0;
+ s->data = 0;
+}
+
+static const MemoryRegionOps max78000_trng_ops = {
+ .read = max78000_trng_read,
+ .write = max78000_trng_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+};
+
+static const VMStateDescription max78000_trng_vmstate = {
+ .name = TYPE_MAX78000_TRNG,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT32(ctrl, Max78000TrngState),
+ VMSTATE_UINT32(status, Max78000TrngState),
+ VMSTATE_UINT32(data, Max78000TrngState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void max78000_trng_init(Object *obj)
+{
+ Max78000TrngState *s = MAX78000_TRNG(obj);
+ sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq);
+
+ memory_region_init_io(&s->mmio, obj, &max78000_trng_ops, s,
+ TYPE_MAX78000_TRNG, 0x1000);
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
+
+}
+
+static void max78000_trng_class_init(ObjectClass *klass, const void *data)
+{
+ ResettableClass *rc = RESETTABLE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ rc->phases.hold = max78000_trng_reset_hold;
+ dc->vmsd = &max78000_trng_vmstate;
+
+}
+
+static const TypeInfo max78000_trng_info = {
+ .name = TYPE_MAX78000_TRNG,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(Max78000TrngState),
+ .instance_init = max78000_trng_init,
+ .class_init = max78000_trng_class_init,
+};
+
+static void max78000_trng_register_types(void)
+{
+ type_register_static(&max78000_trng_info);
+}
+
+type_init(max78000_trng_register_types)
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
index 6d47de4..b1d8d8e 100644
--- a/hw/misc/meson.build
+++ b/hw/misc/meson.build
@@ -70,6 +70,10 @@ system_ss.add(when: 'CONFIG_IMX', if_true: files(
'imx_ccm.c',
'imx_rngc.c',
))
+system_ss.add(when: 'CONFIG_MAX78000_AES', if_true: files('max78000_aes.c'))
+system_ss.add(when: 'CONFIG_MAX78000_GCR', if_true: files('max78000_gcr.c'))
+system_ss.add(when: 'CONFIG_MAX78000_ICC', if_true: files('max78000_icc.c'))
+system_ss.add(when: 'CONFIG_MAX78000_TRNG', if_true: files('max78000_trng.c'))
system_ss.add(when: 'CONFIG_NPCM7XX', if_true: files(
'npcm_clk.c',
'npcm_gcr.c',
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 08615f6..40f53ad 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -2518,7 +2518,7 @@ static void htab_save_cleanup(void *opaque)
static SaveVMHandlers savevm_htab_handlers = {
.save_setup = htab_save_setup,
.save_live_iterate = htab_save_iterate,
- .save_live_complete_precopy = htab_save_complete,
+ .save_complete = htab_save_complete,
.save_cleanup = htab_save_cleanup,
.load_state = htab_load,
};
diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c
index e6aa445..f87d274 100644
--- a/hw/s390x/s390-pci-bus.c
+++ b/hw/s390x/s390-pci-bus.c
@@ -384,9 +384,9 @@ static uint64_t get_table_index(uint64_t iova, int8_t ett)
return calc_sx(iova);
case ZPCI_ETT_RT:
return calc_rtx(iova);
+ default:
+ g_assert_not_reached();
}
-
- return -1;
}
static bool entry_isvalid(uint64_t entry, int8_t ett)
@@ -397,22 +397,24 @@ static bool entry_isvalid(uint64_t entry, int8_t ett)
case ZPCI_ETT_ST:
case ZPCI_ETT_RT:
return rt_entry_isvalid(entry);
+ default:
+ g_assert_not_reached();
}
-
- return false;
}
/* Return true if address translation is done */
static bool translate_iscomplete(uint64_t entry, int8_t ett)
{
switch (ett) {
- case 0:
+ case ZPCI_ETT_ST:
return (entry & ZPCI_TABLE_FC) ? true : false;
- case 1:
+ case ZPCI_ETT_RT:
return false;
+ case ZPCI_ETT_PT:
+ return true;
+ default:
+ g_assert_not_reached();
}
-
- return true;
}
static uint64_t get_frame_size(int8_t ett)
@@ -424,9 +426,9 @@ static uint64_t get_frame_size(int8_t ett)
return 1ULL << 20;
case ZPCI_ETT_RT:
return 1ULL << 31;
+ default:
+ g_assert_not_reached();
}
-
- return 0;
}
static uint64_t get_next_table_origin(uint64_t entry, int8_t ett)
@@ -438,9 +440,9 @@ static uint64_t get_next_table_origin(uint64_t entry, int8_t ett)
return get_st_pto(entry);
case ZPCI_ETT_RT:
return get_rt_sto(entry);
+ default:
+ g_assert_not_reached();
}
-
- return 0;
}
/**
diff --git a/hw/s390x/s390-stattrib.c b/hw/s390x/s390-stattrib.c
index f74cf32..13a678a 100644
--- a/hw/s390x/s390-stattrib.c
+++ b/hw/s390x/s390-stattrib.c
@@ -338,7 +338,7 @@ static const TypeInfo qemu_s390_stattrib_info = {
static SaveVMHandlers savevm_s390_stattrib_handlers = {
.save_setup = cmma_save_setup,
.save_live_iterate = cmma_save_iterate,
- .save_live_complete_precopy = cmma_save_complete,
+ .save_complete = cmma_save_complete,
.state_pending_exact = cmma_state_pending,
.state_pending_estimate = cmma_state_pending,
.save_cleanup = cmma_save_cleanup,
diff --git a/hw/vfio/migration-multifd.c b/hw/vfio/migration-multifd.c
index 850a319..5563548 100644
--- a/hw/vfio/migration-multifd.c
+++ b/hw/vfio/migration-multifd.c
@@ -583,7 +583,7 @@ vfio_save_complete_precopy_thread_config_state(VFIODevice *vbasedev,
/*
* This thread is spawned by the migration core directly via
- * .save_live_complete_precopy_thread SaveVMHandler.
+ * .save_complete_precopy_thread SaveVMHandler.
*
* It exits after either:
* * completing saving the remaining device state and device config, OR:
@@ -592,7 +592,7 @@ vfio_save_complete_precopy_thread_config_state(VFIODevice *vbasedev,
* multifd_device_state_save_thread_should_exit() returning true.
*/
bool
-vfio_multifd_save_complete_precopy_thread(SaveLiveCompletePrecopyThreadData *d,
+vfio_multifd_save_complete_precopy_thread(SaveCompletePrecopyThreadData *d,
Error **errp)
{
VFIODevice *vbasedev = d->handler_opaque;
diff --git a/hw/vfio/migration-multifd.h b/hw/vfio/migration-multifd.h
index 0bab632..ebf22a7 100644
--- a/hw/vfio/migration-multifd.h
+++ b/hw/vfio/migration-multifd.h
@@ -26,7 +26,7 @@ bool vfio_multifd_load_state_buffer(void *opaque, char *data, size_t data_size,
void vfio_multifd_emit_dummy_eos(VFIODevice *vbasedev, QEMUFile *f);
bool
-vfio_multifd_save_complete_precopy_thread(SaveLiveCompletePrecopyThreadData *d,
+vfio_multifd_save_complete_precopy_thread(SaveCompletePrecopyThreadData *d,
Error **errp);
int vfio_multifd_switchover_start(VFIODevice *vbasedev);
diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
index b76697bd..c329578 100644
--- a/hw/vfio/migration.c
+++ b/hw/vfio/migration.c
@@ -824,7 +824,7 @@ static const SaveVMHandlers savevm_vfio_handlers = {
.state_pending_exact = vfio_state_pending_exact,
.is_active_iterate = vfio_is_active_iterate,
.save_live_iterate = vfio_save_iterate,
- .save_live_complete_precopy = vfio_save_complete_precopy,
+ .save_complete = vfio_save_complete_precopy,
.save_state = vfio_save_state,
.load_setup = vfio_load_setup,
.load_cleanup = vfio_load_cleanup,
@@ -835,7 +835,7 @@ static const SaveVMHandlers savevm_vfio_handlers = {
*/
.load_state_buffer = vfio_multifd_load_state_buffer,
.switchover_start = vfio_switchover_start,
- .save_live_complete_precopy_thread = vfio_multifd_save_complete_precopy_thread,
+ .save_complete_precopy_thread = vfio_multifd_save_complete_precopy_thread,
};
/* ---------------------------------------------------------------------- */
diff --git a/include/hw/arm/max78000_soc.h b/include/hw/arm/max78000_soc.h
new file mode 100644
index 0000000..a203079
--- /dev/null
+++ b/include/hw/arm/max78000_soc.h
@@ -0,0 +1,50 @@
+/*
+ * MAX78000 SOC
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_ARM_MAX78000_SOC_H
+#define HW_ARM_MAX78000_SOC_H
+
+#include "hw/or-irq.h"
+#include "hw/arm/armv7m.h"
+#include "hw/misc/max78000_aes.h"
+#include "hw/misc/max78000_gcr.h"
+#include "hw/misc/max78000_icc.h"
+#include "hw/char/max78000_uart.h"
+#include "hw/misc/max78000_trng.h"
+#include "qom/object.h"
+
+#define TYPE_MAX78000_SOC "max78000-soc"
+OBJECT_DECLARE_SIMPLE_TYPE(MAX78000State, MAX78000_SOC)
+
+#define FLASH_BASE_ADDRESS 0x10000000
+#define FLASH_SIZE (512 * 1024)
+#define SRAM_BASE_ADDRESS 0x20000000
+#define SRAM_SIZE (128 * 1024)
+
+/*
+ * The MAX78000 has 2 instruction caches; only icc0 matters, as icc1
+ * serves the RISC-V core.
+ */
+#define MAX78000_NUM_ICC 2
+#define MAX78000_NUM_UART 3
+
+struct MAX78000State {
+ SysBusDevice parent_obj;
+
+ ARMv7MState armv7m;
+
+ MemoryRegion sram;
+ MemoryRegion flash;
+
+ Max78000GcrState gcr;
+ Max78000IccState icc[MAX78000_NUM_ICC];
+ Max78000UartState uart[MAX78000_NUM_UART];
+ Max78000TrngState trng;
+ Max78000AesState aes;
+
+ Clock *sysclk;
+};
+
+#endif
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index 9a1b0f5..4375819 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -36,6 +36,7 @@
#include "hw/arm/boot.h"
#include "hw/arm/bsa.h"
#include "hw/block/flash.h"
+#include "hw/cxl/cxl.h"
#include "system/kvm.h"
#include "hw/intc/arm_gicv3_common.h"
#include "qom/object.h"
@@ -85,6 +86,7 @@ enum {
/* indices of IO regions located after the RAM */
enum {
VIRT_HIGH_GIC_REDIST2 = VIRT_LOWMEMMAP_LAST,
+ VIRT_CXL_HOST,
VIRT_HIGH_PCIE_ECAM,
VIRT_HIGH_PCIE_MMIO,
};
@@ -140,6 +142,7 @@ struct VirtMachineState {
bool secure;
bool highmem;
bool highmem_compact;
+ bool highmem_cxl;
bool highmem_ecam;
bool highmem_mmio;
bool highmem_redists;
@@ -174,6 +177,7 @@ struct VirtMachineState {
char *oem_id;
char *oem_table_id;
bool ns_el2_virt_timer_irq;
+ CXLState cxl_devices_state;
};
#define VIRT_ECAM_ID(high) (high ? VIRT_HIGH_PCIE_ECAM : VIRT_PCIE_ECAM)
diff --git a/include/hw/char/max78000_uart.h b/include/hw/char/max78000_uart.h
new file mode 100644
index 0000000..cf90d51
--- /dev/null
+++ b/include/hw/char/max78000_uart.h
@@ -0,0 +1,78 @@
+/*
+ * MAX78000 UART
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_MAX78000_UART_H
+#define HW_MAX78000_UART_H
+
+#include "hw/sysbus.h"
+#include "chardev/char-fe.h"
+#include "qemu/fifo8.h"
+#include "qom/object.h"
+
+#define UART_CTRL 0x0
+#define UART_STATUS 0x4
+#define UART_INT_EN 0x8
+#define UART_INT_FL 0xc
+#define UART_CLKDIV 0x10
+#define UART_OSR 0x14
+#define UART_TXPEEK 0x18
+#define UART_PNR 0x1c
+#define UART_FIFO 0x20
+#define UART_DMA 0x30
+#define UART_WKEN 0x34
+#define UART_WKFL 0x38
+
+/* CTRL */
+#define UART_CTF_DIS (1 << 7)
+#define UART_FLUSH_TX (1 << 8)
+#define UART_FLUSH_RX (1 << 9)
+#define UART_BCLKEN (1 << 15)
+#define UART_BCLKRDY (1 << 19)
+
+/* STATUS */
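+/* RX_LVL is the bit offset of the RX FIFO level field, not a mask */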
+#define UART_RX_LVL 8
+#define UART_TX_EM (1 << 6)
+#define UART_RX_FULL (1 << 5)
+#define UART_RX_EM (1 << 4)
+
+/* PNR (Pin Control Register) */
+#define UART_CTS 1
+#define UART_RTS (1 << 1)
+
+/* INT_EN / INT_FL */
+#define UART_RX_THD (1 << 4)
+#define UART_TX_HE (1 << 6)
+
+#define UART_RXBUFLEN 0x100
+#define TYPE_MAX78000_UART "max78000-uart"
+OBJECT_DECLARE_SIMPLE_TYPE(Max78000UartState, MAX78000_UART)
+
+struct Max78000UartState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+
+ uint32_t ctrl;
+ uint32_t status;
+ uint32_t int_en;
+ uint32_t int_fl;
+ uint32_t clkdiv;
+ uint32_t osr;
+ uint32_t txpeek;
+ uint32_t pnr;
+ uint32_t fifo;
+ uint32_t dma;
+ uint32_t wken;
+ uint32_t wkfl;
+
+ Fifo8 rx_fifo;
+
+ CharBackend chr;
+ qemu_irq irq;
+};
+#endif /* HW_MAX78000_UART_H */
diff --git a/include/hw/cxl/cxl.h b/include/hw/cxl/cxl.h
index 75e47b6..de66ab8 100644
--- a/include/hw/cxl/cxl.h
+++ b/include/hw/cxl/cxl.h
@@ -27,6 +27,8 @@
typedef struct PXBCXLDev PXBCXLDev;
typedef struct CXLFixedWindow {
+ SysBusDevice parent_obj;
+ int index;
uint64_t size;
char **targets;
PXBCXLDev *target_hbs[16];
@@ -37,12 +39,13 @@ typedef struct CXLFixedWindow {
MemoryRegion mr;
hwaddr base;
} CXLFixedWindow;
+#define TYPE_CXL_FMW "cxl-fmw"
+OBJECT_DECLARE_SIMPLE_TYPE(CXLFixedWindow, CXL_FMW)
typedef struct CXLState {
bool is_enabled;
MemoryRegion host_mr;
unsigned int next_mr_idx;
- GList *fixed_windows;
CXLFixedMemoryWindowOptionsList *cfmw_list;
} CXLState;
diff --git a/include/hw/cxl/cxl_host.h b/include/hw/cxl/cxl_host.h
index c9bc9c7..cd3c368 100644
--- a/include/hw/cxl/cxl_host.h
+++ b/include/hw/cxl/cxl_host.h
@@ -14,8 +14,11 @@
#define CXL_HOST_H
void cxl_machine_init(Object *obj, CXLState *state);
-void cxl_fmws_link_targets(CXLState *stat, Error **errp);
+void cxl_fmws_link_targets(Error **errp);
void cxl_hook_up_pxb_registers(PCIBus *bus, CXLState *state, Error **errp);
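+/*
+ * Fixed memory windows are modelled as QOM objects; these helpers lay
+ * them out in the memory map and enumerate them in a stable, sorted order.
+ */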
+hwaddr cxl_fmws_set_memmap(hwaddr base, hwaddr max_addr);
+void cxl_fmws_update_mmio(void);
+GSList *cxl_fmws_get_all_sorted(void);
extern const MemoryRegionOps cfmws_ops;
diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h
index a3d6a0e..c185038 100644
--- a/include/hw/intc/arm_gicv3_common.h
+++ b/include/hw/intc/arm_gicv3_common.h
@@ -231,6 +231,7 @@ struct GICv3State {
uint32_t num_cpu;
uint32_t num_irq;
uint32_t revision;
+ uint32_t maint_irq;
bool lpi_enable;
bool nmi_support;
bool security_extn;
diff --git a/include/hw/intc/loongarch_extioi.h b/include/hw/intc/loongarch_extioi.h
index 9be1d73..4795bdc 100644
--- a/include/hw/intc/loongarch_extioi.h
+++ b/include/hw/intc/loongarch_extioi.h
@@ -22,7 +22,6 @@ struct LoongArchExtIOIClass {
LoongArchExtIOICommonClass parent_class;
DeviceRealize parent_realize;
- DeviceUnrealize parent_unrealize;
ResettablePhases parent_phases;
};
diff --git a/include/hw/intc/loongarch_extioi_common.h b/include/hw/intc/loongarch_extioi_common.h
index dca25ff..c021cce 100644
--- a/include/hw/intc/loongarch_extioi_common.h
+++ b/include/hw/intc/loongarch_extioi_common.h
@@ -94,6 +94,7 @@ struct LoongArchExtIOICommonClass {
SysBusDeviceClass parent_class;
DeviceRealize parent_realize;
+ DeviceUnrealize parent_unrealize;
ResettablePhases parent_phases;
int (*pre_save)(void *s);
int (*post_load)(void *s, int version_id);
diff --git a/include/hw/misc/max78000_aes.h b/include/hw/misc/max78000_aes.h
new file mode 100644
index 0000000..407c45e
--- /dev/null
+++ b/include/hw/misc/max78000_aes.h
@@ -0,0 +1,68 @@
+/*
+ * MAX78000 AES
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HW_MAX78000_AES_H
+#define HW_MAX78000_AES_H
+
+#include "hw/sysbus.h"
+#include "crypto/aes.h"
+#include "qom/object.h"
+
+#define TYPE_MAX78000_AES "max78000-aes"
+OBJECT_DECLARE_SIMPLE_TYPE(Max78000AesState, MAX78000_AES)
+
+#define CTRL 0
+#define STATUS 4
+#define INTFL 8
+#define INTEN 0xc
+#define FIFO 0x10
+
+#define KEY_BASE 0x400
+#define KEY_END 0x420
+
+/* CTRL */
+#define TYPE (1 << 9 | 1 << 8)
+#define KEY_SIZE (1 << 7 | 1 << 6)
+#define OUTPUT_FLUSH (1 << 5)
+#define INPUT_FLUSH (1 << 4)
+#define START (1 << 3)
+
+#define AES_EN (1 << 0)
+
+/* STATUS */
+#define OUTPUT_FULL (1 << 4)
+#define OUTPUT_EMPTY (1 << 3)
+#define INPUT_FULL (1 << 2)
+#define INPUT_EMPTY (1 << 1)
+#define BUSY (1 << 0)
+
+/* INTFL */
+#define DONE (1 << 0)
+
+struct Max78000AesState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+
+ uint32_t ctrl;
+ uint32_t status;
+ uint32_t intfl;
+ uint32_t inten;
+ uint32_t data_index;
+ uint8_t data[16];
+
+ uint8_t key[32];
+ AES_KEY internal_key;
+
+ uint32_t result_index;
+ uint8_t result[16];
+
+
+ qemu_irq irq;
+};
+
+#endif
diff --git a/include/hw/misc/max78000_gcr.h b/include/hw/misc/max78000_gcr.h
new file mode 100644
index 0000000..d5858a4
--- /dev/null
+++ b/include/hw/misc/max78000_gcr.h
@@ -0,0 +1,131 @@
+/*
+ * MAX78000 Global Control Register
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HW_MAX78000_GCR_H
+#define HW_MAX78000_GCR_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_MAX78000_GCR "max78000-gcr"
+OBJECT_DECLARE_SIMPLE_TYPE(Max78000GcrState, MAX78000_GCR)
+
+#define SYSCTRL 0x0
+#define RST0 0x4
+#define CLKCTRL 0x8
+#define PM 0xc
+#define PCLKDIV 0x18
+#define PCLKDIS0 0x24
+#define MEMCTRL 0x28
+#define MEMZ 0x2c
+#define SYSST 0x40
+#define RST1 0x44
+#define PCKDIS1 0x48
+#define EVENTEN 0x4c
+#define REVISION 0x50
+#define SYSIE 0x54
+#define ECCERR 0x64
+#define ECCED 0x68
+#define ECCIE 0x6c
+#define ECCADDR 0x70
+
+/* RST0 */
+#define SYSTEM_RESET (1 << 31)
+#define PERIPHERAL_RESET (1 << 30)
+#define SOFT_RESET (1 << 29)
+#define UART2_RESET (1 << 28)
+
+#define ADC_RESET (1 << 26)
+#define CNN_RESET (1 << 25)
+#define TRNG_RESET (1 << 24)
+
+#define RTC_RESET (1 << 17)
+#define I2C0_RESET (1 << 16)
+
+#define SPI1_RESET (1 << 13)
+#define UART1_RESET (1 << 12)
+#define UART0_RESET (1 << 11)
+
+#define TMR3_RESET (1 << 8)
+#define TMR2_RESET (1 << 7)
+#define TMR1_RESET (1 << 6)
+#define TMR0_RESET (1 << 5)
+
+#define GPIO1_RESET (1 << 3)
+#define GPIO0_RESET (1 << 2)
+#define WDT0_RESET (1 << 1)
+#define DMA_RESET (1 << 0)
+
+/* CLKCTRL */
+#define SYSCLK_RDY (1 << 13)
+
+/* MEMZ */
+#define ram0 (1 << 0)
+#define ram1 (1 << 1)
+#define ram2 (1 << 2)
+#define ram3 (1 << 3)
+
+/* RST1 */
+#define CPU1_RESET (1 << 31)
+
+#define SIMO_RESET (1 << 25)
+#define DVS_RESET (1 << 24)
+
+#define I2C2_RESET (1 << 20)
+#define I2S_RESET (1 << 19)
+
+#define SMPHR_RESET (1 << 16)
+
+#define SPI0_RESET (1 << 11)
+#define AES_RESET (1 << 10)
+#define CRC_RESET (1 << 9)
+
+#define PT_RESET (1 << 1)
+#define I2C1_RESET (1 << 0)
+
+
+#define SYSRAM0_START 0x20000000
+#define SYSRAM1_START 0x20008000
+#define SYSRAM2_START 0x20010000
+#define SYSRAM3_START 0x2001C000
+
+struct Max78000GcrState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+
+ uint32_t sysctrl;
+ uint32_t rst0;
+ uint32_t clkctrl;
+ uint32_t pm;
+ uint32_t pclkdiv;
+ uint32_t pclkdis0;
+ uint32_t memctrl;
+ uint32_t memz;
+ uint32_t sysst;
+ uint32_t rst1;
+ uint32_t pckdis1;
+ uint32_t eventen;
+ uint32_t revision;
+ uint32_t sysie;
+ uint32_t eccerr;
+ uint32_t ecced;
+ uint32_t eccie;
+ uint32_t eccaddr;
+
+ MemoryRegion *sram;
+ AddressSpace sram_as;
+
+ DeviceState *uart0;
+ DeviceState *uart1;
+ DeviceState *uart2;
+ DeviceState *trng;
+ DeviceState *aes;
+
+};
+
+#endif
diff --git a/include/hw/misc/max78000_icc.h b/include/hw/misc/max78000_icc.h
new file mode 100644
index 0000000..6fe2bb7
--- /dev/null
+++ b/include/hw/misc/max78000_icc.h
@@ -0,0 +1,33 @@
+/*
+ * MAX78000 Instruction Cache
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_MAX78000_ICC_H
+#define HW_MAX78000_ICC_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_MAX78000_ICC "max78000-icc"
+OBJECT_DECLARE_SIMPLE_TYPE(Max78000IccState, MAX78000_ICC)
+
+#define ICC_INFO 0x0
+#define ICC_SZ 0x4
+#define ICC_CTRL 0x100
+#define ICC_INVALIDATE 0x700
+
+struct Max78000IccState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+
+ uint32_t info;
+ uint32_t sz;
+ uint32_t ctrl;
+};
+
+#endif
diff --git a/include/hw/misc/max78000_trng.h b/include/hw/misc/max78000_trng.h
new file mode 100644
index 0000000..c5a8129
--- /dev/null
+++ b/include/hw/misc/max78000_trng.h
@@ -0,0 +1,35 @@
+/*
+ * MAX78000 True Random Number Generator
+ *
+ * Copyright (c) 2025 Jackson Donaldson <jcksn@duck.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HW_MAX78000_TRNG_H
+#define HW_MAX78000_TRNG_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+
+#define TYPE_MAX78000_TRNG "max78000-trng"
+OBJECT_DECLARE_SIMPLE_TYPE(Max78000TrngState, MAX78000_TRNG)
+
+#define CTRL 0
+#define STATUS 4
+#define DATA 8
+
+#define RND_IE (1 << 1)
+
+struct Max78000TrngState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion mmio;
+
+ uint32_t ctrl;
+ uint32_t status;
+ uint32_t data;
+
+ qemu_irq irq;
+};
+
+#endif
diff --git a/include/migration/misc.h b/include/migration/misc.h
index 8fd36eb..a261f99 100644
--- a/include/migration/misc.h
+++ b/include/migration/misc.h
@@ -119,19 +119,19 @@ bool migrate_uri_parse(const char *uri, MigrationChannel **channel,
Error **errp);
/* migration/multifd-device-state.c */
-typedef struct SaveLiveCompletePrecopyThreadData {
- SaveLiveCompletePrecopyThreadHandler hdlr;
+typedef struct SaveCompletePrecopyThreadData {
+ SaveCompletePrecopyThreadHandler hdlr;
char *idstr;
uint32_t instance_id;
void *handler_opaque;
-} SaveLiveCompletePrecopyThreadData;
+} SaveCompletePrecopyThreadData;
bool multifd_queue_device_state(char *idstr, uint32_t instance_id,
char *data, size_t len);
bool multifd_device_state_supported(void);
void
-multifd_spawn_device_state_save_thread(SaveLiveCompletePrecopyThreadHandler hdlr,
+multifd_spawn_device_state_save_thread(SaveCompletePrecopyThreadHandler hdlr,
char *idstr, uint32_t instance_id,
void *opaque);
diff --git a/include/migration/register.h b/include/migration/register.h
index b79dc81..ae79794 100644
--- a/include/migration/register.h
+++ b/include/migration/register.h
@@ -78,51 +78,43 @@ typedef struct SaveVMHandlers {
void (*save_cleanup)(void *opaque);
/**
- * @save_live_complete_postcopy
+ * @save_complete
*
- * Called at the end of postcopy for all postcopyable devices.
+ * Transmits the last section for the device containing any
+ * remaining data at the end phase of migration.
*
- * @f: QEMUFile where to send the data
- * @opaque: data pointer passed to register_savevm_live()
+ * For precopy, this will be invoked _during_ the switchover phase
+     * after the source VM is stopped.
*
- * Returns zero to indicate success and negative for error
- */
- int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque);
-
- /**
- * @save_live_complete_precopy
- *
- * Transmits the last section for the device containing any
- * remaining data at the end of a precopy phase. When postcopy is
- * enabled, devices that support postcopy will skip this step,
- * where the final data will be flushed at the end of postcopy via
- * @save_live_complete_postcopy instead.
+ * For postcopy, this will be invoked _after_ the switchover phase
+     * (except in some very unusual cases, like PMEM ramblocks), while the
+     * destination VM may already be running.
*
* @f: QEMUFile where to send the data
* @opaque: data pointer passed to register_savevm_live()
*
* Returns zero to indicate success and negative for error
*/
- int (*save_live_complete_precopy)(QEMUFile *f, void *opaque);
+ int (*save_complete)(QEMUFile *f, void *opaque);
/**
- * @save_live_complete_precopy_thread (invoked in a separate thread)
+ * @save_complete_precopy_thread (invoked in a separate thread)
*
* Called at the end of a precopy phase from a separate worker thread
* in configurations where multifd device state transfer is supported
* in order to perform asynchronous transmission of the remaining data in
- * parallel with @save_live_complete_precopy handlers.
+ * parallel with @save_complete handlers.
* When postcopy is enabled, devices that support postcopy will skip this
* step.
*
- * @d: a #SaveLiveCompletePrecopyThreadData containing parameters that the
+ * @d: a #SaveCompletePrecopyThreadData containing parameters that the
* handler may need, including this device section idstr and instance_id,
* and opaque data pointer passed to register_savevm_live().
* @errp: pointer to Error*, to store an error if it happens.
*
* Returns true to indicate success and false for errors.
*/
- SaveLiveCompletePrecopyThreadHandler save_live_complete_precopy_thread;
+ SaveCompletePrecopyThreadHandler save_complete_precopy_thread;
/* This runs both outside and inside the BQL. */
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index 507f081..4a94af9 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -109,7 +109,7 @@ typedef struct QString QString;
typedef struct RAMBlock RAMBlock;
typedef struct Range Range;
typedef struct ReservedRegion ReservedRegion;
-typedef struct SaveLiveCompletePrecopyThreadData SaveLiveCompletePrecopyThreadData;
+typedef struct SaveCompletePrecopyThreadData SaveCompletePrecopyThreadData;
typedef struct SHPCDevice SHPCDevice;
typedef struct SSIBus SSIBus;
typedef struct TCGCPUOps TCGCPUOps;
@@ -135,7 +135,7 @@ typedef struct IRQState *qemu_irq;
typedef void (*qemu_irq_handler)(void *opaque, int n, int level);
typedef bool (*MigrationLoadThread)(void *opaque, bool *should_quit,
Error **errp);
-typedef bool (*SaveLiveCompletePrecopyThreadHandler)(SaveLiveCompletePrecopyThreadData *d,
- Error **errp);
+typedef bool (*SaveCompletePrecopyThreadHandler)(SaveCompletePrecopyThreadData *d,
+ Error **errp);
#endif /* QEMU_TYPEDEFS_H */
diff --git a/migration/block-dirty-bitmap.c b/migration/block-dirty-bitmap.c
index f2c352d..a061aad 100644
--- a/migration/block-dirty-bitmap.c
+++ b/migration/block-dirty-bitmap.c
@@ -1248,8 +1248,7 @@ static bool dirty_bitmap_has_postcopy(void *opaque)
static SaveVMHandlers savevm_dirty_bitmap_handlers = {
.save_setup = dirty_bitmap_save_setup,
- .save_live_complete_postcopy = dirty_bitmap_save_complete,
- .save_live_complete_precopy = dirty_bitmap_save_complete,
+ .save_complete = dirty_bitmap_save_complete,
.has_postcopy = dirty_bitmap_has_postcopy,
.state_pending_exact = dirty_bitmap_state_pending,
.state_pending_estimate = dirty_bitmap_state_pending,
diff --git a/migration/migration-hmp-cmds.c b/migration/migration-hmp-cmds.c
index e8a563c..cef5608 100644
--- a/migration/migration-hmp-cmds.c
+++ b/migration/migration-hmp-cmds.c
@@ -52,6 +52,88 @@ static void migration_global_dump(Monitor *mon)
ms->clear_bitmap_shift);
}
+static const gchar *format_time_str(uint64_t us)
+{
+ const char *units[] = {"us", "ms", "sec"};
+ int index = 0;
+
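+    /* Scale down by 1000 per step, stopping at the largest known unit */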
+ while (us > 1000) {
+ us /= 1000;
+        if (++index >= ARRAY_SIZE(units) - 1) {
+ break;
+ }
+ }
+
+ return g_strdup_printf("%"PRIu64" %s", us, units[index]);
+}
+
+static void migration_dump_blocktime(Monitor *mon, MigrationInfo *info)
+{
+ if (info->has_postcopy_blocktime) {
+ monitor_printf(mon, "Postcopy Blocktime (ms): %" PRIu32 "\n",
+ info->postcopy_blocktime);
+ }
+
+ if (info->has_postcopy_vcpu_blocktime) {
+ uint32List *item = info->postcopy_vcpu_blocktime;
+ const char *sep = "";
+ int count = 0;
+
+ monitor_printf(mon, "Postcopy vCPU Blocktime (ms):\n [");
+
+ while (item) {
+ monitor_printf(mon, "%s%"PRIu32, sep, item->value);
+ item = item->next;
+ /* Each line 10 vcpu results, newline if there's more */
+ sep = ((++count % 10 == 0) && item) ? ",\n " : ", ";
+ }
+ monitor_printf(mon, "]\n");
+ }
+
+ if (info->has_postcopy_latency) {
+ monitor_printf(mon, "Postcopy Latency (ns): %" PRIu64 "\n",
+ info->postcopy_latency);
+ }
+
+ if (info->has_postcopy_non_vcpu_latency) {
+ monitor_printf(mon, "Postcopy non-vCPU Latencies (ns): %" PRIu64 "\n",
+ info->postcopy_non_vcpu_latency);
+ }
+
+ if (info->has_postcopy_vcpu_latency) {
+ uint64List *item = info->postcopy_vcpu_latency;
+ const char *sep = "";
+ int count = 0;
+
+ monitor_printf(mon, "Postcopy vCPU Latencies (ns):\n [");
+
+ while (item) {
+ monitor_printf(mon, "%s%"PRIu64, sep, item->value);
+ item = item->next;
+ /* Each line 10 vcpu results, newline if there's more */
+ sep = ((++count % 10 == 0) && item) ? ",\n " : ", ";
+ }
+ monitor_printf(mon, "]\n");
+ }
+
+ if (info->has_postcopy_latency_dist) {
+ uint64List *item = info->postcopy_latency_dist;
+ int count = 0;
+
+ monitor_printf(mon, "Postcopy Latency Distribution:\n");
+
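+        /* Bucket N covers latencies in [2^N, 2^(N+1)) microseconds */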
+ while (item) {
+ g_autofree const gchar *from = format_time_str(1UL << count);
+ g_autofree const gchar *to = format_time_str(1UL << (count + 1));
+
+ monitor_printf(mon, " [ %8s - %8s ]: %10"PRIu64"\n",
+ from, to, item->value);
+ item = item->next;
+ count++;
+ }
+ }
+}
+
void hmp_info_migrate(Monitor *mon, const QDict *qdict)
{
bool show_all = qdict_get_try_bool(qdict, "all", false);
@@ -69,7 +151,7 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
}
if (info->has_status) {
- monitor_printf(mon, "Status: %s",
+ monitor_printf(mon, "Status: \t\t%s",
MigrationStatus_str(info->status));
if (info->status == MIGRATION_STATUS_FAILED && info->error_desc) {
monitor_printf(mon, " (%s)\n", info->error_desc);
@@ -78,7 +160,7 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
}
if (info->total_time) {
- monitor_printf(mon, "Time (ms): total=%" PRIu64,
+ monitor_printf(mon, "Time (ms): \t\ttotal=%" PRIu64,
info->total_time);
if (info->has_setup_time) {
monitor_printf(mon, ", setup=%" PRIu64,
@@ -110,48 +192,51 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
}
if (info->ram) {
+ g_autofree char *str_psize = size_to_str(info->ram->page_size);
+ g_autofree char *str_total = size_to_str(info->ram->total);
+ g_autofree char *str_transferred = size_to_str(info->ram->transferred);
+ g_autofree char *str_remaining = size_to_str(info->ram->remaining);
+ g_autofree char *str_precopy = size_to_str(info->ram->precopy_bytes);
+ g_autofree char *str_multifd = size_to_str(info->ram->multifd_bytes);
+ g_autofree char *str_postcopy = size_to_str(info->ram->postcopy_bytes);
+
monitor_printf(mon, "RAM info:\n");
- monitor_printf(mon, " Throughput (Mbps): %0.2f\n",
+ monitor_printf(mon, " Throughput (Mbps): \t%0.2f\n",
info->ram->mbps);
- monitor_printf(mon, " Sizes (KiB): pagesize=%" PRIu64
- ", total=%" PRIu64 ",\n",
- info->ram->page_size >> 10,
- info->ram->total >> 10);
- monitor_printf(mon, " transferred=%" PRIu64
- ", remain=%" PRIu64 ",\n",
- info->ram->transferred >> 10,
- info->ram->remaining >> 10);
- monitor_printf(mon, " precopy=%" PRIu64
- ", multifd=%" PRIu64
- ", postcopy=%" PRIu64,
- info->ram->precopy_bytes >> 10,
- info->ram->multifd_bytes >> 10,
- info->ram->postcopy_bytes >> 10);
+ monitor_printf(mon, " Sizes: \t\tpagesize=%s, total=%s\n",
+ str_psize, str_total);
+ monitor_printf(mon, " Transfers: \t\ttransferred=%s, remain=%s\n",
+ str_transferred, str_remaining);
+ monitor_printf(mon, " Channels: \t\tprecopy=%s, "
+ "multifd=%s, postcopy=%s",
+ str_precopy, str_multifd, str_postcopy);
if (info->vfio) {
- monitor_printf(mon, ", vfio=%" PRIu64,
- info->vfio->transferred >> 10);
+ g_autofree char *str_vfio = size_to_str(info->vfio->transferred);
+
+ monitor_printf(mon, ", vfio=%s", str_vfio);
}
monitor_printf(mon, "\n");
- monitor_printf(mon, " Pages: normal=%" PRIu64 ", zero=%" PRIu64
- ", rate_per_sec=%" PRIu64 "\n",
- info->ram->normal,
- info->ram->duplicate,
+ monitor_printf(mon, " Page Types: \tnormal=%" PRIu64
+ ", zero=%" PRIu64 "\n",
+ info->ram->normal, info->ram->duplicate);
+ monitor_printf(mon, " Page Rates (pps): \ttransfer=%" PRIu64,
info->ram->pages_per_second);
- monitor_printf(mon, " Others: dirty_syncs=%" PRIu64,
- info->ram->dirty_sync_count);
-
if (info->ram->dirty_pages_rate) {
- monitor_printf(mon, ", dirty_pages_rate=%" PRIu64,
+ monitor_printf(mon, ", dirty=%" PRIu64,
info->ram->dirty_pages_rate);
}
+ monitor_printf(mon, "\n");
+
+ monitor_printf(mon, " Others: \t\tdirty_syncs=%" PRIu64,
+ info->ram->dirty_sync_count);
if (info->ram->postcopy_requests) {
monitor_printf(mon, ", postcopy_req=%" PRIu64,
info->ram->postcopy_requests);
}
if (info->ram->downtime_bytes) {
- monitor_printf(mon, ", downtime_ram=%" PRIu64,
+ monitor_printf(mon, ", downtime_bytes=%" PRIu64,
info->ram->downtime_bytes);
}
if (info->ram->dirty_sync_missed_zero_copy) {
@@ -199,23 +284,7 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
info->dirty_limit_ring_full_time);
}
- if (info->has_postcopy_blocktime) {
- monitor_printf(mon, "Postcopy Blocktime (ms): %" PRIu32 "\n",
- info->postcopy_blocktime);
- }
-
- if (info->has_postcopy_vcpu_blocktime) {
- Visitor *v;
- char *str;
- v = string_output_visitor_new(false, &str);
- visit_type_uint32List(v, NULL, &info->postcopy_vcpu_blocktime,
- &error_abort);
- visit_complete(v, &str);
- monitor_printf(mon, "Postcopy vCPU Blocktime: %s\n", str);
- g_free(str);
- visit_free(v);
- }
-
+ migration_dump_blocktime(mon, info);
out:
qapi_free_MigrationInfo(info);
}
diff --git a/migration/migration.c b/migration/migration.c
index 4098870..10c216d 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -576,22 +576,27 @@ int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
}
int migrate_send_rp_req_pages(MigrationIncomingState *mis,
- RAMBlock *rb, ram_addr_t start, uint64_t haddr)
+ RAMBlock *rb, ram_addr_t start, uint64_t haddr,
+ uint32_t tid)
{
void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
bool received = false;
WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
received = ramblock_recv_bitmap_test_byte_offset(rb, start);
- if (!received && !g_tree_lookup(mis->page_requested, aligned)) {
- /*
- * The page has not been received, and it's not yet in the page
- * request list. Queue it. Set the value of element to 1, so that
- * things like g_tree_lookup() will return TRUE (1) when found.
- */
- g_tree_insert(mis->page_requested, aligned, (gpointer)1);
- qatomic_inc(&mis->page_requested_count);
- trace_postcopy_page_req_add(aligned, mis->page_requested_count);
+ if (!received) {
+ if (!g_tree_lookup(mis->page_requested, aligned)) {
+ /*
+ * The page has not been received, and it's not yet in the
+ * page request list. Queue it. Set the value of element
+ * to 1, so that things like g_tree_lookup() will return
+ * TRUE (1) when found.
+ */
+ g_tree_insert(mis->page_requested, aligned, (gpointer)1);
+ qatomic_inc(&mis->page_requested_count);
+ trace_postcopy_page_req_add(aligned, mis->page_requested_count);
+ }
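+            /*
+             * Always account the fault start time here, even if the page
+             * request itself was already queued by an earlier fault.
+             */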
+ mark_postcopy_blocktime_begin(haddr, tid, rb);
}
}
@@ -3436,33 +3441,60 @@ static MigIterateState migration_iteration_run(MigrationState *s)
Error *local_err = NULL;
bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE;
bool can_switchover = migration_can_switchover(s);
+ bool complete_ready;
+ /* Fast path - get the estimated amount of pending data */
qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy);
pending_size = must_precopy + can_postcopy;
trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy);
- if (pending_size < s->threshold_size) {
- qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy);
- pending_size = must_precopy + can_postcopy;
- trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy);
+ if (in_postcopy) {
+ /*
+     * Iterate in postcopy until all pending data is flushed. Note that
+     * postcopy completion doesn't rely on can_switchover, because being
+     * in POSTCOPY_ACTIVE means switchover has already happened.
+ */
+ complete_ready = !pending_size;
+ } else {
+ /*
+ * Exact pending reporting is only needed for precopy. Taking RAM
+ * as example, there'll be no extra dirty information after
+ * postcopy started, so ESTIMATE should always match with EXACT
+ * during postcopy phase.
+ */
+ if (pending_size < s->threshold_size) {
+ qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy);
+ pending_size = must_precopy + can_postcopy;
+ trace_migrate_pending_exact(pending_size, must_precopy,
+ can_postcopy);
+ }
+
+ /* Should we switch to postcopy now? */
+ if (must_precopy <= s->threshold_size &&
+ can_switchover && qatomic_read(&s->start_postcopy)) {
+ if (postcopy_start(s, &local_err)) {
+ migrate_set_error(s, local_err);
+ error_report_err(local_err);
+ }
+ return MIG_ITERATE_SKIP;
+ }
+
+ /*
+ * For precopy, migration can complete only if:
+ *
+ * (1) Switchover is acknowledged by destination
+ * (2) Pending size is no more than the threshold specified
+ * (which was calculated from expected downtime)
+ */
+ complete_ready = can_switchover && (pending_size <= s->threshold_size);
}
- if ((!pending_size || pending_size < s->threshold_size) && can_switchover) {
+ if (complete_ready) {
trace_migration_thread_low_pending(pending_size);
migration_completion(s);
return MIG_ITERATE_BREAK;
}
- /* Still a significant amount to transfer */
- if (!in_postcopy && must_precopy <= s->threshold_size && can_switchover &&
- qatomic_read(&s->start_postcopy)) {
- if (postcopy_start(s, &local_err)) {
- migrate_set_error(s, local_err);
- error_report_err(local_err);
- }
- return MIG_ITERATE_SKIP;
- }
-
/* Just another iteration step */
qemu_savevm_state_iterate(s->to_dst_file, in_postcopy);
return MIG_ITERATE_RESUME;
@@ -3887,9 +3919,8 @@ static void *bg_migration_thread(void *opaque)
while (migration_is_active()) {
MigIterateState iter_state = bg_migration_iteration_run(s);
- if (iter_state == MIG_ITERATE_SKIP) {
- continue;
- } else if (iter_state == MIG_ITERATE_BREAK) {
+
+ if (iter_state == MIG_ITERATE_BREAK) {
break;
}
diff --git a/migration/migration.h b/migration/migration.h
index 739289d..01329bf 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -546,7 +546,7 @@ void migrate_send_rp_shut(MigrationIncomingState *mis,
void migrate_send_rp_pong(MigrationIncomingState *mis,
uint32_t value);
int migrate_send_rp_req_pages(MigrationIncomingState *mis, RAMBlock *rb,
- ram_addr_t start, uint64_t haddr);
+ ram_addr_t start, uint64_t haddr, uint32_t tid);
int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
RAMBlock *rb, ram_addr_t start);
void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
diff --git a/migration/multifd-device-state.c b/migration/multifd-device-state.c
index 94222d0..fce64f0 100644
--- a/migration/multifd-device-state.c
+++ b/migration/multifd-device-state.c
@@ -131,7 +131,7 @@ bool multifd_device_state_supported(void)
static void multifd_device_state_save_thread_data_free(void *opaque)
{
- SaveLiveCompletePrecopyThreadData *data = opaque;
+ SaveCompletePrecopyThreadData *data = opaque;
g_clear_pointer(&data->idstr, g_free);
g_free(data);
@@ -139,7 +139,7 @@ static void multifd_device_state_save_thread_data_free(void *opaque)
static int multifd_device_state_save_thread(void *opaque)
{
- SaveLiveCompletePrecopyThreadData *data = opaque;
+ SaveCompletePrecopyThreadData *data = opaque;
g_autoptr(Error) local_err = NULL;
if (!data->hdlr(data, &local_err)) {
@@ -170,18 +170,18 @@ bool multifd_device_state_save_thread_should_exit(void)
}
void
-multifd_spawn_device_state_save_thread(SaveLiveCompletePrecopyThreadHandler hdlr,
+multifd_spawn_device_state_save_thread(SaveCompletePrecopyThreadHandler hdlr,
char *idstr, uint32_t instance_id,
void *opaque)
{
- SaveLiveCompletePrecopyThreadData *data;
+ SaveCompletePrecopyThreadData *data;
assert(multifd_device_state_supported());
assert(multifd_send_device_state);
assert(!qatomic_read(&multifd_send_device_state->threads_abort));
- data = g_new(SaveLiveCompletePrecopyThreadData, 1);
+ data = g_new(SaveCompletePrecopyThreadData, 1);
data->hdlr = hdlr;
data->idstr = g_strdup(idstr);
data->instance_id = instance_id;
diff --git a/migration/options.c b/migration/options.c
index 162c72c..4e923a2 100644
--- a/migration/options.c
+++ b/migration/options.c
@@ -187,6 +187,8 @@ const Property migration_properties[] = {
DEFINE_PROP_MIG_CAP("x-postcopy-ram", MIGRATION_CAPABILITY_POSTCOPY_RAM),
DEFINE_PROP_MIG_CAP("x-postcopy-preempt",
MIGRATION_CAPABILITY_POSTCOPY_PREEMPT),
+ DEFINE_PROP_MIG_CAP("postcopy-blocktime",
+ MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME),
DEFINE_PROP_MIG_CAP("x-colo", MIGRATION_CAPABILITY_X_COLO),
DEFINE_PROP_MIG_CAP("x-release-ram", MIGRATION_CAPABILITY_RELEASE_RAM),
DEFINE_PROP_MIG_CAP("x-return-path", MIGRATION_CAPABILITY_RETURN_PATH),
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index 75fd310..45af9a3 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -110,19 +110,104 @@ void postcopy_thread_create(MigrationIncomingState *mis,
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>
+/*
+ * Here we use 24 buckets, which means the last bucket will cover [2^24 us,
+ * 2^25 us) ~= [16, 32) seconds. That should be far enough to record even
+ * extreme (performance-wise broken) 1G pages moving over, which can
+ * sometimes take a few seconds for various reasons. Anything longer than
+ * that is not worth accounting for.
+ */
+#define BLOCKTIME_LATENCY_BUCKET_N (24)
+
+/* All the time records are in unit of nanoseconds */
typedef struct PostcopyBlocktimeContext {
- /* time when page fault initiated per vCPU */
- uint32_t *page_fault_vcpu_time;
- /* page address per vCPU */
- uintptr_t *vcpu_addr;
- uint32_t total_blocktime;
/* blocktime per vCPU */
- uint32_t *vcpu_blocktime;
+ uint64_t *vcpu_blocktime_total;
+ /* count of faults per vCPU */
+ uint64_t *vcpu_faults_count;
+ /*
+ * count of currently blocked faults per vCPU.
+ *
+ * NOTE: Normally there should only be one fault in-progress per vCPU
+ * thread, so logically vcpu_faults_current[] for any vCPU should be
+ * either zero or one. However, there are reasons we can see more than
+ * one fault on the same vCPU thread.
+ *
+ * CASE (1): since the process of resolving faults (ioctl(UFFDIO_COPY),
+ * for example) is done before taking the mutex that protects the
+ * blocktime context, it can happen that we read more than one faulted
+ * address per vCPU.
+ *
+ * One example where we can see two faulted addresses for one vCPU:
+ *
+ * vcpu1 thread fault thread resolve thread
+ * ============ ============ ==============
+ *
+ * faulted on addr1
+ * read uffd msg (addr1)
+ * MUTEX_LOCK
+ * add entry (cpu1, addr1)
+ * MUTEX_UNLOCK
+ * request remote fault (addr1)
+ * resolve fault (addr1)
+ * addr1 resolved, continue..
+ * faulted on addr2
+ * read uffd msg (addr2)
+ * MUTEX_LOCK
+ * add entry (cpu1, addr2) <--------------- [A]
+ * MUTEX_UNLOCK
+ * MUTEX_LOCK
+ * remove entry (cpu1, addr1)
+ * MUTEX_UNLOCK
+ *
+ * In the above case, we may see the (cpu1, addr1) and (cpu1, addr2)
+ * entries together at [A], if the fault thread takes the lock before
+ * the resolve thread. Use this counter to track such cases; only when
+ * it reaches zero do we know the vCPU is no longer blocked.
+ *
+ * CASE (2): theoretically (the author admits to not having verified
+ * this), one vCPU thread can also generate more than one userfaultfd
+ * message for the same address, e.g. if the fault gets retried before
+ * a resolution arrives. In that extremely rare case, we could also
+ * see two (cpu1, addr1) entries.
+ *
+ * In all cases, this array keeps us prepared for such re-entrancy.
+ *
+ * Using uint8_t should be more than enough for now. For example, with
+ * only one resolve thread (the postcopy ram listening thread), the
+ * maximum number of concurrent fault entries should be two.
+ */
+ uint8_t *vcpu_faults_current;
+ /*
+ * The hash that contains addr1->[(cpu1,ts1),(cpu2,ts2) ...] mappings.
+ * Each element of a list is a (CPU index, fault timestamp) tuple
+ * showing that a fault was requested.
+ */
+ GHashTable *vcpu_addr_hash;
+ /*
+ * Each bucket stores the count of faults that were resolved within the
+ * bucket window [2^N us, 2^(N+1) us).
+ */
+ uint64_t latency_buckets[BLOCKTIME_LATENCY_BUCKET_N];
+ /* total blocktime when all vCPUs are stopped */
+ uint64_t total_blocktime;
/* point in time when last page fault was initiated */
- uint32_t last_begin;
+ uint64_t last_begin;
/* number of vCPUs currently suspended */
int smp_cpus_down;
- uint64_t start_time;
+
+ /*
+ * Fast path for looking up vcpu_index from tid. NOTE: this result
+ * only reflects the vCPU setup while postcopy is running. It may not
+ * always match the current vCPU setup because vCPUs can be hot
+ * attached/detached after migration completes. However, it should be
+ * stable for as long as blocktime is using the structure.
+ */
+ GHashTable *tid_to_vcpu_hash;
+ /* Count of non-vCPU faults. This is only for debugging purposes. */
+ uint64_t non_vcpu_faults;
+ /* total blocktime when a non-vCPU thread is stopped */
+ uint64_t non_vcpu_blocktime_total;
/*
* Handler for exit event, necessary for
@@ -131,11 +216,41 @@ typedef struct PostcopyBlocktimeContext {
Notifier exit_notifier;
} PostcopyBlocktimeContext;
+typedef struct {
+ /* The time the fault was triggered */
+ uint64_t fault_time;
+ /*
+ * The vCPU index that was blocked; when cpu==-1, the fault came
+ * from a non-vCPU thread.
+ */
+ int cpu;
+} BlocktimeVCPUEntry;
+
+/* Alloc an entry to record a vCPU fault */
+static BlocktimeVCPUEntry *
+blocktime_vcpu_entry_alloc(int cpu, uint64_t fault_time)
+{
+ BlocktimeVCPUEntry *entry = g_new(BlocktimeVCPUEntry, 1);
+
+ entry->fault_time = fault_time;
+ entry->cpu = cpu;
+
+ return entry;
+}
+
+/* Free a @GList of @BlocktimeVCPUEntry */
+static void blocktime_vcpu_list_free(gpointer data)
+{
+ g_list_free_full(data, g_free);
+}
+
static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx)
{
- g_free(ctx->page_fault_vcpu_time);
- g_free(ctx->vcpu_addr);
- g_free(ctx->vcpu_blocktime);
+ g_hash_table_destroy(ctx->tid_to_vcpu_hash);
+ g_hash_table_destroy(ctx->vcpu_addr_hash);
+ g_free(ctx->vcpu_blocktime_total);
+ g_free(ctx->vcpu_faults_count);
+ g_free(ctx->vcpu_faults_current);
g_free(ctx);
}
@@ -146,32 +261,65 @@ static void migration_exit_cb(Notifier *n, void *data)
destroy_blocktime_context(ctx);
}
+static GHashTable *blocktime_init_tid_to_vcpu_hash(void)
+{
+ /*
+ * A TID, as an unsigned int, can be used directly as the key. However,
+ * the CPU index can NOT be used directly as the value, because index 0
+ * would be stored as NULL; on lookup we could then never tell "index 0"
+ * from "not found". Hence use an indirection for the CPU index.
+ */
+ GHashTable *table = g_hash_table_new_full(g_direct_hash, g_direct_equal,
+ NULL, g_free);
+ CPUState *cpu;
+
+ /*
+ * Initialize the tid->cpu_id mapping for lookups. The caller needs to
+ * make sure that, by the time we reach here, the CPU topology is frozen
+ * and will remain stable for the whole blocktime trapping period.
+ */
+ CPU_FOREACH(cpu) {
+ int *value = g_new(int, 1);
+
+ *value = cpu->cpu_index;
+ g_hash_table_insert(table,
+ GUINT_TO_POINTER((uint32_t)cpu->thread_id),
+ value);
+ trace_postcopy_blocktime_tid_cpu_map(cpu->cpu_index, cpu->thread_id);
+ }
+
+ return table;
+}
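
The indirection matters because g_direct_hash tables return NULL for a missing key, so a CPU index of 0 stored as a direct pointer would be indistinguishable from "not found". A minimal standalone sketch of the pattern (plain GLib, outside the QEMU tree; the tid value is made up):

    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        /* Values are int pointers so that index 0 != NULL ("not found") */
        GHashTable *tbl = g_hash_table_new_full(g_direct_hash, g_direct_equal,
                                                NULL, g_free);
        guint tid = 12345;              /* hypothetical thread id */
        int *value = g_new(int, 1);

        *value = 0;                     /* CPU index 0 */
        g_hash_table_insert(tbl, GUINT_TO_POINTER(tid), value);

        int *found = g_hash_table_lookup(tbl, GUINT_TO_POINTER(tid));
        printf("cpu=%d\n", found ? *found : -1);    /* prints: cpu=0 */

        g_hash_table_destroy(tbl);
        return 0;
    }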
+
static struct PostcopyBlocktimeContext *blocktime_context_new(void)
{
MachineState *ms = MACHINE(qdev_get_machine());
unsigned int smp_cpus = ms->smp.cpus;
PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
- ctx->page_fault_vcpu_time = g_new0(uint32_t, smp_cpus);
- ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
- ctx->vcpu_blocktime = g_new0(uint32_t, smp_cpus);
- ctx->exit_notifier.notify = migration_exit_cb;
- ctx->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
- qemu_add_exit_notifier(&ctx->exit_notifier);
- return ctx;
-}
+ /* Initialize all counters to zero */
+ memset(ctx->latency_buckets, 0, sizeof(ctx->latency_buckets));
-static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
-{
- MachineState *ms = MACHINE(qdev_get_machine());
- uint32List *list = NULL;
- int i;
+ ctx->vcpu_blocktime_total = g_new0(uint64_t, smp_cpus);
+ ctx->vcpu_faults_count = g_new0(uint64_t, smp_cpus);
+ ctx->vcpu_faults_current = g_new0(uint8_t, smp_cpus);
+ ctx->tid_to_vcpu_hash = blocktime_init_tid_to_vcpu_hash();
- for (i = ms->smp.cpus - 1; i >= 0; i--) {
- QAPI_LIST_PREPEND(list, ctx->vcpu_blocktime[i]);
- }
+ /*
+ * The key (a host virtual address) is always gpointer-sized on both
+ * 32-bit and 64-bit systems, so it fits as a direct key.
+ *
+ * The value will be a list of BlocktimeVCPUEntry entries.
+ */
+ ctx->vcpu_addr_hash = g_hash_table_new_full(g_direct_hash,
+ g_direct_equal,
+ NULL,
+ blocktime_vcpu_list_free);
+
+ ctx->exit_notifier.notify = migration_exit_cb;
+ qemu_add_exit_notifier(&ctx->exit_notifier);
- return list;
+ return ctx;
}
/*
@@ -185,18 +333,64 @@ void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
MigrationIncomingState *mis = migration_incoming_get_current();
PostcopyBlocktimeContext *bc = mis->blocktime_ctx;
+ MachineState *ms = MACHINE(qdev_get_machine());
+ uint64_t latency_total = 0, faults = 0;
+ uint32List *list_blocktime = NULL;
+ uint64List *list_latency = NULL;
+ uint64List *latency_buckets = NULL;
+ int i;
if (!bc) {
return;
}
+ for (i = ms->smp.cpus - 1; i >= 0; i--) {
+ uint64_t latency, total, count;
+
+ /* Convert ns -> ms */
+ QAPI_LIST_PREPEND(list_blocktime,
+ (uint32_t)(bc->vcpu_blocktime_total[i] / SCALE_MS));
+
+ /* The rest in nanoseconds */
+ total = bc->vcpu_blocktime_total[i];
+ latency_total += total;
+ count = bc->vcpu_faults_count[i];
+ faults += count;
+
+ if (count) {
+ latency = total / count;
+ } else {
+ /* No fault detected */
+ latency = 0;
+ }
+
+ QAPI_LIST_PREPEND(list_latency, latency);
+ }
+
+ for (i = BLOCKTIME_LATENCY_BUCKET_N - 1; i >= 0; i--) {
+ QAPI_LIST_PREPEND(latency_buckets, bc->latency_buckets[i]);
+ }
+
+ latency_total += bc->non_vcpu_blocktime_total;
+ faults += bc->non_vcpu_faults;
+
+ info->has_postcopy_non_vcpu_latency = true;
+ info->postcopy_non_vcpu_latency = bc->non_vcpu_faults ?
+ (bc->non_vcpu_blocktime_total / bc->non_vcpu_faults) : 0;
info->has_postcopy_blocktime = true;
- info->postcopy_blocktime = bc->total_blocktime;
+ /* Convert ns -> ms */
+ info->postcopy_blocktime = (uint32_t)(bc->total_blocktime / SCALE_MS);
info->has_postcopy_vcpu_blocktime = true;
- info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
+ info->postcopy_vcpu_blocktime = list_blocktime;
+ info->has_postcopy_latency = true;
+ info->postcopy_latency = faults ? (latency_total / faults) : 0;
+ info->has_postcopy_vcpu_latency = true;
+ info->postcopy_vcpu_latency = list_latency;
+ info->has_postcopy_latency_dist = true;
+ info->postcopy_latency_dist = latency_buckets;
}
-static uint32_t get_postcopy_total_blocktime(void)
+static uint64_t get_postcopy_total_blocktime(void)
{
MigrationIncomingState *mis = migration_incoming_get_current();
PostcopyBlocktimeContext *bc = mis->blocktime_ctx;
@@ -300,13 +494,13 @@ static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis,
}
#ifdef UFFD_FEATURE_THREAD_ID
+ /*
+ * Postcopy blocktime needs the THREAD_ID feature (introduced in
+ * Linux in 2017). Always try to enable it when the build environment
+ * supports it.
+ */
if (UFFD_FEATURE_THREAD_ID & supported_features) {
asked_features |= UFFD_FEATURE_THREAD_ID;
- if (migrate_postcopy_blocktime()) {
- if (!mis->blocktime_ctx) {
- mis->blocktime_ctx = blocktime_context_new();
- }
- }
}
#endif
@@ -752,8 +946,12 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd,
pagesize);
}
+/*
+ * NOTE: @tid is only used when the postcopy-blocktime feature is enabled,
+ * and it is optional: when zero is provided, fault accounting is skipped.
+ */
static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
- ram_addr_t start, uint64_t haddr)
+ ram_addr_t start, uint64_t haddr, uint32_t tid)
{
void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
@@ -772,7 +970,7 @@ static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
return received ? 0 : postcopy_place_page_zero(mis, aligned, rb);
}
- return migrate_send_rp_req_pages(mis, rb, start, haddr);
+ return migrate_send_rp_req_pages(mis, rb, start, haddr, tid);
}
/*
@@ -793,83 +991,204 @@ int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
qemu_ram_get_idstr(rb), rb_offset);
return postcopy_wake_shared(pcfd, client_addr, rb);
}
- postcopy_request_page(mis, rb, aligned_rbo, client_addr);
+ /* TODO: support blocktime tracking */
+ postcopy_request_page(mis, rb, aligned_rbo, client_addr, 0);
return 0;
}
-static int get_mem_fault_cpu_index(uint32_t pid)
+static int blocktime_get_vcpu(PostcopyBlocktimeContext *ctx, uint32_t tid)
{
- CPUState *cpu_iter;
+ int *found;
- CPU_FOREACH(cpu_iter) {
- if (cpu_iter->thread_id == pid) {
- trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
- return cpu_iter->cpu_index;
- }
+ found = g_hash_table_lookup(ctx->tid_to_vcpu_hash, GUINT_TO_POINTER(tid));
+ if (!found) {
+ /*
+ * NOTE: this is possible, because QEMU's non-vCPU threads can
+ * also access a missing page. Or, when KVM async page faults are
+ * enabled, a fault can even come from a kworker.
+ */
+ return -1;
}
- trace_get_mem_fault_cpu_index(-1, pid);
- return -1;
+
+ return *found;
}
-static uint32_t get_low_time_offset(PostcopyBlocktimeContext *dc)
+static uint64_t get_current_ns(void)
{
- int64_t start_time_offset = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
- dc->start_time;
- return start_time_offset < 1 ? 1 : start_time_offset & UINT32_MAX;
+ return (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+}
+
+/*
+ * Inject an (cpu, fault_time) entry into the database, using addr as key.
+ * When cpu==-1, it means it's a non-vCPU fault.
+ */
+static void blocktime_fault_inject(PostcopyBlocktimeContext *ctx,
+ uintptr_t addr, int cpu, uint64_t time)
+{
+ BlocktimeVCPUEntry *entry = blocktime_vcpu_entry_alloc(cpu, time);
+ GHashTable *table = ctx->vcpu_addr_hash;
+ gpointer key = (gpointer)addr;
+ GList *head, *list;
+ gboolean result;
+
+ head = g_hash_table_lookup(table, key);
+ if (head) {
+ /*
+ * If it existed, steal @head for the list operation rather than
+ * freeing it, and make sure the steal succeeded.
+ */
+ result = g_hash_table_steal(table, key);
+ assert(result == TRUE);
+ }
+
+ /*
+ * Now the key is guaranteed to be absent. Two cases:
+ *
+ * (1) There was no existing entry: the new list contains just this
+ * one entry. Insert it.
+ * (2) There were existing entries: after stealing, we own the list,
+ * so prepend the new entry and re-insert the result.
+ */
+ list = g_list_prepend(head, entry);
+ g_hash_table_insert(table, key, list);
+
+ trace_postcopy_blocktime_begin(addr, time, cpu, !!head);
}
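
The steal-before-reinsert step is load-bearing: g_list_prepend() returns a new head pointer, and overwriting the old value in place would trigger the table's value-destroy notify on the old head. A minimal sketch of the same pattern in plain GLib (the helper name is illustrative):

    #include <glib.h>

    /* Prepend @item to the list stored under @key, keeping ownership of
     * the list head consistent with the hash table's destroy notify. */
    static void list_table_prepend(GHashTable *table, gpointer key,
                                   gpointer item)
    {
        GList *head = g_hash_table_lookup(table, key);

        if (head) {
            /* Take the list back so the destroy notify won't free it */
            gboolean stolen = g_hash_table_steal(table, key);
            g_assert(stolen);
        }
        /* Prepending changes the head; re-insert the new head */
        g_hash_table_insert(table, key, g_list_prepend(head, item));
    }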
/*
- * This function is being called when pagefault occurs. It
- * tracks down vCPU blocking time.
+ * This function is being called when pagefault occurs. It tracks down vCPU
+ * blocking time. It's protected by @page_request_mutex.
*
* @addr: faulted host virtual address
* @ptid: faulted process thread id
* @rb: ramblock appropriate to addr
*/
-static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
- RAMBlock *rb)
+void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
+ RAMBlock *rb)
{
- int cpu, already_received;
+ int cpu;
MigrationIncomingState *mis = migration_incoming_get_current();
PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
- uint32_t low_time_offset;
+ uint64_t current;
if (!dc || ptid == 0) {
return;
}
- cpu = get_mem_fault_cpu_index(ptid);
- if (cpu < 0) {
- return;
+
+ /*
+ * The caller should only inject a blocktime entry when the page is
+ * still missing.
+ */
+ assert(!ramblock_recv_bitmap_test(rb, (void *)addr));
+
+ current = get_current_ns();
+ cpu = blocktime_get_vcpu(dc, ptid);
+
+ if (cpu >= 0) {
+ /* How many faults on this vCPU in total? */
+ dc->vcpu_faults_count[cpu]++;
+
+ /*
+ * Account how many concurrent faults on this vCPU we trapped. See
+ * comments above vcpu_faults_current[] on why it can be more than one.
+ */
+ if (dc->vcpu_faults_current[cpu]++ == 0) {
+ dc->smp_cpus_down++;
+ /*
+ * We use last_begin to record both (1) the first fault on this
+ * specific vCPU, and (2) the moment the last vCPU became blocked.
+ * It's only used to calculate system-wide blocktime.
+ */
+ dc->last_begin = current;
+ }
+
+ /* Make sure it won't overflow - it really never should! */
+ assert(dc->vcpu_faults_current[cpu] <= 255);
+ } else {
+ /*
+ * For non-vCPU thread faults, we don't care about the tid, the cpu
+ * index, or how long the thread stays blocked (e.g., a kworker
+ * helping KVM with async_pf=on may be blocked without affecting
+ * guest responsiveness), but we do care about latency. Track it
+ * with cpu=-1.
+ *
+ * Note that this will NOT affect blocktime reports of vCPUs being
+ * blocked, only the system-wide latency reports.
+ */
+ dc->non_vcpu_faults++;
}
- low_time_offset = get_low_time_offset(dc);
- if (dc->vcpu_addr[cpu] == 0) {
- qatomic_inc(&dc->smp_cpus_down);
+ blocktime_fault_inject(dc, addr, cpu, current);
+}
+
+static void blocktime_latency_account(PostcopyBlocktimeContext *ctx,
+ uint64_t time_us)
+{
+ /*
+ * Convert the time (in us) to the bucket index it belongs to. Take
+ * extra care with time_us==0; rare as it is, when it happens put it
+ * into bucket 0.
+ */
+ int index = time_us ? (63 - clz64(time_us)) : 0;
+
+ assert(index >= 0);
+
+ /* If it's too large, put into top bucket */
+ if (index >= BLOCKTIME_LATENCY_BUCKET_N) {
+ index = BLOCKTIME_LATENCY_BUCKET_N - 1;
}
- qatomic_xchg(&dc->last_begin, low_time_offset);
- qatomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
- qatomic_xchg(&dc->vcpu_addr[cpu], addr);
+ ctx->latency_buckets[index]++;
+}
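
Concretely, the index is floor(log2(time_us)): a 300us fault has its highest set bit at position 8 (256 <= 300 < 512), so it lands in bucket 8, matching the [2^N us, 2^(N+1) us) windows documented in the QAPI schema later in this patch. A standalone check of the same arithmetic, substituting __builtin_clzll for QEMU's clz64:

    #include <assert.h>
    #include <stdint.h>

    static int latency_bucket(uint64_t time_us, int nbuckets)
    {
        int index = time_us ? (63 - __builtin_clzll(time_us)) : 0;

        return index >= nbuckets ? nbuckets - 1 : index;
    }

    int main(void)
    {
        assert(latency_bucket(0, 24) == 0);            /* degenerate case */
        assert(latency_bucket(300, 24) == 8);          /* [256us, 512us) */
        assert(latency_bucket(1 << 20, 24) == 20);     /* ~1s window */
        assert(latency_bucket(UINT64_MAX, 24) == 23);  /* clamped to top */
        return 0;
    }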
+
+typedef struct {
+ PostcopyBlocktimeContext *ctx;
+ uint64_t current;
+ int affected_cpus;
+ int affected_non_cpus;
+} BlockTimeVCPUIter;
+
+static void blocktime_cpu_list_iter_fn(gpointer data, gpointer user_data)
+{
+ BlockTimeVCPUIter *iter = user_data;
+ PostcopyBlocktimeContext *ctx = iter->ctx;
+ BlocktimeVCPUEntry *entry = data;
+ uint64_t time_passed;
+ int cpu = entry->cpu;
/*
- * check it here, not at the beginning of the function,
- * due to, check could occur early than bitmap_set in
- * qemu_ufd_copy_ioctl
+ * Time should never go backwards, so when the fault is resolved it
+ * must be later than when it was triggered.
*/
- already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
- if (already_received) {
- qatomic_xchg(&dc->vcpu_addr[cpu], 0);
- qatomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
- qatomic_dec(&dc->smp_cpus_down);
+ assert(iter->current >= entry->fault_time);
+ time_passed = iter->current - entry->fault_time;
+
+ /* Latency buckets are in microseconds */
+ blocktime_latency_account(ctx, time_passed / SCALE_US);
+
+ if (cpu >= 0) {
+ /*
+ * If we resolved all pending faults on one vCPU due to this page
+ * resolution, take a note.
+ */
+ if (--ctx->vcpu_faults_current[cpu] == 0) {
+ ctx->vcpu_blocktime_total[cpu] += time_passed;
+ iter->affected_cpus += 1;
+ }
+ trace_postcopy_blocktime_end_one(cpu, ctx->vcpu_faults_current[cpu]);
+ } else {
+ iter->affected_non_cpus++;
+ ctx->non_vcpu_blocktime_total += time_passed;
+ /*
+ * We do not track the number of pending non-vCPU faults because
+ * for them we only care about latency, not blocktime.
+ */
+ trace_postcopy_blocktime_end_one(-1, 0);
}
- trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
- cpu, already_received);
}
/*
- * This function just provide calculated blocktime per cpu and trace it.
- * Total blocktime is calculated in mark_postcopy_blocktime_end.
- *
+ * This function just provides the calculated blocktime per cpu and traces
+ * it. Total blocktime is calculated in mark_postcopy_blocktime_end. It's
+ * protected by @page_request_mutex.
*
* Assume we have 3 CPU
*
@@ -899,48 +1218,45 @@ static void mark_postcopy_blocktime_end(uintptr_t addr)
PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
MachineState *ms = MACHINE(qdev_get_machine());
unsigned int smp_cpus = ms->smp.cpus;
- int i, affected_cpu = 0;
- bool vcpu_total_blocktime = false;
- uint32_t read_vcpu_time, low_time_offset;
+ BlockTimeVCPUIter iter = {
+ .current = get_current_ns(),
+ .affected_cpus = 0,
+ .affected_non_cpus = 0,
+ .ctx = dc,
+ };
+ gpointer key = (gpointer)addr;
+ GHashTable *table;
+ GList *list;
if (!dc) {
return;
}
- low_time_offset = get_low_time_offset(dc);
- /* lookup cpu, to clear it,
- * that algorithm looks straightforward, but it's not
- * optimal, more optimal algorithm is keeping tree or hash
- * where key is address value is a list of */
- for (i = 0; i < smp_cpus; i++) {
- uint32_t vcpu_blocktime = 0;
-
- read_vcpu_time = qatomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
- if (qatomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
- read_vcpu_time == 0) {
- continue;
- }
- qatomic_xchg(&dc->vcpu_addr[i], 0);
- vcpu_blocktime = low_time_offset - read_vcpu_time;
- affected_cpu += 1;
- /* we need to know is that mark_postcopy_end was due to
- * faulted page, another possible case it's prefetched
- * page and in that case we shouldn't be here */
- if (!vcpu_total_blocktime &&
- qatomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
- vcpu_total_blocktime = true;
- }
- /* continue cycle, due to one page could affect several vCPUs */
- dc->vcpu_blocktime[i] += vcpu_blocktime;
+ table = dc->vcpu_addr_hash;
+ /* the address wasn't tracked at all? */
+ list = g_hash_table_lookup(table, key);
+ if (!list) {
+ return;
}
- qatomic_sub(&dc->smp_cpus_down, affected_cpu);
- if (vcpu_total_blocktime) {
- dc->total_blocktime += low_time_offset - qatomic_fetch_add(
- &dc->last_begin, 0);
+ /*
+ * Loop over the set of vCPUs that got blocked on this addr and do
+ * the blocktime accounting. After that, remove the whole list.
+ */
+ g_list_foreach(list, blocktime_cpu_list_iter_fn, &iter);
+ g_hash_table_remove(table, key);
+
+ /*
+ * If all vCPUs were down and copying this page unblocks some of
+ * them, then the system-level blocktime ends here.
+ */
+ if (dc->smp_cpus_down == smp_cpus && iter.affected_cpus) {
+ dc->total_blocktime += iter.current - dc->last_begin;
}
- trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
- affected_cpu);
+ dc->smp_cpus_down -= iter.affected_cpus;
+
+ trace_postcopy_blocktime_end(addr, iter.current, iter.affected_cpus,
+ iter.affected_non_cpus);
}
static void postcopy_pause_fault_thread(MigrationIncomingState *mis)
@@ -1068,17 +1384,14 @@ static void *postcopy_ram_fault_thread(void *opaque)
qemu_ram_get_idstr(rb),
rb_offset,
msg.arg.pagefault.feat.ptid);
- mark_postcopy_blocktime_begin(
- (uintptr_t)(msg.arg.pagefault.address),
- msg.arg.pagefault.feat.ptid, rb);
-
retry:
/*
* Send the request to the source - we want to request one
* of our host page sizes (which is >= TPS)
*/
ret = postcopy_request_page(mis, rb, rb_offset,
- msg.arg.pagefault.address);
+ msg.arg.pagefault.address,
+ msg.arg.pagefault.feat.ptid);
if (ret) {
/* May be network failure, try to wait for recovery */
postcopy_pause_fault_thread(mis);
@@ -1221,6 +1534,11 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
return -1;
}
+ if (migrate_postcopy_blocktime()) {
+ assert(mis->blocktime_ctx == NULL);
+ mis->blocktime_ctx = blocktime_context_new();
+ }
+
/* Now an eventfd we use to tell the fault-thread to quit */
mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC);
if (mis->userfault_event_fd == -1) {
@@ -1299,8 +1617,8 @@ static int qemu_ufd_copy_ioctl(MigrationIncomingState *mis, void *host_addr,
qemu_cond_signal(&mis->page_request_cond);
}
}
- qemu_mutex_unlock(&mis->page_request_mutex);
mark_postcopy_blocktime_end((uintptr_t)host_addr);
+ qemu_mutex_unlock(&mis->page_request_mutex);
}
return ret;
}
@@ -1430,6 +1748,11 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd,
{
g_assert_not_reached();
}
+
+void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
+ RAMBlock *rb)
+{
+}
#endif
/* ------------------------------------------------------------------------- */
diff --git a/migration/postcopy-ram.h b/migration/postcopy-ram.h
index a6df1b2..3852141 100644
--- a/migration/postcopy-ram.h
+++ b/migration/postcopy-ram.h
@@ -196,5 +196,7 @@ void postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file);
void postcopy_preempt_setup(MigrationState *s);
int postcopy_preempt_establish_channel(MigrationState *s);
bool postcopy_is_paused(MigrationStatus status);
+void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
+ RAMBlock *rb);
#endif
diff --git a/migration/ram.c b/migration/ram.c
index 2140785..7208bc1 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -835,8 +835,10 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs,
* protections isn't needed as we know there will be either (1) no
* further writes if migration will complete, or (2) migration fails
* at last then tracking isn't needed either.
+ *
+ * Do the same for postcopy due to the same reason.
*/
- if (!rs->last_stage) {
+ if (!rs->last_stage && !migration_in_postcopy()) {
/*
* Clear dirty bitmap if needed. This _must_ be called before we
* send any of the page in the chunk because we need to make sure
@@ -2286,16 +2288,18 @@ static int ram_find_and_save_block(RAMState *rs)
if (!get_queued_page(rs, pss)) {
/* priority queue empty, so just search for something dirty */
int res = find_dirty_block(rs, pss);
- if (res != PAGE_DIRTY_FOUND) {
- if (res == PAGE_ALL_CLEAN) {
- break;
- } else if (res == PAGE_TRY_AGAIN) {
- continue;
- } else if (res < 0) {
- pages = res;
- break;
- }
+
+ if (res == PAGE_ALL_CLEAN) {
+ break;
+ } else if (res == PAGE_TRY_AGAIN) {
+ continue;
+ } else if (res < 0) {
+ pages = res;
+ break;
}
+
+ /* Otherwise we must have a dirty page to move */
+ assert(res == PAGE_DIRTY_FOUND);
}
pages = ram_save_host_page(rs, pss);
if (pages) {
@@ -3288,6 +3292,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
RAMState *rs = *temp;
int ret = 0;
+ trace_ram_save_complete(rs->migration_dirty_pages, 0);
+
rs->last_stage = !migration_in_colo_state();
WITH_RCU_READ_LOCK_GUARD() {
@@ -3351,6 +3357,9 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
}
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
+
+ trace_ram_save_complete(rs->migration_dirty_pages, 1);
+
return qemu_fflush(f);
}
@@ -4548,8 +4557,7 @@ void postcopy_preempt_shutdown_file(MigrationState *s)
static SaveVMHandlers savevm_ram_handlers = {
.save_setup = ram_save_setup,
.save_live_iterate = ram_save_iterate,
- .save_live_complete_postcopy = ram_save_complete,
- .save_live_complete_precopy = ram_save_complete,
+ .save_complete = ram_save_complete,
.has_postcopy = ram_has_postcopy,
.state_pending_exact = ram_state_pending_exact,
.state_pending_estimate = ram_state_pending_estimate,
diff --git a/migration/savevm.c b/migration/savevm.c
index bb04a45..fabbeb2 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1484,37 +1484,54 @@ bool should_send_vmdesc(void)
return !machine->suppress_vmdesc;
}
+static bool qemu_savevm_complete_exists(SaveStateEntry *se)
+{
+ return se->ops && se->ops->save_complete;
+}
+
+/*
+ * Invoke the ->save_complete() if necessary.
+ * Returns: 0 if the SE was skipped or completed successfully, <0 on error.
+ */
+static int qemu_savevm_complete(SaveStateEntry *se, QEMUFile *f)
+{
+ int ret;
+
+ if (se->ops->is_active) {
+ if (!se->ops->is_active(se->opaque)) {
+ return 0;
+ }
+ }
+
+ trace_savevm_section_start(se->idstr, se->section_id);
+ save_section_header(f, se, QEMU_VM_SECTION_END);
+ ret = se->ops->save_complete(f, se->opaque);
+ trace_savevm_section_end(se->idstr, se->section_id, ret);
+ save_section_footer(f, se);
+
+ if (ret < 0) {
+ qemu_file_set_error(f, ret);
+ }
+
+ return ret;
+}
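
To see what the unified hook means for device code: a handler that used to point both save_live_complete_precopy and save_live_complete_postcopy at the same function now registers a single save_complete callback, as the ram.c hunk later in this patch does. A hypothetical device table might now read (the foo_* callbacks and their exact signatures are assumptions for illustration; SaveVMHandlers comes from include/migration/register.h in the QEMU tree):

    /* Hypothetical callbacks, declared only to make the sketch concrete */
    static int foo_save_setup(QEMUFile *f, void *opaque, Error **errp);
    static int foo_save_iterate(QEMUFile *f, void *opaque);
    static int foo_save_complete(QEMUFile *f, void *opaque);
    static bool foo_has_postcopy(void *opaque);

    static SaveVMHandlers savevm_foo_handlers = {
        .save_setup        = foo_save_setup,
        .save_live_iterate = foo_save_iterate,
        .save_complete     = foo_save_complete,  /* replaces both old hooks */
        .has_postcopy      = foo_has_postcopy,
    };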
+
/*
- * Calls the save_live_complete_postcopy methods
- * causing the last few pages to be sent immediately and doing any associated
- * cleanup.
+ * Complete saving any postcopy-able devices.
+ *
* Note postcopy also calls qemu_savevm_state_complete_precopy to complete
* all the other devices, but that happens at the point we switch to postcopy.
*/
void qemu_savevm_state_complete_postcopy(QEMUFile *f)
{
SaveStateEntry *se;
- int ret;
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
- if (!se->ops || !se->ops->save_live_complete_postcopy) {
+ if (!qemu_savevm_complete_exists(se)) {
continue;
}
- if (se->ops->is_active) {
- if (!se->ops->is_active(se->opaque)) {
- continue;
- }
- }
- trace_savevm_section_start(se->idstr, se->section_id);
- /* Section type */
- qemu_put_byte(f, QEMU_VM_SECTION_END);
- qemu_put_be32(f, se->section_id);
- ret = se->ops->save_live_complete_postcopy(f, se->opaque);
- trace_savevm_section_end(se->idstr, se->section_id, ret);
- save_section_footer(f, se);
- if (ret < 0) {
- qemu_file_set_error(f, ret);
+ if (qemu_savevm_complete(se, f) < 0) {
return;
}
}
@@ -1560,20 +1577,19 @@ int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy)
{
int64_t start_ts_each, end_ts_each;
SaveStateEntry *se;
- int ret;
bool multifd_device_state = multifd_device_state_supported();
if (multifd_device_state) {
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
- SaveLiveCompletePrecopyThreadHandler hdlr;
+ SaveCompletePrecopyThreadHandler hdlr;
if (!se->ops || (in_postcopy && se->ops->has_postcopy &&
se->ops->has_postcopy(se->opaque)) ||
- !se->ops->save_live_complete_precopy_thread) {
+ !se->ops->save_complete_precopy_thread) {
continue;
}
- hdlr = se->ops->save_live_complete_precopy_thread;
+ hdlr = se->ops->save_complete_precopy_thread;
multifd_spawn_device_state_save_thread(hdlr,
se->idstr, se->instance_id,
se->opaque);
@@ -1581,32 +1597,25 @@ int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy)
}
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
- if (!se->ops ||
- (in_postcopy && se->ops->has_postcopy &&
- se->ops->has_postcopy(se->opaque)) ||
- !se->ops->save_live_complete_precopy) {
+ if (!qemu_savevm_complete_exists(se)) {
continue;
}
- if (se->ops->is_active) {
- if (!se->ops->is_active(se->opaque)) {
- continue;
- }
+ if (in_postcopy && se->ops->has_postcopy &&
+ se->ops->has_postcopy(se->opaque)) {
+ /*
+ * If postcopy will start soon and the SE supports postcopy, skip
+ * it here; it will be completed during the postcopy phase instead.
+ */
+ continue;
}
start_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
- trace_savevm_section_start(se->idstr, se->section_id);
-
- save_section_header(f, se, QEMU_VM_SECTION_END);
-
- ret = se->ops->save_live_complete_precopy(f, se->opaque);
- trace_savevm_section_end(se->idstr, se->section_id, ret);
- save_section_footer(f, se);
- if (ret < 0) {
- qemu_file_set_error(f, ret);
+ if (qemu_savevm_complete(se, f) < 0) {
goto ret_fail_abort_threads;
}
end_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
+
trace_vmstate_downtime_save("iterable", se->idstr, se->instance_id,
end_ts_each - start_ts_each);
}
diff --git a/migration/trace-events b/migration/trace-events
index c506e11..706db97 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -105,6 +105,7 @@ ram_load_postcopy_loop(int channel, uint64_t addr, int flags) "chan=%d addr=0x%"
ram_postcopy_send_discard_bitmap(void) ""
ram_save_page(const char *rbname, uint64_t offset, void *host) "%s: offset: 0x%" PRIx64 " host: %p"
ram_save_queue_pages(const char *rbname, size_t start, size_t len) "%s: start: 0x%zx len: 0x%zx"
+ram_save_complete(uint64_t dirty_pages, int done) "dirty=%" PRIu64 ", done=%d"
ram_dirty_bitmap_request(char *str) "%s"
ram_dirty_bitmap_reload_begin(char *str) "%s"
ram_dirty_bitmap_reload_complete(char *str) "%s"
@@ -284,8 +285,6 @@ postcopy_nhp_range(const char *ramblock, void *host_addr, size_t offset, size_t
postcopy_place_page(void *host_addr) "host=%p"
postcopy_place_page_zero(void *host_addr) "host=%p"
postcopy_ram_enable_notify(void) ""
-mark_postcopy_blocktime_begin(uint64_t addr, void *dd, uint32_t time, int cpu, int received) "addr: 0x%" PRIx64 ", dd: %p, time: %u, cpu: %d, already_received: %d"
-mark_postcopy_blocktime_end(uint64_t addr, void *dd, uint32_t time, int affected_cpu) "addr: 0x%" PRIx64 ", dd: %p, time: %u, affected_cpu: %d"
postcopy_pause_fault_thread(void) ""
postcopy_pause_fault_thread_continued(void) ""
postcopy_pause_fast_load(void) ""
@@ -309,8 +308,10 @@ postcopy_preempt_tls_handshake(void) ""
postcopy_preempt_new_channel(void) ""
postcopy_preempt_thread_entry(void) ""
postcopy_preempt_thread_exit(void) ""
-
-get_mem_fault_cpu_index(int cpu, uint32_t pid) "cpu: %d, pid: %u"
+postcopy_blocktime_tid_cpu_map(int cpu, uint32_t tid) "cpu: %d, tid: %u"
+postcopy_blocktime_begin(uint64_t addr, uint64_t time, int cpu, bool exists) "addr: 0x%" PRIx64 ", time: %" PRIu64 ", cpu: %d, exist: %d"
+postcopy_blocktime_end(uint64_t addr, uint64_t time, int affected_cpu, int affected_non_cpus) "addr: 0x%" PRIx64 ", time: %" PRIu64 ", affected_cpus: %d, affected_non_cpus: %d"
+postcopy_blocktime_end_one(int cpu, uint8_t left_faults) "cpu: %d, left_faults: %" PRIu8
# exec.c
migration_exec_outgoing(const char *cmd) "cmd=%s"
diff --git a/pc-bios/s390-ccw.img b/pc-bios/s390-ccw.img
index 47240f0..ff60978 100644
--- a/pc-bios/s390-ccw.img
+++ b/pc-bios/s390-ccw.img
Binary files differ
diff --git a/pc-bios/s390-ccw/Makefile b/pc-bios/s390-ccw/Makefile
index dc69dd4..a0f24c9 100644
--- a/pc-bios/s390-ccw/Makefile
+++ b/pc-bios/s390-ccw/Makefile
@@ -47,7 +47,7 @@ EXTRA_CFLAGS += -fwrapv -fno-strict-aliasing -fno-asynchronous-unwind-tables
EXTRA_CFLAGS += -msoft-float
EXTRA_CFLAGS += -std=gnu99
EXTRA_CFLAGS += $(LIBC_INC) $(LIBNET_INC)
-EXTRA_LDFLAGS += -Wl,-pie -nostdlib -z noexecstack -z text
+EXTRA_LDFLAGS += -static-pie -nostdlib -z noexecstack -z text
cc-test = $(CC) -Werror $1 -c -o /dev/null -xc /dev/null >/dev/null 2>/dev/null
cc-option = if $(call cc-test, $1); then \
diff --git a/pc-bios/s390-ccw/menu.c b/pc-bios/s390-ccw/menu.c
index 84062e9..eeaff78 100644
--- a/pc-bios/s390-ccw/menu.c
+++ b/pc-bios/s390-ccw/menu.c
@@ -159,7 +159,7 @@ static void boot_menu_prompt(bool retry)
}
}
-static int get_boot_index(bool *valid_entries)
+int menu_get_boot_index(bool *valid_entries)
{
int boot_index;
bool retry = false;
@@ -224,7 +224,7 @@ int menu_get_zipl_boot_index(const char *menu_data)
}
printf("\n");
- return get_boot_index(valid_entries);
+ return menu_get_boot_index(valid_entries);
}
int menu_get_enum_boot_index(bool *valid_entries)
@@ -247,7 +247,7 @@ int menu_get_enum_boot_index(bool *valid_entries)
}
printf("\n");
- return get_boot_index(valid_entries);
+ return menu_get_boot_index(valid_entries);
}
void menu_set_parms(uint8_t boot_menu_flag, uint32_t boot_menu_timeout)
diff --git a/pc-bios/s390-ccw/netmain.c b/pc-bios/s390-ccw/netmain.c
index 719a547..a9521df 100644
--- a/pc-bios/s390-ccw/netmain.c
+++ b/pc-bios/s390-ccw/netmain.c
@@ -332,22 +332,64 @@ static int load_kernel_with_initrd(filename_ip_t *fn_ip,
return rc;
}
-#define MAX_PXELINUX_ENTRIES 16
+static int net_boot_menu(int num_ent, int def_ent,
+ struct pl_cfg_entry *entries)
+{
+ bool valid_entries[MAX_BOOT_ENTRIES] = { false };
+ int idx;
+
+ puts("\ns390-ccw pxelinux.cfg boot menu:\n");
+ printf(" [0] default (%d)\n", def_ent + 1);
+ valid_entries[0] = true;
+
+ for (idx = 1; idx <= num_ent; idx++) {
+ printf(" [%d] %s\n", idx, entries[idx - 1].label);
+ valid_entries[idx] = true;
+ }
+ putchar('\n');
+
+ idx = menu_get_boot_index(valid_entries);
+ putchar('\n');
+
+ return idx;
+}
+
+static int net_select_and_load_kernel(filename_ip_t *fn_ip,
+ int num_ent, int selected,
+ struct pl_cfg_entry *entries)
+{
+ unsigned int loadparm = get_loadparm_index();
+
+ if (num_ent <= 0) {
+ return -1;
+ }
+
+ if (menu_is_enabled_enum() && num_ent > 1) {
+ loadparm = net_boot_menu(num_ent, selected, entries);
+ }
+
+ IPL_assert(loadparm <= num_ent,
+ "loadparm is set to an entry that is not available in the "
+ "pxelinux.cfg file!");
+
+ if (loadparm > 0) {
+ selected = loadparm - 1;
+ }
+
+ return load_kernel_with_initrd(fn_ip, &entries[selected]);
+}
static int net_try_pxelinux_cfg(filename_ip_t *fn_ip)
{
- struct pl_cfg_entry entries[MAX_PXELINUX_ENTRIES];
+ struct pl_cfg_entry entries[MAX_BOOT_ENTRIES];
int num_ent, def_ent = 0;
num_ent = pxelinux_load_parse_cfg(fn_ip, mac, get_uuid(),
DEFAULT_TFTP_RETRIES,
cfgbuf, sizeof(cfgbuf),
- entries, MAX_PXELINUX_ENTRIES, &def_ent);
- if (num_ent > 0) {
- return load_kernel_with_initrd(fn_ip, &entries[def_ent]);
- }
+ entries, MAX_BOOT_ENTRIES, &def_ent);
- return -1;
+ return net_select_and_load_kernel(fn_ip, num_ent, def_ent, entries);
}
/**
@@ -428,15 +470,13 @@ static int net_try_direct_tftp_load(filename_ip_t *fn_ip)
* a magic comment string.
*/
if (!strncasecmp("# pxelinux", cfgbuf, 10)) {
- struct pl_cfg_entry entries[MAX_PXELINUX_ENTRIES];
+ struct pl_cfg_entry entries[MAX_BOOT_ENTRIES];
int num_ent, def_ent = 0;
num_ent = pxelinux_parse_cfg(cfgbuf, sizeof(cfgbuf), entries,
- MAX_PXELINUX_ENTRIES, &def_ent);
- if (num_ent <= 0) {
- return -1;
- }
- return load_kernel_with_initrd(fn_ip, &entries[def_ent]);
+ MAX_BOOT_ENTRIES, &def_ent);
+ return net_select_and_load_kernel(fn_ip, num_ent, def_ent,
+ entries);
}
}
diff --git a/pc-bios/s390-ccw/s390-ccw.h b/pc-bios/s390-ccw/s390-ccw.h
index 6cdce3e..b1dc35c 100644
--- a/pc-bios/s390-ccw/s390-ccw.h
+++ b/pc-bios/s390-ccw/s390-ccw.h
@@ -87,6 +87,7 @@ int menu_get_zipl_boot_index(const char *menu_data);
bool menu_is_enabled_zipl(void);
int menu_get_enum_boot_index(bool *valid_entries);
bool menu_is_enabled_enum(void);
+int menu_get_boot_index(bool *valid_entries);
#define MAX_BOOT_ENTRIES 31
diff --git a/qapi/migration.json b/qapi/migration.json
index e8a7d3b..2d39a8f 100644
--- a/qapi/migration.json
+++ b/qapi/migration.json
@@ -236,6 +236,31 @@
# This is only present when the postcopy-blocktime migration
# capability is enabled. (Since 3.0)
#
+# @postcopy-latency: average remote page fault latency (in ns). Note that
+# this doesn't include all faults, but only the ones that require a
+# remote page request, so it should always be bigger than the real
+# average page fault latency. This is only present when the
+# postcopy-blocktime migration capability is enabled. (Since 10.1)
+#
+# @postcopy-latency-dist: remote page fault latency distribution. Each
+# element of the array is the number of faults that fall into that
+# bucket's window. For the N-th bucket (N>=0), the latency window is
+# [2^N us, 2^(N+1) us). For example, the 8th element stores how many
+# remote faults got resolved within the [256us, 512us) window. This is
+# only present when the postcopy-blocktime migration capability is
+# enabled. (Since 10.1)
+#
+# @postcopy-vcpu-latency: average remote page fault latency per vCPU (in
+# ns). It has the same definition as @postcopy-latency, but reported
+# per vCPU. This is only present when the postcopy-blocktime migration
+# capability is enabled. (Since 10.1)
+#
+# @postcopy-non-vcpu-latency: average remote page fault latency for all
+# faults that happened in non-vCPU threads (in ns). It has the same
+# definition as @postcopy-latency but only covers non-vCPU faults.
+# This is only present when the postcopy-blocktime migration capability
+# is enabled. (Since 10.1)
+#
# @socket-address: Only used for tcp, to know what the real port is
# (Since 4.0)
#
@@ -260,6 +285,11 @@
# average memory load of the virtual CPU indirectly. Note that
# zero means guest doesn't dirty memory. (Since 8.1)
#
+# Features:
+#
+# @unstable: Members @postcopy-latency, @postcopy-vcpu-latency,
+# @postcopy-latency-dist, @postcopy-non-vcpu-latency are experimental.
+#
# Since: 0.14
##
{ 'struct': 'MigrationInfo',
@@ -275,6 +305,14 @@
'*blocked-reasons': ['str'],
'*postcopy-blocktime': 'uint32',
'*postcopy-vcpu-blocktime': ['uint32'],
+ '*postcopy-latency': {
+ 'type': 'uint64', 'features': [ 'unstable' ] },
+ '*postcopy-latency-dist': {
+ 'type': ['uint64'], 'features': [ 'unstable' ] },
+ '*postcopy-vcpu-latency': {
+ 'type': ['uint64'], 'features': [ 'unstable' ] },
+ '*postcopy-non-vcpu-latency': {
+ 'type': 'uint64', 'features': [ 'unstable' ] },
'*socket-address': ['SocketAddress'],
'*dirty-limit-throttle-time-per-round': 'uint64',
'*dirty-limit-ring-full-time': 'uint64'} }
diff --git a/system/qdev-monitor.c b/system/qdev-monitor.c
index 5588ed2..2ac92d0 100644
--- a/system/qdev-monitor.c
+++ b/system/qdev-monitor.c
@@ -628,7 +628,7 @@ DeviceState *qdev_device_add_from_qdict(const QDict *opts,
DeviceClass *dc;
const char *driver, *path;
char *id;
- DeviceState *dev = NULL;
+ DeviceState *dev;
BusState *bus = NULL;
QDict *properties;
@@ -717,10 +717,9 @@ DeviceState *qdev_device_add_from_qdict(const QDict *opts,
return dev;
err_del_dev:
- if (dev) {
- object_unparent(OBJECT(dev));
- object_unref(OBJECT(dev));
- }
+ object_unparent(OBJECT(dev));
+ object_unref(OBJECT(dev));
+
return NULL;
}
diff --git a/target/arm/cpregs-pmu.c b/target/arm/cpregs-pmu.c
new file mode 100644
index 0000000..0f295b1
--- /dev/null
+++ b/target/arm/cpregs-pmu.c
@@ -0,0 +1,1309 @@
+/*
+ * QEMU ARM CP Register PMU insns
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/timer.h"
+#include "exec/icount.h"
+#include "hw/irq.h"
+#include "cpu.h"
+#include "cpu-features.h"
+#include "cpregs.h"
+#include "internals.h"
+
+
+#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
+
+/*
+ * Check for traps to performance monitor registers, which are controlled
+ * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
+ */
+static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+ uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
+
+ if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+typedef struct pm_event {
+ uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
+ /* If the event is supported on this CPU (used to generate PMCEID[01]) */
+ bool (*supported)(CPUARMState *);
+ /*
+ * Retrieve the current count of the underlying event. The programmed
+ * counters hold a difference from the value returned by this function
+ */
+ uint64_t (*get_count)(CPUARMState *);
+ /*
+ * Return how many nanoseconds it will take (at a minimum) for count events
+ * to occur. A negative value indicates the counter will never overflow, or
+ * that the counter has otherwise arranged for the overflow bit to be set
+ * and the PMU interrupt to be raised on overflow.
+ */
+ int64_t (*ns_per_count)(uint64_t);
+} pm_event;
+
+static bool event_always_supported(CPUARMState *env)
+{
+ return true;
+}
+
+static uint64_t swinc_get_count(CPUARMState *env)
+{
+ /*
+ * SW_INCR events are written directly to the pmevcntrs by writes to
+ * PMSWINC, so there is no underlying count maintained by the PMU itself
+ */
+ return 0;
+}
+
+static int64_t swinc_ns_per(uint64_t ignored)
+{
+ return -1;
+}
+
+/*
+ * Return the underlying cycle count for the PMU cycle counters. In
+ * user-mode emulation, fall back to the host tick count.
+ */
+static uint64_t cycles_get_count(CPUARMState *env)
+{
+#ifndef CONFIG_USER_ONLY
+ return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
+#else
+ return cpu_get_host_ticks();
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+static int64_t cycles_ns_per(uint64_t cycles)
+{
+ return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
+}
+
+static bool instructions_supported(CPUARMState *env)
+{
+ /* Precise instruction counting */
+ return icount_enabled() == ICOUNT_PRECISE;
+}
+
+static uint64_t instructions_get_count(CPUARMState *env)
+{
+ assert(icount_enabled() == ICOUNT_PRECISE);
+ return (uint64_t)icount_get_raw();
+}
+
+static int64_t instructions_ns_per(uint64_t icount)
+{
+ assert(icount_enabled() == ICOUNT_PRECISE);
+ return icount_to_ns((int64_t)icount);
+}
+#endif
+
+static bool pmuv3p1_events_supported(CPUARMState *env)
+{
+ /* For events which are supported in any v8.1 PMU */
+ return cpu_isar_feature(any_pmuv3p1, env_archcpu(env));
+}
+
+static bool pmuv3p4_events_supported(CPUARMState *env)
+{
+ /* For events which are supported in any v8.4 PMU */
+ return cpu_isar_feature(any_pmuv3p4, env_archcpu(env));
+}
+
+static uint64_t zero_event_get_count(CPUARMState *env)
+{
+ /* For events which never fire on QEMU, the count is always zero */
+ return 0;
+}
+
+static int64_t zero_event_ns_per(uint64_t cycles)
+{
+ /* An event which never fires can never overflow */
+ return -1;
+}
+
+static const pm_event pm_events[] = {
+ { .number = 0x000, /* SW_INCR */
+ .supported = event_always_supported,
+ .get_count = swinc_get_count,
+ .ns_per_count = swinc_ns_per,
+ },
+#ifndef CONFIG_USER_ONLY
+ { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
+ .supported = instructions_supported,
+ .get_count = instructions_get_count,
+ .ns_per_count = instructions_ns_per,
+ },
+ { .number = 0x011, /* CPU_CYCLES, Cycle */
+ .supported = event_always_supported,
+ .get_count = cycles_get_count,
+ .ns_per_count = cycles_ns_per,
+ },
+#endif
+ { .number = 0x023, /* STALL_FRONTEND */
+ .supported = pmuv3p1_events_supported,
+ .get_count = zero_event_get_count,
+ .ns_per_count = zero_event_ns_per,
+ },
+ { .number = 0x024, /* STALL_BACKEND */
+ .supported = pmuv3p1_events_supported,
+ .get_count = zero_event_get_count,
+ .ns_per_count = zero_event_ns_per,
+ },
+ { .number = 0x03c, /* STALL */
+ .supported = pmuv3p4_events_supported,
+ .get_count = zero_event_get_count,
+ .ns_per_count = zero_event_ns_per,
+ },
+};
+
+/*
+ * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
+ * events (i.e. the statistical profiling extension), this implementation
+ * should first be updated to something sparse instead of the current
+ * supported_event_map[] array.
+ */
+#define MAX_EVENT_ID 0x3c
+#define UNSUPPORTED_EVENT UINT16_MAX
+static uint16_t supported_event_map[MAX_EVENT_ID + 1];
+
+/*
+ * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
+ * of ARM event numbers to indices in our pm_events array.
+ *
+ * Note: Events in the 0x40XX range are not currently supported.
+ */
+void pmu_init(ARMCPU *cpu)
+{
+ unsigned int i;
+
+ /*
+ * Empty supported_event_map and cpu->pmceid[01] before adding supported
+ * events to them
+ */
+ for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
+ supported_event_map[i] = UNSUPPORTED_EVENT;
+ }
+ cpu->pmceid0 = 0;
+ cpu->pmceid1 = 0;
+
+ for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
+ const pm_event *cnt = &pm_events[i];
+ assert(cnt->number <= MAX_EVENT_ID);
+ /* We do not currently support events in the 0x40xx range */
+ assert(cnt->number <= 0x3f);
+
+ if (cnt->supported(&cpu->env)) {
+ supported_event_map[cnt->number] = i;
+ uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
+ if (cnt->number & 0x20) {
+ cpu->pmceid1 |= event_mask;
+ } else {
+ cpu->pmceid0 |= event_mask;
+ }
+ }
+ }
+}
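
The `cnt->number & 0x20` test encodes the architectural split: PMCEID0 advertises common events 0x000..0x01f and PMCEID1 advertises 0x020..0x03f, each as a bit within its 32-event window. A standalone check of that mapping, using CPU_CYCLES (0x011) and STALL_FRONTEND (0x023) from the table above:

    #include <assert.h>
    #include <stdint.h>

    /* Set the PMCEID0/PMCEID1 bit for a supported event number (0x00..0x3f) */
    static void pmceid_set(uint16_t number, uint64_t *pmceid0, uint64_t *pmceid1)
    {
        uint64_t event_mask = 1ULL << (number & 0x1f);

        if (number & 0x20) {
            *pmceid1 |= event_mask;  /* events 0x20..0x3f */
        } else {
            *pmceid0 |= event_mask;  /* events 0x00..0x1f */
        }
    }

    int main(void)
    {
        uint64_t id0 = 0, id1 = 0;

        pmceid_set(0x011, &id0, &id1);  /* CPU_CYCLES -> PMCEID0 bit 17 */
        pmceid_set(0x023, &id0, &id1);  /* STALL_FRONTEND -> PMCEID1 bit 3 */
        assert(id0 == (1ULL << 0x11));
        assert(id1 == (1ULL << 0x03));
        return 0;
    }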
+
+/*
+ * Check at runtime whether a PMU event is supported for the current machine
+ */
+static bool event_supported(uint16_t number)
+{
+ if (number > MAX_EVENT_ID) {
+ return false;
+ }
+ return supported_event_map[number] != UNSUPPORTED_EVENT;
+}
+
+static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /*
+ * User accessibility of the performance monitor registers is controlled
+ * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
+ * trapping to EL2 or EL3 for other accesses.
+ */
+ int el = arm_current_el(env);
+ uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
+
+ if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
+ return CP_ACCESS_TRAP_EL1;
+ }
+ if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* ER: event counter read trap control */
+ if (arm_feature(env, ARM_FEATURE_V8)
+ && arm_current_el(env) == 0
+ && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
+ && isread) {
+ return CP_ACCESS_OK;
+ }
+
+ return pmreg_access(env, ri, isread);
+}
+
+static CPAccessResult pmreg_access_swinc(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* SW: software increment write trap control */
+ if (arm_feature(env, ARM_FEATURE_V8)
+ && arm_current_el(env) == 0
+ && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
+ && !isread) {
+ return CP_ACCESS_OK;
+ }
+
+ return pmreg_access(env, ri, isread);
+}
+
+static CPAccessResult pmreg_access_selr(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* ER: event counter read trap control */
+ if (arm_feature(env, ARM_FEATURE_V8)
+ && arm_current_el(env) == 0
+ && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
+ return CP_ACCESS_OK;
+ }
+
+ return pmreg_access(env, ri, isread);
+}
+
+static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* CR: cycle counter read trap control */
+ if (arm_feature(env, ARM_FEATURE_V8)
+ && arm_current_el(env) == 0
+ && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
+ && isread) {
+ return CP_ACCESS_OK;
+ }
+
+ return pmreg_access(env, ri, isread);
+}
+
+/*
+ * Returns true if the counter (pass 31 for PMCCNTR) should count events using
+ * the current EL, security state, and register configuration.
+ */
+static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
+{
+ uint64_t filter;
+ bool e, p, u, nsk, nsu, nsh, m;
+ bool enabled, prohibited = false, filtered;
+ bool secure = arm_is_secure(env);
+ int el = arm_current_el(env);
+ uint64_t mdcr_el2;
+ uint8_t hpmn;
+
+ /*
+ * We might be called for M-profile cores where MDCR_EL2 doesn't
+ * exist and arm_mdcr_el2_eff() will assert, so this early-exit check
+ * must be before we read that value.
+ */
+ if (!arm_feature(env, ARM_FEATURE_PMU)) {
+ return false;
+ }
+
+ mdcr_el2 = arm_mdcr_el2_eff(env);
+ hpmn = mdcr_el2 & MDCR_HPMN;
+
+ if (!arm_feature(env, ARM_FEATURE_EL2) ||
+ (counter < hpmn || counter == 31)) {
+ e = env->cp15.c9_pmcr & PMCRE;
+ } else {
+ e = mdcr_el2 & MDCR_HPME;
+ }
+ enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
+
+ /* Is event counting prohibited? */
+ if (el == 2 && (counter < hpmn || counter == 31)) {
+ prohibited = mdcr_el2 & MDCR_HPMD;
+ }
+ if (secure) {
+ prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
+ }
+
+ if (counter == 31) {
+ /*
+ * The cycle counter defaults to running. PMCR.DP says "disable
+ * the cycle counter when event counting is prohibited".
+ * Some MDCR bits disable the cycle counter specifically.
+ */
+ prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP;
+ if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+ if (secure) {
+ prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
+ }
+ if (el == 2) {
+ prohibited = prohibited || (mdcr_el2 & MDCR_HCCD);
+ }
+ }
+ }
+
+ if (counter == 31) {
+ filter = env->cp15.pmccfiltr_el0;
+ } else {
+ filter = env->cp15.c14_pmevtyper[counter];
+ }
+
+ p = filter & PMXEVTYPER_P;
+ u = filter & PMXEVTYPER_U;
+ nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
+ nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
+ nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
+ m = arm_el_is_aa64(env, 1) &&
+ arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
+
+ if (el == 0) {
+ filtered = secure ? u : u != nsu;
+ } else if (el == 1) {
+ filtered = secure ? p : p != nsk;
+ } else if (el == 2) {
+ filtered = !nsh;
+ } else { /* EL3 */
+ filtered = m != p;
+ }
+
+ if (counter != 31) {
+ /*
+ * If not checking PMCCNTR, ensure the counter is set up to an event we
+ * support
+ */
+ uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
+ if (!event_supported(event)) {
+ return false;
+ }
+ }
+
+ return enabled && !prohibited && !filtered;
+}
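
The filter logic is easiest to sanity-check case by case, since NSK/NSU effectively invert P/U for Non-secure state: at Non-secure EL0, for instance, the event is filtered when U != NSU, so U=1/NSU=0 filters while U=1/NSU=1 counts. A minimal standalone re-statement of just the EL0/EL1 rules above:

    #include <assert.h>
    #include <stdbool.h>

    /* Mirrors the EL0/EL1 filtering rules in pmu_counter_enabled() */
    static bool pmu_filtered(int el, bool secure, bool p, bool u,
                             bool nsk, bool nsu)
    {
        if (el == 0) {
            return secure ? u : (u != nsu);
        }
        assert(el == 1);
        return secure ? p : (p != nsk);
    }

    int main(void)
    {
        /* Non-secure EL0: U=1,NSU=0 -> filtered; U=1,NSU=1 -> counted */
        assert(pmu_filtered(0, false, false, true, false, false));
        assert(!pmu_filtered(0, false, false, true, false, true));
        /* Secure EL1: only P matters */
        assert(pmu_filtered(1, true, true, false, false, false));
        return 0;
    }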
+
+static void pmu_update_irq(CPUARMState *env)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
+ (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
+}
+
+static bool pmccntr_clockdiv_enabled(CPUARMState *env)
+{
+ /*
+ * Return true if the clock divider is enabled and the cycle counter
+ * is supposed to tick only once every 64 clock cycles. This is
+ * controlled by PMCR.D, but if PMCR.LC is set to enable the long
+ * (64-bit) cycle counter PMCR.D has no effect.
+ */
+ return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
+}
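
The masked comparison is a compact way to require PMCR.D set while PMCR.LC is clear; it is false for the other three combinations. A quick enumeration, with stand-in bit values for PMCRD/PMCRLC (the real definitions live in QEMU's headers):

    #include <assert.h>
    #include <stdint.h>

    #define PMCRD  (1U << 3)   /* stand-in: clock divider enable */
    #define PMCRLC (1U << 6)   /* stand-in: long cycle counter enable */

    static int clockdiv_enabled(uint32_t pmcr)
    {
        return (pmcr & (PMCRD | PMCRLC)) == PMCRD;
    }

    int main(void)
    {
        assert(!clockdiv_enabled(0));               /* neither bit set */
        assert(clockdiv_enabled(PMCRD));            /* D only: divide by 64 */
        assert(!clockdiv_enabled(PMCRLC));          /* LC only */
        assert(!clockdiv_enabled(PMCRD | PMCRLC));  /* LC overrides D */
        return 0;
    }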
+
+static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
+{
+ /* Return true if the specified event counter is configured to be 64 bit */
+
+ /* This isn't intended to be used with the cycle counter */
+ assert(counter < 31);
+
+ if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+ return false;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ /*
+ * MDCR_EL2.HLP still applies even when EL2 is disabled in the
+ * current security state, so we don't use arm_mdcr_el2_eff() here.
+ */
+ bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
+ int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
+
+ if (counter >= hpmn) {
+ return hlp;
+ }
+ }
+ return env->cp15.c9_pmcr & PMCRLP;
+}
+
+/*
+ * Ensure c15_ccnt is the guest-visible count so that operations such as
+ * enabling/disabling the counter or filtering, modifying the count itself,
+ * etc. can be done logically. This is essentially a no-op if the counter is
+ * not enabled at the time of the call.
+ */
+static void pmccntr_op_start(CPUARMState *env)
+{
+ uint64_t cycles = cycles_get_count(env);
+
+ if (pmu_counter_enabled(env, 31)) {
+ uint64_t eff_cycles = cycles;
+ if (pmccntr_clockdiv_enabled(env)) {
+ eff_cycles /= 64;
+ }
+
+ uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
+
+ uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
+ 1ull << 63 : 1ull << 31;
+ if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
+ env->cp15.c9_pmovsr |= (1ULL << 31);
+ pmu_update_irq(env);
+ }
+
+ env->cp15.c15_ccnt = new_pmccntr;
+ }
+ env->cp15.c15_ccnt_delta = cycles;
+}
+
+/*
+ * If PMCCNTR is enabled, recalculate the delta between the clock and the
+ * guest-visible count. A call to pmccntr_op_finish should follow every call to
+ * pmccntr_op_start.
+ */
+static void pmccntr_op_finish(CPUARMState *env)
+{
+ if (pmu_counter_enabled(env, 31)) {
+#ifndef CONFIG_USER_ONLY
+ /* Calculate when the counter will next overflow */
+ uint64_t remaining_cycles = -env->cp15.c15_ccnt;
+ if (!(env->cp15.c9_pmcr & PMCRLC)) {
+ remaining_cycles = (uint32_t)remaining_cycles;
+ }
+ int64_t overflow_in = cycles_ns_per(remaining_cycles);
+
+ if (overflow_in > 0) {
+ int64_t overflow_at;
+
+ if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ overflow_in, &overflow_at)) {
+ ARMCPU *cpu = env_archcpu(env);
+ timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
+ }
+ }
+#endif
+
+ uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
+ if (pmccntr_clockdiv_enabled(env)) {
+ prev_cycles /= 64;
+ }
+ env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
+ }
+}
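
The delta scheme means the counter register is never ticked directly: op_start materializes the guest-visible value as raw minus delta (checking overflow on the way), any read or write then acts on that value, and op_finish re-arms delta as raw minus guest so counting resumes from whatever the guest left there. A toy model of one start/modify/finish cycle, with a plain variable standing in for cycles_get_count():

    #include <assert.h>
    #include <stdint.h>

    static uint64_t raw;            /* stand-in for the free-running cycle count */
    static uint64_t guest, delta;   /* c15_ccnt and c15_ccnt_delta equivalents */

    static void op_start(void)  { guest = raw - delta; }   /* materialize */
    static void op_finish(void) { delta = raw - guest; }   /* re-arm */

    int main(void)
    {
        raw = 1000;
        op_start();                 /* guest sees 1000 - 0 = 1000 */
        assert(guest == 1000);
        guest = 5;                  /* guest writes PMCCNTR = 5 */
        op_finish();                /* delta = 995 */

        raw += 100;                 /* 100 more cycles elapse */
        op_start();
        assert(guest == 105);       /* 5 + 100, as the guest expects */
        return 0;
    }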
+
+static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
+{
+
+ uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
+ uint64_t count = 0;
+ if (event_supported(event)) {
+ uint16_t event_idx = supported_event_map[event];
+ count = pm_events[event_idx].get_count(env);
+ }
+
+ if (pmu_counter_enabled(env, counter)) {
+ uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
+ uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
+ 1ULL << 63 : 1ULL << 31;
+
+ if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
+ env->cp15.c9_pmovsr |= (1 << counter);
+ pmu_update_irq(env);
+ }
+ env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
+ }
+ env->cp15.c14_pmevcntr_delta[counter] = count;
+}
+
+static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
+{
+ if (pmu_counter_enabled(env, counter)) {
+#ifndef CONFIG_USER_ONLY
+ uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
+ uint16_t event_idx = supported_event_map[event];
+ uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
+ int64_t overflow_in;
+
+ if (!pmevcntr_is_64_bit(env, counter)) {
+ delta = (uint32_t)delta;
+ }
+ overflow_in = pm_events[event_idx].ns_per_count(delta);
+
+ if (overflow_in > 0) {
+ int64_t overflow_at;
+
+ if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ overflow_in, &overflow_at)) {
+ ARMCPU *cpu = env_archcpu(env);
+ timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
+ }
+ }
+#endif
+
+ env->cp15.c14_pmevcntr_delta[counter] -=
+ env->cp15.c14_pmevcntr[counter];
+ }
+}
+
+void pmu_op_start(CPUARMState *env)
+{
+ unsigned int i;
+ pmccntr_op_start(env);
+ for (i = 0; i < pmu_num_counters(env); i++) {
+ pmevcntr_op_start(env, i);
+ }
+}
+
+void pmu_op_finish(CPUARMState *env)
+{
+ unsigned int i;
+ pmccntr_op_finish(env);
+ for (i = 0; i < pmu_num_counters(env); i++) {
+ pmevcntr_op_finish(env, i);
+ }
+}
+
+void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
+{
+ pmu_op_start(&cpu->env);
+}
+
+void pmu_post_el_change(ARMCPU *cpu, void *ignored)
+{
+ pmu_op_finish(&cpu->env);
+}
+
+void arm_pmu_timer_cb(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ /*
+ * Update all the counter values based on the current underlying counts,
+ * raising interrupts if necessary. pmu_op_finish() also
+ * has the effect of setting the cpu->pmu_timer to the next earliest time a
+ * counter may expire.
+ */
+ pmu_op_start(&cpu->env);
+ pmu_op_finish(&cpu->env);
+}
+
+static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmu_op_start(env);
+
+ if (value & PMCRC) {
+ /* PMCR.C: reset the cycle counter */
+ env->cp15.c15_ccnt = 0;
+ }
+
+ if (value & PMCRP) {
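+ /* PMCR.P: reset all the event counters; PMCCNTR is unaffected */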
+ unsigned int i;
+ for (i = 0; i < pmu_num_counters(env); i++) {
+ env->cp15.c14_pmevcntr[i] = 0;
+ }
+ }
+
+ env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
+ env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);
+
+ pmu_op_finish(env);
+}
+
+static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint64_t pmcr = env->cp15.c9_pmcr;
+
+ /*
+ * If EL2 is implemented and enabled for the current security state, reads
+ * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN.
+ */
+ if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) {
+ pmcr &= ~PMCRN_MASK;
+ pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT;
+ }
+
+ return pmcr;
+}
+
+static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ unsigned int i;
+ uint64_t overflow_mask, new_pmswinc;
+
+ for (i = 0; i < pmu_num_counters(env); i++) {
+ /* Increment a counter's count iff: */
+ if ((value & (1 << i)) && /* counter's bit is set */
+ /* counter is enabled and not filtered */
+ pmu_counter_enabled(env, i) &&
+ /* counter is SW_INCR */
+ (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
+ pmevcntr_op_start(env, i);
+
+ /*
+ * Detect if this write causes an overflow since we can't predict
+ * PMSWINC overflows like we can for other events
+ */
+ new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
+
+ overflow_mask = pmevcntr_is_64_bit(env, i) ?
+ 1ULL << 63 : 1ULL << 31;
+
+ if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
+ env->cp15.c9_pmovsr |= (1 << i);
+ pmu_update_irq(env);
+ }
+
+ env->cp15.c14_pmevcntr[i] = new_pmswinc;
+
+ pmevcntr_op_finish(env, i);
+ }
+ }
+}
+
+static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint64_t ret;
+ pmccntr_op_start(env);
+ ret = env->cp15.c15_ccnt;
+ pmccntr_op_finish(env);
+ return ret;
+}
+
+static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
+ * PMXEVCNTR. We allow any value in [0..31] to be written to PMSELR
+ * here and instead check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR
+ * are accessed.
+ */
+ env->cp15.c9_pmselr = value & 0x1f;
+}
+
+static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmccntr_op_start(env);
+ env->cp15.c15_ccnt = value;
+ pmccntr_op_finish(env);
+}
+
+static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
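+ /* An AArch32 write replaces only PMCCNTR[31:0]; the top half is kept */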
+ uint64_t cur_val = pmccntr_read(env, NULL);
+
+ pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
+}
+
+static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmccntr_op_start(env);
+ env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
+ pmccntr_op_finish(env);
+}
+
+static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmccntr_op_start(env);
+ /* M is not accessible from AArch32 */
+ env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
+ (value & PMCCFILTR);
+ pmccntr_op_finish(env);
+}
+
+static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /* M is not visible in AArch32 */
+ return env->cp15.pmccfiltr_el0 & PMCCFILTR;
+}
+
+static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmu_op_start(env);
+ value &= pmu_counter_mask(env);
+ env->cp15.c9_pmcnten |= value;
+ pmu_op_finish(env);
+}
+
+static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmu_op_start(env);
+ value &= pmu_counter_mask(env);
+ env->cp15.c9_pmcnten &= ~value;
+ pmu_op_finish(env);
+}
+
+static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
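+ /* Write-one-to-clear: drop the overflow flags for the written bits */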
+ value &= pmu_counter_mask(env);
+ env->cp15.c9_pmovsr &= ~value;
+ pmu_update_irq(env);
+}
+
+static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ value &= pmu_counter_mask(env);
+ env->cp15.c9_pmovsr |= value;
+ pmu_update_irq(env);
+}
+
+static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value, const uint8_t counter)
+{
+ if (counter == 31) {
+ pmccfiltr_write(env, ri, value);
+ } else if (counter < pmu_num_counters(env)) {
+ pmevcntr_op_start(env, counter);
+
+ /*
+ * If this counter's event type is changing, store the current
+ * underlying count for the new type in c14_pmevcntr_delta[counter] so
+ * pmevcntr_op_finish has the correct baseline when it converts back to
+ * a delta.
+ */
+ uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
+ PMXEVTYPER_EVTCOUNT;
+ uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
+ if (old_event != new_event) {
+ uint64_t count = 0;
+ if (event_supported(new_event)) {
+ uint16_t event_idx = supported_event_map[new_event];
+ count = pm_events[event_idx].get_count(env);
+ }
+ env->cp15.c14_pmevcntr_delta[counter] = count;
+ }
+
+ env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
+ pmevcntr_op_finish(env, counter);
+ }
+ /*
+ * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when the
+ * PMSELR value is equal to or greater than the number of implemented
+ * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
+ */
+}
+
+static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
+ const uint8_t counter)
+{
+ if (counter == 31) {
+ return env->cp15.pmccfiltr_el0;
+ } else if (counter < pmu_num_counters(env)) {
+ return env->cp15.c14_pmevtyper[counter];
+ } else {
+ /*
+ * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
+ * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
+ */
+ return 0;
+ }
+}
+
+static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
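+ /* The encoding splits the index: CRm[1:0] = n[4:3], opc2 = n[2:0] */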
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ pmevtyper_write(env, ri, value, counter);
+}
+
+static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ env->cp15.c14_pmevtyper[counter] = value;
+
+ /*
+ * pmevtyper_rawwrite is called between a pair of pmu_op_start and
+ * pmu_op_finish calls when loading saved state for a migration. Because
+ * we're potentially updating the type of event here, the value written to
+ * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
+ * different counter type. Therefore, we need to set this value to the
+ * current count for the counter type we're writing so that pmu_op_finish
+ * has the correct count for its calculation.
+ */
+ uint16_t event = value & PMXEVTYPER_EVTCOUNT;
+ if (event_supported(event)) {
+ uint16_t event_idx = supported_event_map[event];
+ env->cp15.c14_pmevcntr_delta[counter] =
+ pm_events[event_idx].get_count(env);
+ }
+}
+
+static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ return pmevtyper_read(env, ri, counter);
+}
+
+static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
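+ /* PMXEVTYPER is an indirect view of the register chosen by PMSELR.SEL */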
+ pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
+}
+
+static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
+}
+
+static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value, uint8_t counter)
+{
+ if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+ /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
+ value &= MAKE_64BIT_MASK(0, 32);
+ }
+ if (counter < pmu_num_counters(env)) {
+ pmevcntr_op_start(env, counter);
+ env->cp15.c14_pmevcntr[counter] = value;
+ pmevcntr_op_finish(env, counter);
+ }
+ /*
+ * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
+ * are CONSTRAINED UNPREDICTABLE.
+ */
+}
+
+static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint8_t counter)
+{
+ if (counter < pmu_num_counters(env)) {
+ uint64_t ret;
+ pmevcntr_op_start(env, counter);
+ ret = env->cp15.c14_pmevcntr[counter];
+ pmevcntr_op_finish(env, counter);
+ if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+ /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
+ ret &= MAKE_64BIT_MASK(0, 32);
+ }
+ return ret;
+ } else {
+ /*
+ * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
+ * are CONSTRAINED UNPREDICTABLE.
+ */
+ return 0;
+ }
+}
+
+static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ pmevcntr_write(env, ri, value, counter);
+}
+
+static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ return pmevcntr_read(env, ri, counter);
+}
+
+static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ assert(counter < pmu_num_counters(env));
+ env->cp15.c14_pmevcntr[counter] = value;
+ pmevcntr_write(env, ri, value, counter);
+}
+
+static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ assert(counter < pmu_num_counters(env));
+ return env->cp15.c14_pmevcntr[counter];
+}
+
+static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
+}
+
+static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
+}
+
+static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
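+ /* v8 adds the ER, CR and SW bits alongside EN; v7 has only EN */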
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ env->cp15.c9_pmuserenr = value & 0xf;
+ } else {
+ env->cp15.c9_pmuserenr = value & 1;
+ }
+}
+
+static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Only the bits for implemented counters and the C bit can be set */
+ value &= pmu_counter_mask(env);
+ env->cp15.c9_pminten |= value;
+ pmu_update_irq(env);
+}
+
+static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ value &= pmu_counter_mask(env);
+ env->cp15.c9_pminten &= ~value;
+ pmu_update_irq(env);
+}
+
+static const ARMCPRegInfo v7_pm_reginfo[] = {
+ /*
+ * Performance monitors are implementation defined in v7,
+ * but with an ARM recommended set of registers, which we
+ * follow.
+ *
+ * Performance registers fall into three categories:
+ * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
+ * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
+ * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
+ * For the cases controlled by PMUSERENR we must set .access to PL0_RW
+ * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
+ */
+ { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
+ .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
+ .writefn = pmcntenset_write,
+ .accessfn = pmreg_access,
+ .fgt = FGT_PMCNTEN,
+ .raw_writefn = raw_write },
+ { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fgt = FGT_PMCNTEN,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
+ .writefn = pmcntenset_write, .raw_writefn = raw_write },
+ { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
+ .access = PL0_RW,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
+ .accessfn = pmreg_access,
+ .fgt = FGT_PMCNTEN,
+ .writefn = pmcntenclr_write, .raw_writefn = raw_write,
+ .type = ARM_CP_ALIAS | ARM_CP_IO },
+ { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fgt = FGT_PMCNTEN,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
+ .writefn = pmcntenclr_write, .raw_writefn = raw_write },
+ { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
+ .access = PL0_RW, .type = ARM_CP_IO,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
+ .accessfn = pmreg_access,
+ .fgt = FGT_PMOVS,
+ .writefn = pmovsr_write,
+ .raw_writefn = raw_write },
+ { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fgt = FGT_PMOVS,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
+ .writefn = pmovsr_write,
+ .raw_writefn = raw_write },
+ { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
+ .access = PL0_W, .accessfn = pmreg_access_swinc,
+ .fgt = FGT_PMSWINC_EL0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .writefn = pmswinc_write },
+ { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
+ .access = PL0_W, .accessfn = pmreg_access_swinc,
+ .fgt = FGT_PMSWINC_EL0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .writefn = pmswinc_write },
+ { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
+ .access = PL0_RW, .type = ARM_CP_ALIAS,
+ .fgt = FGT_PMSELR_EL0,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
+ .accessfn = pmreg_access_selr, .writefn = pmselr_write,
+ .raw_writefn = raw_write},
+ { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
+ .access = PL0_RW, .accessfn = pmreg_access_selr,
+ .fgt = FGT_PMSELR_EL0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
+ .writefn = pmselr_write, .raw_writefn = raw_write, },
+ { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
+ .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fgt = FGT_PMCCNTR_EL0,
+ .readfn = pmccntr_read, .writefn = pmccntr_write32,
+ .accessfn = pmreg_access_ccntr },
+ { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
+ .access = PL0_RW, .accessfn = pmreg_access_ccntr,
+ .fgt = FGT_PMCCNTR_EL0,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
+ .readfn = pmccntr_read, .writefn = pmccntr_write,
+ .raw_readfn = raw_read, .raw_writefn = raw_write, },
+ { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
+ .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fgt = FGT_PMCCFILTR_EL0,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .resetvalue = 0, },
+ { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
+ .writefn = pmccfiltr_write, .raw_writefn = raw_write,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fgt = FGT_PMCCFILTR_EL0,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
+ .resetvalue = 0, },
+ { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
+ .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = pmreg_access,
+ .fgt = FGT_PMEVTYPERN_EL0,
+ .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
+ { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
+ .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = pmreg_access,
+ .fgt = FGT_PMEVTYPERN_EL0,
+ .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
+ { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
+ .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = pmreg_access_xevcntr,
+ .fgt = FGT_PMEVCNTRN_EL0,
+ .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
+ { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
+ .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = pmreg_access_xevcntr,
+ .fgt = FGT_PMEVCNTRN_EL0,
+ .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
+ { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
+ .access = PL0_R | PL1_RW, .accessfn = access_tpm,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
+ .resetvalue = 0,
+ .writefn = pmuserenr_write, .raw_writefn = raw_write },
+ { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
+ .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
+ .resetvalue = 0,
+ .writefn = pmuserenr_write, .raw_writefn = raw_write },
+ { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_tpm,
+ .fgt = FGT_PMINTEN,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
+ .resetvalue = 0,
+ .writefn = pmintenset_write, .raw_writefn = raw_write },
+ { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_tpm,
+ .fgt = FGT_PMINTEN,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
+ .writefn = pmintenset_write, .raw_writefn = raw_write,
+ .resetvalue = 0x0 },
+ { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_tpm,
+ .fgt = FGT_PMINTEN,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
+ .writefn = pmintenclr_write, .raw_writefn = raw_write },
+ { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_tpm,
+ .fgt = FGT_PMINTEN,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
+ .writefn = pmintenclr_write, .raw_writefn = raw_write },
+};
+
+static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
+ /* PMOVSSET is not implemented in v7 before v7ve */
+ { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fgt = FGT_PMOVS,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
+ .writefn = pmovsset_write,
+ .raw_writefn = raw_write },
+ { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fgt = FGT_PMOVS,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
+ .writefn = pmovsset_write,
+ .raw_writefn = raw_write },
+};
+
+void define_pm_cpregs(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+
+ if (arm_feature(env, ARM_FEATURE_V7)) {
+ /*
+ * v7 performance monitor control register: same implementor
+ * field as main ID register, and we implement four counters in
+ * addition to the cycle count register.
+ */
+ static const ARMCPRegInfo pmcr = {
+ .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
+ .access = PL0_RW,
+ .fgt = FGT_PMCR_EL0,
+ .type = ARM_CP_IO | ARM_CP_ALIAS,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
+ .accessfn = pmreg_access,
+ .readfn = pmcr_read, .raw_readfn = raw_read,
+ .writefn = pmcr_write, .raw_writefn = raw_write,
+ };
+ const ARMCPRegInfo pmcr64 = {
+ .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fgt = FGT_PMCR_EL0,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
+ .resetvalue = cpu->isar.reset_pmcr_el0,
+ .readfn = pmcr_read, .raw_readfn = raw_read,
+ .writefn = pmcr_write, .raw_writefn = raw_write,
+ };
+
+ define_one_arm_cp_reg(cpu, &pmcr);
+ define_one_arm_cp_reg(cpu, &pmcr64);
+ define_arm_cp_regs(cpu, v7_pm_reginfo);
+
+ for (unsigned i = 0, pmcrn = pmu_num_counters(env); i < pmcrn; i++) {
+ g_autofree char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
+ g_autofree char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
+ g_autofree char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
+ g_autofree char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
+
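+ /*
+ * PMEVCNTR<n> is encoded with CRm = 8 + n[4:3] and PMEVTYPER<n>
+ * with CRm = 12 + n[4:3]; both use opc2 = n[2:0].
+ */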
+ ARMCPRegInfo pmev_regs[] = {
+ { .name = pmevcntr_name, .cp = 15, .crn = 14,
+ .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
+ .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
+ .fgt = FGT_PMEVCNTRN_EL0,
+ .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
+ .accessfn = pmreg_access_xevcntr },
+ { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
+ .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
+ .type = ARM_CP_IO,
+ .fgt = FGT_PMEVCNTRN_EL0,
+ .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
+ .raw_readfn = pmevcntr_rawread,
+ .raw_writefn = pmevcntr_rawwrite },
+ { .name = pmevtyper_name, .cp = 15, .crn = 14,
+ .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
+ .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
+ .fgt = FGT_PMEVTYPERN_EL0,
+ .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
+ .accessfn = pmreg_access },
+ { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
+ .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
+ .fgt = FGT_PMEVTYPERN_EL0,
+ .type = ARM_CP_IO,
+ .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
+ .raw_writefn = pmevtyper_rawwrite },
+ };
+ define_arm_cp_regs(cpu, pmev_regs);
+ }
+ }
+ if (arm_feature(env, ARM_FEATURE_V7VE)) {
+ define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ const ARMCPRegInfo v8_pm_reginfo[] = {
+ { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .fgt = FGT_PMCEIDN_EL0,
+ .resetvalue = extract64(cpu->pmceid0, 0, 32) },
+ { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .fgt = FGT_PMCEIDN_EL0,
+ .resetvalue = cpu->pmceid0 },
+ { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .fgt = FGT_PMCEIDN_EL0,
+ .resetvalue = extract64(cpu->pmceid1, 0, 32) },
+ { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .fgt = FGT_PMCEIDN_EL0,
+ .resetvalue = cpu->pmceid1 },
+ };
+ define_arm_cp_regs(cpu, v8_pm_reginfo);
+ }
+
+ if (cpu_isar_feature(aa32_pmuv3p1, cpu)) {
+ ARMCPRegInfo v81_pmu_regs[] = {
+ { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .fgt = FGT_PMCEIDN_EL0,
+ .resetvalue = extract64(cpu->pmceid0, 32, 32) },
+ { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .fgt = FGT_PMCEIDN_EL0,
+ .resetvalue = extract64(cpu->pmceid1, 32, 32) },
+ };
+ define_arm_cp_regs(cpu, v81_pmu_regs);
+ }
+
+ if (cpu_isar_feature(any_pmuv3p4, cpu)) {
+ static const ARMCPRegInfo v84_pmmir = {
+ .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
+ .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .fgt = FGT_PMMIR_EL1,
+ .resetvalue = 0
+ };
+ define_one_arm_cp_reg(cpu, &v84_pmmir);
+ }
+}
diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
index c1a7ae3..c9506aa 100644
--- a/target/arm/cpregs.h
+++ b/target/arm/cpregs.h
@@ -1065,6 +1065,9 @@ void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
/* CPReadFn that can be used for read-as-zero behaviour */
uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri);
+/* CPReadFn that just reads the value from ri->fieldoffset */
+uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri);
+
/* CPWriteFn that just writes the value to ri->fieldoffset */
void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value);
diff --git a/target/arm/cpu-sysregs.h.inc b/target/arm/cpu-sysregs.h.inc
index cb99286..f48a9da 100644
--- a/target/arm/cpu-sysregs.h.inc
+++ b/target/arm/cpu-sysregs.h.inc
@@ -4,6 +4,8 @@ DEF(ID_AA64PFR1_EL1, 3, 0, 0, 4, 1)
DEF(ID_AA64SMFR0_EL1, 3, 0, 0, 4, 5)
DEF(ID_AA64DFR0_EL1, 3, 0, 0, 5, 0)
DEF(ID_AA64DFR1_EL1, 3, 0, 0, 5, 1)
+DEF(ID_AA64AFR0_EL1, 3, 0, 0, 5, 4)
+DEF(ID_AA64AFR1_EL1, 3, 0, 0, 5, 5)
DEF(ID_AA64ISAR0_EL1, 3, 0, 0, 6, 0)
DEF(ID_AA64ISAR1_EL1, 3, 0, 0, 6, 1)
DEF(ID_AA64ISAR2_EL1, 3, 0, 0, 6, 2)
@@ -14,6 +16,7 @@ DEF(ID_AA64MMFR3_EL1, 3, 0, 0, 7, 3)
DEF(ID_PFR0_EL1, 3, 0, 0, 1, 0)
DEF(ID_PFR1_EL1, 3, 0, 0, 1, 1)
DEF(ID_DFR0_EL1, 3, 0, 0, 1, 2)
+DEF(ID_AFR0_EL1, 3, 0, 0, 1, 3)
DEF(ID_MMFR0_EL1, 3, 0, 0, 1, 4)
DEF(ID_MMFR1_EL1, 3, 0, 0, 1, 5)
DEF(ID_MMFR2_EL1, 3, 0, 0, 1, 6)
@@ -32,5 +35,6 @@ DEF(MVFR2_EL1, 3, 0, 0, 3, 2)
DEF(ID_PFR2_EL1, 3, 0, 0, 3, 4)
DEF(ID_DFR1_EL1, 3, 0, 0, 3, 5)
DEF(ID_MMFR5_EL1, 3, 0, 0, 3, 6)
+DEF(CLIDR_EL1, 3, 1, 0, 0, 1)
DEF(ID_AA64ZFR0_EL1, 3, 0, 0, 4, 4)
DEF(CTR_EL0, 3, 3, 0, 0, 1)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index c8cf0ab..dc9b6dc 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1082,10 +1082,6 @@ struct ArchCPU {
uint32_t reset_sctlr;
uint64_t pmceid0;
uint64_t pmceid1;
- uint32_t id_afr0;
- uint64_t id_aa64afr0;
- uint64_t id_aa64afr1;
- uint64_t clidr;
uint64_t mp_affinity; /* MP ID without feature bits */
/* The elements of this array are the CCSIDR values for each cache,
* in the order L1DCache, L1ICache, L2DCache, L2ICache, etc.
@@ -2948,7 +2944,7 @@ static inline bool arm_v7m_csselr_razwi(ARMCPU *cpu)
/* If all the CLIDR.Ctypem bits are 0 there are no caches, and
* CSSELR is RAZ/WI.
*/
- return (cpu->clidr & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0;
+ return (GET_IDREG(&cpu->isar, CLIDR) & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0;
}
static inline bool arm_sctlr_b(CPUARMState *env)
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index bd33d6c..26cf7e6 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -663,7 +663,7 @@ static void aarch64_a57_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x00000131);
SET_IDREG(isar, ID_PFR1, 0x00011011);
SET_IDREG(isar, ID_DFR0, 0x03010066);
- cpu->id_afr0 = 0x00000000;
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
SET_IDREG(isar, ID_MMFR0, 0x10101105);
SET_IDREG(isar, ID_MMFR1, 0x40000000);
SET_IDREG(isar, ID_MMFR2, 0x01260000);
@@ -683,7 +683,7 @@ static void aarch64_a57_initfn(Object *obj)
cpu->isar.dbgdevid = 0x01110f13;
cpu->isar.dbgdevid1 = 0x2;
cpu->isar.reset_pmcr_el0 = 0x41013000;
- cpu->clidr = 0x0a200023;
+ SET_IDREG(isar, CLIDR, 0x0a200023);
/* 32KB L1 dcache */
cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
/* 48KB L1 icache */
@@ -725,7 +725,7 @@ static void aarch64_a53_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x00000131);
SET_IDREG(isar, ID_PFR1, 0x00011011);
SET_IDREG(isar, ID_DFR0, 0x03010066);
- cpu->id_afr0 = 0x00000000;
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
SET_IDREG(isar, ID_MMFR0, 0x10101105);
SET_IDREG(isar, ID_MMFR1, 0x40000000);
SET_IDREG(isar, ID_MMFR2, 0x01260000);
@@ -745,7 +745,7 @@ static void aarch64_a53_initfn(Object *obj)
cpu->isar.dbgdevid = 0x00110f13;
cpu->isar.dbgdevid1 = 0x1;
cpu->isar.reset_pmcr_el0 = 0x41033000;
- cpu->clidr = 0x0a200023;
+ SET_IDREG(isar, CLIDR, 0x0a200023);
/* 32KB L1 dcache */
cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
/* 32KB L1 icache */
diff --git a/target/arm/helper.c b/target/arm/helper.c
index b3f0d6f..0c1299f 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -38,11 +38,9 @@
#define HELPER_H "tcg/helper.h"
#include "exec/helper-proto.h.inc"
-#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
-
static void switch_mode(CPUARMState *env, int mode);
-static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
+uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
assert(ri->fieldoffset);
if (cpreg_field_is_64bit(ri)) {
@@ -270,7 +268,7 @@ void init_cpreg_list(ARMCPU *cpu)
g_list_free(keys);
}
-static bool arm_pan_enabled(CPUARMState *env)
+bool arm_pan_enabled(CPUARMState *env)
{
if (is_a64(env)) {
if ((arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1)) {
@@ -319,25 +317,6 @@ static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
return CP_ACCESS_UNDEFINED;
}
-/*
- * Check for traps to performance monitor registers, which are controlled
- * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
- */
-static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- int el = arm_current_el(env);
- uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
-
- if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
- return CP_ACCESS_TRAP_EL2;
- }
- if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
- return CP_ACCESS_TRAP_EL3;
- }
- return CP_ACCESS_OK;
-}
-
/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
@@ -681,283 +660,6 @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
.resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
};
-typedef struct pm_event {
- uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
- /* If the event is supported on this CPU (used to generate PMCEID[01]) */
- bool (*supported)(CPUARMState *);
- /*
- * Retrieve the current count of the underlying event. The programmed
- * counters hold a difference from the return value from this function
- */
- uint64_t (*get_count)(CPUARMState *);
- /*
- * Return how many nanoseconds it will take (at a minimum) for count events
- * to occur. A negative value indicates the counter will never overflow, or
- * that the counter has otherwise arranged for the overflow bit to be set
- * and the PMU interrupt to be raised on overflow.
- */
- int64_t (*ns_per_count)(uint64_t);
-} pm_event;
-
-static bool event_always_supported(CPUARMState *env)
-{
- return true;
-}
-
-static uint64_t swinc_get_count(CPUARMState *env)
-{
- /*
- * SW_INCR events are written directly to the pmevcntr's by writes to
- * PMSWINC, so there is no underlying count maintained by the PMU itself
- */
- return 0;
-}
-
-static int64_t swinc_ns_per(uint64_t ignored)
-{
- return -1;
-}
-
-/*
- * Return the underlying cycle count for the PMU cycle counters. If we're in
- * usermode, simply return 0.
- */
-static uint64_t cycles_get_count(CPUARMState *env)
-{
-#ifndef CONFIG_USER_ONLY
- return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
- ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
-#else
- return cpu_get_host_ticks();
-#endif
-}
-
-#ifndef CONFIG_USER_ONLY
-static int64_t cycles_ns_per(uint64_t cycles)
-{
- return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
-}
-
-static bool instructions_supported(CPUARMState *env)
-{
- /* Precise instruction counting */
- return icount_enabled() == ICOUNT_PRECISE;
-}
-
-static uint64_t instructions_get_count(CPUARMState *env)
-{
- assert(icount_enabled() == ICOUNT_PRECISE);
- return (uint64_t)icount_get_raw();
-}
-
-static int64_t instructions_ns_per(uint64_t icount)
-{
- assert(icount_enabled() == ICOUNT_PRECISE);
- return icount_to_ns((int64_t)icount);
-}
-#endif
-
-static bool pmuv3p1_events_supported(CPUARMState *env)
-{
- /* For events which are supported in any v8.1 PMU */
- return cpu_isar_feature(any_pmuv3p1, env_archcpu(env));
-}
-
-static bool pmuv3p4_events_supported(CPUARMState *env)
-{
- /* For events which are supported in any v8.1 PMU */
- return cpu_isar_feature(any_pmuv3p4, env_archcpu(env));
-}
-
-static uint64_t zero_event_get_count(CPUARMState *env)
-{
- /* For events which on QEMU never fire, so their count is always zero */
- return 0;
-}
-
-static int64_t zero_event_ns_per(uint64_t cycles)
-{
- /* An event which never fires can never overflow */
- return -1;
-}
-
-static const pm_event pm_events[] = {
- { .number = 0x000, /* SW_INCR */
- .supported = event_always_supported,
- .get_count = swinc_get_count,
- .ns_per_count = swinc_ns_per,
- },
-#ifndef CONFIG_USER_ONLY
- { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
- .supported = instructions_supported,
- .get_count = instructions_get_count,
- .ns_per_count = instructions_ns_per,
- },
- { .number = 0x011, /* CPU_CYCLES, Cycle */
- .supported = event_always_supported,
- .get_count = cycles_get_count,
- .ns_per_count = cycles_ns_per,
- },
-#endif
- { .number = 0x023, /* STALL_FRONTEND */
- .supported = pmuv3p1_events_supported,
- .get_count = zero_event_get_count,
- .ns_per_count = zero_event_ns_per,
- },
- { .number = 0x024, /* STALL_BACKEND */
- .supported = pmuv3p1_events_supported,
- .get_count = zero_event_get_count,
- .ns_per_count = zero_event_ns_per,
- },
- { .number = 0x03c, /* STALL */
- .supported = pmuv3p4_events_supported,
- .get_count = zero_event_get_count,
- .ns_per_count = zero_event_ns_per,
- },
-};
-
-/*
- * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
- * events (i.e. the statistical profiling extension), this implementation
- * should first be updated to something sparse instead of the current
- * supported_event_map[] array.
- */
-#define MAX_EVENT_ID 0x3c
-#define UNSUPPORTED_EVENT UINT16_MAX
-static uint16_t supported_event_map[MAX_EVENT_ID + 1];
-
-/*
- * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
- * of ARM event numbers to indices in our pm_events array.
- *
- * Note: Events in the 0x40XX range are not currently supported.
- */
-void pmu_init(ARMCPU *cpu)
-{
- unsigned int i;
-
- /*
- * Empty supported_event_map and cpu->pmceid[01] before adding supported
- * events to them
- */
- for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
- supported_event_map[i] = UNSUPPORTED_EVENT;
- }
- cpu->pmceid0 = 0;
- cpu->pmceid1 = 0;
-
- for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
- const pm_event *cnt = &pm_events[i];
- assert(cnt->number <= MAX_EVENT_ID);
- /* We do not currently support events in the 0x40xx range */
- assert(cnt->number <= 0x3f);
-
- if (cnt->supported(&cpu->env)) {
- supported_event_map[cnt->number] = i;
- uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
- if (cnt->number & 0x20) {
- cpu->pmceid1 |= event_mask;
- } else {
- cpu->pmceid0 |= event_mask;
- }
- }
- }
-}
-
-/*
- * Check at runtime whether a PMU event is supported for the current machine
- */
-static bool event_supported(uint16_t number)
-{
- if (number > MAX_EVENT_ID) {
- return false;
- }
- return supported_event_map[number] != UNSUPPORTED_EVENT;
-}
-
-static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- /*
- * Performance monitor registers user accessibility is controlled
- * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
- * trapping to EL2 or EL3 for other accesses.
- */
- int el = arm_current_el(env);
- uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
-
- if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
- return CP_ACCESS_TRAP_EL1;
- }
- if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
- return CP_ACCESS_TRAP_EL2;
- }
- if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
- return CP_ACCESS_TRAP_EL3;
- }
-
- return CP_ACCESS_OK;
-}
-
-static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
- const ARMCPRegInfo *ri,
- bool isread)
-{
- /* ER: event counter read trap control */
- if (arm_feature(env, ARM_FEATURE_V8)
- && arm_current_el(env) == 0
- && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
- && isread) {
- return CP_ACCESS_OK;
- }
-
- return pmreg_access(env, ri, isread);
-}
-
-static CPAccessResult pmreg_access_swinc(CPUARMState *env,
- const ARMCPRegInfo *ri,
- bool isread)
-{
- /* SW: software increment write trap control */
- if (arm_feature(env, ARM_FEATURE_V8)
- && arm_current_el(env) == 0
- && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
- && !isread) {
- return CP_ACCESS_OK;
- }
-
- return pmreg_access(env, ri, isread);
-}
-
-static CPAccessResult pmreg_access_selr(CPUARMState *env,
- const ARMCPRegInfo *ri,
- bool isread)
-{
- /* ER: event counter read trap control */
- if (arm_feature(env, ARM_FEATURE_V8)
- && arm_current_el(env) == 0
- && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
- return CP_ACCESS_OK;
- }
-
- return pmreg_access(env, ri, isread);
-}
-
-static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
- const ARMCPRegInfo *ri,
- bool isread)
-{
- /* CR: cycle counter read trap control */
- if (arm_feature(env, ARM_FEATURE_V8)
- && arm_current_el(env) == 0
- && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
- && isread) {
- return CP_ACCESS_OK;
- }
-
- return pmreg_access(env, ri, isread);
-}
-
/*
* Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at.
* We use these to decide whether we need to wrap a write to MDCR_EL2
@@ -967,684 +669,6 @@ static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
(MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)
-/*
- * Returns true if the counter (pass 31 for PMCCNTR) should count events using
- * the current EL, security state, and register configuration.
- */
-static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
-{
- uint64_t filter;
- bool e, p, u, nsk, nsu, nsh, m;
- bool enabled, prohibited = false, filtered;
- bool secure = arm_is_secure(env);
- int el = arm_current_el(env);
- uint64_t mdcr_el2;
- uint8_t hpmn;
-
- /*
- * We might be called for M-profile cores where MDCR_EL2 doesn't
- * exist and arm_mdcr_el2_eff() will assert, so this early-exit check
- * must be before we read that value.
- */
- if (!arm_feature(env, ARM_FEATURE_PMU)) {
- return false;
- }
-
- mdcr_el2 = arm_mdcr_el2_eff(env);
- hpmn = mdcr_el2 & MDCR_HPMN;
-
- if (!arm_feature(env, ARM_FEATURE_EL2) ||
- (counter < hpmn || counter == 31)) {
- e = env->cp15.c9_pmcr & PMCRE;
- } else {
- e = mdcr_el2 & MDCR_HPME;
- }
- enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
-
- /* Is event counting prohibited? */
- if (el == 2 && (counter < hpmn || counter == 31)) {
- prohibited = mdcr_el2 & MDCR_HPMD;
- }
- if (secure) {
- prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
- }
-
- if (counter == 31) {
- /*
- * The cycle counter defaults to running. PMCR.DP says "disable
- * the cycle counter when event counting is prohibited".
- * Some MDCR bits disable the cycle counter specifically.
- */
- prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP;
- if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
- if (secure) {
- prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
- }
- if (el == 2) {
- prohibited = prohibited || (mdcr_el2 & MDCR_HCCD);
- }
- }
- }
-
- if (counter == 31) {
- filter = env->cp15.pmccfiltr_el0;
- } else {
- filter = env->cp15.c14_pmevtyper[counter];
- }
-
- p = filter & PMXEVTYPER_P;
- u = filter & PMXEVTYPER_U;
- nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
- nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
- nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
- m = arm_el_is_aa64(env, 1) &&
- arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
-
- if (el == 0) {
- filtered = secure ? u : u != nsu;
- } else if (el == 1) {
- filtered = secure ? p : p != nsk;
- } else if (el == 2) {
- filtered = !nsh;
- } else { /* EL3 */
- filtered = m != p;
- }
-
- if (counter != 31) {
- /*
- * If not checking PMCCNTR, ensure the counter is setup to an event we
- * support
- */
- uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
- if (!event_supported(event)) {
- return false;
- }
- }
-
- return enabled && !prohibited && !filtered;
-}
-
-static void pmu_update_irq(CPUARMState *env)
-{
- ARMCPU *cpu = env_archcpu(env);
- qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
- (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
-}
-
-static bool pmccntr_clockdiv_enabled(CPUARMState *env)
-{
- /*
- * Return true if the clock divider is enabled and the cycle counter
- * is supposed to tick only once every 64 clock cycles. This is
- * controlled by PMCR.D, but if PMCR.LC is set to enable the long
- * (64-bit) cycle counter PMCR.D has no effect.
- */
- return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
-}
-
-static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
-{
- /* Return true if the specified event counter is configured to be 64 bit */
-
- /* This isn't intended to be used with the cycle counter */
- assert(counter < 31);
-
- if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
- return false;
- }
-
- if (arm_feature(env, ARM_FEATURE_EL2)) {
- /*
- * MDCR_EL2.HLP still applies even when EL2 is disabled in the
- * current security state, so we don't use arm_mdcr_el2_eff() here.
- */
- bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
- int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
-
- if (counter >= hpmn) {
- return hlp;
- }
- }
- return env->cp15.c9_pmcr & PMCRLP;
-}
-
-/*
- * Ensure c15_ccnt is the guest-visible count so that operations such as
- * enabling/disabling the counter or filtering, modifying the count itself,
- * etc. can be done logically. This is essentially a no-op if the counter is
- * not enabled at the time of the call.
- */
-static void pmccntr_op_start(CPUARMState *env)
-{
- uint64_t cycles = cycles_get_count(env);
-
- if (pmu_counter_enabled(env, 31)) {
- uint64_t eff_cycles = cycles;
- if (pmccntr_clockdiv_enabled(env)) {
- eff_cycles /= 64;
- }
-
- uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
-
- uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
- 1ull << 63 : 1ull << 31;
- if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
- env->cp15.c9_pmovsr |= (1ULL << 31);
- pmu_update_irq(env);
- }
-
- env->cp15.c15_ccnt = new_pmccntr;
- }
- env->cp15.c15_ccnt_delta = cycles;
-}
-
-/*
- * If PMCCNTR is enabled, recalculate the delta between the clock and the
- * guest-visible count. A call to pmccntr_op_finish should follow every call to
- * pmccntr_op_start.
- */
-static void pmccntr_op_finish(CPUARMState *env)
-{
- if (pmu_counter_enabled(env, 31)) {
-#ifndef CONFIG_USER_ONLY
- /* Calculate when the counter will next overflow */
- uint64_t remaining_cycles = -env->cp15.c15_ccnt;
- if (!(env->cp15.c9_pmcr & PMCRLC)) {
- remaining_cycles = (uint32_t)remaining_cycles;
- }
- int64_t overflow_in = cycles_ns_per(remaining_cycles);
-
- if (overflow_in > 0) {
- int64_t overflow_at;
-
- if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
- overflow_in, &overflow_at)) {
- ARMCPU *cpu = env_archcpu(env);
- timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
- }
- }
-#endif
-
- uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
- if (pmccntr_clockdiv_enabled(env)) {
- prev_cycles /= 64;
- }
- env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
- }
-}
-
-static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
-{
-
- uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
- uint64_t count = 0;
- if (event_supported(event)) {
- uint16_t event_idx = supported_event_map[event];
- count = pm_events[event_idx].get_count(env);
- }
-
- if (pmu_counter_enabled(env, counter)) {
- uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
- uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
- 1ULL << 63 : 1ULL << 31;
-
- if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
- env->cp15.c9_pmovsr |= (1 << counter);
- pmu_update_irq(env);
- }
- env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
- }
- env->cp15.c14_pmevcntr_delta[counter] = count;
-}
-
-static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
-{
- if (pmu_counter_enabled(env, counter)) {
-#ifndef CONFIG_USER_ONLY
- uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
- uint16_t event_idx = supported_event_map[event];
- uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
- int64_t overflow_in;
-
- if (!pmevcntr_is_64_bit(env, counter)) {
- delta = (uint32_t)delta;
- }
- overflow_in = pm_events[event_idx].ns_per_count(delta);
-
- if (overflow_in > 0) {
- int64_t overflow_at;
-
- if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
- overflow_in, &overflow_at)) {
- ARMCPU *cpu = env_archcpu(env);
- timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
- }
- }
-#endif
-
- env->cp15.c14_pmevcntr_delta[counter] -=
- env->cp15.c14_pmevcntr[counter];
- }
-}
-
-void pmu_op_start(CPUARMState *env)
-{
- unsigned int i;
- pmccntr_op_start(env);
- for (i = 0; i < pmu_num_counters(env); i++) {
- pmevcntr_op_start(env, i);
- }
-}
-
-void pmu_op_finish(CPUARMState *env)
-{
- unsigned int i;
- pmccntr_op_finish(env);
- for (i = 0; i < pmu_num_counters(env); i++) {
- pmevcntr_op_finish(env, i);
- }
-}
-
-void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
-{
- pmu_op_start(&cpu->env);
-}
-
-void pmu_post_el_change(ARMCPU *cpu, void *ignored)
-{
- pmu_op_finish(&cpu->env);
-}
-
-void arm_pmu_timer_cb(void *opaque)
-{
- ARMCPU *cpu = opaque;
-
- /*
- * Update all the counter values based on the current underlying counts,
- * triggering interrupts to be raised, if necessary. pmu_op_finish() also
- * has the effect of setting the cpu->pmu_timer to the next earliest time a
- * counter may expire.
- */
- pmu_op_start(&cpu->env);
- pmu_op_finish(&cpu->env);
-}
-
-static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmu_op_start(env);
-
- if (value & PMCRC) {
- /* The counter has been reset */
- env->cp15.c15_ccnt = 0;
- }
-
- if (value & PMCRP) {
- unsigned int i;
- for (i = 0; i < pmu_num_counters(env); i++) {
- env->cp15.c14_pmevcntr[i] = 0;
- }
- }
-
- env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
- env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);
-
- pmu_op_finish(env);
-}
-
-static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- uint64_t pmcr = env->cp15.c9_pmcr;
-
- /*
- * If EL2 is implemented and enabled for the current security state, reads
- * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN.
- */
- if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) {
- pmcr &= ~PMCRN_MASK;
- pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT;
- }
-
- return pmcr;
-}
-
-static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- unsigned int i;
- uint64_t overflow_mask, new_pmswinc;
-
- for (i = 0; i < pmu_num_counters(env); i++) {
- /* Increment a counter's count iff: */
- if ((value & (1 << i)) && /* counter's bit is set */
- /* counter is enabled and not filtered */
- pmu_counter_enabled(env, i) &&
- /* counter is SW_INCR */
- (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
- pmevcntr_op_start(env, i);
-
- /*
- * Detect if this write causes an overflow since we can't predict
- * PMSWINC overflows like we can for other events
- */
- new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
-
- overflow_mask = pmevcntr_is_64_bit(env, i) ?
- 1ULL << 63 : 1ULL << 31;
-
- if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
- env->cp15.c9_pmovsr |= (1 << i);
- pmu_update_irq(env);
- }
-
- env->cp15.c14_pmevcntr[i] = new_pmswinc;
-
- pmevcntr_op_finish(env, i);
- }
- }
-}
-
-static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- uint64_t ret;
- pmccntr_op_start(env);
- ret = env->cp15.c15_ccnt;
- pmccntr_op_finish(env);
- return ret;
-}
-
-static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /*
- * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
- * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
- * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
- * accessed.
- */
- env->cp15.c9_pmselr = value & 0x1f;
-}
-
-static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmccntr_op_start(env);
- env->cp15.c15_ccnt = value;
- pmccntr_op_finish(env);
-}
-
-static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- uint64_t cur_val = pmccntr_read(env, NULL);
-
- pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
-}
-
-static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmccntr_op_start(env);
- env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
- pmccntr_op_finish(env);
-}
-
-static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmccntr_op_start(env);
- /* M is not accessible from AArch32 */
- env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
- (value & PMCCFILTR);
- pmccntr_op_finish(env);
-}
-
-static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- /* M is not visible in AArch32 */
- return env->cp15.pmccfiltr_el0 & PMCCFILTR;
-}
-
-static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmu_op_start(env);
- value &= pmu_counter_mask(env);
- env->cp15.c9_pmcnten |= value;
- pmu_op_finish(env);
-}
-
-static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmu_op_start(env);
- value &= pmu_counter_mask(env);
- env->cp15.c9_pmcnten &= ~value;
- pmu_op_finish(env);
-}
-
-static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- value &= pmu_counter_mask(env);
- env->cp15.c9_pmovsr &= ~value;
- pmu_update_irq(env);
-}
-
-static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- value &= pmu_counter_mask(env);
- env->cp15.c9_pmovsr |= value;
- pmu_update_irq(env);
-}
-
-static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value, const uint8_t counter)
-{
- if (counter == 31) {
- pmccfiltr_write(env, ri, value);
- } else if (counter < pmu_num_counters(env)) {
- pmevcntr_op_start(env, counter);
-
- /*
- * If this counter's event type is changing, store the current
- * underlying count for the new type in c14_pmevcntr_delta[counter] so
- * pmevcntr_op_finish has the correct baseline when it converts back to
- * a delta.
- */
- uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
- PMXEVTYPER_EVTCOUNT;
- uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
- if (old_event != new_event) {
- uint64_t count = 0;
- if (event_supported(new_event)) {
- uint16_t event_idx = supported_event_map[new_event];
- count = pm_events[event_idx].get_count(env);
- }
- env->cp15.c14_pmevcntr_delta[counter] = count;
- }
-
- env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
- pmevcntr_op_finish(env, counter);
- }
- /*
- * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
- * PMSELR value is equal to or greater than the number of implemented
- * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
- */
-}
-
-static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
- const uint8_t counter)
-{
- if (counter == 31) {
- return env->cp15.pmccfiltr_el0;
- } else if (counter < pmu_num_counters(env)) {
- return env->cp15.c14_pmevtyper[counter];
- } else {
- /*
- * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
- * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
- */
- return 0;
- }
-}
-
-static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- pmevtyper_write(env, ri, value, counter);
-}
-
-static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- env->cp15.c14_pmevtyper[counter] = value;
-
- /*
- * pmevtyper_rawwrite is called between a pair of pmu_op_start and
- * pmu_op_finish calls when loading saved state for a migration. Because
- * we're potentially updating the type of event here, the value written to
- * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
- * different counter type. Therefore, we need to set this value to the
- * current count for the counter type we're writing so that pmu_op_finish
- * has the correct count for its calculation.
- */
- uint16_t event = value & PMXEVTYPER_EVTCOUNT;
- if (event_supported(event)) {
- uint16_t event_idx = supported_event_map[event];
- env->cp15.c14_pmevcntr_delta[counter] =
- pm_events[event_idx].get_count(env);
- }
-}
-
-static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- return pmevtyper_read(env, ri, counter);
-}
-
-static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
-}
-
-static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
-}
-
-static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value, uint8_t counter)
-{
- if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
- /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
- value &= MAKE_64BIT_MASK(0, 32);
- }
- if (counter < pmu_num_counters(env)) {
- pmevcntr_op_start(env, counter);
- env->cp15.c14_pmevcntr[counter] = value;
- pmevcntr_op_finish(env, counter);
- }
- /*
- * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
- * are CONSTRAINED UNPREDICTABLE.
- */
-}
-
-static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
- uint8_t counter)
-{
- if (counter < pmu_num_counters(env)) {
- uint64_t ret;
- pmevcntr_op_start(env, counter);
- ret = env->cp15.c14_pmevcntr[counter];
- pmevcntr_op_finish(env, counter);
- if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
- /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
- ret &= MAKE_64BIT_MASK(0, 32);
- }
- return ret;
- } else {
- /*
- * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
- * are CONSTRAINED UNPREDICTABLE.
- */
- return 0;
- }
-}
-
-static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- pmevcntr_write(env, ri, value, counter);
-}
-
-static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- return pmevcntr_read(env, ri, counter);
-}
-
-static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- assert(counter < pmu_num_counters(env));
- env->cp15.c14_pmevcntr[counter] = value;
- pmevcntr_write(env, ri, value, counter);
-}
-
-static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- assert(counter < pmu_num_counters(env));
- return env->cp15.c14_pmevcntr[counter];
-}
-
-static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
-}
-
-static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
-}
-
-static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- if (arm_feature(env, ARM_FEATURE_V8)) {
- env->cp15.c9_pmuserenr = value & 0xf;
- } else {
- env->cp15.c9_pmuserenr = value & 1;
- }
-}
-
-static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /* We have no event counters so only the C bit can be changed */
- value &= pmu_counter_mask(env);
- env->cp15.c9_pminten |= value;
- pmu_update_irq(env);
-}
-
-static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- value &= pmu_counter_mask(env);
- env->cp15.c9_pminten &= ~value;
- pmu_update_irq(env);
-}
-
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -1874,171 +898,6 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
/* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
{ .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
.access = PL1_W, .type = ARM_CP_NOP },
- /*
- * Performance monitors are implementation defined in v7,
- * but with an ARM recommended set of registers, which we
- * follow.
- *
- * Performance registers fall into three categories:
- * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
- * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
- * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
- * For the cases controlled by PMUSERENR we must set .access to PL0_RW
- * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
- */
- { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
- .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
- .writefn = pmcntenset_write,
- .accessfn = pmreg_access,
- .fgt = FGT_PMCNTEN,
- .raw_writefn = raw_write },
- { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMCNTEN,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
- .writefn = pmcntenset_write, .raw_writefn = raw_write },
- { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
- .access = PL0_RW,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
- .accessfn = pmreg_access,
- .fgt = FGT_PMCNTEN,
- .writefn = pmcntenclr_write, .raw_writefn = raw_write,
- .type = ARM_CP_ALIAS | ARM_CP_IO },
- { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMCNTEN,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
- .writefn = pmcntenclr_write, .raw_writefn = raw_write },
- { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
- .access = PL0_RW, .type = ARM_CP_IO,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
- .accessfn = pmreg_access,
- .fgt = FGT_PMOVS,
- .writefn = pmovsr_write,
- .raw_writefn = raw_write },
- { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMOVS,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
- .writefn = pmovsr_write,
- .raw_writefn = raw_write },
- { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
- .access = PL0_W, .accessfn = pmreg_access_swinc,
- .fgt = FGT_PMSWINC_EL0,
- .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .writefn = pmswinc_write },
- { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
- .access = PL0_W, .accessfn = pmreg_access_swinc,
- .fgt = FGT_PMSWINC_EL0,
- .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .writefn = pmswinc_write },
- { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
- .access = PL0_RW, .type = ARM_CP_ALIAS,
- .fgt = FGT_PMSELR_EL0,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
- .accessfn = pmreg_access_selr, .writefn = pmselr_write,
- .raw_writefn = raw_write},
- { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
- .access = PL0_RW, .accessfn = pmreg_access_selr,
- .fgt = FGT_PMSELR_EL0,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
- .writefn = pmselr_write, .raw_writefn = raw_write, },
- { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
- .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fgt = FGT_PMCCNTR_EL0,
- .readfn = pmccntr_read, .writefn = pmccntr_write32,
- .accessfn = pmreg_access_ccntr },
- { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
- .access = PL0_RW, .accessfn = pmreg_access_ccntr,
- .fgt = FGT_PMCCNTR_EL0,
- .type = ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
- .readfn = pmccntr_read, .writefn = pmccntr_write,
- .raw_readfn = raw_read, .raw_writefn = raw_write, },
- { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
- .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMCCFILTR_EL0,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .resetvalue = 0, },
- { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
- .writefn = pmccfiltr_write, .raw_writefn = raw_write,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMCCFILTR_EL0,
- .type = ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
- .resetvalue = 0, },
- { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
- .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .accessfn = pmreg_access,
- .fgt = FGT_PMEVTYPERN_EL0,
- .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
- { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
- .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .accessfn = pmreg_access,
- .fgt = FGT_PMEVTYPERN_EL0,
- .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
- { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
- .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .accessfn = pmreg_access_xevcntr,
- .fgt = FGT_PMEVCNTRN_EL0,
- .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
- { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
- .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .accessfn = pmreg_access_xevcntr,
- .fgt = FGT_PMEVCNTRN_EL0,
- .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
- { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
- .access = PL0_R | PL1_RW, .accessfn = access_tpm,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
- .resetvalue = 0,
- .writefn = pmuserenr_write, .raw_writefn = raw_write },
- { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
- .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
- .resetvalue = 0,
- .writefn = pmuserenr_write, .raw_writefn = raw_write },
- { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
- .access = PL1_RW, .accessfn = access_tpm,
- .fgt = FGT_PMINTEN,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
- .resetvalue = 0,
- .writefn = pmintenset_write, .raw_writefn = raw_write },
- { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
- .access = PL1_RW, .accessfn = access_tpm,
- .fgt = FGT_PMINTEN,
- .type = ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
- .writefn = pmintenset_write, .raw_writefn = raw_write,
- .resetvalue = 0x0 },
- { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
- .access = PL1_RW, .accessfn = access_tpm,
- .fgt = FGT_PMINTEN,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
- .writefn = pmintenclr_write, .raw_writefn = raw_write },
- { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
- .access = PL1_RW, .accessfn = access_tpm,
- .fgt = FGT_PMINTEN,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
- .writefn = pmintenclr_write, .raw_writefn = raw_write },
{ .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
.access = PL1_R,
@@ -2121,25 +980,6 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
};
-static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
- /* PMOVSSET is not implemented in v7 before v7ve */
- { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMOVS,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
- .writefn = pmovsset_write,
- .raw_writefn = raw_write },
- { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMOVS,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
- .writefn = pmovsset_write,
- .raw_writefn = raw_write },
-};
-
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -3448,402 +2288,6 @@ static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
}
}
-#ifndef CONFIG_USER_ONLY
-/* get_phys_addr() isn't present for user-mode-only targets */
-
-static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- if (ri->opc2 & 4) {
- /*
- * The ATS12NSO* operations must trap to EL3 or EL2 if executed in
- * Secure EL1 (which can only happen if EL3 is AArch64).
- * They are simply UNDEF if executed from NS EL1.
- * They function normally from EL2 or EL3.
- */
- if (arm_current_el(env) == 1) {
- if (arm_is_secure_below_el3(env)) {
- if (env->cp15.scr_el3 & SCR_EEL2) {
- return CP_ACCESS_TRAP_EL2;
- }
- return CP_ACCESS_TRAP_EL3;
- }
- return CP_ACCESS_UNDEFINED;
- }
- }
- return CP_ACCESS_OK;
-}
-
-#ifdef CONFIG_TCG
-static int par_el1_shareability(GetPhysAddrResult *res)
-{
- /*
- * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC
- * memory -- see pseudocode PAREncodeShareability().
- */
- if (((res->cacheattrs.attrs & 0xf0) == 0) ||
- res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) {
- return 2;
- }
- return res->cacheattrs.shareability;
-}
-
-static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- ARMSecuritySpace ss)
-{
- bool ret;
- uint64_t par64;
- bool format64 = false;
- ARMMMUFaultInfo fi = {};
- GetPhysAddrResult res = {};
-
- /*
- * I_MXTJT: Granule protection checks are not performed on the final
- * address of a successful translation. This is a translation not a
- * memory reference, so "memop = none = 0".
- */
- ret = get_phys_addr_with_space_nogpc(env, value, access_type, 0,
- mmu_idx, ss, &res, &fi);
-
- /*
- * ATS operations only do S1 or S1+S2 translations, so we never
- * have to deal with the ARMCacheAttrs format for S2 only.
- */
- assert(!res.cacheattrs.is_s2_format);
-
- if (ret) {
- /*
- * Some kinds of translation fault must cause exceptions rather
- * than being reported in the PAR.
- */
- int current_el = arm_current_el(env);
- int target_el;
- uint32_t syn, fsr, fsc;
- bool take_exc = false;
-
- if (fi.s1ptw && current_el == 1
- && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
- /*
- * Synchronous stage 2 fault on an access made as part of the
- * translation table walk for AT S1E0* or AT S1E1* insn
- * executed from NS EL1. If this is a synchronous external abort
- * and SCR_EL3.EA == 1, then we take a synchronous external abort
- * to EL3. Otherwise the fault is taken as an exception to EL2,
- * and HPFAR_EL2 holds the faulting IPA.
- */
- if (fi.type == ARMFault_SyncExternalOnWalk &&
- (env->cp15.scr_el3 & SCR_EA)) {
- target_el = 3;
- } else {
- env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
- if (arm_is_secure_below_el3(env) && fi.s1ns) {
- env->cp15.hpfar_el2 |= HPFAR_NS;
- }
- target_el = 2;
- }
- take_exc = true;
- } else if (fi.type == ARMFault_SyncExternalOnWalk) {
- /*
- * Synchronous external aborts during a translation table walk
- * are taken as Data Abort exceptions.
- */
- if (fi.stage2) {
- if (current_el == 3) {
- target_el = 3;
- } else {
- target_el = 2;
- }
- } else {
- target_el = exception_target_el(env);
- }
- take_exc = true;
- }
-
- if (take_exc) {
- /* Construct FSR and FSC using same logic as arm_deliver_fault() */
- if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
- arm_s1_regime_using_lpae_format(env, mmu_idx)) {
- fsr = arm_fi_to_lfsc(&fi);
- fsc = extract32(fsr, 0, 6);
- } else {
- fsr = arm_fi_to_sfsc(&fi);
- fsc = 0x3f;
- }
- /*
- * Report exception with ESR indicating a fault due to a
- * translation table walk for a cache maintenance instruction.
- */
- syn = syn_data_abort_no_iss(current_el == target_el, 0,
- fi.ea, 1, fi.s1ptw, 1, fsc);
- env->exception.vaddress = value;
- env->exception.fsr = fsr;
- raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
- }
- }
-
- if (is_a64(env)) {
- format64 = true;
- } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
- /*
- * ATS1Cxx:
- * * TTBCR.EAE determines whether the result is returned using the
- * 32-bit or the 64-bit PAR format
- * * Instructions executed in Hyp mode always use the 64bit format
- *
- * ATS1S2NSOxx uses the 64bit format if any of the following is true:
- * * The Non-secure TTBCR.EAE bit is set to 1
- * * The implementation includes EL2, and the value of HCR.VM is 1
- *
- * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
- *
- * ATS1Hx always uses the 64bit format.
- */
- format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
-
- if (arm_feature(env, ARM_FEATURE_EL2)) {
- if (mmu_idx == ARMMMUIdx_E10_0 ||
- mmu_idx == ARMMMUIdx_E10_1 ||
- mmu_idx == ARMMMUIdx_E10_1_PAN) {
- format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
- } else {
- format64 |= arm_current_el(env) == 2;
- }
- }
- }
-
- if (format64) {
- /* Create a 64-bit PAR */
- par64 = (1 << 11); /* LPAE bit always set */
- if (!ret) {
- par64 |= res.f.phys_addr & ~0xfffULL;
- if (!res.f.attrs.secure) {
- par64 |= (1 << 9); /* NS */
- }
- par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
- par64 |= par_el1_shareability(&res) << 7; /* SH */
- } else {
- uint32_t fsr = arm_fi_to_lfsc(&fi);
-
- par64 |= 1; /* F */
- par64 |= (fsr & 0x3f) << 1; /* FS */
- if (fi.stage2) {
- par64 |= (1 << 9); /* S */
- }
- if (fi.s1ptw) {
- par64 |= (1 << 8); /* PTW */
- }
- }
- } else {
- /*
- * fsr is a DFSR/IFSR value for the short descriptor
- * translation table format (with WnR always clear).
- * Convert it to a 32-bit PAR.
- */
- if (!ret) {
- /* We do not set any attribute bits in the PAR */
- if (res.f.lg_page_size == 24
- && arm_feature(env, ARM_FEATURE_V7)) {
- par64 = (res.f.phys_addr & 0xff000000) | (1 << 1);
- } else {
- par64 = res.f.phys_addr & 0xfffff000;
- }
- if (!res.f.attrs.secure) {
- par64 |= (1 << 9); /* NS */
- }
- } else {
- uint32_t fsr = arm_fi_to_sfsc(&fi);
-
- par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
- ((fsr & 0xf) << 1) | 1;
- }
- }
- return par64;
-}
-#endif /* CONFIG_TCG */
-
-static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
-{
-#ifdef CONFIG_TCG
- MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
- uint64_t par64;
- ARMMMUIdx mmu_idx;
- int el = arm_current_el(env);
- ARMSecuritySpace ss = arm_security_space(env);
-
- switch (ri->opc2 & 6) {
- case 0:
- /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
- switch (el) {
- case 3:
- if (ri->crm == 9 && arm_pan_enabled(env)) {
- mmu_idx = ARMMMUIdx_E30_3_PAN;
- } else {
- mmu_idx = ARMMMUIdx_E3;
- }
- break;
- case 2:
- g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
- /* fall through */
- case 1:
- if (ri->crm == 9 && arm_pan_enabled(env)) {
- mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
- } else {
- mmu_idx = ARMMMUIdx_Stage1_E1;
- }
- break;
- default:
- g_assert_not_reached();
- }
- break;
- case 2:
- /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
- switch (el) {
- case 3:
- mmu_idx = ARMMMUIdx_E30_0;
- break;
- case 2:
- g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
- mmu_idx = ARMMMUIdx_Stage1_E0;
- break;
- case 1:
- mmu_idx = ARMMMUIdx_Stage1_E0;
- break;
- default:
- g_assert_not_reached();
- }
- break;
- case 4:
- /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
- mmu_idx = ARMMMUIdx_E10_1;
- ss = ARMSS_NonSecure;
- break;
- case 6:
- /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
- mmu_idx = ARMMMUIdx_E10_0;
- ss = ARMSS_NonSecure;
- break;
- default:
- g_assert_not_reached();
- }
-
- par64 = do_ats_write(env, value, access_type, mmu_idx, ss);
-
- A32_BANKED_CURRENT_REG_SET(env, par, par64);
-#else
- /* Handled by hardware accelerator. */
- g_assert_not_reached();
-#endif /* CONFIG_TCG */
-}
-
-static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
-#ifdef CONFIG_TCG
- MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
- uint64_t par64;
-
- /* There is no SecureEL2 for AArch32. */
- par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2,
- ARMSS_NonSecure);
-
- A32_BANKED_CURRENT_REG_SET(env, par, par64);
-#else
- /* Handled by hardware accelerator. */
- g_assert_not_reached();
-#endif /* CONFIG_TCG */
-}
-
-static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- /*
- * R_NYXTL: instruction is UNDEFINED if it applies to an Exception level
- * lower than EL3 and the combination SCR_EL3.{NSE,NS} is reserved. This can
- * only happen when executing at EL3 because that combination also causes an
- * illegal exception return. We don't need to check FEAT_RME either, because
- * scr_write() ensures that the NSE bit is not set otherwise.
- */
- if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) {
- return CP_ACCESS_UNDEFINED;
- }
- return CP_ACCESS_OK;
-}
-
-static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- if (arm_current_el(env) == 3 &&
- !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
- return CP_ACCESS_UNDEFINED;
- }
- return at_e012_access(env, ri, isread);
-}
-
-static CPAccessResult at_s1e01_access(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_AT)) {
- return CP_ACCESS_TRAP_EL2;
- }
- return at_e012_access(env, ri, isread);
-}
-
-static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
-#ifdef CONFIG_TCG
- MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
- ARMMMUIdx mmu_idx;
- uint64_t hcr_el2 = arm_hcr_el2_eff(env);
- bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE);
- bool for_el3 = false;
- ARMSecuritySpace ss;
-
- switch (ri->opc2 & 6) {
- case 0:
- switch (ri->opc1) {
- case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
- if (ri->crm == 9 && arm_pan_enabled(env)) {
- mmu_idx = regime_e20 ?
- ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN;
- } else {
- mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1;
- }
- break;
- case 4: /* AT S1E2R, AT S1E2W */
- mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2;
- break;
- case 6: /* AT S1E3R, AT S1E3W */
- mmu_idx = ARMMMUIdx_E3;
- for_el3 = true;
- break;
- default:
- g_assert_not_reached();
- }
- break;
- case 2: /* AT S1E0R, AT S1E0W */
- mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0;
- break;
- case 4: /* AT S12E1R, AT S12E1W */
- mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1;
- break;
- case 6: /* AT S12E0R, AT S12E0W */
- mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0;
- break;
- default:
- g_assert_not_reached();
- }
-
- ss = for_el3 ? arm_security_space(env) : arm_security_space_below_el3(env);
- env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss);
-#else
- /* Handled by hardware accelerator. */
- g_assert_not_reached();
-#endif /* CONFIG_TCG */
-}
-#endif
-
/* Return basic MPU access permission bits. */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
@@ -5094,53 +3538,6 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
.fgt = FGT_DCCISW,
.access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
-#ifndef CONFIG_USER_ONLY
- /* 64 bit address translation operations */
- { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .fgt = FGT_ATS1E1R,
- .accessfn = at_s1e01_access, .writefn = ats_write64 },
- { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .fgt = FGT_ATS1E1W,
- .accessfn = at_s1e01_access, .writefn = ats_write64 },
- { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .fgt = FGT_ATS1E0R,
- .accessfn = at_s1e01_access, .writefn = ats_write64 },
- { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .fgt = FGT_ATS1E0W,
- .accessfn = at_s1e01_access, .writefn = ats_write64 },
- { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .accessfn = at_e012_access, .writefn = ats_write64 },
- { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .accessfn = at_e012_access, .writefn = ats_write64 },
- { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .accessfn = at_e012_access, .writefn = ats_write64 },
- { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .accessfn = at_e012_access, .writefn = ats_write64 },
- /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
- { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
- .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .writefn = ats_write64 },
- { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
- .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .writefn = ats_write64 },
{ .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
@@ -5148,7 +3545,6 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.fgt = FGT_PAR_EL1,
.fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
.writefn = par_write },
-#endif
/* 32 bit cache operations */
{ .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
.type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab },
@@ -5751,33 +4147,6 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
.access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
#ifndef CONFIG_USER_ONLY
- /*
- * Unlike the other EL2-related AT operations, these must
- * UNDEF from EL3 if EL2 is not implemented, which is why we
- * define them here rather than with the rest of the AT ops.
- */
- { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
- .access = PL2_W, .accessfn = at_s1e2_access,
- .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = ats_write64 },
- { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
- .access = PL2_W, .accessfn = at_s1e2_access,
- .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = ats_write64 },
- /*
- * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
- * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
- * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
- * to behave as if SCR.NS was 1.
- */
- { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
- .access = PL2_W,
- .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
- { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
- .access = PL2_W,
- .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
{ .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
/*
@@ -6827,105 +5196,6 @@ static const ARMCPRegInfo nmi_reginfo[] = {
.resetfn = arm_cp_reset_ignore },
};
-static void define_pmu_regs(ARMCPU *cpu)
-{
- /*
- * v7 performance monitor control register: same implementor
- * field as the main ID register, plus a configurable number of
- * event counters in addition to the cycle count register.
- */
- unsigned int i, pmcrn = pmu_num_counters(&cpu->env);
- ARMCPRegInfo pmcr = {
- .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
- .access = PL0_RW,
- .fgt = FGT_PMCR_EL0,
- .type = ARM_CP_IO | ARM_CP_ALIAS,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
- .accessfn = pmreg_access,
- .readfn = pmcr_read, .raw_readfn = raw_read,
- .writefn = pmcr_write, .raw_writefn = raw_write,
- };
- ARMCPRegInfo pmcr64 = {
- .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMCR_EL0,
- .type = ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
- .resetvalue = cpu->isar.reset_pmcr_el0,
- .readfn = pmcr_read, .raw_readfn = raw_read,
- .writefn = pmcr_write, .raw_writefn = raw_write,
- };
-
- define_one_arm_cp_reg(cpu, &pmcr);
- define_one_arm_cp_reg(cpu, &pmcr64);
- for (i = 0; i < pmcrn; i++) {
- char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
- char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
- char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
- char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
- ARMCPRegInfo pmev_regs[] = {
- { .name = pmevcntr_name, .cp = 15, .crn = 14,
- .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
- .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
- .fgt = FGT_PMEVCNTRN_EL0,
- .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
- .accessfn = pmreg_access_xevcntr },
- { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
- .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
- .type = ARM_CP_IO,
- .fgt = FGT_PMEVCNTRN_EL0,
- .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
- .raw_readfn = pmevcntr_rawread,
- .raw_writefn = pmevcntr_rawwrite },
- { .name = pmevtyper_name, .cp = 15, .crn = 14,
- .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
- .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
- .fgt = FGT_PMEVTYPERN_EL0,
- .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
- .accessfn = pmreg_access },
- { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
- .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMEVTYPERN_EL0,
- .type = ARM_CP_IO,
- .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
- .raw_writefn = pmevtyper_rawwrite },
- };
- define_arm_cp_regs(cpu, pmev_regs);
- g_free(pmevcntr_name);
- g_free(pmevcntr_el0_name);
- g_free(pmevtyper_name);
- g_free(pmevtyper_el0_name);
- }
- if (cpu_isar_feature(aa32_pmuv3p1, cpu)) {
- ARMCPRegInfo v81_pmu_regs[] = {
- { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
- .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMCEIDN_EL0,
- .resetvalue = extract64(cpu->pmceid0, 32, 32) },
- { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
- .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMCEIDN_EL0,
- .resetvalue = extract64(cpu->pmceid1, 32, 32) },
- };
- define_arm_cp_regs(cpu, v81_pmu_regs);
- }
- if (cpu_isar_feature(any_pmuv3p4, cpu)) {
- static const ARMCPRegInfo v84_pmmir = {
- .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
- .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMMIR_EL1,
- .resetvalue = 0
- };
- define_one_arm_cp_reg(cpu, &v84_pmmir);
- }
-}
-
#ifndef CONFIG_USER_ONLY
/*
* We don't know until after realize whether there's a GICv3
@@ -7704,32 +5974,6 @@ static const ARMCPRegInfo vhe_reginfo[] = {
#endif
};
-#ifndef CONFIG_USER_ONLY
-static const ARMCPRegInfo ats1e1_reginfo[] = {
- { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .fgt = FGT_ATS1E1RP,
- .accessfn = at_s1e01_access, .writefn = ats_write64 },
- { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .fgt = FGT_ATS1E1WP,
- .accessfn = at_s1e01_access, .writefn = ats_write64 },
-};
-
-static const ARMCPRegInfo ats1cp_reginfo[] = {
- { .name = "ATS1CPRP",
- .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .writefn = ats_write },
- { .name = "ATS1CPWP",
- .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .writefn = ats_write },
-};
-#endif
-
/*
* ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
* ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
@@ -7771,7 +6015,10 @@ void register_cp_regs_for_features(ARMCPU *cpu)
}
#ifndef CONFIG_USER_ONLY
- define_tlb_insn_regs(cpu);
+ if (tcg_enabled()) {
+ define_tlb_insn_regs(cpu);
+ define_at_insn_regs(cpu);
+ }
#endif
if (arm_feature(env, ARM_FEATURE_V6)) {
@@ -7809,7 +6056,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->id_afr0 },
+ .resetvalue = GET_IDREG(isar, ID_AFR0) },
{ .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -7879,9 +6126,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (arm_feature(env, ARM_FEATURE_V6K)) {
define_arm_cp_regs(cpu, v6k_cp_reginfo);
}
- if (arm_feature(env, ARM_FEATURE_V7VE)) {
- define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
- }
if (arm_feature(env, ARM_FEATURE_V7)) {
ARMCPRegInfo clidr = {
.name = "CLIDR", .state = ARM_CP_STATE_BOTH,
@@ -7889,12 +6133,11 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_tid4,
.fgt = FGT_CLIDR_EL1,
- .resetvalue = cpu->clidr
+ .resetvalue = GET_IDREG(isar, CLIDR)
};
define_one_arm_cp_reg(cpu, &clidr);
define_arm_cp_regs(cpu, v7_cp_reginfo);
define_debug_regs(cpu);
- define_pmu_regs(cpu);
} else {
define_arm_cp_regs(cpu, not_v7_cp_reginfo);
}
@@ -7987,12 +6230,12 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->id_aa64afr0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64AFR0) },
{ .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->id_aa64afr1 },
+ .resetvalue = GET_IDREG(isar, ID_AA64AFR1) },
{ .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -8150,26 +6393,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
.resetvalue = 0 },
- { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
- .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMCEIDN_EL0,
- .resetvalue = extract64(cpu->pmceid0, 0, 32) },
- { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMCEIDN_EL0,
- .resetvalue = cpu->pmceid0 },
- { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
- .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMCEIDN_EL0,
- .resetvalue = extract64(cpu->pmceid1, 0, 32) },
- { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMCEIDN_EL0,
- .resetvalue = cpu->pmceid1 },
};
#ifdef CONFIG_USER_ONLY
static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
@@ -8504,12 +6727,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
offsetoflow32(CPUARMState, cp15.par_ns) },
.writefn = par_write},
-#ifndef CONFIG_USER_ONLY
- /* This underdecoding is safe because the reginfo is NO_RAW. */
- { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
- .access = PL1_W, .accessfn = ats_access,
- .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
-#endif
};
/*
@@ -8915,14 +7132,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (cpu_isar_feature(aa64_pan, cpu)) {
define_one_arm_cp_reg(cpu, &pan_reginfo);
}
-#ifndef CONFIG_USER_ONLY
- if (cpu_isar_feature(aa64_ats1e1, cpu)) {
- define_arm_cp_regs(cpu, ats1e1_reginfo);
- }
- if (cpu_isar_feature(aa32_ats1e1, cpu)) {
- define_arm_cp_regs(cpu, ats1cp_reginfo);
- }
-#endif
if (cpu_isar_feature(aa64_uao, cpu)) {
define_one_arm_cp_reg(cpu, &uao_reginfo);
}
@@ -9022,6 +7231,8 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, ccsidr2_reginfo);
}
+ define_pm_cpregs(cpu);
+
#ifndef CONFIG_USER_ONLY
/*
* Register redirections and aliases must be done last,
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 21a8d67..c4765e4 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1871,6 +1871,10 @@ void define_debug_regs(ARMCPU *cpu);
/* Add the cpreg definitions for TLBI instructions */
void define_tlb_insn_regs(ARMCPU *cpu);
+/* Add the cpreg definitions for AT instructions */
+void define_at_insn_regs(ARMCPU *cpu);
+/* Add the cpreg definitions for the performance monitor registers */
+void define_pm_cpregs(ARMCPU *cpu);
/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
@@ -1981,5 +1985,6 @@ void vfp_clear_float_status_exc_flags(CPUARMState *env);
* specified by mask changing to the values in val.
*/
void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask);
+bool arm_pan_enabled(CPUARMState *env);
#endif
diff --git a/target/arm/kvm-stub.c b/target/arm/kvm-stub.c
index 34e57fa..c93462c 100644
--- a/target/arm/kvm-stub.c
+++ b/target/arm/kvm-stub.c
@@ -47,6 +47,11 @@ bool kvm_arm_mte_supported(void)
return false;
}
+bool kvm_arm_el2_supported(void)
+{
+ return false;
+}
+
/*
* These functions should never actually be called without KVM support.
*/
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index 426f8b1..6672344 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -230,7 +230,8 @@ static uint64_t idregs_sysreg_to_kvm_reg(ARMSysRegs sysreg)
}
/* read a sysreg value and store it in the idregs */
-static int get_host_cpu_reg(int fd, ARMHostCPUFeatures *ahcf, ARMIDRegisterIdx index)
+static int get_host_cpu_reg(int fd, ARMHostCPUFeatures *ahcf,
+ ARMIDRegisterIdx index)
{
uint64_t *reg;
int ret;
@@ -250,6 +251,7 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
*/
int fdarray[3];
bool sve_supported;
+ bool el2_supported;
bool pmu_supported = false;
uint64_t features = 0;
int err;
@@ -270,6 +272,14 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
}
/*
+ * Ask for EL2 if supported.
+ */
+ el2_supported = kvm_arm_el2_supported();
+ if (el2_supported) {
+ init.features[0] |= 1 << KVM_ARM_VCPU_HAS_EL2;
+ }
+
+ /*
* Ask for Pointer Authentication if supported, so that we get
* the unsanitized field values for AA64ISAR1_EL1.
*/
@@ -422,6 +432,10 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
features |= 1ULL << ARM_FEATURE_AARCH64;
features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;
+ if (el2_supported) {
+ features |= 1ULL << ARM_FEATURE_EL2;
+ }
+
ahcf->features = features;
return true;
@@ -1762,6 +1776,11 @@ bool kvm_arm_aarch32_supported(void)
return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT);
}
+bool kvm_arm_el2_supported(void)
+{
+ return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL2);
+}
+
bool kvm_arm_sve_supported(void)
{
return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
@@ -1882,6 +1901,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
}
+ if (cpu->has_el2 && kvm_arm_el2_supported()) {
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_HAS_EL2;
+ }
/* Do KVM_ARM_VCPU_INIT ioctl */
ret = kvm_arm_vcpu_init(cpu);
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index 7dc83ca..b4cad05 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -192,6 +192,13 @@ bool kvm_arm_sve_supported(void);
bool kvm_arm_mte_supported(void);
/**
+ * kvm_arm_el2_supported:
+ *
+ * Returns true if KVM can enable EL2 and false otherwise.
+ */
+bool kvm_arm_el2_supported(void);
+
+/**
* kvm_arm_get_max_vm_ipa_size:
* @ms: Machine state handle
* @fixed_ipa: True when the IPA limit is fixed at 40. This is the case
diff --git a/target/arm/meson.build b/target/arm/meson.build
index 7aa81e3..07d9271 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -22,6 +22,7 @@ arm_user_ss.add(when: 'TARGET_AARCH64', if_false: files(
'cpu32-stubs.c',
))
arm_user_ss.add(files(
+ 'cpregs-pmu.c',
'debug_helper.c',
'helper.c',
'vfp_fpscr.c',
@@ -36,6 +37,7 @@ arm_common_system_ss.add(files(
'arch_dump.c',
'arm-powerctl.c',
'cortex-regs.c',
+ 'cpregs-pmu.c',
'debug_helper.c',
'helper.c',
'machine.c',
diff --git a/target/arm/tcg-stubs.c b/target/arm/tcg-stubs.c
index 5e5166c..aac99b2 100644
--- a/target/arm/tcg-stubs.c
+++ b/target/arm/tcg-stubs.c
@@ -22,11 +22,6 @@ void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
g_assert_not_reached();
}
-/* TLBI insns are only used by TCG, so we don't need to do anything for KVM */
-void define_tlb_insn_regs(ARMCPU *cpu)
-{
-}
-
/* With KVM, we never use float_status, so these can be no-ops */
void arm_set_default_fp_behaviours(float_status *s)
{
diff --git a/target/arm/tcg/cpregs-at.c b/target/arm/tcg/cpregs-at.c
new file mode 100644
index 0000000..398a61d
--- /dev/null
+++ b/target/arm/tcg/cpregs-at.c
@@ -0,0 +1,519 @@
+/*
+ * System instructions for address translation
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "cpu-features.h"
+#include "internals.h"
+#include "cpregs.h"
+
+static int par_el1_shareability(GetPhysAddrResult *res)
+{
+ /*
+ * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC
+ * memory -- see pseudocode PAREncodeShareability().
+ */
+ if (((res->cacheattrs.attrs & 0xf0) == 0) ||
+ res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) {
+ return 2;
+ }
+ return res->cacheattrs.shareability;
+}
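+
+/*
+ * Illustrative examples (added for clarity, not part of the original
+ * patch): a MAIR attribute byte of 0x04 (Device-nGnRE) has a zero
+ * upper nibble and 0x44 encodes Normal Inner/Outer Non-cacheable, so
+ * both force SH == 0b10 (Outer Shareable); 0xff (Normal Write-Back
+ * cacheable) keeps the shareability computed by the table walk.
+ */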
+
+static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ ARMSecuritySpace ss)
+{
+ bool ret;
+ uint64_t par64;
+ bool format64 = false;
+ ARMMMUFaultInfo fi = {};
+ GetPhysAddrResult res = {};
+
+ /*
+ * I_MXTJT: Granule protection checks are not performed on the final
+ * address of a successful translation. This is a translation, not a
+ * memory reference, so "memop = none = 0".
+ */
+ ret = get_phys_addr_with_space_nogpc(env, value, access_type, 0,
+ mmu_idx, ss, &res, &fi);
+
+ /*
+ * ATS operations only do S1 or S1+S2 translations, so we never
+ * have to deal with the ARMCacheAttrs format for S2 only.
+ */
+ assert(!res.cacheattrs.is_s2_format);
+
+ if (ret) {
+ /*
+ * Some kinds of translation fault must cause exceptions rather
+ * than being reported in the PAR.
+ */
+ int current_el = arm_current_el(env);
+ int target_el;
+ uint32_t syn, fsr, fsc;
+ bool take_exc = false;
+
+ if (fi.s1ptw && current_el == 1
+ && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
+ /*
+ * Synchronous stage 2 fault on an access made as part of the
+ * translation table walk for AT S1E0* or AT S1E1* insn
+ * executed from NS EL1. If this is a synchronous external abort
+ * and SCR_EL3.EA == 1, then we take a synchronous external abort
+ * to EL3. Otherwise the fault is taken as an exception to EL2,
+ * and HPFAR_EL2 holds the faulting IPA.
+ */
+ if (fi.type == ARMFault_SyncExternalOnWalk &&
+ (env->cp15.scr_el3 & SCR_EA)) {
+ target_el = 3;
+ } else {
+ env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
+ if (arm_is_secure_below_el3(env) && fi.s1ns) {
+ env->cp15.hpfar_el2 |= HPFAR_NS;
+ }
+ target_el = 2;
+ }
+ take_exc = true;
+ } else if (fi.type == ARMFault_SyncExternalOnWalk) {
+ /*
+ * Synchronous external aborts during a translation table walk
+ * are taken as Data Abort exceptions.
+ */
+ if (fi.stage2) {
+ if (current_el == 3) {
+ target_el = 3;
+ } else {
+ target_el = 2;
+ }
+ } else {
+ target_el = exception_target_el(env);
+ }
+ take_exc = true;
+ }
+
+ if (take_exc) {
+ /* Construct FSR and FSC using same logic as arm_deliver_fault() */
+ if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
+ arm_s1_regime_using_lpae_format(env, mmu_idx)) {
+ fsr = arm_fi_to_lfsc(&fi);
+ fsc = extract32(fsr, 0, 6);
+ } else {
+ fsr = arm_fi_to_sfsc(&fi);
+ fsc = 0x3f;
+ }
+ /*
+ * Report exception with ESR indicating a fault due to a
+ * translation table walk for a cache maintenance instruction.
+ */
+ syn = syn_data_abort_no_iss(current_el == target_el, 0,
+ fi.ea, 1, fi.s1ptw, 1, fsc);
+ env->exception.vaddress = value;
+ env->exception.fsr = fsr;
+ raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
+ }
+ }
+
+ if (is_a64(env)) {
+ format64 = true;
+ } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
+ /*
+ * ATS1Cxx:
+ * * TTBCR.EAE determines whether the result is returned using the
+ * 32-bit or the 64-bit PAR format
+ * * Instructions executed in Hyp mode always use the 64-bit format
+ *
+ * ATS1S2NSOxx uses the 64-bit format if any of the following is true:
+ * * The Non-secure TTBCR.EAE bit is set to 1
+ * * The implementation includes EL2, and the value of HCR.VM is 1
+ *
+ * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
+ *
+ * ATS1Hx always uses the 64-bit format.
+ */
+ format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
+
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ if (mmu_idx == ARMMMUIdx_E10_0 ||
+ mmu_idx == ARMMMUIdx_E10_1 ||
+ mmu_idx == ARMMMUIdx_E10_1_PAN) {
+ format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
+ } else {
+ format64 |= arm_current_el(env) == 2;
+ }
+ }
+ }
+
+ if (format64) {
+ /* Create a 64-bit PAR */
+ par64 = (1 << 11); /* LPAE bit always set */
+ if (!ret) {
+ par64 |= res.f.phys_addr & ~0xfffULL;
+ if (!res.f.attrs.secure) {
+ par64 |= (1 << 9); /* NS */
+ }
+ par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
+ par64 |= par_el1_shareability(&res) << 7; /* SH */
+ } else {
+ uint32_t fsr = arm_fi_to_lfsc(&fi);
+
+ par64 |= 1; /* F */
+ par64 |= (fsr & 0x3f) << 1; /* FS */
+ if (fi.stage2) {
+ par64 |= (1 << 9); /* S */
+ }
+ if (fi.s1ptw) {
+ par64 |= (1 << 8); /* PTW */
+ }
+ }
+ } else {
+ /*
+ * fsr is a DFSR/IFSR value for the short descriptor
+ * translation table format (with WnR always clear).
+ * Convert it to a 32-bit PAR.
+ */
+ if (!ret) {
+ /* We do not set any attribute bits in the PAR */
+ if (res.f.lg_page_size == 24
+ && arm_feature(env, ARM_FEATURE_V7)) {
+ par64 = (res.f.phys_addr & 0xff000000) | (1 << 1);
+ } else {
+ par64 = res.f.phys_addr & 0xfffff000;
+ }
+ if (!res.f.attrs.secure) {
+ par64 |= (1 << 9); /* NS */
+ }
+ } else {
+ uint32_t fsr = arm_fi_to_sfsc(&fi);
+
+ par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
+ ((fsr & 0xf) << 1) | 1;
+ }
+ }
+ return par64;
+}
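+
+/*
+ * Summary of the 64-bit PAR value built above (added for readability;
+ * the Arm ARM is authoritative): on success, PA in bits [51:12], ATTR
+ * in [63:56], NS in [9], SH in [8:7]; on fault, F in [0], FS in [6:1],
+ * PTW in [8], S in [9]. Bit [11] (LPAE) is set in both cases.
+ */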
+
+static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+ MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
+ uint64_t par64;
+ ARMMMUIdx mmu_idx;
+ int el = arm_current_el(env);
+ ARMSecuritySpace ss = arm_security_space(env);
+
+ switch (ri->opc2 & 6) {
+ case 0:
+ /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
+ switch (el) {
+ case 3:
+ if (ri->crm == 9 && arm_pan_enabled(env)) {
+ mmu_idx = ARMMMUIdx_E30_3_PAN;
+ } else {
+ mmu_idx = ARMMMUIdx_E3;
+ }
+ break;
+ case 2:
+ g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
+ /* fall through */
+ case 1:
+ if (ri->crm == 9 && arm_pan_enabled(env)) {
+ mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
+ } else {
+ mmu_idx = ARMMMUIdx_Stage1_E1;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 2:
+ /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
+ switch (el) {
+ case 3:
+ mmu_idx = ARMMMUIdx_E30_0;
+ break;
+ case 2:
+ g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
+ mmu_idx = ARMMMUIdx_Stage1_E0;
+ break;
+ case 1:
+ mmu_idx = ARMMMUIdx_Stage1_E0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 4:
+ /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
+ mmu_idx = ARMMMUIdx_E10_1;
+ ss = ARMSS_NonSecure;
+ break;
+ case 6:
+ /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
+ mmu_idx = ARMMMUIdx_E10_0;
+ ss = ARMSS_NonSecure;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ par64 = do_ats_write(env, value, access_type, mmu_idx, ss);
+
+ A32_BANKED_CURRENT_REG_SET(env, par, par64);
+}
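+
+/*
+ * Worked example (illustrative): AArch32 ATS1CPR encodes crm = 8,
+ * opc2 = 0, so ats_write() takes the "stage 1 current state PL1" arm
+ * and, at EL1 with PAN disabled, translates the address as a load
+ * through ARMMMUIdx_Stage1_E1 before depositing the result in PAR.
+ */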
+
+static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
+ uint64_t par64;
+
+ /* There is no SecureEL2 for AArch32. */
+ par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2,
+ ARMSS_NonSecure);
+
+ A32_BANKED_CURRENT_REG_SET(env, par, par64);
+}
+
+static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /*
+ * R_NYXTL: the instruction is UNDEFINED if it applies to an Exception
+ * level lower than EL3 while the SCR_EL3.{NSE,NS} combination is
+ * reserved. This situation can only arise when executing at EL3, because
+ * entering a lower EL with that combination is an illegal exception
+ * return. We need not check FEAT_RME explicitly, because scr_write()
+ * ensures that the NSE bit cannot be set without it.
+ */
+ if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) {
+ return CP_ACCESS_UNDEFINED;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 3 &&
+ !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
+ return CP_ACCESS_UNDEFINED;
+ }
+ return at_e012_access(env, ri, isread);
+}
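+
+/*
+ * Illustrative consequence of the check above: at EL3 with
+ * SCR_EL3.NS == 0 and SCR_EL3.EEL2 == 0 there is no enabled EL2 to
+ * translate for, so AT S1E2R/W UNDEF rather than returning junk.
+ */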
+
+static CPAccessResult at_s1e01_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_AT)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ return at_e012_access(env, ri, isread);
+}
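+
+/*
+ * Example (not from the original patch): a hypervisor can set
+ * HCR_EL2.AT so that a guest's AT S1E0* and AT S1E1* instructions
+ * trap to EL2, letting it emulate the translation itself.
+ */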
+
+static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
+ ARMMMUIdx mmu_idx;
+ uint64_t hcr_el2 = arm_hcr_el2_eff(env);
+ bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE);
+ bool for_el3 = false;
+ ARMSecuritySpace ss;
+
+ switch (ri->opc2 & 6) {
+ case 0:
+ switch (ri->opc1) {
+ case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
+ if (ri->crm == 9 && arm_pan_enabled(env)) {
+ mmu_idx = regime_e20 ?
+ ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN;
+ } else {
+ mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1;
+ }
+ break;
+ case 4: /* AT S1E2R, AT S1E2W */
+ mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2;
+ break;
+ case 6: /* AT S1E3R, AT S1E3W */
+ mmu_idx = ARMMMUIdx_E3;
+ for_el3 = true;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 2: /* AT S1E0R, AT S1E0W */
+ mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0;
+ break;
+ case 4: /* AT S12E1R, AT S12E1W */
+ mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1;
+ break;
+ case 6: /* AT S12E0R, AT S12E0W */
+ mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ ss = for_el3 ? arm_security_space(env) : arm_security_space_below_el3(env);
+ env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss);
+}
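+
+/*
+ * Worked example (illustrative): AT S1E1R encodes opc1 = 0, opc2 = 0,
+ * crm = 8, so ats_write64() selects ARMMMUIdx_E20_2 when
+ * HCR_EL2.{E2H,TGE} == {1,1} and ARMMMUIdx_Stage1_E1 otherwise,
+ * writing the result to PAR_EL1.
+ */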
+
+static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (ri->opc2 & 4) {
+ /*
+ * The ATS12NSO* operations must trap to EL3 or EL2 if executed in
+ * Secure EL1 (which can only happen if EL3 is AArch64).
+ * They are simply UNDEF if executed from NS EL1.
+ * They function normally from EL2 or EL3.
+ */
+ if (arm_current_el(env) == 1) {
+ if (arm_is_secure_below_el3(env)) {
+ if (env->cp15.scr_el3 & SCR_EEL2) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_UNDEFINED;
+ }
+ }
+ return CP_ACCESS_OK;
+}
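+
+/*
+ * Example (illustrative): ATS12NSOPR (opc2 = 4) from Secure EL1 traps
+ * to EL2 when SCR_EL3.EEL2 is set and to EL3 otherwise, while the same
+ * instruction from Non-secure EL1 is UNDEFINED.
+ */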
+
+static const ARMCPRegInfo vapa_ats_reginfo[] = {
+ /* This underdecoding is safe because the reginfo is NO_RAW. */
+ { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
+ .access = PL1_W, .accessfn = ats_access,
+ .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
+};
+
+static const ARMCPRegInfo v8_ats_reginfo[] = {
+ /* 64 bit address translation operations */
+ { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .fgt = FGT_ATS1E1R,
+ .accessfn = at_s1e01_access, .writefn = ats_write64 },
+ { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .fgt = FGT_ATS1E1W,
+ .accessfn = at_s1e01_access, .writefn = ats_write64 },
+ { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .fgt = FGT_ATS1E0R,
+ .accessfn = at_s1e01_access, .writefn = ats_write64 },
+ { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .fgt = FGT_ATS1E0W,
+ .accessfn = at_s1e01_access, .writefn = ats_write64 },
+ { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .accessfn = at_e012_access, .writefn = ats_write64 },
+ { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .accessfn = at_e012_access, .writefn = ats_write64 },
+ { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .accessfn = at_e012_access, .writefn = ats_write64 },
+ { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .accessfn = at_e012_access, .writefn = ats_write64 },
+ /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
+ { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
+ { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
+};
+
+static const ARMCPRegInfo el2_ats_reginfo[] = {
+ /*
+ * Unlike the other EL2-related AT operations, these must
+ * UNDEF from EL3 if EL2 is not implemented, which is why we
+ * define them here rather than with the rest of the AT ops.
+ */
+ { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL2_W, .accessfn = at_s1e2_access,
+ .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = ats_write64 },
+ { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL2_W, .accessfn = at_s1e2_access,
+ .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = ats_write64 },
+ /*
+ * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
+ * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
+ * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
+ * to behave as if SCR.NS was 1.
+ */
+ { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL2_W,
+ .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
+ { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL2_W,
+ .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
+};
+
+static const ARMCPRegInfo ats1e1_reginfo[] = {
+ { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .fgt = FGT_ATS1E1RP,
+ .accessfn = at_s1e01_access, .writefn = ats_write64 },
+ { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .fgt = FGT_ATS1E1WP,
+ .accessfn = at_s1e01_access, .writefn = ats_write64 },
+};
+
+static const ARMCPRegInfo ats1cp_reginfo[] = {
+ { .name = "ATS1CPRP",
+ .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write },
+ { .name = "ATS1CPWP",
+ .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write },
+};
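+
+/*
+ * Note (added for clarity): the crm = 9 encodings here are the AArch32
+ * PAN variants ATS1CPRP and ATS1CPWP; ats_write() keys on ri->crm == 9
+ * to pick the *_PAN MMU indexes when PAN is enabled.
+ */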
+
+void define_at_insn_regs(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+
+ if (arm_feature(env, ARM_FEATURE_VAPA)) {
+ define_arm_cp_regs(cpu, vapa_ats_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ define_arm_cp_regs(cpu, v8_ats_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_EL2)
+ || (arm_feature(env, ARM_FEATURE_EL3)
+ && arm_feature(env, ARM_FEATURE_V8))) {
+ define_arm_cp_regs(cpu, el2_ats_reginfo);
+ }
+ if (cpu_isar_feature(aa64_ats1e1, cpu)) {
+ define_arm_cp_regs(cpu, ats1e1_reginfo);
+ }
+ if (cpu_isar_feature(aa32_ats1e1, cpu)) {
+ define_arm_cp_regs(cpu, ats1cp_reginfo);
+ }
+}
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
index eddd711..dc249ce 100644
--- a/target/arm/tcg/cpu-v7m.c
+++ b/target/arm/tcg/cpu-v7m.c
@@ -62,7 +62,7 @@ static void cortex_m0_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x00000030);
SET_IDREG(isar, ID_PFR1, 0x00000200);
SET_IDREG(isar, ID_DFR0, 0x00100000);
- cpu->id_afr0 = 0x00000000;
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
SET_IDREG(isar, ID_MMFR0, 0x00000030);
SET_IDREG(isar, ID_MMFR1, 0x00000000);
SET_IDREG(isar, ID_MMFR2, 0x00000000);
@@ -88,7 +88,7 @@ static void cortex_m3_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x00000030);
SET_IDREG(isar, ID_PFR1, 0x00000200);
SET_IDREG(isar, ID_DFR0, 0x00100000);
- cpu->id_afr0 = 0x00000000;
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
SET_IDREG(isar, ID_MMFR0, 0x00000030);
SET_IDREG(isar, ID_MMFR1, 0x00000000);
SET_IDREG(isar, ID_MMFR2, 0x00000000);
@@ -119,7 +119,7 @@ static void cortex_m4_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x00000030);
SET_IDREG(isar, ID_PFR1, 0x00000200);
SET_IDREG(isar, ID_DFR0, 0x00100000);
- cpu->id_afr0 = 0x00000000;
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
SET_IDREG(isar, ID_MMFR0, 0x00000030);
SET_IDREG(isar, ID_MMFR1, 0x00000000);
SET_IDREG(isar, ID_MMFR2, 0x00000000);
@@ -150,7 +150,7 @@ static void cortex_m7_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x00000030);
SET_IDREG(isar, ID_PFR1, 0x00000200);
SET_IDREG(isar, ID_DFR0, 0x00100000);
- cpu->id_afr0 = 0x00000000;
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
SET_IDREG(isar, ID_MMFR0, 0x00100030);
SET_IDREG(isar, ID_MMFR1, 0x00000000);
SET_IDREG(isar, ID_MMFR2, 0x01000000);
@@ -183,7 +183,7 @@ static void cortex_m33_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x00000030);
SET_IDREG(isar, ID_PFR1, 0x00000210);
SET_IDREG(isar, ID_DFR0, 0x00200000);
- cpu->id_afr0 = 0x00000000;
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
SET_IDREG(isar, ID_MMFR0, 0x00101F40);
SET_IDREG(isar, ID_MMFR1, 0x00000000);
SET_IDREG(isar, ID_MMFR2, 0x01000000);
@@ -195,7 +195,7 @@ static void cortex_m33_initfn(Object *obj)
SET_IDREG(isar, ID_ISAR4, 0x01310132);
SET_IDREG(isar, ID_ISAR5, 0x00000000);
SET_IDREG(isar, ID_ISAR6, 0x00000000);
- cpu->clidr = 0x00000000;
+ SET_IDREG(isar, CLIDR, 0x00000000);
cpu->ctr = 0x8000c000;
}
@@ -221,7 +221,7 @@ static void cortex_m55_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x20000030);
SET_IDREG(isar, ID_PFR1, 0x00000230);
SET_IDREG(isar, ID_DFR0, 0x10200000);
- cpu->id_afr0 = 0x00000000;
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
SET_IDREG(isar, ID_MMFR0, 0x00111040);
SET_IDREG(isar, ID_MMFR1, 0x00000000);
SET_IDREG(isar, ID_MMFR2, 0x01000000);
@@ -233,7 +233,7 @@ static void cortex_m55_initfn(Object *obj)
SET_IDREG(isar, ID_ISAR4, 0x01310132);
SET_IDREG(isar, ID_ISAR5, 0x00000000);
SET_IDREG(isar, ID_ISAR6, 0x00000000);
- cpu->clidr = 0x00000000; /* caches not implemented */
+ SET_IDREG(isar, CLIDR, 0x00000000); /* caches not implemented */
cpu->ctr = 0x8303c003;
}
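
The cpu-v7m.c hunks above and the cpu32.c/cpu64.c hunks below are one mechanical conversion: per-register struct members such as cpu->id_afr0 and cpu->clidr move into the shared ID-register array accessed through GET_IDREG/SET_IDREG. The macros plausibly reduce to an indexed load/store along these lines (a sketch; the real definitions and index names live in the target/arm headers):

    /* Sketch only: one array of ID-register values, indexed per register. */
    #define SET_IDREG(isar, REG, VALUE) \
        ((isar)->idregs[REG ## _EL1_IDX] = (VALUE))
    #define GET_IDREG(isar, REG) \
        ((isar)->idregs[REG ## _EL1_IDX])

Keeping every ID register in one array lets common code iterate over them (e.g. for migration or KVM register syncing) instead of naming each field.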
diff --git a/target/arm/tcg/cpu32.c b/target/arm/tcg/cpu32.c
index 942b636..a2a23ea 100644
--- a/target/arm/tcg/cpu32.c
+++ b/target/arm/tcg/cpu32.c
@@ -225,7 +225,7 @@ static void arm1136_r2_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x111);
SET_IDREG(isar, ID_PFR1, 0x1);
SET_IDREG(isar, ID_DFR0, 0x2);
- cpu->id_afr0 = 0x3;
+ SET_IDREG(isar, ID_AFR0, 0x3);
SET_IDREG(isar, ID_MMFR0, 0x01130003);
SET_IDREG(isar, ID_MMFR1, 0x10030302);
SET_IDREG(isar, ID_MMFR2, 0x01222110);
@@ -257,7 +257,7 @@ static void arm1136_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x111);
SET_IDREG(isar, ID_PFR1, 0x1);
SET_IDREG(isar, ID_DFR0, 0x2);
- cpu->id_afr0 = 0x3;
+ SET_IDREG(isar, ID_AFR0, 0x3);
SET_IDREG(isar, ID_MMFR0, 0x01130003);
SET_IDREG(isar, ID_MMFR1, 0x10030302);
SET_IDREG(isar, ID_MMFR2, 0x01222110);
@@ -290,7 +290,7 @@ static void arm1176_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x111);
SET_IDREG(isar, ID_PFR1, 0x11);
SET_IDREG(isar, ID_DFR0, 0x33);
- cpu->id_afr0 = 0;
+ SET_IDREG(isar, ID_AFR0, 0);
SET_IDREG(isar, ID_MMFR0, 0x01130003);
SET_IDREG(isar, ID_MMFR1, 0x10030302);
SET_IDREG(isar, ID_MMFR2, 0x01222100);
@@ -320,7 +320,7 @@ static void arm11mpcore_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x111);
SET_IDREG(isar, ID_PFR1, 0x1);
SET_IDREG(isar, ID_DFR0, 0);
- cpu->id_afr0 = 0x2;
+ SET_IDREG(isar, ID_AFR0, 0x2);
SET_IDREG(isar, ID_MMFR0, 0x01100103);
SET_IDREG(isar, ID_MMFR1, 0x10020302);
SET_IDREG(isar, ID_MMFR2, 0x01222000);
@@ -360,7 +360,7 @@ static void cortex_a8_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x1031);
SET_IDREG(isar, ID_PFR1, 0x11);
SET_IDREG(isar, ID_DFR0, 0x400);
- cpu->id_afr0 = 0;
+ SET_IDREG(isar, ID_AFR0, 0);
SET_IDREG(isar, ID_MMFR0, 0x31100003);
SET_IDREG(isar, ID_MMFR1, 0x20000000);
SET_IDREG(isar, ID_MMFR2, 0x01202000);
@@ -371,7 +371,7 @@ static void cortex_a8_initfn(Object *obj)
SET_IDREG(isar, ID_ISAR3, 0x11112131);
SET_IDREG(isar, ID_ISAR4, 0x00111142);
cpu->isar.dbgdidr = 0x15141000;
- cpu->clidr = (1 << 27) | (2 << 24) | 3;
+ SET_IDREG(isar, CLIDR, (1 << 27) | (2 << 24) | 3);
cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */
cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */
@@ -436,7 +436,7 @@ static void cortex_a9_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x1031);
SET_IDREG(isar, ID_PFR1, 0x11);
SET_IDREG(isar, ID_DFR0, 0x000);
- cpu->id_afr0 = 0;
+ SET_IDREG(isar, ID_AFR0, 0);
SET_IDREG(isar, ID_MMFR0, 0x00100103);
SET_IDREG(isar, ID_MMFR1, 0x20000000);
SET_IDREG(isar, ID_MMFR2, 0x01230000);
@@ -447,7 +447,7 @@ static void cortex_a9_initfn(Object *obj)
SET_IDREG(isar, ID_ISAR3, 0x11112131);
SET_IDREG(isar, ID_ISAR4, 0x00111142);
cpu->isar.dbgdidr = 0x35141000;
- cpu->clidr = (1 << 27) | (1 << 24) | 3;
+ SET_IDREG(isar, CLIDR, (1 << 27) | (1 << 24) | 3);
cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */
cpu->isar.reset_pmcr_el0 = 0x41093000;
@@ -502,7 +502,7 @@ static void cortex_a7_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x00001131);
SET_IDREG(isar, ID_PFR1, 0x00011011);
SET_IDREG(isar, ID_DFR0, 0x02010555);
- cpu->id_afr0 = 0x00000000;
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
SET_IDREG(isar, ID_MMFR0, 0x10101105);
SET_IDREG(isar, ID_MMFR1, 0x40000000);
SET_IDREG(isar, ID_MMFR2, 0x01240000);
@@ -519,7 +519,7 @@ static void cortex_a7_initfn(Object *obj)
cpu->isar.dbgdidr = 0x3515f005;
cpu->isar.dbgdevid = 0x01110f13;
cpu->isar.dbgdevid1 = 0x1;
- cpu->clidr = 0x0a200023;
+ SET_IDREG(isar, CLIDR, 0x0a200023);
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
@@ -554,7 +554,7 @@ static void cortex_a15_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x00001131);
SET_IDREG(isar, ID_PFR1, 0x00011011);
SET_IDREG(isar, ID_DFR0, 0x02010555);
- cpu->id_afr0 = 0x00000000;
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
SET_IDREG(isar, ID_MMFR0, 0x10201105);
SET_IDREG(isar, ID_MMFR1, 0x20000000);
SET_IDREG(isar, ID_MMFR2, 0x01240000);
@@ -567,7 +567,7 @@ static void cortex_a15_initfn(Object *obj)
cpu->isar.dbgdidr = 0x3515f021;
cpu->isar.dbgdevid = 0x01110f13;
cpu->isar.dbgdevid1 = 0x0;
- cpu->clidr = 0x0a200023;
+ SET_IDREG(isar, CLIDR, 0x0a200023);
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
@@ -598,7 +598,7 @@ static void cortex_r5_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x0131);
SET_IDREG(isar, ID_PFR1, 0x001);
SET_IDREG(isar, ID_DFR0, 0x010400);
- cpu->id_afr0 = 0x0;
+ SET_IDREG(isar, ID_AFR0, 0x0);
SET_IDREG(isar, ID_MMFR0, 0x0210030);
SET_IDREG(isar, ID_MMFR1, 0x00000000);
SET_IDREG(isar, ID_MMFR2, 0x01200000);
@@ -745,7 +745,7 @@ static void cortex_r52_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x00000131);
SET_IDREG(isar, ID_PFR1, 0x10111001);
SET_IDREG(isar, ID_DFR0, 0x03010006);
- cpu->id_afr0 = 0x00000000;
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
SET_IDREG(isar, ID_MMFR0, 0x00211040);
SET_IDREG(isar, ID_MMFR1, 0x40000000);
SET_IDREG(isar, ID_MMFR2, 0x01200000);
@@ -758,7 +758,7 @@ static void cortex_r52_initfn(Object *obj)
SET_IDREG(isar, ID_ISAR4, 0x00010142);
SET_IDREG(isar, ID_ISAR5, 0x00010001);
cpu->isar.dbgdidr = 0x77168000;
- cpu->clidr = (1 << 27) | (1 << 24) | 0x3;
+ SET_IDREG(isar, CLIDR, (1 << 27) | (1 << 24) | 0x3);
cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
@@ -977,7 +977,7 @@ static void arm_max_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x00000131);
SET_IDREG(isar, ID_PFR1, 0x00011011);
SET_IDREG(isar, ID_DFR0, 0x03010066);
- cpu->id_afr0 = 0x00000000;
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
SET_IDREG(isar, ID_MMFR0, 0x10101105);
SET_IDREG(isar, ID_MMFR1, 0x40000000);
SET_IDREG(isar, ID_MMFR2, 0x01260000);
@@ -990,7 +990,7 @@ static void arm_max_initfn(Object *obj)
SET_IDREG(isar, ID_ISAR5, 0x00011121);
SET_IDREG(isar, ID_ISAR6, 0);
cpu->isar.reset_pmcr_el0 = 0x41013000;
- cpu->clidr = 0x0a200023;
+ SET_IDREG(isar, CLIDR, 0x0a200023);
cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
index d0df50a..35cddba 100644
--- a/target/arm/tcg/cpu64.c
+++ b/target/arm/tcg/cpu64.c
@@ -52,7 +52,7 @@ static void aarch64_a35_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x00000131);
SET_IDREG(isar, ID_PFR1, 0x00011011);
SET_IDREG(isar, ID_DFR0, 0x03010066);
- cpu->id_afr0 = 0;
+ SET_IDREG(isar, ID_AFR0, 0);
SET_IDREG(isar, ID_MMFR0, 0x10201105);
SET_IDREG(isar, ID_MMFR1, 0x40000000);
SET_IDREG(isar, ID_MMFR2, 0x01260000);
@@ -71,7 +71,7 @@ static void aarch64_a35_initfn(Object *obj)
SET_IDREG(isar, ID_AA64ISAR1, 0);
SET_IDREG(isar, ID_AA64MMFR0, 0x00101122);
SET_IDREG(isar, ID_AA64MMFR1, 0);
- cpu->clidr = 0x0a200023;
+ SET_IDREG(isar, CLIDR, 0x0a200023);
cpu->dcz_blocksize = 4;
/* From B2.4 AArch64 Virtual Memory control registers */
@@ -216,7 +216,7 @@ static void aarch64_a55_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_PMU);
/* Ordered by B2.4 AArch64 registers by functional group */
- cpu->clidr = 0x82000023;
+ SET_IDREG(isar, CLIDR, 0x82000023);
cpu->ctr = 0x84448004; /* L1Ip = VIPT */
cpu->dcz_blocksize = 4; /* 64 bytes */
SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408ull);
@@ -227,7 +227,7 @@ static void aarch64_a55_initfn(Object *obj)
SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011ull);
SET_IDREG(isar, ID_AA64PFR0, 0x0000000010112222ull);
SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000010ull);
- cpu->id_afr0 = 0x00000000;
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
SET_IDREG(isar, ID_DFR0, 0x04010088);
SET_IDREG(isar, ID_ISAR0, 0x02101110);
SET_IDREG(isar, ID_ISAR1, 0x13112111);
@@ -298,7 +298,7 @@ static void aarch64_a72_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x00000131);
SET_IDREG(isar, ID_PFR1, 0x00011011);
SET_IDREG(isar, ID_DFR0, 0x03010066);
- cpu->id_afr0 = 0x00000000;
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
SET_IDREG(isar, ID_MMFR0, 0x10201105);
SET_IDREG(isar, ID_MMFR1, 0x40000000);
SET_IDREG(isar, ID_MMFR2, 0x01260000);
@@ -317,7 +317,7 @@ static void aarch64_a72_initfn(Object *obj)
cpu->isar.dbgdevid = 0x01110f13;
cpu->isar.dbgdevid1 = 0x2;
cpu->isar.reset_pmcr_el0 = 0x41023000;
- cpu->clidr = 0x0a200023;
+ SET_IDREG(isar, CLIDR, 0x0a200023);
/* 32KB L1 dcache */
cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
/* 48KB L1 dcache */
@@ -349,10 +349,10 @@ static void aarch64_a76_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_PMU);
/* Ordered by B2.4 AArch64 registers by functional group */
- cpu->clidr = 0x82000023;
+ SET_IDREG(isar, CLIDR, 0x82000023);
cpu->ctr = 0x8444C004;
cpu->dcz_blocksize = 4;
- SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408ull),
+ SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408ull);
SET_IDREG(isar, ID_AA64ISAR0, 0x0000100010211120ull);
SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000100001ull);
SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101122ull);
@@ -360,7 +360,7 @@ static void aarch64_a76_initfn(Object *obj)
SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011ull);
SET_IDREG(isar, ID_AA64PFR0, 0x1100000010111112ull); /* GIC filled in later */
SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000010ull);
- cpu->id_afr0 = 0x00000000;
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
SET_IDREG(isar, ID_DFR0, 0x04010088);
SET_IDREG(isar, ID_ISAR0, 0x02101110);
SET_IDREG(isar, ID_ISAR1, 0x13112111);
@@ -426,17 +426,17 @@ static void aarch64_a64fx_initfn(Object *obj)
cpu->reset_sctlr = 0x30000180;
SET_IDREG(isar, ID_AA64PFR0, 0x0000000101111111); /* No RAS Extensions */
SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000000);
- SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408),
- SET_IDREG(isar, ID_AA64DFR1, 0x0000000000000000),
- cpu->id_aa64afr0 = 0x0000000000000000;
- cpu->id_aa64afr1 = 0x0000000000000000;
+ SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408);
+ SET_IDREG(isar, ID_AA64DFR1, 0x0000000000000000);
+ SET_IDREG(isar, ID_AA64AFR0, 0x0000000000000000);
+ SET_IDREG(isar, ID_AA64AFR1, 0x0000000000000000);
SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000001122);
SET_IDREG(isar, ID_AA64MMFR1, 0x0000000011212100);
SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011);
SET_IDREG(isar, ID_AA64ISAR0, 0x0000000010211120);
SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000010001);
SET_IDREG(isar, ID_AA64ZFR0, 0x0000000000000000);
- cpu->clidr = 0x0000000080000023;
+ SET_IDREG(isar, CLIDR, 0x0000000080000023);
/* 64KB L1 dcache */
cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 256, 64 * KiB, 7);
/* 64KB L1 icache */
@@ -597,7 +597,7 @@ static void aarch64_neoverse_n1_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_PMU);
/* Ordered by B2.4 AArch64 registers by functional group */
- cpu->clidr = 0x82000023;
+ SET_IDREG(isar, CLIDR, 0x82000023);
cpu->ctr = 0x8444c004;
cpu->dcz_blocksize = 4;
SET_IDREG(isar, ID_AA64DFR0, 0x0000000110305408ull);
@@ -608,7 +608,7 @@ static void aarch64_neoverse_n1_initfn(Object *obj)
SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011ull);
SET_IDREG(isar, ID_AA64PFR0, 0x1100000010111112ull); /* GIC filled in later */
SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000020ull);
- cpu->id_afr0 = 0x00000000;
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
SET_IDREG(isar, ID_DFR0, 0x04010088);
SET_IDREG(isar, ID_ISAR0, 0x02101110);
SET_IDREG(isar, ID_ISAR1, 0x13112111);
@@ -673,21 +673,21 @@ static void aarch64_neoverse_v1_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_PMU);
/* Ordered by 3.2.4 AArch64 registers by functional group */
- cpu->clidr = 0x82000023;
+ SET_IDREG(isar, CLIDR, 0x82000023);
cpu->ctr = 0xb444c004; /* With DIC and IDC set */
cpu->dcz_blocksize = 4;
- cpu->id_aa64afr0 = 0x00000000;
- cpu->id_aa64afr1 = 0x00000000;
- SET_IDREG(isar, ID_AA64DFR0, 0x000001f210305519ull),
- SET_IDREG(isar, ID_AA64DFR1, 0x00000000),
+ SET_IDREG(isar, ID_AA64AFR0, 0x00000000);
+ SET_IDREG(isar, ID_AA64AFR1, 0x00000000);
+ SET_IDREG(isar, ID_AA64DFR0, 0x000001f210305519ull);
+ SET_IDREG(isar, ID_AA64DFR1, 0x00000000);
SET_IDREG(isar, ID_AA64ISAR0, 0x1011111110212120ull); /* with FEAT_RNG */
SET_IDREG(isar, ID_AA64ISAR1, 0x0011000001211032ull);
SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101125ull);
- SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull),
- SET_IDREG(isar, ID_AA64MMFR2, 0x0220011102101011ull),
+ SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
+ SET_IDREG(isar, ID_AA64MMFR2, 0x0220011102101011ull);
SET_IDREG(isar, ID_AA64PFR0, 0x1101110120111112ull); /* GIC filled in later */
SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000020ull);
- cpu->id_afr0 = 0x00000000;
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
SET_IDREG(isar, ID_DFR0, 0x15011099);
SET_IDREG(isar, ID_ISAR0, 0x02101110);
SET_IDREG(isar, ID_ISAR1, 0x13112111);
@@ -905,7 +905,7 @@ static void aarch64_a710_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x21110131);
SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
SET_IDREG(isar, ID_DFR0, 0x16011099);
- cpu->id_afr0 = 0;
+ SET_IDREG(isar, ID_AFR0, 0);
SET_IDREG(isar, ID_MMFR0, 0x10201105);
SET_IDREG(isar, ID_MMFR1, 0x40000000);
SET_IDREG(isar, ID_MMFR2, 0x01260000);
@@ -927,14 +927,14 @@ static void aarch64_a710_initfn(Object *obj)
SET_IDREG(isar, ID_AA64ZFR0, 0x0000110100110021ull); /* with Crypto */
SET_IDREG(isar, ID_AA64DFR0, 0x000011f010305619ull);
SET_IDREG(isar, ID_AA64DFR1, 0);
- cpu->id_aa64afr0 = 0;
- cpu->id_aa64afr1 = 0;
+ SET_IDREG(isar, ID_AA64AFR0, 0);
+ SET_IDREG(isar, ID_AA64AFR1, 0);
SET_IDREG(isar, ID_AA64ISAR0, 0x0221111110212120ull); /* with Crypto */
SET_IDREG(isar, ID_AA64ISAR1, 0x0010111101211052ull);
SET_IDREG(isar, ID_AA64MMFR0, 0x0000022200101122ull);
SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
SET_IDREG(isar, ID_AA64MMFR2, 0x1221011110101011ull);
- cpu->clidr = 0x0000001482000023ull;
+ SET_IDREG(isar, CLIDR, 0x0000001482000023ull);
cpu->gm_blocksize = 4;
cpu->ctr = 0x000000049444c004ull;
cpu->dcz_blocksize = 4;
@@ -1007,7 +1007,7 @@ static void aarch64_neoverse_n2_initfn(Object *obj)
SET_IDREG(isar, ID_PFR0, 0x21110131);
SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
SET_IDREG(isar, ID_DFR0, 0x16011099);
- cpu->id_afr0 = 0;
+ SET_IDREG(isar, ID_AFR0, 0);
SET_IDREG(isar, ID_MMFR0, 0x10201105);
SET_IDREG(isar, ID_MMFR1, 0x40000000);
SET_IDREG(isar, ID_MMFR2, 0x01260000);
@@ -1029,14 +1029,14 @@ static void aarch64_neoverse_n2_initfn(Object *obj)
SET_IDREG(isar, ID_AA64ZFR0, 0x0000110100110021ull); /* with Crypto */
SET_IDREG(isar, ID_AA64DFR0, 0x000011f210305619ull);
SET_IDREG(isar, ID_AA64DFR1, 0);
- cpu->id_aa64afr0 = 0;
- cpu->id_aa64afr1 = 0;
+ SET_IDREG(isar, ID_AA64AFR0, 0);
+ SET_IDREG(isar, ID_AA64AFR1, 0);
SET_IDREG(isar, ID_AA64ISAR0, 0x1221111110212120ull); /* with Crypto and FEAT_RNG */
SET_IDREG(isar, ID_AA64ISAR1, 0x0011111101211052ull);
SET_IDREG(isar, ID_AA64MMFR0, 0x0000022200101125ull);
SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
SET_IDREG(isar, ID_AA64MMFR2, 0x1221011112101011ull);
- cpu->clidr = 0x0000001482000023ull;
+ SET_IDREG(isar, CLIDR, 0x0000001482000023ull);
cpu->gm_blocksize = 4;
cpu->ctr = 0x00000004b444c004ull;
cpu->dcz_blocksize = 4;
@@ -1125,10 +1125,10 @@ void aarch64_max_tcg_initfn(Object *obj)
* We're going to set FEAT_S2FWB, which mandates that CLIDR_EL1.{LoUU,LoUIS}
* are zero.
*/
- u = cpu->clidr;
+ u = GET_IDREG(isar, CLIDR);
u = FIELD_DP32(u, CLIDR_EL1, LOUIS, 0);
u = FIELD_DP32(u, CLIDR_EL1, LOUU, 0);
- cpu->clidr = u;
+ SET_IDREG(isar, CLIDR, u);
/*
* Set CTR_EL0.DIC and IDC to tell the guest it doesn't need to
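
Besides the conversion, several hunks in this file turn a stray trailing comma after SET_IDREG(...) into a semicolon. The comma operator glued adjacent calls into a single expression statement, so the old code compiled and behaved identically, but the form was fragile and misleading; a sketch of the hazard:

    /* These two lines are ONE statement, joined by the comma operator: */
    SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408ull),
    SET_IDREG(isar, ID_AA64DFR1, 0x0000000000000000);
    /* After the fix they are two ordinary statements. */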
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
index c66d521..71c6c44 100644
--- a/target/arm/tcg/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
@@ -658,15 +658,6 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
spsr &= ~PSTATE_SS;
}
- /*
- * FEAT_RME forbids return from EL3 with an invalid security state.
- * We don't need an explicit check for FEAT_RME here because we enforce
- * in scr_write() that you can't set the NSE bit without it.
- */
- if (cur_el == 3 && (env->cp15.scr_el3 & (SCR_NS | SCR_NSE)) == SCR_NSE) {
- goto illegal_return;
- }
-
new_el = el_from_spsr(spsr);
if (new_el == -1) {
goto illegal_return;
@@ -678,6 +669,17 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
goto illegal_return;
}
+ /*
+ * FEAT_RME forbids return from EL3 to a lower exception level
+ * with an invalid security state.
+ * We don't need an explicit check for FEAT_RME here because we enforce
+ * in scr_write() that you can't set the NSE bit without it.
+ */
+ if (cur_el == 3 && new_el < 3 &&
+ (env->cp15.scr_el3 & (SCR_NS | SCR_NSE)) == SCR_NSE) {
+ goto illegal_return;
+ }
+
if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
/* Return to an EL which is configured for a different register width */
goto illegal_return;
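
The relocated FEAT_RME check still rejects the one reserved SCR_EL3.{NSE,NS} combination, but only once new_el is known, so a legal EL3-to-EL3 return is no longer caught by it. For reference, the security-state encoding the bit test relies on:

    /*
     * SCR_EL3.{NSE,NS} selects the security state of lower exception
     * levels under FEAT_RME:
     *   NSE=0, NS=0  -> Secure
     *   NSE=0, NS=1  -> Non-secure
     *   NSE=1, NS=0  -> reserved   (the illegal-return case)
     *   NSE=1, NS=1  -> Realm
     * Hence (scr_el3 & (SCR_NS | SCR_NSE)) == SCR_NSE matches exactly
     * the reserved encoding, and the new_el < 3 guard limits the check
     * to returns that actually leave EL3.
     */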
diff --git a/target/arm/tcg/helper.h b/target/arm/tcg/helper.h
index d9565c8..0a006d9 100644
--- a/target/arm/tcg/helper.h
+++ b/target/arm/tcg/helper.h
@@ -1209,6 +1209,5 @@ DEF_HELPER_FLAGS_4(sme2_luti4_2b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
DEF_HELPER_FLAGS_4(sme2_luti4_2h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
DEF_HELPER_FLAGS_4(sme2_luti4_2s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(sme2_luti4_4b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
DEF_HELPER_FLAGS_4(sme2_luti4_4h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
DEF_HELPER_FLAGS_4(sme2_luti4_4s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
diff --git a/target/arm/tcg/meson.build b/target/arm/tcg/meson.build
index c59f0f0..895facd 100644
--- a/target/arm/tcg/meson.build
+++ b/target/arm/tcg/meson.build
@@ -64,6 +64,7 @@ arm_common_ss.add(files(
))
arm_common_system_ss.add(files(
+ 'cpregs-at.c',
'hflags.c',
'iwmmxt_helper.c',
'neon_helper.c',
diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c
index 0603db0..bae6165 100644
--- a/target/arm/tcg/vec_helper.c
+++ b/target/arm/tcg/vec_helper.c
@@ -3526,7 +3526,6 @@ DO_SME2_LUT(4,1,s, 4)
DO_SME2_LUT(4,2,b, 1)
DO_SME2_LUT(4,2,h, 2)
DO_SME2_LUT(4,2,s, 4)
-DO_SME2_LUT(4,4,b, 1)
DO_SME2_LUT(4,4,h, 2)
DO_SME2_LUT(4,4,s, 4)
diff --git a/target/loongarch/tcg/csr_helper.c b/target/loongarch/tcg/csr_helper.c
index 2942d7f..28b1bb8 100644
--- a/target/loongarch/tcg/csr_helper.c
+++ b/target/loongarch/tcg/csr_helper.c
@@ -29,7 +29,11 @@ target_ulong helper_csrwr_stlbps(CPULoongArchState *env, target_ulong val)
if (!check_ps(env, tlb_ps)) {
qemu_log_mask(LOG_GUEST_ERROR,
"Attempted set ps %d\n", tlb_ps);
+ } else {
+ /* Only update the PS field; reserved bits stay zero */
+ env->CSR_STLBPS = FIELD_DP64(old_v, CSR_STLBPS, PS, tlb_ps);
}
+
return old_v;
}
@@ -131,8 +135,8 @@ target_ulong helper_csrwr_pwcl(CPULoongArchState *env, target_ulong val)
}
if (!check_ps(env, ptbase)) {
qemu_log_mask(LOG_GUEST_ERROR,
- "Attrmpted set ptbase 2^%d\n", ptbase);
+ "Attempted set ptbase 2^%d\n", ptbase);
}
- env->CSR_PWCL =val;
+ env->CSR_PWCL = val;
return old_v;
}
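
The STLBPS fix depends on FIELD_DP64 being a pure bitfield deposit: it replaces only the named field and passes every other bit of old_v through unchanged, which is what keeps the reserved bits zero. Roughly, ignoring the type plumbing of the real macro (a sketch, assuming a field length below 64):

    /* FIELD_DP64(reg, R, F, val): deposit val at the field's shift/length. */
    static uint64_t field_dp64_sketch(uint64_t reg, unsigned shift,
                                      unsigned len, uint64_t val)
    {
        uint64_t mask = ((1ULL << len) - 1) << shift;
        return (reg & ~mask) | ((val << shift) & mask);
    }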
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index dc48b0f..8872593 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -173,12 +173,6 @@ static void fill_tlb_entry(CPULoongArchState *env, int index)
lo1 = env->CSR_TLBELO1;
}
- /*check csr_ps */
- if (!check_ps(env, csr_ps)) {
- qemu_log_mask(LOG_GUEST_ERROR, "csr_ps %d is illegal\n", csr_ps);
- return;
- }
-
/* Only MTLB has the ps fields */
if (index >= LOONGARCH_STLB) {
tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps);
@@ -340,23 +334,16 @@ void helper_tlbfill(CPULoongArchState *env)
if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
entryhi = env->CSR_TLBREHI;
+ /* Validity of pagesize is checked in helper_ldpte() */
pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
} else {
entryhi = env->CSR_TLBEHI;
+ /* Validity of pagesize is checked in helper_tlbrd() */
pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
}
- if (!check_ps(env, pagesize)) {
- qemu_log_mask(LOG_GUEST_ERROR, "pagesize %d is illegal\n", pagesize);
- return;
- }
-
+ /* Validity of stlb_ps is checked in helper_csrwr_stlbps() */
stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
- if (!check_ps(env, stlb_ps)) {
- qemu_log_mask(LOG_GUEST_ERROR, "stlb_ps %d is illegal\n", stlb_ps);
- return;
- }
-
if (pagesize == stlb_ps) {
/* Only write into STLB bits [47:13] */
address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_64_VPPN_SHIFT);
@@ -611,10 +598,11 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
uint32_t mem_idx)
{
CPUState *cs = env_cpu(env);
- target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, ps, badv;
+ target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, badv;
uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
uint64_t dir_base, dir_width;
+ uint8_t ps;
/*
* The parameter "base" has only two types,
@@ -651,6 +639,11 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
if (odd) {
tmp0 += MAKE_64BIT_MASK(ps, 1);
}
+
+ if (!check_ps(env, ps)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Illegal huge pagesize %d\n", ps);
+ return;
+ }
} else {
badv = env->CSR_TLBRBADV;
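
Taken together, the LoongArch hunks move all page-size validation to the point where a PS value enters CPU state, so the TLB fill paths can consume it unchecked. The resulting invariant, restating the comments added above:

    /*
     * PS validation points after this change (sketch of the invariant):
     *   CSR_STLBPS.PS  - checked in helper_csrwr_stlbps() on write
     *   CSR_TLBIDX.PS  - checked in helper_tlbrd()
     *   CSR_TLBREHI.PS - checked in helper_ldpte(), once the huge-page
     *                    size has been derived from the page-walk fields
     */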
diff --git a/target/s390x/cpu-system.c b/target/s390x/cpu-system.c
index 9b380e3..709ccd5 100644
--- a/target/s390x/cpu-system.c
+++ b/target/s390x/cpu-system.c
@@ -196,7 +196,7 @@ static bool disabled_wait(CPUState *cpu)
(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK));
}
-static unsigned s390_count_running_cpus(void)
+unsigned s390_count_running_cpus(void)
{
CPUState *cpu;
int nr_running = 0;
@@ -214,7 +214,7 @@ static unsigned s390_count_running_cpus(void)
return nr_running;
}
-unsigned int s390_cpu_halt(S390CPU *cpu)
+void s390_cpu_halt(S390CPU *cpu)
{
CPUState *cs = CPU(cpu);
trace_cpu_halt(cs->cpu_index);
@@ -223,8 +223,6 @@ unsigned int s390_cpu_halt(S390CPU *cpu)
cs->halted = 1;
cs->exception_index = EXCP_HLT;
}
-
- return s390_count_running_cpus();
}
void s390_cpu_unhalt(S390CPU *cpu)
diff --git a/target/s390x/helper.c b/target/s390x/helper.c
index 3c57c32..5c127da 100644
--- a/target/s390x/helper.c
+++ b/target/s390x/helper.c
@@ -91,7 +91,9 @@ void s390_handle_wait(S390CPU *cpu)
{
CPUState *cs = CPU(cpu);
- if (s390_cpu_halt(cpu) == 0) {
+ s390_cpu_halt(cpu);
+
+ if (s390_count_running_cpus() == 0) {
if (is_special_wait_psw(cpu->env.psw.addr)) {
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
} else {
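
The s390x change splits a helper that did two jobs: s390_cpu_halt() now only stops the vCPU, and the "was that the last running CPU?" policy moves to the caller, which also lets the CONFIG_USER_ONLY stubs below disappear. The call site reads as two explicit steps (a sketch of the resulting flow; the panic branch is an assumption from the surrounding code):

    s390_cpu_halt(cpu);                      /* always halt this vCPU  */
    if (s390_count_running_cpus() == 0) {    /* system-wide decision   */
        /* special wait PSW -> guest shutdown; otherwise treat the
         * all-CPUs-stopped state as a guest panic */
    }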
diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c
index 67d9a19..491cc5f 100644
--- a/target/s390x/kvm/kvm.c
+++ b/target/s390x/kvm/kvm.c
@@ -889,7 +889,7 @@ int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
return 0;
}
-static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
+static struct kvm_hw_breakpoint *find_hw_breakpoint(vaddr addr,
int len, int type)
{
int n;
@@ -904,7 +904,7 @@ static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
return NULL;
}
-static int insert_hw_breakpoint(target_ulong addr, int len, int type)
+static int insert_hw_breakpoint(vaddr addr, int len, int type)
{
int size;
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
index a4ba622..56cce2e 100644
--- a/target/s390x/s390x-internal.h
+++ b/target/s390x/s390x-internal.h
@@ -238,7 +238,8 @@ uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
/* cpu.c */
#ifndef CONFIG_USER_ONLY
-unsigned int s390_cpu_halt(S390CPU *cpu);
+unsigned int s390_count_running_cpus(void);
+void s390_cpu_halt(S390CPU *cpu);
void s390_cpu_unhalt(S390CPU *cpu);
void s390_cpu_system_init(Object *obj);
bool s390_cpu_system_realize(DeviceState *dev, Error **errp);
@@ -246,16 +247,6 @@ void s390_cpu_finalize(Object *obj);
void s390_cpu_system_class_init(CPUClass *cc);
void s390_cpu_machine_reset_cb(void *opaque);
bool s390_cpu_has_work(CPUState *cs);
-
-#else
-static inline unsigned int s390_cpu_halt(S390CPU *cpu)
-{
- return 0;
-}
-
-static inline void s390_cpu_unhalt(S390CPU *cpu)
-{
-}
#endif /* CONFIG_USER_ONLY */
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
index a03609a..f1acb16 100644
--- a/target/s390x/tcg/mem_helper.c
+++ b/target/s390x/tcg/mem_helper.c
@@ -126,8 +126,8 @@ static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr,
/* An access covers at most 4096 bytes and therefore at most two pages. */
typedef struct S390Access {
- target_ulong vaddr1;
- target_ulong vaddr2;
+ vaddr vaddr1;
+ vaddr vaddr2;
void *haddr1;
void *haddr2;
uint16_t size1;
@@ -148,7 +148,7 @@ typedef struct S390Access {
* For !CONFIG_USER_ONLY, the TEC is stored to env->tlb_fill_tec.
* For CONFIG_USER_ONLY, the faulting address is stored to env->__excp_addr.
*/
-static inline int s390_probe_access(CPUArchState *env, target_ulong addr,
+static inline int s390_probe_access(CPUArchState *env, vaddr addr,
int size, MMUAccessType access_type,
int mmu_idx, bool nonfault,
void **phost, uintptr_t ra)
@@ -258,7 +258,7 @@ static void access_memset(CPUS390XState *env, S390Access *desta,
static uint8_t access_get_byte(CPUS390XState *env, S390Access *access,
int offset, uintptr_t ra)
{
- target_ulong vaddr = access->vaddr1;
+ vaddr vaddr = access->vaddr1;
void *haddr = access->haddr1;
if (unlikely(offset >= access->size1)) {
@@ -278,7 +278,7 @@ static uint8_t access_get_byte(CPUS390XState *env, S390Access *access,
static void access_set_byte(CPUS390XState *env, S390Access *access,
int offset, uint8_t byte, uintptr_t ra)
{
- target_ulong vaddr = access->vaddr1;
+ vaddr vaddr = access->vaddr1;
void *haddr = access->haddr1;
if (unlikely(offset >= access->size1)) {
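
The S390Access and probe-access hunks are part of the wider move from target_ulong, whose width depends on the target being built, to vaddr, QEMU's target-independent guest-virtual-address type, so this code can be compiled once for all targets. The type itself is just a fixed-width integer, along the lines of (see include/exec/vaddr.h):

    /* Wide enough to hold a virtual address for any guest. */
    typedef uint64_t vaddr;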
diff --git a/tests/functional/meson.build b/tests/functional/meson.build
index 050c900..8bebcd4 100644
--- a/tests/functional/meson.build
+++ b/tests/functional/meson.build
@@ -138,6 +138,7 @@ tests_arm_system_thorough = [
'arm_cubieboard',
'arm_emcraft_sf2',
'arm_integratorcp',
+ 'arm_max78000fthr',
'arm_microbit',
'arm_orangepi',
'arm_quanta_gsj',
@@ -281,6 +282,7 @@ tests_rx_system_thorough = [
tests_s390x_system_thorough = [
's390x_ccw_virtio',
+ 's390x_pxelinux',
's390x_replay',
's390x_topology',
's390x_tuxrun',
@@ -373,7 +375,7 @@ foreach speed : ['quick', 'thorough']
target_tests = get_variable('tests_' + target_base + '_' + sysmode + '_' + speed, [])
endif
- test_deps = roms
+ test_deps = [roms, keymap_targets]
test_env = environment()
if have_tools
test_env.set('QEMU_TEST_QEMU_IMG', meson.global_build_root() / 'qemu-img')
diff --git a/tests/functional/test_arm_max78000fthr.py b/tests/functional/test_arm_max78000fthr.py
new file mode 100755
index 0000000..a82980b
--- /dev/null
+++ b/tests/functional/test_arm_max78000fthr.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+#
+# Functional test that checks the max78000fthr machine.
+# Tests ICC, GCR, TRNG, AES, and UART
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import QemuSystemTest, Asset, exec_command_and_wait_for_pattern
+from qemu_test import wait_for_console_pattern
+
+
+class Max78000Machine(QemuSystemTest):
+
+ ASSET_FW = Asset(
+ 'https://github.com/JacksonDonaldson/max78000Test/raw/main/build/max78000.bin',
+ '86940b4bf60931bc6a8aa5db4b9f7f3cf8f64dbbd7ac534647980e536cf3adf7')
+
+ def test_fthr(self):
+ self.set_machine('max78000fthr')
+ fw_path = self.ASSET_FW.fetch()
+ self.vm.set_console()
+ self.vm.add_args('-kernel', fw_path)
+ self.vm.add_args('-device', 'loader,file=' + fw_path + ',addr=0x10000000')
+ self.vm.launch()
+
+ wait_for_console_pattern(self, 'started')
+
+ # i -> prints instruction cache values
+ exec_command_and_wait_for_pattern(self, 'i', 'CTRL: 00010001')
+
+ # r -> gcr resets the machine
+ exec_command_and_wait_for_pattern(self, 'r', 'started')
+
+ # z -> sets some memory, then has gcr zero it
+ exec_command_and_wait_for_pattern(self, 'z', 'initial value: 12345678')
+ wait_for_console_pattern(self, "after memz: 00000000")
+
+ # t -> runs trng
+ exec_command_and_wait_for_pattern(self, 't', 'random data:')
+
+ # a -> runs aes
+ exec_command_and_wait_for_pattern(self, 'a',
+ 'encrypted to : a47ca9dd e0df4c86 a070af6e 91710dec')
+ wait_for_console_pattern(self,
+ 'encrypted to : cab7a28e bf456751 9049fcea 8960494b')
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/functional/test_ppc_bamboo.py b/tests/functional/test_ppc_bamboo.py
index fddcc24..c634ae7 100755
--- a/tests/functional/test_ppc_bamboo.py
+++ b/tests/functional/test_ppc_bamboo.py
@@ -16,28 +16,32 @@ class BambooMachine(QemuSystemTest):
timeout = 90
- ASSET_IMAGE = Asset(
- ('http://landley.net/aboriginal/downloads/binaries/'
- 'system-image-powerpc-440fp.tar.gz'),
- 'c12b58f841c775a0e6df4832a55afe6b74814d1565d08ddeafc1fb949a075c5e')
+ ASSET_KERNEL = Asset(
+ ('https://github.com/legoater/qemu-ppc-boot/raw/refs/heads/main/'
+ 'buildroot/qemu_ppc_bamboo-2023.11-8-gdcd9f0f6eb-20240105/vmlinux'),
+ 'a2e12eb45b73491ac62fc0bbeb68dead0dc5c0f22cf83146558389209b420ad1')
+ ASSET_INITRD = Asset(
+ ('https://github.com/legoater/qemu-ppc-boot/raw/refs/heads/main/'
+ 'buildroot/qemu_ppc_bamboo-2023.11-8-gdcd9f0f6eb-20240105/rootfs.cpio'),
+ 'd2a36bdb8763b389765dc8c29d4904cec2bd001c587f92e85ab9eb10d5ddda54')
def test_ppc_bamboo(self):
self.set_machine('bamboo')
self.require_accelerator("tcg")
self.require_netdev('user')
- self.archive_extract(self.ASSET_IMAGE)
+
+ kernel = self.ASSET_KERNEL.fetch()
+ initrd = self.ASSET_INITRD.fetch()
+
self.vm.set_console()
- self.vm.add_args('-kernel',
- self.scratch_file('system-image-powerpc-440fp',
- 'linux'),
- '-initrd',
- self.scratch_file('system-image-powerpc-440fp',
- 'rootfs.cpio.gz'),
- '-nic', 'user,model=rtl8139,restrict=on')
+ self.vm.add_args('-kernel', kernel,
+ '-initrd', initrd,
+ '-nic', 'user,model=virtio-net-pci,restrict=on')
self.vm.launch()
- wait_for_console_pattern(self, 'Type exit when done')
- exec_command_and_wait_for_pattern(self, 'ping 10.0.2.2',
- '10.0.2.2 is alive!')
+ wait_for_console_pattern(self, 'buildroot login:')
+ exec_command_and_wait_for_pattern(self, 'root', '#')
+ exec_command_and_wait_for_pattern(self, 'ping -c1 10.0.2.2',
+ '1 packets transmitted, 1 packets received, 0% packet loss')
exec_command_and_wait_for_pattern(self, 'halt', 'System Halted')
if __name__ == '__main__':
diff --git a/tests/functional/test_s390x_pxelinux.py b/tests/functional/test_s390x_pxelinux.py
new file mode 100755
index 0000000..4fc33b8
--- /dev/null
+++ b/tests/functional/test_s390x_pxelinux.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Functional test that checks the pxelinux.cfg network booting of a s390x VM
+# (TFTP booting without config file is already tested by the pxe qtest, so
+# we don't repeat that here).
+
+import os
+import shutil
+
+from qemu_test import QemuSystemTest, Asset, wait_for_console_pattern
+
+
+pxelinux_cfg_contents = '''# pxelinux.cfg style config file
+default Debian
+label Nonexisting
+kernel kernel.notavailable
+initrd initrd.notavailable
+label Debian
+kernel kernel.debian
+initrd initrd.debian
+append testoption=teststring
+label Fedora
+kernel kernel.fedora
+'''
+
+class S390PxeLinux(QemuSystemTest):
+
+ ASSET_DEBIAN_KERNEL = Asset(
+ ('https://snapshot.debian.org/archive/debian/'
+ '20201126T092837Z/dists/buster/main/installer-s390x/'
+ '20190702+deb10u6/images/generic/kernel.debian'),
+ 'd411d17c39ae7ad38d27534376cbe88b68b403c325739364122c2e6f1537e818')
+
+ ASSET_DEBIAN_INITRD = Asset(
+ ('https://snapshot.debian.org/archive/debian/'
+ '20201126T092837Z/dists/buster/main/installer-s390x/'
+ '20190702+deb10u6/images/generic/initrd.debian'),
+ '836bbd0fe6a5ca81274c28c2b063ea315ce1868660866e9b60180c575fef9fd5')
+
+ ASSET_FEDORA_KERNEL = Asset(
+ ('https://archives.fedoraproject.org/pub/archive'
+ '/fedora-secondary/releases/31/Server/s390x/os'
+ '/images/kernel.img'),
+ '480859574f3f44caa6cd35c62d70e1ac0609134e22ce2a954bbed9b110c06e0b')
+
+ def pxelinux_launch(self, pl_name='default', extra_opts=None):
+ self.require_netdev('user')
+ self.set_machine('s390-ccw-virtio')
+
+ debian_kernel = self.ASSET_DEBIAN_KERNEL.fetch()
+ debian_initrd = self.ASSET_DEBIAN_INITRD.fetch()
+ fedora_kernel = self.ASSET_FEDORA_KERNEL.fetch()
+
+ # Prepare a folder for the TFTP "server":
+ tftpdir = self.scratch_file('tftp')
+ shutil.rmtree(tftpdir, ignore_errors=True) # Remove stale stuff
+ os.mkdir(tftpdir)
+ shutil.copy(debian_kernel, os.path.join(tftpdir, 'kernel.debian'))
+ shutil.copy(debian_initrd, os.path.join(tftpdir, 'initrd.debian'))
+ shutil.copy(fedora_kernel, os.path.join(tftpdir, 'kernel.fedora'))
+
+ pxelinuxdir = self.scratch_file('tftp', 'pxelinux.cfg')
+ os.mkdir(pxelinuxdir)
+
+ cfg_fname = self.scratch_file('tftp', 'pxelinux.cfg', pl_name)
+ with open(cfg_fname, 'w', encoding='utf-8') as f:
+ f.write(pxelinux_cfg_contents)
+
+ virtio_net_dev = 'virtio-net-ccw,netdev=n1,bootindex=1'
+ if extra_opts:
+ virtio_net_dev += ',' + extra_opts
+
+ self.vm.add_args('-m', '384',
+ '-netdev', f'user,id=n1,tftp={tftpdir}',
+ '-device', virtio_net_dev)
+ self.vm.set_console()
+ self.vm.launch()
+
+
+ def test_default(self):
+ self.pxelinux_launch()
+ # The kernel prints its arguments to the console, so we can use
+ # this to check whether the kernel parameters are correctly handled:
+ wait_for_console_pattern(self, 'testoption=teststring')
+ # Now also check that we've successfully loaded the initrd:
+ wait_for_console_pattern(self, 'Unpacking initramfs...')
+ wait_for_console_pattern(self, 'Run /init as init process')
+
+ def test_mac(self):
+ self.pxelinux_launch(pl_name='01-02-ca-fe-ba-be-42',
+ extra_opts='mac=02:ca:fe:ba:be:42,loadparm=3')
+ wait_for_console_pattern(self, 'Linux version 5.3.7-301.fc31.s390x')
+
+ def test_uuid(self):
+ # Also add a non-bootable disk to check the fallback to network boot:
+ self.vm.add_args('-blockdev', 'null-co,size=65536,node-name=d1',
+ '-device', 'virtio-blk,drive=d1,bootindex=0,loadparm=1',
+ '-uuid', '550e8400-e29b-11d4-a716-446655441234')
+ self.pxelinux_launch(pl_name='550e8400-e29b-11d4-a716-446655441234')
+ wait_for_console_pattern(self, 'Debian 4.19.146-1 (2020-09-17)')
+
+ def test_ip(self):
+ self.vm.add_args('-M', 'loadparm=3')
+ self.pxelinux_launch(pl_name='0A00020F')
+ wait_for_console_pattern(self, 'Linux version 5.3.7-301.fc31.s390x')
+
+ def test_menu(self):
+ self.vm.add_args('-boot', 'menu=on,splash-time=10')
+ self.pxelinux_launch(pl_name='0A00')
+ wait_for_console_pattern(self, '[1] Nonexisting')
+ wait_for_console_pattern(self, '[2] Debian')
+ wait_for_console_pattern(self, '[3] Fedora')
+ wait_for_console_pattern(self, 'Debian 4.19.146-1 (2020-09-17)')
+
+
+if __name__ == '__main__':
+ QemuSystemTest.main()
diff --git a/tests/qtest/cxl-test.c b/tests/qtest/cxl-test.c
index a600331..8fb7e58 100644
--- a/tests/qtest/cxl-test.c
+++ b/tests/qtest/cxl-test.c
@@ -19,6 +19,12 @@
"-device pxb-cxl,id=cxl.1,bus=pcie.0,bus_nr=53 " \
"-M cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.targets.1=cxl.1,cxl-fmw.0.size=4G "
+#define QEMU_VIRT_2PXB_CMD \
+ "-machine virt,cxl=on -cpu max " \
+ "-device pxb-cxl,id=cxl.0,bus=pcie.0,bus_nr=52 " \
+ "-device pxb-cxl,id=cxl.1,bus=pcie.0,bus_nr=53 " \
+ "-M cxl-fmw.0.targets.0=cxl.0,cxl-fmw.0.targets.1=cxl.1,cxl-fmw.0.size=4G "
+
#define QEMU_RP \
"-device cxl-rp,id=rp0,bus=cxl.0,chassis=0,slot=0 "
@@ -197,25 +203,51 @@ static void cxl_2pxb_4rp_4t3d(void)
qtest_end();
rmdir(tmpfs);
}
+
+static void cxl_virt_2pxb_4rp_4t3d(void)
+{
+ g_autoptr(GString) cmdline = g_string_new(NULL);
+ g_autofree const char *tmpfs = NULL;
+
+ tmpfs = g_dir_make_tmp("cxl-test-XXXXXX", NULL);
+
+ g_string_printf(cmdline, QEMU_VIRT_2PXB_CMD QEMU_4RP QEMU_4T3D,
+ tmpfs, tmpfs, tmpfs, tmpfs, tmpfs, tmpfs,
+ tmpfs, tmpfs);
+
+ qtest_start(cmdline->str);
+ qtest_end();
+ rmdir(tmpfs);
+}
#endif /* CONFIG_POSIX */
int main(int argc, char **argv)
{
- g_test_init(&argc, &argv, NULL);
+ const char *arch = qtest_get_arch();
- qtest_add_func("/pci/cxl/basic_hostbridge", cxl_basic_hb);
- qtest_add_func("/pci/cxl/basic_pxb", cxl_basic_pxb);
- qtest_add_func("/pci/cxl/pxb_with_window", cxl_pxb_with_window);
- qtest_add_func("/pci/cxl/pxb_x2_with_window", cxl_2pxb_with_window);
- qtest_add_func("/pci/cxl/rp", cxl_root_port);
- qtest_add_func("/pci/cxl/rp_x2", cxl_2root_port);
+ g_test_init(&argc, &argv, NULL);
+ if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) {
+ qtest_add_func("/pci/cxl/basic_hostbridge", cxl_basic_hb);
+ qtest_add_func("/pci/cxl/basic_pxb", cxl_basic_pxb);
+ qtest_add_func("/pci/cxl/pxb_with_window", cxl_pxb_with_window);
+ qtest_add_func("/pci/cxl/pxb_x2_with_window", cxl_2pxb_with_window);
+ qtest_add_func("/pci/cxl/rp", cxl_root_port);
+ qtest_add_func("/pci/cxl/rp_x2", cxl_2root_port);
#ifdef CONFIG_POSIX
- qtest_add_func("/pci/cxl/type3_device", cxl_t3d_deprecated);
- qtest_add_func("/pci/cxl/type3_device_pmem", cxl_t3d_persistent);
- qtest_add_func("/pci/cxl/type3_device_vmem", cxl_t3d_volatile);
- qtest_add_func("/pci/cxl/type3_device_vmem_lsa", cxl_t3d_volatile_lsa);
- qtest_add_func("/pci/cxl/rp_x2_type3_x2", cxl_1pxb_2rp_2t3d);
- qtest_add_func("/pci/cxl/pxb_x2_root_port_x4_type3_x4", cxl_2pxb_4rp_4t3d);
+ qtest_add_func("/pci/cxl/type3_device", cxl_t3d_deprecated);
+ qtest_add_func("/pci/cxl/type3_device_pmem", cxl_t3d_persistent);
+ qtest_add_func("/pci/cxl/type3_device_vmem", cxl_t3d_volatile);
+ qtest_add_func("/pci/cxl/type3_device_vmem_lsa", cxl_t3d_volatile_lsa);
+ qtest_add_func("/pci/cxl/rp_x2_type3_x2", cxl_1pxb_2rp_2t3d);
+ qtest_add_func("/pci/cxl/pxb_x2_root_port_x4_type3_x4",
+ cxl_2pxb_4rp_4t3d);
#endif
+ } else if (strcmp(arch, "aarch64") == 0) {
+#ifdef CONFIG_POSIX
+ qtest_add_func("/pci/cxl/virt/pxb_x2_root_port_x4_type3_x4",
+ cxl_virt_2pxb_4rp_4t3d);
+#endif
+ }
+
return g_test_run();
}
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
index 91b4a71..5ad969f 100644
--- a/tests/qtest/meson.build
+++ b/tests/qtest/meson.build
@@ -262,6 +262,7 @@ qtests_aarch64 = \
config_all_devices.has_key('CONFIG_TPM_TIS_I2C') ? ['tpm-tis-i2c-test'] : []) + \
(config_all_devices.has_key('CONFIG_ASPEED_SOC') ? qtests_aspeed64 : []) + \
(config_all_devices.has_key('CONFIG_NPCM8XX') ? qtests_npcm8xx : []) + \
+ qtests_cxl + \
['arm-cpu-features',
'numa-test',
'boot-serial-test',
diff --git a/tests/qtest/migration/migration-qmp.c b/tests/qtest/migration/migration-qmp.c
index fb59741..66dd369 100644
--- a/tests/qtest/migration/migration-qmp.c
+++ b/tests/qtest/migration/migration-qmp.c
@@ -358,6 +358,11 @@ void read_blocktime(QTestState *who)
rsp_return = migrate_query_not_failed(who);
g_assert(qdict_haskey(rsp_return, "postcopy-blocktime"));
+ g_assert(qdict_haskey(rsp_return, "postcopy-vcpu-blocktime"));
+ g_assert(qdict_haskey(rsp_return, "postcopy-latency"));
+ g_assert(qdict_haskey(rsp_return, "postcopy-latency-dist"));
+ g_assert(qdict_haskey(rsp_return, "postcopy-vcpu-latency"));
+ g_assert(qdict_haskey(rsp_return, "postcopy-non-vcpu-latency"));
qobject_unref(rsp_return);
}