author    Peter Maydell <peter.maydell@linaro.org>    2019-07-02 12:58:32 +0100
committer Peter Maydell <peter.maydell@linaro.org>    2019-07-02 12:58:32 +0100
commit    c4e42a9c2b714de5cddabffe46c7789fcff49c30 (patch)
tree      4a9b7f8a620492deb865c17e952ca20f717b6a1e /target
parent    d247c8e7f4fc856abf799c37ca9818514ddb08b7 (diff)
parent    787a7e76c2e93a48c47b324fea592c9910a70483 (diff)
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20190701' into staging
target-arm queue:
 * hw/arm/boot: fix direct kernel boot with initrd
 * hw/arm/msf2-som: Exit when the cpu is not the expected one
 * i.mx7: fix bugs in PCI controller needed to boot recent kernels
 * aspeed: add RTC device
 * aspeed: fix some timer device bugs
 * aspeed: add swift-bmc board
 * aspeed: vic: Add support for legacy register interface
 * aspeed: add aspeed-xdma device
 * Add new sbsa-ref board for aarch64
 * target/arm: code refactoring in preparation for support of compilation
   with TCG disabled

# gpg: Signature made Mon 01 Jul 2019 17:38:10 BST
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20190701: (46 commits)
  target/arm: Declare some M-profile functions publicly
  target/arm: Declare arm_log_exception() function publicly
  target/arm: Restrict PSCI to TCG
  target/arm/vfp_helper: Restrict the SoftFloat use to TCG
  target/arm/vfp_helper: Extract vfp_set_fpscr_from_host()
  target/arm/vfp_helper: Extract vfp_set_fpscr_to_host()
  target/arm/vfp_helper: Move code around
  target/arm: Move TLB related routines to tlb_helper.c
  target/arm: Declare get_phys_addr() function publicly
  target/arm: Move CPU state dumping routines to cpu.c
  target/arm: Move the DC ZVA helper into op_helper
  target/arm: Fix coding style issues
  target/arm: Fix multiline comment syntax
  target/arm/helper: Remove unused include
  target/arm: Add copyright boilerplate
  target/arm: Makefile cleanup (softmmu)
  target/arm: Makefile cleanup (KVM)
  target/arm: Makefile cleanup (ARM)
  target/arm: Makefile cleanup (Aarch64)
  hw/arm: Add arm SBSA reference machine, devices part
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
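Much of the "TCG disabled" refactoring in this queue follows a single idiom, visible below in the aarch64_cpu_dump_state() and arm_handle_psci_call() hunks: the real implementation is compiled under one configuration, and an unreachable stub under the other, so callers never need inline #ifdefs. A minimal self-contained sketch of that idiom (names invented; plain assert(0) stands in for QEMU's g_assert_not_reached()):

#include <assert.h>
#include <stdio.h>

#ifdef TARGET_AARCH64
/* Real implementation, built only for AArch64-capable targets. */
static void dump_state_demo(void)
{
    printf("AArch64 state dump\n");
}
#else
/* Stub: callers are expected never to reach this configuration. */
static void dump_state_demo(void)
{
    assert(0);
}
#endif

int main(void)
{
#ifdef TARGET_AARCH64
    dump_state_demo();
#else
    printf("built without TARGET_AARCH64; the stub is linked but never called\n");
#endif
    return 0;
}

Compile with -DTARGET_AARCH64 to take the real path; without it, the stub exists only to satisfy the compiler and linker.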
Diffstat (limited to 'target')
-rw-r--r--  target/arm/Makefile.objs     24
-rw-r--r--  target/arm/cpu.c            232
-rw-r--r--  target/arm/cpu.h              2
-rw-r--r--  target/arm/helper.c         498
-rw-r--r--  target/arm/internals.h       69
-rw-r--r--  target/arm/op_helper.c      262
-rw-r--r--  target/arm/tlb_helper.c     200
-rw-r--r--  target/arm/translate-a64.c  128
-rw-r--r--  target/arm/translate.c       91
-rw-r--r--  target/arm/translate.h        5
-rw-r--r--  target/arm/vfp_helper.c     199
11 files changed, 950 insertions(+), 760 deletions(-)
diff --git a/target/arm/Makefile.objs b/target/arm/Makefile.objs
index dfa736a..5c154f0 100644
--- a/target/arm/Makefile.objs
+++ b/target/arm/Makefile.objs
@@ -1,16 +1,15 @@
obj-y += arm-semi.o
-obj-$(CONFIG_SOFTMMU) += machine.o psci.o arch_dump.o monitor.o
+obj-y += helper.o vfp_helper.o
+obj-y += cpu.o gdbstub.o
+obj-$(TARGET_AARCH64) += cpu64.o gdbstub64.o
+
+obj-$(CONFIG_SOFTMMU) += machine.o arch_dump.o monitor.o
+obj-$(CONFIG_SOFTMMU) += arm-powerctl.o
+
obj-$(CONFIG_KVM) += kvm.o
obj-$(call land,$(CONFIG_KVM),$(call lnot,$(TARGET_AARCH64))) += kvm32.o
obj-$(call land,$(CONFIG_KVM),$(TARGET_AARCH64)) += kvm64.o
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
-obj-y += translate.o op_helper.o helper.o cpu.o
-obj-y += neon_helper.o iwmmxt_helper.o vec_helper.o vfp_helper.o
-obj-y += gdbstub.o
-obj-$(TARGET_AARCH64) += cpu64.o translate-a64.o helper-a64.o gdbstub64.o
-obj-$(TARGET_AARCH64) += pauth_helper.o
-obj-y += crypto_helper.o
-obj-$(CONFIG_SOFTMMU) += arm-powerctl.o
DECODETREE = $(SRC_PATH)/scripts/decodetree.py
@@ -33,4 +32,13 @@ target/arm/translate-sve.o: target/arm/decode-sve.inc.c
target/arm/translate.o: target/arm/decode-vfp.inc.c
target/arm/translate.o: target/arm/decode-vfp-uncond.inc.c
+obj-y += tlb_helper.o
+obj-y += translate.o op_helper.o
+obj-y += crypto_helper.o
+obj-y += iwmmxt_helper.o vec_helper.o neon_helper.o
+
+obj-$(CONFIG_SOFTMMU) += psci.o
+
+obj-$(TARGET_AARCH64) += translate-a64.o helper-a64.o
obj-$(TARGET_AARCH64) += translate-sve.o sve_helper.o
+obj-$(TARGET_AARCH64) += pauth_helper.o
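The KVM rules above use the link-time variant of the same stub idea: exactly one of kvm.o or kvm-stub.o is linked, both defining the same symbols, so C callers stay free of #ifdefs. A hedged approximation, collapsed into one file so it stays runnable (the function name is invented; in QEMU the two definitions live in separate objects selected by obj-$(CONFIG_KVM) and obj-$(call lnot,$(CONFIG_KVM))):

#include <stdbool.h>
#include <stdio.h>

#ifdef CONFIG_KVM
/* What a kvm.c-style file provides when KVM support is built in. */
bool kvm_available_demo(void)
{
    return true;
}
#else
/* What a kvm-stub.c-style file provides when CONFIG_KVM is n. */
bool kvm_available_demo(void)
{
    return false;
}
#endif

int main(void)
{
    printf("KVM backend: %s\n", kvm_available_demo() ? "present" : "stubbed");
    return 0;
}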
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 376db15..f21261c 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -19,6 +19,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/qemu-print.h"
#include "qemu-common.h"
#include "target/arm/idau.h"
#include "qemu/module.h"
@@ -676,6 +677,231 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
#endif
}
+#ifdef TARGET_AARCH64
+
+static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ uint32_t psr = pstate_read(env);
+ int i;
+ int el = arm_current_el(env);
+ const char *ns_status;
+
+ qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
+ for (i = 0; i < 32; i++) {
+ if (i == 31) {
+ qemu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
+ } else {
+ qemu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
+ (i + 2) % 3 ? " " : "\n");
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
+ ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
+ } else {
+ ns_status = "";
+ }
+ qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
+ psr,
+ psr & PSTATE_N ? 'N' : '-',
+ psr & PSTATE_Z ? 'Z' : '-',
+ psr & PSTATE_C ? 'C' : '-',
+ psr & PSTATE_V ? 'V' : '-',
+ ns_status,
+ el,
+ psr & PSTATE_SP ? 'h' : 't');
+
+ if (cpu_isar_feature(aa64_bti, cpu)) {
+ qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
+ }
+ if (!(flags & CPU_DUMP_FPU)) {
+ qemu_fprintf(f, "\n");
+ return;
+ }
+ if (fp_exception_el(env, el) != 0) {
+ qemu_fprintf(f, " FPU disabled\n");
+ return;
+ }
+ qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
+ vfp_get_fpcr(env), vfp_get_fpsr(env));
+
+ if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
+ int j, zcr_len = sve_zcr_len_for_el(env, el);
+
+ for (i = 0; i <= FFR_PRED_NUM; i++) {
+ bool eol;
+ if (i == FFR_PRED_NUM) {
+ qemu_fprintf(f, "FFR=");
+ /* It's last, so end the line. */
+ eol = true;
+ } else {
+ qemu_fprintf(f, "P%02d=", i);
+ switch (zcr_len) {
+ case 0:
+ eol = i % 8 == 7;
+ break;
+ case 1:
+ eol = i % 6 == 5;
+ break;
+ case 2:
+ case 3:
+ eol = i % 3 == 2;
+ break;
+ default:
+ /* More than one quadword per predicate. */
+ eol = true;
+ break;
+ }
+ }
+ for (j = zcr_len / 4; j >= 0; j--) {
+ int digits;
+ if (j * 4 + 4 <= zcr_len + 1) {
+ digits = 16;
+ } else {
+ digits = (zcr_len % 4 + 1) * 4;
+ }
+ qemu_fprintf(f, "%0*" PRIx64 "%s", digits,
+ env->vfp.pregs[i].p[j],
+ j ? ":" : eol ? "\n" : " ");
+ }
+ }
+
+ for (i = 0; i < 32; i++) {
+ if (zcr_len == 0) {
+ qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
+ i, env->vfp.zregs[i].d[1],
+ env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
+ } else if (zcr_len == 1) {
+ qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64
+ ":%016" PRIx64 ":%016" PRIx64 "\n",
+ i, env->vfp.zregs[i].d[3], env->vfp.zregs[i].d[2],
+ env->vfp.zregs[i].d[1], env->vfp.zregs[i].d[0]);
+ } else {
+ for (j = zcr_len; j >= 0; j--) {
+ bool odd = (zcr_len - j) % 2 != 0;
+ if (j == zcr_len) {
+ qemu_fprintf(f, "Z%02d[%x-%x]=", i, j, j - 1);
+ } else if (!odd) {
+ if (j > 0) {
+ qemu_fprintf(f, " [%x-%x]=", j, j - 1);
+ } else {
+ qemu_fprintf(f, " [%x]=", j);
+ }
+ }
+ qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
+ env->vfp.zregs[i].d[j * 2 + 1],
+ env->vfp.zregs[i].d[j * 2],
+ odd || j == 0 ? "\n" : ":");
+ }
+ }
+ }
+ } else {
+ for (i = 0; i < 32; i++) {
+ uint64_t *q = aa64_vfp_qreg(env, i);
+ qemu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
+ i, q[1], q[0], (i & 1 ? "\n" : " "));
+ }
+ }
+}
+
+#else
+
+static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ g_assert_not_reached();
+}
+
+#endif
+
+static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ int i;
+
+ if (is_a64(env)) {
+ aarch64_cpu_dump_state(cs, f, flags);
+ return;
+ }
+
+ for (i = 0; i < 16; i++) {
+ qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
+ if ((i % 4) == 3) {
+ qemu_fprintf(f, "\n");
+ } else {
+ qemu_fprintf(f, " ");
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ uint32_t xpsr = xpsr_read(env);
+ const char *mode;
+ const char *ns_status = "";
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ ns_status = env->v7m.secure ? "S " : "NS ";
+ }
+
+ if (xpsr & XPSR_EXCP) {
+ mode = "handler";
+ } else {
+ if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
+ mode = "unpriv-thread";
+ } else {
+ mode = "priv-thread";
+ }
+ }
+
+ qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
+ xpsr,
+ xpsr & XPSR_N ? 'N' : '-',
+ xpsr & XPSR_Z ? 'Z' : '-',
+ xpsr & XPSR_C ? 'C' : '-',
+ xpsr & XPSR_V ? 'V' : '-',
+ xpsr & XPSR_T ? 'T' : 'A',
+ ns_status,
+ mode);
+ } else {
+ uint32_t psr = cpsr_read(env);
+ const char *ns_status = "";
+
+ if (arm_feature(env, ARM_FEATURE_EL3) &&
+ (psr & CPSR_M) != ARM_CPU_MODE_MON) {
+ ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
+ }
+
+ qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
+ psr,
+ psr & CPSR_N ? 'N' : '-',
+ psr & CPSR_Z ? 'Z' : '-',
+ psr & CPSR_C ? 'C' : '-',
+ psr & CPSR_V ? 'V' : '-',
+ psr & CPSR_T ? 'T' : 'A',
+ ns_status,
+ aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
+ }
+
+ if (flags & CPU_DUMP_FPU) {
+ int numvfpregs = 0;
+ if (arm_feature(env, ARM_FEATURE_VFP)) {
+ numvfpregs += 16;
+ }
+ if (arm_feature(env, ARM_FEATURE_VFP3)) {
+ numvfpregs += 16;
+ }
+ for (i = 0; i < numvfpregs; i++) {
+ uint64_t v = *aa32_vfp_dreg(env, i);
+ qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
+ i * 2, (uint32_t)v,
+ i * 2 + 1, (uint32_t)(v >> 32),
+ i, v);
+ }
+ qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
+ }
+}
+
uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
{
uint32_t Aff1 = idx / clustersz;
@@ -2340,8 +2566,6 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_write_register = arm_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
cc->do_interrupt = arm_cpu_do_interrupt;
- cc->do_unaligned_access = arm_cpu_do_unaligned_access;
- cc->do_transaction_failed = arm_cpu_do_transaction_failed;
cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
cc->asidx_from_attrs = arm_asidx_from_attrs;
cc->vmsd = &vmstate_arm_cpu;
@@ -2364,6 +2588,10 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
#ifdef CONFIG_TCG
cc->tcg_initialize = arm_translate_init;
cc->tlb_fill = arm_cpu_tlb_fill;
+#if !defined(CONFIG_USER_ONLY)
+ cc->do_unaligned_access = arm_cpu_do_unaligned_access;
+ cc->do_transaction_failed = arm_cpu_do_transaction_failed;
+#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
#endif
}
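The dump routines moved into cpu.c above render condition flags with a chain of ternaries. A standalone sketch of that formatting idiom, with printf standing in for qemu_fprintf and the bit positions taken from the AArch32 CPSR layout:

#include <stdint.h>
#include <stdio.h>

#define CPSR_N (1u << 31)
#define CPSR_Z (1u << 30)
#define CPSR_C (1u << 29)
#define CPSR_V (1u << 28)

int main(void)
{
    uint32_t psr = CPSR_N | CPSR_C; /* arbitrary example value */

    /* Each flag prints as its letter when set, '-' when clear. */
    printf("PSR=%08x %c%c%c%c\n", psr,
           psr & CPSR_N ? 'N' : '-',
           psr & CPSR_Z ? 'Z' : '-',
           psr & CPSR_C ? 'C' : '-',
           psr & CPSR_V ? 'V' : '-');
    return 0;
}

This prints "PSR=a0000000 N-C-" for the example value.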
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index f9da672..a9be186 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -929,8 +929,6 @@ void arm_cpu_do_interrupt(CPUState *cpu);
void arm_v7m_cpu_do_interrupt(CPUState *cpu);
bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req);
-void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags);
-
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
MemTxAttrs *attrs);
diff --git a/target/arm/helper.c b/target/arm/helper.c
index df4276f..38b7343 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1,3 +1,10 @@
+/*
+ * ARM generic helpers.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
@@ -7,7 +14,6 @@
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
-#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
@@ -19,7 +25,6 @@
#include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
-#include "fpu/softfloat.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-target.h"
#include "qapi/error.h"
@@ -28,38 +33,12 @@
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#ifndef CONFIG_USER_ONLY
-/* Cacheability and shareability attributes for a memory access */
-typedef struct ARMCacheAttrs {
- unsigned int attrs:8; /* as in the MAIR register encoding */
- unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
-} ARMCacheAttrs;
-
-static bool get_phys_addr(CPUARMState *env, target_ulong address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
- target_ulong *page_size,
- ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
target_ulong *page_size_ptr,
ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
-
-/* Security attributes for an address, as returned by v8m_security_lookup. */
-typedef struct V8M_SAttributes {
- bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
- bool ns;
- bool nsc;
- uint8_t sregion;
- bool srvalid;
- uint8_t iregion;
- bool irvalid;
-} V8M_SAttributes;
-
-static void v8m_security_lookup(CPUARMState *env, uint32_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- V8M_SAttributes *sattrs);
#endif
static void switch_mode(CPUARMState *env, int mode);
@@ -7524,7 +7503,8 @@ void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
- /* The TT instructions can be used by unprivileged code, but in
+ /*
+ * The TT instructions can be used by unprivileged code, but in
* user-only emulation we don't have the MPU.
* Luckily since we know we are NonSecure unprivileged (and that in
* turn means that the A flag wasn't specified), all the bits in the
@@ -7700,22 +7680,41 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
return target_el;
}
-/*
- * Return true if the v7M CPACR permits access to the FPU for the specified
- * security state and privilege level.
- */
-static bool v7m_cpacr_pass(CPUARMState *env, bool is_secure, bool is_priv)
+void arm_log_exception(int idx)
{
- switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
- case 0:
- case 2: /* UNPREDICTABLE: we treat like 0 */
- return false;
- case 1:
- return is_priv;
- case 3:
- return true;
- default:
- g_assert_not_reached();
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ const char *exc = NULL;
+ static const char * const excnames[] = {
+ [EXCP_UDEF] = "Undefined Instruction",
+ [EXCP_SWI] = "SVC",
+ [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
+ [EXCP_DATA_ABORT] = "Data Abort",
+ [EXCP_IRQ] = "IRQ",
+ [EXCP_FIQ] = "FIQ",
+ [EXCP_BKPT] = "Breakpoint",
+ [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
+ [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
+ [EXCP_HVC] = "Hypervisor Call",
+ [EXCP_HYP_TRAP] = "Hypervisor Trap",
+ [EXCP_SMC] = "Secure Monitor Call",
+ [EXCP_VIRQ] = "Virtual IRQ",
+ [EXCP_VFIQ] = "Virtual FIQ",
+ [EXCP_SEMIHOST] = "Semihosting call",
+ [EXCP_NOCP] = "v7M NOCP UsageFault",
+ [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
+ [EXCP_STKOF] = "v8M STKOF UsageFault",
+ [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
+ [EXCP_LSERR] = "v8M LSERR UsageFault",
+ [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
+ };
+
+ if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
+ exc = excnames[idx];
+ }
+ if (!exc) {
+ exc = "unknown";
+ }
+ qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
}
}
@@ -7796,7 +7795,8 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
return true;
pend_fault:
- /* By pending the exception at this point we are making
+ /*
+ * By pending the exception at this point we are making
* the IMPDEF choice "overridden exceptions pended" (see the
* MergeExcInfo() pseudocode). The other choice would be to not
* pend them now and then make a choice about which to throw away
@@ -7871,7 +7871,8 @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
return true;
pend_fault:
- /* By pending the exception at this point we are making
+ /*
+ * By pending the exception at this point we are making
* the IMPDEF choice "overridden exceptions pended" (see the
* MergeExcInfo() pseudocode). The other choice would be to not
* pend them now and then make a choice about which to throw away
@@ -7972,7 +7973,8 @@ void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
*/
}
-/* Write to v7M CONTROL.SPSEL bit for the specified security bank.
+/*
+ * Write to v7M CONTROL.SPSEL bit for the specified security bank.
* This may change the current stack pointer between Main and Process
* stack pointers if it is done for the CONTROL register for the current
* security state.
@@ -8000,7 +8002,8 @@ static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
}
}
-/* Write to v7M CONTROL.SPSEL bit. This may change the current
+/*
+ * Write to v7M CONTROL.SPSEL bit. This may change the current
* stack pointer between Main and Process stack pointers.
*/
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
@@ -8010,7 +8013,8 @@ static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
- /* Write a new value to v7m.exception, thus transitioning into or out
+ /*
+ * Write a new value to v7m.exception, thus transitioning into or out
* of Handler mode; this may result in a change of active stack pointer.
*/
bool new_is_psp, old_is_psp = v7m_using_psp(env);
@@ -8036,7 +8040,8 @@ static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
return;
}
- /* All the banked state is accessed by looking at env->v7m.secure
+ /*
+ * All the banked state is accessed by looking at env->v7m.secure
* except for the stack pointer; rearrange the SP appropriately.
*/
new_ss_msp = env->v7m.other_ss_msp;
@@ -8063,7 +8068,8 @@ static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
- /* Handle v7M BXNS:
+ /*
+ * Handle v7M BXNS:
* - if the return value is a magic value, do exception return (like BX)
* - otherwise bit 0 of the return value is the target security state
*/
@@ -8078,7 +8084,8 @@ void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
}
if (dest >= min_magic) {
- /* This is an exception return magic value; put it where
+ /*
+ * This is an exception return magic value; put it where
* do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
* Note that if we ever add gen_ss_advance() singlestep support to
* M profile this should count as an "instruction execution complete"
@@ -8103,7 +8110,8 @@ void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
- /* Handle v7M BLXNS:
+ /*
+ * Handle v7M BLXNS:
* - bit 0 of the destination address is the target security state
*/
@@ -8116,7 +8124,8 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
assert(env->v7m.secure);
if (dest & 1) {
- /* target is Secure, so this is just a normal BLX,
+ /*
+ * Target is Secure, so this is just a normal BLX,
* except that the low bit doesn't indicate Thumb/not.
*/
env->regs[14] = nextinst;
@@ -8147,7 +8156,8 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
env->regs[13] = sp;
env->regs[14] = 0xfeffffff;
if (arm_v7m_is_handler_mode(env)) {
- /* Write a dummy value to IPSR, to avoid leaking the current secure
+ /*
+ * Write a dummy value to IPSR, to avoid leaking the current secure
* exception number to non-secure code. This is guaranteed not
* to cause write_v7m_exception() to actually change stacks.
*/
@@ -8162,7 +8172,8 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
bool spsel)
{
- /* Return a pointer to the location where we currently store the
+ /*
+ * Return a pointer to the location where we currently store the
* stack pointer for the requested security state and thread mode.
* This pointer will become invalid if the CPU state is updated
* such that the stack pointers are switched around (eg changing
@@ -8208,7 +8219,8 @@ static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
- /* We don't do a get_phys_addr() here because the rules for vector
+ /*
+ * We don't do a get_phys_addr() here because the rules for vector
* loads are special: they always use the default memory map, and
* the default memory map permits reads from all addresses.
* Since there's no easy way to pass through to pmsav8_mpu_lookup()
@@ -8239,7 +8251,8 @@ static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
return true;
load_fail:
- /* All vector table fetch fails are reported as HardFault, with
+ /*
+ * All vector table fetch fails are reported as HardFault, with
* HFSR.VECTTBL and .FORCED set. (FORCED is set because
* technically the underlying exception is a MemManage or BusFault
* that is escalated to HardFault.) This is a terminal exception,
@@ -8271,7 +8284,8 @@ static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
bool ignore_faults)
{
- /* For v8M, push the callee-saves register part of the stack frame.
+ /*
+ * For v8M, push the callee-saves register part of the stack frame.
* Compare the v8M pseudocode PushCalleeStack().
* In the tailchaining case this may not be the current stack.
*/
@@ -8322,7 +8336,8 @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
return true;
}
- /* Write as much of the stack frame as we can. A write failure may
+ /*
+ * Write as much of the stack frame as we can. A write failure may
* cause us to pend a derived exception.
*/
sig = v7m_integrity_sig(env, lr);
@@ -8346,7 +8361,8 @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
bool ignore_stackfaults)
{
- /* Do the "take the exception" parts of exception entry,
+ /*
+ * Do the "take the exception" parts of exception entry,
* but not the pushing of state to the stack. This is
* similar to the pseudocode ExceptionTaken() function.
*/
@@ -8371,13 +8387,15 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
if (arm_feature(env, ARM_FEATURE_V8)) {
if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
(lr & R_V7M_EXCRET_S_MASK)) {
- /* The background code (the owner of the registers in the
+ /*
+ * The background code (the owner of the registers in the
* exception frame) is Secure. This means it may either already
* have or now needs to push callee-saves registers.
*/
if (targets_secure) {
if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
- /* We took an exception from Secure to NonSecure
+ /*
+ * We took an exception from Secure to NonSecure
* (which means the callee-saved registers got stacked)
* and are now tailchaining to a Secure exception.
* Clear DCRS so eventual return from this Secure
@@ -8386,7 +8404,8 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
lr &= ~R_V7M_EXCRET_DCRS_MASK;
}
} else {
- /* We're going to a non-secure exception; push the
+ /*
+ * We're going to a non-secure exception; push the
* callee-saves registers to the stack now, if they're
* not already saved.
*/
@@ -8408,14 +8427,16 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
lr |= R_V7M_EXCRET_SPSEL_MASK;
}
- /* Clear registers if necessary to prevent non-secure exception
+ /*
+ * Clear registers if necessary to prevent non-secure exception
* code being able to see register values from secure code.
* Where register values become architecturally UNKNOWN we leave
* them with their previous values.
*/
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
if (!targets_secure) {
- /* Always clear the caller-saved registers (they have been
+ /*
+ * Always clear the caller-saved registers (they have been
* pushed to the stack earlier in v7m_push_stack()).
* Clear callee-saved registers if the background code is
* Secure (in which case these regs were saved in
@@ -8436,7 +8457,8 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
}
if (push_failed && !ignore_stackfaults) {
- /* Derived exception on callee-saves register stacking:
+ /*
+ * Derived exception on callee-saves register stacking:
* we might now want to take a different exception which
* targets a different security state, so try again from the top.
*/
@@ -8453,7 +8475,8 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
return;
}
- /* Now we've done everything that might cause a derived exception
+ /*
+ * Now we've done everything that might cause a derived exception
* we can go ahead and activate whichever exception we're going to
* take (which might now be the derived exception).
*/
@@ -8656,7 +8679,8 @@ void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
static bool v7m_push_stack(ARMCPU *cpu)
{
- /* Do the "set up stack frame" part of exception entry,
+ /*
+ * Do the "set up stack frame" part of exception entry,
* similar to pseudocode PushStack().
* Return true if we generate a derived exception (and so
* should ignore further stack faults trying to process
@@ -8724,7 +8748,8 @@ static bool v7m_push_stack(ARMCPU *cpu)
}
}
- /* Write as much of the stack frame as we can. If we fail a stack
+ /*
+ * Write as much of the stack frame as we can. If we fail a stack
* write this will result in a derived exception being pended
* (which may be taken in preference to the one we started with
* if it has higher priority).
@@ -8841,7 +8866,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
bool ftype;
bool restore_s16_s31;
- /* If we're not in Handler mode then jumps to magic exception-exit
+ /*
+ * If we're not in Handler mode then jumps to magic exception-exit
* addresses don't have magic behaviour. However for the v8M
* security extensions the magic secure-function-return has to
* work in thread mode too, so to avoid doing an extra check in
@@ -8855,7 +8881,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
return;
}
- /* In the spec pseudocode ExceptionReturn() is called directly
+ /*
+ * In the spec pseudocode ExceptionReturn() is called directly
* from BXWritePC() and gets the full target PC value including
* bit zero. In QEMU's implementation we treat it as a normal
* jump-to-register (which is then caught later on), and so split
@@ -8888,7 +8915,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
}
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
- /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
+ /*
+ * EXC_RETURN.ES validation check (R_SMFL). We must do this before
* we pick which FAULTMASK to clear.
*/
if (!env->v7m.secure &&
@@ -8902,7 +8930,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
}
if (env->v7m.exception != ARMV7M_EXCP_NMI) {
- /* Auto-clear FAULTMASK on return from other than NMI.
+ /*
+ * Auto-clear FAULTMASK on return from other than NMI.
* If the security extension is implemented then this only
* happens if the raw execution priority is >= 0; the
* value of the ES bit in the exception return value indicates
@@ -8927,7 +8956,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
/* still an irq active now */
break;
case 1:
- /* we returned to base exception level, no nesting.
+ /*
+ * We returned to base exception level, no nesting.
* (In the pseudocode this is written using "NestedActivation != 1"
* where we have 'rettobase == false'.)
*/
@@ -8944,7 +8974,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
if (arm_feature(env, ARM_FEATURE_V8)) {
if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
- /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
+ /*
+ * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
* we choose to take the UsageFault.
*/
if ((excret & R_V7M_EXCRET_S_MASK) ||
@@ -8963,7 +8994,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
break;
case 13: /* Return to Thread using Process stack */
case 9: /* Return to Thread using Main stack */
- /* We only need to check NONBASETHRDENA for v7M, because in
+ /*
+ * We only need to check NONBASETHRDENA for v7M, because in
* v8M this bit does not exist (it is RES1).
*/
if (!rettobase &&
@@ -9021,7 +9053,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
}
if (ufault) {
- /* Bad exception return: instead of popping the exception
+ /*
+ * Bad exception return: instead of popping the exception
* stack, directly take a usage fault on the current stack.
*/
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
@@ -9051,7 +9084,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
switch_v7m_security_state(env, return_to_secure);
{
- /* The stack pointer we should be reading the exception frame from
+ /*
+ * The stack pointer we should be reading the exception frame from
* depends on bits in the magic exception return type value (and
* for v8M isn't necessarily the stack pointer we will eventually
* end up resuming execution with). Get a pointer to the location
@@ -9124,7 +9158,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
if (!pop_ok) {
- /* v7m_stack_read() pended a fault, so take it (as a tail
+ /*
+ * v7m_stack_read() pended a fault, so take it (as a tail
* chained exception on the same stack frame)
*/
qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
@@ -9132,7 +9167,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
return;
}
- /* Returning from an exception with a PC with bit 0 set is defined
+ /*
+ * Returning from an exception with a PC with bit 0 set is defined
* behaviour on v8M (bit 0 is ignored), but for v7M it was specified
* to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
* the lsbit, and there are several RTOSes out there which incorrectly
@@ -9150,13 +9186,15 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
}
if (arm_feature(env, ARM_FEATURE_V8)) {
- /* For v8M we have to check whether the xPSR exception field
+ /*
+ * For v8M we have to check whether the xPSR exception field
* matches the EXCRET value for return to handler/thread
* before we commit to changing the SP and xPSR.
*/
bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
if (return_to_handler != will_be_handler) {
- /* Take an INVPC UsageFault on the current stack.
+ /*
+ * Take an INVPC UsageFault on the current stack.
* By this point we will have switched to the security state
* for the background state, so this UsageFault will target
* that state.
@@ -9271,7 +9309,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
frameptr += 0x40;
}
}
- /* Undo stack alignment (the SPREALIGN bit indicates that the original
+ /*
+ * Undo stack alignment (the SPREALIGN bit indicates that the original
* pre-exception SP was not 8-aligned and we added a padding word to
* align it, so we undo this by ORing in the bit that increases it
* from the current 8-aligned value to the 8-unaligned value. (Adding 4
@@ -9297,13 +9336,15 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
V7M_CONTROL, SFPA, sfpa);
}
- /* The restored xPSR exception field will be zero if we're
+ /*
+ * The restored xPSR exception field will be zero if we're
* resuming in Thread mode. If that doesn't match what the
* exception return excret specified then this is a UsageFault.
* v7M requires we make this check here; v8M did it earlier.
*/
if (return_to_handler != arm_v7m_is_handler_mode(env)) {
- /* Take an INVPC UsageFault by pushing the stack again;
+ /*
+ * Take an INVPC UsageFault by pushing the stack again;
* we know we're v7M so this is never a Secure UsageFault.
*/
bool ignore_stackfaults;
@@ -9325,7 +9366,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
static bool do_v7m_function_return(ARMCPU *cpu)
{
- /* v8M security extensions magic function return.
+ /*
+ * v8M security extensions magic function return.
* We may either:
* (1) throw an exception (longjump)
* (2) return true if we successfully handled the function return
@@ -9355,7 +9397,8 @@ static bool do_v7m_function_return(ARMCPU *cpu)
frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
frameptr = *frame_sp_p;
- /* These loads may throw an exception (for MPU faults). We want to
+ /*
+ * These loads may throw an exception (for MPU faults). We want to
* do them as secure, so work out what MMU index that is.
*/
mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
@@ -9395,48 +9438,11 @@ static bool do_v7m_function_return(ARMCPU *cpu)
return true;
}
-static void arm_log_exception(int idx)
-{
- if (qemu_loglevel_mask(CPU_LOG_INT)) {
- const char *exc = NULL;
- static const char * const excnames[] = {
- [EXCP_UDEF] = "Undefined Instruction",
- [EXCP_SWI] = "SVC",
- [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
- [EXCP_DATA_ABORT] = "Data Abort",
- [EXCP_IRQ] = "IRQ",
- [EXCP_FIQ] = "FIQ",
- [EXCP_BKPT] = "Breakpoint",
- [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
- [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
- [EXCP_HVC] = "Hypervisor Call",
- [EXCP_HYP_TRAP] = "Hypervisor Trap",
- [EXCP_SMC] = "Secure Monitor Call",
- [EXCP_VIRQ] = "Virtual IRQ",
- [EXCP_VFIQ] = "Virtual FIQ",
- [EXCP_SEMIHOST] = "Semihosting call",
- [EXCP_NOCP] = "v7M NOCP UsageFault",
- [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
- [EXCP_STKOF] = "v8M STKOF UsageFault",
- [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
- [EXCP_LSERR] = "v8M LSERR UsageFault",
- [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
- };
-
- if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
- exc = excnames[idx];
- }
- if (!exc) {
- exc = "unknown";
- }
- qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
- }
-}
-
static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
uint32_t addr, uint16_t *insn)
{
- /* Load a 16-bit portion of a v7M instruction, returning true on success,
+ /*
+ * Load a 16-bit portion of a v7M instruction, returning true on success,
* or false on failure (in which case we will have pended the appropriate
* exception).
* We need to do the instruction fetch's MPU and SAU checks
@@ -9459,7 +9465,8 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
if (!sattrs.nsc || sattrs.ns) {
- /* This must be the second half of the insn, and it straddles a
+ /*
+ * This must be the second half of the insn, and it straddles a
* region boundary with the second half not being S&NSC.
*/
env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
@@ -9489,7 +9496,8 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
- /* Check whether this attempt to execute code in a Secure & NS-Callable
+ /*
+ * Check whether this attempt to execute code in a Secure & NS-Callable
* memory region is for an SG instruction; if so, then emulate the
* effect of the SG instruction and return true. Otherwise pend
* the correct kind of exception and return false.
@@ -9498,7 +9506,8 @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu)
ARMMMUIdx mmu_idx;
uint16_t insn;
- /* We should never get here unless get_phys_addr_pmsav8() caused
+ /*
+ * We should never get here unless get_phys_addr_pmsav8() caused
* an exception for NS executing in S&NSC memory.
*/
assert(!env->v7m.secure);
@@ -9516,7 +9525,8 @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu)
}
if (insn != 0xe97f) {
- /* Not an SG instruction first half (we choose the IMPDEF
+ /*
+ * Not an SG instruction first half (we choose the IMPDEF
* early-SG-check option).
*/
goto gen_invep;
@@ -9527,13 +9537,15 @@ static bool v7m_handle_execute_nsc(ARMCPU *cpu)
}
if (insn != 0xe97f) {
- /* Not an SG instruction second half (yes, both halves of the SG
+ /*
+ * Not an SG instruction second half (yes, both halves of the SG
* insn have the same hex value)
*/
goto gen_invep;
}
- /* OK, we have confirmed that we really have an SG instruction.
+ /*
+ * OK, we have confirmed that we really have an SG instruction.
* We know we're NS in S memory so don't need to repeat those checks.
*/
qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
@@ -9562,8 +9574,10 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
arm_log_exception(cs->exception_index);
- /* For exceptions we just mark as pending on the NVIC, and let that
- handle it. */
+ /*
+ * For exceptions we just mark as pending on the NVIC, and let that
+ * handle it.
+ */
switch (cs->exception_index) {
case EXCP_UDEF:
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
@@ -9609,13 +9623,15 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
break;
case EXCP_PREFETCH_ABORT:
case EXCP_DATA_ABORT:
- /* Note that for M profile we don't have a guest facing FSR, but
+ /*
+ * Note that for M profile we don't have a guest facing FSR, but
* the env->exception.fsr will be populated by the code that
* raises the fault, in the A profile short-descriptor format.
*/
switch (env->exception.fsr & 0xf) {
case M_FAKE_FSR_NSC_EXEC:
- /* Exception generated when we try to execute code at an address
+ /*
+ * Exception generated when we try to execute code at an address
* which is marked as Secure & Non-Secure Callable and the CPU
* is in the Non-Secure state. The only instruction which can
* be executed like this is SG (and that only if both halves of
@@ -9628,7 +9644,8 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
}
break;
case M_FAKE_FSR_SFAULT:
- /* Various flavours of SecureFault for attempts to execute or
+ /*
+ * Various flavours of SecureFault for attempts to execute or
* access data in the wrong security state.
*/
switch (cs->exception_index) {
@@ -9670,7 +9687,8 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
break;
default:
- /* All other FSR values are either MPU faults or "can't happen
+ /*
+ * All other FSR values are either MPU faults or "can't happen
* for M profile" cases.
*/
switch (cs->exception_index) {
@@ -9736,7 +9754,8 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
if (arm_feature(env, ARM_FEATURE_V8)) {
lr = R_V7M_EXCRET_RES1_MASK |
R_V7M_EXCRET_DCRS_MASK;
- /* The S bit indicates whether we should return to Secure
+ /*
+ * The S bit indicates whether we should return to Secure
* or NonSecure (ie our current state).
* The ES bit indicates whether we're taking this exception
* to Secure or NonSecure (ie our target state). We set it
@@ -9771,7 +9790,8 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
}
-/* Function used to synchronize QEMU's AArch64 register set with AArch32
+/*
+ * Function used to synchronize QEMU's AArch64 register set with AArch32
* register set. This is necessary when switching between AArch32 and AArch64
* execution state.
*/
@@ -9785,7 +9805,8 @@ void aarch64_sync_32_to_64(CPUARMState *env)
env->xregs[i] = env->regs[i];
}
- /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
+ /*
+ * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
* Otherwise, they come from the banked user regs.
*/
if (mode == ARM_CPU_MODE_FIQ) {
@@ -9798,7 +9819,8 @@ void aarch64_sync_32_to_64(CPUARMState *env)
}
}
- /* Registers x13-x23 are the various mode SP and FP registers. Registers
+ /*
+ * Registers x13-x23 are the various mode SP and FP registers. Registers
* r13 and r14 are only copied if we are in that mode, otherwise we copy
* from the mode banked register.
*/
@@ -9853,7 +9875,8 @@ void aarch64_sync_32_to_64(CPUARMState *env)
env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
}
- /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
+ /*
+ * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
* mode, then we can copy from r8-r14. Otherwise, we copy from the
* FIQ bank for r8-r14.
*/
@@ -9872,7 +9895,8 @@ void aarch64_sync_32_to_64(CPUARMState *env)
env->pc = env->regs[15];
}
-/* Function used to synchronize QEMU's AArch32 register set with AArch64
+/*
+ * Function used to synchronize QEMU's AArch32 register set with AArch64
* register set. This is necessary when switching between AArch32 and AArch64
* execution state.
*/
@@ -9886,7 +9910,8 @@ void aarch64_sync_64_to_32(CPUARMState *env)
env->regs[i] = env->xregs[i];
}
- /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
+ /*
+ * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
* Otherwise, we copy x8-x12 into the banked user regs.
*/
if (mode == ARM_CPU_MODE_FIQ) {
@@ -9899,7 +9924,8 @@ void aarch64_sync_64_to_32(CPUARMState *env)
}
}
- /* Registers r13 & r14 depend on the current mode.
+ /*
+ * Registers r13 & r14 depend on the current mode.
* If we are in a given mode, we copy the corresponding x registers to r13
* and r14. Otherwise, we copy the x register to the banked r13 and r14
* for the mode.
@@ -9910,7 +9936,8 @@ void aarch64_sync_64_to_32(CPUARMState *env)
} else {
env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
- /* HYP is an exception in that it does not have its own banked r14 but
+ /*
+ * HYP is an exception in that it does not have its own banked r14 but
* shares the USR r14
*/
if (mode == ARM_CPU_MODE_HYP) {
@@ -12056,7 +12083,7 @@ static bool v8m_is_sau_exempt(CPUARMState *env,
(address >= 0xe00ff000 && address <= 0xe00fffff);
}
-static void v8m_security_lookup(CPUARMState *env, uint32_t address,
+void v8m_security_lookup(CPUARMState *env, uint32_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
V8M_SAttributes *sattrs)
{
@@ -12163,7 +12190,7 @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address,
}
}
-static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *txattrs,
int *prot, bool *is_subpage,
@@ -12567,11 +12594,11 @@ static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
* @fi: set to fault info if the translation fails
* @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
*/
-static bool get_phys_addr(CPUARMState *env, target_ulong address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
- target_ulong *page_size,
- ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+bool get_phys_addr(CPUARMState *env, target_ulong address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+ target_ulong *page_size,
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
/* Call ourselves recursively to do the stage 1 and then stage 2
@@ -12753,7 +12780,8 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
return value;
}
case 0x94: /* CONTROL_NS */
- /* We have to handle this here because unprivileged Secure code
+ /*
+ * We have to handle this here because unprivileged Secure code
* can read the NS CONTROL register.
*/
if (!env->v7m.secure) {
@@ -12806,7 +12834,8 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
return env->v7m.faultmask[M_REG_NS];
case 0x98: /* SP_NS */
{
- /* This gives the non-secure SP selected based on whether we're
+ /*
+ * This gives the non-secure SP selected based on whether we're
* currently in handler mode or not, using the NS CONTROL.SPSEL.
*/
bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
@@ -12857,7 +12886,8 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
- /* We're passed bits [11..0] of the instruction; extract
+ /*
+ * We're passed bits [11..0] of the instruction; extract
* SYSm and the mask bits.
* Invalid combinations of SYSm and mask are UNPREDICTABLE;
* we choose to treat them as if the mask bits were valid.
@@ -12943,7 +12973,8 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
return;
case 0x98: /* SP_NS */
{
- /* This gives the non-secure SP selected based on whether we're
+ /*
+ * This gives the non-secure SP selected based on whether we're
* currently in handler mode or not, using the NS CONTROL.SPSEL.
*/
bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
@@ -13104,7 +13135,8 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
bool targetsec = env->v7m.secure;
bool is_subpage;
- /* Work out what the security state and privilege level we're
+ /*
+ * Work out what the security state and privilege level we're
* interested in is...
*/
if (alt) {
@@ -13121,12 +13153,14 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
/* ...and then figure out which MMU index this is */
mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
- /* We know that the MPU and SAU don't care about the access type
+ /*
+ * We know that the MPU and SAU don't care about the access type
* for our purposes beyond that we don't want to claim to be
* an insn fetch, so we arbitrarily call this a read.
*/
- /* MPU region info only available for privileged or if
+ /*
+ * MPU region info only available for privileged or if
* inspecting the other MPU state.
*/
if (arm_current_el(env) != 0 || alt) {
@@ -13176,146 +13210,6 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
#endif
-bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
-{
- ARMCPU *cpu = ARM_CPU(cs);
-
-#ifdef CONFIG_USER_ONLY
- cpu->env.exception.vaddress = address;
- if (access_type == MMU_INST_FETCH) {
- cs->exception_index = EXCP_PREFETCH_ABORT;
- } else {
- cs->exception_index = EXCP_DATA_ABORT;
- }
- cpu_loop_exit_restore(cs, retaddr);
-#else
- hwaddr phys_addr;
- target_ulong page_size;
- int prot, ret;
- MemTxAttrs attrs = {};
- ARMMMUFaultInfo fi = {};
-
- /*
- * Walk the page table and (if the mapping exists) add the page
- * to the TLB. On success, return true. Otherwise, if probing,
- * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
- * register format, and signal the fault.
- */
- ret = get_phys_addr(&cpu->env, address, access_type,
- core_to_arm_mmu_idx(&cpu->env, mmu_idx),
- &phys_addr, &attrs, &prot, &page_size, &fi, NULL);
- if (likely(!ret)) {
- /*
- * Map a single [sub]page. Regions smaller than our declared
- * target page size are handled specially, so for those we
- * pass in the exact addresses.
- */
- if (page_size >= TARGET_PAGE_SIZE) {
- phys_addr &= TARGET_PAGE_MASK;
- address &= TARGET_PAGE_MASK;
- }
- tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
- prot, mmu_idx, page_size);
- return true;
- } else if (probe) {
- return false;
- } else {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr, true);
- arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
- }
-#endif
-}
-
-void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
-{
- /* Implement DC ZVA, which zeroes a fixed-length block of memory.
- * Note that we do not implement the (architecturally mandated)
- * alignment fault for attempts to use this on Device memory
- * (which matches the usual QEMU behaviour of not implementing either
- * alignment faults or any memory attribute handling).
- */
-
- ARMCPU *cpu = env_archcpu(env);
- uint64_t blocklen = 4 << cpu->dcz_blocksize;
- uint64_t vaddr = vaddr_in & ~(blocklen - 1);
-
-#ifndef CONFIG_USER_ONLY
- {
- /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
- * the block size so we might have to do more than one TLB lookup.
- * We know that in fact for any v8 CPU the page size is at least 4K
- * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
- * 1K as an artefact of legacy v5 subpage support being present in the
- * same QEMU executable. So in practice the hostaddr[] array has
- * two entries, given the current setting of TARGET_PAGE_BITS_MIN.
- */
- int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
- void *hostaddr[DIV_ROUND_UP(2 * KiB, 1 << TARGET_PAGE_BITS_MIN)];
- int try, i;
- unsigned mmu_idx = cpu_mmu_index(env, false);
- TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
-
- assert(maxidx <= ARRAY_SIZE(hostaddr));
-
- for (try = 0; try < 2; try++) {
-
- for (i = 0; i < maxidx; i++) {
- hostaddr[i] = tlb_vaddr_to_host(env,
- vaddr + TARGET_PAGE_SIZE * i,
- 1, mmu_idx);
- if (!hostaddr[i]) {
- break;
- }
- }
- if (i == maxidx) {
- /* If it's all in the TLB it's fair game for just writing to;
- * we know we don't need to update dirty status, etc.
- */
- for (i = 0; i < maxidx - 1; i++) {
- memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
- }
- memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
- return;
- }
- /* OK, try a store and see if we can populate the tlb. This
- * might cause an exception if the memory isn't writable,
- * in which case we will longjmp out of here. We must for
- * this purpose use the actual register value passed to us
- * so that we get the fault address right.
- */
- helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
- /* Now we can populate the other TLB entries, if any */
- for (i = 0; i < maxidx; i++) {
- uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
- if (va != (vaddr_in & TARGET_PAGE_MASK)) {
- helper_ret_stb_mmu(env, va, 0, oi, GETPC());
- }
- }
- }
-
- /* Slow path (probably attempt to do this to an I/O device or
- * similar, or clearing of a block of code we have translations
- * cached for). Just do a series of byte writes as the architecture
- * demands. It's not worth trying to use a cpu_physical_memory_map(),
- * memset(), unmap() sequence here because:
- * + we'd need to account for the blocksize being larger than a page
- * + the direct-RAM access case is almost always going to be dealt
- * with in the fastpath code above, so there's no speed benefit
- * + we would have to deal with the map returning NULL because the
- * bounce buffer was in use
- */
- for (i = 0; i < blocklen; i++) {
- helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
- }
- }
-#else
- memset(g2h(vaddr), 0, blocklen);
-#endif
-}
-
/* Note that signed overflow is undefined in C. The following routines are
careful to use unsigned types where modulo arithmetic is required.
Failure to do so _will_ break on newer gcc. */
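arm_log_exception(), relocated above, pairs a sparse designated-initializer array with a bounds check and a NULL fallback. A self-contained sketch of that lookup-table idiom (the EXCP_* values here are invented for the demo; gaps in the initializer read back as NULL):

#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

enum { EXCP_UDEF = 1, EXCP_SWI = 2, EXCP_IRQ = 5 };

static const char *exc_name(int idx)
{
    static const char * const excnames[] = {
        [EXCP_UDEF] = "Undefined Instruction",
        [EXCP_SWI]  = "SVC",
        [EXCP_IRQ]  = "IRQ",
    };
    const char *exc = NULL;

    if (idx >= 0 && idx < (int)ARRAY_SIZE(excnames)) {
        exc = excnames[idx];
    }
    return exc ? exc : "unknown";
}

int main(void)
{
    printf("3 -> %s\n", exc_name(3)); /* gap in the table: "unknown" */
    printf("5 -> %s\n", exc_name(5)); /* "IRQ" */
    return 0;
}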
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 5a02f45..232d963 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -529,11 +529,15 @@ vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);
/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);
-#ifdef CONFIG_USER_ONLY
+#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
return false;
}
+static inline void arm_handle_psci_call(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
@@ -765,9 +769,6 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
-void arm_deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
- int mmu_idx, ARMMMUFaultInfo *fi) QEMU_NORETURN;
-
/* Return true if the stage 1 translation regime is using LPAE format page
* tables */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
@@ -892,6 +893,27 @@ static inline uint32_t v7m_sp_limit(CPUARMState *env)
}
/**
+ * v7m_cpacr_pass:
+ * Return true if the v7M CPACR permits access to the FPU for the specified
+ * security state and privilege level.
+ */
+static inline bool v7m_cpacr_pass(CPUARMState *env,
+ bool is_secure, bool is_priv)
+{
+ switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
+ case 0:
+ case 2: /* UNPREDICTABLE: we treat like 0 */
+ return false;
+ case 1:
+ return is_priv;
+ case 3:
+ return true;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/**
* aarch32_mode_name(): Return name of the AArch32 CPU mode
* @psr: Program Status Register indicating CPU mode
*
@@ -985,4 +1007,43 @@ static inline int exception_target_el(CPUARMState *env)
return target_el;
}
+#ifndef CONFIG_USER_ONLY
+
+/* Security attributes for an address, as returned by v8m_security_lookup. */
+typedef struct V8M_SAttributes {
+ bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
+ bool ns;
+ bool nsc;
+ uint8_t sregion;
+ bool srvalid;
+ uint8_t iregion;
+ bool irvalid;
+} V8M_SAttributes;
+
+void v8m_security_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ V8M_SAttributes *sattrs);
+
+bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs,
+ int *prot, bool *is_subpage,
+ ARMMMUFaultInfo *fi, uint32_t *mregion);
+
+/* Cacheability and shareability attributes for a memory access */
+typedef struct ARMCacheAttrs {
+ unsigned int attrs:8; /* as in the MAIR register encoding */
+ unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
+} ARMCacheAttrs;
+
+bool get_phys_addr(CPUARMState *env, target_ulong address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+ target_ulong *page_size,
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
+
+void arm_log_exception(int idx);
+
+#endif /* !CONFIG_USER_ONLY */
+
#endif
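v7m_cpacr_pass(), newly inlined in internals.h above, decodes CPACR bits [21:20] with extract32(). A runnable sketch of that decode; the extract32() here mirrors the definition in QEMU's include/qemu/bitops.h, and the CPACR value is an arbitrary example:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t extract32(uint32_t value, int start, int length)
{
    assert(start >= 0 && length > 0 && length <= 32 - start);
    return (value >> start) & (~0U >> (32 - length));
}

int main(void)
{
    uint32_t cpacr = 3u << 20; /* field value 3: full access */

    switch (extract32(cpacr, 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: treated like 0 */
        printf("FPU access denied\n");
        break;
    case 1:
        printf("FPU access for privileged code only\n");
        break;
    case 3:
        printf("FPU access allowed\n");
        break;
    }
    return 0;
}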
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index 4db2548..9850993 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -17,6 +17,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
+#include "qemu/units.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
@@ -87,136 +88,6 @@ uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
return val;
}
-#if !defined(CONFIG_USER_ONLY)
-
-static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
- unsigned int target_el,
- bool same_el, bool ea,
- bool s1ptw, bool is_write,
- int fsc)
-{
- uint32_t syn;
-
- /* ISV is only set for data aborts routed to EL2 and
- * never for stage-1 page table walks faulting on stage 2.
- *
- * Furthermore, ISV is only set for certain kinds of load/stores.
- * If the template syndrome does not have ISV set, we should leave
- * it cleared.
- *
- * See ARMv8 specs, D7-1974:
- * ISS encoding for an exception from a Data Abort, the
- * ISV field.
- */
- if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
- syn = syn_data_abort_no_iss(same_el,
- ea, 0, s1ptw, is_write, fsc);
- } else {
- /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
- * syndrome created at translation time.
- * Now we create the runtime syndrome with the remaining fields.
- */
- syn = syn_data_abort_with_iss(same_el,
- 0, 0, 0, 0, 0,
- ea, 0, s1ptw, is_write, fsc,
- false);
- /* Merge the runtime syndrome with the template syndrome. */
- syn |= template_syn;
- }
- return syn;
-}
-
-void arm_deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
- int mmu_idx, ARMMMUFaultInfo *fi)
-{
- CPUARMState *env = &cpu->env;
- int target_el;
- bool same_el;
- uint32_t syn, exc, fsr, fsc;
- ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
-
- target_el = exception_target_el(env);
- if (fi->stage2) {
- target_el = 2;
- env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
- }
- same_el = (arm_current_el(env) == target_el);
-
- if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
- arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
- /* LPAE format fault status register : bottom 6 bits are
- * status code in the same form as needed for syndrome
- */
- fsr = arm_fi_to_lfsc(fi);
- fsc = extract32(fsr, 0, 6);
- } else {
- fsr = arm_fi_to_sfsc(fi);
- /* Short format FSR : this fault will never actually be reported
- * to an EL that uses a syndrome register. Use a (currently)
- * reserved FSR code in case the constructed syndrome does leak
- * into the guest somehow.
- */
- fsc = 0x3f;
- }
-
- if (access_type == MMU_INST_FETCH) {
- syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
- exc = EXCP_PREFETCH_ABORT;
- } else {
- syn = merge_syn_data_abort(env->exception.syndrome, target_el,
- same_el, fi->ea, fi->s1ptw,
- access_type == MMU_DATA_STORE,
- fsc);
- if (access_type == MMU_DATA_STORE
- && arm_feature(env, ARM_FEATURE_V6)) {
- fsr |= (1 << 11);
- }
- exc = EXCP_DATA_ABORT;
- }
-
- env->exception.vaddress = addr;
- env->exception.fsr = fsr;
- raise_exception(env, exc, syn, target_el);
-}
-
-/* Raise a data fault alignment exception for the specified virtual address */
-void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
- MMUAccessType access_type,
- int mmu_idx, uintptr_t retaddr)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- ARMMMUFaultInfo fi = {};
-
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr, true);
-
- fi.type = ARMFault_Alignment;
- arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
-}
-
-/* arm_cpu_do_transaction_failed: handle a memory system error response
- * (eg "no device/memory present at address") by raising an external abort
- * exception
- */
-void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
- vaddr addr, unsigned size,
- MMUAccessType access_type,
- int mmu_idx, MemTxAttrs attrs,
- MemTxResult response, uintptr_t retaddr)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- ARMMMUFaultInfo fi = {};
-
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr, true);
-
- fi.ea = arm_extabort_type(response);
- fi.type = ARMFault_SyncExternal;
- arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
-}
-
-#endif /* !defined(CONFIG_USER_ONLY) */
-
void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
/*
@@ -970,7 +841,8 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
int bt;
uint32_t contextidr;
- /* Links to unimplemented or non-context aware breakpoints are
+ /*
+ * Links to unimplemented or non-context aware breakpoints are
* CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
* as if linked to an UNKNOWN context-aware breakpoint (in which
* case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
@@ -989,7 +861,8 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
bt = extract64(bcr, 20, 4);
- /* We match the whole register even if this is AArch32 using the
+ /*
+ * We match the whole register even if this is AArch32 using the
* short descriptor format (in which case it holds both PROCID and ASID),
* since we don't implement the optional v7 context ID masking.
*/
@@ -1006,7 +879,8 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn)
case 9: /* linked VMID match (reserved if no EL2) */
case 11: /* linked context ID and VMID match (reserved if no EL2) */
default:
- /* Links to Unlinked context breakpoints must generate no
+ /*
+ * Links to Unlinked context breakpoints must generate no
* events; we choose to do the same for reserved values too.
*/
return false;
@@ -1020,7 +894,8 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
CPUARMState *env = &cpu->env;
uint64_t cr;
int pac, hmc, ssc, wt, lbn;
- /* Note that for watchpoints the check is against the CPU security
+ /*
+ * Note that for watchpoints the check is against the CPU security
* state, not the S/NS attribute on the offending data access.
*/
bool is_secure = arm_is_secure(env);
@@ -1034,7 +909,8 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
}
cr = env->cp15.dbgwcr[n];
if (wp->hitattrs.user) {
- /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
+ /*
+ * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
* match watchpoints as if they were accesses done at EL0, even if
* the CPU is at EL1 or higher.
*/
@@ -1048,7 +924,8 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
}
cr = env->cp15.dbgbcr[n];
}
- /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
+ /*
+ * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
* enabled and that the address and access type match; for breakpoints
* we know the address matched; check the remaining fields, including
* linked breakpoints. We rely on WCR and BCR having the same layout
@@ -1116,7 +993,8 @@ static bool check_watchpoints(ARMCPU *cpu)
CPUARMState *env = &cpu->env;
int n;
- /* If watchpoints are disabled globally or we can't take debug
+ /*
+ * If watchpoints are disabled globally or we can't take debug
* exceptions here then watchpoint firings are ignored.
*/
if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
@@ -1137,7 +1015,8 @@ static bool check_breakpoints(ARMCPU *cpu)
CPUARMState *env = &cpu->env;
int n;
- /* If breakpoints are disabled globally or we can't take debug
+ /*
+ * If breakpoints are disabled globally or we can't take debug
* exceptions here then breakpoint firings are ignored.
*/
if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
@@ -1164,7 +1043,8 @@ void HELPER(check_breakpoints)(CPUARMState *env)
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
- /* Called by core code when a CPU watchpoint fires; need to check if this
+ /*
+ * Called by core code when a CPU watchpoint fires; need to check if this
* is also an architectural watchpoint match.
*/
ARMCPU *cpu = ARM_CPU(cs);
@@ -1177,7 +1057,8 @@ vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
- /* In BE32 system mode, target memory is stored byteswapped (on a
+ /*
+ * In BE32 system mode, target memory is stored byteswapped (on a
* little-endian host system), and by the time we reach here (via an
* opcode helper) the addresses of subword accesses have been adjusted
* to account for that, which means that watchpoints will not match.
@@ -1196,7 +1077,8 @@ vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
void arm_debug_excp_handler(CPUState *cs)
{
- /* Called by core code when a watchpoint or breakpoint fires;
+ /*
+ * Called by core code when a watchpoint or breakpoint fires;
* need to check which one and raise the appropriate exception.
*/
ARMCPU *cpu = ARM_CPU(cs);
@@ -1220,7 +1102,8 @@ void arm_debug_excp_handler(CPUState *cs)
uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
- /* (1) GDB breakpoints should be handled first.
+ /*
+ * (1) GDB breakpoints should be handled first.
* (2) Do not raise a CPU exception if no CPU breakpoint has fired,
* since singlestep is also done by generating a debug internal
* exception.
@@ -1231,7 +1114,8 @@ void arm_debug_excp_handler(CPUState *cs)
}
env->exception.fsr = arm_debug_exception_fsr(env);
- /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
+ /*
+ * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
* values to the guest that it shouldn't be able to see at its
* exception/security level.
*/
@@ -1307,3 +1191,95 @@ uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
return ((uint32_t)x >> shift) | (x << (32 - shift));
}
}
+
+void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
+{
+ /*
+ * Implement DC ZVA, which zeroes a fixed-length block of memory.
+ * Note that we do not implement the (architecturally mandated)
+ * alignment fault for attempts to use this on Device memory
+ * (which matches the usual QEMU behaviour of not implementing either
+ * alignment faults or any memory attribute handling).
+ */
+
+ ARMCPU *cpu = env_archcpu(env);
+ uint64_t blocklen = 4 << cpu->dcz_blocksize;
+ uint64_t vaddr = vaddr_in & ~(blocklen - 1);
+
+#ifndef CONFIG_USER_ONLY
+ {
+ /*
+ * Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
+ * the block size so we might have to do more than one TLB lookup.
+ * We know that in fact for any v8 CPU the page size is at least 4K
+ * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
+ * 1K as an artefact of legacy v5 subpage support being present in the
+ * same QEMU executable. So in practice the hostaddr[] array has
+ * two entries, given the current setting of TARGET_PAGE_BITS_MIN.
+ */
+ int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
+ void *hostaddr[DIV_ROUND_UP(2 * KiB, 1 << TARGET_PAGE_BITS_MIN)];
+ int try, i;
+ unsigned mmu_idx = cpu_mmu_index(env, false);
+ TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+
+ assert(maxidx <= ARRAY_SIZE(hostaddr));
+
+ for (try = 0; try < 2; try++) {
+
+ for (i = 0; i < maxidx; i++) {
+ hostaddr[i] = tlb_vaddr_to_host(env,
+ vaddr + TARGET_PAGE_SIZE * i,
+ 1, mmu_idx);
+ if (!hostaddr[i]) {
+ break;
+ }
+ }
+ if (i == maxidx) {
+ /*
+ * If it's all in the TLB it's fair game for just writing to;
+ * we know we don't need to update dirty status, etc.
+ */
+ for (i = 0; i < maxidx - 1; i++) {
+ memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
+ }
+ memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
+ return;
+ }
+ /*
+ * OK, try a store and see if we can populate the tlb. This
+ * might cause an exception if the memory isn't writable,
+ * in which case we will longjmp out of here. We must for
+ * this purpose use the actual register value passed to us
+ * so that we get the fault address right.
+ */
+ helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
+ /* Now we can populate the other TLB entries, if any */
+ for (i = 0; i < maxidx; i++) {
+ uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
+ if (va != (vaddr_in & TARGET_PAGE_MASK)) {
+ helper_ret_stb_mmu(env, va, 0, oi, GETPC());
+ }
+ }
+ }
+
+ /*
+ * Slow path (probably attempt to do this to an I/O device or
+ * similar, or clearing of a block of code we have translations
+ * cached for). Just do a series of byte writes as the architecture
+ * demands. It's not worth trying to use a cpu_physical_memory_map(),
+ * memset(), unmap() sequence here because:
+ * + we'd need to account for the blocksize being larger than a page
+ * + the direct-RAM access case is almost always going to be dealt
+ * with in the fastpath code above, so there's no speed benefit
+ * + we would have to deal with the map returning NULL because the
+ * bounce buffer was in use
+ */
+ for (i = 0; i < blocklen; i++) {
+ helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
+ }
+ }
+#else
+ memset(g2h(vaddr), 0, blocklen);
+#endif
+}
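
The block-length arithmetic at the top of HELPER(dc_zva) is compact enough to be easy to misread, so here is a standalone sketch of just that step. The value dcz_blocksize = 4 (a 64-byte zero block) is an assumption for the example, not something this patch sets:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned dcz_blocksize = 4;                  /* assumed: 64-byte block */
        uint64_t blocklen = 4ULL << dcz_blocksize;   /* 4 << 4 = 64 bytes */
        uint64_t vaddr_in = 0x1003;                  /* arbitrary unaligned VA */
        uint64_t vaddr = vaddr_in & ~(blocklen - 1); /* round down to block */

        /* Prints: blocklen=64 vaddr_in=0x1003 -> vaddr=0x1000 */
        printf("blocklen=%" PRIu64 " vaddr_in=0x%" PRIx64 " -> vaddr=0x%" PRIx64 "\n",
               blocklen, vaddr_in, vaddr);
        return 0;
    }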
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
new file mode 100644
index 0000000..5feb312
--- /dev/null
+++ b/target/arm/tlb_helper.c
@@ -0,0 +1,200 @@
+/*
+ * ARM TLB (Translation lookaside buffer) helpers.
+ *
+ * This code is licensed under the GNU GPL v2 or later.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/exec-all.h"
+
+#if !defined(CONFIG_USER_ONLY)
+
+static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
+ unsigned int target_el,
+ bool same_el, bool ea,
+ bool s1ptw, bool is_write,
+ int fsc)
+{
+ uint32_t syn;
+
+ /*
+ * ISV is only set for data aborts routed to EL2 and
+ * never for stage-1 page table walks faulting on stage 2.
+ *
+ * Furthermore, ISV is only set for certain kinds of load/stores.
+ * If the template syndrome does not have ISV set, we should leave
+ * it cleared.
+ *
+ * See ARMv8 specs, D7-1974:
+ * ISS encoding for an exception from a Data Abort, the
+ * ISV field.
+ */
+ if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
+ syn = syn_data_abort_no_iss(same_el,
+ ea, 0, s1ptw, is_write, fsc);
+ } else {
+ /*
+ * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
+ * syndrome created at translation time.
+ * Now we create the runtime syndrome with the remaining fields.
+ */
+ syn = syn_data_abort_with_iss(same_el,
+ 0, 0, 0, 0, 0,
+ ea, 0, s1ptw, is_write, fsc,
+ false);
+ /* Merge the runtime syndrome with the template syndrome. */
+ syn |= template_syn;
+ }
+ return syn;
+}
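
The long comment in merge_syn_data_abort() boils down to a single gate on whether the ISS form may be used. A self-contained restatement of just that predicate; ARM_EL_ISV is defined locally here at ISS bit 24, an assumption of this sketch rather than an include from internals.h:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ARM_EL_ISV (1u << 24) /* local stand-in for the ISS ISV bit */

    /* Use the with-ISS form only if the template carries ISV, the abort
     * routes to EL2, and it is not a stage-1 walk faulting at stage 2. */
    static bool use_iss_form(uint32_t template_syn, unsigned target_el, bool s1ptw)
    {
        return (template_syn & ARM_EL_ISV) && target_el == 2 && !s1ptw;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               use_iss_form(ARM_EL_ISV, 2, false),  /* 1: full ISS syndrome */
               use_iss_form(ARM_EL_ISV, 1, false),  /* 0: routed to EL1 */
               use_iss_form(0, 2, false));          /* 0: template lacks ISV */
        return 0;
    }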
+
+static void QEMU_NORETURN arm_deliver_fault(ARMCPU *cpu, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, ARMMMUFaultInfo *fi)
+{
+ CPUARMState *env = &cpu->env;
+ int target_el;
+ bool same_el;
+ uint32_t syn, exc, fsr, fsc;
+ ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
+
+ target_el = exception_target_el(env);
+ if (fi->stage2) {
+ target_el = 2;
+ env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
+ }
+ same_el = (arm_current_el(env) == target_el);
+
+ if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
+ arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
+ /*
+ * LPAE format fault status register : bottom 6 bits are
+ * status code in the same form as needed for syndrome
+ */
+ fsr = arm_fi_to_lfsc(fi);
+ fsc = extract32(fsr, 0, 6);
+ } else {
+ fsr = arm_fi_to_sfsc(fi);
+ /*
+ * Short format FSR : this fault will never actually be reported
+ * to an EL that uses a syndrome register. Use a (currently)
+ * reserved FSR code in case the constructed syndrome does leak
+ * into the guest somehow.
+ */
+ fsc = 0x3f;
+ }
+
+ if (access_type == MMU_INST_FETCH) {
+ syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
+ exc = EXCP_PREFETCH_ABORT;
+ } else {
+ syn = merge_syn_data_abort(env->exception.syndrome, target_el,
+ same_el, fi->ea, fi->s1ptw,
+ access_type == MMU_DATA_STORE,
+ fsc);
+ if (access_type == MMU_DATA_STORE
+ && arm_feature(env, ARM_FEATURE_V6)) {
+ fsr |= (1 << 11);
+ }
+ exc = EXCP_DATA_ABORT;
+ }
+
+ env->exception.vaddress = addr;
+ env->exception.fsr = fsr;
+ raise_exception(env, exc, syn, target_el);
+}
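
Both bit-extraction steps in arm_deliver_fault() use the extract32()/extract64() helpers from qemu/bitops.h; the sketch below re-implements them locally (same semantics: take `length` bits starting at bit `start`) so it compiles on its own, then applies them to made-up fault values:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local re-implementations matching qemu/bitops.h semantics. */
    static uint32_t extract32(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0U >> (32 - length));
    }

    static uint64_t extract64(uint64_t value, int start, int length)
    {
        return (value >> start) & (~0ULL >> (64 - length));
    }

    int main(void)
    {
        uint32_t fsr = 0x2045;           /* made-up LPAE FSR value */
        uint64_t s2addr = 0x40086123ULL; /* made-up stage-2 fault address */

        uint32_t fsc = extract32(fsr, 0, 6);             /* low 6 status bits */
        uint64_t hpfar = extract64(s2addr, 12, 47) << 4;

        /* Prints: fsc=0x05 hpfar_el2=0x400860 */
        printf("fsc=0x%02" PRIx32 " hpfar_el2=0x%" PRIx64 "\n", fsc, hpfar);
        return 0;
    }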
+
+/* Raise a data fault alignment exception for the specified virtual address */
+void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ ARMMMUFaultInfo fi = {};
+
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr, true);
+
+ fi.type = ARMFault_Alignment;
+ arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
+}
+
+/*
+ * arm_cpu_do_transaction_failed: handle a memory system error response
+ * (eg "no device/memory present at address") by raising an external abort
+ * exception
+ */
+void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ ARMMMUFaultInfo fi = {};
+
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr, true);
+
+ fi.ea = arm_extabort_type(response);
+ fi.type = ARMFault_SyncExternal;
+ arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
+}
+
+#endif /* !defined(CONFIG_USER_ONLY) */
+
+bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+
+#ifdef CONFIG_USER_ONLY
+ cpu->env.exception.vaddress = address;
+ if (access_type == MMU_INST_FETCH) {
+ cs->exception_index = EXCP_PREFETCH_ABORT;
+ } else {
+ cs->exception_index = EXCP_DATA_ABORT;
+ }
+ cpu_loop_exit_restore(cs, retaddr);
+#else
+ hwaddr phys_addr;
+ target_ulong page_size;
+ int prot, ret;
+ MemTxAttrs attrs = {};
+ ARMMMUFaultInfo fi = {};
+
+ /*
+ * Walk the page table and (if the mapping exists) add the page
+ * to the TLB. On success, return true. Otherwise, if probing,
+ * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
+ * register format, and signal the fault.
+ */
+ ret = get_phys_addr(&cpu->env, address, access_type,
+ core_to_arm_mmu_idx(&cpu->env, mmu_idx),
+ &phys_addr, &attrs, &prot, &page_size, &fi, NULL);
+ if (likely(!ret)) {
+ /*
+ * Map a single [sub]page. Regions smaller than our declared
+ * target page size are handled specially, so for those we
+ * pass in the exact addresses.
+ */
+ if (page_size >= TARGET_PAGE_SIZE) {
+ phys_addr &= TARGET_PAGE_MASK;
+ address &= TARGET_PAGE_MASK;
+ }
+ tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
+ prot, mmu_idx, page_size);
+ return true;
+ } else if (probe) {
+ return false;
+ } else {
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr, true);
+ arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
+ }
+#endif
+}
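
The success path of arm_cpu_tlb_fill() only masks the addresses when the translated region covers at least one target page; smaller subpage regions are passed through exactly. A standalone sketch of that decision, assuming a 4K TARGET_PAGE_SIZE purely for illustration (the dc_zva comment earlier notes the real minimum for this target is 1K):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_SIZE 4096ULL /* illustrative assumption */
    #define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))

    static void map_one(uint64_t address, uint64_t phys_addr, uint64_t page_size)
    {
        if (page_size >= TARGET_PAGE_SIZE) {
            phys_addr &= TARGET_PAGE_MASK; /* full page: mask to boundary */
            address &= TARGET_PAGE_MASK;
        }                                  /* subpage: keep exact addresses */
        printf("va=0x%" PRIx64 " pa=0x%" PRIx64 " size=0x%" PRIx64 "\n",
               address, phys_addr, page_size);
    }

    int main(void)
    {
        map_one(0x80001234, 0x40001234, 0x100000); /* 1M section: masked */
        map_one(0x80001234, 0x40001234, 0x400);    /* 1K subpage: exact */
        return 0;
    }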
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 97f4164..d323147 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -27,7 +27,6 @@
#include "translate.h"
#include "internals.h"
#include "qemu/host-utils.h"
-#include "qemu/qemu-print.h"
#include "hw/semihosting/semihost.h"
#include "exec/gen-icount.h"
@@ -152,133 +151,6 @@ static void set_btype(DisasContext *s, int val)
s->btype = -1;
}
-void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- uint32_t psr = pstate_read(env);
- int i;
- int el = arm_current_el(env);
- const char *ns_status;
-
- qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
- for (i = 0; i < 32; i++) {
- if (i == 31) {
- qemu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
- } else {
- qemu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
- (i + 2) % 3 ? " " : "\n");
- }
- }
-
- if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
- ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
- } else {
- ns_status = "";
- }
- qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
- psr,
- psr & PSTATE_N ? 'N' : '-',
- psr & PSTATE_Z ? 'Z' : '-',
- psr & PSTATE_C ? 'C' : '-',
- psr & PSTATE_V ? 'V' : '-',
- ns_status,
- el,
- psr & PSTATE_SP ? 'h' : 't');
-
- if (cpu_isar_feature(aa64_bti, cpu)) {
- qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
- }
- if (!(flags & CPU_DUMP_FPU)) {
- qemu_fprintf(f, "\n");
- return;
- }
- if (fp_exception_el(env, el) != 0) {
- qemu_fprintf(f, " FPU disabled\n");
- return;
- }
- qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
- vfp_get_fpcr(env), vfp_get_fpsr(env));
-
- if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
- int j, zcr_len = sve_zcr_len_for_el(env, el);
-
- for (i = 0; i <= FFR_PRED_NUM; i++) {
- bool eol;
- if (i == FFR_PRED_NUM) {
- qemu_fprintf(f, "FFR=");
- /* It's last, so end the line. */
- eol = true;
- } else {
- qemu_fprintf(f, "P%02d=", i);
- switch (zcr_len) {
- case 0:
- eol = i % 8 == 7;
- break;
- case 1:
- eol = i % 6 == 5;
- break;
- case 2:
- case 3:
- eol = i % 3 == 2;
- break;
- default:
- /* More than one quadword per predicate. */
- eol = true;
- break;
- }
- }
- for (j = zcr_len / 4; j >= 0; j--) {
- int digits;
- if (j * 4 + 4 <= zcr_len + 1) {
- digits = 16;
- } else {
- digits = (zcr_len % 4 + 1) * 4;
- }
- qemu_fprintf(f, "%0*" PRIx64 "%s", digits,
- env->vfp.pregs[i].p[j],
- j ? ":" : eol ? "\n" : " ");
- }
- }
-
- for (i = 0; i < 32; i++) {
- if (zcr_len == 0) {
- qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
- i, env->vfp.zregs[i].d[1],
- env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
- } else if (zcr_len == 1) {
- qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64
- ":%016" PRIx64 ":%016" PRIx64 "\n",
- i, env->vfp.zregs[i].d[3], env->vfp.zregs[i].d[2],
- env->vfp.zregs[i].d[1], env->vfp.zregs[i].d[0]);
- } else {
- for (j = zcr_len; j >= 0; j--) {
- bool odd = (zcr_len - j) % 2 != 0;
- if (j == zcr_len) {
- qemu_fprintf(f, "Z%02d[%x-%x]=", i, j, j - 1);
- } else if (!odd) {
- if (j > 0) {
- qemu_fprintf(f, " [%x-%x]=", j, j - 1);
- } else {
- qemu_fprintf(f, " [%x]=", j);
- }
- }
- qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
- env->vfp.zregs[i].d[j * 2 + 1],
- env->vfp.zregs[i].d[j * 2],
- odd || j == 0 ? "\n" : ":");
- }
- }
- }
- } else {
- for (i = 0; i < 32; i++) {
- uint64_t *q = aa64_vfp_qreg(env, i);
- qemu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
- i, q[1], q[0], (i & 1 ? "\n" : " "));
- }
- }
-}
-
void gen_a64_set_pc_im(uint64_t val)
{
tcg_gen_movi_i64(cpu_pc, val);
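
The dump-state code removed above renders each PSTATE condition flag as a single character. That idiom is compact enough to show standalone; the PSTATE_* masks below are local definitions at the architectural NZCV bit positions (31 down to 28), an assumption made so the sketch is self-contained:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PSTATE_N (1U << 31)
    #define PSTATE_Z (1U << 30)
    #define PSTATE_C (1U << 29)
    #define PSTATE_V (1U << 28)

    int main(void)
    {
        uint32_t psr = PSTATE_Z | PSTATE_C; /* made-up value with Z and C set */

        /* Prints: PSTATE=60000000 -ZC- */
        printf("PSTATE=%08" PRIx32 " %c%c%c%c\n",
               psr,
               psr & PSTATE_N ? 'N' : '-',
               psr & PSTATE_Z ? 'Z' : '-',
               psr & PSTATE_C ? 'C' : '-',
               psr & PSTATE_V ? 'V' : '-');
        return 0;
    }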
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 4750b9f..a5d7723 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -28,7 +28,6 @@
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
-#include "qemu/qemu-print.h"
#include "arm_ldst.h"
#include "hw/semihosting/semihost.h"
@@ -9109,7 +9108,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
loaded_base = 0;
loaded_var = NULL;
n = 0;
- for(i=0;i<16;i++) {
+ for (i = 0; i < 16; i++) {
if (insn & (1 << i))
n++;
}
@@ -9132,7 +9131,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
}
}
j = 0;
- for(i=0;i<16;i++) {
+ for (i = 0; i < 16; i++) {
if (insn & (1 << i)) {
if (is_load) {
/* load */
@@ -12342,92 +12341,6 @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
translator_loop(ops, &dc.base, cpu, tb, max_insns);
}
-void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- int i;
-
- if (is_a64(env)) {
- aarch64_cpu_dump_state(cs, f, flags);
- return;
- }
-
- for(i=0;i<16;i++) {
- qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
- if ((i % 4) == 3)
- qemu_fprintf(f, "\n");
- else
- qemu_fprintf(f, " ");
- }
-
- if (arm_feature(env, ARM_FEATURE_M)) {
- uint32_t xpsr = xpsr_read(env);
- const char *mode;
- const char *ns_status = "";
-
- if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
- ns_status = env->v7m.secure ? "S " : "NS ";
- }
-
- if (xpsr & XPSR_EXCP) {
- mode = "handler";
- } else {
- if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
- mode = "unpriv-thread";
- } else {
- mode = "priv-thread";
- }
- }
-
- qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
- xpsr,
- xpsr & XPSR_N ? 'N' : '-',
- xpsr & XPSR_Z ? 'Z' : '-',
- xpsr & XPSR_C ? 'C' : '-',
- xpsr & XPSR_V ? 'V' : '-',
- xpsr & XPSR_T ? 'T' : 'A',
- ns_status,
- mode);
- } else {
- uint32_t psr = cpsr_read(env);
- const char *ns_status = "";
-
- if (arm_feature(env, ARM_FEATURE_EL3) &&
- (psr & CPSR_M) != ARM_CPU_MODE_MON) {
- ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
- }
-
- qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
- psr,
- psr & CPSR_N ? 'N' : '-',
- psr & CPSR_Z ? 'Z' : '-',
- psr & CPSR_C ? 'C' : '-',
- psr & CPSR_V ? 'V' : '-',
- psr & CPSR_T ? 'T' : 'A',
- ns_status,
- aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
- }
-
- if (flags & CPU_DUMP_FPU) {
- int numvfpregs = 0;
- if (arm_feature(env, ARM_FEATURE_VFP)) {
- numvfpregs += 16;
- }
- if (arm_feature(env, ARM_FEATURE_VFP3)) {
- numvfpregs += 16;
- }
- for (i = 0; i < numvfpregs; i++) {
- uint64_t v = *aa32_vfp_dreg(env, i);
- qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
- i * 2, (uint32_t)v,
- i * 2 + 1, (uint32_t)(v >> 32),
- i, v);
- }
- qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
- }
-}
-
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
target_ulong *data)
{
diff --git a/target/arm/translate.h b/target/arm/translate.h
index bc16178..a20f6e2 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -169,7 +169,6 @@ static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
#ifdef TARGET_AARCH64
void a64_translate_init(void);
void gen_a64_set_pc_im(uint64_t val);
-void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags);
extern const TranslatorOps aarch64_translator_ops;
#else
static inline void a64_translate_init(void)
@@ -179,10 +178,6 @@ static inline void a64_translate_init(void)
static inline void gen_a64_set_pc_im(uint64_t val)
{
}
-
-static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
-}
#endif
void arm_test_cc(DisasCompare *cmp, int cc);
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index d3e83b6..46041e3 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -18,121 +18,88 @@
*/
#include "qemu/osdep.h"
-#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "fpu/softfloat.h"
#include "internals.h"
-
+#ifdef CONFIG_TCG
+#include "qemu/log.h"
+#include "fpu/softfloat.h"
+#endif
/* VFP support. We follow the convention used for VFP instructions:
Single precision routines have a "s" suffix, double precision a
"d" suffix. */
+#ifdef CONFIG_TCG
+
/* Convert host exception flags to vfp form. */
static inline int vfp_exceptbits_from_host(int host_bits)
{
int target_bits = 0;
- if (host_bits & float_flag_invalid)
+ if (host_bits & float_flag_invalid) {
target_bits |= 1;
- if (host_bits & float_flag_divbyzero)
+ }
+ if (host_bits & float_flag_divbyzero) {
target_bits |= 2;
- if (host_bits & float_flag_overflow)
+ }
+ if (host_bits & float_flag_overflow) {
target_bits |= 4;
- if (host_bits & (float_flag_underflow | float_flag_output_denormal))
+ }
+ if (host_bits & (float_flag_underflow | float_flag_output_denormal)) {
target_bits |= 8;
- if (host_bits & float_flag_inexact)
+ }
+ if (host_bits & float_flag_inexact) {
target_bits |= 0x10;
- if (host_bits & float_flag_input_denormal)
+ }
+ if (host_bits & float_flag_input_denormal) {
target_bits |= 0x80;
+ }
return target_bits;
}
-uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
-{
- uint32_t i, fpscr;
-
- fpscr = env->vfp.xregs[ARM_VFP_FPSCR]
- | (env->vfp.vec_len << 16)
- | (env->vfp.vec_stride << 20);
-
- i = get_float_exception_flags(&env->vfp.fp_status);
- i |= get_float_exception_flags(&env->vfp.standard_fp_status);
- /* FZ16 does not generate an input denormal exception. */
- i |= (get_float_exception_flags(&env->vfp.fp_status_f16)
- & ~float_flag_input_denormal);
- fpscr |= vfp_exceptbits_from_host(i);
-
- i = env->vfp.qc[0] | env->vfp.qc[1] | env->vfp.qc[2] | env->vfp.qc[3];
- fpscr |= i ? FPCR_QC : 0;
-
- return fpscr;
-}
-
-uint32_t vfp_get_fpscr(CPUARMState *env)
-{
- return HELPER(vfp_get_fpscr)(env);
-}
-
/* Convert vfp exception flags to target form. */
static inline int vfp_exceptbits_to_host(int target_bits)
{
int host_bits = 0;
- if (target_bits & 1)
+ if (target_bits & 1) {
host_bits |= float_flag_invalid;
- if (target_bits & 2)
+ }
+ if (target_bits & 2) {
host_bits |= float_flag_divbyzero;
- if (target_bits & 4)
+ }
+ if (target_bits & 4) {
host_bits |= float_flag_overflow;
- if (target_bits & 8)
+ }
+ if (target_bits & 8) {
host_bits |= float_flag_underflow;
- if (target_bits & 0x10)
+ }
+ if (target_bits & 0x10) {
host_bits |= float_flag_inexact;
- if (target_bits & 0x80)
+ }
+ if (target_bits & 0x80) {
host_bits |= float_flag_input_denormal;
+ }
return host_bits;
}
-void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
+static uint32_t vfp_get_fpscr_from_host(CPUARMState *env)
{
- int i;
- uint32_t changed = env->vfp.xregs[ARM_VFP_FPSCR];
-
- /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */
- if (!cpu_isar_feature(aa64_fp16, env_archcpu(env))) {
- val &= ~FPCR_FZ16;
- }
-
- if (arm_feature(env, ARM_FEATURE_M)) {
- /*
- * M profile FPSCR is RES0 for the QC, STRIDE, FZ16, LEN bits
- * and also for the trapped-exception-handling bits IxE.
- */
- val &= 0xf7c0009f;
- }
+ uint32_t i;
- /*
- * We don't implement trapped exception handling, so the
- * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!)
- *
- * If we exclude the exception flags, IOC|DZC|OFC|UFC|IXC|IDC
- * (which are stored in fp_status), and the other RES0 bits
- * in between, then we clear all of the low 16 bits.
- */
- env->vfp.xregs[ARM_VFP_FPSCR] = val & 0xf7c80000;
- env->vfp.vec_len = (val >> 16) & 7;
- env->vfp.vec_stride = (val >> 20) & 3;
+ i = get_float_exception_flags(&env->vfp.fp_status);
+ i |= get_float_exception_flags(&env->vfp.standard_fp_status);
+ /* FZ16 does not generate an input denormal exception. */
+ i |= (get_float_exception_flags(&env->vfp.fp_status_f16)
+ & ~float_flag_input_denormal);
+ return vfp_exceptbits_from_host(i);
+}
- /*
- * The bit we set within fpscr_q is arbitrary; the register as a
- * whole being zero/non-zero is what counts.
- */
- env->vfp.qc[0] = val & FPCR_QC;
- env->vfp.qc[1] = 0;
- env->vfp.qc[2] = 0;
- env->vfp.qc[3] = 0;
+static void vfp_set_fpscr_to_host(CPUARMState *env, uint32_t val)
+{
+ int i;
+ uint32_t changed = env->vfp.xregs[ARM_VFP_FPSCR];
changed ^= val;
if (changed & (3 << 22)) {
@@ -170,7 +137,8 @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16);
}
- /* The exception flags are ORed together when we read fpscr so we
+ /*
+ * The exception flags are ORed together when we read fpscr so we
* only need to preserve the current state in one of our
* float_status values.
*/
@@ -180,11 +148,86 @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
set_float_exception_flags(0, &env->vfp.standard_fp_status);
}
+#else
+
+static uint32_t vfp_get_fpscr_from_host(CPUARMState *env)
+{
+ return 0;
+}
+
+static void vfp_set_fpscr_to_host(CPUARMState *env, uint32_t val)
+{
+}
+
+#endif
+
+uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
+{
+ uint32_t i, fpscr;
+
+ fpscr = env->vfp.xregs[ARM_VFP_FPSCR]
+ | (env->vfp.vec_len << 16)
+ | (env->vfp.vec_stride << 20);
+
+ fpscr |= vfp_get_fpscr_from_host(env);
+
+ i = env->vfp.qc[0] | env->vfp.qc[1] | env->vfp.qc[2] | env->vfp.qc[3];
+ fpscr |= i ? FPCR_QC : 0;
+
+ return fpscr;
+}
+
+uint32_t vfp_get_fpscr(CPUARMState *env)
+{
+ return HELPER(vfp_get_fpscr)(env);
+}
+
+void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
+{
+ /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */
+ if (!cpu_isar_feature(aa64_fp16, env_archcpu(env))) {
+ val &= ~FPCR_FZ16;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ /*
+ * M profile FPSCR is RES0 for the QC, STRIDE, FZ16, LEN bits
+ * and also for the trapped-exception-handling bits IxE.
+ */
+ val &= 0xf7c0009f;
+ }
+
+ /*
+ * We don't implement trapped exception handling, so the
+ * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!)
+ *
+ * If we exclude the exception flags, IOC|DZC|OFC|UFC|IXC|IDC
+ * (which are stored in fp_status), and the other RES0 bits
+ * in between, then we clear all of the low 16 bits.
+ */
+ env->vfp.xregs[ARM_VFP_FPSCR] = val & 0xf7c80000;
+ env->vfp.vec_len = (val >> 16) & 7;
+ env->vfp.vec_stride = (val >> 20) & 3;
+
+ /*
+ * The bit we set within vfp.qc[] is arbitrary; the array as a
+ * whole being zero/non-zero is what counts.
+ */
+ env->vfp.qc[0] = val & FPCR_QC;
+ env->vfp.qc[1] = 0;
+ env->vfp.qc[2] = 0;
+ env->vfp.qc[3] = 0;
+
+ vfp_set_fpscr_to_host(env, val);
+}
+
void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
HELPER(vfp_set_fpscr)(env, val);
}
+#ifdef CONFIG_TCG
+
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
#define VFP_BINOP(name) \
@@ -1278,3 +1321,5 @@ float64 HELPER(frint64_d)(float64 f, void *fpst)
{
return frint_d(f, fpst, 64);
}
+
+#endif
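
As a closing illustration of the flag plumbing this file now funnels through vfp_get_fpscr_from_host(): the mapping itself is the six if-clauses in the first hunk above. The sketch below repeats it in a self-contained form; the float_flag_* values are local stand-ins (softfloat's real constants differ), and only the mapping to the FPSCR cumulative bits IOC(1), DZC(2), OFC(4), UFC(8), IXC(0x10), IDC(0x80) is taken from the code above:

    #include <stdio.h>

    /* Local stand-ins; values are arbitrary, only the mapping matters. */
    enum {
        float_flag_invalid         = 1 << 0,
        float_flag_divbyzero       = 1 << 1,
        float_flag_overflow        = 1 << 2,
        float_flag_underflow       = 1 << 3,
        float_flag_inexact         = 1 << 4,
        float_flag_input_denormal  = 1 << 5,
        float_flag_output_denormal = 1 << 6,
    };

    /* Same shape as vfp_exceptbits_from_host() above. */
    static int host_to_fpscr_bits(int host_bits)
    {
        int target_bits = 0;

        if (host_bits & float_flag_invalid) {
            target_bits |= 1;    /* IOC */
        }
        if (host_bits & float_flag_divbyzero) {
            target_bits |= 2;    /* DZC */
        }
        if (host_bits & float_flag_overflow) {
            target_bits |= 4;    /* OFC */
        }
        if (host_bits & (float_flag_underflow | float_flag_output_denormal)) {
            target_bits |= 8;    /* UFC */
        }
        if (host_bits & float_flag_inexact) {
            target_bits |= 0x10; /* IXC */
        }
        if (host_bits & float_flag_input_denormal) {
            target_bits |= 0x80; /* IDC */
        }
        return target_bits;
    }

    int main(void)
    {
        int host = float_flag_overflow | float_flag_inexact;
        printf("host=0x%x -> fpscr bits=0x%x\n", host, host_to_fpscr_bits(host));
        return 0;
    }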