author     Stefan Hajnoczi <stefanha@redhat.com>  2025-03-16 02:45:12 -0400
committer  Stefan Hajnoczi <stefanha@redhat.com>  2025-03-16 02:45:12 -0400
commit     5719376f17b9ea6420603f7962e644eeb3e62cc1 (patch)
tree       52de7f71fa758dd87b916e692d1f6a2a9eaaa65c
parent     9beccc2df03026dc2979f0f28b8ff952e356164e (diff)
parent     a019e15edfd62beae1e2f6adc0fa7415ba20b14c (diff)
Merge tag 'pull-target-arm-20250314-1' of https://git.linaro.org/people/pmaydell/qemu-arm into staging
target-arm queue:
 * Correctly handle corner cases of guest attempting an exception
   return to AArch32 when target EL is AArch64 only
 * MAINTAINERS: Fix status for Arm boards I "maintain"
 * tests/functional: Bump up arm_replay timeout
 * Revert "hw/char/pl011: Warn when using disabled receiver"
 * util/cacheflush: Make first DSB unconditional on aarch64
 * target/arm: Fix SVE/SME access check logic
 * meson.build: Set RUST_BACKTRACE for all tests

# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmfULAUZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3gMKD/9GwpPo5/q2VAsJ/e+4JcGM
# 5P8+lnt/tA5A2sA3Gl5o8v1LN5zm9CvyzHSlQSnvXygXlUP5e6vkwKQ8/DGZogjL
# L0wRGOqGyNWapT9sulwsKzLXlG+9GCKeLbKq8wC9mUnviQ+FxTz2IxDexJedw0pS
# NrLN55RSQO3OIEGt2fqIXKG+421/TfDPx998cwA4vyIgqZY1ZtHE2BvJNfatpSAc
# Y6Rdq/BqWc0Tx0BAL7RgEl86OFO6YskbJwPbT6t/2KRBrqDbeuaHrynOzfA1Wbqx
# RIvYqPuFg/ncziU7a2ZJLi4JvfSNO2RTH6KyDbq8WXqB5f7x59QuwXtfsEgmQK/T
# 9JkC1G2R9RWezRmVygc7pImIpkMmSs12nhiij3OTmsTCSHB/qQJ8jHoxZN/cTUCw
# pphVrAEwuWx48YR9x8xorsgoMRmwIkXdlTSuvLmq6y9ypq8OjoWILZuwN48ILZZT
# MqoKNQwbQJr/0L6Tg7csQayJ2L2fJgQDcVOA8lnjlAwRlRI+eMWUz181iGwwKDM9
# rvzntqrVx1d0H4I598vgv597GAn8wo3r7DK5lMt+M5zy5sJY1SgtJU6/PGNrtPKO
# GwLG1jaNjBHl0+YnEgvQp0Fw2bDXftxvZIjTiySHJ69xcC9oyUKtaDvJWUk4Ft8D
# USAXvWC1qKHPMACPUGRWCw==
# =g6lD
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 14 Mar 2025 09:15:49 EDT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [full]
# gpg:                 aka "Peter Maydell <peter@archaic.org.uk>" [unknown]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* tag 'pull-target-arm-20250314-1' of https://git.linaro.org/people/pmaydell/qemu-arm:
  meson.build: Set RUST_BACKTRACE for all tests
  target/arm: Simplify pstate_sm check in sve_access_check
  target/arm: Make DisasContext.{fp, sve}_access_checked tristate
  util/cacheflush: Make first DSB unconditional on aarch64
  Revert "hw/char/pl011: Warn when using disabled receiver"
  tests/functional: Bump up arm_replay timeout
  MAINTAINERS: Fix status for Arm boards I "maintain"
  target/arm: Forbid return to AArch32 when CPU is AArch64-only
  target/arm: Add cpu local variable to exception_return helper
  target/arm: HCR_EL2.RW should be RAO/WI if EL1 doesn't support AArch32
  target/arm: SCR_EL3.RW should be treated as 1 if EL2 doesn't support AArch32
  target/arm: Move arm_current_el() and arm_el_is_aa64() to internals.h
  target/arm: Move arm_cpu_data_is_big_endian() etc to internals.h
  linux-user/arm: Remove unused get_put_user macros
  linux-user/aarch64: Remove unused get/put_user macros
  target/arm: Un-inline access_secure_reg()
  target/arm: Move A32_BANKED_REG_{GET,SET} macros to cpregs.h

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
-rw-r--r--  .gitlab-ci.d/buildtest-template.yml |   1
-rw-r--r--  MAINTAINERS                         |  14
-rw-r--r--  hw/char/pl011.c                     |  19
-rw-r--r--  hw/intc/arm_gicv3_cpuif.c           |   1
-rw-r--r--  linux-user/aarch64/cpu_loop.c       |  48
-rw-r--r--  linux-user/arm/cpu_loop.c           |  43
-rw-r--r--  meson.build                         |   9
-rw-r--r--  target/arm/arch_dump.c              |   1
-rw-r--r--  target/arm/cpregs.h                 |  28
-rw-r--r--  target/arm/cpu.h                    | 153
-rw-r--r--  target/arm/helper.c                 |  16
-rw-r--r--  target/arm/internals.h              | 135
-rw-r--r--  target/arm/tcg/helper-a64.c         |  12
-rw-r--r--  target/arm/tcg/hflags.c             |   9
-rw-r--r--  target/arm/tcg/translate-a64.c      |  37
-rw-r--r--  target/arm/tcg/translate-a64.h      |   2
-rw-r--r--  target/arm/tcg/translate.h          |  10
-rw-r--r--  util/cacheflush.c                   |   4
18 files changed, 257 insertions(+), 285 deletions(-)
diff --git a/.gitlab-ci.d/buildtest-template.yml b/.gitlab-ci.d/buildtest-template.yml
index 4cc1923..39da769 100644
--- a/.gitlab-ci.d/buildtest-template.yml
+++ b/.gitlab-ci.d/buildtest-template.yml
@@ -63,7 +63,6 @@
stage: test
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
script:
- - export RUST_BACKTRACE=1
- source scripts/ci/gitlab-ci-section
- section_start buildenv "Setting up to run tests"
- scripts/git-submodule.sh update roms/SLOF
diff --git a/MAINTAINERS b/MAINTAINERS
index 31b395f..8f470a1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -786,7 +786,7 @@ F: docs/system/arm/kzm.rst
Integrator CP
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
-S: Maintained
+S: Odd Fixes
F: hw/arm/integratorcp.c
F: hw/misc/arm_integrator_debug.c
F: include/hw/misc/arm_integrator_debug.h
@@ -867,7 +867,7 @@ F: docs/system/arm/mps2.rst
Musca
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
-S: Maintained
+S: Odd Fixes
F: hw/arm/musca.c
F: docs/system/arm/musca.rst
@@ -915,7 +915,7 @@ F: tests/functional/test_aarch64_raspi4.py
Real View
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
-S: Maintained
+S: Odd Fixes
F: hw/arm/realview*
F: hw/cpu/realview_mpcore.c
F: hw/intc/realview_gic.c
@@ -965,7 +965,7 @@ F: tests/functional/test_arm_collie.py
Stellaris
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
-S: Maintained
+S: Odd Fixes
F: hw/*/stellaris*
F: hw/display/ssd03*
F: include/hw/input/gamepad.h
@@ -995,7 +995,7 @@ F: docs/system/arm/stm32.rst
Versatile Express
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
-S: Maintained
+S: Odd Fixes
F: hw/arm/vexpress.c
F: hw/display/sii9022.c
F: docs/system/arm/vexpress.rst
@@ -1004,7 +1004,7 @@ F: tests/functional/test_arm_vexpress.py
Versatile PB
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
-S: Maintained
+S: Odd Fixes
F: hw/*/versatile*
F: hw/i2c/arm_sbcon_i2c.c
F: include/hw/i2c/arm_sbcon_i2c.h
@@ -2003,7 +2003,7 @@ F: include/hw/hyperv/vmbus*.h
OMAP
M: Peter Maydell <peter.maydell@linaro.org>
L: qemu-arm@nongnu.org
-S: Maintained
+S: Odd Fixes
F: hw/*/omap*
F: include/hw/arm/omap.h
F: docs/system/arm/sx1.rst
diff --git a/hw/char/pl011.c b/hw/char/pl011.c
index 23a9db8..0e9ec13 100644
--- a/hw/char/pl011.c
+++ b/hw/char/pl011.c
@@ -490,16 +490,17 @@ static int pl011_can_receive(void *opaque)
unsigned fifo_depth = pl011_get_fifo_depth(s);
unsigned fifo_available = fifo_depth - s->read_count;
- if (!(s->cr & CR_UARTEN)) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "PL011 receiving data on disabled UART\n");
- }
- if (!(s->cr & CR_RXE)) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "PL011 receiving data on disabled RX UART\n");
- }
- trace_pl011_can_receive(s->lcr, s->read_count, fifo_depth, fifo_available);
+ /*
+ * In theory we should check the UART and RX enable bits here and
+ * return 0 if they are not set (so the guest can't receive data
+ * until it has enabled the UART). In practice we suspect there
+ * is at least some guest code out there which has been tested only
+ * on QEMU and which never bothers to enable the UART because we
+ * historically never enforced that. So we effectively keep the
+ * UART continuously enabled regardless of the enable bits.
+ */
+ trace_pl011_can_receive(s->lcr, s->read_count, fifo_depth, fifo_available);
return fifo_available;
}
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index 7f1d071..de37465 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -22,6 +22,7 @@
#include "cpu.h"
#include "target/arm/cpregs.h"
#include "target/arm/cpu-features.h"
+#include "target/arm/internals.h"
#include "system/tcg.h"
#include "system/qtest.h"
diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c
index c5d8a48..fea43ce 100644
--- a/linux-user/aarch64/cpu_loop.c
+++ b/linux-user/aarch64/cpu_loop.c
@@ -27,54 +27,6 @@
#include "target/arm/syndrome.h"
#include "target/arm/cpu-features.h"
-#define get_user_code_u32(x, gaddr, env) \
- ({ abi_long __r = get_user_u32((x), (gaddr)); \
- if (!__r && bswap_code(arm_sctlr_b(env))) { \
- (x) = bswap32(x); \
- } \
- __r; \
- })
-
-#define get_user_code_u16(x, gaddr, env) \
- ({ abi_long __r = get_user_u16((x), (gaddr)); \
- if (!__r && bswap_code(arm_sctlr_b(env))) { \
- (x) = bswap16(x); \
- } \
- __r; \
- })
-
-#define get_user_data_u32(x, gaddr, env) \
- ({ abi_long __r = get_user_u32((x), (gaddr)); \
- if (!__r && arm_cpu_bswap_data(env)) { \
- (x) = bswap32(x); \
- } \
- __r; \
- })
-
-#define get_user_data_u16(x, gaddr, env) \
- ({ abi_long __r = get_user_u16((x), (gaddr)); \
- if (!__r && arm_cpu_bswap_data(env)) { \
- (x) = bswap16(x); \
- } \
- __r; \
- })
-
-#define put_user_data_u32(x, gaddr, env) \
- ({ typeof(x) __x = (x); \
- if (arm_cpu_bswap_data(env)) { \
- __x = bswap32(__x); \
- } \
- put_user_u32(__x, (gaddr)); \
- })
-
-#define put_user_data_u16(x, gaddr, env) \
- ({ typeof(x) __x = (x); \
- if (arm_cpu_bswap_data(env)) { \
- __x = bswap16(__x); \
- } \
- put_user_u16(__x, (gaddr)); \
- })
-
/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
{
diff --git a/linux-user/arm/cpu_loop.c b/linux-user/arm/cpu_loop.c
index 10d8561..7416e32 100644
--- a/linux-user/arm/cpu_loop.c
+++ b/linux-user/arm/cpu_loop.c
@@ -36,45 +36,10 @@
__r; \
})
-#define get_user_code_u16(x, gaddr, env) \
- ({ abi_long __r = get_user_u16((x), (gaddr)); \
- if (!__r && bswap_code(arm_sctlr_b(env))) { \
- (x) = bswap16(x); \
- } \
- __r; \
- })
-
-#define get_user_data_u32(x, gaddr, env) \
- ({ abi_long __r = get_user_u32((x), (gaddr)); \
- if (!__r && arm_cpu_bswap_data(env)) { \
- (x) = bswap32(x); \
- } \
- __r; \
- })
-
-#define get_user_data_u16(x, gaddr, env) \
- ({ abi_long __r = get_user_u16((x), (gaddr)); \
- if (!__r && arm_cpu_bswap_data(env)) { \
- (x) = bswap16(x); \
- } \
- __r; \
- })
-
-#define put_user_data_u32(x, gaddr, env) \
- ({ typeof(x) __x = (x); \
- if (arm_cpu_bswap_data(env)) { \
- __x = bswap32(__x); \
- } \
- put_user_u32(__x, (gaddr)); \
- })
-
-#define put_user_data_u16(x, gaddr, env) \
- ({ typeof(x) __x = (x); \
- if (arm_cpu_bswap_data(env)) { \
- __x = bswap16(__x); \
- } \
- put_user_u16(__x, (gaddr)); \
- })
+/*
+ * Note that if we need to do data accesses here, they should do a
+ * bswap if arm_cpu_bswap_data() returns true.
+ */
/*
* Similar to code in accel/tcg/user-exec.c, but outside the execution loop.
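
To make the note above concrete: a data access added here would follow the
pattern of the macros this patch deletes. A minimal sketch based on those
deleted macros (the function name is hypothetical; get_user_u32() and
bswap32() are the existing linux-user/QEMU helpers):

static inline abi_long get_user_data_u32_sketch(uint32_t *x, abi_ulong gaddr,
                                                CPUARMState *env)
{
    /* Plain load via the linux-user access macro */
    abi_long ret = get_user_u32(*x, gaddr);

    /*
     * bswap when the guest's runtime data endianness differs from the
     * endianness the target binary was configured for.
     */
    if (!ret && arm_cpu_bswap_data(env)) {
        *x = bswap32(*x);
    }
    return ret;
}
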
diff --git a/meson.build b/meson.build
index 2f43fd8..7f75256 100644
--- a/meson.build
+++ b/meson.build
@@ -5,9 +5,12 @@ project('qemu', ['c'], meson_version: '>=1.5.0',
meson.add_devenv({ 'MESON_BUILD_ROOT' : meson.project_build_root() })
-add_test_setup('quick', exclude_suites: ['slow', 'thorough'], is_default: true)
-add_test_setup('slow', exclude_suites: ['thorough'], env: ['G_TEST_SLOW=1', 'SPEED=slow'])
-add_test_setup('thorough', env: ['G_TEST_SLOW=1', 'SPEED=thorough'])
+add_test_setup('quick', exclude_suites: ['slow', 'thorough'], is_default: true,
+ env: ['RUST_BACKTRACE=1'])
+add_test_setup('slow', exclude_suites: ['thorough'],
+ env: ['G_TEST_SLOW=1', 'SPEED=slow', 'RUST_BACKTRACE=1'])
+add_test_setup('thorough',
+ env: ['G_TEST_SLOW=1', 'SPEED=thorough', 'RUST_BACKTRACE=1'])
meson.add_postconf_script(find_program('scripts/symlink-install-tree.py'))
diff --git a/target/arm/arch_dump.c b/target/arm/arch_dump.c
index 5c943dc..c40df4e 100644
--- a/target/arm/arch_dump.c
+++ b/target/arm/arch_dump.c
@@ -23,6 +23,7 @@
#include "elf.h"
#include "system/dump.h"
#include "cpu-features.h"
+#include "internals.h"
/* struct user_pt_regs from arch/arm64/include/uapi/asm/ptrace.h */
struct aarch64_user_regs {
diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
index 52377c6..2183de8 100644
--- a/target/arm/cpregs.h
+++ b/target/arm/cpregs.h
@@ -1157,4 +1157,32 @@ static inline bool arm_cpreg_traps_in_nv(const ARMCPRegInfo *ri)
return ri->opc1 == 4 || ri->opc1 == 5;
}
+/* Macros for accessing a specified CP register bank */
+#define A32_BANKED_REG_GET(_env, _regname, _secure) \
+ ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns)
+
+#define A32_BANKED_REG_SET(_env, _regname, _secure, _val) \
+ do { \
+ if (_secure) { \
+ (_env)->cp15._regname##_s = (_val); \
+ } else { \
+ (_env)->cp15._regname##_ns = (_val); \
+ } \
+ } while (0)
+
+/*
+ * Macros for automatically accessing a specific CP register bank depending on
+ * the current secure state of the system. These macros are not intended for
+ * supporting instruction translation reads/writes as these are dependent
+ * solely on the SCR.NS bit and not the mode.
+ */
+#define A32_BANKED_CURRENT_REG_GET(_env, _regname) \
+ A32_BANKED_REG_GET((_env), _regname, \
+ (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)))
+
+#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val) \
+ A32_BANKED_REG_SET((_env), _regname, \
+ (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \
+ (_val))
+
#endif /* TARGET_ARM_CPREGS_H */
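
Usage sketch for the macros moved here (not part of the patch; it assumes a
banked cp15 field such as vbar, which exists as env->cp15.vbar_s/vbar_ns):

/* Read the bank selected by the current secure state */
uint64_t vbar = A32_BANKED_CURRENT_REG_GET(env, vbar);

/* Write an explicitly chosen bank, here the secure instance */
A32_BANKED_REG_SET(env, vbar, true, 0xffff0000);
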
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 8f52380..a8177c6 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -2635,81 +2635,15 @@ uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space);
uint64_t arm_hcr_el2_eff(CPUARMState *env);
uint64_t arm_hcrx_el2_eff(CPUARMState *env);
-/* Return true if the specified exception level is running in AArch64 state. */
-static inline bool arm_el_is_aa64(CPUARMState *env, int el)
-{
- /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
- * and if we're not in EL0 then the state of EL0 isn't well defined.)
- */
- assert(el >= 1 && el <= 3);
- bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);
-
- /* The highest exception level is always at the maximum supported
- * register width, and then lower levels have a register width controlled
- * by bits in the SCR or HCR registers.
- */
- if (el == 3) {
- return aa64;
- }
-
- if (arm_feature(env, ARM_FEATURE_EL3) &&
- ((env->cp15.scr_el3 & SCR_NS) || !(env->cp15.scr_el3 & SCR_EEL2))) {
- aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
- }
-
- if (el == 2) {
- return aa64;
- }
-
- if (arm_is_el2_enabled(env)) {
- aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
- }
-
- return aa64;
-}
-
-/* Function for determining whether guest cp register reads and writes should
+/*
+ * Function for determining whether guest cp register reads and writes should
* access the secure or non-secure bank of a cp register. When EL3 is
* operating in AArch32 state, the NS-bit determines whether the secure
* instance of a cp register should be used. When EL3 is AArch64 (or if
* it doesn't exist at all) then there is no register banking, and all
* accesses are to the non-secure version.
*/
-static inline bool access_secure_reg(CPUARMState *env)
-{
- bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
- !arm_el_is_aa64(env, 3) &&
- !(env->cp15.scr_el3 & SCR_NS));
-
- return ret;
-}
-
-/* Macros for accessing a specified CP register bank */
-#define A32_BANKED_REG_GET(_env, _regname, _secure) \
- ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns)
-
-#define A32_BANKED_REG_SET(_env, _regname, _secure, _val) \
- do { \
- if (_secure) { \
- (_env)->cp15._regname##_s = (_val); \
- } else { \
- (_env)->cp15._regname##_ns = (_val); \
- } \
- } while (0)
-
-/* Macros for automatically accessing a specific CP register bank depending on
- * the current secure state of the system. These macros are not intended for
- * supporting instruction translation reads/writes as these are dependent
- * solely on the SCR.NS bit and not the mode.
- */
-#define A32_BANKED_CURRENT_REG_GET(_env, _regname) \
- A32_BANKED_REG_GET((_env), _regname, \
- (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)))
-
-#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val) \
- A32_BANKED_REG_SET((_env), _regname, \
- (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \
- (_val))
+bool access_secure_reg(CPUARMState *env);
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
uint32_t cur_el, bool secure);
@@ -2732,39 +2666,6 @@ static inline bool arm_v7m_is_handler_mode(CPUARMState *env)
return env->v7m.exception != 0;
}
-/* Return the current Exception Level (as per ARMv8; note that this differs
- * from the ARMv7 Privilege Level).
- */
-static inline int arm_current_el(CPUARMState *env)
-{
- if (arm_feature(env, ARM_FEATURE_M)) {
- return arm_v7m_is_handler_mode(env) ||
- !(env->v7m.control[env->v7m.secure] & 1);
- }
-
- if (is_a64(env)) {
- return extract32(env->pstate, 2, 2);
- }
-
- switch (env->uncached_cpsr & 0x1f) {
- case ARM_CPU_MODE_USR:
- return 0;
- case ARM_CPU_MODE_HYP:
- return 2;
- case ARM_CPU_MODE_MON:
- return 3;
- default:
- if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
- /* If EL3 is 32-bit then all secure privileged modes run in
- * EL3
- */
- return 3;
- }
-
- return 1;
- }
-}
-
/**
* write_list_to_cpustate
* @cpu: ARMCPU
@@ -3065,47 +2966,6 @@ static inline bool arm_sctlr_b(CPUARMState *env)
uint64_t arm_sctlr(CPUARMState *env, int el);
-static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
- bool sctlr_b)
-{
-#ifdef CONFIG_USER_ONLY
- /*
- * In system mode, BE32 is modelled in line with the
- * architecture (as word-invariant big-endianness), where loads
- * and stores are done little endian but from addresses which
- * are adjusted by XORing with the appropriate constant. So the
- * endianness to use for the raw data access is not affected by
- * SCTLR.B.
- * In user mode, however, we model BE32 as byte-invariant
- * big-endianness (because user-only code cannot tell the
- * difference), and so we need to use a data access endianness
- * that depends on SCTLR.B.
- */
- if (sctlr_b) {
- return true;
- }
-#endif
- /* In 32bit endianness is determined by looking at CPSR's E bit */
- return env->uncached_cpsr & CPSR_E;
-}
-
-static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
-{
- return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
-}
-
-/* Return true if the processor is in big-endian mode. */
-static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
-{
- if (!is_a64(env)) {
- return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
- } else {
- int cur_el = arm_current_el(env);
- uint64_t sctlr = arm_sctlr(env, cur_el);
- return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
- }
-}
-
#include "exec/cpu-all.h"
/*
@@ -3291,13 +3151,6 @@ static inline bool bswap_code(bool sctlr_b)
#endif
}
-#ifdef CONFIG_USER_ONLY
-static inline bool arm_cpu_bswap_data(CPUARMState *env)
-{
- return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env);
-}
-#endif
-
void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
uint64_t *cs_base, uint32_t *flags);
diff --git a/target/arm/helper.c b/target/arm/helper.c
index f0ead22..bb445e3 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -5326,6 +5326,11 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
/* Clear RES0 bits. */
value &= valid_mask;
+ /* RW is RAO/WI if EL1 is AArch64 only */
+ if (!cpu_isar_feature(aa64_aa32_el1, cpu)) {
+ value |= HCR_RW;
+ }
+
/*
* These bits change the MMU setup:
* HCR_VM enables stage 2 translation
@@ -5383,6 +5388,12 @@ static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
}
+static void hcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /* hcr_write will set the RES1 bits on an AArch64-only CPU */
+ hcr_write(env, ri, 0);
+}
+
/*
* Return the effective value of HCR_EL2, at the given security state.
* Bits that are not included here:
@@ -5618,6 +5629,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
.nv2_redirect_offset = 0x78,
+ .resetfn = hcr_reset,
.writefn = hcr_write, .raw_writefn = raw_write },
{ .name = "HCR", .state = ARM_CP_STATE_AA32,
.type = ARM_CP_ALIAS | ARM_CP_IO,
@@ -9818,7 +9830,7 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
uint64_t hcr_el2;
if (arm_feature(env, ARM_FEATURE_EL3)) {
- rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
+ rw = arm_scr_rw_eff(env);
} else {
/*
* Either EL2 is the highest EL (and so the EL2 register width
@@ -10627,7 +10639,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
switch (new_el) {
case 3:
- is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
+ is_aa64 = arm_scr_rw_eff(env);
break;
case 2:
hcr = arm_hcr_el2_eff(env);
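
For readers unfamiliar with the term: RAO/WI means the bit reads-as-one and
writes to it are ignored. A sketch of the guest-visible effect of the
do_hcr_write() hunk above (illustrative only, not patch code):

static void check_hcr_rw_rao_wi(CPUARMState *env, ARMCPU *cpu)
{
    if (!cpu_isar_feature(aa64_aa32_el1, cpu)) {
        /* Whatever value the guest wrote, the stored RW bit is set */
        g_assert(env->cp15.hcr_el2 & HCR_RW);
    }
}
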
diff --git a/target/arm/internals.h b/target/arm/internals.h
index bb962389..28585c0 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -392,6 +392,141 @@ static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
return arm_rmode_to_sf_map[rmode];
}
+/* Return the effective value of SCR_EL3.RW */
+static inline bool arm_scr_rw_eff(CPUARMState *env)
+{
+ /*
+ * SCR_EL3.RW has an effective value of 1 if:
+ * - we are NS and EL2 is implemented but doesn't support AArch32
+ * - we are S and EL2 is enabled (in which case it must be AArch64)
+ */
+ ARMCPU *cpu = env_archcpu(env);
+
+ if (env->cp15.scr_el3 & SCR_RW) {
+ return true;
+ }
+ if (env->cp15.scr_el3 & SCR_NS) {
+ return arm_feature(env, ARM_FEATURE_EL2) &&
+ !cpu_isar_feature(aa64_aa32_el2, cpu);
+ } else {
+ return env->cp15.scr_el3 & SCR_EEL2;
+ }
+}
+
+/* Return true if the specified exception level is running in AArch64 state. */
+static inline bool arm_el_is_aa64(CPUARMState *env, int el)
+{
+ /*
+ * This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
+ * and if we're not in EL0 then the state of EL0 isn't well defined.)
+ */
+ assert(el >= 1 && el <= 3);
+ bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);
+
+ /*
+ * The highest exception level is always at the maximum supported
+ * register width, and then lower levels have a register width controlled
+ * by bits in the SCR or HCR registers.
+ */
+ if (el == 3) {
+ return aa64;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ aa64 = aa64 && arm_scr_rw_eff(env);
+ }
+
+ if (el == 2) {
+ return aa64;
+ }
+
+ if (arm_is_el2_enabled(env)) {
+ aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
+ }
+
+ return aa64;
+}
+
+/*
+ * Return the current Exception Level (as per ARMv8; note that this differs
+ * from the ARMv7 Privilege Level).
+ */
+static inline int arm_current_el(CPUARMState *env)
+{
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return arm_v7m_is_handler_mode(env) ||
+ !(env->v7m.control[env->v7m.secure] & 1);
+ }
+
+ if (is_a64(env)) {
+ return extract32(env->pstate, 2, 2);
+ }
+
+ switch (env->uncached_cpsr & 0x1f) {
+ case ARM_CPU_MODE_USR:
+ return 0;
+ case ARM_CPU_MODE_HYP:
+ return 2;
+ case ARM_CPU_MODE_MON:
+ return 3;
+ default:
+ if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
+ /* If EL3 is 32-bit then all secure privileged modes run in EL3 */
+ return 3;
+ }
+
+ return 1;
+ }
+}
+
+static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
+ bool sctlr_b)
+{
+#ifdef CONFIG_USER_ONLY
+ /*
+ * In system mode, BE32 is modelled in line with the
+ * architecture (as word-invariant big-endianness), where loads
+ * and stores are done little endian but from addresses which
+ * are adjusted by XORing with the appropriate constant. So the
+ * endianness to use for the raw data access is not affected by
+ * SCTLR.B.
+ * In user mode, however, we model BE32 as byte-invariant
+ * big-endianness (because user-only code cannot tell the
+ * difference), and so we need to use a data access endianness
+ * that depends on SCTLR.B.
+ */
+ if (sctlr_b) {
+ return true;
+ }
+#endif
+ /* In 32-bit mode, endianness is determined by the CPSR E bit */
+ return env->uncached_cpsr & CPSR_E;
+}
+
+static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
+{
+ return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
+}
+
+/* Return true if the processor is in big-endian mode. */
+static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
+{
+ if (!is_a64(env)) {
+ return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
+ } else {
+ int cur_el = arm_current_el(env);
+ uint64_t sctlr = arm_sctlr(env, cur_el);
+ return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
+ }
+}
+
+#ifdef CONFIG_USER_ONLY
+static inline bool arm_cpu_bswap_data(CPUARMState *env)
+{
+ return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env);
+}
+#endif
+
static inline void aarch64_save_sp(CPUARMState *env, int el)
{
if (env->pstate & PSTATE_SP) {
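
The practical consequence of arm_scr_rw_eff(), as a sketch (the helper below
is hypothetical; the two helper.c hunks above make exactly this substitution):

static bool el2_width_for_routing(CPUARMState *env)
{
    /*
     * The raw SCR_EL3.RW bit may be 0 even though EL2 can only run
     * AArch64, so exception routing must use the effective value
     * rather than reading the bit directly.
     */
    return arm_scr_rw_eff(env);
}
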
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
index 32f0647..9244848 100644
--- a/target/arm/tcg/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
@@ -631,6 +631,7 @@ static void cpsr_write_from_spsr_elx(CPUARMState *env,
void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
{
+ ARMCPU *cpu = env_archcpu(env);
int cur_el = arm_current_el(env);
unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
uint32_t spsr = env->banked_spsr[spsr_idx];
@@ -677,12 +678,17 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
goto illegal_return;
}
+ if (!return_to_aa64 && !cpu_isar_feature(aa64_aa32, cpu)) {
+ /* Return to AArch32 when CPU is AArch64-only */
+ goto illegal_return;
+ }
+
if (new_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
goto illegal_return;
}
bql_lock();
- arm_call_pre_el_change_hook(env_archcpu(env));
+ arm_call_pre_el_change_hook(cpu);
bql_unlock();
if (!return_to_aa64) {
@@ -710,7 +716,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
int tbii;
env->aarch64 = true;
- spsr &= aarch64_pstate_valid_mask(&env_archcpu(env)->isar);
+ spsr &= aarch64_pstate_valid_mask(&cpu->isar);
pstate_write(env, spsr);
if (!arm_singlestep_active(env)) {
env->pstate &= ~PSTATE_SS;
@@ -749,7 +755,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64);
bql_lock();
- arm_call_el_change_hook(env_archcpu(env));
+ arm_call_el_change_hook(cpu);
bql_unlock();
return;
diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c
index 9e6a186..8d79b8b 100644
--- a/target/arm/tcg/hflags.c
+++ b/target/arm/tcg/hflags.c
@@ -63,6 +63,15 @@ static bool aprofile_require_alignment(CPUARMState *env, int el, uint64_t sctlr)
#endif
}
+bool access_secure_reg(CPUARMState *env)
+{
+ bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_el_is_aa64(env, 3) &&
+ !(env->cp15.scr_el3 & SCR_NS));
+
+ return ret;
+}
+
static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
ARMMMUIdx mmu_idx,
CPUARMTBFlags flags)
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 8bef391..3901432 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -1381,14 +1381,14 @@ static bool fp_access_check_only(DisasContext *s)
{
if (s->fp_excp_el) {
assert(!s->fp_access_checked);
- s->fp_access_checked = true;
+ s->fp_access_checked = -1;
gen_exception_insn_el(s, 0, EXCP_UDEF,
syn_fp_access_trap(1, 0xe, false, 0),
s->fp_excp_el);
return false;
}
- s->fp_access_checked = true;
+ s->fp_access_checked = 1;
return true;
}
@@ -1456,23 +1456,23 @@ static int fp_access_check_vector_hsd(DisasContext *s, bool is_q, MemOp esz)
bool sve_access_check(DisasContext *s)
{
if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
+ bool ret;
+
assert(dc_isar_feature(aa64_sme, s));
- if (!sme_sm_enabled_check(s)) {
- goto fail_exit;
- }
- } else if (s->sve_excp_el) {
+ ret = sme_sm_enabled_check(s);
+ s->sve_access_checked = (ret ? 1 : -1);
+ return ret;
+ }
+ if (s->sve_excp_el) {
+ /* Assert that we only raise one exception per instruction. */
+ assert(!s->sve_access_checked);
gen_exception_insn_el(s, 0, EXCP_UDEF,
syn_sve_access_trap(), s->sve_excp_el);
- goto fail_exit;
+ s->sve_access_checked = -1;
+ return false;
}
- s->sve_access_checked = true;
+ s->sve_access_checked = 1;
return fp_access_check(s);
-
- fail_exit:
- /* Assert that we only raise one exception per instruction. */
- assert(!s->sve_access_checked);
- s->sve_access_checked = true;
- return false;
}
/*
@@ -1500,8 +1500,9 @@ bool sme_enabled_check(DisasContext *s)
* sme_excp_el by itself for cpregs access checks.
*/
if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
- s->fp_access_checked = true;
- return sme_access_check(s);
+ bool ret = sme_access_check(s);
+ s->fp_access_checked = (ret ? 1 : -1);
+ return ret;
}
return fp_access_check_only(s);
}
@@ -10257,8 +10258,8 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
s->insn = insn;
s->base.pc_next = pc + 4;
- s->fp_access_checked = false;
- s->sve_access_checked = false;
+ s->fp_access_checked = 0;
+ s->sve_access_checked = 0;
if (s->pstate_il) {
/*
diff --git a/target/arm/tcg/translate-a64.h b/target/arm/tcg/translate-a64.h
index 7d3b59c..b2420f5 100644
--- a/target/arm/tcg/translate-a64.h
+++ b/target/arm/tcg/translate-a64.h
@@ -65,7 +65,7 @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
static inline void assert_fp_access_checked(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
- if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
+ if (unlikely(s->fp_access_checked <= 0)) {
fprintf(stderr, "target-arm: FP access check missing for "
"instruction 0x%08x\n", s->insn);
abort();
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
index f8dc2f0..53e485d 100644
--- a/target/arm/tcg/translate.h
+++ b/target/arm/tcg/translate.h
@@ -92,15 +92,19 @@ typedef struct DisasContext {
bool aarch64;
bool thumb;
bool lse2;
- /* Because unallocated encodings generate different exception syndrome
+ /*
+ * Because unallocated encodings generate different exception syndrome
* information from traps due to FP being disabled, we can't do a single
* "is fp access disabled" check at a high level in the decode tree.
* To help in catching bugs where the access check was forgotten in some
* code path, we set this flag when the access check is done, and assert
* that it is set at the point where we actually touch the FP regs.
+ * 0: not checked,
+ * 1: checked, access ok
+ * -1: checked, access denied
*/
- bool fp_access_checked;
- bool sve_access_checked;
+ int8_t fp_access_checked;
+ int8_t sve_access_checked;
/* ARMv8 single-step state (this is distinct from the QEMU gdbstub
* single-step support).
*/
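
Reading aid for the tristate encoding (sketch only; these helpers are not in
the patch):

static inline bool access_was_checked(int8_t st) { return st != 0; }
static inline bool access_allowed(int8_t st)     { return st > 0; }
static inline bool access_denied(int8_t st)      { return st < 0; }

/*
 * assert_fp_access_checked() in translate-a64.h relies on this: it now
 * asserts fp_access_checked > 0 ("checked and allowed") before any FP
 * register is touched.
 */
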
diff --git a/util/cacheflush.c b/util/cacheflush.c
index a089061..1d12899 100644
--- a/util/cacheflush.c
+++ b/util/cacheflush.c
@@ -279,9 +279,11 @@ void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
for (p = rw & -dcache_lsize; p < rw + len; p += dcache_lsize) {
asm volatile("dc\tcvau, %0" : : "r" (p) : "memory");
}
- asm volatile("dsb\tish" : : : "memory");
}
+ /* DSB unconditionally to ensure any outstanding writes are committed. */
+ asm volatile("dsb\tish" : : : "memory");
+
/*
* If CTR_EL0.DIC is enabled, Instruction cache cleaning to the Point
* of Unification is not required for instruction to data coherence.
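
A simplified sketch of the whole flush sequence after this change
(illustrative only; idc and dic stand in for the cached CTR_EL0 IDC/DIC
probes, and only the barrier placement matters here):

static void flush_idcache_sketch(uintptr_t rw, uintptr_t rx, size_t len,
                                 bool idc, bool dic)
{
    if (!idc) {
        /* clean each D-cache line covering [rw, rw + len) to the PoU */
    }
    /* Unconditional: commit writes even when the D-side loop was skipped */
    asm volatile("dsb ish" : : : "memory");

    if (!dic) {
        /* invalidate each I-cache line covering [rx, rx + len) */
        asm volatile("dsb ish" : : : "memory");
    }
    asm volatile("isb" : : : "memory");
}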