author     Peter Maydell <peter.maydell@linaro.org>  2018-10-24 10:49:14 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2018-10-24 10:49:14 +0100
commit     e60b38f445d0ca0c305440b07a23e8f0da73373a
tree       820b8f48c523040b54c6501a35d8f83308f4df01 /linux-user
parent     13399aad4fa87b2878c49d02a5d3bafa6c966ba3
parent     93f379b0c43617b1361f742f261479eaed4959cb
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20181024' into staging
target-arm queue:
* ssi-sd: Make devices picking up backends unavailable with -device
* Add support for VCPU event states
* Move towards making ID registers the source of truth for
  whether a guest CPU implements a feature, rather than having
  parallel ID registers and feature bit flags (see the sketch
  after this list)
* Implement various HCR hypervisor trap/config bits
* Get IL bit correct for v7 syndrome values
* Report correct syndrome for FP/SIMD traps to Hyp mode
* hw/arm/boot: Increase compliance with kernel arm64 boot protocol
* Refactor A32 Neon to use generic vector infrastructure
* Fix a bug in A32 VLD2 "(multiple 2-element structures)" insn
* net: cadence_gem: Report features correctly in ID register
* Avoid some unnecessary TLB flushes on TTBR register writes
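
The ID-register work above replaces ad-hoc feature bits with tests of
the architectural ID register fields. As a minimal sketch of the pattern
(QEMU-style names from this series; the exact helpers live in
target/arm/cpu.h and may differ in detail):

    /* Sketch: derive a feature test from an ID register field rather
     * than a separate feature flag. FIELD_EX64() extracts a named
     * bitfield from a 64-bit register; a nonzero ID_AA64PFR0.SVE field
     * means the CPU implements SVE.
     */
    static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
    {
        return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
    }

    /* Wrapper used at call sites, as in the linux-user diff below. */
    #define cpu_isar_feature(name, cpu) \
        ({ ARMCPU *cpu_ = (cpu); isar_feature_##name(&cpu_->isar); })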
# gpg: Signature made Wed 24 Oct 2018 10:46:01 BST
# gpg: using RSA key 3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg: aka "Peter Maydell <pmaydell@gmail.com>"
# gpg: aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE
* remotes/pmaydell/tags/pull-target-arm-20181024: (44 commits)
target/arm: Only flush tlb if ASID changes
target/arm: Remove writefn from TTBR0_EL3
net: cadence_gem: Announce 64bit addressing support
net: cadence_gem: Announce availability of priority queues
target/arm: Reorg NEON VLD/VST single element to one lane
target/arm: Promote consecutive memory ops for aa32
target/arm: Reorg NEON VLD/VST all elements
target/arm: Use gvec for NEON VLD all lanes
target/arm: Use gvec for NEON_3R_VTST_VCEQ, NEON_3R_VCGT, NEON_3R_VCGE
target/arm: Use gvec for NEON_3R_VML
target/arm: Use gvec for VSRI, VSLI
target/arm: Use gvec for VSRA
target/arm: Use gvec for VSHR, VSHL
target/arm: Use gvec for NEON_3R_VMUL
target/arm: Use gvec for NEON_2RM_VMN, NEON_2RM_VNEG
target/arm: Use gvec for NEON_3R_VADD_VSUB insns
target/arm: Use gvec for NEON_3R_LOGIC insns
target/arm: Use gvec for NEON VMOV, VMVN, VBIC & VORR (immediate)
target/arm: Use gvec for NEON VDUP
target/arm: Mark some arrays const
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'linux-user')
-rw-r--r--  linux-user/aarch64/signal.c |  4
-rw-r--r--  linux-user/elfload.c        | 58
-rw-r--r--  linux-user/syscall.c        | 10
3 files changed, 40 insertions(+), 32 deletions(-)
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index 07fedfc..f84a9cf 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -314,7 +314,7 @@ static int target_restore_sigframe(CPUARMState *env,
             break;
 
         case TARGET_SVE_MAGIC:
-            if (arm_feature(env, ARM_FEATURE_SVE)) {
+            if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) {
                 vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                 sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
                 if (!sve && size == sve_size) {
@@ -433,7 +433,7 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
                               &layout);
 
     /* SVE state needs saving only if it exists.  */
-    if (arm_feature(env, ARM_FEATURE_SVE)) {
+    if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) {
         vq = (env->vfp.zcr_el[1] & 0xf) + 1;
         sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
         sve_ofs = alloc_sigframe_space(sve_size, &layout);
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 10bca65..055f6a9 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -458,6 +458,10 @@ static uint32_t get_elf_hwcap(void)
     /* probe for the extra features */
 #define GET_FEATURE(feat, hwcap) \
     do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
+
+#define GET_FEATURE_ID(feat, hwcap) \
+    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
+
     /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
     GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
     GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP);
@@ -467,8 +471,8 @@ static uint32_t get_elf_hwcap(void)
     GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3);
     GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
     GET_FEATURE(ARM_FEATURE_VFP4, ARM_HWCAP_ARM_VFPv4);
-    GET_FEATURE(ARM_FEATURE_ARM_DIV, ARM_HWCAP_ARM_IDIVA);
-    GET_FEATURE(ARM_FEATURE_THUMB_DIV, ARM_HWCAP_ARM_IDIVT);
+    GET_FEATURE_ID(arm_div, ARM_HWCAP_ARM_IDIVA);
+    GET_FEATURE_ID(thumb_div, ARM_HWCAP_ARM_IDIVT);
     /* All QEMU's VFPv3 CPUs have 32 registers, see VFP_DREG in translate.c.
      * Note that the ARM_HWCAP_ARM_VFPv3D16 bit is always the inverse of
      * ARM_HWCAP_ARM_VFPD32 (and so always clear for QEMU); it is unrelated
@@ -485,15 +489,16 @@ static uint32_t get_elf_hwcap2(void)
     ARMCPU *cpu = ARM_CPU(thread_cpu);
     uint32_t hwcaps = 0;
 
-    GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP2_ARM_AES);
-    GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP2_ARM_PMULL);
-    GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP2_ARM_SHA1);
-    GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP2_ARM_SHA2);
-    GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP2_ARM_CRC32);
+    GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
+    GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
+    GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
+    GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
+    GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
     return hwcaps;
 }
 
 #undef GET_FEATURE
+#undef GET_FEATURE_ID
 
 #else
 /* 64 bit ARM definitions */
@@ -568,25 +573,26 @@ static uint32_t get_elf_hwcap(void)
     hwcaps |= ARM_HWCAP_A64_ASIMD;
 
     /* probe for the extra features */
-#define GET_FEATURE(feat, hwcap) \
-    do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
-    GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP_A64_AES);
-    GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP_A64_PMULL);
-    GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP_A64_SHA1);
-    GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP_A64_SHA2);
-    GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP_A64_CRC32);
-    GET_FEATURE(ARM_FEATURE_V8_SHA3, ARM_HWCAP_A64_SHA3);
-    GET_FEATURE(ARM_FEATURE_V8_SM3, ARM_HWCAP_A64_SM3);
-    GET_FEATURE(ARM_FEATURE_V8_SM4, ARM_HWCAP_A64_SM4);
-    GET_FEATURE(ARM_FEATURE_V8_SHA512, ARM_HWCAP_A64_SHA512);
-    GET_FEATURE(ARM_FEATURE_V8_FP16,
-                ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
-    GET_FEATURE(ARM_FEATURE_V8_ATOMICS, ARM_HWCAP_A64_ATOMICS);
-    GET_FEATURE(ARM_FEATURE_V8_RDM, ARM_HWCAP_A64_ASIMDRDM);
-    GET_FEATURE(ARM_FEATURE_V8_DOTPROD, ARM_HWCAP_A64_ASIMDDP);
-    GET_FEATURE(ARM_FEATURE_V8_FCMA, ARM_HWCAP_A64_FCMA);
-    GET_FEATURE(ARM_FEATURE_SVE, ARM_HWCAP_A64_SVE);
-#undef GET_FEATURE
+#define GET_FEATURE_ID(feat, hwcap) \
+    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
+
+    GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
+    GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
+    GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
+    GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
+    GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
+    GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
+    GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
+    GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
+    GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
+    GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
+    GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
+    GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
+    GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
+    GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
+    GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
+
+#undef GET_FEATURE_ID
 
     return hwcaps;
 }
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index cf4511b..15b03e1 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -9544,7 +9544,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
              * even though the current architectural maximum is VQ=16.
              */
             ret = -TARGET_EINVAL;
-            if (arm_feature(cpu_env, ARM_FEATURE_SVE)
+            if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
                 CPUARMState *env = cpu_env;
                 ARMCPU *cpu = arm_env_get_cpu(env);
@@ -9563,9 +9563,11 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
             return ret;
         case TARGET_PR_SVE_GET_VL:
             ret = -TARGET_EINVAL;
-            if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
-                CPUARMState *env = cpu_env;
-                ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
+            {
+                ARMCPU *cpu = arm_env_get_cpu(cpu_env);
+                if (cpu_isar_feature(aa64_sve, cpu)) {
+                    ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
+                }
             }
             return ret;
 #endif /* AARCH64 */
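
For reference, each GET_FEATURE_ID() invocation in the elfload.c hunks
above is a thin guard around a hwcap update; expanding one call by hand
(illustrative only):

    /* GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE); expands to: */
    do {
        if (cpu_isar_feature(aa64_sve, cpu)) {
            hwcaps |= ARM_HWCAP_A64_SVE;
        }
    } while (0);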