From d26f2f93c1853810fad7da7faa2fa1d590c1017b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 13 Sep 2021 16:07:22 +0100 Subject: hw/arm/virt: KVM: Probe for KVM_CAP_ARM_VM_IPA_SIZE when creating scratch VM Although we probe for the IPA limits imposed by KVM (and the hardware) when computing the memory map, we still use the old style '0' when creating a scratch VM in kvm_arm_create_scratch_host_vcpu(). On systems that are severely IPA challenged (such as the Apple M1), this results in a failure as KVM cannot use the default 40bit that '0' represents. Instead, probe for the extension and use the reported IPA limit if available. Cc: Andrew Jones Cc: Eric Auger Cc: Peter Maydell Signed-off-by: Marc Zyngier Reviewed-by: Andrew Jones Message-id: 20210822144441.1290891-2-maz@kernel.org Signed-off-by: Peter Maydell --- target/arm/kvm.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'target') diff --git a/target/arm/kvm.c b/target/arm/kvm.c index 5d55de1..94b970b 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -70,12 +70,17 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, struct kvm_vcpu_init *init) { int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1; + int max_vm_pa_size; kvmfd = qemu_open_old("/dev/kvm", O_RDWR); if (kvmfd < 0) { goto err; } - vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0); + max_vm_pa_size = ioctl(kvmfd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE); + if (max_vm_pa_size < 0) { + max_vm_pa_size = 0; + } + vmfd = ioctl(kvmfd, KVM_CREATE_VM, max_vm_pa_size); if (vmfd < 0) { goto err; } -- cgit v1.1 From 0e5c1c9a230e20d212ae9730e1c59c7fd36bdc96 Mon Sep 17 00:00:00 2001 From: Shashi Mallela Date: Mon, 13 Sep 2021 16:07:24 +0100 Subject: hw/arm/virt: add ITS support in virt GIC Included creation of ITS as part of virt platform GIC initialization. This Emulated ITS model now co-exists with kvm ITS and is enabled in absence of kvm irq kernel support in a platform. Signed-off-by: Shashi Mallela Reviewed-by: Peter Maydell Message-id: 20210910143951.92242-9-shashi.mallela@linaro.org Signed-off-by: Peter Maydell --- target/arm/kvm_arm.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'target') diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h index 34f8daa..0613454 100644 --- a/target/arm/kvm_arm.h +++ b/target/arm/kvm_arm.h @@ -525,8 +525,8 @@ static inline const char *its_class_name(void) /* KVM implementation requires this capability */ return kvm_direct_msi_enabled() ? "arm-its-kvm" : NULL; } else { - /* Software emulation is not implemented yet */ - return NULL; + /* Software emulation based model */ + return "arm-gicv3-its"; } } -- cgit v1.1 From 520d1621de30eecd0869dfd51ae1ff1a9ba988d9 Mon Sep 17 00:00:00 2001 From: Peter Maydell Date: Mon, 13 Sep 2021 16:07:24 +0100 Subject: target/arm: Take an exception if PSTATE.IL is set In v8A, the PSTATE.IL bit is set for various kinds of illegal exception return or mode-change attempts. We already set PSTATE.IL (or its AArch32 equivalent CPSR.IL) in all those cases, but we weren't implementing the part of the behaviour where attempting to execute an instruction with PSTATE.IL takes an immediate exception with an appropriate syndrome value. Add a new TB flags bit tracking PSTATE.IL/CPSR.IL, and generate code to take an exception instead of whatever the instruction would have been. PSTATE.IL and CPSR.IL change only on exception entry, attempted exception exit, and various AArch32 mode changes via cpsr_write(). 
These places generally already rebuild the hflags, so the only place we need an extra rebuild_hflags call is in the illegal-return codepath of the AArch64 exception_return helper. Signed-off-by: Peter Maydell Signed-off-by: Richard Henderson Reviewed-by: Richard Henderson Message-id: 20210821195958.41312-2-richard.henderson@linaro.org Message-Id: <20210817162118.24319-1-peter.maydell@linaro.org> Reviewed-by: Richard Henderson [rth: Added missing returns; set IL bit in syndrome] Signed-off-by: Richard Henderson --- target/arm/cpu.h | 1 + target/arm/helper-a64.c | 1 + target/arm/helper.c | 8 ++++++++ target/arm/syndrome.h | 5 +++++ target/arm/translate-a64.c | 11 +++++++++++ target/arm/translate.c | 21 +++++++++++++++++++++ target/arm/translate.h | 2 ++ 7 files changed, 49 insertions(+) (limited to 'target') diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 6a987f6..fb0ef1e 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -3455,6 +3455,7 @@ FIELD(TBFLAG_ANY, FPEXC_EL, 8, 2) FIELD(TBFLAG_ANY, DEBUG_TARGET_EL, 10, 2) /* Memory operations require alignment: SCTLR_ELx.A or CCR.UNALIGN_TRP */ FIELD(TBFLAG_ANY, ALIGN_MEM, 12, 1) +FIELD(TBFLAG_ANY, PSTATE__IL, 13, 1) /* * Bit usage when in AArch32 state, both A- and M-profile. diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c index 26f79f9..19445b3 100644 --- a/target/arm/helper-a64.c +++ b/target/arm/helper-a64.c @@ -1071,6 +1071,7 @@ illegal_return: if (!arm_singlestep_active(env)) { env->pstate &= ~PSTATE_SS; } + helper_rebuild_hflags_a64(env, cur_el); qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: " "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc); } diff --git a/target/arm/helper.c b/target/arm/helper.c index a7ae781..b210da2 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -13462,6 +13462,10 @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el, DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1); } + if (env->uncached_cpsr & CPSR_IL) { + DP_TBFLAG_ANY(flags, PSTATE__IL, 1); + } + return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); } @@ -13556,6 +13560,10 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, } } + if (env->pstate & PSTATE_IL) { + DP_TBFLAG_ANY(flags, PSTATE__IL, 1); + } + if (cpu_isar_feature(aa64_mte, env_archcpu(env))) { /* * Set MTE_ACTIVE if any access may be Checked, and leave clear diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h index 8dd88a0..f30f413 100644 --- a/target/arm/syndrome.h +++ b/target/arm/syndrome.h @@ -277,4 +277,9 @@ static inline uint32_t syn_wfx(int cv, int cond, int ti, bool is_16bit) (cv << 24) | (cond << 20) | ti; } +static inline uint32_t syn_illegalstate(void) +{ + return (EC_ILLEGALSTATE << ARM_EL_EC_SHIFT) | ARM_EL_IL; +} + #endif /* TARGET_ARM_SYNDROME_H */ diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index 422e2ac..230cc8d 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -14662,6 +14662,16 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s) s->fp_access_checked = false; s->sve_access_checked = false; + if (s->pstate_il) { + /* + * Illegal execution state. This has priority over BTI + * exceptions, but comes after instruction abort exceptions. 
+ */ + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, + syn_illegalstate(), default_exception_el(s)); + return; + } + if (dc_isar_feature(aa64_bti, s)) { if (s->base.num_insns == 1) { /* @@ -14780,6 +14790,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, #endif dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL); dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM); + dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL); dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL); dc->sve_len = (EX_TBFLAG_A64(tb_flags, ZCR_LEN) + 1) * 16; dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE); diff --git a/target/arm/translate.c b/target/arm/translate.c index 24b7f49..435c659 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -9090,6 +9090,16 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) return; } + if (s->pstate_il) { + /* + * Illegal execution state. This has priority over BTI + * exceptions, but comes after instruction abort exceptions. + */ + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, + syn_illegalstate(), default_exception_el(s)); + return; + } + if (cond == 0xf) { /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we * choose to UNDEF. In ARMv5 and above the space is used @@ -9358,6 +9368,7 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) #endif dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL); dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM); + dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL); if (arm_feature(env, ARM_FEATURE_M)) { dc->vfp_enabled = 1; @@ -9621,6 +9632,16 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) } dc->insn = insn; + if (dc->pstate_il) { + /* + * Illegal execution state. This has priority over BTI + * exceptions, but comes after instruction abort exceptions. + */ + gen_exception_insn(dc, dc->pc_curr, EXCP_UDEF, + syn_illegalstate(), default_exception_el(dc)); + return; + } + if (dc->eci) { /* * For M-profile continuable instructions, ECI/ICI handling diff --git a/target/arm/translate.h b/target/arm/translate.h index 8636c20..605d1f2 100644 --- a/target/arm/translate.h +++ b/target/arm/translate.h @@ -98,6 +98,8 @@ typedef struct DisasContext { bool hstr_active; /* True if memory operations require alignment */ bool align_mem; + /* True if PSTATE.IL is set */ + bool pstate_il; /* * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI. * < 0, set by the current instruction. -- cgit v1.1 From bc7edccae0fdee76e06072d699b7c7de8d3aed83 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 13 Sep 2021 16:07:25 +0100 Subject: target/arm: Merge disas_a64_insn into aarch64_tr_translate_insn It is confusing to have different exits from translation for various conditions in separate functions. Merge disas_a64_insn into its only caller. Standardize on the "s" name for the DisasContext, as the code from disas_a64_insn had more instances. 
Reviewed-by: Peter Maydell Signed-off-by: Richard Henderson Message-id: 20210821195958.41312-3-richard.henderson@linaro.org Signed-off-by: Peter Maydell --- target/arm/translate-a64.c | 224 ++++++++++++++++++++++----------------------- 1 file changed, 109 insertions(+), 115 deletions(-) (limited to 'target') diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index 230cc8d..333bc83 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -14649,113 +14649,6 @@ static bool btype_destination_ok(uint32_t insn, bool bt, int btype) return false; } -/* C3.1 A64 instruction index by encoding */ -static void disas_a64_insn(CPUARMState *env, DisasContext *s) -{ - uint32_t insn; - - s->pc_curr = s->base.pc_next; - insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b); - s->insn = insn; - s->base.pc_next += 4; - - s->fp_access_checked = false; - s->sve_access_checked = false; - - if (s->pstate_il) { - /* - * Illegal execution state. This has priority over BTI - * exceptions, but comes after instruction abort exceptions. - */ - gen_exception_insn(s, s->pc_curr, EXCP_UDEF, - syn_illegalstate(), default_exception_el(s)); - return; - } - - if (dc_isar_feature(aa64_bti, s)) { - if (s->base.num_insns == 1) { - /* - * At the first insn of the TB, compute s->guarded_page. - * We delayed computing this until successfully reading - * the first insn of the TB, above. This (mostly) ensures - * that the softmmu tlb entry has been populated, and the - * page table GP bit is available. - * - * Note that we need to compute this even if btype == 0, - * because this value is used for BR instructions later - * where ENV is not available. - */ - s->guarded_page = is_guarded_page(env, s); - - /* First insn can have btype set to non-zero. */ - tcg_debug_assert(s->btype >= 0); - - /* - * Note that the Branch Target Exception has fairly high - * priority -- below debugging exceptions but above most - * everything else. This allows us to handle this now - * instead of waiting until the insn is otherwise decoded. - */ - if (s->btype != 0 - && s->guarded_page - && !btype_destination_ok(insn, s->bt, s->btype)) { - gen_exception_insn(s, s->pc_curr, EXCP_UDEF, - syn_btitrap(s->btype), - default_exception_el(s)); - return; - } - } else { - /* Not the first insn: btype must be 0. */ - tcg_debug_assert(s->btype == 0); - } - } - - switch (extract32(insn, 25, 4)) { - case 0x0: case 0x1: case 0x3: /* UNALLOCATED */ - unallocated_encoding(s); - break; - case 0x2: - if (!dc_isar_feature(aa64_sve, s) || !disas_sve(s, insn)) { - unallocated_encoding(s); - } - break; - case 0x8: case 0x9: /* Data processing - immediate */ - disas_data_proc_imm(s, insn); - break; - case 0xa: case 0xb: /* Branch, exception generation and system insns */ - disas_b_exc_sys(s, insn); - break; - case 0x4: - case 0x6: - case 0xc: - case 0xe: /* Loads and stores */ - disas_ldst(s, insn); - break; - case 0x5: - case 0xd: /* Data processing - register */ - disas_data_proc_reg(s, insn); - break; - case 0x7: - case 0xf: /* Data processing - SIMD and floating point */ - disas_data_proc_simd_fp(s, insn); - break; - default: - assert(FALSE); /* all 15 cases should be handled above */ - break; - } - - /* if we allocated any temporaries, free them here */ - free_tmp_a64(s); - - /* - * After execution of most insns, btype is reset to 0. - * Note that we set btype == -1 when the insn sets btype. 
- */ - if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) { - reset_btype(s); - } -} - static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) { @@ -14857,10 +14750,11 @@ static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { - DisasContext *dc = container_of(dcbase, DisasContext, base); + DisasContext *s = container_of(dcbase, DisasContext, base); CPUARMState *env = cpu->env_ptr; + uint32_t insn; - if (dc->ss_active && !dc->pstate_ss) { + if (s->ss_active && !s->pstate_ss) { /* Singlestep state is Active-pending. * If we're in this state at the start of a TB then either * a) we just took an exception to an EL which is being debugged @@ -14871,14 +14765,114 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) * "did not step an insn" case, and so the syndrome ISV and EX * bits should be zero. */ - assert(dc->base.num_insns == 1); - gen_swstep_exception(dc, 0, 0); - dc->base.is_jmp = DISAS_NORETURN; - } else { - disas_a64_insn(env, dc); + assert(s->base.num_insns == 1); + gen_swstep_exception(s, 0, 0); + s->base.is_jmp = DISAS_NORETURN; + return; + } + + s->pc_curr = s->base.pc_next; + insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b); + s->insn = insn; + s->base.pc_next += 4; + + s->fp_access_checked = false; + s->sve_access_checked = false; + + if (s->pstate_il) { + /* + * Illegal execution state. This has priority over BTI + * exceptions, but comes after instruction abort exceptions. + */ + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, + syn_illegalstate(), default_exception_el(s)); + return; + } + + if (dc_isar_feature(aa64_bti, s)) { + if (s->base.num_insns == 1) { + /* + * At the first insn of the TB, compute s->guarded_page. + * We delayed computing this until successfully reading + * the first insn of the TB, above. This (mostly) ensures + * that the softmmu tlb entry has been populated, and the + * page table GP bit is available. + * + * Note that we need to compute this even if btype == 0, + * because this value is used for BR instructions later + * where ENV is not available. + */ + s->guarded_page = is_guarded_page(env, s); + + /* First insn can have btype set to non-zero. */ + tcg_debug_assert(s->btype >= 0); + + /* + * Note that the Branch Target Exception has fairly high + * priority -- below debugging exceptions but above most + * everything else. This allows us to handle this now + * instead of waiting until the insn is otherwise decoded. + */ + if (s->btype != 0 + && s->guarded_page + && !btype_destination_ok(insn, s->bt, s->btype)) { + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, + syn_btitrap(s->btype), + default_exception_el(s)); + return; + } + } else { + /* Not the first insn: btype must be 0. 
*/ + tcg_debug_assert(s->btype == 0); + } + } + + switch (extract32(insn, 25, 4)) { + case 0x0: case 0x1: case 0x3: /* UNALLOCATED */ + unallocated_encoding(s); + break; + case 0x2: + if (!dc_isar_feature(aa64_sve, s) || !disas_sve(s, insn)) { + unallocated_encoding(s); + } + break; + case 0x8: case 0x9: /* Data processing - immediate */ + disas_data_proc_imm(s, insn); + break; + case 0xa: case 0xb: /* Branch, exception generation and system insns */ + disas_b_exc_sys(s, insn); + break; + case 0x4: + case 0x6: + case 0xc: + case 0xe: /* Loads and stores */ + disas_ldst(s, insn); + break; + case 0x5: + case 0xd: /* Data processing - register */ + disas_data_proc_reg(s, insn); + break; + case 0x7: + case 0xf: /* Data processing - SIMD and floating point */ + disas_data_proc_simd_fp(s, insn); + break; + default: + assert(FALSE); /* all 15 cases should be handled above */ + break; + } + + /* if we allocated any temporaries, free them here */ + free_tmp_a64(s); + + /* + * After execution of most insns, btype is reset to 0. + * Note that we set btype == -1 when the insn sets btype. + */ + if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) { + reset_btype(s); } - translator_loop_temp_check(&dc->base); + translator_loop_temp_check(&s->base); } static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) -- cgit v1.1
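
As background for the first patch in this series, the scratch-VM fix reduces to a small
userspace pattern against the KVM API: ask the kernel for KVM_CAP_ARM_VM_IPA_SIZE via
KVM_CHECK_EXTENSION on the /dev/kvm fd and, if the capability is reported, pass the returned
IPA limit as the machine-type argument of KVM_CREATE_VM. The following is a minimal
standalone C sketch of that pattern, not QEMU code; it assumes a Linux host whose headers
define KVM_CAP_ARM_VM_IPA_SIZE, and error handling is kept to a bare minimum.

    /* Sketch only, not part of the patches above: probe the host's IPA limit
     * and use it when creating a VM, falling back to 0 (the legacy 40-bit
     * default) if the extension is not available. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvmfd = open("/dev/kvm", O_RDWR);
        if (kvmfd < 0) {
            perror("open /dev/kvm");
            return 1;
        }

        /* Returns the maximum IPA size in bits, 0 if the capability is
         * absent, or a negative value on error. */
        int ipa_bits = ioctl(kvmfd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
        if (ipa_bits < 0) {
            ipa_bits = 0;   /* old kernels: keep the default VM type */
        }

        /* On arm64 the "type" argument of KVM_CREATE_VM carries the
         * requested IPA size in its low bits, so the probed value can be
         * passed through directly, exactly as the patch does. */
        int vmfd = ioctl(kvmfd, KVM_CREATE_VM, ipa_bits);
        if (vmfd < 0) {
            perror("KVM_CREATE_VM");
            close(kvmfd);
            return 1;
        }

        printf("created VM with an IPA limit of %d bits\n",
               ipa_bits ? ipa_bits : 40);
        close(vmfd);
        close(kvmfd);
        return 0;
    }

On hosts such as the Apple M1, where the supported IPA range is smaller than the 40-bit
default that a type of 0 requests, skipping this probe makes KVM_CREATE_VM fail outright,
which is the failure mode the first patch addresses.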