Diffstat (limited to 'target/arm/helper.c')
-rw-r--r-- | target/arm/helper.c | 2208
1 file changed, 455 insertions, 1753 deletions
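Most hunks in this diff do one of two mechanical things: they retire the generic access-check results (CP_ACCESS_TRAP becomes CP_ACCESS_TRAP_EL1, CP_ACCESS_TRAP_UNCATEGORIZED becomes CP_ACCESS_UNDEFINED), and they drop a large number of TLB-invalidate write handlers and their reginfo entries from this file (hence 1753 deletions against 455 insertions). As a reading aid, the sketch below shows the general shape of a coprocessor-register access hook using the renamed result values; the function name is invented for illustration, the QEMU-internal declarations (CPUARMState, ARMCPRegInfo, CPAccessResult, arm_current_el, arm_sctlr, arm_is_secure_below_el3) are assumed to come from cpu.h and cpregs.h, and this is not code taken from the commit itself.

/*
 * Illustrative sketch only (not from this commit): the shape of a
 * cpreg access-check hook after the rename, returning the explicit
 * CP_ACCESS_TRAP_EL1 / CP_ACCESS_UNDEFINED values seen in the hunks below.
 */
static CPAccessResult example_access_check(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* EL0 access traps to EL1 unless SCTLR_EL1.UCI permits it */
    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UCI)) {
        return CP_ACCESS_TRAP_EL1;   /* was CP_ACCESS_TRAP */
    }
    /* A case that UNDEFs outright (modelled on access_el3_aa32ns below) */
    if (arm_current_el(env) == 3 && arm_is_secure_below_el3(env)) {
        return CP_ACCESS_UNDEFINED;  /* was CP_ACCESS_TRAP_UNCATEGORIZED */
    }
    return CP_ACCESS_OK;
}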
diff --git a/target/arm/helper.c b/target/arm/helper.c index 8fb4b47..c311d2d 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -12,26 +12,32 @@ #include "cpu.h" #include "internals.h" #include "cpu-features.h" -#include "exec/helper-proto.h" +#include "exec/page-protection.h" +#include "exec/mmap-lock.h" #include "qemu/main-loop.h" #include "qemu/timer.h" #include "qemu/bitops.h" -#include "qemu/crc32c.h" #include "qemu/qemu-print.h" -#include "exec/exec-all.h" -#include <zlib.h> /* For crc32 */ +#include "exec/cputlb.h" +#include "exec/translation-block.h" #include "hw/irq.h" -#include "sysemu/cpu-timers.h" -#include "sysemu/kvm.h" -#include "sysemu/tcg.h" +#include "system/cpu-timers.h" +#include "exec/icount.h" +#include "system/kvm.h" +#include "system/tcg.h" #include "qapi/error.h" #include "qemu/guest-random.h" #ifdef CONFIG_TCG +#include "accel/tcg/probe.h" +#include "accel/tcg/getpc.h" #include "semihosting/common-semi.h" #endif #include "cpregs.h" #include "target/arm/gtimer.h" +#define HELPER_H "tcg/helper.h" +#include "exec/helper-proto.h.inc" + #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */ static void switch_mode(CPUARMState *env, int mode); @@ -219,7 +225,7 @@ static void count_cpreg(gpointer key, gpointer opaque) } } -static gint cpreg_key_compare(gconstpointer a, gconstpointer b) +static gint cpreg_key_compare(gconstpointer a, gconstpointer b, gpointer d) { uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a); uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b); @@ -243,7 +249,7 @@ void init_cpreg_list(ARMCPU *cpu) int arraylen; keys = g_hash_table_get_keys(cpu->cp_regs); - keys = g_list_sort(keys, cpreg_key_compare); + keys = g_list_sort_with_data(keys, cpreg_key_compare, NULL); cpu->cpreg_array_len = 0; @@ -285,7 +291,7 @@ static CPAccessResult access_el3_aa32ns(CPUARMState *env, { if (!is_a64(env) && arm_current_el(env) == 3 && arm_is_secure_below_el3(env)) { - return CP_ACCESS_TRAP_UNCATEGORIZED; + return CP_ACCESS_UNDEFINED; } return CP_ACCESS_OK; } @@ -310,7 +316,7 @@ static CPAccessResult access_trap_aa32s_el1(CPUARMState *env, return CP_ACCESS_TRAP_EL3; } /* This will be EL1 NS and EL2 NS, which just UNDEF */ - return CP_ACCESS_TRAP_UNCATEGORIZED; + return CP_ACCESS_UNDEFINED; } /* @@ -365,40 +371,6 @@ static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri, return CP_ACCESS_OK; } -/* Check for traps from EL1 due to HCR_EL2.TTLB. */ -static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ - if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) { - return CP_ACCESS_TRAP_EL2; - } - return CP_ACCESS_OK; -} - -/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBIS. */ -static CPAccessResult access_ttlbis(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ - if (arm_current_el(env) == 1 && - (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBIS))) { - return CP_ACCESS_TRAP_EL2; - } - return CP_ACCESS_OK; -} - -#ifdef TARGET_AARCH64 -/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBOS. 
*/ -static CPAccessResult access_ttlbos(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ - if (arm_current_el(env) == 1 && - (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBOS))) { - return CP_ACCESS_TRAP_EL2; - } - return CP_ACCESS_OK; -} -#endif - static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = env_archcpu(env); @@ -438,12 +410,15 @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, raw_write(env, ri, value); } -static int alle1_tlbmask(CPUARMState *env) +int alle1_tlbmask(CPUARMState *env) { /* * Note that the 'ALL' scope must invalidate both stage 1 and * stage 2 translations, whereas most other scopes only invalidate * stage 1 translations. + * + * For AArch32 this is only used for TLBIALLNSNH and VTTBR + * writes, so only needs to apply to NS PL1&0, not S PL1&0. */ return (ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_1_PAN | @@ -452,174 +427,6 @@ static int alle1_tlbmask(CPUARMState *env) ARMMMUIdxBit_Stage2_S); } - -/* IS variants of TLB operations must affect all cores */ -static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_all_cpus_synced(cs); -} - -static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_all_cpus_synced(cs); -} - -static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); -} - -static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); -} - -/* - * Non-IS variants of TLB operations are upgraded to - * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to - * force broadcast of these operations. 
- */ -static bool tlb_force_broadcast(CPUARMState *env) -{ - return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB); -} - -static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Invalidate all (TLBIALL) */ - CPUState *cs = env_cpu(env); - - if (tlb_force_broadcast(env)) { - tlb_flush_all_cpus_synced(cs); - } else { - tlb_flush(cs); - } -} - -static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ - CPUState *cs = env_cpu(env); - - value &= TARGET_PAGE_MASK; - if (tlb_force_broadcast(env)) { - tlb_flush_page_all_cpus_synced(cs, value); - } else { - tlb_flush_page(cs, value); - } -} - -static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Invalidate by ASID (TLBIASID) */ - CPUState *cs = env_cpu(env); - - if (tlb_force_broadcast(env)) { - tlb_flush_all_cpus_synced(cs); - } else { - tlb_flush(cs); - } -} - -static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ - CPUState *cs = env_cpu(env); - - value &= TARGET_PAGE_MASK; - if (tlb_force_broadcast(env)) { - tlb_flush_page_all_cpus_synced(cs, value); - } else { - tlb_flush_page(cs, value); - } -} - -static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_by_mmuidx(cs, alle1_tlbmask(env)); -} - -static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_by_mmuidx_all_cpus_synced(cs, alle1_tlbmask(env)); -} - - -static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2); -} - -static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2); -} - -static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); - - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2); -} - -static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); - - tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, - ARMMMUIdxBit_E2); -} - -static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12; - - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2); -} - -static void tlbiipas2is_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12; - - tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_Stage2); -} - static const ARMCPRegInfo cp_reginfo[] = { /* * Define the secure and non-secure FCSE identifier CP registers @@ -729,22 +536,6 @@ static const ARMCPRegInfo not_v7_cp_reginfo[] = { */ { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 }, - /* - * MMU TLB control. 
Note that the wildcarding means we cover not just - * the unified TLB ops but also the dside/iside/inner-shareable variants. - */ - { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY, - .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write, - .type = ARM_CP_NO_RAW }, - { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY, - .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write, - .type = ARM_CP_NO_RAW }, - { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY, - .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write, - .type = ARM_CP_NO_RAW }, - { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY, - .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write, - .type = ARM_CP_NO_RAW }, { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP }, { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2, @@ -1096,7 +887,7 @@ static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) { - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } if (el < 2 && (mdcr_el2 & MDCR_TPM)) { return CP_ACCESS_TRAP_EL2; @@ -2113,7 +1904,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), .accessfn = pmreg_access, .fgt = FGT_PMCNTEN, - .writefn = pmcntenclr_write, + .writefn = pmcntenclr_write, .raw_writefn = raw_write, .type = ARM_CP_ALIAS | ARM_CP_IO }, { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2, @@ -2121,7 +1912,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { .fgt = FGT_PMCNTEN, .type = ARM_CP_ALIAS | ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), - .writefn = pmcntenclr_write }, + .writefn = pmcntenclr_write, .raw_writefn = raw_write }, { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, .access = PL0_RW, .type = ARM_CP_IO, .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), @@ -2238,16 +2029,16 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, .access = PL1_RW, .accessfn = access_tpm, .fgt = FGT_PMINTEN, - .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW, + .type = ARM_CP_ALIAS | ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), - .writefn = pmintenclr_write, }, + .writefn = pmintenclr_write, .raw_writefn = raw_write }, { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2, .access = PL1_RW, .accessfn = access_tpm, .fgt = FGT_PMINTEN, - .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW, + .type = ARM_CP_ALIAS | ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), - .writefn = pmintenclr_write }, + .writefn = pmintenclr_write, .raw_writefn = raw_write }, { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, .access = PL1_R, @@ -2328,55 +2119,6 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0, .fgt = FGT_ISR_EL1, .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read }, - /* 32 bit ITLB invalidates */ - { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbiall_write }, - { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, - .type = 
ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbimva_write }, - { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbiasid_write }, - /* 32 bit DTLB invalidates */ - { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbiall_write }, - { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbimva_write }, - { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbiasid_write }, - /* 32 bit TLB invalidates */ - { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbiall_write }, - { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbimva_write }, - { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbiasid_write }, - { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbimvaa_write }, -}; - -static const ARMCPRegInfo v7mp_cp_reginfo[] = { - /* 32 bit TLB invalidates, Inner Shareable */ - { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, - .writefn = tlbiall_is_write }, - { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, - .writefn = tlbimva_is_write }, - { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, - .writefn = tlbiasid_is_write }, - { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, - .writefn = tlbimvaa_is_write }, }; static const ARMCPRegInfo pmovsset_cp_reginfo[] = { @@ -2423,7 +2165,7 @@ static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_current_el(env) == 0 && (env->teecr & 1)) { - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } return teecr_access(env, ri, isread); } @@ -2503,14 +2245,14 @@ static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri, cntkctl = env->cp15.c14_cntkctl; } if (!extract32(cntkctl, 0, 2)) { - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } break; case 1: if (!isread && ri->state == ARM_CP_STATE_AA32 && arm_is_secure_below_el3(env)) { /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) 
*/ - return CP_ACCESS_TRAP_UNCATEGORIZED; + return CP_ACCESS_UNDEFINED; } break; case 2: @@ -2519,7 +2261,7 @@ static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri, } if (!isread && el < arm_highest_el(env)) { - return CP_ACCESS_TRAP_UNCATEGORIZED; + return CP_ACCESS_UNDEFINED; } return CP_ACCESS_OK; @@ -2542,7 +2284,7 @@ static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx, /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */ if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) { - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } /* fall through */ case 1: @@ -2583,7 +2325,7 @@ static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx, * EL0 if EL0[PV]TEN is zero. */ if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } /* fall through */ @@ -2649,7 +2391,10 @@ static CPAccessResult gt_stimer_access(CPUARMState *env, switch (arm_current_el(env)) { case 1: if (!arm_is_secure(env)) { - return CP_ACCESS_TRAP; + return CP_ACCESS_UNDEFINED; + } + if (arm_is_el2_enabled(env)) { + return CP_ACCESS_UNDEFINED; } if (!(env->cp15.scr_el3 & SCR_ST)) { return CP_ACCESS_TRAP_EL3; @@ -2657,7 +2402,7 @@ static CPAccessResult gt_stimer_access(CPUARMState *env, return CP_ACCESS_OK; case 0: case 2: - return CP_ACCESS_TRAP; + return CP_ACCESS_UNDEFINED; case 3: return CP_ACCESS_OK; default: @@ -2665,6 +2410,45 @@ static CPAccessResult gt_stimer_access(CPUARMState *env, } } +static CPAccessResult gt_sel2timer_access(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + /* + * The AArch64 register view of the secure EL2 timers are mostly + * accessible from EL3 and EL2 although can also be trapped to EL2 + * from EL1 depending on nested virt config. + */ + switch (arm_current_el(env)) { + case 0: /* UNDEFINED */ + return CP_ACCESS_UNDEFINED; + case 1: + if (!arm_is_secure(env)) { + /* UNDEFINED */ + return CP_ACCESS_UNDEFINED; + } else if (arm_hcr_el2_eff(env) & HCR_NV) { + /* Aarch64.SystemAccessTrap(EL2, 0x18) */ + return CP_ACCESS_TRAP_EL2; + } + /* UNDEFINED */ + return CP_ACCESS_UNDEFINED; + case 2: + if (!arm_is_secure(env)) { + /* UNDEFINED */ + return CP_ACCESS_UNDEFINED; + } + return CP_ACCESS_OK; + case 3: + if (env->cp15.scr_el3 & SCR_EEL2) { + return CP_ACCESS_OK; + } else { + return CP_ACCESS_UNDEFINED; + } + default: + g_assert_not_reached(); + } +} + uint64_t gt_get_countervalue(CPUARMState *env) { ARMCPU *cpu = env_archcpu(env); @@ -2716,12 +2500,80 @@ static uint64_t gt_phys_raw_cnt_offset(CPUARMState *env) return 0; } -static uint64_t gt_phys_cnt_offset(CPUARMState *env) +static uint64_t gt_indirect_access_timer_offset(CPUARMState *env, int timeridx) { - if (arm_current_el(env) >= 2) { + /* + * Return the timer offset to use for indirect accesses to the timer. + * This is the Offset value as defined in D12.2.4.1 "Operation of the + * CompareValue views of the timers". + * + * The condition here is not always the same as the condition for + * whether to apply an offset register when doing a direct read of + * the counter sysreg; those conditions are described in the + * access pseudocode for each counter register. 
+ */ + switch (timeridx) { + case GTIMER_PHYS: + return gt_phys_raw_cnt_offset(env); + case GTIMER_VIRT: + return env->cp15.cntvoff_el2; + case GTIMER_HYP: + case GTIMER_SEC: + case GTIMER_HYPVIRT: + case GTIMER_S_EL2_PHYS: + case GTIMER_S_EL2_VIRT: + return 0; + default: + g_assert_not_reached(); + } +} + +uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx) +{ + /* + * Return the timer offset to use for direct accesses to the + * counter registers CNTPCT and CNTVCT, and for direct accesses + * to the CNT*_TVAL registers. + * + * This isn't exactly the same as the indirect-access offset, + * because here we also care about what EL the register access + * is being made from. + * + * This corresponds to the access pseudocode for the registers. + */ + uint64_t hcr; + + switch (timeridx) { + case GTIMER_PHYS: + if (arm_current_el(env) >= 2) { + return 0; + } + return gt_phys_raw_cnt_offset(env); + case GTIMER_VIRT: + switch (arm_current_el(env)) { + case 2: + hcr = arm_hcr_el2_eff(env); + if (hcr & HCR_E2H) { + return 0; + } + break; + case 0: + hcr = arm_hcr_el2_eff(env); + if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { + return 0; + } + break; + } + return env->cp15.cntvoff_el2; + case GTIMER_HYP: + case GTIMER_SEC: + case GTIMER_HYPVIRT: + case GTIMER_S_EL2_PHYS: + case GTIMER_S_EL2_VIRT: return 0; + default: + g_assert_not_reached(); } - return gt_phys_raw_cnt_offset(env); } static void gt_recalc_timer(ARMCPU *cpu, int timeridx) @@ -2733,8 +2585,7 @@ static void gt_recalc_timer(ARMCPU *cpu, int timeridx) * Timer enabled: calculate and set current ISTATUS, irq, and * reset timer to when ISTATUS next has to change */ - uint64_t offset = timeridx == GTIMER_VIRT ? - cpu->env.cp15.cntvoff_el2 : gt_phys_raw_cnt_offset(&cpu->env); + uint64_t offset = gt_indirect_access_timer_offset(&cpu->env, timeridx); uint64_t count = gt_get_countervalue(&cpu->env); /* Note that this must be unsigned 64 bit arithmetic: */ int istatus = count - offset >= gt->cval; @@ -2797,34 +2648,14 @@ static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) { - return gt_get_countervalue(env) - gt_phys_cnt_offset(env); -} - -uint64_t gt_virt_cnt_offset(CPUARMState *env) -{ - uint64_t hcr; - - switch (arm_current_el(env)) { - case 2: - hcr = arm_hcr_el2_eff(env); - if (hcr & HCR_E2H) { - return 0; - } - break; - case 0: - hcr = arm_hcr_el2_eff(env); - if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { - return 0; - } - break; - } - - return env->cp15.cntvoff_el2; + uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_PHYS); + return gt_get_countervalue(env) - offset; } static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) { - return gt_get_countervalue(env) - gt_virt_cnt_offset(env); + uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_VIRT); + return gt_get_countervalue(env) - offset; } static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -2836,47 +2667,38 @@ static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, gt_recalc_timer(env_archcpu(env), timeridx); } -static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, - int timeridx) +static uint64_t do_tval_read(CPUARMState *env, int timeridx, uint64_t offset) { - uint64_t offset = 0; - - switch (timeridx) { - case GTIMER_VIRT: - case GTIMER_HYPVIRT: - offset = gt_virt_cnt_offset(env); - break; - case GTIMER_PHYS: - offset = gt_phys_cnt_offset(env); - break; - } - return 
(uint32_t)(env->cp15.c14_timer[timeridx].cval - (gt_get_countervalue(env) - offset)); } -static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, - int timeridx, - uint64_t value) +static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, + int timeridx) { - uint64_t offset = 0; + uint64_t offset = gt_direct_access_timer_offset(env, timeridx); - switch (timeridx) { - case GTIMER_VIRT: - case GTIMER_HYPVIRT: - offset = gt_virt_cnt_offset(env); - break; - case GTIMER_PHYS: - offset = gt_phys_cnt_offset(env); - break; - } + return do_tval_read(env, timeridx, offset); +} +static void do_tval_write(CPUARMState *env, int timeridx, uint64_t value, + uint64_t offset) +{ trace_arm_gt_tval_write(timeridx, value); env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + sextract64(value, 0, 32); gt_recalc_timer(env_archcpu(env), timeridx); } +static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + int timeridx, + uint64_t value) +{ + uint64_t offset = gt_direct_access_timer_offset(env, timeridx); + + do_tval_write(env, timeridx, value, offset); +} + static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, int timeridx, uint64_t value) @@ -3006,13 +2828,21 @@ static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) { - return gt_tval_read(env, ri, GTIMER_VIRT); + /* + * This is CNTV_TVAL_EL02; unlike the underlying CNTV_TVAL_EL0 + * we always apply CNTVOFF_EL2. Special case that here rather + * than going into the generic gt_tval_read() and then having + * to re-detect that it's this register. + * Note that the accessfn/perms mean we know we're at EL2 or EL3 here. + */ + return do_tval_read(env, GTIMER_VIRT, env->cp15.cntvoff_el2); } static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - gt_tval_write(env, ri, GTIMER_VIRT, value); + /* Similarly for writes to CNTV_TVAL_EL02 */ + do_tval_write(env, GTIMER_VIRT, value, env->cp15.cntvoff_el2); } static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -3172,6 +3002,62 @@ static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, gt_ctl_write(env, ri, GTIMER_SEC, value); } +static void gt_sec_pel2_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) +{ + gt_timer_reset(env, ri, GTIMER_S_EL2_PHYS); +} + +static void gt_sec_pel2_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_cval_write(env, ri, GTIMER_S_EL2_PHYS, value); +} + +static uint64_t gt_sec_pel2_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return gt_tval_read(env, ri, GTIMER_S_EL2_PHYS); +} + +static void gt_sec_pel2_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_tval_write(env, ri, GTIMER_S_EL2_PHYS, value); +} + +static void gt_sec_pel2_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_ctl_write(env, ri, GTIMER_S_EL2_PHYS, value); +} + +static void gt_sec_vel2_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) +{ + gt_timer_reset(env, ri, GTIMER_S_EL2_VIRT); +} + +static void gt_sec_vel2_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_cval_write(env, ri, GTIMER_S_EL2_VIRT, value); +} + +static uint64_t gt_sec_vel2_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return gt_tval_read(env, ri, GTIMER_S_EL2_VIRT); +} + +static void gt_sec_vel2_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_tval_write(env, 
ri, GTIMER_S_EL2_VIRT, value); +} + +static void gt_sec_vel2_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + gt_ctl_write(env, ri, GTIMER_S_EL2_VIRT, value); +} + static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) { gt_timer_reset(env, ri, GTIMER_HYPVIRT); @@ -3228,6 +3114,20 @@ void arm_gt_stimer_cb(void *opaque) gt_recalc_timer(cpu, GTIMER_SEC); } +void arm_gt_sel2timer_cb(void *opaque) +{ + ARMCPU *cpu = opaque; + + gt_recalc_timer(cpu, GTIMER_S_EL2_PHYS); +} + +void arm_gt_sel2vtimer_cb(void *opaque) +{ + ARMCPU *cpu = opaque; + + gt_recalc_timer(cpu, GTIMER_S_EL2_VIRT); +} + void arm_gt_hvtimer_cb(void *opaque) { ARMCPU *cpu = opaque; @@ -3568,7 +3468,7 @@ static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, } return CP_ACCESS_TRAP_EL3; } - return CP_ACCESS_TRAP_UNCATEGORIZED; + return CP_ACCESS_UNDEFINED; } } return CP_ACCESS_OK; @@ -3599,11 +3499,12 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value, GetPhysAddrResult res = {}; /* - * I_MXTJT: Granule protection checks are not performed on the final address - * of a successful translation. + * I_MXTJT: Granule protection checks are not performed on the final + * address of a successful translation. This is a translation not a + * memory reference, so "memop = none = 0". */ - ret = get_phys_addr_with_space_nogpc(env, value, access_type, mmu_idx, ss, - &res, &fi); + ret = get_phys_addr_with_space_nogpc(env, value, access_type, 0, + mmu_idx, ss, &res, &fi); /* * ATS operations only do S1 or S1+S2 translations, so we never @@ -3775,7 +3676,11 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */ switch (el) { case 3: - mmu_idx = ARMMMUIdx_E3; + if (ri->crm == 9 && arm_pan_enabled(env)) { + mmu_idx = ARMMMUIdx_E30_3_PAN; + } else { + mmu_idx = ARMMMUIdx_E3; + } break; case 2: g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */ @@ -3795,7 +3700,7 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ switch (el) { case 3: - mmu_idx = ARMMMUIdx_E10_0; + mmu_idx = ARMMMUIdx_E30_0; break; case 2: g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */ @@ -3860,7 +3765,7 @@ static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri, * scr_write() ensures that the NSE bit is not set otherwise. */ if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) { - return CP_ACCESS_TRAP; + return CP_ACCESS_UNDEFINED; } return CP_ACCESS_OK; } @@ -3870,7 +3775,7 @@ static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, { if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) { - return CP_ACCESS_TRAP; + return CP_ACCESS_UNDEFINED; } return at_e012_access(env, ri, isread); } @@ -4758,7 +4663,7 @@ static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) { - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } return CP_ACCESS_OK; } @@ -4848,9 +4753,9 @@ static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env, /* Cache invalidate/clean to Point of Coherency or Persistence... */ switch (arm_current_el(env)) { case 0: - /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */ + /* ... EL0 must trap to EL1 unless SCTLR_EL1.UCI is set. 
*/ if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } /* fall through */ case 1: @@ -4868,9 +4773,9 @@ static CPAccessResult do_cacheop_pou_access(CPUARMState *env, uint64_t hcrflags) /* Cache invalidate/clean to Point of Unification... */ switch (arm_current_el(env)) { case 0: - /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */ + /* ... EL0 must trap to EL1 unless SCTLR_EL1.UCI is set. */ if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } /* fall through */ case 1: @@ -4895,489 +4800,6 @@ static CPAccessResult access_tocu(CPUARMState *env, const ARMCPRegInfo *ri, return do_cacheop_pou_access(env, HCR_TOCU | HCR_TPU); } -/* - * See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions - * Page D4-1736 (DDI0487A.b) - */ - -static int vae1_tlbmask(CPUARMState *env) -{ - uint64_t hcr = arm_hcr_el2_eff(env); - uint16_t mask; - - if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { - mask = ARMMMUIdxBit_E20_2 | - ARMMMUIdxBit_E20_2_PAN | - ARMMMUIdxBit_E20_0; - } else { - mask = ARMMMUIdxBit_E10_1 | - ARMMMUIdxBit_E10_1_PAN | - ARMMMUIdxBit_E10_0; - } - return mask; -} - -static int vae2_tlbmask(CPUARMState *env) -{ - uint64_t hcr = arm_hcr_el2_eff(env); - uint16_t mask; - - if (hcr & HCR_E2H) { - mask = ARMMMUIdxBit_E20_2 | - ARMMMUIdxBit_E20_2_PAN | - ARMMMUIdxBit_E20_0; - } else { - mask = ARMMMUIdxBit_E2; - } - return mask; -} - -/* Return 56 if TBI is enabled, 64 otherwise. */ -static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx, - uint64_t addr) -{ - uint64_t tcr = regime_tcr(env, mmu_idx); - int tbi = aa64_va_parameter_tbi(tcr, mmu_idx); - int select = extract64(addr, 55, 1); - - return (tbi >> select) & 1 ? 56 : 64; -} - -static int vae1_tlbbits(CPUARMState *env, uint64_t addr) -{ - uint64_t hcr = arm_hcr_el2_eff(env); - ARMMMUIdx mmu_idx; - - /* Only the regime of the mmu_idx below is significant. */ - if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { - mmu_idx = ARMMMUIdx_E20_0; - } else { - mmu_idx = ARMMMUIdx_E10_0; - } - - return tlbbits_for_regime(env, mmu_idx, addr); -} - -static int vae2_tlbbits(CPUARMState *env, uint64_t addr) -{ - uint64_t hcr = arm_hcr_el2_eff(env); - ARMMMUIdx mmu_idx; - - /* - * Only the regime of the mmu_idx below is significant. - * Regime EL2&0 has two ranges with separate TBI configuration, while EL2 - * only has one. 
- */ - if (hcr & HCR_E2H) { - mmu_idx = ARMMMUIdx_E20_2; - } else { - mmu_idx = ARMMMUIdx_E2; - } - - return tlbbits_for_regime(env, mmu_idx, addr); -} - -static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - int mask = vae1_tlbmask(env); - - tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); -} - -static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - int mask = vae1_tlbmask(env); - - if (tlb_force_broadcast(env)) { - tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); - } else { - tlb_flush_by_mmuidx(cs, mask); - } -} - -static int e2_tlbmask(CPUARMState *env) -{ - return (ARMMMUIdxBit_E20_0 | - ARMMMUIdxBit_E20_2 | - ARMMMUIdxBit_E20_2_PAN | - ARMMMUIdxBit_E2); -} - -static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - int mask = alle1_tlbmask(env); - - tlb_flush_by_mmuidx(cs, mask); -} - -static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - int mask = e2_tlbmask(env); - - tlb_flush_by_mmuidx(cs, mask); -} - -static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = env_archcpu(env); - CPUState *cs = CPU(cpu); - - tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3); -} - -static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - int mask = alle1_tlbmask(env); - - tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); -} - -static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - int mask = e2_tlbmask(env); - - tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); -} - -static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3); -} - -static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* - * Invalidate by VA, EL2 - * Currently handles both VAE2 and VALE2, since we don't support - * flush-last-level-only. - */ - CPUState *cs = env_cpu(env); - int mask = vae2_tlbmask(env); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - int bits = vae2_tlbbits(env, pageaddr); - - tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits); -} - -static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* - * Invalidate by VA, EL3 - * Currently handles both VAE3 and VALE3, since we don't support - * flush-last-level-only. - */ - ARMCPU *cpu = env_archcpu(env); - CPUState *cs = CPU(cpu); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3); -} - -static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - int mask = vae1_tlbmask(env); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - int bits = vae1_tlbbits(env, pageaddr); - - tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); -} - -static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* - * Invalidate by VA, EL1&0 (AArch64 version). 
- * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, - * since we don't support flush-for-specific-ASID-only or - * flush-last-level-only. - */ - CPUState *cs = env_cpu(env); - int mask = vae1_tlbmask(env); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - int bits = vae1_tlbbits(env, pageaddr); - - if (tlb_force_broadcast(env)) { - tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); - } else { - tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits); - } -} - -static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - int mask = vae2_tlbmask(env); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - int bits = vae2_tlbbits(env, pageaddr); - - tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); -} - -static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr); - - tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, - ARMMMUIdxBit_E3, bits); -} - -static int ipas2e1_tlbmask(CPUARMState *env, int64_t value) -{ - /* - * The MSB of value is the NS field, which only applies if SEL2 - * is implemented and SCR_EL3.NS is not set (i.e. in secure mode). - */ - return (value >= 0 - && cpu_isar_feature(aa64_sel2, env_archcpu(env)) - && arm_is_secure_below_el3(env) - ? ARMMMUIdxBit_Stage2_S - : ARMMMUIdxBit_Stage2); -} - -static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - int mask = ipas2e1_tlbmask(env, value); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - - if (tlb_force_broadcast(env)) { - tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); - } else { - tlb_flush_page_by_mmuidx(cs, pageaddr, mask); - } -} - -static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - int mask = ipas2e1_tlbmask(env, value); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - - tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); -} - -#ifdef TARGET_AARCH64 -typedef struct { - uint64_t base; - uint64_t length; -} TLBIRange; - -static ARMGranuleSize tlbi_range_tg_to_gran_size(int tg) -{ - /* - * Note that the TLBI range TG field encoding differs from both - * TG0 and TG1 encodings. - */ - switch (tg) { - case 1: - return Gran4K; - case 2: - return Gran16K; - case 3: - return Gran64K; - default: - return GranInvalid; - } -} - -static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx, - uint64_t value) -{ - unsigned int page_size_granule, page_shift, num, scale, exponent; - /* Extract one bit to represent the va selector in use. */ - uint64_t select = sextract64(value, 36, 1); - ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true, false); - TLBIRange ret = { }; - ARMGranuleSize gran; - - page_size_granule = extract64(value, 46, 2); - gran = tlbi_range_tg_to_gran_size(page_size_granule); - - /* The granule encoded in value must match the granule in use. 
*/ - if (gran != param.gran) { - qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n", - page_size_granule); - return ret; - } - - page_shift = arm_granule_bits(gran); - num = extract64(value, 39, 5); - scale = extract64(value, 44, 2); - exponent = (5 * scale) + 1; - - ret.length = (num + 1) << (exponent + page_shift); - - if (param.select) { - ret.base = sextract64(value, 0, 37); - } else { - ret.base = extract64(value, 0, 37); - } - if (param.ds) { - /* - * With DS=1, BaseADDR is always shifted 16 so that it is able - * to address all 52 va bits. The input address is perforce - * aligned on a 64k boundary regardless of translation granule. - */ - page_shift = 16; - } - ret.base <<= page_shift; - - return ret; -} - -static void do_rvae_write(CPUARMState *env, uint64_t value, - int idxmap, bool synced) -{ - ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap); - TLBIRange range; - int bits; - - range = tlbi_aa64_get_range(env, one_idx, value); - bits = tlbbits_for_regime(env, one_idx, range.base); - - if (synced) { - tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env), - range.base, - range.length, - idxmap, - bits); - } else { - tlb_flush_range_by_mmuidx(env_cpu(env), range.base, - range.length, idxmap, bits); - } -} - -static void tlbi_aa64_rvae1_write(CPUARMState *env, - const ARMCPRegInfo *ri, - uint64_t value) -{ - /* - * Invalidate by VA range, EL1&0. - * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1, - * since we don't support flush-for-specific-ASID-only or - * flush-last-level-only. - */ - - do_rvae_write(env, value, vae1_tlbmask(env), - tlb_force_broadcast(env)); -} - -static void tlbi_aa64_rvae1is_write(CPUARMState *env, - const ARMCPRegInfo *ri, - uint64_t value) -{ - /* - * Invalidate by VA range, Inner/Outer Shareable EL1&0. - * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS, - * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support - * flush-for-specific-ASID-only, flush-last-level-only or inner/outer - * shareable specific flushes. - */ - - do_rvae_write(env, value, vae1_tlbmask(env), true); -} - -static void tlbi_aa64_rvae2_write(CPUARMState *env, - const ARMCPRegInfo *ri, - uint64_t value) -{ - /* - * Invalidate by VA range, EL2. - * Currently handles all of RVAE2 and RVALE2, - * since we don't support flush-for-specific-ASID-only or - * flush-last-level-only. - */ - - do_rvae_write(env, value, vae2_tlbmask(env), - tlb_force_broadcast(env)); - - -} - -static void tlbi_aa64_rvae2is_write(CPUARMState *env, - const ARMCPRegInfo *ri, - uint64_t value) -{ - /* - * Invalidate by VA range, Inner/Outer Shareable, EL2. - * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS, - * since we don't support flush-for-specific-ASID-only, - * flush-last-level-only or inner/outer shareable specific flushes. - */ - - do_rvae_write(env, value, vae2_tlbmask(env), true); - -} - -static void tlbi_aa64_rvae3_write(CPUARMState *env, - const ARMCPRegInfo *ri, - uint64_t value) -{ - /* - * Invalidate by VA range, EL3. - * Currently handles all of RVAE3 and RVALE3, - * since we don't support flush-for-specific-ASID-only or - * flush-last-level-only. - */ - - do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env)); -} - -static void tlbi_aa64_rvae3is_write(CPUARMState *env, - const ARMCPRegInfo *ri, - uint64_t value) -{ - /* - * Invalidate by VA range, EL3, Inner/Outer Shareable. 
- * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS, - * since we don't support flush-for-specific-ASID-only, - * flush-last-level-only or inner/outer specific flushes. - */ - - do_rvae_write(env, value, ARMMMUIdxBit_E3, true); -} - -static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - do_rvae_write(env, value, ipas2e1_tlbmask(env, value), - tlb_force_broadcast(env)); -} - -static void tlbi_aa64_ripas2e1is_write(CPUARMState *env, - const ARMCPRegInfo *ri, - uint64_t value) -{ - do_rvae_write(env, value, ipas2e1_tlbmask(env, value), true); -} -#endif - static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { @@ -5393,7 +4815,7 @@ static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, } } else { if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) { - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } if (hcr & HCR_TDZ) { return CP_ACCESS_TRAP_EL2; @@ -5426,7 +4848,7 @@ static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, * Access to SP_EL0 is undefined if it's being used as * the stack pointer. */ - return CP_ACCESS_TRAP_UNCATEGORIZED; + return CP_ACCESS_UNDEFINED; } return CP_ACCESS_OK; } @@ -5568,7 +4990,7 @@ static void ic_ivau_write(CPUARMState *env, const ARMCPRegInfo *ri, mmap_lock(); - tb_invalidate_phys_range(start_address, end_address); + tb_invalidate_phys_range(env_cpu(env), start_address, end_address); mmap_unlock(); } @@ -5590,7 +5012,7 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, { .name = "FPCR", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, - .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, + .access = PL0_RW, .type = ARM_CP_FPU, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, { .name = "FPSR", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, @@ -5672,99 +5094,6 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, .fgt = FGT_DCCISW, .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, - /* TLBI operations */ - { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVMALLE1IS, - .writefn = tlbi_aa64_vmalle1is_write }, - { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVAE1IS, - .writefn = tlbi_aa64_vae1is_write }, - { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIASIDE1IS, - .writefn = tlbi_aa64_vmalle1is_write }, - { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVAAE1IS, - .writefn = tlbi_aa64_vae1is_write }, - { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVALE1IS, - .writefn = tlbi_aa64_vae1is_write }, - { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, 
.opc2 = 7, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVAALE1IS, - .writefn = tlbi_aa64_vae1is_write }, - { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVMALLE1, - .writefn = tlbi_aa64_vmalle1_write }, - { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVAE1, - .writefn = tlbi_aa64_vae1_write }, - { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIASIDE1, - .writefn = tlbi_aa64_vmalle1_write }, - { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVAAE1, - .writefn = tlbi_aa64_vae1_write }, - { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVALE1, - .writefn = tlbi_aa64_vae1_write }, - { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVAALE1, - .writefn = tlbi_aa64_vae1_write }, - { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_ipas2e1is_write }, - { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_ipas2e1is_write }, - { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_alle1is_write }, - { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_alle1is_write }, - { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_ipas2e1_write }, - { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_ipas2e1_write }, - { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_alle1_write }, - { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_alle1is_write }, #ifndef CONFIG_USER_ONLY /* 64 bit address translation operations */ { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, @@ -5820,42 +5149,6 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), .writefn = par_write }, #endif - /* TLB invalidate last level of translation table walk */ - { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, - 
.type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, - .writefn = tlbimva_is_write }, - { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, - .writefn = tlbimvaa_is_write }, - { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbimva_write }, - { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, - .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, - .writefn = tlbimvaa_write }, - { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbimva_hyp_write }, - { .name = "TLBIMVALHIS", - .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbimva_hyp_is_write }, - { .name = "TLBIIPAS2", - .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbiipas2_hyp_write }, - { .name = "TLBIIPAS2IS", - .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbiipas2is_hyp_write }, - { .name = "TLBIIPAS2L", - .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbiipas2_hyp_write }, - { .name = "TLBIIPAS2LIS", - .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbiipas2is_hyp_write }, /* 32 bit cache operations */ { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab }, @@ -6038,6 +5331,11 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) /* Clear RES0 bits. */ value &= valid_mask; + /* RW is RAO/WI if EL1 is AArch64 only */ + if (!cpu_isar_feature(aa64_aa32_el1, cpu)) { + value |= HCR_RW; + } + /* * These bits change the MMU setup: * HCR_VM enables stage 2 translation @@ -6095,6 +5393,12 @@ static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32)); } +static void hcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* hcr_write will set the RES1 bits on an AArch64-only CPU */ + hcr_write(env, ri, 0); +} + /* * Return the effective value of HCR_EL2, at the given security state. * Bits that are not included here: @@ -6216,6 +5520,14 @@ static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri, if (cpu_isar_feature(aa64_nmi, cpu)) { valid_mask |= HCRX_TALLINT | HCRX_VINMI | HCRX_VFNMI; } + /* FEAT_CMOW adds CMOW */ + if (cpu_isar_feature(aa64_cmow, cpu)) { + valid_mask |= HCRX_CMOW; + } + /* FEAT_XS adds FGTnXS, FnXS */ + if (cpu_isar_feature(aa64_xs, cpu)) { + valid_mask |= HCRX_FGTNXS | HCRX_FNXS; + } /* Clear RES0 bits. 
*/ env->cp15.hcrx_el2 = value & valid_mask; @@ -6322,6 +5634,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), .nv2_redirect_offset = 0x78, + .resetfn = hcr_reset, .writefn = hcr_write, .raw_writefn = raw_write }, { .name = "HCR", .state = ARM_CP_STATE_AA32, .type = ARM_CP_ALIAS | ARM_CP_IO, @@ -6437,50 +5750,6 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, - { .name = "TLBIALLNSNH", - .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbiall_nsnh_write }, - { .name = "TLBIALLNSNHIS", - .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbiall_nsnh_is_write }, - { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbiall_hyp_write }, - { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbiall_hyp_is_write }, - { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbimva_hyp_write }, - { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, - .type = ARM_CP_NO_RAW, .access = PL2_W, - .writefn = tlbimva_hyp_is_write }, - { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_alle2_write }, - { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_vae2_write }, - { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_vae2_write }, - { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_alle2is_write }, - { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_vae2is_write }, - { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_vae2is_write }, #ifndef CONFIG_USER_ONLY /* * Unlike the other EL2-related AT operations, these must @@ -6581,7 +5850,7 @@ static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri, if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) { return CP_ACCESS_OK; } - return CP_ACCESS_TRAP_UNCATEGORIZED; + return CP_ACCESS_UNDEFINED; } static const ARMCPRegInfo el2_sec_cp_reginfo[] = { @@ -6595,6 +5864,56 @@ static const ARMCPRegInfo el2_sec_cp_reginfo[] = { .access = PL2_RW, .accessfn = sel2_access, .nv2_redirect_offset = 0x48, .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) }, +#ifndef CONFIG_USER_ONLY + /* Secure EL2 Physical Timer */ + { .name = 
"CNTHPS_TVAL_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 0, + .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, + .accessfn = gt_sel2timer_access, + .readfn = gt_sec_pel2_tval_read, + .writefn = gt_sec_pel2_tval_write, + .resetfn = gt_sec_pel2_timer_reset, + }, + { .name = "CNTHPS_CTL_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 1, + .type = ARM_CP_IO, .access = PL2_RW, + .accessfn = gt_sel2timer_access, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_PHYS].ctl), + .resetvalue = 0, + .writefn = gt_sec_pel2_ctl_write, .raw_writefn = raw_write, + }, + { .name = "CNTHPS_CVAL_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 5, .opc2 = 2, + .type = ARM_CP_IO, .access = PL2_RW, + .accessfn = gt_sel2timer_access, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_PHYS].cval), + .writefn = gt_sec_pel2_cval_write, .raw_writefn = raw_write, + }, + /* Secure EL2 Virtual Timer */ + { .name = "CNTHVS_TVAL_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 0, + .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, + .accessfn = gt_sel2timer_access, + .readfn = gt_sec_vel2_tval_read, + .writefn = gt_sec_vel2_tval_write, + .resetfn = gt_sec_vel2_timer_reset, + }, + { .name = "CNTHVS_CTL_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 1, + .type = ARM_CP_IO, .access = PL2_RW, + .accessfn = gt_sel2timer_access, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_VIRT].ctl), + .resetvalue = 0, + .writefn = gt_sec_vel2_ctl_write, .raw_writefn = raw_write, + }, + { .name = "CNTHVS_CVAL_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 4, .opc2 = 2, + .type = ARM_CP_IO, .access = PL2_RW, + .accessfn = gt_sel2timer_access, + .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_S_EL2_VIRT].cval), + .writefn = gt_sec_vel2_cval_write, .raw_writefn = raw_write, + }, +#endif }; static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, @@ -6617,7 +5936,7 @@ static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, if (isread) { return CP_ACCESS_OK; } - return CP_ACCESS_TRAP_UNCATEGORIZED; + return CP_ACCESS_UNDEFINED; } static const ARMCPRegInfo el3_cp_reginfo[] = { @@ -6693,30 +6012,6 @@ static const ARMCPRegInfo el3_cp_reginfo[] = { .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1, .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_alle3is_write }, - { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_vae3is_write }, - { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_vae3is_write }, - { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_alle3_write }, - { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_vae3_write }, - { .name = "TLBI_VALE3", .state = 
ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_vae3_write }, }; #ifndef CONFIG_USER_ONLY @@ -6729,7 +6024,7 @@ static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri, return CP_ACCESS_OK; } if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { - return CP_ACCESS_TRAP_UNCATEGORIZED; + return CP_ACCESS_UNDEFINED; } return CP_ACCESS_OK; } @@ -6827,7 +6122,7 @@ static CPAccessResult el2_e2h_e12_access(CPUARMState *env, } /* FOO_EL12 aliases only exist when E2H is 1; otherwise they UNDEF */ if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { - return CP_ACCESS_TRAP_UNCATEGORIZED; + return CP_ACCESS_UNDEFINED; } if (ri->orig_accessfn) { return ri->orig_accessfn(env, ri->opaque, isread); @@ -7004,7 +6299,7 @@ static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, } } else { if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) { - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } if (hcr & HCR_TID2) { return CP_ACCESS_TRAP_EL2; @@ -7034,7 +6329,7 @@ static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri, if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) { return CP_ACCESS_TRAP_EL2; } - if (el < 3 && (env->cp15.scr_el3 & SCR_TERR)) { + if (!arm_is_el3_or_mon(env) && (env->cp15.scr_el3 & SCR_TERR)) { return CP_ACCESS_TRAP_EL3; } return CP_ACCESS_OK; @@ -7294,7 +6589,6 @@ static const ARMCPRegInfo zcr_reginfo[] = { .writefn = zcr_write, .raw_writefn = raw_write }, }; -#ifdef TARGET_AARCH64 static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { @@ -7303,7 +6597,7 @@ static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri, if (el == 0) { uint64_t sctlr = arm_sctlr(env, el); if (!(sctlr & SCTLR_EnTP2)) { - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } } /* TODO: FEAT_FGT */ @@ -7344,7 +6638,7 @@ static void arm_reset_sve_state(CPUARMState *env) memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs)); /* Recall that FFR is stored as pregs[16]. 
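
The access-result renames in this part of the patch follow one pattern: CP_ACCESS_TRAP becomes CP_ACCESS_TRAP_EL1 (a trap taken to EL1) and CP_ACCESS_TRAP_UNCATEGORIZED becomes CP_ACCESS_UNDEFINED (the access simply UNDEFs). Purely as an illustration of how the three outcomes are routed: the function below does not exist in the patch; it condenses the checks seen in ctr_el0_access() and e2h_access() into one place, using only helpers and constants already visible in this file.

/*
 * Illustrative sketch only -- "example_reg" is hypothetical.  The shape
 * follows the converted access functions in this diff: trap to the EL
 * that owns the controlling bit, UNDEF when the register has no meaning
 * in the current configuration.
 */
static CPAccessResult access_example_reg(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    int el = arm_current_el(env);

    if (el == 0 && !(arm_sctlr(env, 0) & SCTLR_UCT)) {
        /* EL0 access gated by an SCTLR_EL1 enable bit: trap to EL1 */
        return CP_ACCESS_TRAP_EL1;
    }
    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
        /* Hypervisor trap bit set: trap to EL2 */
        return CP_ACCESS_TRAP_EL2;
    }
    if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
        /* Register only exists when E2H is in effect: plain UNDEF */
        return CP_ACCESS_UNDEFINED;
    }
    return CP_ACCESS_OK;
}
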
*/ memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs)); - vfp_set_fpcr(env, 0x0800009f); + vfp_set_fpsr(env, 0x0800009f); } void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask) @@ -7459,14 +6753,6 @@ static const ARMCPRegInfo sme_reginfo[] = { .type = ARM_CP_CONST, .resetvalue = 0 }, }; -static void tlbi_aa64_paall_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush(cs); -} - static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { @@ -7484,14 +6770,6 @@ static void gpccr_reset(CPUARMState *env, const ARMCPRegInfo *ri) env_archcpu(env)->reset_l0gptsz); } -static void tlbi_aa64_paallos_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = env_cpu(env); - - tlb_flush_all_cpus_synced(cs); -} - static const ARMCPRegInfo rme_reginfo[] = { { .name = "GPCCR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 6, @@ -7503,28 +6781,6 @@ static const ARMCPRegInfo rme_reginfo[] = { { .name = "MFAR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 5, .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mfar_el3) }, - { .name = "TLBI_PAALL", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 4, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_paall_write }, - { .name = "TLBI_PAALLOS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 4, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_paallos_write }, - /* - * QEMU does not have a way to invalidate by physical address, thus - * invalidating a range of physical addresses is accomplished by - * flushing all tlb entries in the outer shareable domain, - * just like PAALLOS. - */ - { .name = "TLBI_RPALOS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 7, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_paallos_write }, - { .name = "TLBI_RPAOS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 3, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_paallos_write }, { .name = "DC_CIPAPA", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 1, .access = PL3_W, .type = ARM_CP_NOP }, @@ -7566,7 +6822,6 @@ static const ARMCPRegInfo nmi_reginfo[] = { .writefn = aa64_allint_write, .readfn = aa64_allint_read, .resetfn = arm_cp_reset_ignore }, }; -#endif /* TARGET_AARCH64 */ static void define_pmu_regs(ARMCPU *cpu) { @@ -7677,7 +6932,7 @@ static void define_pmu_regs(ARMCPU *cpu) static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) { ARMCPU *cpu = env_archcpu(env); - uint64_t pfr1 = cpu->isar.id_pfr1; + uint64_t pfr1 = GET_IDREG(&cpu->isar, ID_PFR1); if (env->gicv3state) { pfr1 |= 1 << 28; @@ -7688,7 +6943,7 @@ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) { ARMCPU *cpu = env_archcpu(env); - uint64_t pfr0 = cpu->isar.id_aa64pfr0; + uint64_t pfr0 = GET_IDREG(&cpu->isar, ID_AA64PFR0); if (env->gicv3state) { pfr0 |= 1 << 24; @@ -7719,8 +6974,8 @@ static CPAccessResult access_lor_other(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_is_secure_below_el3(env)) { - /* Access denied in secure mode. 
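
id_pfr1_read() and id_aa64pfr0_read() above fold the GIC field into an otherwise static ID value at read time, which is why those two registers stay ARM_CP_NO_RAW with a readfn instead of becoming plain ARM_CP_CONST like their neighbours. A self-contained sketch of that split between baked-in and dynamic bits; the toy_* names and the example value are invented, only the bit position comes from the code above.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the state the real readfn consults. */
struct toy_cpu {
    uint64_t id_pfr1_static;    /* value fixed when the CPU model is built */
    void *gicv3state;           /* non-NULL when a GICv3 is connected */
};

/* Same shape as id_pfr1_read(): static value plus a dynamic GIC bit. */
static uint64_t toy_id_pfr1_read(const struct toy_cpu *cpu)
{
    uint64_t pfr1 = cpu->id_pfr1_static;

    if (cpu->gicv3state) {
        pfr1 |= 1 << 28;        /* ID_PFR1.GIC: system-register GIC present */
    }
    return pfr1;
}

int main(void)
{
    struct toy_cpu with_gic = { 0x00011011, (void *)&with_gic };
    struct toy_cpu without_gic = { 0x00011011, NULL };

    printf("with GIC:    0x%08" PRIx64 "\n", toy_id_pfr1_read(&with_gic));
    printf("without GIC: 0x%08" PRIx64 "\n", toy_id_pfr1_read(&without_gic));
    return 0;
}
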
*/ - return CP_ACCESS_TRAP; + /* UNDEF if SCR_EL3.NS == 0 */ + return CP_ACCESS_UNDEFINED; } return access_lor_ns(env, ri, isread); } @@ -7758,7 +7013,6 @@ static const ARMCPRegInfo lor_reginfo[] = { .type = ARM_CP_CONST, .resetvalue = 0 }, }; -#ifdef TARGET_AARCH64 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { @@ -7830,210 +7084,6 @@ static const ARMCPRegInfo pauth_reginfo[] = { .fieldoffset = offsetof(CPUARMState, keys.apib.hi) }, }; -static const ARMCPRegInfo tlbirange_reginfo[] = { - { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVAE1IS, - .writefn = tlbi_aa64_rvae1is_write }, - { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVAAE1IS, - .writefn = tlbi_aa64_rvae1is_write }, - { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVALE1IS, - .writefn = tlbi_aa64_rvae1is_write }, - { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7, - .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVAALE1IS, - .writefn = tlbi_aa64_rvae1is_write }, - { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVAE1OS, - .writefn = tlbi_aa64_rvae1is_write }, - { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVAAE1OS, - .writefn = tlbi_aa64_rvae1is_write }, - { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVALE1OS, - .writefn = tlbi_aa64_rvae1is_write }, - { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVAALE1OS, - .writefn = tlbi_aa64_rvae1is_write }, - { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVAE1, - .writefn = tlbi_aa64_rvae1_write }, - { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVAAE1, - .writefn = tlbi_aa64_rvae1_write }, - { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVALE1, - .writefn = tlbi_aa64_rvae1_write }, - { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7, - .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIRVAALE1, - .writefn = tlbi_aa64_rvae1_write }, - { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, 
.opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_ripas2e1is_write }, - { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_ripas2e1is_write }, - { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_rvae2is_write }, - { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_rvae2is_write }, - { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_ripas2e1_write }, - { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_ripas2e1_write }, - { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_rvae2is_write }, - { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_rvae2is_write }, - { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_rvae2_write }, - { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_rvae2_write }, - { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_rvae3is_write }, - { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_rvae3is_write }, - { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_rvae3is_write }, - { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_rvae3is_write }, - { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_rvae3_write }, - { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_rvae3_write }, -}; - -static const ARMCPRegInfo tlbios_reginfo[] = { - { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVMALLE1OS, - .writefn = tlbi_aa64_vmalle1is_write }, - { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, 
.opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1, - .fgt = FGT_TLBIVAE1OS, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_vae1is_write }, - { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIASIDE1OS, - .writefn = tlbi_aa64_vmalle1is_write }, - { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVAAE1OS, - .writefn = tlbi_aa64_vae1is_write }, - { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVALE1OS, - .writefn = tlbi_aa64_vae1is_write }, - { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7, - .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, - .fgt = FGT_TLBIVAALE1OS, - .writefn = tlbi_aa64_vae1is_write }, - { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_alle2is_write }, - { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_vae2is_write }, - { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_alle1is_write }, - { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5, - .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, - .writefn = tlbi_aa64_vae2is_write }, - { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6, - .access = PL2_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_alle1is_write }, - { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0, - .access = PL2_W, .type = ARM_CP_NOP }, - { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3, - .access = PL2_W, .type = ARM_CP_NOP }, - { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4, - .access = PL2_W, .type = ARM_CP_NOP }, - { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7, - .access = PL2_W, .type = ARM_CP_NOP }, - { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_alle3is_write }, - { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_vae3is_write }, - { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64, - .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5, - .access = PL3_W, .type = ARM_CP_NO_RAW, - .writefn = tlbi_aa64_vae3is_write }, -}; - static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) { Error *err = NULL; @@ -8345,7 +7395,7 @@ static CPAccessResult 
access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri, if (hcr & HCR_TGE) { return CP_ACCESS_TRAP_EL2; } - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) { return CP_ACCESS_TRAP_EL2; @@ -8455,8 +7505,6 @@ static const ARMCPRegInfo nv2_reginfo[] = { .fieldoffset = offsetof(CPUARMState, cp15.vncr_el2) }, }; -#endif /* TARGET_AARCH64 */ - static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { @@ -8465,7 +7513,7 @@ static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri, if (el == 0) { uint64_t sctlr = arm_sctlr(env, el); if (!(sctlr & SCTLR_EnRCTX)) { - return CP_ACCESS_TRAP; + return CP_ACCESS_TRAP_EL1; } } else if (el == 1) { uint64_t hcr = arm_hcr_el2_eff(env); @@ -8702,6 +7750,8 @@ void register_cp_regs_for_features(ARMCPU *cpu) { /* Register all the coprocessor registers based on feature bits */ CPUARMState *env = &cpu->env; + ARMISARegisters *isar = &cpu->isar; + if (arm_feature(env, ARM_FEATURE_M)) { /* M profile has no coprocessor registers */ return; @@ -8716,6 +7766,10 @@ void register_cp_regs_for_features(ARMCPU *cpu) define_arm_cp_regs(cpu, not_v8_cp_reginfo); } +#ifndef CONFIG_USER_ONLY + define_tlb_insn_regs(cpu); +#endif + if (arm_feature(env, ARM_FEATURE_V6)) { /* The ID registers all have impdef reset values */ ARMCPRegInfo v6_idregs[] = { @@ -8723,7 +7777,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_pfr0 }, + .resetvalue = GET_IDREG(isar, ID_PFR0)}, /* * ID_PFR1 is not a plain ARM_CP_CONST because we don't know * the value of the GIC field until after we define these regs. 
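
The hunks that follow convert every .resetvalue from the old per-field cpu->isar.id_* members to GET_IDREG(isar, ...) lookups, using the `isar` local introduced at the top of register_cp_regs_for_features(). The macro's definition is not part of this diff; the underlying idea -- one index-addressed array behind a small accessor, so ID registers can be stored and walked generically -- can be modelled standalone like this (all TOY_* names are invented for the sketch).

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Invented index space; the real enum is defined outside this diff. */
enum toy_idreg {
    TOY_ID_PFR0,
    TOY_ID_PFR1,
    TOY_ID_AA64PFR0,
    TOY_NUM_IDREGS,
};

struct toy_isar {
    uint64_t idregs[TOY_NUM_IDREGS];
};

/* Accessor in the spirit of GET_IDREG(); the setter is invented for the demo. */
#define TOY_GET_IDREG(isar, which)      ((isar)->idregs[which])
#define TOY_SET_IDREG(isar, which, val) ((isar)->idregs[which] = (val))

int main(void)
{
    struct toy_isar isar = { { 0 } };

    TOY_SET_IDREG(&isar, TOY_ID_AA64PFR0, 0x1122334455667788ull);

    /* Every ID register now sits behind one index, so tables can be walked. */
    for (int i = 0; i < TOY_NUM_IDREGS; i++) {
        printf("idreg[%d] = 0x%016" PRIx64 "\n", i, TOY_GET_IDREG(&isar, i));
    }
    return 0;
}
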
@@ -8734,7 +7788,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .accessfn = access_aa32_tid3, #ifdef CONFIG_USER_ONLY .type = ARM_CP_CONST, - .resetvalue = cpu->isar.id_pfr1, + .resetvalue = GET_IDREG(isar, ID_PFR1), #else .type = ARM_CP_NO_RAW, .accessfn = access_aa32_tid3, @@ -8746,7 +7800,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_dfr0 }, + .resetvalue = GET_IDREG(isar, ID_DFR0)}, { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, @@ -8756,62 +7810,62 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_mmfr0 }, + .resetvalue = GET_IDREG(isar, ID_MMFR0)}, { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_mmfr1 }, + .resetvalue = GET_IDREG(isar, ID_MMFR1)}, { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_mmfr2 }, + .resetvalue = GET_IDREG(isar, ID_MMFR2)}, { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_mmfr3 }, + .resetvalue = GET_IDREG(isar, ID_MMFR3)}, { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar0 }, + .resetvalue = GET_IDREG(isar, ID_ISAR0)}, { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar1 }, + .resetvalue = GET_IDREG(isar, ID_ISAR1)}, { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar2 }, + .resetvalue = GET_IDREG(isar, ID_ISAR2)}, { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar3 }, + .resetvalue = GET_IDREG(isar, ID_ISAR3) }, { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar4 }, + .resetvalue = GET_IDREG(isar, ID_ISAR4) }, { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar5 }, + .resetvalue = GET_IDREG(isar, ID_ISAR5) }, { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_mmfr4 }, + .resetvalue = GET_IDREG(isar, ID_MMFR4)}, { .name = "ID_ISAR6", .state = 
ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar6 }, + .resetvalue = GET_IDREG(isar, ID_ISAR6) }, }; define_arm_cp_regs(cpu, v6_idregs); define_arm_cp_regs(cpu, v6_cp_reginfo); @@ -8821,10 +7875,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) if (arm_feature(env, ARM_FEATURE_V6K)) { define_arm_cp_regs(cpu, v6k_cp_reginfo); } - if (arm_feature(env, ARM_FEATURE_V7MP) && - !arm_feature(env, ARM_FEATURE_PMSA)) { - define_arm_cp_regs(cpu, v7mp_cp_reginfo); - } if (arm_feature(env, ARM_FEATURE_V7VE)) { define_arm_cp_regs(cpu, pmovsset_cp_reginfo); } @@ -8866,7 +7916,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, #ifdef CONFIG_USER_ONLY .type = ARM_CP_CONST, - .resetvalue = cpu->isar.id_aa64pfr0 + .resetvalue = GET_IDREG(isar, ID_AA64PFR0) #else .type = ARM_CP_NO_RAW, .accessfn = access_aa64_tid3, @@ -8878,7 +7928,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64pfr1}, + .resetvalue = GET_IDREG(isar, ID_AA64PFR1)}, { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, @@ -8893,12 +7943,12 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64zfr0 }, + .resetvalue = GET_IDREG(isar, ID_AA64ZFR0)}, { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64smfr0 }, + .resetvalue = GET_IDREG(isar, ID_AA64SMFR0)}, { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, @@ -8913,12 +7963,12 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64dfr0 }, + .resetvalue = GET_IDREG(isar, ID_AA64DFR0) }, { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64dfr1 }, + .resetvalue = GET_IDREG(isar, ID_AA64DFR1) }, { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, @@ -8953,17 +8003,17 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64isar0 }, + .resetvalue = GET_IDREG(isar, ID_AA64ISAR0)}, { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64isar1 }, + .resetvalue = GET_IDREG(isar, ID_AA64ISAR1)}, { .name = "ID_AA64ISAR2_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - 
.resetvalue = cpu->isar.id_aa64isar2 }, + .resetvalue = GET_IDREG(isar, ID_AA64ISAR2)}, { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, @@ -8993,22 +8043,22 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64mmfr0 }, + .resetvalue = GET_IDREG(isar, ID_AA64MMFR0)}, { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64mmfr1 }, + .resetvalue = GET_IDREG(isar, ID_AA64MMFR1) }, { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64mmfr2 }, + .resetvalue = GET_IDREG(isar, ID_AA64MMFR2) }, { .name = "ID_AA64MMFR3_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64mmfr3 }, + .resetvalue = GET_IDREG(isar, ID_AA64MMFR3) }, { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, @@ -9080,17 +8130,17 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_pfr2 }, + .resetvalue = GET_IDREG(isar, ID_PFR2)}, { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_dfr1 }, + .resetvalue = GET_IDREG(isar, ID_DFR1)}, { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_mmfr5 }, + .resetvalue = GET_IDREG(isar, ID_MMFR5)}, { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, @@ -9899,7 +8949,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo); } -#ifdef TARGET_AARCH64 if (cpu_isar_feature(aa64_sme, cpu)) { define_arm_cp_regs(cpu, sme_reginfo); } @@ -9909,12 +8958,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) if (cpu_isar_feature(aa64_rndr, cpu)) { define_arm_cp_regs(cpu, rndr_reginfo); } - if (cpu_isar_feature(aa64_tlbirange, cpu)) { - define_arm_cp_regs(cpu, tlbirange_reginfo); - } - if (cpu_isar_feature(aa64_tlbios, cpu)) { - define_arm_cp_regs(cpu, tlbios_reginfo); - } /* Data Cache clean instructions up to PoP */ if (cpu_isar_feature(aa64_dcpop, cpu)) { define_one_arm_cp_reg(cpu, dcpop_reg); @@ -9966,7 +9009,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) if (cpu_isar_feature(aa64_nmi, cpu)) { define_arm_cp_regs(cpu, nmi_reginfo); } -#endif if (cpu_isar_feature(any_predinv, cpu)) { define_arm_cp_regs(cpu, predinv_reginfo); @@ -10327,6 +9369,31 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, if (r->state != state && r->state != ARM_CP_STATE_BOTH) { continue; } + if ((r->type & ARM_CP_ADD_TLBI_NXS) && + 
cpu_isar_feature(aa64_xs, cpu)) { + /* + * This is a TLBI insn which has an NXS variant. The + * NXS variant is at the same encoding except that + * crn is +1, and has the same behaviour except for + * fine-grained trapping. Add the NXS insn here and + * then fall through to add the normal register. + * add_cpreg_to_hashtable() copies the cpreg struct + * and name that it is passed, so it's OK to use + * a local struct here. + */ + ARMCPRegInfo nxs_ri = *r; + g_autofree char *name = g_strdup_printf("%sNXS", r->name); + + assert(state == ARM_CP_STATE_AA64); + assert(nxs_ri.crn < 0xf); + nxs_ri.crn++; + if (nxs_ri.fgt) { + nxs_ri.fgt |= R_FGT_NXS_MASK; + } + add_cpreg_to_hashtable(cpu, &nxs_ri, opaque, state, + ARM_CP_SECSTATE_NS, + crm, opc1, opc2, name); + } if (state == ARM_CP_STATE_AA32) { /* * Under AArch32 CP registers can be common @@ -10765,7 +9832,7 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, uint64_t hcr_el2; if (arm_feature(env, ARM_FEATURE_EL3)) { - rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); + rw = arm_scr_rw_eff(env); } else { /* * Either EL2 is the highest EL (and so the EL2 register width @@ -10840,6 +9907,7 @@ void arm_log_exception(CPUState *cs) [EXCP_NMI] = "NMI", [EXCP_VINMI] = "Virtual IRQ NMI", [EXCP_VFNMI] = "Virtual FIQ NMI", + [EXCP_MON_TRAP] = "Monitor Trap", }; if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { @@ -11406,6 +10474,16 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs) mask = CPSR_A | CPSR_I | CPSR_F; offset = 0; break; + case EXCP_MON_TRAP: + new_mode = ARM_CPU_MODE_MON; + addr = 0x04; + mask = CPSR_A | CPSR_I | CPSR_F; + if (env->thumb) { + offset = 2; + } else { + offset = 4; + } + break; default: cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); return; /* Never happens. Keep compiler happy. */ @@ -11539,7 +10617,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; unsigned int new_el = env->exception.target_el; - target_ulong addr = env->cp15.vbar_el[new_el]; + vaddr addr = env->cp15.vbar_el[new_el]; unsigned int new_mode = aarch64_pstate_mode(new_el, true); unsigned int old_mode; unsigned int cur_el = arm_current_el(env); @@ -11563,7 +10641,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) switch (new_el) { case 3: - is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0; + is_aa64 = arm_scr_rw_eff(env); break; case 2: hcr = arm_hcr_el2_eff(env); @@ -11861,10 +10939,20 @@ void arm_cpu_do_interrupt(CPUState *cs) uint64_t arm_sctlr(CPUARMState *env, int el) { - /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */ + /* Only EL0 needs to be adjusted for EL1&0 or EL2&0 or EL3&0 */ if (el == 0) { ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0); - el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1; + switch (mmu_idx) { + case ARMMMUIdx_E20_0: + el = 2; + break; + case ARMMMUIdx_E30_0: + el = 3; + break; + default: + el = 1; + break; + } } return env->cp15.sctlr_el[el]; } @@ -12128,289 +11216,6 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, }; } -/* - * Note that signed overflow is undefined in C. The following routines are - * careful to use unsigned types where modulo arithmetic is required. - * Failure to do so _will_ break on newer gcc. - */ - -/* Signed saturating arithmetic. */ - -/* Perform 16-bit signed saturating addition. 
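
The ARM_CP_ADD_TLBI_NXS block above derives each FEAT_XS "nXS" TLBI encoding mechanically from the base insn: same opc fields, crn one higher, name suffixed with NXS, and the NXS bit set in the FGT word. A standalone illustration of that derivation follows; plain snprintf stands in for g_strdup_printf, the struct is reduced to the fields the transformation touches, the FGT bit is a made-up stand-in, and the TLBI VAE1IS encoding values are quoted from the architecture rather than copied from this diff.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reduced stand-in for ARMCPRegInfo: only the fields the NXS copy changes. */
struct toy_tlbi {
    char name[32];
    int opc0, opc1, crn, crm, opc2;
    uint64_t fgt;
};

#define TOY_FGT_NXS_BIT (1ull << 63)    /* stand-in for R_FGT_NXS_MASK */

/* Derive the nXS variant the same way the hunk above does. */
static struct toy_tlbi make_nxs_variant(const struct toy_tlbi *base)
{
    struct toy_tlbi nxs = *base;

    snprintf(nxs.name, sizeof(nxs.name), "%sNXS", base->name);
    nxs.crn++;                          /* nXS encodings live at crn + 1 */
    if (nxs.fgt) {
        nxs.fgt |= TOY_FGT_NXS_BIT;     /* keep the FGT index, mark it nXS */
    }
    return nxs;
}

int main(void)
{
    struct toy_tlbi vae1is = { "TLBI_VAE1IS", 1, 0, 8, 3, 1, 1 /* example FGT id */ };
    struct toy_tlbi nxs = make_nxs_variant(&vae1is);

    /* Prints TLBI_VAE1ISNXS with crn=9 and the NXS bit ORed into fgt. */
    printf("%s: crn=%d fgt=%#llx\n", nxs.name, nxs.crn,
           (unsigned long long)nxs.fgt);
    return 0;
}
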
*/ -static inline uint16_t add16_sat(uint16_t a, uint16_t b) -{ - uint16_t res; - - res = a + b; - if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { - if (a & 0x8000) { - res = 0x8000; - } else { - res = 0x7fff; - } - } - return res; -} - -/* Perform 8-bit signed saturating addition. */ -static inline uint8_t add8_sat(uint8_t a, uint8_t b) -{ - uint8_t res; - - res = a + b; - if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { - if (a & 0x80) { - res = 0x80; - } else { - res = 0x7f; - } - } - return res; -} - -/* Perform 16-bit signed saturating subtraction. */ -static inline uint16_t sub16_sat(uint16_t a, uint16_t b) -{ - uint16_t res; - - res = a - b; - if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { - if (a & 0x8000) { - res = 0x8000; - } else { - res = 0x7fff; - } - } - return res; -} - -/* Perform 8-bit signed saturating subtraction. */ -static inline uint8_t sub8_sat(uint8_t a, uint8_t b) -{ - uint8_t res; - - res = a - b; - if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { - if (a & 0x80) { - res = 0x80; - } else { - res = 0x7f; - } - } - return res; -} - -#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); -#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); -#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); -#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); -#define PFX q - -#include "op_addsub.h" - -/* Unsigned saturating arithmetic. */ -static inline uint16_t add16_usat(uint16_t a, uint16_t b) -{ - uint16_t res; - res = a + b; - if (res < a) { - res = 0xffff; - } - return res; -} - -static inline uint16_t sub16_usat(uint16_t a, uint16_t b) -{ - if (a > b) { - return a - b; - } else { - return 0; - } -} - -static inline uint8_t add8_usat(uint8_t a, uint8_t b) -{ - uint8_t res; - res = a + b; - if (res < a) { - res = 0xff; - } - return res; -} - -static inline uint8_t sub8_usat(uint8_t a, uint8_t b) -{ - if (a > b) { - return a - b; - } else { - return 0; - } -} - -#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); -#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); -#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); -#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); -#define PFX uq - -#include "op_addsub.h" - -/* Signed modulo arithmetic. */ -#define SARITH16(a, b, n, op) do { \ - int32_t sum; \ - sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ - RESULT(sum, n, 16); \ - if (sum >= 0) \ - ge |= 3 << (n * 2); \ - } while (0) - -#define SARITH8(a, b, n, op) do { \ - int32_t sum; \ - sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ - RESULT(sum, n, 8); \ - if (sum >= 0) \ - ge |= 1 << n; \ - } while (0) - - -#define ADD16(a, b, n) SARITH16(a, b, n, +) -#define SUB16(a, b, n) SARITH16(a, b, n, -) -#define ADD8(a, b, n) SARITH8(a, b, n, +) -#define SUB8(a, b, n) SARITH8(a, b, n, -) -#define PFX s -#define ARITH_GE - -#include "op_addsub.h" - -/* Unsigned modulo arithmetic. 
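
The signed saturating helpers deleted above all rely on the same overflow test: the operands have equal sign bits but the result's sign differs. A standalone check of that rule against the edge cases -- this is a restatement of the deleted add16_sat() for verification, not QEMU code.

#include <assert.h>
#include <stdint.h>

/* Same overflow test as the deleted add16_sat(). */
static uint16_t sat_add16(uint16_t a, uint16_t b)
{
    uint16_t res = a + b;

    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        /* Operands agree in sign but the result flipped: clamp. */
        res = (a & 0x8000) ? 0x8000 : 0x7fff;
    }
    return res;
}

int main(void)
{
    assert(sat_add16(0x7fff, 0x0001) == 0x7fff); /* +32767 + 1 saturates high */
    assert(sat_add16(0x8000, 0xffff) == 0x8000); /* -32768 + -1 saturates low */
    assert(sat_add16(0x1234, 0x0001) == 0x1235); /* no overflow: plain add */
    assert(sat_add16(0x8000, 0x7fff) == 0xffff); /* -32768 + 32767 = -1, no clamp */
    return 0;
}
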
*/ -#define ADD16(a, b, n) do { \ - uint32_t sum; \ - sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ - RESULT(sum, n, 16); \ - if ((sum >> 16) == 1) \ - ge |= 3 << (n * 2); \ - } while (0) - -#define ADD8(a, b, n) do { \ - uint32_t sum; \ - sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ - RESULT(sum, n, 8); \ - if ((sum >> 8) == 1) \ - ge |= 1 << n; \ - } while (0) - -#define SUB16(a, b, n) do { \ - uint32_t sum; \ - sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ - RESULT(sum, n, 16); \ - if ((sum >> 16) == 0) \ - ge |= 3 << (n * 2); \ - } while (0) - -#define SUB8(a, b, n) do { \ - uint32_t sum; \ - sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ - RESULT(sum, n, 8); \ - if ((sum >> 8) == 0) \ - ge |= 1 << n; \ - } while (0) - -#define PFX u -#define ARITH_GE - -#include "op_addsub.h" - -/* Halved signed arithmetic. */ -#define ADD16(a, b, n) \ - RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) -#define SUB16(a, b, n) \ - RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) -#define ADD8(a, b, n) \ - RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) -#define SUB8(a, b, n) \ - RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) -#define PFX sh - -#include "op_addsub.h" - -/* Halved unsigned arithmetic. */ -#define ADD16(a, b, n) \ - RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) -#define SUB16(a, b, n) \ - RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) -#define ADD8(a, b, n) \ - RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) -#define SUB8(a, b, n) \ - RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) -#define PFX uh - -#include "op_addsub.h" - -static inline uint8_t do_usad(uint8_t a, uint8_t b) -{ - if (a > b) { - return a - b; - } else { - return b - a; - } -} - -/* Unsigned sum of absolute byte differences. */ -uint32_t HELPER(usad8)(uint32_t a, uint32_t b) -{ - uint32_t sum; - sum = do_usad(a, b); - sum += do_usad(a >> 8, b >> 8); - sum += do_usad(a >> 16, b >> 16); - sum += do_usad(a >> 24, b >> 24); - return sum; -} - -/* For ARMv6 SEL instruction. */ -uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) -{ - uint32_t mask; - - mask = 0; - if (flags & 1) { - mask |= 0xff; - } - if (flags & 2) { - mask |= 0xff00; - } - if (flags & 4) { - mask |= 0xff0000; - } - if (flags & 8) { - mask |= 0xff000000; - } - return (a & mask) | (b & ~mask); -} - -/* - * CRC helpers. - * The upper bytes of val (above the number specified by 'bytes') must have - * been zeroed out by the caller. - */ -uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes) -{ - uint8_t buf[4]; - - stl_le_p(buf, val); - - /* zlib crc32 converts the accumulator and output to one's complement. */ - return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; -} - -uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) -{ - uint8_t buf[4]; - - stl_le_p(buf, val); - - /* Linux crc32c converts the output to one's complement. 
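
The deleted HELPER(crc32) bridges the ARM CRC32 instructions, which work on a raw accumulator, to zlib's crc32(), which expects and returns one's-complemented values, by inverting on the way in and on the way out (exactly what the comment above it says). A standalone demonstration of that wrapper, and of the fact that it still composes across chunked input; link with -lz, and the seed value is just an example.

#include <stdint.h>
#include <stdio.h>
#include <zlib.h>

/* Same double inversion as the deleted helper, applied to a byte buffer. */
static uint32_t arm_style_crc32(uint32_t acc, const unsigned char *buf,
                                unsigned int len)
{
    return crc32(acc ^ 0xffffffff, buf, len) ^ 0xffffffff;
}

int main(void)
{
    const unsigned char data[] = "123456789";
    uint32_t acc = 0xffffffff;   /* example seed, as common CRC-32 software uses */

    /* Feeding the bytes in two chunks must match a single pass. */
    uint32_t one_pass = arm_style_crc32(acc, data, 9);
    uint32_t chunked = arm_style_crc32(arm_style_crc32(acc, data, 4),
                                       data + 4, 5);

    printf("one pass: %08x, chunked: %08x\n", one_pass, chunked);
    return one_pass != chunked;  /* exit 0 when the two agree */
}
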
*/ - return crc32c(acc, buf, bytes) ^ 0xffffffff; -} /* * Return the exception level to which FP-disabled exceptions should @@ -12532,6 +11337,7 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx) switch (mmu_idx) { case ARMMMUIdx_E10_0: case ARMMMUIdx_E20_0: + case ARMMMUIdx_E30_0: return 0; case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: @@ -12541,6 +11347,7 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx) case ARMMMUIdx_E20_2_PAN: return 2; case ARMMMUIdx_E3: + case ARMMMUIdx_E30_3_PAN: return 3; default: g_assert_not_reached(); @@ -12569,6 +11376,9 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) hcr = arm_hcr_el2_eff(env); if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { idx = ARMMMUIdx_E20_0; + } else if (arm_is_secure_below_el3(env) && + !arm_el_is_aa64(env, 3)) { + idx = ARMMMUIdx_E30_0; } else { idx = ARMMMUIdx_E10_0; } @@ -12593,6 +11403,9 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) } break; case 3: + if (!arm_el_is_aa64(env, 3) && arm_pan_enabled(env)) { + return ARMMMUIdx_E30_3_PAN; + } return ARMMMUIdx_E3; default: g_assert_not_reached(); @@ -12606,116 +11419,6 @@ ARMMMUIdx arm_mmu_idx(CPUARMState *env) return arm_mmu_idx_el(env, arm_current_el(env)); } -static bool mve_no_pred(CPUARMState *env) -{ - /* - * Return true if there is definitely no predication of MVE - * instructions by VPR or LTPSIZE. (Returning false even if there - * isn't any predication is OK; generated code will just be - * a little worse.) - * If the CPU does not implement MVE then this TB flag is always 0. - * - * NOTE: if you change this logic, the "recalculate s->mve_no_pred" - * logic in gen_update_fp_context() needs to be updated to match. - * - * We do not include the effect of the ECI bits here -- they are - * tracked in other TB flags. This simplifies the logic for - * "when did we emit code that changes the MVE_NO_PRED TB flag - * and thus need to end the TB?". - */ - if (cpu_isar_feature(aa32_mve, env_archcpu(env))) { - return false; - } - if (env->v7m.vpr) { - return false; - } - if (env->v7m.ltpsize < 4) { - return false; - } - return true; -} - -void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *pflags) -{ - CPUARMTBFlags flags; - - assert_hflags_rebuild_correctly(env); - flags = env->hflags; - - if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) { - *pc = env->pc; - if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { - DP_TBFLAG_A64(flags, BTYPE, env->btype); - } - } else { - *pc = env->regs[15]; - - if (arm_feature(env, ARM_FEATURE_M)) { - if (arm_feature(env, ARM_FEATURE_M_SECURITY) && - FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) - != env->v7m.secure) { - DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1); - } - - if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) && - (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) || - (env->v7m.secure && - !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) { - /* - * ASPEN is set, but FPCA/SFPA indicate that there is no - * active FP context; we must create a new FP context before - * executing any FP insn. - */ - DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1); - } - - bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; - if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) { - DP_TBFLAG_M32(flags, LSPACT, 1); - } - - if (mve_no_pred(env)) { - DP_TBFLAG_M32(flags, MVE_NO_PRED, 1); - } - } else { - /* - * Note that XSCALE_CPAR shares bits with VECSTRIDE. - * Note that VECLEN+VECSTRIDE are RES0 for M-profile. 
-             */
-            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
-                DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
-            } else {
-                DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
-                DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
-            }
-            if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
-                DP_TBFLAG_A32(flags, VFPEN, 1);
-            }
-        }
-
-        DP_TBFLAG_AM32(flags, THUMB, env->thumb);
-        DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
-    }
-
-    /*
-     * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
-     * states defined in the ARM ARM for software singlestep:
-     *  SS_ACTIVE   PSTATE.SS   State
-     *     0            x       Inactive (the TB flag for SS is always 0)
-     *     1            0       Active-pending
-     *     1            1       Active-not-pending
-     * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
-     */
-    if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
-        DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
-    }
-
-    *pflags = flags.flags;
-    *cs_base = flags.flags2;
-}
-
-#ifdef TARGET_AARCH64
 /*
  * The manual says that when SVE is enabled and VQ is widened the
  * implementation is allowed to zero the previously inaccessible
@@ -12830,7 +11533,6 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
         aarch64_sve_narrow_vq(env, new_len + 1);
     }
 }
-#endif
 
 #ifndef CONFIG_USER_ONLY
 ARMSecuritySpace arm_security_space(CPUARMState *env)
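
cpu_get_tb_cpu_state() is removed from this file (its replacement is not shown in this hunk); the part of it most worth keeping in mind is the software single-step truth table in its comment. A self-contained restatement of that table:

#include <stdbool.h>
#include <stdio.h>

/* The three software single-step states from the deleted comment. */
enum ss_state { SS_INACTIVE, SS_ACTIVE_PENDING, SS_ACTIVE_NOT_PENDING };

/*
 * SS_ACTIVE comes from the cached hflags; PSTATE.SS is read fresh for
 * every TB, which is exactly why the deleted code recomputed PSTATE__SS
 * here rather than caching it.
 */
static enum ss_state classify_ss(bool ss_active, bool pstate_ss)
{
    if (!ss_active) {
        return SS_INACTIVE;            /* PSTATE.SS is ignored */
    }
    return pstate_ss ? SS_ACTIVE_NOT_PENDING : SS_ACTIVE_PENDING;
}

int main(void)
{
    static const char *names[] = {
        "Inactive", "Active-pending", "Active-not-pending"
    };

    for (int a = 0; a <= 1; a++) {
        for (int s = 0; s <= 1; s++) {
            printf("SS_ACTIVE=%d PSTATE.SS=%d -> %s\n",
                   a, s, names[classify_ss(a, s)]);
        }
    }
    return 0;
}
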