Diffstat (limited to 'target')
-rw-r--r--   target/arm/arm-powerctl.c | 10
-rw-r--r--   target/arm/cpu.c          |  7
-rw-r--r--   target/arm/cpu.h          | 95
-rw-r--r--   target/arm/cpu64.c        | 66
-rw-r--r--   target/arm/helper.c       | 27
-rw-r--r--   target/arm/op_helper.c    |  6
-rw-r--r--   target/hppa/mem_helper.c  |  3
7 files changed, 197 insertions, 17 deletions
diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c
index ce55eeb..2b85693 100644
--- a/target/arm/arm-powerctl.c
+++ b/target/arm/arm-powerctl.c
@@ -103,6 +103,16 @@ static void arm_set_cpu_on_async_work(CPUState *target_cpu_state,
     } else {
         /* Processor is not in secure mode */
         target_cpu->env.cp15.scr_el3 |= SCR_NS;
+
+        /*
+         * If QEMU is providing the equivalent of EL3 firmware, then we need
+         * to make sure a CPU targeting EL2 comes out of reset with a
+         * functional HVC insn.
+         */
+        if (arm_feature(&target_cpu->env, ARM_FEATURE_EL3)
+            && info->target_el == 2) {
+            target_cpu->env.cp15.scr_el3 |= SCR_HCE;
+        }
     }
 
     /* We check if the started CPU is now at the correct level */
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index b5e61cc..cd48ad4 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -1397,7 +1397,7 @@ static void cortex_r5_initfn(Object *obj)
     cpu->id_mmfr1 = 0x00000000;
     cpu->id_mmfr2 = 0x01200000;
     cpu->id_mmfr3 = 0x0211;
-    cpu->id_isar0 = 0x2101111;
+    cpu->id_isar0 = 0x02101111;
     cpu->id_isar1 = 0x13112111;
     cpu->id_isar2 = 0x21232141;
     cpu->id_isar3 = 0x01112131;
@@ -1587,7 +1587,10 @@ static void cortex_a7_initfn(Object *obj)
     cpu->id_mmfr1 = 0x40000000;
     cpu->id_mmfr2 = 0x01240000;
     cpu->id_mmfr3 = 0x02102211;
-    cpu->id_isar0 = 0x01101110;
+    /* a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but
+     * table 4-41 gives 0x02101110, which includes the arm div insns.
+     */
+    cpu->id_isar0 = 0x02101110;
    cpu->id_isar1 = 0x13112111;
     cpu->id_isar2 = 0x21232041;
     cpu->id_isar3 = 0x11112131;
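Both cpu.c fixes above concern ID_ISAR0.Divide, which occupies bits [27:24] of the register (see the FIELD() definitions added to cpu.h below). A minimal standalone illustration, not QEMU code, of what the corrected Cortex-A7 value changes: per the Arm ARM, Divide = 1 means SDIV/UDIV in the Thumb encoding only, while Divide = 2 adds the Arm-encoding divide instructions.

    #include <assert.h>
    #include <stdint.h>

    /* ID_ISAR0.Divide occupies bits [27:24]. */
    static unsigned isar0_divide(uint32_t id_isar0)
    {
        return (id_isar0 >> 24) & 0xf;
    }

    int main(void)
    {
        assert(isar0_divide(0x01101110) == 1); /* TRM page 4-4: Thumb-only divide */
        assert(isar0_divide(0x02101110) == 2); /* table 4-41: Arm + Thumb divide  */
        return 0;
    }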
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 3a2aff1..f00c044 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -911,10 +911,13 @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
 int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
 int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
-void aarch64_sve_change_el(CPUARMState *env, int old_el, int new_el);
+void aarch64_sve_change_el(CPUARMState *env, int old_el,
+                           int new_el, bool el0_a64);
 #else
 static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { }
-static inline void aarch64_sve_change_el(CPUARMState *env, int o, int n) { }
+static inline void aarch64_sve_change_el(CPUARMState *env, int o,
+                                         int n, bool a)
+{ }
 #endif
 
 target_ulong do_arm_semihosting(CPUARMState *env);
@@ -1440,6 +1443,94 @@ FIELD(V7M_CSSELR, LEVEL, 1, 3)
  */
 FIELD(V7M_CSSELR, INDEX, 0, 4)
 
+/*
+ * System register ID fields.
+ */
+FIELD(ID_ISAR0, SWAP, 0, 4)
+FIELD(ID_ISAR0, BITCOUNT, 4, 4)
+FIELD(ID_ISAR0, BITFIELD, 8, 4)
+FIELD(ID_ISAR0, CMPBRANCH, 12, 4)
+FIELD(ID_ISAR0, COPROC, 16, 4)
+FIELD(ID_ISAR0, DEBUG, 20, 4)
+FIELD(ID_ISAR0, DIVIDE, 24, 4)
+
+FIELD(ID_ISAR1, ENDIAN, 0, 4)
+FIELD(ID_ISAR1, EXCEPT, 4, 4)
+FIELD(ID_ISAR1, EXCEPT_AR, 8, 4)
+FIELD(ID_ISAR1, EXTEND, 12, 4)
+FIELD(ID_ISAR1, IFTHEN, 16, 4)
+FIELD(ID_ISAR1, IMMEDIATE, 20, 4)
+FIELD(ID_ISAR1, INTERWORK, 24, 4)
+FIELD(ID_ISAR1, JAZELLE, 28, 4)
+
+FIELD(ID_ISAR2, LOADSTORE, 0, 4)
+FIELD(ID_ISAR2, MEMHINT, 4, 4)
+FIELD(ID_ISAR2, MULTIACCESSINT, 8, 4)
+FIELD(ID_ISAR2, MULT, 12, 4)
+FIELD(ID_ISAR2, MULTS, 16, 4)
+FIELD(ID_ISAR2, MULTU, 20, 4)
+FIELD(ID_ISAR2, PSR_AR, 24, 4)
+FIELD(ID_ISAR2, REVERSAL, 28, 4)
+
+FIELD(ID_ISAR3, SATURATE, 0, 4)
+FIELD(ID_ISAR3, SIMD, 4, 4)
+FIELD(ID_ISAR3, SVC, 8, 4)
+FIELD(ID_ISAR3, SYNCHPRIM, 12, 4)
+FIELD(ID_ISAR3, TABBRANCH, 16, 4)
+FIELD(ID_ISAR3, T32COPY, 20, 4)
+FIELD(ID_ISAR3, TRUENOP, 24, 4)
+FIELD(ID_ISAR3, T32EE, 28, 4)
+
+FIELD(ID_ISAR4, UNPRIV, 0, 4)
+FIELD(ID_ISAR4, WITHSHIFTS, 4, 4)
+FIELD(ID_ISAR4, WRITEBACK, 8, 4)
+FIELD(ID_ISAR4, SMC, 12, 4)
+FIELD(ID_ISAR4, BARRIER, 16, 4)
+FIELD(ID_ISAR4, SYNCHPRIM_FRAC, 20, 4)
+FIELD(ID_ISAR4, PSR_M, 24, 4)
+FIELD(ID_ISAR4, SWP_FRAC, 28, 4)
+
+FIELD(ID_ISAR5, SEVL, 0, 4)
+FIELD(ID_ISAR5, AES, 4, 4)
+FIELD(ID_ISAR5, SHA1, 8, 4)
+FIELD(ID_ISAR5, SHA2, 12, 4)
+FIELD(ID_ISAR5, CRC32, 16, 4)
+FIELD(ID_ISAR5, RDM, 24, 4)
+FIELD(ID_ISAR5, VCMA, 28, 4)
+
+FIELD(ID_ISAR6, JSCVT, 0, 4)
+FIELD(ID_ISAR6, DP, 4, 4)
+FIELD(ID_ISAR6, FHM, 8, 4)
+FIELD(ID_ISAR6, SB, 12, 4)
+FIELD(ID_ISAR6, SPECRES, 16, 4)
+
+FIELD(ID_AA64ISAR0, AES, 4, 4)
+FIELD(ID_AA64ISAR0, SHA1, 8, 4)
+FIELD(ID_AA64ISAR0, SHA2, 12, 4)
+FIELD(ID_AA64ISAR0, CRC32, 16, 4)
+FIELD(ID_AA64ISAR0, ATOMIC, 20, 4)
+FIELD(ID_AA64ISAR0, RDM, 28, 4)
+FIELD(ID_AA64ISAR0, SHA3, 32, 4)
+FIELD(ID_AA64ISAR0, SM3, 36, 4)
+FIELD(ID_AA64ISAR0, SM4, 40, 4)
+FIELD(ID_AA64ISAR0, DP, 44, 4)
+FIELD(ID_AA64ISAR0, FHM, 48, 4)
+FIELD(ID_AA64ISAR0, TS, 52, 4)
+FIELD(ID_AA64ISAR0, TLB, 56, 4)
+FIELD(ID_AA64ISAR0, RNDR, 60, 4)
+
+FIELD(ID_AA64ISAR1, DPB, 0, 4)
+FIELD(ID_AA64ISAR1, APA, 4, 4)
+FIELD(ID_AA64ISAR1, API, 8, 4)
+FIELD(ID_AA64ISAR1, JSCVT, 12, 4)
+FIELD(ID_AA64ISAR1, FCMA, 16, 4)
+FIELD(ID_AA64ISAR1, LRCPC, 20, 4)
+FIELD(ID_AA64ISAR1, GPA, 24, 4)
+FIELD(ID_AA64ISAR1, GPI, 28, 4)
+FIELD(ID_AA64ISAR1, FRINTTS, 32, 4)
+FIELD(ID_AA64ISAR1, SB, 36, 4)
+FIELD(ID_AA64ISAR1, SPECRES, 40, 4)
+
 QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK);
 
 /* If adding a feature bit which corresponds to a Linux ELF
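Each FIELD(REG, NAME, shift, length) above expands, via hw/registerfields.h, into R_REG_NAME_SHIFT, R_REG_NAME_LENGTH and R_REG_NAME_MASK constants, so ID-register feature tests can use the generic extract helpers instead of hand-written shift/mask code. A sketch of the intended consumption (cpu_has_arm_div is a hypothetical helper, not part of this patch):

    #include "qemu/osdep.h"
    #include "cpu.h"   /* pulls in hw/registerfields.h and the FIELDs above */

    /* Hypothetical feature test: ID_ISAR0.Divide >= 2 means the Arm-encoding
     * SDIV/UDIV instructions are implemented, not just the Thumb ones. */
    static inline bool cpu_has_arm_div(const ARMCPU *cpu)
    {
        return FIELD_EX32(cpu->id_isar0, ID_ISAR0, DIVIDE) > 1;
    }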
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index db71504..44fdf0f 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -51,7 +51,7 @@ static uint64_t a57_a53_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
 }
 #endif
 
-static const ARMCPRegInfo cortex_a57_a53_cp_reginfo[] = {
+static const ARMCPRegInfo cortex_a72_a57_a53_cp_reginfo[] = {
 #ifndef CONFIG_USER_ONLY
     { .name = "L2CTLR_EL1", .state = ARM_CP_STATE_AA64,
       .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 2,
@@ -156,7 +156,7 @@ static void aarch64_a57_initfn(Object *obj)
     cpu->gic_num_lrs = 4;
     cpu->gic_vpribits = 5;
     cpu->gic_vprebits = 5;
-    define_arm_cp_regs(cpu, cortex_a57_a53_cp_reginfo);
+    define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
 }
 
 static void aarch64_a53_initfn(Object *obj)
@@ -215,7 +215,66 @@ static void aarch64_a53_initfn(Object *obj)
     cpu->gic_num_lrs = 4;
     cpu->gic_vpribits = 5;
     cpu->gic_vprebits = 5;
-    define_arm_cp_regs(cpu, cortex_a57_a53_cp_reginfo);
+    define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
+}
+
+static void aarch64_a72_initfn(Object *obj)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+
+    cpu->dtb_compatible = "arm,cortex-a72";
+    set_feature(&cpu->env, ARM_FEATURE_V8);
+    set_feature(&cpu->env, ARM_FEATURE_VFP4);
+    set_feature(&cpu->env, ARM_FEATURE_NEON);
+    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+    set_feature(&cpu->env, ARM_FEATURE_V8_AES);
+    set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
+    set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
+    set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
+    set_feature(&cpu->env, ARM_FEATURE_CRC);
+    set_feature(&cpu->env, ARM_FEATURE_EL2);
+    set_feature(&cpu->env, ARM_FEATURE_EL3);
+    set_feature(&cpu->env, ARM_FEATURE_PMU);
+    cpu->midr = 0x410fd083;
+    cpu->revidr = 0x00000000;
+    cpu->reset_fpsid = 0x41034080;
+    cpu->mvfr0 = 0x10110222;
+    cpu->mvfr1 = 0x12111111;
+    cpu->mvfr2 = 0x00000043;
+    cpu->ctr = 0x8444c004;
+    cpu->reset_sctlr = 0x00c50838;
+    cpu->id_pfr0 = 0x00000131;
+    cpu->id_pfr1 = 0x00011011;
+    cpu->id_dfr0 = 0x03010066;
+    cpu->id_afr0 = 0x00000000;
+    cpu->id_mmfr0 = 0x10201105;
+    cpu->id_mmfr1 = 0x40000000;
+    cpu->id_mmfr2 = 0x01260000;
+    cpu->id_mmfr3 = 0x02102211;
+    cpu->id_isar0 = 0x02101110;
+    cpu->id_isar1 = 0x13112111;
+    cpu->id_isar2 = 0x21232042;
+    cpu->id_isar3 = 0x01112131;
+    cpu->id_isar4 = 0x00011142;
+    cpu->id_isar5 = 0x00011121;
+    cpu->id_aa64pfr0 = 0x00002222;
+    cpu->id_aa64dfr0 = 0x10305106;
+    cpu->pmceid0 = 0x00000000;
+    cpu->pmceid1 = 0x00000000;
+    cpu->id_aa64isar0 = 0x00011120;
+    cpu->id_aa64mmfr0 = 0x00001124;
+    cpu->dbgdidr = 0x3516d000;
+    cpu->clidr = 0x0a200023;
+    cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
+    cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
+    cpu->ccsidr[2] = 0x707fe07a; /* 1MB L2 cache */
+    cpu->dcz_blocksize = 4; /* 64 bytes */
+    cpu->gic_num_lrs = 4;
+    cpu->gic_vpribits = 5;
+    cpu->gic_vprebits = 5;
+    define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo);
 }
 
 static void cpu_max_get_sve_vq(Object *obj, Visitor *v, const char *name,
@@ -293,6 +352,7 @@ typedef struct ARMCPUInfo {
 static const ARMCPUInfo aarch64_cpus[] = {
     { .name = "cortex-a57", .initfn = aarch64_a57_initfn },
     { .name = "cortex-a53", .initfn = aarch64_a53_initfn },
+    { .name = "cortex-a72", .initfn = aarch64_a72_initfn },
     { .name = "max", .initfn = aarch64_max_initfn },
     { .name = NULL }
 };
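With the aarch64_cpus[] entry above in place, the new model can be selected like any other CPU type; for example (a usage sketch; the machine, memory size and kernel image are illustrative):

    qemu-system-aarch64 -machine virt -cpu cortex-a72 -m 1G -kernel Image -nographic

Running `qemu-system-aarch64 -cpu help` should now list cortex-a72 among the available CPUs.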
diff --git a/target/arm/helper.c b/target/arm/helper.c
index c83f7c1..e394656 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1179,6 +1179,7 @@ static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
 {
+    value &= pmu_counter_mask(env);
     env->cp15.c9_pmovsr &= ~value;
 }
 
@@ -1423,12 +1424,14 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
       .writefn = pmintenset_write, .raw_writefn = raw_write,
       .resetvalue = 0x0 },
     { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
-      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
+      .access = PL1_RW, .accessfn = access_tpm,
+      .type = ARM_CP_ALIAS | ARM_CP_IO,
       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
       .writefn = pmintenclr_write, },
     { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
-      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
+      .access = PL1_RW, .accessfn = access_tpm,
+      .type = ARM_CP_ALIAS | ARM_CP_IO,
       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
       .writefn = pmintenclr_write },
     { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
@@ -6469,7 +6472,7 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
     target_ulong page_size;
     hwaddr physaddr;
     int prot;
-    ARMMMUFaultInfo fi;
+    ARMMMUFaultInfo fi = {};
     bool secure = mmu_idx & ARM_MMU_IDX_M_S;
     int exc;
     bool exc_secure;
@@ -6531,7 +6534,7 @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
     target_ulong page_size;
     hwaddr physaddr;
     int prot;
-    ARMMMUFaultInfo fi;
+    ARMMMUFaultInfo fi = {};
     bool secure = mmu_idx & ARM_MMU_IDX_M_S;
     int exc;
     bool exc_secure;
@@ -8374,7 +8377,11 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
     unsigned int new_mode = aarch64_pstate_mode(new_el, true);
     unsigned int cur_el = arm_current_el(env);
 
-    aarch64_sve_change_el(env, cur_el, new_el);
+    /*
+     * Note that new_el can never be 0.  If cur_el is 0, then
+     * el0_a64 is is_a64(), else el0_a64 is ignored.
+     */
+    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
 
     if (cur_el < new_el) {
         /* Entry vector offset depends on whether the implemented EL
@@ -12791,9 +12798,11 @@ void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
 /*
  * Notice a change in SVE vector size when changing EL.
  */
-void aarch64_sve_change_el(CPUARMState *env, int old_el, int new_el)
+void aarch64_sve_change_el(CPUARMState *env, int old_el,
+                           int new_el, bool el0_a64)
 {
     int old_len, new_len;
+    bool old_a64, new_a64;
 
     /* Nothing to do if no SVE.  */
     if (!arm_feature(env, ARM_FEATURE_SVE)) {
@@ -12817,9 +12826,11 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el, int new_el)
      * we already have the correct register contents when encountering the
      * vq0->vq0 transition between EL0->EL1.
      */
-    old_len = (arm_el_is_aa64(env, old_el) && !sve_exception_el(env, old_el)
+    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
+    old_len = (old_a64 && !sve_exception_el(env, old_el)
                ? sve_zcr_len_for_el(env, old_el) : 0);
-    new_len = (arm_el_is_aa64(env, new_el) && !sve_exception_el(env, new_el)
+    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
+    new_len = (new_a64 && !sve_exception_el(env, new_el)
                ? sve_zcr_len_for_el(env, new_el) : 0);
 
     /* When changing vector length, clear inaccessible state.  */
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index fb15a13..d915579 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -1101,7 +1101,11 @@ void HELPER(exception_return)(CPUARMState *env)
                       "AArch64 EL%d PC 0x%" PRIx64 "\n",
                       cur_el, new_el, env->pc);
     }
 
-    aarch64_sve_change_el(env, cur_el, new_el);
+    /*
+     * Note that cur_el can never be 0.  If new_el is 0, then
+     * el0_a64 is return_to_aa64, else el0_a64 is ignored.
+     */
+    aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64);
 
     qemu_mutex_lock_iothread();
     arm_call_el_change_hook(arm_env_get_cpu(env));
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index ab160c2..aecf307 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -137,7 +137,8 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
 
     if (unlikely(!(prot & type))) {
         /* The access isn't allowed -- Inst/Data Memory Protection Fault. */
-        ret = (type & PAGE_EXEC ? EXCP_IMP : EXCP_DMP);
+        ret = (type & PAGE_EXEC ? EXCP_IMP :
+               prot & PAGE_READ ? EXCP_DMP : EXCP_DMAR);
         goto egress;
     }
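The hppa change distinguishes two data-side faults that were previously conflated: when the page grants no read permission at all, the access now raises EXCP_DMAR (assumed here to be hppa's data memory access rights trap) instead of EXCP_DMP, which remains the protection fault for pages that are at least readable. A standalone sketch of the new selection, with mock constants rather than the QEMU definitions:

    #include <stdio.h>

    /* Mock protection bits and fault codes, for illustration only. */
    enum { PAGE_READ = 1, PAGE_WRITE = 2, PAGE_EXEC = 4 };
    enum { EXCP_IMP, EXCP_DMP, EXCP_DMAR };

    /* Mirrors the new priority: instruction faults first, then protection
     * faults on readable pages, then access-rights faults. */
    static int fault_for(int prot, int type)
    {
        return type & PAGE_EXEC ? EXCP_IMP
             : prot & PAGE_READ ? EXCP_DMP
             : EXCP_DMAR;
    }

    int main(void)
    {
        printf("%d\n", fault_for(PAGE_READ, PAGE_WRITE)); /* EXCP_DMP: page readable  */
        printf("%d\n", fault_for(0, PAGE_WRITE));         /* EXCP_DMAR: no access bits */
        return 0;
    }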