Diffstat (limited to 'target/arm/internals.h')
-rw-r--r--   target/arm/internals.h   269
1 file changed, 232 insertions(+), 37 deletions(-)
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 757b1fa..21a8d67 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -25,9 +25,13 @@
 #ifndef TARGET_ARM_INTERNALS_H
 #define TARGET_ARM_INTERNALS_H
 
+#include "exec/hwaddr.h"
+#include "exec/vaddr.h"
 #include "exec/breakpoint.h"
+#include "accel/tcg/tb-cpu-state.h"
 #include "hw/registerfields.h"
 #include "tcg/tcg-gvec-desc.h"
+#include "system/memory.h"
 #include "syndrome.h"
 #include "cpu-features.h"
 
@@ -350,13 +354,14 @@ static inline int r14_bank_number(int mode)
 }
 
 void arm_cpu_register(const ARMCPUInfo *info);
-void aarch64_cpu_register(const ARMCPUInfo *info);
 
 void register_cp_regs_for_features(ARMCPU *cpu);
 void init_cpreg_list(ARMCPU *cpu);
 
 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
 void arm_translate_init(void);
+void arm_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc);
 
 void arm_cpu_register_gdb_commands(ARMCPU *cpu);
 void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
@@ -367,10 +372,12 @@ void arm_restore_state_to_opc(CPUState *cs,
                               const uint64_t *data);
 
 #ifdef CONFIG_TCG
+TCGTBCPUState arm_get_tb_cpu_state(CPUState *cs);
 void arm_cpu_synchronize_from_tb(CPUState *cs,
                                  const TranslationBlock *tb);
 /* Our implementation of TCGCPUOps::cpu_exec_halt */
 bool arm_cpu_exec_halt(CPUState *cs);
+int arm_cpu_mmu_index(CPUState *cs, bool ifetch);
 #endif /* CONFIG_TCG */
 
 typedef enum ARMFPRounding {
@@ -390,6 +397,141 @@ static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
     return arm_rmode_to_sf_map[rmode];
 }
 
+/* Return the effective value of SCR_EL3.RW */
+static inline bool arm_scr_rw_eff(CPUARMState *env)
+{
+    /*
+     * SCR_EL3.RW has an effective value of 1 if:
+     *  - we are NS and EL2 is implemented but doesn't support AArch32
+     *  - we are S and EL2 is enabled (in which case it must be AArch64)
+     */
+    ARMCPU *cpu = env_archcpu(env);
+
+    if (env->cp15.scr_el3 & SCR_RW) {
+        return true;
+    }
+    if (env->cp15.scr_el3 & SCR_NS) {
+        return arm_feature(env, ARM_FEATURE_EL2) &&
+            !cpu_isar_feature(aa64_aa32_el2, cpu);
+    } else {
+        return env->cp15.scr_el3 & SCR_EEL2;
+    }
+}
+
+/* Return true if the specified exception level is running in AArch64 state. */
+static inline bool arm_el_is_aa64(CPUARMState *env, int el)
+{
+    /*
+     * This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
+     * and if we're not in EL0 then the state of EL0 isn't well defined.)
+     */
+    assert(el >= 1 && el <= 3);
+    bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);
+
+    /*
+     * The highest exception level is always at the maximum supported
+     * register width, and then lower levels have a register width controlled
+     * by bits in the SCR or HCR registers.
+     */
+    if (el == 3) {
+        return aa64;
+    }
+
+    if (arm_feature(env, ARM_FEATURE_EL3)) {
+        aa64 = aa64 && arm_scr_rw_eff(env);
+    }
+
+    if (el == 2) {
+        return aa64;
+    }
+
+    if (arm_is_el2_enabled(env)) {
+        aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
+    }
+
+    return aa64;
+}
+
+/*
+ * Return the current Exception Level (as per ARMv8; note that this differs
+ * from the ARMv7 Privilege Level).
+ */
+static inline int arm_current_el(CPUARMState *env)
+{
+    if (arm_feature(env, ARM_FEATURE_M)) {
+        return arm_v7m_is_handler_mode(env) ||
+            !(env->v7m.control[env->v7m.secure] & 1);
+    }
+
+    if (is_a64(env)) {
+        return extract32(env->pstate, 2, 2);
+    }
+
+    switch (env->uncached_cpsr & 0x1f) {
+    case ARM_CPU_MODE_USR:
+        return 0;
+    case ARM_CPU_MODE_HYP:
+        return 2;
+    case ARM_CPU_MODE_MON:
+        return 3;
+    default:
+        if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
+            /* If EL3 is 32-bit then all secure privileged modes run in EL3 */
+            return 3;
+        }
+
+        return 1;
+    }
+}
+
+static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
+                                                  bool sctlr_b)
+{
+#ifdef CONFIG_USER_ONLY
+    /*
+     * In system mode, BE32 is modelled in line with the
+     * architecture (as word-invariant big-endianness), where loads
+     * and stores are done little endian but from addresses which
+     * are adjusted by XORing with the appropriate constant. So the
+     * endianness to use for the raw data access is not affected by
+     * SCTLR.B.
+     * In user mode, however, we model BE32 as byte-invariant
+     * big-endianness (because user-only code cannot tell the
+     * difference), and so we need to use a data access endianness
+     * that depends on SCTLR.B.
+     */
+    if (sctlr_b) {
+        return true;
+    }
+#endif
+    /* In 32bit endianness is determined by looking at CPSR's E bit */
+    return env->uncached_cpsr & CPSR_E;
+}
+
+static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
+{
+    return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
+}
+
+/* Return true if the processor is in big-endian mode. */
+static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
+{
+    if (!is_a64(env)) {
+        return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
+    } else {
+        int cur_el = arm_current_el(env);
+        uint64_t sctlr = arm_sctlr(env, cur_el);
+        return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
+    }
+}
+
+#ifdef CONFIG_USER_ONLY
+static inline bool arm_cpu_bswap_data(CPUARMState *env)
+{
+    return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env);
+}
+#endif
+
 static inline void aarch64_save_sp(CPUARMState *env, int el)
 {
     if (env->pstate & PSTATE_SP) {
@@ -436,6 +578,25 @@ static inline void update_spsel(CPUARMState *env, uint32_t imm)
  */
 unsigned int arm_pamax(ARMCPU *cpu);
 
+/*
+ * round_down_to_parange_index
+ * @bit_size: uint8_t
+ *
+ * Rounds down the bit_size supplied to the first supported ARM physical
+ * address range and returns the index for this. The index is intended to
+ * be used to set ID_AA64MMFR0_EL1's PARANGE bits.
+ */
+uint8_t round_down_to_parange_index(uint8_t bit_size);
+
+/*
+ * round_down_to_parange_bit_size
+ * @bit_size: uint8_t
+ *
+ * Rounds down the bit_size supplied to the first supported ARM physical
+ * address range bit size and returns this.
+ */
+uint8_t round_down_to_parange_bit_size(uint8_t bit_size);
+
 /* Return true if extended addresses are enabled.
  * This is always the case if our translation regime is 64 bit,
  * but depends on TTBCR.EAE for 32 bit.
@@ -489,16 +650,12 @@ static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
 {
     return false;
 }
-static inline void arm_handle_psci_call(ARMCPU *cpu)
-{
-    g_assert_not_reached();
-}
 #else
 /* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
 bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
+#endif
 /* Actually handle a PSCI call */
 void arm_handle_psci_call(ARMCPU *cpu);
-#endif
 
 /**
  * arm_clear_exclusive: clear the exclusive monitor
@@ -568,8 +725,8 @@ typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
 struct ARMMMUFaultInfo {
     ARMFaultType type;
     ARMGPCF gpcf;
-    target_ulong s2addr;
-    target_ulong paddr;
+    hwaddr s2addr;
+    hwaddr paddr;
     ARMSecuritySpace paddr_space;
     int level;
     int domain;
@@ -783,9 +940,9 @@ void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
 void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type, uintptr_t ra);
 #else
-bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
-                      MMUAccessType access_type, int mmu_idx,
-                      bool probe, uintptr_t retaddr);
+bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
+                            MMUAccessType access_type, int mmu_idx,
+                            MemOp memop, int size, bool probe, uintptr_t ra);
 #endif
 
 static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
@@ -852,7 +1009,16 @@ static inline void arm_call_el_change_hook(ARMCPU *cpu)
     }
 }
 
-/* Return true if this address translation regime has two ranges. */
+/*
+ * Return true if this address translation regime has two ranges.
+ * Note that this will not return the correct answer for AArch32
+ * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is
+ * never called from a context where EL3 can be AArch32. (The
+ * correct return value for ARMMMUIdx_E3 would be different for
+ * that case, so we can't just make the function return the
+ * correct value anyway; we would need an extra "bool e3_is_aarch32"
+ * argument which all the current callsites would pass as 'false'.)
+ */
 static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
 {
     switch (mmu_idx) {
@@ -877,6 +1043,7 @@ static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
     case ARMMMUIdx_Stage1_E1_PAN:
     case ARMMMUIdx_E10_1_PAN:
     case ARMMMUIdx_E20_2_PAN:
+    case ARMMMUIdx_E30_3_PAN:
         return true;
     default:
         return false;
@@ -900,10 +1067,11 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
     case ARMMMUIdx_E2:
         return 2;
     case ARMMMUIdx_E3:
+    case ARMMMUIdx_E30_0:
+    case ARMMMUIdx_E30_3_PAN:
         return 3;
     case ARMMMUIdx_E10_0:
     case ARMMMUIdx_Stage1_E0:
-        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
     case ARMMMUIdx_Stage1_E1:
     case ARMMMUIdx_Stage1_E1_PAN:
     case ARMMMUIdx_E10_1:
@@ -925,7 +1093,9 @@
 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
     switch (mmu_idx) {
+    case ARMMMUIdx_E10_0:
     case ARMMMUIdx_E20_0:
+    case ARMMMUIdx_E30_0:
     case ARMMMUIdx_Stage1_E0:
     case ARMMMUIdx_MUser:
     case ARMMMUIdx_MSUser:
@@ -934,10 +1104,6 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
         return true;
     default:
         return false;
-    case ARMMMUIdx_E10_0:
-    case ARMMMUIdx_E10_1:
-    case ARMMMUIdx_E10_1_PAN:
-        g_assert_not_reached();
     }
 }
 
@@ -1005,7 +1171,7 @@ static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
 static inline int arm_num_brps(ARMCPU *cpu)
 {
     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
-        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
+        return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, BRPS) + 1;
     } else {
         return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
     }
@@ -1019,7 +1185,7 @@ static inline int arm_num_wrps(ARMCPU *cpu)
 {
     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
-        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
+        return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, WRPS) + 1;
     } else {
         return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
     }
@@ -1033,7 +1199,7 @@ static inline int arm_num_ctx_cmps(ARMCPU *cpu)
 {
     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
-        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
+        return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, CTX_CMPS) + 1;
     } else {
         return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
     }
@@ -1394,6 +1560,7 @@ typedef struct GetPhysAddrResult {
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
+ * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
@@ -1411,8 +1578,8 @@ typedef struct GetPhysAddrResult {
 * * for PSMAv5 based systems we don't bother to return a full FSR format
 *   value.
 */
-bool get_phys_addr(CPUARMState *env, target_ulong address,
-                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
+bool get_phys_addr(CPUARMState *env, vaddr address,
+                   MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
     __attribute__((nonnull));
 
@@ -1422,6 +1589,7 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
+ * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
@@ -1430,8 +1598,8 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
-bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
-                                    MMUAccessType access_type,
+bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
+                                    MMUAccessType access_type, MemOp memop,
                                     ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                     GetPhysAddrResult *result,
                                     ARMMMUFaultInfo *fi)
@@ -1639,7 +1807,6 @@ static inline uint64_t pmu_counter_mask(CPUARMState *env)
     return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
 }
 
-#ifdef TARGET_AARCH64
 GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
 int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
 int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
@@ -1657,7 +1824,12 @@ void aarch64_max_tcg_initfn(Object *obj);
 void aarch64_add_pauth_properties(Object *obj);
 void aarch64_add_sve_properties(Object *obj);
 void aarch64_add_sme_properties(Object *obj);
-#endif
+
+/* Return true if the gdbstub is presenting an AArch64 CPU */
+static inline bool arm_gdbstub_is_aarch64(ARMCPU *cpu)
+{
+    return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
+}
 
 /* Read the CONTROL register as the MRS instruction would. */
 uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);
@@ -1697,6 +1869,9 @@ static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
 /* Add the cpreg definitions for debug related system registers */
 void define_debug_regs(ARMCPU *cpu);
 
+/* Add the cpreg definitions for TLBI instructions */
+void define_tlb_insn_regs(ARMCPU *cpu);
+
 /* Effective value of MDCR_EL2 */
 static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
 {
@@ -1728,8 +1903,6 @@ static inline bool arm_fgt_active(CPUARMState *env, int el)
         (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
 }
 
-void assert_hflags_rebuild_correctly(CPUARMState *env);
-
 /*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
@@ -1771,20 +1944,42 @@ extern GArray *hw_breakpoints, *hw_watchpoints;
 #define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
 #define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))
 
-bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
-int insert_hw_breakpoint(target_ulong pc);
-int delete_hw_breakpoint(target_ulong pc);
+bool find_hw_breakpoint(CPUState *cpu, vaddr pc);
+int insert_hw_breakpoint(vaddr pc);
+int delete_hw_breakpoint(vaddr pc);
 
-bool check_watchpoint_in_range(int i, target_ulong addr);
-CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
-int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
-int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);
+bool check_watchpoint_in_range(int i, vaddr addr);
+CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, vaddr addr);
+int insert_hw_watchpoint(vaddr addr, vaddr len, int type);
+int delete_hw_watchpoint(vaddr addr, vaddr len, int type);
 
 /* Return the current value of the system counter in ticks */
 uint64_t gt_get_countervalue(CPUARMState *env);
 /*
 * Return the currently applicable offset between the system counter
- * and CNTVCT_EL0 (this will be either 0 or the value of CNTVOFF_EL2).
+ * and the counter for the specified timer, as used for direct register
+ * accesses.
 */
-uint64_t gt_virt_cnt_offset(CPUARMState *env);
+uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx);
+
+/*
+ * Return mask of ARMMMUIdxBit values corresponding to an "invalidate
+ * all EL1" scope; this covers stage 1 and stage 2.
+ */
+int alle1_tlbmask(CPUARMState *env);
+
+/* Set the float_status behaviour to match the Arm defaults */
+void arm_set_default_fp_behaviours(float_status *s);
+/* Set the float_status behaviour to match Arm FPCR.AH=1 behaviour */
+void arm_set_ah_fp_behaviours(float_status *s);
+/* Read the float_status info and return the appropriate FPSR value */
+uint32_t vfp_get_fpsr_from_host(CPUARMState *env);
+/* Clear the exception status flags from all float_status fields */
+void vfp_clear_float_status_exc_flags(CPUARMState *env);
+/*
+ * Update float_status fields to handle the bits of the FPCR
+ * specified by mask changing to the values in val.
+ */
+void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask);
+
 #endif
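Note: the register-width cascade introduced above in arm_el_is_aa64() (EL3 runs at the CPU's maximum width, SCR_EL3.RW then gates EL2, and HCR_EL2.RW gates EL1) can be exercised in isolation. The standalone sketch below is not part of the patch; its parameter names are made up, standing in for the real feature bits and system-register fields, and it only mirrors the control flow of the new helper.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified model of the arm_el_is_aa64() cascade. All inputs are
 * plain booleans; none of these names exist in QEMU.
 */
static bool el_is_aa64(int el, bool cpu_has_aa64, bool have_el3,
                       bool scr_rw_eff, bool el2_enabled, bool hcr_rw)
{
    assert(el >= 1 && el <= 3);
    bool aa64 = cpu_has_aa64;

    if (el == 3) {
        return aa64;               /* highest EL runs at maximum width */
    }
    if (have_el3) {
        aa64 = aa64 && scr_rw_eff; /* EL2 width gated by SCR_EL3.RW */
    }
    if (el == 2) {
        return aa64;
    }
    if (el2_enabled) {
        aa64 = aa64 && hcr_rw;     /* EL1 width gated by HCR_EL2.RW */
    }
    return aa64;
}

int main(void)
{
    /* AArch64 EL2 with HCR_EL2.RW = 0: EL2 stays 64-bit, EL1 drops to 32-bit */
    printf("EL2 aa64: %d\n", el_is_aa64(2, true, true, true, true, false));
    printf("EL1 aa64: %d\n", el_is_aa64(1, true, true, true, true, false));
    return 0;
}

Running it prints EL2 as AArch64 but EL1 as AArch32 for that configuration, which is exactly the one-way narrowing the helper has to capture: each lower exception level may be narrower than the one above it, never wider.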