Diffstat (limited to 'target')
-rw-r--r--  target/arm/tcg/cpu32.c          |  2
-rw-r--r--  target/arm/tcg/mte_helper.c     | 12
-rw-r--r--  target/arm/tcg/translate-a64.c  |  4
-rw-r--r--  target/hppa/cpu-param.h         |  3
-rw-r--r--  target/hppa/cpu.h               | 27
-rw-r--r--  target/hppa/int_helper.c        |  2
-rw-r--r--  target/hppa/mem_helper.c        | 97
-rw-r--r--  target/hppa/op_helper.c         |  5
-rw-r--r--  target/hppa/translate.c         | 41
9 files changed, 116 insertions, 77 deletions
diff --git a/target/arm/tcg/cpu32.c b/target/arm/tcg/cpu32.c
index 0d5d8e3..d9e0e2a 100644
--- a/target/arm/tcg/cpu32.c
+++ b/target/arm/tcg/cpu32.c
@@ -351,6 +351,7 @@ static void cortex_a8_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
     set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
     set_feature(&cpu->env, ARM_FEATURE_EL3);
+    set_feature(&cpu->env, ARM_FEATURE_PMU);
     cpu->midr = 0x410fc080;
     cpu->reset_fpsid = 0x410330c0;
     cpu->isar.mvfr0 = 0x11110222;
@@ -418,6 +419,7 @@ static void cortex_a9_initfn(Object *obj)
     set_feature(&cpu->env, ARM_FEATURE_NEON);
     set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
     set_feature(&cpu->env, ARM_FEATURE_EL3);
+    set_feature(&cpu->env, ARM_FEATURE_PMU);
     /*
      * Note that A9 supports the MP extensions even for
      * A9UP and single-core A9MP (which are both different
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index 70ac876..ffb8ea1 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -1101,10 +1101,18 @@ uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
     uint32_t n;
 
     mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
-    /* True probe; this will never fault */
+    /*
+     * True probe; this will never fault.  Note that our caller passes
+     * us a pointer to the end of the region, but allocation_tag_mem_probe()
+     * wants a pointer to the start.  Because we know we don't span a page
+     * boundary and that allocation_tag_mem_probe() doesn't otherwise care
+     * about the size, pass in a size of 1 byte.  This is simpler than
+     * adjusting the ptr to point to the start of the region and then having
+     * to adjust the returned 'mem' to get the end of the tag memory.
+     */
     mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
                                    w ? MMU_DATA_STORE : MMU_DATA_LOAD,
-                                   size, MMU_DATA_LOAD, true, 0);
+                                   1, MMU_DATA_LOAD, true, 0);
     if (!mem) {
         return size;
     }
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 41484d8..a2e49c3 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -2351,6 +2351,8 @@ static bool trans_SVC(DisasContext *s, arg_i *a)
 
 static bool trans_HVC(DisasContext *s, arg_i *a)
 {
+    int target_el = s->current_el == 3 ? 3 : 2;
+
     if (s->current_el == 0) {
         unallocated_encoding(s);
         return true;
@@ -2363,7 +2365,7 @@ static bool trans_HVC(DisasContext *s, arg_i *a)
     gen_helper_pre_hvc(tcg_env);
     /* Architecture requires ss advance before we do the actual work */
     gen_ss_advance(s);
-    gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(a->imm), 2);
+    gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(a->imm), target_el);
     return true;
 }
diff --git a/target/hppa/cpu-param.h b/target/hppa/cpu-param.h
index 6746869..bb3d7ef 100644
--- a/target/hppa/cpu-param.h
+++ b/target/hppa/cpu-param.h
@@ -14,7 +14,8 @@
 # define TARGET_PHYS_ADDR_SPACE_BITS  32
 # define TARGET_VIRT_ADDR_SPACE_BITS  32
 #else
-# define TARGET_PHYS_ADDR_SPACE_BITS  64
+/* ??? PA-8000 through 8600 have 40 bits; PA-8700 and 8900 have 44 bits. */
+# define TARGET_PHYS_ADDR_SPACE_BITS  40
 # define TARGET_VIRT_ADDR_SPACE_BITS  64
 #endif
 
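The trans_HVC change in translate-a64.c above corrects exception routing for HVC executed at EL3: the exception must target EL3 itself rather than EL2. A standalone restatement of that rule (illustrative code, not part of the patch):

    #include <assert.h>

    /* HVC taken from EL3 targets EL3; taken from EL1 or EL2 it targets EL2.
     * (EL0 is excluded: the hunk above treats HVC at EL0 as unallocated.) */
    static int hvc_target_el(int current_el)
    {
        return current_el == 3 ? 3 : 2;
    }

    int main(void)
    {
        assert(hvc_target_el(1) == 2);
        assert(hvc_target_el(2) == 2);
        assert(hvc_target_el(3) == 3);
        return 0;
    }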
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index cecec59..bcfed04 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -31,23 +31,25 @@
    basis.  It's probably easier to fall back to a strong memory model.  */
 #define TCG_GUEST_DEFAULT_MO        TCG_MO_ALL
 
-#define MMU_KERNEL_IDX    7
-#define MMU_KERNEL_P_IDX  8
-#define MMU_PL1_IDX       9
-#define MMU_PL1_P_IDX     10
-#define MMU_PL2_IDX       11
-#define MMU_PL2_P_IDX     12
-#define MMU_USER_IDX      13
-#define MMU_USER_P_IDX    14
-#define MMU_PHYS_IDX      15
-
+#define MMU_ABS_W_IDX     6
+#define MMU_ABS_IDX       7
+#define MMU_KERNEL_IDX    8
+#define MMU_KERNEL_P_IDX  9
+#define MMU_PL1_IDX       10
+#define MMU_PL1_P_IDX     11
+#define MMU_PL2_IDX       12
+#define MMU_PL2_P_IDX     13
+#define MMU_USER_IDX      14
+#define MMU_USER_P_IDX    15
+
+#define MMU_IDX_MMU_DISABLED(MIDX)  ((MIDX) < MMU_KERNEL_IDX)
 #define MMU_IDX_TO_PRIV(MIDX)       (((MIDX) - MMU_KERNEL_IDX) / 2)
 #define MMU_IDX_TO_P(MIDX)          (((MIDX) - MMU_KERNEL_IDX) & 1)
 #define PRIV_P_TO_MMU_IDX(PRIV, P)  ((PRIV) * 2 + !!(P) + MMU_KERNEL_IDX)
 
 #define TARGET_INSN_START_EXTRA_WORDS 2
 
-/* No need to flush MMU_PHYS_IDX  */
+/* No need to flush MMU_ABS*_IDX  */
 #define HPPA_MMU_FLUSH_MASK                             \
         (1 << MMU_KERNEL_IDX | 1 << MMU_KERNEL_P_IDX |  \
          1 << MMU_PL1_IDX    | 1 << MMU_PL1_P_IDX    |  \
@@ -287,7 +289,8 @@ static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch)
     if (env->psw & (ifetch ? PSW_C : PSW_D)) {
         return PRIV_P_TO_MMU_IDX(env->iaoq_f & 3, env->psw & PSW_P);
     }
-    return MMU_PHYS_IDX;  /* mmu disabled */
+    /* mmu disabled */
+    return env->psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
 #endif
 }
 
diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c
index 467ee7d..98e9d68 100644
--- a/target/hppa/int_helper.c
+++ b/target/hppa/int_helper.c
@@ -126,7 +126,7 @@ void hppa_cpu_do_interrupt(CPUState *cs)
             env->cr[CR_IIASQ] =
                 hppa_form_gva_psw(old_psw, env->iasq_f, env->iaoq_f) >> 32;
             env->cr_back[0] =
-                hppa_form_gva_psw(old_psw, env->iasq_f, env->iaoq_f) >> 32;
+                hppa_form_gva_psw(old_psw, env->iasq_b, env->iaoq_b) >> 32;
         } else {
             env->cr[CR_IIASQ] = 0;
             env->cr_back[0] = 0;
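The renumbering in cpu.h above places the two translation-disabled indexes below MMU_KERNEL_IDX, so a single comparison classifies any index, and the priv/P encoding still round-trips. A standalone check of those layout invariants (illustrative, not part of the patch):

    #include <assert.h>

    /* Index layout and helper macros, restated from cpu.h above. */
    #define MMU_ABS_W_IDX     6
    #define MMU_ABS_IDX       7
    #define MMU_KERNEL_IDX    8

    #define MMU_IDX_MMU_DISABLED(MIDX)  ((MIDX) < MMU_KERNEL_IDX)
    #define MMU_IDX_TO_PRIV(MIDX)       (((MIDX) - MMU_KERNEL_IDX) / 2)
    #define MMU_IDX_TO_P(MIDX)          (((MIDX) - MMU_KERNEL_IDX) & 1)
    #define PRIV_P_TO_MMU_IDX(PRIV, P)  ((PRIV) * 2 + !!(P) + MMU_KERNEL_IDX)

    int main(void)
    {
        /* Every privilege/P combination round-trips through its MMU index. */
        for (int priv = 0; priv < 4; priv++) {
            for (int p = 0; p < 2; p++) {
                int idx = PRIV_P_TO_MMU_IDX(priv, p);
                assert(!MMU_IDX_MMU_DISABLED(idx));
                assert(MMU_IDX_TO_PRIV(idx) == priv);
                assert(MMU_IDX_TO_P(idx) == p);
            }
        }
        /* The two no-translation indexes sit below MMU_KERNEL_IDX. */
        assert(MMU_IDX_MMU_DISABLED(MMU_ABS_W_IDX));
        assert(MMU_IDX_MMU_DISABLED(MMU_ABS_IDX));
        return 0;
    }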
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index 858ce6e..08abd1a 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -27,41 +27,39 @@
 
 hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
 {
-    if (likely(extract64(addr, 58, 4) != 0xf)) {
-        /* Memory address space */
-        return addr & MAKE_64BIT_MASK(0, 62);
-    }
-    if (extract64(addr, 54, 4) != 0) {
-        /* I/O address space */
-        return addr | MAKE_64BIT_MASK(62, 2);
-    }
-    /* PDC address space */
-    return (addr & MAKE_64BIT_MASK(0, 54)) | MAKE_64BIT_MASK(60, 4);
+    /*
+     * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
+     * an algorithm in which a 62-bit absolute address is transformed to
+     * a 64-bit physical address.  This must then be combined with that
+     * pictured in Figure H-11 "Physical Address Space Mapping", in which
+     * the full physical address is truncated to the N-bit physical address
+     * supported by the implementation.
+     *
+     * Since the supported physical address space is below 54 bits, the
+     * H-8 algorithm is moot and all that is left is to truncate.
+     */
+    QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
+    return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
 }
 
 hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
 {
+    /*
+     * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
+     * combined with Figure H-11, as above.
+     */
     if (likely(extract32(addr, 28, 4) != 0xf)) {
         /* Memory address space */
-        return addr & MAKE_64BIT_MASK(0, 32);
-    }
-    if (extract32(addr, 24, 4) != 0) {
+        addr = (uint32_t)addr;
+    } else if (extract32(addr, 24, 4) != 0) {
         /* I/O address space */
-        return addr | MAKE_64BIT_MASK(32, 32);
-    }
-    /* PDC address space */
-    return (addr & MAKE_64BIT_MASK(0, 24)) | MAKE_64BIT_MASK(60, 4);
-}
-
-static hwaddr hppa_abs_to_phys(CPUHPPAState *env, vaddr addr)
-{
-    if (!hppa_is_pa20(env)) {
-        return addr;
-    } else if (env->psw & PSW_W) {
-        return hppa_abs_to_phys_pa2_w1(addr);
+        addr = (int32_t)addr;
     } else {
-        return hppa_abs_to_phys_pa2_w0(addr);
+        /* PDC address space */
+        addr &= MAKE_64BIT_MASK(0, 24);
+        addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
     }
+    return addr;
 }
 
 static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
@@ -161,9 +159,22 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
         *tlb_entry = NULL;
     }
 
-    /* Virtual translation disabled.  Direct map virtual to physical.  */
-    if (mmu_idx == MMU_PHYS_IDX) {
-        phys = addr;
+    /* Virtual translation disabled.  Map absolute to physical.  */
+    if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
+        switch (mmu_idx) {
+        case MMU_ABS_W_IDX:
+            phys = hppa_abs_to_phys_pa2_w1(addr);
+            break;
+        case MMU_ABS_IDX:
+            if (hppa_is_pa20(env)) {
+                phys = hppa_abs_to_phys_pa2_w0(addr);
+            } else {
+                phys = (uint32_t)addr;
+            }
+            break;
+        default:
+            g_assert_not_reached();
+        }
         prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
         goto egress;
     }
@@ -261,7 +272,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
     }
 
  egress:
-    *pphys = phys = hppa_abs_to_phys(env, phys);
+    *pphys = phys;
     *pprot = prot;
     trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
     return ret;
@@ -271,16 +282,15 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
 {
     HPPACPU *cpu = HPPA_CPU(cs);
     hwaddr phys;
-    int prot, excp;
+    int prot, excp, mmu_idx;
 
     /* If the (data) mmu is disabled, bypass translation.  */
     /* ??? We really ought to know if the code mmu is disabled too,
        in order to get the correct debugging dumps.  */
-    if (!(cpu->env.psw & PSW_D)) {
-        return hppa_abs_to_phys(&cpu->env, addr);
-    }
+    mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
+               cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
 
-    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
+    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
                                      &phys, &prot, NULL);
 
     /* Since we're translating for debugging, the only error that is a
@@ -367,8 +377,8 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
         trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
 
         /* Failure.  Raise the indicated exception.  */
-        raise_exception_with_ior(env, excp, retaddr,
-                                 addr, mmu_idx == MMU_PHYS_IDX);
+        raise_exception_with_ior(env, excp, retaddr, addr,
+                                 MMU_IDX_MMU_DISABLED(mmu_idx));
     }
 
     trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
@@ -450,7 +460,7 @@ static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
     int mask_shift;
 
     mask_shift = 2 * (r1 & 0xf);
-    va_size = TARGET_PAGE_SIZE << mask_shift;
+    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
     va_b &= -va_size;
     va_e = va_b + va_size - 1;
 
@@ -459,7 +469,14 @@ static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
 
     ent->itree.start = va_b;
     ent->itree.last = va_e;
-    ent->pa = (r1 << 7) & (TARGET_PAGE_MASK << mask_shift);
+
+    /* Extract all 52 bits present in the page table entry.  */
+    ent->pa = r1 << (TARGET_PAGE_BITS - 5);
+    /* Align per the page size.  */
+    ent->pa &= TARGET_PAGE_MASK << mask_shift;
+    /* Ignore the bits beyond physical address space.  */
+    ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);
+
     ent->t = extract64(r2, 61, 1);
     ent->d = extract64(r2, 60, 1);
     ent->b = extract64(r2, 59, 1);
@@ -505,7 +522,7 @@ static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
      */
     end = start & 0xf;
     start &= TARGET_PAGE_MASK;
-    end = TARGET_PAGE_SIZE << (2 * end);
+    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
     end = start + end - 1;
 
     hppa_flush_tlb_range(env, start, end);
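The rewritten hppa_abs_to_phys_pa2_w0() above splits the 32-bit absolute space into memory, I/O, and PDC regions. A standalone sketch of the same mapping (illustrative, not part of the patch; PHYS_BITS stands in for TARGET_PHYS_ADDR_SPACE_BITS, taken as 40 per cpu-param.h above):

    #include <assert.h>
    #include <stdint.h>

    /* Mirrors TARGET_PHYS_ADDR_SPACE_BITS from cpu-param.h above. */
    #define PHYS_BITS 40

    /* W=0 absolute-to-physical mapping, restated from the hunk above. */
    static uint64_t abs_to_phys_w0(uint64_t addr)
    {
        uint32_t a = (uint32_t)addr;

        if ((a >> 28) != 0xf) {
            /* Memory address space: zero-extend the 32-bit address. */
            return a;
        }
        if (((a >> 24) & 0xf) != 0) {
            /* I/O address space: sign-extend toward the top of the space. */
            return (uint64_t)(int64_t)(int32_t)a;
        }
        /* PDC address space: low 24 bits, placed in the top sixteenth. */
        return (a & 0xffffff) | (-1ull << (PHYS_BITS - 4));
    }

    int main(void)
    {
        assert(abs_to_phys_w0(0x12345678) == 0x12345678);
        assert(abs_to_phys_w0(0xf1000000) == 0xfffffffff1000000ull);
        assert(abs_to_phys_w0(0xf0123456) == 0xfffffff000123456ull);
        return 0;
    }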
diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c
index a0e31c0..7f607c3 100644
--- a/target/hppa/op_helper.c
+++ b/target/hppa/op_helper.c
@@ -338,7 +338,7 @@ target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
 #ifdef CONFIG_USER_ONLY
     return page_check_range(addr, 1, want);
 #else
-    int prot, excp;
+    int prot, excp, mmu_idx;
     hwaddr phys;
 
     trace_hppa_tlb_probe(addr, level, want);
@@ -347,7 +347,8 @@ target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
         return 0;
     }
 
-    excp = hppa_get_physical_address(env, addr, level, 0, &phys,
+    mmu_idx = PRIV_P_TO_MMU_IDX(level, env->psw & PSW_P);
+    excp = hppa_get_physical_address(env, addr, mmu_idx, 0, &phys,
                                      &prot, NULL);
     if (excp >= 0) {
         if (env->psw & PSW_Q) {
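The HELPER(probe) hunk above matters because of the renumbering in cpu.h: the raw privilege level (0-3) is not a valid MMU index, and under the new layout any value below MMU_KERNEL_IDX reads as "translation disabled". A standalone check of that hazard (illustrative, not part of the patch):

    #include <assert.h>

    /* From cpu.h above. */
    #define MMU_KERNEL_IDX    8
    #define MMU_IDX_MMU_DISABLED(MIDX)  ((MIDX) < MMU_KERNEL_IDX)
    #define PRIV_P_TO_MMU_IDX(PRIV, P)  ((PRIV) * 2 + !!(P) + MMU_KERNEL_IDX)

    int main(void)
    {
        for (int level = 0; level < 4; level++) {
            /* A raw privilege level would read as "translation disabled"... */
            assert(MMU_IDX_MMU_DISABLED(level));
            /* ...whereas the converted index performs a real TLB lookup. */
            assert(!MMU_IDX_MMU_DISABLED(PRIV_P_TO_MMU_IDX(level, 1)));
        }
        return 0;
    }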
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index bcce65d..4a4830c 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -69,19 +69,24 @@ typedef struct DisasContext {
 } DisasContext;
 
 #ifdef CONFIG_USER_ONLY
-#define UNALIGN(C)  (C)->unalign
+#define UNALIGN(C)       (C)->unalign
+#define MMU_DISABLED(C)  false
 #else
-#define UNALIGN(C)  MO_ALIGN
+#define UNALIGN(C)       MO_ALIGN
+#define MMU_DISABLED(C)  MMU_IDX_MMU_DISABLED((C)->mmu_idx)
 #endif
 
 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
 static int expand_sm_imm(DisasContext *ctx, int val)
 {
-    if (val & PSW_SM_E) {
-        val = (val & ~PSW_SM_E) | PSW_E;
-    }
-    if (val & PSW_SM_W) {
-        val = (val & ~PSW_SM_W) | PSW_W;
+    /* Keep unimplemented bits disabled -- see cpu_hppa_put_psw.  */
+    if (ctx->is_pa20) {
+        if (val & PSW_SM_W) {
+            val |= PSW_W;
+        }
+        val &= ~(PSW_SM_W | PSW_SM_E | PSW_G);
+    } else {
+        val &= ~(PSW_SM_W | PSW_SM_E | PSW_O);
     }
     return val;
 }
@@ -1372,7 +1377,7 @@ static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
     assert(ctx->null_cond.c == TCG_COND_NEVER);
 
     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
-             ctx->mmu_idx == MMU_PHYS_IDX);
+             MMU_DISABLED(ctx));
     tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
     if (modify) {
         save_gpr(ctx, rb, ofs);
@@ -1390,7 +1395,7 @@ static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
     assert(ctx->null_cond.c == TCG_COND_NEVER);
 
     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
-             ctx->mmu_idx == MMU_PHYS_IDX);
+             MMU_DISABLED(ctx));
     tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
     if (modify) {
         save_gpr(ctx, rb, ofs);
@@ -1408,7 +1413,7 @@ static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
     assert(ctx->null_cond.c == TCG_COND_NEVER);
 
     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
-             ctx->mmu_idx == MMU_PHYS_IDX);
+             MMU_DISABLED(ctx));
     tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
     if (modify) {
         save_gpr(ctx, rb, ofs);
@@ -1426,7 +1431,7 @@ static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
     assert(ctx->null_cond.c == TCG_COND_NEVER);
 
     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
-             ctx->mmu_idx == MMU_PHYS_IDX);
+             MMU_DISABLED(ctx));
     tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
     if (modify) {
         save_gpr(ctx, rb, ofs);
@@ -2294,7 +2299,7 @@ static bool trans_probe(DisasContext *ctx, arg_probe *a)
     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
 
     if (a->imm) {
-        level = tcg_constant_i32(a->ri);
+        level = tcg_constant_i32(a->ri & 3);
     } else {
         level = tcg_temp_new_i32();
         tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
@@ -3075,7 +3080,7 @@ static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
     }
 
     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
-             a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
+             a->disp, a->sp, a->m, MMU_DISABLED(ctx));
 
     /*
      * For hppa1.1, LDCW is undefined unless aligned mod 16.
@@ -3105,7 +3110,7 @@ static bool trans_stby(DisasContext *ctx, arg_stby *a)
     nullify_over(ctx);
 
     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
-             ctx->mmu_idx == MMU_PHYS_IDX);
+             MMU_DISABLED(ctx));
     val = load_gpr(ctx, a->r);
     if (a->a) {
         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
@@ -3139,7 +3144,7 @@ static bool trans_stdby(DisasContext *ctx, arg_stby *a)
     nullify_over(ctx);
 
     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
-             ctx->mmu_idx == MMU_PHYS_IDX);
+             MMU_DISABLED(ctx));
     val = load_gpr(ctx, a->r);
     if (a->a) {
         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
@@ -3167,7 +3172,7 @@ static bool trans_lda(DisasContext *ctx, arg_ldst *a)
     int hold_mmu_idx = ctx->mmu_idx;
 
     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
-    ctx->mmu_idx = MMU_PHYS_IDX;
+    ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
     trans_ld(ctx, a);
     ctx->mmu_idx = hold_mmu_idx;
     return true;
@@ -3178,7 +3183,7 @@ static bool trans_sta(DisasContext *ctx, arg_ldst *a)
     int hold_mmu_idx = ctx->mmu_idx;
 
     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
-    ctx->mmu_idx = MMU_PHYS_IDX;
+    ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
     trans_st(ctx, a);
     ctx->mmu_idx = hold_mmu_idx;
     return true;
@@ -4430,7 +4435,7 @@ static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
     ctx->mmu_idx = (ctx->tb_flags & PSW_D ?
                     PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
-                    : MMU_PHYS_IDX);
+                    : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
 
     /* Recover the IAOQ values from the GVA + PRIV.  */
     uint64_t cs_base = ctx->base.tb->cs_base;
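Finally, the W=1 path that mem_helper.c above reduces to a single sextract64() relies on the implemented physical address space being at most 54 bits, per the QEMU_BUILD_BUG_ON. A standalone sketch of that signed truncation (illustrative, not part of the patch; sext() mimics sextract64() for a field starting at bit 0, with PHYS_BITS standing in for TARGET_PHYS_ADDR_SPACE_BITS):

    #include <assert.h>
    #include <stdint.h>

    /* Mirrors TARGET_PHYS_ADDR_SPACE_BITS from cpu-param.h above. */
    #define PHYS_BITS 40

    /* Sign-extend the low `bits` bits of value, like sextract64(value, 0, bits). */
    static uint64_t sext(uint64_t value, int bits)
    {
        uint64_t sign = 1ull << (bits - 1);
        value &= (1ull << bits) - 1;
        return (value ^ sign) - sign;
    }

    int main(void)
    {
        /* Addresses below 2^39 pass through unchanged... */
        assert(sext(0x12345678, PHYS_BITS) == 0x12345678);
        /* ...while addresses with bit 39 set extend to the top of the
         * 64-bit hwaddr space, where the I/O and PDC regions live. */
        assert(sext(0xf000000000ull, PHYS_BITS) == 0xfffffff000000000ull);
        return 0;
    }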