 target/microblaze/cpu.c       | 18
 target/microblaze/cpu.h       |  2
 target/microblaze/translate.c | 55
 3 files changed, 62 insertions(+), 13 deletions(-)
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index a6f1ce6..6ee15ac 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -154,6 +154,13 @@ static void mb_cpu_realizefn(DeviceState *dev, Error **errp)
         return;
     }
 
+    if (cpu->cfg.addr_size != 32) {
+        error_setg(errp, "addr-size %d is out of range. "
+                   "Only 32bit is supported.",
+                   cpu->cfg.addr_size);
+        return;
+    }
+
     qemu_init_vcpu(cs);
 
     env->pvr.regs[0] = PVR0_USE_EXC_MASK \
@@ -200,7 +207,8 @@ static void mb_cpu_realizefn(DeviceState *dev, Error **errp)
     env->pvr.regs[5] |= cpu->cfg.dcache_writeback ?
                                         PVR5_DCACHE_WRITEBACK_MASK : 0;
 
-    env->pvr.regs[10] = 0x0c000000; /* Default to spartan 3a dsp family. */
+    env->pvr.regs[10] = 0x0c000000 | /* Default to spartan 3a dsp family. */
+                        (cpu->cfg.addr_size - 32) << PVR10_ASIZE_SHIFT;
     env->pvr.regs[11] = (cpu->cfg.use_mmu ? PVR11_USE_MMU : 0) |
                         16 << 17;
 
@@ -232,6 +240,14 @@ static Property mb_properties[] = {
     DEFINE_PROP_UINT32("base-vectors", MicroBlazeCPU, cfg.base_vectors, 0),
     DEFINE_PROP_BOOL("use-stack-protection", MicroBlazeCPU, cfg.stackprot,
                      false),
+    /*
+     * This is the C_ADDR_SIZE synth-time configuration option of the
+     * MicroBlaze cores. Supported values range between 32 and 64.
+     *
+     * When set to > 32, 32bit MicroBlaze can emit load/stores
+     * with extended addressing.
+     */
+    DEFINE_PROP_UINT8("addr-size", MicroBlazeCPU, cfg.addr_size, 32),
     /* If use-fpu > 0 - FPU is enabled
      * If use-fpu = 2 - Floating point conversion and square root instructions
      *                  are enabled
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index b631b7d..e62c456 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -203,6 +203,7 @@ typedef struct CPUMBState CPUMBState;
 
 /* Target family PVR mask */
 #define PVR10_TARGET_FAMILY_MASK        0xFF000000
+#define PVR10_ASIZE_SHIFT               18
 
 /* MMU descrtiption */
 #define PVR11_USE_MMU                   0xC0000000
@@ -297,6 +298,7 @@ struct MicroBlazeCPU {
     struct {
         bool stackprot;
         uint32_t base_vectors;
+        uint8_t addr_size;
         uint8_t use_fpu;
         uint8_t use_hw_mul;
         bool use_barrel;
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 7db4bdc..504db88 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -824,7 +824,7 @@ static void dec_imm(DisasContext *dc)
     dc->clear_imm = 0;
 }
 
-static inline void compute_ldst_addr(DisasContext *dc, TCGv t)
+static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
 {
     bool extimm = dc->tb_flags & IMM_FLAG;
     /* Should be set to true if r1 is used by loadstores. */
@@ -838,6 +838,22 @@ static inline void compute_ldst_addr(DisasContext *dc, TCGv t)
 
     /* Treat the common cases first. */
     if (!dc->type_b) {
+        if (ea) {
+            int addr_size = dc->cpu->cfg.addr_size;
+
+            if (addr_size == 32) {
+                tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
+                return;
+            }
+
+            tcg_gen_concat_i32_i64(t, cpu_R[dc->rb], cpu_R[dc->ra]);
+            if (addr_size < 64) {
+                /* Mask off out of range bits. */
+                tcg_gen_andi_i64(t, t, MAKE_64BIT_MASK(0, addr_size));
+            }
+            return;
+        }
+
         /* If any of the regs is r0, set t to the value of the other reg. */
         if (dc->ra == 0) {
             tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
@@ -887,12 +903,14 @@ static void dec_load(DisasContext *dc)
     TCGv_i32 v;
     TCGv addr;
     unsigned int size;
-    bool rev = false, ex = false;
+    bool rev = false, ex = false, ea = false;
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
     TCGMemOp mop;
 
     mop = dc->opcode & 3;
     size = 1 << mop;
     if (!dc->type_b) {
+        ea = extract32(dc->ir, 7, 1);
         rev = extract32(dc->ir, 9, 1);
         ex = extract32(dc->ir, 10, 1);
     }
@@ -905,12 +923,19 @@ static void dec_load(DisasContext *dc)
         return;
     }
 
-    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
-                                                        ex ? "x" : "");
+    if (trap_userspace(dc, ea)) {
+        return;
+    }
+
+    LOG_DIS("l%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
+                                                          ex ? "x" : "",
+                                                          ea ? "ea" : "");
 
     t_sync_flags(dc);
     addr = tcg_temp_new();
-    compute_ldst_addr(dc, addr);
+    compute_ldst_addr(dc, ea, addr);
+    /* Extended addressing bypasses the MMU. */
+    mem_index = ea ? MMU_NOMMU_IDX : mem_index;
 
     /*
      * When doing reverse accesses we need to do two things.
@@ -964,7 +989,7 @@ static void dec_load(DisasContext *dc)
      * address and if that succeeds we write into the destination reg.
      */
     v = tcg_temp_new_i32();
-    tcg_gen_qemu_ld_i32(v, addr, cpu_mmu_index(&dc->cpu->env, false), mop);
+    tcg_gen_qemu_ld_i32(v, addr, mem_index, mop);
 
     if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
         tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
@@ -994,12 +1019,14 @@ static void dec_store(DisasContext *dc)
     TCGv addr;
     TCGLabel *swx_skip = NULL;
     unsigned int size;
-    bool rev = false, ex = false;
+    bool rev = false, ex = false, ea = false;
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
     TCGMemOp mop;
 
     mop = dc->opcode & 3;
     size = 1 << mop;
     if (!dc->type_b) {
+        ea = extract32(dc->ir, 7, 1);
         rev = extract32(dc->ir, 9, 1);
         ex = extract32(dc->ir, 10, 1);
     }
@@ -1012,14 +1039,19 @@ static void dec_store(DisasContext *dc)
         return;
     }
 
-    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
-                                                        ex ? "x" : "");
+    trap_userspace(dc, ea);
+
+    LOG_DIS("s%d%s%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
+                                                          ex ? "x" : "",
+                                                          ea ? "ea" : "");
     t_sync_flags(dc);
     /* If we get a fault on a dslot, the jmpstate better be in sync. */
     sync_jmpstate(dc);
     /* SWX needs a temp_local. */
     addr = ex ? tcg_temp_local_new() : tcg_temp_new();
-    compute_ldst_addr(dc, addr);
+    compute_ldst_addr(dc, ea, addr);
+    /* Extended addressing bypasses the MMU. */
+    mem_index = ea ? MMU_NOMMU_IDX : mem_index;
 
     if (ex) { /* swx */
         TCGv_i32 tval;
@@ -1074,8 +1106,7 @@ static void dec_store(DisasContext *dc)
             break;
         }
     }
-    tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr,
-                        cpu_mmu_index(&dc->cpu->env, false), mop);
+    tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr, mem_index, mop);
 
     /* Verify alignment if needed. */
     if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
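
For reference, a minimal standalone sketch (not part of the patch) of how the
ea path in compute_ldst_addr() assembles the effective address: rb supplies
the low 32 bits, ra the high bits, and anything beyond the configured
C_ADDR_SIZE is masked off. The helper name ea_address() and the use of plain
integer arithmetic instead of TCG ops are illustrative assumptions only.

    /*
     * Illustrative only: mirrors the TCG sequence emitted by
     * compute_ldst_addr() for ea accesses, using plain C arithmetic.
     * ea_address() is a hypothetical helper, not a QEMU function.
     */
    #include <stdint.h>

    static uint64_t ea_address(uint32_t ra, uint32_t rb, unsigned addr_size)
    {
        if (addr_size == 32) {
            /* Plain 32-bit addressing: only rb contributes. */
            return rb;
        }

        /* rb is the low word, ra the high word (tcg_gen_concat_i32_i64). */
        uint64_t addr = ((uint64_t)ra << 32) | rb;

        if (addr_size < 64) {
            /* Mask off out-of-range bits, as MAKE_64BIT_MASK(0, addr_size) does. */
            addr &= (UINT64_C(1) << addr_size) - 1;
        }
        return addr;
    }

Note that, per the patch, ea accesses additionally bypass the MMU
(MMU_NOMMU_IDX) and trap in user mode via trap_userspace(); neither of those
aspects is shown in the sketch.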