Diffstat (limited to 'target')
-rw-r--r--  target/riscv/cpu_helper.c               |  15
-rw-r--r--  target/riscv/fpu_helper.c               | 102
-rw-r--r--  target/riscv/insn_trans/trans_rvd.c.inc |   8
-rw-r--r--  target/riscv/insn_trans/trans_rvf.c.inc |  99
-rw-r--r--  target/riscv/internals.h                |  16
-rw-r--r--  target/riscv/pmp.c                      |  57
-rw-r--r--  target/riscv/pmp.h                      |   2
-rw-r--r--  target/riscv/translate.c                |  29
8 files changed, 245 insertions, 83 deletions
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 75d2ae3..fd1d373 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -543,7 +543,8 @@ restart:
             /* for superpage mappings, make a fake leaf PTE for the TLB's benefit. */
             target_ulong vpn = addr >> PGSHIFT;
-            *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT;
+            *physical = ((ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT) |
+                        (addr & ~TARGET_PAGE_MASK);
             /* set permissions on the TLB entry */
             if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
@@ -630,7 +631,7 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
         }
     }
-    return phys_addr;
+    return phys_addr & TARGET_PAGE_MASK;
 }
 void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
@@ -692,6 +693,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     bool first_stage_error = true;
     int ret = TRANSLATE_FAIL;
     int mode = mmu_idx;
+    target_ulong tlb_size = 0;
     env->guest_phys_fault_addr = 0;
@@ -783,8 +785,13 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     }
     if (ret == TRANSLATE_SUCCESS) {
-        tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
-                     prot, mmu_idx, TARGET_PAGE_SIZE);
+        if (pmp_is_range_in_tlb(env, pa & TARGET_PAGE_MASK, &tlb_size)) {
+            tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
+                         prot, mmu_idx, tlb_size);
+        } else {
+            tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
+                         prot, mmu_idx, TARGET_PAGE_SIZE);
+        }
         return true;
     } else if (probe) {
         return false;
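Editor's note: the first hunk makes the page-table walk produce a full byte address for superpage hits (page offset included), the debug path masks the offset back off, and the TLB fill path can then install a smaller-than-page TLB entry when a PMP region is narrower than a page. A minimal host-side sketch of the superpage address composition, not part of the patch; the constants and values below are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative constants; the real ones come from QEMU's headers. */
    #define PGSHIFT          12
    #define TARGET_PAGE_MASK (~(((uint64_t)1 << PGSHIFT) - 1))

    /*
     * Compose the physical address for a superpage hit the way the fixed
     * hunk does: take the PPN from the leaf PTE, fill the untranslated
     * low VPN bits from the virtual address, then OR in the byte offset
     * so the result is a full byte address rather than a page number.
     */
    static uint64_t compose_physical(uint64_t ppn, uint64_t addr, int ptshift)
    {
        uint64_t vpn = addr >> PGSHIFT;
        return ((ppn | (vpn & (((uint64_t)1 << ptshift) - 1))) << PGSHIFT) |
               (addr & ~TARGET_PAGE_MASK);
    }

    int main(void)
    {
        /* 2 MiB superpage: ptshift = 9, PPN aligned to 512 pages. */
        uint64_t pa = compose_physical(0x80200, 0x00123abc, 9);
        printf("pa = 0x%llx\n", (unsigned long long)pa); /* 0x80323abc */
        return 0;
    }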
diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c
index 4379756..bb346a8 100644
--- a/target/riscv/fpu_helper.c
+++ b/target/riscv/fpu_helper.c
@@ -81,10 +81,19 @@ void helper_set_rounding_mode(CPURISCVState *env, uint32_t rm)
     set_float_rounding_mode(softrm, &env->fp_status);
 }
+static uint64_t do_fmadd_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2,
+                           uint64_t rs3, int flags)
+{
+    float32 frs1 = check_nanbox_s(rs1);
+    float32 frs2 = check_nanbox_s(rs2);
+    float32 frs3 = check_nanbox_s(rs3);
+    return nanbox_s(float32_muladd(frs1, frs2, frs3, flags, &env->fp_status));
+}
+
 uint64_t helper_fmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
                         uint64_t frs3)
 {
-    return float32_muladd(frs1, frs2, frs3, 0, &env->fp_status);
+    return do_fmadd_s(env, frs1, frs2, frs3, 0);
 }
 uint64_t helper_fmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
@@ -96,8 +105,7 @@ uint64_t helper_fmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
 uint64_t helper_fmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
                         uint64_t frs3)
 {
-    return float32_muladd(frs1, frs2, frs3, float_muladd_negate_c,
-                          &env->fp_status);
+    return do_fmadd_s(env, frs1, frs2, frs3, float_muladd_negate_c);
 }
 uint64_t helper_fmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
@@ -110,8 +118,7 @@ uint64_t helper_fmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
 uint64_t helper_fnmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
                          uint64_t frs3)
 {
-    return float32_muladd(frs1, frs2, frs3, float_muladd_negate_product,
-                          &env->fp_status);
+    return do_fmadd_s(env, frs1, frs2, frs3, float_muladd_negate_product);
 }
 uint64_t helper_fnmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
@@ -124,8 +131,8 @@ uint64_t helper_fnmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
 uint64_t helper_fnmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
                          uint64_t frs3)
 {
-    return float32_muladd(frs1, frs2, frs3, float_muladd_negate_c |
-                          float_muladd_negate_product, &env->fp_status);
+    return do_fmadd_s(env, frs1, frs2, frs3,
+                      float_muladd_negate_c | float_muladd_negate_product);
 }
 uint64_t helper_fnmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
@@ -135,102 +142,126 @@ uint64_t helper_fnmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
                          float_muladd_negate_product, &env->fp_status);
 }
-uint64_t helper_fadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+uint64_t helper_fadd_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
 {
-    return float32_add(frs1, frs2, &env->fp_status);
+    float32 frs1 = check_nanbox_s(rs1);
+    float32 frs2 = check_nanbox_s(rs2);
+    return nanbox_s(float32_add(frs1, frs2, &env->fp_status));
 }
-uint64_t helper_fsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+uint64_t helper_fsub_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
 {
-    return float32_sub(frs1, frs2, &env->fp_status);
+    float32 frs1 = check_nanbox_s(rs1);
+    float32 frs2 = check_nanbox_s(rs2);
+    return nanbox_s(float32_sub(frs1, frs2, &env->fp_status));
 }
-uint64_t helper_fmul_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+uint64_t helper_fmul_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
 {
-    return float32_mul(frs1, frs2, &env->fp_status);
+    float32 frs1 = check_nanbox_s(rs1);
+    float32 frs2 = check_nanbox_s(rs2);
+    return nanbox_s(float32_mul(frs1, frs2, &env->fp_status));
 }
-uint64_t helper_fdiv_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+uint64_t helper_fdiv_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
 {
-    return float32_div(frs1, frs2, &env->fp_status);
+    float32 frs1 = check_nanbox_s(rs1);
+    float32 frs2 = check_nanbox_s(rs2);
+    return nanbox_s(float32_div(frs1, frs2, &env->fp_status));
 }
-uint64_t helper_fmin_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+uint64_t helper_fmin_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
 {
-    return float32_minnum(frs1, frs2, &env->fp_status);
+    float32 frs1 = check_nanbox_s(rs1);
+    float32 frs2 = check_nanbox_s(rs2);
+    return nanbox_s(float32_minnum(frs1, frs2, &env->fp_status));
 }
-uint64_t helper_fmax_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+uint64_t helper_fmax_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
 {
-    return float32_maxnum(frs1, frs2, &env->fp_status);
+    float32 frs1 = check_nanbox_s(rs1);
+    float32 frs2 = check_nanbox_s(rs2);
+    return nanbox_s(float32_maxnum(frs1, frs2, &env->fp_status));
 }
-uint64_t helper_fsqrt_s(CPURISCVState *env, uint64_t frs1)
+uint64_t helper_fsqrt_s(CPURISCVState *env, uint64_t rs1)
 {
-    return float32_sqrt(frs1, &env->fp_status);
+    float32 frs1 = check_nanbox_s(rs1);
+    return nanbox_s(float32_sqrt(frs1, &env->fp_status));
 }
-target_ulong helper_fle_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+target_ulong helper_fle_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
 {
+    float32 frs1 = check_nanbox_s(rs1);
+    float32 frs2 = check_nanbox_s(rs2);
     return float32_le(frs1, frs2, &env->fp_status);
 }
-target_ulong helper_flt_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+target_ulong helper_flt_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
 {
+    float32 frs1 = check_nanbox_s(rs1);
+    float32 frs2 = check_nanbox_s(rs2);
     return float32_lt(frs1, frs2, &env->fp_status);
 }
-target_ulong helper_feq_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+target_ulong helper_feq_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
 {
+    float32 frs1 = check_nanbox_s(rs1);
+    float32 frs2 = check_nanbox_s(rs2);
     return float32_eq_quiet(frs1, frs2, &env->fp_status);
 }
-target_ulong helper_fcvt_w_s(CPURISCVState *env, uint64_t frs1)
+target_ulong helper_fcvt_w_s(CPURISCVState *env, uint64_t rs1)
 {
+    float32 frs1 = check_nanbox_s(rs1);
     return float32_to_int32(frs1, &env->fp_status);
 }
-target_ulong helper_fcvt_wu_s(CPURISCVState *env, uint64_t frs1)
+target_ulong helper_fcvt_wu_s(CPURISCVState *env, uint64_t rs1)
 {
+    float32 frs1 = check_nanbox_s(rs1);
     return (int32_t)float32_to_uint32(frs1, &env->fp_status);
 }
 #if defined(TARGET_RISCV64)
-uint64_t helper_fcvt_l_s(CPURISCVState *env, uint64_t frs1)
+uint64_t helper_fcvt_l_s(CPURISCVState *env, uint64_t rs1)
 {
+    float32 frs1 = check_nanbox_s(rs1);
     return float32_to_int64(frs1, &env->fp_status);
 }
-uint64_t helper_fcvt_lu_s(CPURISCVState *env, uint64_t frs1)
+uint64_t helper_fcvt_lu_s(CPURISCVState *env, uint64_t rs1)
 {
+    float32 frs1 = check_nanbox_s(rs1);
     return float32_to_uint64(frs1, &env->fp_status);
 }
 #endif
 uint64_t helper_fcvt_s_w(CPURISCVState *env, target_ulong rs1)
 {
-    return int32_to_float32((int32_t)rs1, &env->fp_status);
+    return nanbox_s(int32_to_float32((int32_t)rs1, &env->fp_status));
 }
 uint64_t helper_fcvt_s_wu(CPURISCVState *env, target_ulong rs1)
 {
-    return uint32_to_float32((uint32_t)rs1, &env->fp_status);
+    return nanbox_s(uint32_to_float32((uint32_t)rs1, &env->fp_status));
 }
 #if defined(TARGET_RISCV64)
 uint64_t helper_fcvt_s_l(CPURISCVState *env, uint64_t rs1)
 {
-    return int64_to_float32(rs1, &env->fp_status);
+    return nanbox_s(int64_to_float32(rs1, &env->fp_status));
 }
 uint64_t helper_fcvt_s_lu(CPURISCVState *env, uint64_t rs1)
 {
-    return uint64_to_float32(rs1, &env->fp_status);
+    return nanbox_s(uint64_to_float32(rs1, &env->fp_status));
 }
 #endif
-target_ulong helper_fclass_s(uint64_t frs1)
+target_ulong helper_fclass_s(uint64_t rs1)
 {
+    float32 frs1 = check_nanbox_s(rs1);
     return fclass_s(frs1);
 }
@@ -266,12 +297,13 @@ uint64_t helper_fmax_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
 uint64_t helper_fcvt_s_d(CPURISCVState *env, uint64_t rs1)
 {
-    return float64_to_float32(rs1, &env->fp_status);
+    return nanbox_s(float64_to_float32(rs1, &env->fp_status));
 }
 uint64_t helper_fcvt_d_s(CPURISCVState *env, uint64_t rs1)
 {
-    return float32_to_float64(rs1, &env->fp_status);
+    float32 frs1 = check_nanbox_s(rs1);
+    return float32_to_float64(frs1, &env->fp_status);
 }
 uint64_t helper_fsqrt_d(CPURISCVState *env, uint64_t frs1)
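Editor's note: every single-precision helper now unboxes its operands with check_nanbox_s() and re-boxes its result with nanbox_s() (both added to internals.h further down). A standalone host-side sketch of that check, assuming only the C standard library; the names and values below are illustrative, not QEMU's:

    #include <stdint.h>
    #include <stdio.h>

    #define BOX_MASK  0xffffffff00000000ull  /* upper 32 bits all ones */
    #define DEF_QNAN  0x7fc00000u            /* single-precision default qNaN */

    /* Store a 32-bit float image in a 64-bit FP register slot, NaN-boxed. */
    static uint64_t nanbox32(uint32_t f)
    {
        return (uint64_t)f | BOX_MASK;
    }

    /*
     * Recover a 32-bit operand from a 64-bit register image.  If the upper
     * half is not all ones, the operand is not a valid NaN-boxed single and
     * is replaced by the canonical NaN, as the F-extension rules require.
     */
    static uint32_t unbox32(uint64_t reg)
    {
        return ((reg & BOX_MASK) == BOX_MASK) ? (uint32_t)reg : DEF_QNAN;
    }

    int main(void)
    {
        uint64_t boxed = nanbox32(0x3f800000);   /* 1.0f, properly boxed */
        uint64_t stale = 0x0000000040000000ull;  /* 2.0f with a zero top half */

        printf("%08x\n", (unsigned)unbox32(boxed)); /* 3f800000: used as-is */
        printf("%08x\n", (unsigned)unbox32(stale)); /* 7fc00000: canonical NaN */
        return 0;
    }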
diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc
index ea1044f..4f83263 100644
--- a/target/riscv/insn_trans/trans_rvd.c.inc
+++ b/target/riscv/insn_trans/trans_rvd.c.inc
@@ -20,10 +20,10 @@
 static bool trans_fld(DisasContext *ctx, arg_fld *a)
 {
-    TCGv t0 = tcg_temp_new();
-    gen_get_gpr(t0, a->rs1);
     REQUIRE_FPU;
     REQUIRE_EXT(ctx, RVD);
+    TCGv t0 = tcg_temp_new();
+    gen_get_gpr(t0, a->rs1);
     tcg_gen_addi_tl(t0, t0, a->imm);
     tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], t0, ctx->mem_idx, MO_TEQ);
@@ -35,10 +35,10 @@ static bool trans_fld(DisasContext *ctx, arg_fld *a)
 static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
 {
-    TCGv t0 = tcg_temp_new();
-    gen_get_gpr(t0, a->rs1);
     REQUIRE_FPU;
     REQUIRE_EXT(ctx, RVD);
+    TCGv t0 = tcg_temp_new();
+    gen_get_gpr(t0, a->rs1);
     tcg_gen_addi_tl(t0, t0, a->imm);
     tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], t0, ctx->mem_idx, MO_TEQ);
diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc
index 3bfd888..3dfec82 100644
--- a/target/riscv/insn_trans/trans_rvf.c.inc
+++ b/target/riscv/insn_trans/trans_rvf.c.inc
@@ -23,30 +23,16 @@
         return false;      \
     } while (0)
-/*
- * RISC-V requires NaN-boxing of narrower width floating
- * point values. This applies when a 32-bit value is
- * assigned to a 64-bit FP register. Thus this does not
- * apply when the RVD extension is not present.
- */
-static void gen_nanbox_fpr(DisasContext *ctx, int regno)
-{
-    if (has_ext(ctx, RVD)) {
-        tcg_gen_ori_i64(cpu_fpr[regno], cpu_fpr[regno],
-                        MAKE_64BIT_MASK(32, 32));
-    }
-}
-
 static bool trans_flw(DisasContext *ctx, arg_flw *a)
 {
-    TCGv t0 = tcg_temp_new();
-    gen_get_gpr(t0, a->rs1);
     REQUIRE_FPU;
     REQUIRE_EXT(ctx, RVF);
+    TCGv t0 = tcg_temp_new();
+    gen_get_gpr(t0, a->rs1);
     tcg_gen_addi_tl(t0, t0, a->imm);
     tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], t0, ctx->mem_idx, MO_TEUL);
-    gen_nanbox_fpr(ctx, a->rd);
+    gen_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rd]);
     tcg_temp_free(t0);
     mark_fs_dirty(ctx);
@@ -55,11 +41,11 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a)
 static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
 {
+    REQUIRE_FPU;
+    REQUIRE_EXT(ctx, RVF);
     TCGv t0 = tcg_temp_new();
     gen_get_gpr(t0, a->rs1);
-    REQUIRE_FPU;
-    REQUIRE_EXT(ctx, RVF);
     tcg_gen_addi_tl(t0, t0, a->imm);
     tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], t0, ctx->mem_idx, MO_TEUL);
@@ -175,11 +161,20 @@ static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a)
 {
     REQUIRE_FPU;
     REQUIRE_EXT(ctx, RVF);
+
     if (a->rs1 == a->rs2) { /* FMOV */
-        tcg_gen_mov_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1]);
+        gen_check_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rs1]);
     } else { /* FSGNJ */
-        tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rs2], cpu_fpr[a->rs1],
-                            0, 31);
+        TCGv_i64 rs1 = tcg_temp_new_i64();
+        TCGv_i64 rs2 = tcg_temp_new_i64();
+
+        gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
+        gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
+
+        /* This formulation retains the nanboxing of rs2. */
+        tcg_gen_deposit_i64(cpu_fpr[a->rd], rs2, rs1, 0, 31);
+        tcg_temp_free_i64(rs1);
+        tcg_temp_free_i64(rs2);
     }
     mark_fs_dirty(ctx);
     return true;
@@ -187,32 +182,65 @@ static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a)
 static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a)
 {
+    TCGv_i64 rs1, rs2, mask;
+
     REQUIRE_FPU;
     REQUIRE_EXT(ctx, RVF);
+
+    rs1 = tcg_temp_new_i64();
+    gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
+
     if (a->rs1 == a->rs2) { /* FNEG */
-        tcg_gen_xori_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], INT32_MIN);
+        tcg_gen_xori_i64(cpu_fpr[a->rd], rs1, MAKE_64BIT_MASK(31, 1));
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64();
-        tcg_gen_not_i64(t0, cpu_fpr[a->rs2]);
-        tcg_gen_deposit_i64(cpu_fpr[a->rd], t0, cpu_fpr[a->rs1], 0, 31);
-        tcg_temp_free_i64(t0);
+        rs2 = tcg_temp_new_i64();
+        gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
+
+        /*
+         * Replace bit 31 in rs1 with inverse in rs2.
+         * This formulation retains the nanboxing of rs1.
+         */
+        mask = tcg_const_i64(~MAKE_64BIT_MASK(31, 1));
+        tcg_gen_nor_i64(rs2, rs2, mask);
+        tcg_gen_and_i64(rs1, mask, rs1);
+        tcg_gen_or_i64(cpu_fpr[a->rd], rs1, rs2);
+
+        tcg_temp_free_i64(mask);
+        tcg_temp_free_i64(rs2);
     }
+    tcg_temp_free_i64(rs1);
+
     mark_fs_dirty(ctx);
     return true;
 }
 static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a)
 {
+    TCGv_i64 rs1, rs2;
+
     REQUIRE_FPU;
     REQUIRE_EXT(ctx, RVF);
+
+    rs1 = tcg_temp_new_i64();
+    gen_check_nanbox_s(rs1, cpu_fpr[a->rs1]);
+
     if (a->rs1 == a->rs2) { /* FABS */
-        tcg_gen_andi_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], ~INT32_MIN);
+        tcg_gen_andi_i64(cpu_fpr[a->rd], rs1, ~MAKE_64BIT_MASK(31, 1));
     } else {
-        TCGv_i64 t0 = tcg_temp_new_i64();
-        tcg_gen_andi_i64(t0, cpu_fpr[a->rs2], INT32_MIN);
-        tcg_gen_xor_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], t0);
-        tcg_temp_free_i64(t0);
+        rs2 = tcg_temp_new_i64();
+        gen_check_nanbox_s(rs2, cpu_fpr[a->rs2]);
+
+        /*
+         * Xor bit 31 in rs1 with that in rs2.
+         * This formulation retains the nanboxing of rs1.
+         */
+        tcg_gen_andi_i64(rs2, rs2, MAKE_64BIT_MASK(31, 1));
+        tcg_gen_xor_i64(cpu_fpr[a->rd], rs1, rs2);
+
+        tcg_temp_free_i64(rs2);
     }
+    tcg_temp_free_i64(rs1);
+
     mark_fs_dirty(ctx);
     return true;
 }
@@ -378,11 +406,8 @@ static bool trans_fmv_w_x(DisasContext *ctx, arg_fmv_w_x *a)
     TCGv t0 = tcg_temp_new();
     gen_get_gpr(t0, a->rs1);
-#if defined(TARGET_RISCV64)
-    tcg_gen_mov_i64(cpu_fpr[a->rd], t0);
-#else
-    tcg_gen_extu_i32_i64(cpu_fpr[a->rd], t0);
-#endif
+    tcg_gen_extu_tl_i64(cpu_fpr[a->rd], t0);
+    gen_nanbox_s(cpu_fpr[a->rd], cpu_fpr[a->rd]);
     mark_fs_dirty(ctx);
     tcg_temp_free(t0);
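Editor's note: the sign-injection translations above now unbox both operands and keep the result NaN-boxed. A standalone sketch of the bit manipulation the generated code performs, using plain 64-bit integers in place of TCG values; the function names are illustrative, not the patch's:

    #include <stdint.h>
    #include <stdio.h>

    #define BOX      0xffffffff00000000ull
    #define SIGN_BIT 0x0000000080000000ull   /* MAKE_64BIT_MASK(31, 1) */

    /* FSGNJ.S: magnitude of rs1, sign of rs2; keeps the boxing bits of rs2. */
    static uint64_t fsgnj_s(uint64_t rs1, uint64_t rs2)
    {
        /* deposit(rs2, rs1, 0, 31): bits 30..0 from rs1, the rest from rs2 */
        return (rs2 & ~(SIGN_BIT - 1)) | (rs1 & (SIGN_BIT - 1));
    }

    /* FSGNJN.S: sign of rs2 inverted; keeps the boxing bits of rs1. */
    static uint64_t fsgnjn_s(uint64_t rs1, uint64_t rs2)
    {
        uint64_t mask = ~SIGN_BIT;          /* everything except bit 31 */
        /* nor(rs2, mask) leaves only the inverted sign bit set */
        return (rs1 & mask) | ~(rs2 | mask);
    }

    /* FSGNJX.S: sign of rs1 xor sign of rs2; keeps the boxing bits of rs1. */
    static uint64_t fsgnjx_s(uint64_t rs1, uint64_t rs2)
    {
        return rs1 ^ (rs2 & SIGN_BIT);
    }

    int main(void)
    {
        uint64_t one     = BOX | 0x3f800000;   /* +1.0f, boxed */
        uint64_t neg_two = BOX | 0xc0000000;   /* -2.0f, boxed */

        printf("%016llx\n", (unsigned long long)fsgnj_s(one, neg_two));  /* ...bf800000 (-1.0f) */
        printf("%016llx\n", (unsigned long long)fsgnjn_s(one, neg_two)); /* ...3f800000 (+1.0f) */
        printf("%016llx\n", (unsigned long long)fsgnjx_s(one, neg_two)); /* ...bf800000 (-1.0f) */
        return 0;
    }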
diff --git a/target/riscv/internals.h b/target/riscv/internals.h
index 37d3382..f1a546d 100644
--- a/target/riscv/internals.h
+++ b/target/riscv/internals.h
@@ -38,4 +38,20 @@ target_ulong fclass_d(uint64_t frs1);
 #define SEW32 2
 #define SEW64 3
+static inline uint64_t nanbox_s(float32 f)
+{
+    return f | MAKE_64BIT_MASK(32, 32);
+}
+
+static inline float32 check_nanbox_s(uint64_t f)
+{
+    uint64_t mask = MAKE_64BIT_MASK(32, 32);
+
+    if (likely((f & mask) == mask)) {
+        return (uint32_t)f;
+    } else {
+        return 0x7fc00000u; /* default qnan */
+    }
+}
+
 #endif
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 2a2b9f5..c394e86 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -320,8 +320,7 @@ void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
     for (i = 0; i < sizeof(target_ulong); i++) {
         cfg_val = (val >> 8 * i) & 0xff;
-        pmp_write_cfg(env, (reg_index * sizeof(target_ulong)) + i,
-                      cfg_val);
+        pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
     }
 }
@@ -336,7 +335,7 @@ target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
     target_ulong val = 0;
     for (i = 0; i < sizeof(target_ulong); i++) {
-        val = pmp_read_cfg(env, (reg_index * sizeof(target_ulong)) + i);
+        val = pmp_read_cfg(env, (reg_index * 4) + i);
         cfg_val |= (val << (i * 8));
     }
     trace_pmpcfg_csr_read(env->mhartid, reg_index, cfg_val);
@@ -384,3 +383,55 @@ target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
     return val;
 }
+
+/*
+ * Calculate the TLB size if the start address or the end address of
+ * the PMP entry is present in this TLB page.
+ */
+static target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
+                                     target_ulong tlb_sa, target_ulong tlb_ea)
+{
+    target_ulong pmp_sa = env->pmp_state.addr[pmp_index].sa;
+    target_ulong pmp_ea = env->pmp_state.addr[pmp_index].ea;
+
+    if (pmp_sa >= tlb_sa && pmp_ea <= tlb_ea) {
+        return pmp_ea - pmp_sa + 1;
+    }
+
+    if (pmp_sa >= tlb_sa && pmp_sa <= tlb_ea && pmp_ea >= tlb_ea) {
+        return tlb_ea - pmp_sa + 1;
+    }
+
+    if (pmp_ea <= tlb_ea && pmp_ea >= tlb_sa && pmp_sa <= tlb_sa) {
+        return pmp_ea - tlb_sa + 1;
+    }
+
+    return 0;
+}
+
+/*
+ * Check whether there is a PMP entry whose range covers this page. If so,
+ * try to find the minimum granularity for the TLB size.
+ */
+bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
+                         target_ulong *tlb_size)
+{
+    int i;
+    target_ulong val;
+    target_ulong tlb_ea = (tlb_sa + TARGET_PAGE_SIZE - 1);
+
+    for (i = 0; i < MAX_RISCV_PMPS; i++) {
+        val = pmp_get_tlb_size(env, i, tlb_sa, tlb_ea);
+        if (val) {
+            if (*tlb_size == 0 || *tlb_size > val) {
+                *tlb_size = val;
+            }
+        }
+    }
+
+    if (*tlb_size != 0) {
+        return true;
+    }
+
+    return false;
+}
diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
index 8e19793..6a8f072 100644
--- a/target/riscv/pmp.h
+++ b/target/riscv/pmp.h
@@ -60,5 +60,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
 target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
 bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
     target_ulong size, pmp_priv_t priv, target_ulong mode);
+bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
+                         target_ulong *tlb_size);
 #endif
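Editor's note: pmp_get_tlb_size() above returns the size of the overlap between one PMP region and the page, and pmp_is_range_in_tlb() keeps the smallest non-zero overlap so a TLB entry never spans a PMP boundary. A standalone sketch of the overlap arithmetic with illustrative constants and no QEMU state; note that an entry covering the whole page matches none of the three cases and contributes 0, in which case the normal page-sized TLB entry is used:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /*
     * Overlap of one PMP region [pmp_sa, pmp_ea] with the page
     * [tlb_sa, tlb_ea]; 0 means no usable overlap.  Mirrors the three
     * cases in the patch: region inside the page, region starting in
     * the page, region ending in the page.
     */
    static uint64_t overlap_size(uint64_t pmp_sa, uint64_t pmp_ea,
                                 uint64_t tlb_sa, uint64_t tlb_ea)
    {
        if (pmp_sa >= tlb_sa && pmp_ea <= tlb_ea) {
            return pmp_ea - pmp_sa + 1;            /* fully inside the page */
        }
        if (pmp_sa >= tlb_sa && pmp_sa <= tlb_ea && pmp_ea >= tlb_ea) {
            return tlb_ea - pmp_sa + 1;            /* starts in the page */
        }
        if (pmp_ea >= tlb_sa && pmp_ea <= tlb_ea && pmp_sa <= tlb_sa) {
            return pmp_ea - tlb_sa + 1;            /* ends in the page */
        }
        return 0;
    }

    int main(void)
    {
        uint64_t tlb_sa = 0x80000000u;
        uint64_t tlb_ea = tlb_sa + PAGE_SIZE - 1;

        /* 1 KiB region in the middle of the page -> TLB entry limited to 1 KiB */
        printf("%llu\n", (unsigned long long)
               overlap_size(0x80000400u, 0x800007ffu, tlb_sa, tlb_ea));   /* 1024 */

        /* region straddling the start of the page -> clipped at the page start */
        printf("%llu\n", (unsigned long long)
               overlap_size(0x7ffff000u, 0x800000ffu, tlb_sa, tlb_ea));   /* 256 */
        return 0;
    }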
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index d0485c0..3919f57 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -90,6 +90,35 @@ static inline bool has_ext(DisasContext *ctx, uint32_t ext)
     return ctx->misa & ext;
 }
+/*
+ * RISC-V requires NaN-boxing of narrower width floating point values.
+ * This applies when a 32-bit value is assigned to a 64-bit FP register.
+ * For consistency and simplicity, we nanbox results even when the RVD
+ * extension is not present.
+ */
+static void gen_nanbox_s(TCGv_i64 out, TCGv_i64 in)
+{
+    tcg_gen_ori_i64(out, in, MAKE_64BIT_MASK(32, 32));
+}
+
+/*
+ * A narrow n-bit operation, where n < FLEN, checks that input operands
+ * are correctly NaN-boxed, i.e., all upper FLEN - n bits are 1.
+ * If so, the least-significant bits of the input are used, otherwise the
+ * input value is treated as an n-bit canonical NaN (v2.2 section 9.2).
+ *
+ * Here, the result is always nan-boxed, even the canonical nan.
+ */
+static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in)
+{
+    TCGv_i64 t_max = tcg_const_i64(0xffffffff00000000ull);
+    TCGv_i64 t_nan = tcg_const_i64(0xffffffff7fc00000ull);
+
+    tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan);
+    tcg_temp_free_i64(t_max);
+    tcg_temp_free_i64(t_nan);
+}
+
 static void generate_exception(DisasContext *ctx, int excp)
 {
     tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
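Editor's note: gen_check_nanbox_s() relies on a single unsigned comparison: a value is correctly boxed exactly when, read as an unsigned 64-bit integer, it is at least 0xffffffff00000000 (only then are the upper 32 bits all ones), and the replacement constant is the canonical NaN already boxed. A branchless host-side sketch of the same selection, with plain C standing in for tcg_gen_movcond_i64:

    #include <stdint.h>
    #include <stdio.h>

    #define T_MAX 0xffffffff00000000ull   /* smallest correctly boxed value */
    #define T_NAN 0xffffffff7fc00000ull   /* canonical qNaN, already boxed */

    /* Branchless equivalent of movcond(GEU, out, in, t_max, in, t_nan). */
    static uint64_t check_nanbox_branchless(uint64_t in)
    {
        uint64_t keep = -(uint64_t)(in >= T_MAX);   /* all ones if boxed, else 0 */
        return (in & keep) | (T_NAN & ~keep);
    }

    int main(void)
    {
        /* properly boxed 1.0f: passed through unchanged, box bits and all */
        printf("%016llx\n", (unsigned long long)
               check_nanbox_branchless(0xffffffff3f800000ull));
        /* top half not all ones: replaced by the boxed canonical NaN */
        printf("%016llx\n", (unsigned long long)
               check_nanbox_branchless(0x000000003f800000ull));
        return 0;
    }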