author     Andrew Waterman <andrew@sifive.com>    2022-10-20 16:34:48 -0700
committer  GitHub <noreply@github.com>            2022-10-20 16:34:48 -0700
commit     5a50590f25a932cc1f25fe78b9912e9661d37d30 (patch)
tree       5b86d637fab0c47eb61786ea35411337185fc802
parent     9cf4e82ec234be4fb720ce44feef071010eb23c7 (diff)
parent     b1f2ae41a1e64f416fb5f5aa092352439ecefa83 (diff)
Merge pull request #1122 from riscv-software-src/more-mmu-simplification
Fix minor MMU bugs; clean up MMU some more
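At a glance, this merge replaces Spike's macro-generated per-width MMU accessors with templated ones. The snippet below is an illustrative before/after taken from the lw, sd, and amoadd.w semantics in the diff itself, not additional code from the commit:

  // before: one macro-generated accessor per width and signedness
  WRITE_RD(MMU.load_int32(RS1 + insn.i_imm()));                                      // lw
  MMU.store_uint64(RS1 + insn.s_imm(), RS2);                                         // sd
  WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return lhs + RS2; })));    // amoadd.w

  // after: a single template parameterized on the access type
  WRITE_RD(MMU.load<int32_t>(RS1 + insn.i_imm()));                                   // lw
  MMU.store<uint64_t>(RS1 + insn.s_imm(), RS2);                                      // sd
  WRITE_RD(sext32(MMU.amo<uint32_t>(RS1, [&](uint32_t lhs) { return lhs + RS2; })));  // amoadd.w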
-rw-r--r--  customext/dummy_rocc.cc | 2
-rw-r--r--  riscv/debug_module.cc | 16
-rw-r--r--  riscv/insns/amoadd_d.h | 2
-rw-r--r--  riscv/insns/amoadd_w.h | 2
-rw-r--r--  riscv/insns/amoand_d.h | 2
-rw-r--r--  riscv/insns/amoand_w.h | 2
-rw-r--r--  riscv/insns/amomax_d.h | 2
-rw-r--r--  riscv/insns/amomax_w.h | 2
-rw-r--r--  riscv/insns/amomaxu_d.h | 2
-rw-r--r--  riscv/insns/amomaxu_w.h | 2
-rw-r--r--  riscv/insns/amomin_d.h | 2
-rw-r--r--  riscv/insns/amomin_w.h | 2
-rw-r--r--  riscv/insns/amominu_d.h | 2
-rw-r--r--  riscv/insns/amominu_w.h | 2
-rw-r--r--  riscv/insns/amoor_d.h | 2
-rw-r--r--  riscv/insns/amoor_w.h | 2
-rw-r--r--  riscv/insns/amoswap_d.h | 2
-rw-r--r--  riscv/insns/amoswap_w.h | 2
-rw-r--r--  riscv/insns/amoxor_d.h | 2
-rw-r--r--  riscv/insns/amoxor_w.h | 2
-rw-r--r--  riscv/insns/c_fld.h | 2
-rw-r--r--  riscv/insns/c_fldsp.h | 2
-rw-r--r--  riscv/insns/c_flw.h | 4
-rw-r--r--  riscv/insns/c_flwsp.h | 4
-rw-r--r--  riscv/insns/c_fsd.h | 2
-rw-r--r--  riscv/insns/c_fsdsp.h | 2
-rw-r--r--  riscv/insns/c_fsw.h | 4
-rw-r--r--  riscv/insns/c_fswsp.h | 4
-rw-r--r--  riscv/insns/c_lw.h | 2
-rw-r--r--  riscv/insns/c_lwsp.h | 2
-rw-r--r--  riscv/insns/c_sw.h | 2
-rw-r--r--  riscv/insns/c_swsp.h | 2
-rw-r--r--  riscv/insns/fld.h | 2
-rw-r--r--  riscv/insns/flh.h | 2
-rw-r--r--  riscv/insns/flw.h | 2
-rw-r--r--  riscv/insns/fsd.h | 2
-rw-r--r--  riscv/insns/fsh.h | 2
-rw-r--r--  riscv/insns/fsw.h | 2
-rw-r--r--  riscv/insns/hlv_b.h | 2
-rw-r--r--  riscv/insns/hlv_bu.h | 2
-rw-r--r--  riscv/insns/hlv_d.h | 2
-rw-r--r--  riscv/insns/hlv_h.h | 2
-rw-r--r--  riscv/insns/hlv_hu.h | 2
-rw-r--r--  riscv/insns/hlv_w.h | 2
-rw-r--r--  riscv/insns/hlv_wu.h | 2
-rw-r--r--  riscv/insns/hlvx_hu.h | 2
-rw-r--r--  riscv/insns/hlvx_wu.h | 2
-rw-r--r--  riscv/insns/hsv_b.h | 2
-rw-r--r--  riscv/insns/hsv_d.h | 2
-rw-r--r--  riscv/insns/hsv_h.h | 2
-rw-r--r--  riscv/insns/hsv_w.h | 2
-rw-r--r--  riscv/insns/lb.h | 2
-rw-r--r--  riscv/insns/lbu.h | 2
-rw-r--r--  riscv/insns/ld.h | 2
-rw-r--r--  riscv/insns/lh.h | 2
-rw-r--r--  riscv/insns/lhu.h | 2
-rw-r--r--  riscv/insns/lr_d.h | 4
-rw-r--r--  riscv/insns/lr_w.h | 4
-rw-r--r--  riscv/insns/lw.h | 2
-rw-r--r--  riscv/insns/lwu.h | 2
-rw-r--r--  riscv/insns/sb.h | 2
-rw-r--r--  riscv/insns/sc_d.h | 7
-rw-r--r--  riscv/insns/sc_w.h | 7
-rw-r--r--  riscv/insns/sd.h | 2
-rw-r--r--  riscv/insns/sh.h | 2
-rw-r--r--  riscv/insns/sw.h | 2
-rw-r--r--  riscv/interactive.cc | 10
-rw-r--r--  riscv/mmu.cc | 10
-rw-r--r--  riscv/mmu.h | 183
-rw-r--r--  riscv/sim.cc | 4
-rw-r--r--  riscv/v_ext_macros.h | 34
71 files changed, 190 insertions, 219 deletions
diff --git a/customext/dummy_rocc.cc b/customext/dummy_rocc.cc
index 5afa56a..8c051fa 100644
--- a/customext/dummy_rocc.cc
+++ b/customext/dummy_rocc.cc
@@ -22,7 +22,7 @@ class dummy_rocc_t : public rocc_t
case 1: // xd <- acc (the only real work is the return statement below)
break;
case 2: // acc[rs2] <- Mem[xs1]
- acc[insn.rs2] = p->get_mmu()->load_uint64(xs1);
+ acc[insn.rs2] = p->get_mmu()->load<uint64_t>(xs1);
break;
case 3: // acc[rs2] <- accX + xs1
acc[insn.rs2] += xs1;
diff --git a/riscv/debug_module.cc b/riscv/debug_module.cc
index f5c0c73..2f48dd2 100644
--- a/riscv/debug_module.cc
+++ b/riscv/debug_module.cc
@@ -318,13 +318,13 @@ void debug_module_t::sb_read()
reg_t address = ((uint64_t) sbaddress[1] << 32) | sbaddress[0];
try {
if (sbcs.sbaccess == 0 && config.max_sba_data_width >= 8) {
- sbdata[0] = sim->debug_mmu->load_uint8(address);
+ sbdata[0] = sim->debug_mmu->load<uint8_t>(address);
} else if (sbcs.sbaccess == 1 && config.max_sba_data_width >= 16) {
- sbdata[0] = sim->debug_mmu->load_uint16(address);
+ sbdata[0] = sim->debug_mmu->load<uint16_t>(address);
} else if (sbcs.sbaccess == 2 && config.max_sba_data_width >= 32) {
- sbdata[0] = sim->debug_mmu->load_uint32(address);
+ sbdata[0] = sim->debug_mmu->load<uint32_t>(address);
} else if (sbcs.sbaccess == 3 && config.max_sba_data_width >= 64) {
- uint64_t value = sim->debug_mmu->load_uint64(address);
+ uint64_t value = sim->debug_mmu->load<uint64_t>(address);
sbdata[0] = value;
sbdata[1] = value >> 32;
} else {
@@ -340,13 +340,13 @@ void debug_module_t::sb_write()
reg_t address = ((uint64_t) sbaddress[1] << 32) | sbaddress[0];
D(fprintf(stderr, "sb_write() 0x%x @ 0x%lx\n", sbdata[0], address));
if (sbcs.sbaccess == 0 && config.max_sba_data_width >= 8) {
- sim->debug_mmu->store_uint8(address, sbdata[0]);
+ sim->debug_mmu->store<uint8_t>(address, sbdata[0]);
} else if (sbcs.sbaccess == 1 && config.max_sba_data_width >= 16) {
- sim->debug_mmu->store_uint16(address, sbdata[0]);
+ sim->debug_mmu->store<uint16_t>(address, sbdata[0]);
} else if (sbcs.sbaccess == 2 && config.max_sba_data_width >= 32) {
- sim->debug_mmu->store_uint32(address, sbdata[0]);
+ sim->debug_mmu->store<uint32_t>(address, sbdata[0]);
} else if (sbcs.sbaccess == 3 && config.max_sba_data_width >= 64) {
- sim->debug_mmu->store_uint64(address,
+ sim->debug_mmu->store<uint64_t>(address,
(((uint64_t) sbdata[1]) << 32) | sbdata[0]);
} else {
sbcs.error = 3;
diff --git a/riscv/insns/amoadd_d.h b/riscv/insns/amoadd_d.h
index 6090fbc..8573aa5 100644
--- a/riscv/insns/amoadd_d.h
+++ b/riscv/insns/amoadd_d.h
@@ -1,3 +1,3 @@
require_extension('A');
require_rv64;
-WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return lhs + RS2; }));
+WRITE_RD(MMU.amo<uint64_t>(RS1, [&](uint64_t lhs) { return lhs + RS2; }));
diff --git a/riscv/insns/amoadd_w.h b/riscv/insns/amoadd_w.h
index 2c6471a..c288b3b 100644
--- a/riscv/insns/amoadd_w.h
+++ b/riscv/insns/amoadd_w.h
@@ -1,2 +1,2 @@
require_extension('A');
-WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return lhs + RS2; })));
+WRITE_RD(sext32(MMU.amo<uint32_t>(RS1, [&](uint32_t lhs) { return lhs + RS2; })));
diff --git a/riscv/insns/amoand_d.h b/riscv/insns/amoand_d.h
index 80aea18..2df7ce2 100644
--- a/riscv/insns/amoand_d.h
+++ b/riscv/insns/amoand_d.h
@@ -1,3 +1,3 @@
require_extension('A');
require_rv64;
-WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return lhs & RS2; }));
+WRITE_RD(MMU.amo<uint64_t>(RS1, [&](uint64_t lhs) { return lhs & RS2; }));
diff --git a/riscv/insns/amoand_w.h b/riscv/insns/amoand_w.h
index f7e1ba7..962165f 100644
--- a/riscv/insns/amoand_w.h
+++ b/riscv/insns/amoand_w.h
@@ -1,2 +1,2 @@
require_extension('A');
-WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return lhs & RS2; })));
+WRITE_RD(sext32(MMU.amo<uint32_t>(RS1, [&](uint32_t lhs) { return lhs & RS2; })));
diff --git a/riscv/insns/amomax_d.h b/riscv/insns/amomax_d.h
index 496d8ad..ab95da0 100644
--- a/riscv/insns/amomax_d.h
+++ b/riscv/insns/amomax_d.h
@@ -1,3 +1,3 @@
require_extension('A');
require_rv64;
-WRITE_RD(MMU.amo_uint64(RS1, [&](int64_t lhs) { return std::max(lhs, int64_t(RS2)); }));
+WRITE_RD(MMU.amo<uint64_t>(RS1, [&](int64_t lhs) { return std::max(lhs, int64_t(RS2)); }));
diff --git a/riscv/insns/amomax_w.h b/riscv/insns/amomax_w.h
index 757bdd2..132c2e0 100644
--- a/riscv/insns/amomax_w.h
+++ b/riscv/insns/amomax_w.h
@@ -1,2 +1,2 @@
require_extension('A');
-WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](int32_t lhs) { return std::max(lhs, int32_t(RS2)); })));
+WRITE_RD(sext32(MMU.amo<uint32_t>(RS1, [&](int32_t lhs) { return std::max(lhs, int32_t(RS2)); })));
diff --git a/riscv/insns/amomaxu_d.h b/riscv/insns/amomaxu_d.h
index 12b1733..e2371aa 100644
--- a/riscv/insns/amomaxu_d.h
+++ b/riscv/insns/amomaxu_d.h
@@ -1,3 +1,3 @@
require_extension('A');
require_rv64;
-WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return std::max(lhs, RS2); }));
+WRITE_RD(MMU.amo<uint64_t>(RS1, [&](uint64_t lhs) { return std::max(lhs, RS2); }));
diff --git a/riscv/insns/amomaxu_w.h b/riscv/insns/amomaxu_w.h
index 538df1c..ebbdd41 100644
--- a/riscv/insns/amomaxu_w.h
+++ b/riscv/insns/amomaxu_w.h
@@ -1,2 +1,2 @@
require_extension('A');
-WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return std::max(lhs, uint32_t(RS2)); })));
+WRITE_RD(sext32(MMU.amo<uint32_t>(RS1, [&](uint32_t lhs) { return std::max(lhs, uint32_t(RS2)); })));
diff --git a/riscv/insns/amomin_d.h b/riscv/insns/amomin_d.h
index 725d983..419e42e 100644
--- a/riscv/insns/amomin_d.h
+++ b/riscv/insns/amomin_d.h
@@ -1,3 +1,3 @@
require_extension('A');
require_rv64;
-WRITE_RD(MMU.amo_uint64(RS1, [&](int64_t lhs) { return std::min(lhs, int64_t(RS2)); }));
+WRITE_RD(MMU.amo<uint64_t>(RS1, [&](int64_t lhs) { return std::min(lhs, int64_t(RS2)); }));
diff --git a/riscv/insns/amomin_w.h b/riscv/insns/amomin_w.h
index ee53faa..749149c 100644
--- a/riscv/insns/amomin_w.h
+++ b/riscv/insns/amomin_w.h
@@ -1,2 +1,2 @@
require_extension('A');
-WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](int32_t lhs) { return std::min(lhs, int32_t(RS2)); })));
+WRITE_RD(sext32(MMU.amo<uint32_t>(RS1, [&](int32_t lhs) { return std::min(lhs, int32_t(RS2)); })));
diff --git a/riscv/insns/amominu_d.h b/riscv/insns/amominu_d.h
index 15b6c0a..b4bab47 100644
--- a/riscv/insns/amominu_d.h
+++ b/riscv/insns/amominu_d.h
@@ -1,3 +1,3 @@
require_extension('A');
require_rv64;
-WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return std::min(lhs, RS2); }));
+WRITE_RD(MMU.amo<uint64_t>(RS1, [&](uint64_t lhs) { return std::min(lhs, RS2); }));
diff --git a/riscv/insns/amominu_w.h b/riscv/insns/amominu_w.h
index 52e1141..680eef2 100644
--- a/riscv/insns/amominu_w.h
+++ b/riscv/insns/amominu_w.h
@@ -1,2 +1,2 @@
require_extension('A');
-WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return std::min(lhs, uint32_t(RS2)); })));
+WRITE_RD(sext32(MMU.amo<uint32_t>(RS1, [&](uint32_t lhs) { return std::min(lhs, uint32_t(RS2)); })));
diff --git a/riscv/insns/amoor_d.h b/riscv/insns/amoor_d.h
index de87627..c201d88 100644
--- a/riscv/insns/amoor_d.h
+++ b/riscv/insns/amoor_d.h
@@ -1,3 +1,3 @@
require_extension('A');
require_rv64;
-WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return lhs | RS2; }));
+WRITE_RD(MMU.amo<uint64_t>(RS1, [&](uint64_t lhs) { return lhs | RS2; }));
diff --git a/riscv/insns/amoor_w.h b/riscv/insns/amoor_w.h
index 3455981..0adac5b 100644
--- a/riscv/insns/amoor_w.h
+++ b/riscv/insns/amoor_w.h
@@ -1,2 +1,2 @@
require_extension('A');
-WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return lhs | RS2; })));
+WRITE_RD(sext32(MMU.amo<uint32_t>(RS1, [&](uint32_t lhs) { return lhs | RS2; })));
diff --git a/riscv/insns/amoswap_d.h b/riscv/insns/amoswap_d.h
index f9188ea..62a95b0 100644
--- a/riscv/insns/amoswap_d.h
+++ b/riscv/insns/amoswap_d.h
@@ -1,3 +1,3 @@
require_extension('A');
require_rv64;
-WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t UNUSED lhs) { return RS2; }));
+WRITE_RD(MMU.amo<uint64_t>(RS1, [&](uint64_t UNUSED lhs) { return RS2; }));
diff --git a/riscv/insns/amoswap_w.h b/riscv/insns/amoswap_w.h
index 151f095..819579c 100644
--- a/riscv/insns/amoswap_w.h
+++ b/riscv/insns/amoswap_w.h
@@ -1,2 +1,2 @@
require_extension('A');
-WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t UNUSED lhs) { return RS2; })));
+WRITE_RD(sext32(MMU.amo<uint32_t>(RS1, [&](uint32_t UNUSED lhs) { return RS2; })));
diff --git a/riscv/insns/amoxor_d.h b/riscv/insns/amoxor_d.h
index 1b3c0bf..a40050f 100644
--- a/riscv/insns/amoxor_d.h
+++ b/riscv/insns/amoxor_d.h
@@ -1,3 +1,3 @@
require_extension('A');
require_rv64;
-WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return lhs ^ RS2; }));
+WRITE_RD(MMU.amo<uint64_t>(RS1, [&](uint64_t lhs) { return lhs ^ RS2; }));
diff --git a/riscv/insns/amoxor_w.h b/riscv/insns/amoxor_w.h
index a1ea82f..af025d6 100644
--- a/riscv/insns/amoxor_w.h
+++ b/riscv/insns/amoxor_w.h
@@ -1,2 +1,2 @@
require_extension('A');
-WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return lhs ^ RS2; })));
+WRITE_RD(sext32(MMU.amo<uint32_t>(RS1, [&](uint32_t lhs) { return lhs ^ RS2; })));
diff --git a/riscv/insns/c_fld.h b/riscv/insns/c_fld.h
index 319615b..b009ac2 100644
--- a/riscv/insns/c_fld.h
+++ b/riscv/insns/c_fld.h
@@ -1,4 +1,4 @@
require_extension('C');
require_extension('D');
require_fp;
-WRITE_RVC_FRS2S(f64(MMU.load_uint64(RVC_RS1S + insn.rvc_ld_imm())));
+WRITE_RVC_FRS2S(f64(MMU.load<uint64_t>(RVC_RS1S + insn.rvc_ld_imm())));
diff --git a/riscv/insns/c_fldsp.h b/riscv/insns/c_fldsp.h
index 534eef7..20cad57 100644
--- a/riscv/insns/c_fldsp.h
+++ b/riscv/insns/c_fldsp.h
@@ -1,4 +1,4 @@
require_extension('C');
require_extension('D');
require_fp;
-WRITE_FRD(f64(MMU.load_uint64(RVC_SP + insn.rvc_ldsp_imm())));
+WRITE_FRD(f64(MMU.load<uint64_t>(RVC_SP + insn.rvc_ldsp_imm())));
diff --git a/riscv/insns/c_flw.h b/riscv/insns/c_flw.h
index 682566c..f073a85 100644
--- a/riscv/insns/c_flw.h
+++ b/riscv/insns/c_flw.h
@@ -2,7 +2,7 @@ require_extension('C');
if (xlen == 32) {
require_extension('F');
require_fp;
- WRITE_RVC_FRS2S(f32(MMU.load_uint32(RVC_RS1S + insn.rvc_lw_imm())));
+ WRITE_RVC_FRS2S(f32(MMU.load<uint32_t>(RVC_RS1S + insn.rvc_lw_imm())));
} else { // c.ld
- WRITE_RVC_RS2S(MMU.load_int64(RVC_RS1S + insn.rvc_ld_imm()));
+ WRITE_RVC_RS2S(MMU.load<int64_t>(RVC_RS1S + insn.rvc_ld_imm()));
}
diff --git a/riscv/insns/c_flwsp.h b/riscv/insns/c_flwsp.h
index 79058c4..a9a4b2c 100644
--- a/riscv/insns/c_flwsp.h
+++ b/riscv/insns/c_flwsp.h
@@ -2,8 +2,8 @@ require_extension('C');
if (xlen == 32) {
require_extension('F');
require_fp;
- WRITE_FRD(f32(MMU.load_uint32(RVC_SP + insn.rvc_lwsp_imm())));
+ WRITE_FRD(f32(MMU.load<uint32_t>(RVC_SP + insn.rvc_lwsp_imm())));
} else { // c.ldsp
require(insn.rvc_rd() != 0);
- WRITE_RD(MMU.load_int64(RVC_SP + insn.rvc_ldsp_imm()));
+ WRITE_RD(MMU.load<int64_t>(RVC_SP + insn.rvc_ldsp_imm()));
}
diff --git a/riscv/insns/c_fsd.h b/riscv/insns/c_fsd.h
index 6f2c8f4..58c3bcf 100644
--- a/riscv/insns/c_fsd.h
+++ b/riscv/insns/c_fsd.h
@@ -1,4 +1,4 @@
require_extension('C');
require_extension('D');
require_fp;
-MMU.store_uint64(RVC_RS1S + insn.rvc_ld_imm(), RVC_FRS2S.v[0]);
+MMU.store<uint64_t>(RVC_RS1S + insn.rvc_ld_imm(), RVC_FRS2S.v[0]);
diff --git a/riscv/insns/c_fsdsp.h b/riscv/insns/c_fsdsp.h
index 27b9331..ebe7995 100644
--- a/riscv/insns/c_fsdsp.h
+++ b/riscv/insns/c_fsdsp.h
@@ -1,4 +1,4 @@
require_extension('C');
require_extension('D');
require_fp;
-MMU.store_uint64(RVC_SP + insn.rvc_sdsp_imm(), RVC_FRS2.v[0]);
+MMU.store<uint64_t>(RVC_SP + insn.rvc_sdsp_imm(), RVC_FRS2.v[0]);
diff --git a/riscv/insns/c_fsw.h b/riscv/insns/c_fsw.h
index 7085822..381ab5e 100644
--- a/riscv/insns/c_fsw.h
+++ b/riscv/insns/c_fsw.h
@@ -2,7 +2,7 @@ require_extension('C');
if (xlen == 32) {
require_extension('F');
require_fp;
- MMU.store_uint32(RVC_RS1S + insn.rvc_lw_imm(), RVC_FRS2S.v[0]);
+ MMU.store<uint32_t>(RVC_RS1S + insn.rvc_lw_imm(), RVC_FRS2S.v[0]);
} else { // c.sd
- MMU.store_uint64(RVC_RS1S + insn.rvc_ld_imm(), RVC_RS2S);
+ MMU.store<uint64_t>(RVC_RS1S + insn.rvc_ld_imm(), RVC_RS2S);
}
diff --git a/riscv/insns/c_fswsp.h b/riscv/insns/c_fswsp.h
index c5a003f..9ce408c 100644
--- a/riscv/insns/c_fswsp.h
+++ b/riscv/insns/c_fswsp.h
@@ -2,7 +2,7 @@ require_extension('C');
if (xlen == 32) {
require_extension('F');
require_fp;
- MMU.store_uint32(RVC_SP + insn.rvc_swsp_imm(), RVC_FRS2.v[0]);
+ MMU.store<uint32_t>(RVC_SP + insn.rvc_swsp_imm(), RVC_FRS2.v[0]);
} else { // c.sdsp
- MMU.store_uint64(RVC_SP + insn.rvc_sdsp_imm(), RVC_RS2);
+ MMU.store<uint64_t>(RVC_SP + insn.rvc_sdsp_imm(), RVC_RS2);
}
diff --git a/riscv/insns/c_lw.h b/riscv/insns/c_lw.h
index ef49dd9..d90cca2 100644
--- a/riscv/insns/c_lw.h
+++ b/riscv/insns/c_lw.h
@@ -1,2 +1,2 @@
require_extension('C');
-WRITE_RVC_RS2S(MMU.load_int32(RVC_RS1S + insn.rvc_lw_imm()));
+WRITE_RVC_RS2S(MMU.load<int32_t>(RVC_RS1S + insn.rvc_lw_imm()));
diff --git a/riscv/insns/c_lwsp.h b/riscv/insns/c_lwsp.h
index b3d74db..b4c0b7f 100644
--- a/riscv/insns/c_lwsp.h
+++ b/riscv/insns/c_lwsp.h
@@ -1,3 +1,3 @@
require_extension('C');
require(insn.rvc_rd() != 0);
-WRITE_RD(MMU.load_int32(RVC_SP + insn.rvc_lwsp_imm()));
+WRITE_RD(MMU.load<int32_t>(RVC_SP + insn.rvc_lwsp_imm()));
diff --git a/riscv/insns/c_sw.h b/riscv/insns/c_sw.h
index 3073e9d..43f397f 100644
--- a/riscv/insns/c_sw.h
+++ b/riscv/insns/c_sw.h
@@ -1,2 +1,2 @@
require_extension('C');
-MMU.store_uint32(RVC_RS1S + insn.rvc_lw_imm(), RVC_RS2S);
+MMU.store<uint32_t>(RVC_RS1S + insn.rvc_lw_imm(), RVC_RS2S);
diff --git a/riscv/insns/c_swsp.h b/riscv/insns/c_swsp.h
index b8995ab..a01e466 100644
--- a/riscv/insns/c_swsp.h
+++ b/riscv/insns/c_swsp.h
@@ -1,2 +1,2 @@
require_extension('C');
-MMU.store_uint32(RVC_SP + insn.rvc_swsp_imm(), RVC_RS2);
+MMU.store<uint32_t>(RVC_SP + insn.rvc_swsp_imm(), RVC_RS2);
diff --git a/riscv/insns/fld.h b/riscv/insns/fld.h
index 4dea1d4..bbe859f 100644
--- a/riscv/insns/fld.h
+++ b/riscv/insns/fld.h
@@ -1,3 +1,3 @@
require_extension('D');
require_fp;
-WRITE_FRD(f64(MMU.load_uint64(RS1 + insn.i_imm())));
+WRITE_FRD(f64(MMU.load<uint64_t>(RS1 + insn.i_imm())));
diff --git a/riscv/insns/flh.h b/riscv/insns/flh.h
index bdb22d3..befff2c 100644
--- a/riscv/insns/flh.h
+++ b/riscv/insns/flh.h
@@ -1,3 +1,3 @@
require_extension(EXT_ZFHMIN);
require_fp;
-WRITE_FRD(f16(MMU.load_uint16(RS1 + insn.i_imm())));
+WRITE_FRD(f16(MMU.load<uint16_t>(RS1 + insn.i_imm())));
diff --git a/riscv/insns/flw.h b/riscv/insns/flw.h
index 6129754..c57306a 100644
--- a/riscv/insns/flw.h
+++ b/riscv/insns/flw.h
@@ -1,3 +1,3 @@
require_extension('F');
require_fp;
-WRITE_FRD(f32(MMU.load_uint32(RS1 + insn.i_imm())));
+WRITE_FRD(f32(MMU.load<uint32_t>(RS1 + insn.i_imm())));
diff --git a/riscv/insns/fsd.h b/riscv/insns/fsd.h
index 38c702b..babc9e5 100644
--- a/riscv/insns/fsd.h
+++ b/riscv/insns/fsd.h
@@ -1,3 +1,3 @@
require_extension('D');
require_fp;
-MMU.store_uint64(RS1 + insn.s_imm(), FRS2.v[0]);
+MMU.store<uint64_t>(RS1 + insn.s_imm(), FRS2.v[0]);
diff --git a/riscv/insns/fsh.h b/riscv/insns/fsh.h
index 9eaae1e..dfd6bc5 100644
--- a/riscv/insns/fsh.h
+++ b/riscv/insns/fsh.h
@@ -1,3 +1,3 @@
require_extension(EXT_ZFHMIN);
require_fp;
-MMU.store_uint16(RS1 + insn.s_imm(), FRS2.v[0]);
+MMU.store<uint16_t>(RS1 + insn.s_imm(), FRS2.v[0]);
diff --git a/riscv/insns/fsw.h b/riscv/insns/fsw.h
index 8af5184..887f03e 100644
--- a/riscv/insns/fsw.h
+++ b/riscv/insns/fsw.h
@@ -1,3 +1,3 @@
require_extension('F');
require_fp;
-MMU.store_uint32(RS1 + insn.s_imm(), FRS2.v[0]);
+MMU.store<uint32_t>(RS1 + insn.s_imm(), FRS2.v[0]);
diff --git a/riscv/insns/hlv_b.h b/riscv/insns/hlv_b.h
index 2ccb046..308d038 100644
--- a/riscv/insns/hlv_b.h
+++ b/riscv/insns/hlv_b.h
@@ -1,4 +1,4 @@
require_extension('H');
require_novirt();
require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S);
-WRITE_RD(MMU.guest_load_int8(RS1));
+WRITE_RD(MMU.guest_load<int8_t>(RS1));
diff --git a/riscv/insns/hlv_bu.h b/riscv/insns/hlv_bu.h
index 560f94a..1fe4d6a 100644
--- a/riscv/insns/hlv_bu.h
+++ b/riscv/insns/hlv_bu.h
@@ -1,4 +1,4 @@
require_extension('H');
require_novirt();
require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S);
-WRITE_RD(MMU.guest_load_uint8(RS1));
+WRITE_RD(MMU.guest_load<uint8_t>(RS1));
diff --git a/riscv/insns/hlv_d.h b/riscv/insns/hlv_d.h
index f432b65..8e92ce3 100644
--- a/riscv/insns/hlv_d.h
+++ b/riscv/insns/hlv_d.h
@@ -2,4 +2,4 @@ require_extension('H');
require_rv64;
require_novirt();
require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S);
-WRITE_RD(MMU.guest_load_int64(RS1));
+WRITE_RD(MMU.guest_load<int64_t>(RS1));
diff --git a/riscv/insns/hlv_h.h b/riscv/insns/hlv_h.h
index 4cb07e9..f2e14c4 100644
--- a/riscv/insns/hlv_h.h
+++ b/riscv/insns/hlv_h.h
@@ -1,4 +1,4 @@
require_extension('H');
require_novirt();
require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S);
-WRITE_RD(MMU.guest_load_int16(RS1));
+WRITE_RD(MMU.guest_load<int16_t>(RS1));
diff --git a/riscv/insns/hlv_hu.h b/riscv/insns/hlv_hu.h
index adec2f0..f7f12ef 100644
--- a/riscv/insns/hlv_hu.h
+++ b/riscv/insns/hlv_hu.h
@@ -1,4 +1,4 @@
require_extension('H');
require_novirt();
require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S);
-WRITE_RD(MMU.guest_load_uint16(RS1));
+WRITE_RD(MMU.guest_load<uint16_t>(RS1));
diff --git a/riscv/insns/hlv_w.h b/riscv/insns/hlv_w.h
index b2e102f..72f69ea 100644
--- a/riscv/insns/hlv_w.h
+++ b/riscv/insns/hlv_w.h
@@ -1,4 +1,4 @@
require_extension('H');
require_novirt();
require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S);
-WRITE_RD(MMU.guest_load_int32(RS1));
+WRITE_RD(MMU.guest_load<int32_t>(RS1));
diff --git a/riscv/insns/hlv_wu.h b/riscv/insns/hlv_wu.h
index 1f921c0..854269f 100644
--- a/riscv/insns/hlv_wu.h
+++ b/riscv/insns/hlv_wu.h
@@ -2,4 +2,4 @@ require_extension('H');
require_rv64;
require_novirt();
require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S);
-WRITE_RD(MMU.guest_load_uint32(RS1));
+WRITE_RD(MMU.guest_load<uint32_t>(RS1));
diff --git a/riscv/insns/hlvx_hu.h b/riscv/insns/hlvx_hu.h
index 3eb699c..95dcb20 100644
--- a/riscv/insns/hlvx_hu.h
+++ b/riscv/insns/hlvx_hu.h
@@ -1,4 +1,4 @@
require_extension('H');
require_novirt();
require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S);
-WRITE_RD(MMU.guest_load_x_uint16(RS1));
+WRITE_RD(MMU.guest_load_x<uint16_t>(RS1));
diff --git a/riscv/insns/hlvx_wu.h b/riscv/insns/hlvx_wu.h
index 33e2fa1..c751ba5 100644
--- a/riscv/insns/hlvx_wu.h
+++ b/riscv/insns/hlvx_wu.h
@@ -1,4 +1,4 @@
require_extension('H');
require_novirt();
require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S);
-WRITE_RD(sext_xlen(MMU.guest_load_x_uint32(RS1)));
+WRITE_RD(sext_xlen(MMU.guest_load_x<uint32_t>(RS1)));
diff --git a/riscv/insns/hsv_b.h b/riscv/insns/hsv_b.h
index 15f6a26..d56483f 100644
--- a/riscv/insns/hsv_b.h
+++ b/riscv/insns/hsv_b.h
@@ -1,4 +1,4 @@
require_extension('H');
require_novirt();
require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S);
-MMU.guest_store_uint8(RS1, RS2);
+MMU.guest_store<uint8_t>(RS1, RS2);
diff --git a/riscv/insns/hsv_d.h b/riscv/insns/hsv_d.h
index 83c3376..ed7f5bb 100644
--- a/riscv/insns/hsv_d.h
+++ b/riscv/insns/hsv_d.h
@@ -2,4 +2,4 @@ require_extension('H');
require_rv64;
require_novirt();
require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S);
-MMU.guest_store_uint64(RS1, RS2);
+MMU.guest_store<uint64_t>(RS1, RS2);
diff --git a/riscv/insns/hsv_h.h b/riscv/insns/hsv_h.h
index eaa2a2c..596f168 100644
--- a/riscv/insns/hsv_h.h
+++ b/riscv/insns/hsv_h.h
@@ -1,4 +1,4 @@
require_extension('H');
require_novirt();
require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S);
-MMU.guest_store_uint16(RS1, RS2);
+MMU.guest_store<uint16_t>(RS1, RS2);
diff --git a/riscv/insns/hsv_w.h b/riscv/insns/hsv_w.h
index 0d2c3d4..f011e2d 100644
--- a/riscv/insns/hsv_w.h
+++ b/riscv/insns/hsv_w.h
@@ -1,4 +1,4 @@
require_extension('H');
require_novirt();
require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S);
-MMU.guest_store_uint32(RS1, RS2);
+MMU.guest_store<uint32_t>(RS1, RS2);
diff --git a/riscv/insns/lb.h b/riscv/insns/lb.h
index 0f0999c..cd6b461 100644
--- a/riscv/insns/lb.h
+++ b/riscv/insns/lb.h
@@ -1 +1 @@
-WRITE_RD(MMU.load_int8(RS1 + insn.i_imm()));
+WRITE_RD(MMU.load<int8_t>(RS1 + insn.i_imm()));
diff --git a/riscv/insns/lbu.h b/riscv/insns/lbu.h
index 64d4a68..bcdf7ca 100644
--- a/riscv/insns/lbu.h
+++ b/riscv/insns/lbu.h
@@ -1 +1 @@
-WRITE_RD(MMU.load_uint8(RS1 + insn.i_imm()));
+WRITE_RD(MMU.load<uint8_t>(RS1 + insn.i_imm()));
diff --git a/riscv/insns/ld.h b/riscv/insns/ld.h
index 1122b98..3dea301 100644
--- a/riscv/insns/ld.h
+++ b/riscv/insns/ld.h
@@ -1,2 +1,2 @@
require_rv64;
-WRITE_RD(MMU.load_int64(RS1 + insn.i_imm()));
+WRITE_RD(MMU.load<int64_t>(RS1 + insn.i_imm()));
diff --git a/riscv/insns/lh.h b/riscv/insns/lh.h
index 0d458e0..845451a 100644
--- a/riscv/insns/lh.h
+++ b/riscv/insns/lh.h
@@ -1 +1 @@
-WRITE_RD(MMU.load_int16(RS1 + insn.i_imm()));
+WRITE_RD(MMU.load<int16_t>(RS1 + insn.i_imm()));
diff --git a/riscv/insns/lhu.h b/riscv/insns/lhu.h
index 9d24070..6eac0bb 100644
--- a/riscv/insns/lhu.h
+++ b/riscv/insns/lhu.h
@@ -1 +1 @@
-WRITE_RD(MMU.load_uint16(RS1 + insn.i_imm()));
+WRITE_RD(MMU.load<uint16_t>(RS1 + insn.i_imm()));
diff --git a/riscv/insns/lr_d.h b/riscv/insns/lr_d.h
index 6dd8d67..214daff 100644
--- a/riscv/insns/lr_d.h
+++ b/riscv/insns/lr_d.h
@@ -1,5 +1,3 @@
require_extension('A');
require_rv64;
-auto res = MMU.load_int64(RS1, true);
-MMU.acquire_load_reservation(RS1);
-WRITE_RD(res);
+WRITE_RD(MMU.load_reserved<int64_t>(RS1));
diff --git a/riscv/insns/lr_w.h b/riscv/insns/lr_w.h
index 185be53..354590f 100644
--- a/riscv/insns/lr_w.h
+++ b/riscv/insns/lr_w.h
@@ -1,4 +1,2 @@
require_extension('A');
-auto res = MMU.load_int32(RS1, true);
-MMU.acquire_load_reservation(RS1);
-WRITE_RD(res);
+WRITE_RD(MMU.load_reserved<int32_t>(RS1));
diff --git a/riscv/insns/lw.h b/riscv/insns/lw.h
index 4e8ed04..82d7044 100644
--- a/riscv/insns/lw.h
+++ b/riscv/insns/lw.h
@@ -1 +1 @@
-WRITE_RD(MMU.load_int32(RS1 + insn.i_imm()));
+WRITE_RD(MMU.load<int32_t>(RS1 + insn.i_imm()));
diff --git a/riscv/insns/lwu.h b/riscv/insns/lwu.h
index dcc4d75..cbc7e2a 100644
--- a/riscv/insns/lwu.h
+++ b/riscv/insns/lwu.h
@@ -1,2 +1,2 @@
require_rv64;
-WRITE_RD(MMU.load_uint32(RS1 + insn.i_imm()));
+WRITE_RD(MMU.load<uint32_t>(RS1 + insn.i_imm()));
diff --git a/riscv/insns/sb.h b/riscv/insns/sb.h
index 8729c2d..d9cc8f9 100644
--- a/riscv/insns/sb.h
+++ b/riscv/insns/sb.h
@@ -1 +1 @@
-MMU.store_uint8(RS1 + insn.s_imm(), RS2);
+MMU.store<uint8_t>(RS1 + insn.s_imm(), RS2);
diff --git a/riscv/insns/sc_d.h b/riscv/insns/sc_d.h
index 54023ed..ac82c3e 100644
--- a/riscv/insns/sc_d.h
+++ b/riscv/insns/sc_d.h
@@ -1,11 +1,6 @@
require_extension('A');
require_rv64;
-bool have_reservation = MMU.check_load_reservation(RS1, 8);
-
-if (have_reservation)
- MMU.store_uint64(RS1, RS2);
-
-MMU.yield_load_reservation();
+bool have_reservation = MMU.store_conditional<uint64_t>(RS1, RS2);
WRITE_RD(!have_reservation);
diff --git a/riscv/insns/sc_w.h b/riscv/insns/sc_w.h
index e430dcb..48fea4b 100644
--- a/riscv/insns/sc_w.h
+++ b/riscv/insns/sc_w.h
@@ -1,10 +1,5 @@
require_extension('A');
-bool have_reservation = MMU.check_load_reservation(RS1, 4);
-
-if (have_reservation)
- MMU.store_uint32(RS1, RS2);
-
-MMU.yield_load_reservation();
+bool have_reservation = MMU.store_conditional<uint32_t>(RS1, RS2);
WRITE_RD(!have_reservation);
diff --git a/riscv/insns/sd.h b/riscv/insns/sd.h
index 664deb2..5c9dd4e 100644
--- a/riscv/insns/sd.h
+++ b/riscv/insns/sd.h
@@ -1,2 +1,2 @@
require_rv64;
-MMU.store_uint64(RS1 + insn.s_imm(), RS2);
+MMU.store<uint64_t>(RS1 + insn.s_imm(), RS2);
diff --git a/riscv/insns/sh.h b/riscv/insns/sh.h
index 22aa3a8..8f780c3 100644
--- a/riscv/insns/sh.h
+++ b/riscv/insns/sh.h
@@ -1 +1 @@
-MMU.store_uint16(RS1 + insn.s_imm(), RS2);
+MMU.store<uint16_t>(RS1 + insn.s_imm(), RS2);
diff --git a/riscv/insns/sw.h b/riscv/insns/sw.h
index aa5ead3..a9d7268 100644
--- a/riscv/insns/sw.h
+++ b/riscv/insns/sw.h
@@ -1 +1 @@
-MMU.store_uint32(RS1 + insn.s_imm(), RS2);
+MMU.store<uint32_t>(RS1 + insn.s_imm(), RS2);
diff --git a/riscv/interactive.cc b/riscv/interactive.cc
index e64e321..8557acc 100644
--- a/riscv/interactive.cc
+++ b/riscv/interactive.cc
@@ -678,17 +678,17 @@ reg_t sim_t::get_mem(const std::vector<std::string>& args)
switch (addr % 8)
{
case 0:
- val = mmu->load_uint64(addr);
+ val = mmu->load<uint64_t>(addr);
break;
case 4:
- val = mmu->load_uint32(addr);
+ val = mmu->load<uint32_t>(addr);
break;
case 2:
case 6:
- val = mmu->load_uint16(addr);
+ val = mmu->load<uint16_t>(addr);
break;
default:
- val = mmu->load_uint8(addr);
+ val = mmu->load<uint8_t>(addr);
break;
}
return val;
@@ -722,7 +722,7 @@ void sim_t::interactive_str(const std::string& cmd, const std::vector<std::strin
std::ostream out(sout_.rdbuf());
char ch;
- while ((ch = mmu->load_uint8(addr++)))
+ while ((ch = mmu->load<uint8_t>(addr++)))
out << ch;
out << std::endl;
diff --git a/riscv/mmu.cc b/riscv/mmu.cc
index de82a77..3f9b5e9 100644
--- a/riscv/mmu.cc
+++ b/riscv/mmu.cc
@@ -190,12 +190,16 @@ void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint
tracer.trace(paddr, len, LOAD);
else if (xlate_flags == 0)
refill_tlb(addr, paddr, host_addr, LOAD);
- } else if (!mmio_load(paddr, len, bytes)) {
+
+ if (xlate_flags & RISCV_XLATE_LR) {
+ load_reservation_address = paddr;
+ }
+ } else if ((xlate_flags & RISCV_XLATE_LR) || !mmio_load(paddr, len, bytes)) {
throw trap_load_access_fault((proc) ? proc->state.v : false, addr, 0, 0);
}
}
-void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags, bool UNUSED require_alignment)
+void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags)
{
check_triggers(triggers::OPERATION_LOAD, addr);
@@ -206,7 +210,7 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate
#ifndef RISCV_ENABLE_MISALIGNED
throw trap_load_address_misaligned(gva, addr, 0, 0);
#else
- if (require_alignment)
+ if (xlate_flags & RISCV_XLATE_LR)
throw trap_load_access_fault(gva, addr, 0, 0);
reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE);
diff --git a/riscv/mmu.h b/riscv/mmu.h
index 1e5260b..19105e3 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -49,8 +49,9 @@ public:
mmu_t(simif_t* sim, processor_t* proc);
~mmu_t();
-#define RISCV_XLATE_VIRT (1U << 0)
+#define RISCV_XLATE_VIRT (1U << 0)
#define RISCV_XLATE_VIRT_HLVX (1U << 1)
+#define RISCV_XLATE_LR (1U << 2)
#ifndef RISCV_ENABLE_COMMITLOG
# define READ_MEM(addr, size) ((void)(addr), (void)(size))
@@ -59,49 +60,39 @@ public:
proc->state.log_mem_read.push_back(std::make_tuple(addr, 0, size));
#endif
- // template for functions that load an aligned value from memory
- #define load_func(type, prefix, xlate_flags) \
- type##_t ALWAYS_INLINE prefix##_##type(reg_t addr, bool require_alignment = false) { \
- reg_t vpn = addr >> PGSHIFT; \
- size_t size = sizeof(type##_t); \
- bool aligned = (addr & (size - 1)) == 0; \
- bool tlb_hit = tlb_load_tag[vpn % TLB_ENTRIES] == vpn; \
- if (likely((xlate_flags) == 0 && aligned && tlb_hit)) { \
- if (proc) READ_MEM(addr, size); \
- return from_target(*(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \
- } else { \
- target_endian<type##_t> res; \
- load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res, (xlate_flags), require_alignment); \
- if (proc) READ_MEM(addr, size); \
- return from_target(res); \
- } \
+ template<typename T>
+ T ALWAYS_INLINE load(reg_t addr, uint32_t xlate_flags = 0) {
+ target_endian<T> res;
+ reg_t vpn = addr >> PGSHIFT;
+ bool aligned = (addr & (sizeof(T) - 1)) == 0;
+ bool tlb_hit = tlb_load_tag[vpn % TLB_ENTRIES] == vpn;
+
+ if (likely(xlate_flags == 0 && aligned && tlb_hit)) {
+ res = *(target_endian<T>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr);
+ } else {
+ load_slow_path(addr, sizeof(T), (uint8_t*)&res, xlate_flags);
}
- // load value from memory at aligned address; zero extend to register width
- load_func(uint8, load, 0)
- load_func(uint16, load, 0)
- load_func(uint32, load, 0)
- load_func(uint64, load, 0)
-
- // load value from guest memory at aligned address; zero extend to register width
- load_func(uint8, guest_load, RISCV_XLATE_VIRT)
- load_func(uint16, guest_load, RISCV_XLATE_VIRT)
- load_func(uint32, guest_load, RISCV_XLATE_VIRT)
- load_func(uint64, guest_load, RISCV_XLATE_VIRT)
- load_func(uint16, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX)
- load_func(uint32, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX)
-
- // load value from memory at aligned address; sign extend to register width
- load_func(int8, load, 0)
- load_func(int16, load, 0)
- load_func(int32, load, 0)
- load_func(int64, load, 0)
-
- // load value from guest memory at aligned address; sign extend to register width
- load_func(int8, guest_load, RISCV_XLATE_VIRT)
- load_func(int16, guest_load, RISCV_XLATE_VIRT)
- load_func(int32, guest_load, RISCV_XLATE_VIRT)
- load_func(int64, guest_load, RISCV_XLATE_VIRT)
+ if (proc)
+ READ_MEM(addr, sizeof(T));
+
+ return from_target(res);
+ }
+
+ template<typename T>
+ T load_reserved(reg_t addr) {
+ return load<T>(addr, RISCV_XLATE_LR);
+ }
+
+ template<typename T>
+ T guest_load(reg_t addr) {
+ return load<T>(addr, RISCV_XLATE_VIRT);
+ }
+
+ template<typename T>
+ T guest_load_x(reg_t addr) {
+ return load<T>(addr, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX);
+ }
#ifndef RISCV_ENABLE_COMMITLOG
# define WRITE_MEM(addr, value, size) ((void)(addr), (void)(value), (void)(size))
@@ -110,23 +101,26 @@ public:
proc->state.log_mem_write.push_back(std::make_tuple(addr, val, size));
#endif
- // template for functions that store an aligned value to memory
- #define store_func(type, prefix, xlate_flags) \
- void ALWAYS_INLINE prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \
- reg_t vpn = addr >> PGSHIFT; \
- size_t size = sizeof(type##_t); \
- bool aligned = (addr & (size - 1)) == 0; \
- bool tlb_hit = tlb_store_tag[vpn % TLB_ENTRIES] == vpn; \
- if ((xlate_flags) == 0 && likely(aligned && tlb_hit)) { \
- if (actually_store) { \
- if (proc) WRITE_MEM(addr, val, size); \
- *(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
- } \
- } else { \
- target_endian<type##_t> target_val = to_target(val); \
- store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&target_val, (xlate_flags), actually_store, require_alignment); \
- if (actually_store && proc) WRITE_MEM(addr, val, size); \
- } \
+ template<typename T>
+ void ALWAYS_INLINE store(reg_t addr, T val, uint32_t xlate_flags = 0) {
+ reg_t vpn = addr >> PGSHIFT;
+ bool aligned = (addr & (sizeof(T) - 1)) == 0;
+ bool tlb_hit = tlb_store_tag[vpn % TLB_ENTRIES] == vpn;
+
+ if (xlate_flags == 0 && likely(aligned && tlb_hit)) {
+ *(target_endian<T>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val);
+ } else {
+ target_endian<T> target_val = to_target(val);
+ store_slow_path(addr, sizeof(T), (const uint8_t*)&target_val, xlate_flags, true, false);
+ }
+
+ if (proc)
+ WRITE_MEM(addr, val, sizeof(T));
+ }
+
+ template<typename T>
+ void guest_store(reg_t addr, T val) {
+ store(addr, val, RISCV_XLATE_VIRT);
}
// AMO/Zicbom faults should be reported as store faults
@@ -145,16 +139,15 @@ public:
}
// template for functions that perform an atomic memory operation
- #define amo_func(type) \
- template<typename op> \
- type##_t amo_##type(reg_t addr, op f) { \
- convert_load_traps_to_store_traps({ \
- store_##type(addr, 0, false, true); \
- auto lhs = load_##type(addr, true); \
- store_##type(addr, f(lhs)); \
- return lhs; \
- }) \
- }
+ template<typename T, typename op>
+ T amo(reg_t addr, op f) {
+ convert_load_traps_to_store_traps({
+ store_slow_path(addr, sizeof(T), nullptr, 0, false, true);
+ auto lhs = load<T>(addr);
+ store<T>(addr, f(lhs));
+ return lhs;
+ })
+ }
void store_float128(reg_t addr, float128_t val)
{
@@ -162,8 +155,8 @@ public:
if (unlikely(addr & (sizeof(float128_t)-1)))
throw trap_store_address_misaligned((proc) ? proc->state.v : false, addr, 0, 0);
#endif
- store_uint64(addr, val.v[0]);
- store_uint64(addr + 8, val.v[1]);
+ store<uint64_t>(addr, val.v[0]);
+ store<uint64_t>(addr + 8, val.v[1]);
}
float128_t load_float128(reg_t addr)
@@ -172,29 +165,13 @@ public:
if (unlikely(addr & (sizeof(float128_t)-1)))
throw trap_load_address_misaligned((proc) ? proc->state.v : false, addr, 0, 0);
#endif
- return (float128_t){load_uint64(addr), load_uint64(addr + 8)};
+ return (float128_t){load<uint64_t>(addr), load<uint64_t>(addr + 8)};
}
- // store value to memory at aligned address
- store_func(uint8, store, 0)
- store_func(uint16, store, 0)
- store_func(uint32, store, 0)
- store_func(uint64, store, 0)
-
- // store value to guest memory at aligned address
- store_func(uint8, guest_store, RISCV_XLATE_VIRT)
- store_func(uint16, guest_store, RISCV_XLATE_VIRT)
- store_func(uint32, guest_store, RISCV_XLATE_VIRT)
- store_func(uint64, guest_store, RISCV_XLATE_VIRT)
-
- // perform an atomic memory operation at an aligned address
- amo_func(uint32)
- amo_func(uint64)
-
void cbo_zero(reg_t addr) {
auto base = addr & ~(blocksz - 1);
for (size_t offset = 0; offset < blocksz; offset += 1)
- store_uint8(base + offset, 0);
+ store<uint8_t>(base + offset, 0);
}
void clean_inval(reg_t addr, bool clean, bool inval) {
@@ -214,15 +191,6 @@ public:
load_reservation_address = (reg_t)-1;
}
- inline void acquire_load_reservation(reg_t vaddr)
- {
- reg_t paddr = translate(vaddr, 1, LOAD, 0);
- if (auto host_addr = sim->addr_to_mem(paddr))
- load_reservation_address = refill_tlb(vaddr, paddr, host_addr, LOAD).target_offset + vaddr;
- else
- throw trap_load_access_fault((proc) ? proc->state.v : false, vaddr, 0, 0); // disallow LR to I/O space
- }
-
inline bool check_load_reservation(reg_t vaddr, size_t size)
{
if (vaddr & (size-1)) {
@@ -231,12 +199,25 @@ public:
}
reg_t paddr = translate(vaddr, 1, STORE, 0);
- if (auto host_addr = sim->addr_to_mem(paddr))
- return load_reservation_address == refill_tlb(vaddr, paddr, host_addr, STORE).target_offset + vaddr;
+ if (sim->addr_to_mem(paddr))
+ return load_reservation_address == paddr;
else
throw trap_store_access_fault((proc) ? proc->state.v : false, vaddr, 0, 0); // disallow SC to I/O space
}
+ template<typename T>
+ bool store_conditional(reg_t addr, T val)
+ {
+ bool have_reservation = check_load_reservation(addr, sizeof(T));
+
+ if (have_reservation)
+ store(addr, val);
+
+ yield_load_reservation();
+
+ return have_reservation;
+ }
+
static const reg_t ICACHE_ENTRIES = 1024;
inline size_t icache_index(reg_t addr)
@@ -379,7 +360,7 @@ private:
// handle uncommon cases: TLB misses, page faults, MMIO
tlb_entry_t fetch_slow_path(reg_t addr);
- void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags, bool require_alignment);
+ void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags);
void load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags);
void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool require_alignment);
void store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store);
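For context, the new LR/SC helpers added to mmu.h above compose as follows; this is just a restatement of the lr.w/sc.w semantics from this diff, with the reservation handling now internal to the MMU rather than spelled out in each instruction:

  // lr.w: load with RISCV_XLATE_LR set, so the slow path records the physical
  // address as the reservation and faults on LR to MMIO regions
  WRITE_RD(MMU.load_reserved<int32_t>(RS1));

  // sc.w: succeeds only if the reservation still matches, then always yields it
  bool have_reservation = MMU.store_conditional<uint32_t>(RS1, RS2);
  WRITE_RD(!have_reservation);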
diff --git a/riscv/sim.cc b/riscv/sim.cc
index 5ce7d21..71ac452 100644
--- a/riscv/sim.cc
+++ b/riscv/sim.cc
@@ -420,7 +420,7 @@ void sim_t::idle()
void sim_t::read_chunk(addr_t taddr, size_t len, void* dst)
{
assert(len == 8);
- auto data = debug_mmu->to_target(debug_mmu->load_uint64(taddr));
+ auto data = debug_mmu->to_target(debug_mmu->load<uint64_t>(taddr));
memcpy(dst, &data, sizeof data);
}
@@ -429,7 +429,7 @@ void sim_t::write_chunk(addr_t taddr, size_t len, const void* src)
assert(len == 8);
target_endian<uint64_t> data;
memcpy(&data, src, sizeof data);
- debug_mmu->store_uint64(taddr, debug_mmu->from_target(data));
+ debug_mmu->store<uint64_t>(taddr, debug_mmu->from_target(data));
}
void sim_t::set_target_endianness(memif_endianness_t endianness)
diff --git a/riscv/v_ext_macros.h b/riscv/v_ext_macros.h
index ad31938..0984a80 100644
--- a/riscv/v_ext_macros.h
+++ b/riscv/v_ext_macros.h
@@ -1183,7 +1183,7 @@ reg_t index[P.VU.vlmax]; \
VI_STRIP(i); \
P.VU.vstart->write(i); \
for (reg_t fn = 0; fn < nf; ++fn) { \
- elt_width##_t val = MMU.load_##elt_width( \
+ elt_width##_t val = MMU.load<elt_width##_t>( \
baseAddr + (stride) + (offset) * sizeof(elt_width##_t)); \
P.VU.elt<elt_width##_t>(vd + fn * emul, vreg_inx, true) = val; \
} \
@@ -1207,19 +1207,19 @@ reg_t index[P.VU.vlmax]; \
switch (P.VU.vsew) { \
case e8: \
P.VU.elt<uint8_t>(vd + fn * flmul, vreg_inx, true) = \
- MMU.load_uint8(baseAddr + index[i] + fn * 1); \
+ MMU.load<uint8_t>(baseAddr + index[i] + fn * 1); \
break; \
case e16: \
P.VU.elt<uint16_t>(vd + fn * flmul, vreg_inx, true) = \
- MMU.load_uint16(baseAddr + index[i] + fn * 2); \
+ MMU.load<uint16_t>(baseAddr + index[i] + fn * 2); \
break; \
case e32: \
P.VU.elt<uint32_t>(vd + fn * flmul, vreg_inx, true) = \
- MMU.load_uint32(baseAddr + index[i] + fn * 4); \
+ MMU.load<uint32_t>(baseAddr + index[i] + fn * 4); \
break; \
default: \
P.VU.elt<uint64_t>(vd + fn * flmul, vreg_inx, true) = \
- MMU.load_uint64(baseAddr + index[i] + fn * 8); \
+ MMU.load<uint64_t>(baseAddr + index[i] + fn * 8); \
break; \
} \
} \
@@ -1238,7 +1238,7 @@ reg_t index[P.VU.vlmax]; \
P.VU.vstart->write(i); \
for (reg_t fn = 0; fn < nf; ++fn) { \
elt_width##_t val = P.VU.elt<elt_width##_t>(vs3 + fn * emul, vreg_inx); \
- MMU.store_##elt_width( \
+ MMU.store<elt_width##_t>( \
baseAddr + (stride) + (offset) * sizeof(elt_width##_t), val); \
} \
} \
@@ -1260,19 +1260,19 @@ reg_t index[P.VU.vlmax]; \
for (reg_t fn = 0; fn < nf; ++fn) { \
switch (P.VU.vsew) { \
case e8: \
- MMU.store_uint8(baseAddr + index[i] + fn * 1, \
+ MMU.store<uint8_t>(baseAddr + index[i] + fn * 1, \
P.VU.elt<uint8_t>(vs3 + fn * flmul, vreg_inx)); \
break; \
case e16: \
- MMU.store_uint16(baseAddr + index[i] + fn * 2, \
+ MMU.store<uint16_t>(baseAddr + index[i] + fn * 2, \
P.VU.elt<uint16_t>(vs3 + fn * flmul, vreg_inx)); \
break; \
case e32: \
- MMU.store_uint32(baseAddr + index[i] + fn * 4, \
+ MMU.store<uint32_t>(baseAddr + index[i] + fn * 4, \
P.VU.elt<uint32_t>(vs3 + fn * flmul, vreg_inx)); \
break; \
default: \
- MMU.store_uint64(baseAddr + index[i] + fn * 8, \
+ MMU.store<uint64_t>(baseAddr + index[i] + fn * 8, \
P.VU.elt<uint64_t>(vs3 + fn * flmul, vreg_inx)); \
break; \
} \
@@ -1294,7 +1294,7 @@ reg_t index[P.VU.vlmax]; \
for (reg_t fn = 0; fn < nf; ++fn) { \
uint64_t val; \
try { \
- val = MMU.load_##elt_width( \
+ val = MMU.load<elt_width##_t>( \
baseAddr + (i * nf + fn) * sizeof(elt_width##_t)); \
} catch (trap_t& t) { \
if (i == 0) \
@@ -1327,7 +1327,7 @@ reg_t index[P.VU.vlmax]; \
reg_t off = P.VU.vstart->read() % elt_per_reg; \
if (off) { \
for (reg_t pos = off; pos < elt_per_reg; ++pos) { \
- auto val = MMU.load_## elt_width(baseAddr + \
+ auto val = MMU.load<elt_width##_t>(baseAddr + \
P.VU.vstart->read() * sizeof(elt_width ## _t)); \
P.VU.elt<elt_width ## _t>(vd + i, pos, true) = val; \
P.VU.vstart->write(P.VU.vstart->read() + 1); \
@@ -1336,7 +1336,7 @@ reg_t index[P.VU.vlmax]; \
} \
for (; i < len; ++i) { \
for (reg_t pos = 0; pos < elt_per_reg; ++pos) { \
- auto val = MMU.load_## elt_width(baseAddr + \
+ auto val = MMU.load<elt_width##_t>(baseAddr + \
P.VU.vstart->read() * sizeof(elt_width ## _t)); \
P.VU.elt<elt_width ## _t>(vd + i, pos, true) = val; \
P.VU.vstart->write(P.VU.vstart->read() + 1); \
@@ -1359,7 +1359,7 @@ reg_t index[P.VU.vlmax]; \
if (off) { \
for (reg_t pos = off; pos < P.VU.vlenb; ++pos) { \
auto val = P.VU.elt<uint8_t>(vs3 + i, pos); \
- MMU.store_uint8(baseAddr + P.VU.vstart->read(), val); \
+ MMU.store<uint8_t>(baseAddr + P.VU.vstart->read(), val); \
P.VU.vstart->write(P.VU.vstart->read() + 1); \
} \
i++; \
@@ -1367,7 +1367,7 @@ reg_t index[P.VU.vlmax]; \
for (; i < len; ++i) { \
for (reg_t pos = 0; pos < P.VU.vlenb; ++pos) { \
auto val = P.VU.elt<uint8_t>(vs3 + i, pos); \
- MMU.store_uint8(baseAddr + P.VU.vstart->read(), val); \
+ MMU.store<uint8_t>(baseAddr + P.VU.vstart->read(), val); \
P.VU.vstart->write(P.VU.vstart->read() + 1); \
} \
} \
@@ -1409,14 +1409,14 @@ reg_t index[P.VU.vlmax]; \
switch (P.VU.vsew) { \
case e32: { \
auto vs3 = P.VU.elt< type ## 32_t>(vd, vreg_inx); \
- auto val = MMU.amo_uint32(baseAddr + index[i], [&](type ## 32_t UNUSED lhs) { op }); \
+ auto val = MMU.amo<uint32_t>(baseAddr + index[i], [&](type ## 32_t UNUSED lhs) { op }); \
if (insn.v_wd()) \
P.VU.elt< type ## 32_t>(vd, vreg_inx, true) = val; \
} \
break; \
case e64: { \
auto vs3 = P.VU.elt< type ## 64_t>(vd, vreg_inx); \
- auto val = MMU.amo_uint64(baseAddr + index[i], [&](type ## 64_t UNUSED lhs) { op }); \
+ auto val = MMU.amo<uint64_t>(baseAddr + index[i], [&](type ## 64_t UNUSED lhs) { op }); \
if (insn.v_wd()) \
P.VU.elt< type ## 64_t>(vd, vreg_inx, true) = val; \
} \