author    Jerry Zhao <jerryz123@berkeley.edu>    2024-03-01 12:40:49 -0800
committer GitHub <noreply@github.com>            2024-03-01 12:40:49 -0800
commit    567e687b215e1198fe6e45122f245f1c9f4da556 (patch)
tree      9c136afd1c2339309672b6b9bd596ecf644be10b /riscv
parent    8d239ff376d3614af554630aa5e7ae6c5d7f010e (diff)
parent    a39c2387b7e7638de6278dfceff31b297109da25 (diff)
Merge pull request #1583 from rbuchner-aril/rbuchner/designated-initializers
Upgrade Spike to compile with c++2a and use designated initializers
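
The change relies on two C++20 (c++2a) features: default member initializers on bit-fields and designated initializers. Together they let an empty {} stand in for the old positional {false, false, false}, and let call sites name only the flag they set. A minimal standalone sketch (not part of the commit; the struct and field names merely mirror xlate_flags_t), compiled with -std=c++2a:

    #include <cassert>

    struct flags_t {
      // C++20 permits a default member initializer on a bit-field,
      // so an empty {} leaves every flag false.
      const bool forced_virt : 1 {false};
      const bool hlvx : 1 {false};
      const bool lr : 1 {false};
    };

    int main() {
      flags_t none{};                // all three flags take their {false} defaults
      flags_t lr_only{.lr = true};   // designated initializer; the others default
      assert(!none.lr && !none.hlvx && !none.forced_virt);
      assert(lr_only.lr && !lr_only.hlvx && !lr_only.forced_virt);
      return 0;
    }
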
Diffstat (limited to 'riscv')
-rw-r--r--  riscv/mmu.cc |  2
-rw-r--r--  riscv/mmu.h  | 42
2 files changed, 16 insertions, 28 deletions
diff --git a/riscv/mmu.cc b/riscv/mmu.cc
index 165be53..1a0c830 100644
--- a/riscv/mmu.cc
+++ b/riscv/mmu.cc
@@ -70,7 +70,7 @@ reg_t mmu_t::translate(mem_access_info_t access_info, reg_t len)
tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr)
{
- auto access_info = generate_access_info(vaddr, FETCH, {false, false, false});
+ auto access_info = generate_access_info(vaddr, FETCH, {});
check_triggers(triggers::OPERATION_EXECUTE, vaddr, access_info.effective_virt);
tlb_entry_t result;
diff --git a/riscv/mmu.h b/riscv/mmu.h
index a163fe4..4404e4c 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -39,9 +39,9 @@ struct tlb_entry_t {
};
struct xlate_flags_t {
- const bool forced_virt : 1;
- const bool hlvx : 1;
- const bool lr : 1;
+ const bool forced_virt : 1 {false};
+ const bool hlvx : 1 {false};
+ const bool lr : 1 {false};
bool is_special_access() const {
return forced_virt || hlvx || lr;
@@ -72,7 +72,7 @@ private:
mem_access_info_t generate_access_info(reg_t addr, access_type type, xlate_flags_t xlate_flags) {
if (!proc)
- return {addr, 0, false, {false, false, false}, type};
+ return {addr, 0, false, {}, type};
bool virt = proc->state.v;
reg_t mode = proc->state.prv;
if (type != FETCH) {
@@ -94,7 +94,7 @@ public:
~mmu_t();
template<typename T>
- T ALWAYS_INLINE load(reg_t addr, xlate_flags_t xlate_flags = {false, false, false}) {
+ T ALWAYS_INLINE load(reg_t addr, xlate_flags_t xlate_flags = {}) {
target_endian<T> res;
reg_t vpn = addr >> PGSHIFT;
bool aligned = (addr & (sizeof(T) - 1)) == 0;
@@ -114,30 +114,21 @@ public:
template<typename T>
T load_reserved(reg_t addr) {
- bool forced_virt = false;
- bool hlvx = false;
- bool lr = true;
- return load<T>(addr, {forced_virt, hlvx, lr});
+ return load<T>(addr, {.lr = true});
}
template<typename T>
T guest_load(reg_t addr) {
- bool forced_virt = true;
- bool hlvx = false;
- bool lr = false;
- return load<T>(addr, {forced_virt, hlvx, lr});
+ return load<T>(addr, {.forced_virt = true});
}
template<typename T>
T guest_load_x(reg_t addr) {
- bool forced_virt = true;
- bool hlvx = true;
- bool lr = false;
- return load<T>(addr, {forced_virt, hlvx, lr});
+ return load<T>(addr, {.forced_virt=true, .hlvx=true});
}
template<typename T>
- void ALWAYS_INLINE store(reg_t addr, T val, xlate_flags_t xlate_flags = {false, false, false}) {
+ void ALWAYS_INLINE store(reg_t addr, T val, xlate_flags_t xlate_flags = {}) {
reg_t vpn = addr >> PGSHIFT;
bool aligned = (addr & (sizeof(T) - 1)) == 0;
bool tlb_hit = tlb_store_tag[vpn % TLB_ENTRIES] == vpn;
@@ -155,10 +146,7 @@ public:
template<typename T>
void guest_store(reg_t addr, T val) {
- bool forced_virt = true;
- bool hlvx = false;
- bool lr = false;
- store(addr, val, {forced_virt, hlvx, lr});
+ store(addr, val, {.forced_virt=true});
}
// AMO/Zicbom faults should be reported as store faults
@@ -180,7 +168,7 @@ public:
template<typename T, typename op>
T amo(reg_t addr, op f) {
convert_load_traps_to_store_traps({
- store_slow_path(addr, sizeof(T), nullptr, {false, false, false}, false, true);
+ store_slow_path(addr, sizeof(T), nullptr, {}, false, true);
auto lhs = load<T>(addr);
store<T>(addr, f(lhs));
return lhs;
@@ -190,7 +178,7 @@ public:
template<typename T>
T amo_compare_and_swap(reg_t addr, T comp, T swap) {
convert_load_traps_to_store_traps({
- store_slow_path(addr, sizeof(T), nullptr, {false, false, false}, false, true);
+ store_slow_path(addr, sizeof(T), nullptr, {}, false, true);
auto lhs = load<T>(addr);
if (lhs == comp)
store<T>(addr, swap);
@@ -230,7 +218,7 @@ public:
for (size_t offset = 0; offset < blocksz; offset += 1)
check_triggers(triggers::OPERATION_STORE, base + offset, false, addr, std::nullopt);
convert_load_traps_to_store_traps({
- const reg_t paddr = translate(generate_access_info(addr, LOAD, {false, false, false}), 1);
+ const reg_t paddr = translate(generate_access_info(addr, LOAD, {}), 1);
if (sim->reservable(paddr)) {
if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD))
tracer.clean_invalidate(paddr, blocksz, clean, inval);
@@ -249,10 +237,10 @@ public:
{
if (vaddr & (size-1)) {
// Raise either access fault or misaligned exception
- store_slow_path(vaddr, size, nullptr, {false, false, false}, false, true);
+ store_slow_path(vaddr, size, nullptr, {}, false, true);
}
- reg_t paddr = translate(generate_access_info(vaddr, STORE, {false, false, false}), 1);
+ reg_t paddr = translate(generate_access_info(vaddr, STORE, {}), 1);
if (sim->reservable(paddr))
return load_reservation_address == paddr;
else