author    Andrew Waterman <andrew@sifive.com>  2023-05-12 10:23:47 -0700
committer GitHub <noreply@github.com>  2023-05-12 10:23:47 -0700
commit    76b0027c177113fcf083ba2c95b3c35feb642957 (patch)
tree      8bb0486dcafa78d700c92afcf870541a686d1aba
parent    dc3eb2d9e3b805ed1a1416c1e66584cf6520d0f1 (diff)
parent    a30a0d63677151cc688fa4e0a05ac664e63d94f4 (diff)
Merge pull request #1347 from rbuchner-aril/rb-872
Fix for #872 mstatus.GVA
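
Background for this merge (context, not part of the commit message): mstatus.GVA must report whether the *access* that trapped used a guest virtual address, which is a property of the access, not of the hart's current V bit — HLV/HLVX/HSV instructions and M-mode MPRV/MPV redirection can make a data access virtual while V=0. The patch therefore computes an effective privilege/virtualization pair once per access (generate_access_info, added below in riscv/mmu.h) and threads it through translation and trigger reporting. A simplified, standalone sketch of that derivation (CSR fields passed in as plain values; constants mirror spike's headers):

```cpp
#include <cstdint>

using reg_t = uint64_t;            // as in spike's decode.h
constexpr reg_t PRV_M = 3;         // machine-mode privilege encoding

struct eff_mode_t { reg_t priv; bool virt; };

// Mirrors the logic of mmu_t::generate_access_info() in this commit.
eff_mode_t effective_mode(reg_t cur_priv, bool cur_virt, bool is_fetch,
                          bool mprv, reg_t mstatus_mpp, bool mstatus_mpv,
                          bool forced_virt, reg_t hstatus_spvp) {
  reg_t priv = cur_priv;
  bool virt = cur_virt;
  if (!is_fetch) {                 // MPRV and HLV/HSV never apply to fetches
    if (mprv) {                    // M-mode data accesses take MPP's privilege
      priv = mstatus_mpp;
      if (mstatus_mpv && priv != PRV_M)
        virt = true;               // ...and MPV's virtualization mode
    }
    if (forced_virt) {             // HLV/HLVX/HSV execute as if V=1,
      virt = true;
      priv = hstatus_spvp;         // at hstatus.SPVP's privilege
    }
  }
  return {priv, virt};
}
```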
-rw-r--r--  riscv/execute.cc     6
-rw-r--r--  riscv/mmu.cc       100
-rw-r--r--  riscv/mmu.h         96
-rw-r--r--  riscv/processor.cc   4
-rw-r--r--  riscv/processor.h    2
-rw-r--r--  riscv/triggers.h     5
6 files changed, 132 insertions, 81 deletions
diff --git a/riscv/execute.cc b/riscv/execute.cc
index acf0e90..295879d 100644
--- a/riscv/execute.cc
+++ b/riscv/execute.cc
@@ -267,7 +267,7 @@ void processor_t::step(size_t n)
auto match = TM.detect_icount_match();
if (match.has_value()) {
assert(match->timing == triggers::TIMING_BEFORE);
- throw triggers::matched_t((triggers::operation_t)0, 0, match->action);
+ throw triggers::matched_t((triggers::operation_t)0, 0, match->action, state.v);
}
}
@@ -310,7 +310,7 @@ void processor_t::step(size_t n)
// Trigger action takes priority over single step
auto match = TM.detect_trap_match(t);
if (match.has_value())
- take_trigger_action(match->action, 0, state.pc);
+ take_trigger_action(match->action, 0, state.pc, 0);
else if (unlikely(state.single_step == state.STEP_STEPPED)) {
state.single_step = state.STEP_NONE;
enter_debug_mode(DCSR_CAUSE_STEP);
@@ -322,7 +322,7 @@ void processor_t::step(size_t n)
delete mmu->matched_trigger;
mmu->matched_trigger = NULL;
}
- take_trigger_action(t.action, t.address, pc);
+ take_trigger_action(t.action, t.address, pc, t.gva);
}
catch(trap_debug_mode&)
{
diff --git a/riscv/mmu.cc b/riscv/mmu.cc
index be24f40..358ccd3 100644
--- a/riscv/mmu.cc
+++ b/riscv/mmu.cc
@@ -52,27 +52,17 @@ void throw_access_exception(bool virt, reg_t addr, access_type type)
}
}
-reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_flags)
+reg_t mmu_t::translate(mem_access_info_t access_info, reg_t len)
{
+ reg_t addr = access_info.vaddr;
+ access_type type = access_info.type;
if (!proc)
return addr;
- bool virt = proc->state.v;
- bool hlvx = xlate_flags & RISCV_XLATE_VIRT_HLVX;
- reg_t mode = proc->state.prv;
- if (type != FETCH) {
- if (in_mprv()) {
- mode = get_field(proc->state.mstatus->read(), MSTATUS_MPP);
- if (get_field(proc->state.mstatus->read(), MSTATUS_MPV) && mode != PRV_M)
- virt = true;
- }
- if (xlate_flags & RISCV_XLATE_VIRT) {
- virt = true;
- mode = get_field(proc->state.hstatus->read(), HSTATUS_SPVP);
- }
- }
+ bool virt = access_info.effective_virt;
+ reg_t mode = (reg_t) access_info.effective_priv;
- reg_t paddr = walk(addr, type, mode, virt, hlvx) | (addr & (PGSIZE-1));
+ reg_t paddr = walk(access_info) | (addr & (PGSIZE-1));
if (!pmp_ok(paddr, len, type, mode))
throw_access_exception(virt, addr, type);
return paddr;
@@ -80,12 +70,13 @@ reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_f
tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr)
{
- check_triggers(triggers::OPERATION_EXECUTE, vaddr);
+ auto access_info = generate_access_info(vaddr, FETCH, {false, false, false});
+ check_triggers(triggers::OPERATION_EXECUTE, vaddr, access_info.effective_virt);
tlb_entry_t result;
reg_t vpn = vaddr >> PGSHIFT;
if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] != (vpn | TLB_CHECK_TRIGGERS))) {
- reg_t paddr = translate(vaddr, sizeof(fetch_temp), FETCH, 0);
+ reg_t paddr = translate(access_info, sizeof(fetch_temp));
if (auto host_addr = sim->addr_to_mem(paddr)) {
result = refill_tlb(vaddr, paddr, host_addr, FETCH);
} else {
@@ -97,7 +88,7 @@ tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr)
result = tlb_data[vpn % TLB_ENTRIES];
}
- check_triggers(triggers::OPERATION_EXECUTE, vaddr, from_le(*(const uint16_t*)(result.host_offset + vaddr)));
+ check_triggers(triggers::OPERATION_EXECUTE, vaddr, access_info.effective_virt, from_le(*(const uint16_t*)(result.host_offset + vaddr)));
return result;
}
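
For instruction fetches, neither MPRV nor the HLV overrides apply, so generate_access_info() with type FETCH leaves the mode untouched and effective_virt equals state.v. The point of building the struct here anyway is that both trigger checks — by address before translation, by opcode halfword after the refill — now receive the same recorded virt. In outline (insn_halfword is a hypothetical name for the fetched data):

```cpp
// FETCH path: priv/virt pass through unchanged, so for fetches
// ai.effective_virt == proc->state.v.
auto ai = generate_access_info(vaddr, FETCH, {false, false, false});
check_triggers(triggers::OPERATION_EXECUTE, vaddr, ai.effective_virt);  // address match
reg_t paddr = translate(ai, sizeof(fetch_temp));
// ... TLB refill ...
check_triggers(triggers::OPERATION_EXECUTE, vaddr, ai.effective_virt,
               insn_halfword);                                          // data match
```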
@@ -178,7 +169,7 @@ bool mmu_t::mmio(reg_t paddr, size_t len, uint8_t* bytes, access_type type)
return true;
}
-void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, std::optional<reg_t> data)
+void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, bool virt, std::optional<reg_t> data)
{
if (matched_trigger || !proc)
return;
@@ -188,74 +179,77 @@ void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, std::
if (match.has_value())
switch (match->timing) {
case triggers::TIMING_BEFORE:
- throw triggers::matched_t(operation, address, match->action);
+ throw triggers::matched_t(operation, address, match->action, virt);
case triggers::TIMING_AFTER:
// We want to take this exception on the next instruction. We check
// whether to do so in the I$ refill path, so flush the I$.
flush_icache();
- matched_trigger = new triggers::matched_t(operation, address, match->action);
+ matched_trigger = new triggers::matched_t(operation, address, match->action, virt);
}
}
-void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags)
+void mmu_t::load_slow_path_intrapage(reg_t len, uint8_t* bytes, mem_access_info_t access_info)
{
+ reg_t addr = access_info.vaddr;
reg_t vpn = addr >> PGSHIFT;
- if (xlate_flags == 0 && vpn == (tlb_load_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) {
+ if (!access_info.flags.is_special_access() && vpn == (tlb_load_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) {
auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr;
memcpy(bytes, host_addr, len);
return;
}
- reg_t paddr = translate(addr, len, LOAD, xlate_flags);
+ reg_t paddr = translate(access_info, len);
- if ((xlate_flags & RISCV_XLATE_LR) && !sim->reservable(paddr)) {
- throw trap_load_access_fault((proc) ? proc->state.v : false, addr, 0, 0);
+ if (access_info.flags.lr && !sim->reservable(paddr)) {
+ throw trap_load_access_fault(access_info.effective_virt, addr, 0, 0);
}
if (auto host_addr = sim->addr_to_mem(paddr)) {
memcpy(bytes, host_addr, len);
if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD))
tracer.trace(paddr, len, LOAD);
- else if (xlate_flags == 0)
+ else if (!access_info.flags.is_special_access())
refill_tlb(addr, paddr, host_addr, LOAD);
} else if (!mmio_load(paddr, len, bytes)) {
- throw trap_load_access_fault((proc) ? proc->state.v : false, addr, 0, 0);
+ throw trap_load_access_fault(access_info.effective_virt, addr, 0, 0);
}
- if (xlate_flags & RISCV_XLATE_LR) {
+ if (access_info.flags.lr) {
load_reservation_address = paddr;
}
}
-void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags)
+void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags)
{
- check_triggers(triggers::OPERATION_LOAD, addr);
+ auto access_info = generate_access_info(addr, LOAD, xlate_flags);
+ check_triggers(triggers::OPERATION_LOAD, addr, access_info.effective_virt);
if ((addr & (len - 1)) == 0) {
- load_slow_path_intrapage(addr, len, bytes, xlate_flags);
+ load_slow_path_intrapage(len, bytes, access_info);
} else {
- bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags);
+ bool gva = access_info.effective_virt;
if (!is_misaligned_enabled())
throw trap_load_address_misaligned(gva, addr, 0, 0);
- if (xlate_flags & RISCV_XLATE_LR)
+ if (access_info.flags.lr)
throw trap_load_access_fault(gva, addr, 0, 0);
reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE);
- load_slow_path_intrapage(addr, len_page0, bytes, xlate_flags);
+ load_slow_path_intrapage(len_page0, bytes, access_info);
if (len_page0 != len)
- load_slow_path_intrapage(addr + len_page0, len - len_page0, bytes + len_page0, xlate_flags);
+ load_slow_path_intrapage(len - len_page0, bytes + len_page0, access_info.split_misaligned_access(len_page0));
}
- check_triggers(triggers::OPERATION_LOAD, addr, reg_from_bytes(len, bytes));
+ check_triggers(triggers::OPERATION_LOAD, addr, access_info.effective_virt, reg_from_bytes(len, bytes));
}
-void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store)
+void mmu_t::store_slow_path_intrapage(reg_t len, const uint8_t* bytes, mem_access_info_t access_info, bool actually_store)
{
+ reg_t addr = access_info.vaddr;
reg_t vpn = addr >> PGSHIFT;
- if (xlate_flags == 0 && vpn == (tlb_store_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) {
+ if (!access_info.flags.is_special_access() && vpn == (tlb_store_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) {
if (actually_store) {
auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr;
memcpy(host_addr, bytes, len);
@@ -263,28 +257,29 @@ void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* byte
return;
}
- reg_t paddr = translate(addr, len, STORE, xlate_flags);
+ reg_t paddr = translate(access_info, len);
if (actually_store) {
if (auto host_addr = sim->addr_to_mem(paddr)) {
memcpy(host_addr, bytes, len);
if (tracer.interested_in_range(paddr, paddr + PGSIZE, STORE))
tracer.trace(paddr, len, STORE);
- else if (xlate_flags == 0)
+ else if (!access_info.flags.is_special_access())
refill_tlb(addr, paddr, host_addr, STORE);
} else if (!mmio_store(paddr, len, bytes)) {
- throw trap_store_access_fault((proc) ? proc->state.v : false, addr, 0, 0);
+ throw trap_store_access_fault(access_info.effective_virt, addr, 0, 0);
}
}
}
-void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool UNUSED require_alignment)
+void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store, bool UNUSED require_alignment)
{
+ auto access_info = generate_access_info(addr, STORE, xlate_flags);
if (actually_store)
- check_triggers(triggers::OPERATION_STORE, addr, reg_from_bytes(len, bytes));
+ check_triggers(triggers::OPERATION_STORE, addr, access_info.effective_virt, reg_from_bytes(len, bytes));
if (addr & (len - 1)) {
- bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags);
+ bool gva = access_info.effective_virt;
if (!is_misaligned_enabled())
throw trap_store_address_misaligned(gva, addr, 0, 0);
@@ -292,11 +287,11 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_
throw trap_store_access_fault(gva, addr, 0, 0);
reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE);
- store_slow_path_intrapage(addr, len_page0, bytes, xlate_flags, actually_store);
+ store_slow_path_intrapage(len_page0, bytes, access_info, actually_store);
if (len_page0 != len)
- store_slow_path_intrapage(addr + len_page0, len - len_page0, bytes + len_page0, xlate_flags, actually_store);
+ store_slow_path_intrapage(len - len_page0, bytes + len_page0, access_info.split_misaligned_access(len_page0), actually_store);
} else {
- store_slow_path_intrapage(addr, len, bytes, xlate_flags, actually_store);
+ store_slow_path_intrapage(len, bytes, access_info, actually_store);
}
}
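
A page-crossing misaligned access is still split into two intrapage accesses, but the second half now derives from the same mem_access_info_t with only the virtual address advanced, so both halves translate under identical mode, virt, and flags — and a fault on either half reports the same GVA. A standalone illustration (PRV_S and LOAD as in spike's headers; addresses are made up):

```cpp
// split_misaligned_access(off) copies every field and adds off to vaddr.
mem_access_info_t info{/*vaddr=*/0x1FFE, /*effective_priv=*/PRV_S,
                       /*effective_virt=*/true,
                       /*flags=*/{false, false, false}, /*type=*/LOAD};
auto second = info.split_misaligned_access(/*offset=*/2);
// second.vaddr == 0x2000 (start of the next page); effective_priv,
// effective_virt, flags, and type are all identical to the first half.
```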
@@ -465,8 +460,13 @@ reg_t mmu_t::s2xlate(reg_t gva, reg_t gpa, access_type type, access_type trap_ty
}
}
-reg_t mmu_t::walk(reg_t addr, access_type type, reg_t mode, bool virt, bool hlvx)
+reg_t mmu_t::walk(mem_access_info_t access_info)
{
+ access_type type = access_info.type;
+ reg_t addr = access_info.vaddr;
+ bool virt = access_info.effective_virt;
+ bool hlvx = access_info.flags.hlvx;
+ reg_t mode = access_info.effective_priv;
reg_t page_mask = (reg_t(1) << PGSHIFT) - 1;
reg_t satp = proc->get_state()->satp->readvirt(virt);
vm_info vm = decode_vm_info(proc->get_const_xlen(), false, mode, satp);
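
With the struct in place, translate() and walk() stop re-deriving privilege and virtualization and simply unpack the precomputed values, so the PMP check and the page-table walk always agree on the access's mode. The resulting call chain for an ordinary load, in sketch form (names from this commit; len is illustrative):

```cpp
auto info   = generate_access_info(vaddr, LOAD, {false, false, false});
reg_t paddr = translate(info, len);
// inside translate():
//   walk(info)          -- unpacks effective_priv/effective_virt/flags.hlvx
//   pmp_ok(paddr, len, info.type, mode)        -- mode == info.effective_priv
//   throw_access_exception(virt, addr, type)   -- virt == info.effective_virt
```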
diff --git a/riscv/mmu.h b/riscv/mmu.h
index ef054cf..5a4835c 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -38,6 +38,28 @@ struct tlb_entry_t {
reg_t target_offset;
};
+struct xlate_flags_t {
+ const bool forced_virt : 1;
+ const bool hlvx : 1;
+ const bool lr : 1;
+
+ bool is_special_access() const {
+ return forced_virt || hlvx || lr;
+ }
+};
+
+struct mem_access_info_t {
+ const reg_t vaddr;
+ const reg_t effective_priv;
+ const bool effective_virt;
+ const xlate_flags_t flags;
+ const access_type type;
+
+ mem_access_info_t split_misaligned_access(reg_t offset) const {
+ return {vaddr + offset, effective_priv, effective_virt, flags, type};
+ }
+};
+
void throw_access_exception(bool virt, reg_t addr, access_type type);
// this class implements a processor's port into the virtual memory system.
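
The three RISCV_XLATE_* bits become a struct of const bit-fields, and is_special_access() replaces the old xlate_flags == 0 fast-path test. A self-contained sanity check of that predicate (struct copied from the hunk above):

```cpp
#include <cassert>

struct xlate_flags_t {
  const bool forced_virt : 1;  // was RISCV_XLATE_VIRT
  const bool hlvx : 1;         // was RISCV_XLATE_VIRT_HLVX
  const bool lr : 1;           // was RISCV_XLATE_LR

  bool is_special_access() const { return forced_virt || hlvx || lr; }
};

int main() {
  assert(!xlate_flags_t{false, false, false}.is_special_access()); // plain access: TLB fast path allowed
  assert(xlate_flags_t{true, false, false}.is_special_access());   // HLV/HSV forced-virtual access
  assert(xlate_flags_t{true, true, false}.is_special_access());    // HLVX execute-permission load
  assert(xlate_flags_t{false, false, true}.is_special_access());   // LR needs reservation handling
}
```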
@@ -47,22 +69,38 @@ class mmu_t
private:
std::map<reg_t, reg_t> alloc_cache;
std::vector<std::pair<reg_t, reg_t >> addr_tbl;
+
+ mem_access_info_t generate_access_info(reg_t addr, access_type type, xlate_flags_t xlate_flags) {
+ if (!proc)
+ return {addr, 0, false, {false, false, false}, type};
+ bool virt = proc->state.v;
+ reg_t mode = proc->state.prv;
+ if (type != FETCH) {
+ if (in_mprv()) {
+ mode = get_field(proc->state.mstatus->read(), MSTATUS_MPP);
+ if (get_field(proc->state.mstatus->read(), MSTATUS_MPV) && mode != PRV_M)
+ virt = true;
+ }
+ if (xlate_flags.forced_virt) {
+ virt = true;
+ mode = get_field(proc->state.hstatus->read(), HSTATUS_SPVP);
+ }
+ }
+ return {addr, mode, virt, xlate_flags, type};
+ }
+
public:
mmu_t(simif_t* sim, endianness_t endianness, processor_t* proc);
~mmu_t();
-#define RISCV_XLATE_VIRT (1U << 0)
-#define RISCV_XLATE_VIRT_HLVX (1U << 1)
-#define RISCV_XLATE_LR (1U << 2)
-
template<typename T>
- T ALWAYS_INLINE load(reg_t addr, uint32_t xlate_flags = 0) {
+ T ALWAYS_INLINE load(reg_t addr, xlate_flags_t xlate_flags = {false, false, false}) {
target_endian<T> res;
reg_t vpn = addr >> PGSHIFT;
bool aligned = (addr & (sizeof(T) - 1)) == 0;
bool tlb_hit = tlb_load_tag[vpn % TLB_ENTRIES] == vpn;
- if (likely(xlate_flags == 0 && aligned && tlb_hit)) {
+ if (likely(!xlate_flags.is_special_access() && aligned && tlb_hit)) {
res = *(target_endian<T>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr);
} else {
load_slow_path(addr, sizeof(T), (uint8_t*)&res, xlate_flags);
@@ -76,26 +114,35 @@ public:
template<typename T>
T load_reserved(reg_t addr) {
- return load<T>(addr, RISCV_XLATE_LR);
+ bool forced_virt = false;
+ bool hlvx = false;
+ bool lr = true;
+ return load<T>(addr, {forced_virt, hlvx, lr});
}
template<typename T>
T guest_load(reg_t addr) {
- return load<T>(addr, RISCV_XLATE_VIRT);
+ bool forced_virt = true;
+ bool hlvx = false;
+ bool lr = false;
+ return load<T>(addr, {forced_virt, hlvx, lr});
}
template<typename T>
T guest_load_x(reg_t addr) {
- return load<T>(addr, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX);
+ bool forced_virt = true;
+ bool hlvx = true;
+ bool lr = false;
+ return load<T>(addr, {forced_virt, hlvx, lr});
}
template<typename T>
- void ALWAYS_INLINE store(reg_t addr, T val, uint32_t xlate_flags = 0) {
+ void ALWAYS_INLINE store(reg_t addr, T val, xlate_flags_t xlate_flags = {false, false, false}) {
reg_t vpn = addr >> PGSHIFT;
bool aligned = (addr & (sizeof(T) - 1)) == 0;
bool tlb_hit = tlb_store_tag[vpn % TLB_ENTRIES] == vpn;
- if (xlate_flags == 0 && likely(aligned && tlb_hit)) {
+ if (!xlate_flags.is_special_access() && likely(aligned && tlb_hit)) {
*(target_endian<T>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val);
} else {
target_endian<T> target_val = to_target(val);
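
Because the public helpers keep their shapes, instruction implementations are untouched by the refactor; only the flag plumbing beneath them changed. For example, the lr.d and hlv.w instruction bodies (as in riscv/insns/lr_d.h and hlv_w.h, where MMU is spike's macro for reaching the hart's mmu_t):

```cpp
WRITE_RD(MMU.load_reserved<int64_t>(RS1));  // now passes {false, false, true}
WRITE_RD(MMU.guest_load<int32_t>(RS1));     // now passes {true,  false, false}
```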
@@ -108,7 +155,10 @@ public:
template<typename T>
void guest_store(reg_t addr, T val) {
- store(addr, val, RISCV_XLATE_VIRT);
+ bool forced_virt = true;
+ bool hlvx = false;
+ bool lr = false;
+ store(addr, val, {forced_virt, hlvx, lr});
}
// AMO/Zicbom faults should be reported as store faults
@@ -130,7 +180,7 @@ public:
template<typename T, typename op>
T amo(reg_t addr, op f) {
convert_load_traps_to_store_traps({
- store_slow_path(addr, sizeof(T), nullptr, 0, false, true);
+ store_slow_path(addr, sizeof(T), nullptr, {false, false, false}, false, true);
auto lhs = load<T>(addr);
store<T>(addr, f(lhs));
return lhs;
@@ -164,7 +214,7 @@ public:
void clean_inval(reg_t addr, bool clean, bool inval) {
convert_load_traps_to_store_traps({
- const reg_t paddr = translate(addr, blocksz, LOAD, 0) & ~(blocksz - 1);
+ const reg_t paddr = translate(generate_access_info(addr, LOAD, {false, false, false}), blocksz) & ~(blocksz - 1);
if (sim->reservable(paddr)) {
if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD))
tracer.clean_invalidate(paddr, blocksz, clean, inval);
@@ -183,10 +233,10 @@ public:
{
if (vaddr & (size-1)) {
// Raise either access fault or misaligned exception
- store_slow_path(vaddr, size, nullptr, 0, false, true);
+ store_slow_path(vaddr, size, nullptr, {false, false, false}, false, true);
}
- reg_t paddr = translate(vaddr, 1, STORE, 0);
+ reg_t paddr = translate(generate_access_info(vaddr, STORE, {false, false, false}), 1);
if (sim->reservable(paddr))
return load_reservation_address == paddr;
else
@@ -328,21 +378,21 @@ private:
reg_t s2xlate(reg_t gva, reg_t gpa, access_type type, access_type trap_type, bool virt, bool hlvx);
// perform a page table walk for a given VA; set referenced/dirty bits
- reg_t walk(reg_t addr, access_type type, reg_t prv, bool virt, bool hlvx);
+ reg_t walk(mem_access_info_t access_info);
// handle uncommon cases: TLB misses, page faults, MMIO
tlb_entry_t fetch_slow_path(reg_t addr);
- void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags);
- void load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags);
- void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool require_alignment);
- void store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store);
+ void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags);
+ void load_slow_path_intrapage(reg_t len, uint8_t* bytes, mem_access_info_t access_info);
+ void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store, bool require_alignment);
+ void store_slow_path_intrapage(reg_t len, const uint8_t* bytes, mem_access_info_t access_info, bool actually_store);
bool mmio_fetch(reg_t paddr, size_t len, uint8_t* bytes);
bool mmio_load(reg_t paddr, size_t len, uint8_t* bytes);
bool mmio_store(reg_t paddr, size_t len, const uint8_t* bytes);
bool mmio(reg_t paddr, size_t len, uint8_t* bytes, access_type type);
bool mmio_ok(reg_t paddr, access_type type);
- void check_triggers(triggers::operation_t operation, reg_t address, std::optional<reg_t> data = std::nullopt);
- reg_t translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_flags);
+ void check_triggers(triggers::operation_t operation, reg_t address, bool virt, std::optional<reg_t> data = std::nullopt);
+ reg_t translate(mem_access_info_t access_info, reg_t len);
reg_t pte_load(reg_t pte_paddr, reg_t addr, bool virt, access_type trap_type, size_t ptesize) {
if (ptesize == 4)
diff --git a/riscv/processor.cc b/riscv/processor.cc
index 330bd30..74a0b8f 100644
--- a/riscv/processor.cc
+++ b/riscv/processor.cc
@@ -885,7 +885,7 @@ void processor_t::take_trap(trap_t& t, reg_t epc)
}
}
-void processor_t::take_trigger_action(triggers::action_t action, reg_t breakpoint_tval, reg_t epc)
+void processor_t::take_trigger_action(triggers::action_t action, reg_t breakpoint_tval, reg_t epc, bool virt)
{
if (debug) {
std::stringstream s; // first put everything in a string, later send it to output
@@ -899,7 +899,7 @@ void processor_t::take_trigger_action(triggers::action_t action, reg_t breakpoin
enter_debug_mode(DCSR_CAUSE_HWBP);
break;
case triggers::ACTION_DEBUG_EXCEPTION: {
- trap_breakpoint trap(state.v, breakpoint_tval);
+ trap_breakpoint trap(virt, breakpoint_tval);
take_trap(trap, epc);
break;
}
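
This is the observable fix for #872: the GVA bit of a trigger-induced breakpoint trap now comes from the virtualization mode recorded when the trigger matched, not from the hart's V bit at trap time. In outline:

```cpp
// Before: trap_breakpoint trap(state.v, breakpoint_tval);
//   wrong GVA when the matched access ran as virtual (HLV/HSV, or
//   MPRV with MPV set) while the hart itself had V=0 — or vice versa.
// After: the virt captured in matched_t flows all the way through:
take_trigger_action(t.action, t.address, pc, t.gva);  // from riscv/execute.cc
// ...
trap_breakpoint trap(virt, breakpoint_tval);          // sets mstatus.GVA via take_trap()
take_trap(trap, epc);
```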
diff --git a/riscv/processor.h b/riscv/processor.h
index 8117568..1b74cc2 100644
--- a/riscv/processor.h
+++ b/riscv/processor.h
@@ -331,7 +331,7 @@ private:
void take_pending_interrupt() { take_interrupt(state.mip->read() & state.mie->read()); }
void take_interrupt(reg_t mask); // take first enabled interrupt in mask
void take_trap(trap_t& t, reg_t epc); // take an exception
- void take_trigger_action(triggers::action_t action, reg_t breakpoint_tval, reg_t epc);
+ void take_trigger_action(triggers::action_t action, reg_t breakpoint_tval, reg_t epc, bool virt);
void disasm(insn_t insn); // disassemble and print an instruction
int paddr_bits();
diff --git a/riscv/triggers.h b/riscv/triggers.h
index 6e3d74d..aeda4d5 100644
--- a/riscv/triggers.h
+++ b/riscv/triggers.h
@@ -54,12 +54,13 @@ struct match_result_t {
class matched_t
{
public:
- matched_t(triggers::operation_t operation, reg_t address, action_t action) :
- operation(operation), address(address), action(action) {}
+ matched_t(triggers::operation_t operation, reg_t address, action_t action, bool gva) :
+ operation(operation), address(address), action(action), gva(gva) {}
triggers::operation_t operation;
reg_t address;
action_t action;
+ bool gva;
};
class trigger_t {