From 115a9b3dc21aa001275ce898bc5bc73e43fccab5 Mon Sep 17 00:00:00 2001 From: "soberl@nvidia.com" Date: Tue, 3 May 2022 19:51:33 -0700 Subject: Update mmu_t::pmp_ok() for ePMP in case matching region is not found --- riscv/mmu.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'riscv/mmu.cc') diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 1ef81cf..2918f0b 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -242,7 +242,11 @@ bool mmu_t::pmp_ok(reg_t addr, reg_t len, access_type type, reg_t mode) } } - return mode == PRV_M; + // in case matching region is not found + const bool mseccfg_mml = proc->state.mseccfg->get_mml(); + const bool mseccfg_mmwp = proc->state.mseccfg->get_mmwp(); + return ((mode == PRV_M) && !mseccfg_mmwp + && (!mseccfg_mml || ((type == LOAD) || (type == STORE)))); } reg_t mmu_t::pmp_homogeneous(reg_t addr, reg_t len) -- cgit v1.1 From 996634f0be06099401eec5165428b3ecc4a72fb6 Mon Sep 17 00:00:00 2001 From: Ryan Buchner Date: Sat, 7 May 2022 22:33:16 -0700 Subject: Switch from checking for SVPBMT extension to checking *ENVCFG values during tablewalks Fix issue #990. --- riscv/mmu.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'riscv/mmu.cc') diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 2918f0b..3868c70 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -294,12 +294,13 @@ reg_t mmu_t::s2xlate(reg_t gva, reg_t gpa, access_type type, access_type trap_ty reg_t pte = vm.ptesize == 4 ? 
from_target(*(target_endian*)ppte) : from_target(*(target_endian*)ppte); reg_t ppn = (pte & ~reg_t(PTE_ATTR)) >> PTE_PPN_SHIFT; + bool pbmte = proc->get_state()->menvcfg->read() & MENVCFG_PBMTE; if (pte & PTE_RSVD) { break; } else if (!proc->extension_enabled(EXT_SVNAPOT) && (pte & PTE_N)) { break; - } else if (!proc->extension_enabled(EXT_SVPBMT) && (pte & PTE_PBMT)) { + } else if (!pbmte && (pte & PTE_PBMT)) { break; } else if (PTE_TABLE(pte)) { // next level of page table if (pte & (PTE_D | PTE_A | PTE_U | PTE_N | PTE_PBMT)) @@ -384,12 +385,13 @@ reg_t mmu_t::walk(reg_t addr, access_type type, reg_t mode, bool virt, bool hlvx reg_t pte = vm.ptesize == 4 ? from_target(*(target_endian*)ppte) : from_target(*(target_endian*)ppte); reg_t ppn = (pte & ~reg_t(PTE_ATTR)) >> PTE_PPN_SHIFT; + bool pbmte = virt ? (proc->get_state()->henvcfg->read() & HENVCFG_PBMTE) : (proc->get_state()->menvcfg->read() & MENVCFG_PBMTE); if (pte & PTE_RSVD) { break; } else if (!proc->extension_enabled(EXT_SVNAPOT) && (pte & PTE_N)) { break; - } else if (!proc->extension_enabled(EXT_SVPBMT) && (pte & PTE_PBMT)) { + } else if (!pbmte && (pte & PTE_PBMT)) { break; } else if (PTE_TABLE(pte)) { // next level of page table if (pte & (PTE_D | PTE_A | PTE_U | PTE_N | PTE_PBMT)) -- cgit v1.1 From ccfeaa99732479b029ce95fc9b09968c7f3b1f64 Mon Sep 17 00:00:00 2001 From: Ryan Buchner Date: Sat, 7 May 2022 22:36:02 -0700 Subject: Check for reserved PBMT values during tablewalks and fault if found See #990. 
--- riscv/mmu.cc | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'riscv/mmu.cc') diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 3868c70..d0a55f0 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -302,6 +302,8 @@ reg_t mmu_t::s2xlate(reg_t gva, reg_t gpa, access_type type, access_type trap_ty break; } else if (!pbmte && (pte & PTE_PBMT)) { break; + } else if ((pte & PTE_PBMT) == PTE_PBMT) { + break; } else if (PTE_TABLE(pte)) { // next level of page table if (pte & (PTE_D | PTE_A | PTE_U | PTE_N | PTE_PBMT)) break; @@ -393,6 +395,8 @@ reg_t mmu_t::walk(reg_t addr, access_type type, reg_t mode, bool virt, bool hlvx break; } else if (!pbmte && (pte & PTE_PBMT)) { break; + } else if ((pte & PTE_PBMT) == PTE_PBMT) { + break; } else if (PTE_TABLE(pte)) { // next level of page table if (pte & (PTE_D | PTE_A | PTE_U | PTE_N | PTE_PBMT)) break; -- cgit v1.1 From b724db52f9d7be3e3068e5bf01ac939ece8d032b Mon Sep 17 00:00:00 2001 From: YenHaoChen Date: Fri, 30 Sep 2022 11:48:44 +0800 Subject: Add has_data argument to trigger checking functions The mcontrol trigger can select either address or data for checking. The selection decides the priority of the trigger. For instance, the address trigger has a higher priority over the page fault, and the page fault has a higher priority over the data trigger. The previous implementation only has the checking functions for data trigger, which results in incorrect priority of address trigger. This commit adds a has_data argument to indicate address trigger and the priority of the trigger. 
--- riscv/mmu.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'riscv/mmu.cc') diff --git a/riscv/mmu.cc b/riscv/mmu.cc index d0a55f0..0dbb94e 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -155,7 +155,7 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate if (!matched_trigger) { reg_t data = reg_from_bytes(len, bytes); - matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, data); + matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, true, data); if (matched_trigger) throw *matched_trigger; } @@ -167,7 +167,7 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_ if (!matched_trigger) { reg_t data = reg_from_bytes(len, bytes); - matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, data); + matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, data); if (matched_trigger) throw *matched_trigger; } -- cgit v1.1 From 99cb603973b3f9575b411b80934035f3ee7fbcf3 Mon Sep 17 00:00:00 2001 From: YenHaoChen Date: Fri, 30 Sep 2022 12:01:09 +0800 Subject: Fix priority of mcontrol trigger load address before The spec defines the mcontrol load address has a higher priority over page fault and address misaligned (Debug spec, Table 5.2). Thus, the trigger checking should be before the translation and alignment checking. The previous implementation checks the trigger after the translation and alignment, resulting in incorrect priority. For instance, when page fault and trigger occur on the same instruction, the previous implementation will choose to raise the page fault, which contradicts the priority requirement. This commit adds an address-only trigger checking before the misaligned checking and translation. The trigger will fire on the instruction instead of the page fault in the above case. 
--- riscv/mmu.cc | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'riscv/mmu.cc') diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 0dbb94e..3048bde 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -141,6 +141,13 @@ bool mmu_t::mmio_store(reg_t addr, size_t len, const uint8_t* bytes) void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags) { + if (!matched_trigger) { + reg_t data = reg_from_bytes(len, bytes); + matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, false); + if (matched_trigger) + throw *matched_trigger; + } + reg_t paddr = translate(addr, len, LOAD, xlate_flags); if (auto host_addr = sim->addr_to_mem(paddr)) { -- cgit v1.1 From d52858f3a80d3f87bfedffbff1fec34245a63082 Mon Sep 17 00:00:00 2001 From: YenHaoChen Date: Fri, 30 Sep 2022 12:05:40 +0800 Subject: Fix priority of mcontrol trigger store address/data before The spec defines that the mcontrol store address/data has a higher priority over page fault and address misalignment (Debug spec, Table 5.2). Thus, the trigger checking should be before the translation and alignment checking. The previous implementation checks the trigger after the translation and alignment, resulting in incorrect priority. For instance, when page fault and trigger occur on the same instruction, the previous implementation will choose to raise the page fault, which contradicts the priority requirement. This commit moves the trigger checking before the misaligned checking and translation. The trigger will fire on the instruction instead of the page fault in the above case. 
--- riscv/mmu.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'riscv/mmu.cc') diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 3048bde..ede5722 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -170,8 +170,6 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store) { - reg_t paddr = translate(addr, len, STORE, xlate_flags); - if (!matched_trigger) { reg_t data = reg_from_bytes(len, bytes); matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, data); @@ -179,6 +177,8 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_ throw *matched_trigger; } + reg_t paddr = translate(addr, len, STORE, xlate_flags); + if (actually_store) { if (auto host_addr = sim->addr_to_mem(paddr)) { memcpy(host_addr, bytes, len); -- cgit v1.1 From ab104011ba66fb3c88ce8405236908dc8700c679 Mon Sep 17 00:00:00 2001 From: YenHaoChen Date: Fri, 30 Sep 2022 12:08:38 +0800 Subject: Check trigger only with actually_store The data value of the function store_slow_path() is meaningful only when the actually_store=true. Otherwise, the data value is a hollow value of 0, which may result in unintended trigger matching. 
--- riscv/mmu.cc | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'riscv/mmu.cc') diff --git a/riscv/mmu.cc b/riscv/mmu.cc index ede5722..9935ed2 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -170,11 +170,13 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store) { - if (!matched_trigger) { - reg_t data = reg_from_bytes(len, bytes); - matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, data); - if (matched_trigger) - throw *matched_trigger; + if (actually_store) { + if (!matched_trigger) { + reg_t data = reg_from_bytes(len, bytes); + matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, data); + if (matched_trigger) + throw *matched_trigger; + } } reg_t paddr = translate(addr, len, STORE, xlate_flags); -- cgit v1.1 From ce69fb5db97ecf240336b7826dd9dddeb32e5dca Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 22 Sep 2022 17:34:33 -0700 Subject: Suppress most unused variable warnings --- riscv/mmu.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'riscv/mmu.cc') diff --git a/riscv/mmu.cc b/riscv/mmu.cc index d0a55f0..1028026 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -114,7 +114,7 @@ reg_t reg_from_bytes(size_t len, const uint8_t* bytes) abort(); } -bool mmu_t::mmio_ok(reg_t addr, access_type type) +bool mmu_t::mmio_ok(reg_t addr, access_type UNUSED type) { // Disallow access to debug region when not in debug mode if (addr >= DEBUG_START && addr <= DEBUG_END && proc && !proc->state.debug_mode) -- cgit v1.1 From d7edd7ac550dab81b84f2001793e04b663330658 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Wed, 5 Oct 2022 17:21:11 -0700 Subject: Remove unused variable to fix build --- riscv/mmu.cc | 1 - 1 file changed, 1 deletion(-) (limited to 'riscv/mmu.cc') diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 367c859..38a7241 
100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -142,7 +142,6 @@ bool mmu_t::mmio_store(reg_t addr, size_t len, const uint8_t* bytes) void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags) { if (!matched_trigger) { - reg_t data = reg_from_bytes(len, bytes); matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, false); if (matched_trigger) throw *matched_trigger; -- cgit v1.1 From 749ead90a5c254ca23d54ef3e0669e51df127a5d Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Wed, 5 Oct 2022 18:09:53 -0700 Subject: Move all uncommon-case load functionality into load_slow_path As a side effect, misaligned loads now behave the same as aligned loads with respect to triggers: only the first byte is checked. --- riscv/mmu.cc | 37 ++++++++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) (limited to 'riscv/mmu.cc') diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 38a7241..49889df 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -139,12 +139,13 @@ bool mmu_t::mmio_store(reg_t addr, size_t len, const uint8_t* bytes) return sim->mmio_store(addr, len, bytes); } -void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags) +void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags) { - if (!matched_trigger) { - matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, false); - if (matched_trigger) - throw *matched_trigger; + reg_t vpn = addr >> PGSHIFT; + if (xlate_flags == 0 && vpn == (tlb_load_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) { + auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr; + memcpy(bytes, host_addr, len); + return; } reg_t paddr = translate(addr, len, LOAD, xlate_flags); @@ -158,6 +159,32 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate } else if (!mmio_load(paddr, len, bytes)) { throw trap_load_access_fault((proc) ? 
proc->state.v : false, addr, 0, 0); } +} + +void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags, bool UNUSED require_alignment) +{ + if (!matched_trigger) { + matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, false); + if (matched_trigger) + throw *matched_trigger; + } + + if ((addr & (len - 1)) == 0) { + load_slow_path_intrapage(addr, len, bytes, xlate_flags); + } else { + bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); +#ifndef RISCV_ENABLE_MISALIGNED + throw trap_load_address_misaligned(gva, addr, 0, 0); +#else + if (require_alignment) + throw trap_load_access_fault(gva, addr, 0, 0); + + reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE); + load_slow_path_intrapage(addr, len_page0, bytes, xlate_flags); + if (len_page0 != len) + load_slow_path_intrapage(addr + len_page0, len - len_page0, bytes + len_page0, xlate_flags); +#endif + } if (!matched_trigger) { reg_t data = reg_from_bytes(len, bytes); -- cgit v1.1 From 6311f7513aa150797f69ecac906978bb9e9fecbd Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Wed, 5 Oct 2022 18:38:02 -0700 Subject: Move all uncommon-case store functionality into store_slow_path As a side effect, misaligned stores now behave the same as aligned stores with respect to triggers: only the first byte is checked. 
--- riscv/mmu.cc | 43 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 35 insertions(+), 8 deletions(-) (limited to 'riscv/mmu.cc') diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 49889df..9c9a6c5 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -194,15 +194,13 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate } } -void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store) +void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store) { - if (actually_store) { - if (!matched_trigger) { - reg_t data = reg_from_bytes(len, bytes); - matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, data); - if (matched_trigger) - throw *matched_trigger; - } + reg_t vpn = addr >> PGSHIFT; + if (xlate_flags == 0 && vpn == (tlb_store_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) { + auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr; + memcpy(host_addr, bytes, len); + return; } reg_t paddr = translate(addr, len, STORE, xlate_flags); @@ -220,6 +218,35 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_ } } +void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool UNUSED require_alignment) +{ + if (actually_store) { + if (!matched_trigger) { + reg_t data = reg_from_bytes(len, bytes); + matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, data); + if (matched_trigger) + throw *matched_trigger; + } + } + + if (addr & (len - 1)) { + bool gva = ((proc) ? 
proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); +#ifndef RISCV_ENABLE_MISALIGNED + throw trap_store_address_misaligned(gva, addr, 0, 0); +#else + if (require_alignment) + throw trap_store_access_fault(gva, addr, 0, 0); + + reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE); + store_slow_path_intrapage(addr, len_page0, bytes, xlate_flags, actually_store); + if (len_page0 != len) + store_slow_path_intrapage(addr + len_page0, len - len_page0, bytes + len_page0, xlate_flags, actually_store); +#endif + } else { + store_slow_path_intrapage(addr, len, bytes, xlate_flags, actually_store); + } +} + tlb_entry_t mmu_t::refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type) { reg_t idx = (vaddr >> PGSHIFT) % TLB_ENTRIES; -- cgit v1.1 From 14410156b2d796dad0e439b9535057f1e4c5a13c Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 6 Oct 2022 14:31:07 -0700 Subject: Move uncommon-case fetch functionality into fetch_slow_path --- riscv/mmu.cc | 32 ++++++++++++++++++++++++-------- 1 file changed, 24 insertions(+), 8 deletions(-) (limited to 'riscv/mmu.cc') diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 9c9a6c5..5a5f5c0 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -76,16 +76,32 @@ reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_f tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr) { - reg_t paddr = translate(vaddr, sizeof(fetch_temp), FETCH, 0); - - if (auto host_addr = sim->addr_to_mem(paddr)) { - return refill_tlb(vaddr, paddr, host_addr, FETCH); + triggers::action_t action; + auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, vaddr, false); + if (match != triggers::MATCH_NONE) + throw triggers::matched_t(triggers::OPERATION_EXECUTE, vaddr, 0, action); + + tlb_entry_t result; + reg_t vpn = vaddr >> PGSHIFT; + if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] != (vpn | TLB_CHECK_TRIGGERS))) { + reg_t paddr = translate(vaddr, sizeof(fetch_temp), FETCH, 0); + if (auto host_addr = 
sim->addr_to_mem(paddr)) { + result = refill_tlb(vaddr, paddr, host_addr, FETCH); + } else { + if (!mmio_load(paddr, sizeof fetch_temp, (uint8_t*)&fetch_temp)) + throw trap_instruction_access_fault(proc->state.v, vaddr, 0, 0); + result = {(char*)&fetch_temp - vaddr, paddr - vaddr}; + } } else { - if (!mmio_load(paddr, sizeof fetch_temp, (uint8_t*)&fetch_temp)) - throw trap_instruction_access_fault(proc->state.v, vaddr, 0, 0); - tlb_entry_t entry = {(char*)&fetch_temp - vaddr, paddr - vaddr}; - return entry; + result = tlb_data[vpn % TLB_ENTRIES]; } + + target_endian* ptr = (target_endian*)(result.host_offset + vaddr); + match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, vaddr, true, from_target(*ptr)); + if (match != triggers::MATCH_NONE) + throw triggers::matched_t(triggers::OPERATION_EXECUTE, vaddr, from_target(*ptr), action); + + return result; } reg_t reg_from_bytes(size_t len, const uint8_t* bytes) -- cgit v1.1 From 197f3e2640a182c7734d781bf61f570457cce5b8 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 6 Oct 2022 14:35:59 -0700 Subject: DRY in checking triggers --- riscv/mmu.cc | 53 +++++++++++++++++++++++++++-------------------------- 1 file changed, 27 insertions(+), 26 deletions(-) (limited to 'riscv/mmu.cc') diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 5a5f5c0..f87e879 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -76,10 +76,7 @@ reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_f tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr) { - triggers::action_t action; - auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, vaddr, false); - if (match != triggers::MATCH_NONE) - throw triggers::matched_t(triggers::OPERATION_EXECUTE, vaddr, 0, action); + check_triggers(triggers::OPERATION_EXECUTE, vaddr, false); tlb_entry_t result; reg_t vpn = vaddr >> PGSHIFT; @@ -97,9 +94,7 @@ tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr) } target_endian* ptr = 
(target_endian*)(result.host_offset + vaddr); - match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, vaddr, true, from_target(*ptr)); - if (match != triggers::MATCH_NONE) - throw triggers::matched_t(triggers::OPERATION_EXECUTE, vaddr, from_target(*ptr), action); + check_triggers(triggers::OPERATION_EXECUTE, vaddr, true, from_target(*ptr)); return result; } @@ -155,6 +150,27 @@ bool mmu_t::mmio_store(reg_t addr, size_t len, const uint8_t* bytes) return sim->mmio_store(addr, len, bytes); } +void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, bool has_data, reg_t data) +{ + if (matched_trigger || !proc) + return; + + triggers::action_t action; + auto match = proc->TM.memory_access_match(&action, operation, address, has_data, data); + + switch (match) { + case triggers::MATCH_NONE: + return; + + case triggers::MATCH_FIRE_BEFORE: + throw triggers::matched_t(operation, address, data, action); + + case triggers::MATCH_FIRE_AFTER: + matched_trigger = new triggers::matched_t(operation, address, data, action); + throw *matched_trigger; + } +} + void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags) { reg_t vpn = addr >> PGSHIFT; @@ -179,11 +195,7 @@ void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags, bool UNUSED require_alignment) { - if (!matched_trigger) { - matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, false); - if (matched_trigger) - throw *matched_trigger; - } + check_triggers(triggers::OPERATION_LOAD, addr, false); if ((addr & (len - 1)) == 0) { load_slow_path_intrapage(addr, len, bytes, xlate_flags); @@ -202,12 +214,7 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate #endif } - if (!matched_trigger) { - reg_t data = reg_from_bytes(len, bytes); - matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, 
true, data); - if (matched_trigger) - throw *matched_trigger; - } + check_triggers(triggers::OPERATION_LOAD, addr, true, reg_from_bytes(len, bytes)); } void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store) @@ -236,14 +243,8 @@ void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* byte void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool UNUSED require_alignment) { - if (actually_store) { - if (!matched_trigger) { - reg_t data = reg_from_bytes(len, bytes); - matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, data); - if (matched_trigger) - throw *matched_trigger; - } - } + if (actually_store) + check_triggers(triggers::OPERATION_STORE, addr, true, reg_from_bytes(len, bytes)); if (addr & (len - 1)) { bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); -- cgit v1.1 From fd50768df9ec4d9f80c6a37d89734d9e27443f6b Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 6 Oct 2022 14:53:32 -0700 Subject: Fix endianness bug in fetch triggers Instruction fetch is always little-endian. 
--- riscv/mmu.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'riscv/mmu.cc') diff --git a/riscv/mmu.cc b/riscv/mmu.cc index f87e879..b8690ec 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -93,8 +93,7 @@ tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr) result = tlb_data[vpn % TLB_ENTRIES]; } - target_endian* ptr = (target_endian*)(result.host_offset + vaddr); - check_triggers(triggers::OPERATION_EXECUTE, vaddr, true, from_target(*ptr)); + check_triggers(triggers::OPERATION_EXECUTE, vaddr, true, from_le(*(const uint16_t*)(result.host_offset + vaddr))); return result; } -- cgit v1.1 From 7b8114f707a7b2de9fd2d393b9d019180de83025 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 6 Oct 2022 17:40:41 -0700 Subject: Don't use reexecution as the means to implement trigger-after The scheme was based on the notion that memory accesses are idempotent up until the point the trigger would've been hit, which isn't true in the case of side-effecting loads and data-value triggers. Instead, check the trigger on the next instruction fetch. To keep the perf overhead minimal, perform this check on the I$ refill path, and ensure that path is taken by flushing the I$. --- riscv/mmu.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'riscv/mmu.cc') diff --git a/riscv/mmu.cc b/riscv/mmu.cc index b8690ec..fdad05f 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -165,8 +165,11 @@ void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, bool throw triggers::matched_t(operation, address, data, action); case triggers::MATCH_FIRE_AFTER: + // We want to take this exception on the next instruction. We check + // whether to do so in the I$ refill path, so flush the I$. 
+ flush_icache(); matched_trigger = new triggers::matched_t(operation, address, data, action); - throw *matched_trigger; + return; } } -- cgit v1.1 From 37c8985013b1a15a8043f002e94f38e09cb55ef4 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 13 Oct 2022 13:51:06 -0700 Subject: Remove unused field matched_t::data --- riscv/mmu.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'riscv/mmu.cc') diff --git a/riscv/mmu.cc b/riscv/mmu.cc index fdad05f..53c6fba 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -162,13 +162,13 @@ void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, bool return; case triggers::MATCH_FIRE_BEFORE: - throw triggers::matched_t(operation, address, data, action); + throw triggers::matched_t(operation, address, action); case triggers::MATCH_FIRE_AFTER: // We want to take this exception on the next instruction. We check // whether to do so in the I$ refill path, so flush the I$. flush_icache(); - matched_trigger = new triggers::matched_t(operation, address, data, action); + matched_trigger = new triggers::matched_t(operation, address, action); return; } } -- cgit v1.1 From 062ef8868033605b56269b185e3584ef2370ac12 Mon Sep 17 00:00:00 2001 From: Andrew Waterman Date: Thu, 13 Oct 2022 13:57:08 -0700 Subject: In triggers, use optional instead of {has_data, data} --- riscv/mmu.cc | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'riscv/mmu.cc') diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 53c6fba..c77b6b1 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -76,7 +76,7 @@ reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_f tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr) { - check_triggers(triggers::OPERATION_EXECUTE, vaddr, false); + check_triggers(triggers::OPERATION_EXECUTE, vaddr); tlb_entry_t result; reg_t vpn = vaddr >> PGSHIFT; @@ -93,7 +93,7 @@ tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr) result = tlb_data[vpn % TLB_ENTRIES]; } - 
check_triggers(triggers::OPERATION_EXECUTE, vaddr, true, from_le(*(const uint16_t*)(result.host_offset + vaddr))); + check_triggers(triggers::OPERATION_EXECUTE, vaddr, from_le(*(const uint16_t*)(result.host_offset + vaddr))); return result; } @@ -149,13 +149,13 @@ bool mmu_t::mmio_store(reg_t addr, size_t len, const uint8_t* bytes) return sim->mmio_store(addr, len, bytes); } -void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, bool has_data, reg_t data) +void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, std::optional data) { if (matched_trigger || !proc) return; triggers::action_t action; - auto match = proc->TM.memory_access_match(&action, operation, address, has_data, data); + auto match = proc->TM.memory_access_match(&action, operation, address, data); switch (match) { case triggers::MATCH_NONE: @@ -197,7 +197,7 @@ void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags, bool UNUSED require_alignment) { - check_triggers(triggers::OPERATION_LOAD, addr, false); + check_triggers(triggers::OPERATION_LOAD, addr); if ((addr & (len - 1)) == 0) { load_slow_path_intrapage(addr, len, bytes, xlate_flags); @@ -216,7 +216,7 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate #endif } - check_triggers(triggers::OPERATION_LOAD, addr, true, reg_from_bytes(len, bytes)); + check_triggers(triggers::OPERATION_LOAD, addr, reg_from_bytes(len, bytes)); } void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store) @@ -246,7 +246,7 @@ void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* byte void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool UNUSED require_alignment) { if (actually_store) - check_triggers(triggers::OPERATION_STORE, addr, 
true, reg_from_bytes(len, bytes)); + check_triggers(triggers::OPERATION_STORE, addr, reg_from_bytes(len, bytes)); if (addr & (len - 1)) { bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); -- cgit v1.1