From c52ae38779ae027db7034abd9b0a61970df8dd45 Mon Sep 17 00:00:00 2001
From: Ryan Buchner
Date: Thu, 21 Apr 2022 11:19:54 -0700
Subject: Add require_alignment tag to store_func

It will be used the same way as in load_func.
---
 riscv/mmu.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index dcf338f..03a2d39 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -147,7 +147,7 @@ public:
 
   // template for functions that store an aligned value to memory
   #define store_func(type, prefix, xlate_flags) \
-    void prefix##_##type(reg_t addr, type##_t val, bool actually_store=true) { \
+    void prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \
      if (unlikely(addr & (sizeof(type##_t)-1))) \
        return misaligned_store(addr, val, sizeof(type##_t), xlate_flags); \
      reg_t vpn = addr >> PGSHIFT; \
--
cgit v1.1

From 004bdc492710e0abd23771fdba33097a9ae1f792 Mon Sep 17 00:00:00 2001
From: Ryan Buchner
Date: Thu, 21 Apr 2022 11:21:27 -0700
Subject: Set require_alignment to true on the 'fake' store in amo_func.

---
 riscv/mmu.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index 03a2d39..c08acd3 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -196,7 +196,7 @@ public:
     template<typename op> \
     type##_t amo_##type(reg_t addr, op f) { \
       convert_load_traps_to_store_traps({ \
-        store_##type(addr, 0, false); \
+        store_##type(addr, 0, false, true); \
         auto lhs = load_##type(addr, true); \
         store_##type(addr, f(lhs)); \
         return lhs; \
--
cgit v1.1

From 61eba540e15d4f27d8c3a78b605c614096b3e552 Mon Sep 17 00:00:00 2001
From: Ryan Buchner
Date: Thu, 21 Apr 2022 11:23:35 -0700
Subject: Modify store_func to throw a fault if misaligned and require_alignment=true

---
 riscv/mmu.h | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index c08acd3..415cf1a 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -148,8 +148,10 @@ public:
   // template for functions that store an aligned value to memory
   #define store_func(type, prefix, xlate_flags) \
     void prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \
-     if (unlikely(addr & (sizeof(type##_t)-1))) \
-       return misaligned_store(addr, val, sizeof(type##_t), xlate_flags); \
+     if (unlikely(addr & (sizeof(type##_t)-1))) { \
+       if (require_alignment) store_conditional_address_misaligned(addr); \
+       else return misaligned_store(addr, val, sizeof(type##_t), xlate_flags); \
+     } \
      reg_t vpn = addr >> PGSHIFT; \
      size_t size = sizeof(type##_t); \
      if ((xlate_flags) == 0 && likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) { \
--
cgit v1.1

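Taken together, the three patches above make amo_func's leading 'fake' store both trap-converting and alignment-enforcing. The shape of the sequence is easier to see outside the macro syntax; the sketch below is illustrative only, and probe_store, load_u32, and store_u32 are hypothetical stand-ins for spike's store_##type/load_##type accessors rather than its real API.

    #include <cstdint>
    #include <functional>

    // Hypothetical stand-ins for the MMU accessors used by amo_func.
    void probe_store(uint64_t addr, bool actually_store, bool require_alignment);
    uint32_t load_u32(uint64_t addr);
    void store_u32(uint64_t addr, uint32_t val);

    uint32_t amo_u32(uint64_t addr, const std::function<uint32_t(uint32_t)>& f) {
      // 1. Probe the address as a store without writing anything: any fault
      //    is reported as a store fault, and a misaligned address traps
      //    immediately rather than falling into byte-by-byte emulation,
      //    since AMOs must be naturally aligned.
      probe_store(addr, /*actually_store=*/false, /*require_alignment=*/true);
      // 2. Read-modify-write; an AMO returns the original memory value.
      const uint32_t lhs = load_u32(addr);
      store_u32(addr, f(lhs));
      return lhs;
    }
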
From 8948aef6dcad90fd80d6b8267e2fc2eeb4163a64 Mon Sep 17 00:00:00 2001
From: Ryan Buchner
Date: Thu, 21 Apr 2022 11:35:26 -0700
Subject: Add actually_store tag to misaligned_store function

It is passed along to the contained store_func.
---
 riscv/mmu.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index 415cf1a..bb4c27e 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -65,11 +65,11 @@ public:
 #endif
   }
 
-  inline void misaligned_store(reg_t addr, reg_t data, size_t size, uint32_t xlate_flags)
+  inline void misaligned_store(reg_t addr, reg_t data, size_t size, uint32_t xlate_flags, bool actually_store=true)
   {
 #ifdef RISCV_ENABLE_MISALIGNED
     for (size_t i = 0; i < size; i++)
-      store_uint8(addr + (target_big_endian? size-1-i : i), data >> (i * 8));
+      store_uint8(addr + (target_big_endian? size-1-i : i), data >> (i * 8), actually_store);
 #else
     bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags);
     throw trap_store_address_misaligned(gva, addr, 0, 0);
--
cgit v1.1

From 01b88b06693d91ee3d2e5b80020e0b934828d47d Mon Sep 17 00:00:00 2001
From: Ryan Buchner
Date: Thu, 21 Apr 2022 11:40:32 -0700
Subject: Pass actually_store from store_func to misaligned_store

In the future, someone may expect this functionality.
---
 riscv/mmu.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index bb4c27e..8964e29 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -150,7 +150,7 @@ public:
     void prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \
      if (unlikely(addr & (sizeof(type##_t)-1))) { \
        if (require_alignment) store_conditional_address_misaligned(addr); \
-       else return misaligned_store(addr, val, sizeof(type##_t), xlate_flags); \
+       else return misaligned_store(addr, val, sizeof(type##_t), xlate_flags, actually_store); \
      } \
      reg_t vpn = addr >> PGSHIFT; \
      size_t size = sizeof(type##_t); \
--
cgit v1.1

From 898c0dd6a0c7bfe5e332ab0b6c00b3fd3b3d06a0 Mon Sep 17 00:00:00 2001
From: Andrew Waterman
Date: Mon, 6 Jun 2022 20:34:24 -0700
Subject: Zero-extend instructions when fetching them from memory

...since we no longer rely on their being sign-extended.
---
 riscv/mmu.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index 8964e29..5e776a9 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -319,15 +319,15 @@ public:
 
     int length = insn_length(insn);
 
     if (likely(length == 4)) {
-      insn |= (insn_bits_t)from_le(*(const int16_t*)translate_insn_addr_to_host(addr + 2)) << 16;
+      insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 2)) << 16;
     } else if (length == 2) {
-      insn = (int16_t)insn;
+      // entire instruction already fetched
     } else if (length == 6) {
-      insn |= (insn_bits_t)from_le(*(const int16_t*)translate_insn_addr_to_host(addr + 4)) << 32;
+      insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 4)) << 32;
       insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 2)) << 16;
     } else {
       static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t");
-      insn |= (insn_bits_t)from_le(*(const int16_t*)translate_insn_addr_to_host(addr + 6)) << 48;
+      insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 6)) << 48;
       insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 4)) << 32;
       insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 2)) << 16;
     }
--
cgit v1.1

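The zero-extension patch above is easiest to appreciate with a concrete value. The following standalone program (not simulator code) shows what reading a parcel through int16_t does once the value is widened for the shift: the sign bit smears into the upper instruction bits, which is exactly what the switch to uint16_t avoids.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint16_t parcel = 0x8093;  // a 16-bit parcel with bit 15 set

      // Through int16_t the parcel is sign-extended when widened, so the
      // shift drags 1s into the upper instruction bits.
      const uint64_t sign_ext = (uint64_t)(int16_t)parcel << 16;
      // Through uint16_t it is zero-extended, as the fetch path now assumes.
      const uint64_t zero_ext = (uint64_t)parcel << 16;

      std::printf("%016llx\n", (unsigned long long)sign_ext); // ffffffff80930000
      std::printf("%016llx\n", (unsigned long long)zero_ext); // 0000000080930000
      return 0;
    }
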
From 031681b2f3bfa120769d9ead1ca866ca1be163b4 Mon Sep 17 00:00:00 2001
From: Scott Johnson
Date: Fri, 15 Jul 2022 18:06:24 -0700
Subject: Split up misaligned load into several steps

Since the middle step is about to get much more complex
---
 riscv/mmu.h | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index 5e776a9..6d8072b 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -56,8 +56,11 @@ public:
   {
 #ifdef RISCV_ENABLE_MISALIGNED
     reg_t res = 0;
-    for (size_t i = 0; i < size; i++)
-      res += (reg_t)load_uint8(addr + (target_big_endian? size-1-i : i)) << (i * 8);
+    for (size_t i = 0; i < size; i++) {
+      const reg_t byteaddr = addr + (target_big_endian? size-1-i : i);
+      const reg_t bytedata = (reg_t)load_uint8(byteaddr);
+      res += bytedata << (i * 8);
+    }
     return res;
 #else
     bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags);
--
cgit v1.1

From a2697ac775dac31e0aabf0223171b1e2f8a7fcde Mon Sep 17 00:00:00 2001
From: Scott Johnson
Date: Fri, 15 Jul 2022 18:08:00 -0700
Subject: Split up misaligned store into several steps

Since the last step is about to get much more complex
---
 riscv/mmu.h | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index 6d8072b..f652bf8 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -71,8 +71,11 @@ public:
   inline void misaligned_store(reg_t addr, reg_t data, size_t size, uint32_t xlate_flags, bool actually_store=true)
   {
 #ifdef RISCV_ENABLE_MISALIGNED
-    for (size_t i = 0; i < size; i++)
-      store_uint8(addr + (target_big_endian? size-1-i : i), data >> (i * 8), actually_store);
+    for (size_t i = 0; i < size; i++) {
+      const reg_t byteaddr = addr + (target_big_endian? size-1-i : i);
+      const reg_t bytedata = data >> (i * 8);
+      store_uint8(byteaddr, bytedata, actually_store);
+    }
 #else
     bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags);
     throw trap_store_address_misaligned(gva, addr, 0, 0);
--
cgit v1.1

From f0d84787423ef5a0329bb79c45775dbc7ec16de5 Mon Sep 17 00:00:00 2001
From: Scott Johnson
Date: Mon, 18 Jul 2022 07:00:45 -0700
Subject: Remove no-longer-necessary typecast

It was previously necessary because we were shifting left before
assigning to a reg_t, but that changed in the previous commit.
---
 riscv/mmu.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index f652bf8..fdc641f 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -58,7 +58,7 @@ public:
     reg_t res = 0;
     for (size_t i = 0; i < size; i++) {
       const reg_t byteaddr = addr + (target_big_endian? size-1-i : i);
-      const reg_t bytedata = (reg_t)load_uint8(byteaddr);
+      const reg_t bytedata = load_uint8(byteaddr);
       res += bytedata << (i * 8);
     }
     return res;
--
cgit v1.1

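These three patches reshape a loop whose underlying idea is simple: iteration i always produces the i-th least-significant byte of the value, and only the address it is fetched from depends on target endianness. A self-contained sketch of the reassembly, where fetch_byte is a hypothetical stand-in for the per-byte load:

    #include <cstddef>
    #include <cstdint>

    // Reassemble a size-byte value from single-byte loads, as misaligned_load
    // does after these patches.  fetch_byte is hypothetical.
    uint64_t assemble(uint64_t addr, size_t size, bool target_big_endian,
                      uint8_t (*fetch_byte)(uint64_t)) {
      uint64_t res = 0;
      for (size_t i = 0; i < size; i++) {
        // Value byte i lives at the low end of the address range on a
        // little-endian target and at the high end on a big-endian one.
        const uint64_t byteaddr = addr + (target_big_endian ? size - 1 - i : i);
        res += (uint64_t)fetch_byte(byteaddr) << (i * 8);
      }
      return res;
    }
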
From d61dceccdb43ae0025b3f02d825c3783e8ae10ea Mon Sep 17 00:00:00 2001
From: Scott Johnson
Date: Fri, 15 Jul 2022 18:15:09 -0700
Subject: Fix totally-broken misaligned HLV/HLVX

They were accessing memory using the current privilege mode instead of
the expected guest privilege.

Once #872 is fixed, I suspect we can greatly simplify this.
---
 riscv/mmu.h | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index fdc641f..70cb9e2 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -58,7 +58,11 @@ public:
     reg_t res = 0;
     for (size_t i = 0; i < size; i++) {
       const reg_t byteaddr = addr + (target_big_endian? size-1-i : i);
-      const reg_t bytedata = load_uint8(byteaddr);
+      const reg_t bytedata
+        = (RISCV_XLATE_VIRT_HLVX & xlate_flags) ? guest_load_x_uint8(byteaddr)
+        : (RISCV_XLATE_VIRT & xlate_flags) ? guest_load_uint8(byteaddr)
+        : load_uint8(byteaddr)
+        ;
       res += bytedata << (i * 8);
     }
     return res;
@@ -129,6 +133,7 @@ public:
   load_func(uint16, guest_load, RISCV_XLATE_VIRT)
   load_func(uint32, guest_load, RISCV_XLATE_VIRT)
   load_func(uint64, guest_load, RISCV_XLATE_VIRT)
+  load_func(uint8, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX) // only for use by misaligned HLVX
   load_func(uint16, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX)
   load_func(uint32, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX)
--
cgit v1.1

From 43ecb3424d68391e033f4df421c67c7f468fdff6 Mon Sep 17 00:00:00 2001
From: Scott Johnson
Date: Fri, 15 Jul 2022 18:20:52 -0700
Subject: Fix totally-broken misaligned HSV

It was accessing memory using the current privilege mode instead of the
expected guest privilege.

Once #872 is fixed, I suspect we can greatly simplify this.
---
 riscv/mmu.h | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index 70cb9e2..4e12805 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -78,7 +78,11 @@ public:
     for (size_t i = 0; i < size; i++) {
       const reg_t byteaddr = addr + (target_big_endian? size-1-i : i);
       const reg_t bytedata = data >> (i * 8);
-      store_uint8(byteaddr, bytedata, actually_store);
+      if (RISCV_XLATE_VIRT & xlate_flags) {
+        guest_store_uint8(byteaddr, bytedata, actually_store);
+      } else {
+        store_uint8(byteaddr, bytedata, actually_store);
+      }
     }
 #else
     bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags);
--
cgit v1.1

From b0d9782e13156abd5884fa73017a0286441202d1 Mon Sep 17 00:00:00 2001
From: Andrew Waterman
Date: Mon, 18 Jul 2022 15:15:41 -0700
Subject: Fix load/store performance under clang

Hopefully for the last time :-)
---
 riscv/mmu.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index 4e12805..6f24ec7 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -99,7 +99,7 @@ public:
 
   // template for functions that load an aligned value from memory
   #define load_func(type, prefix, xlate_flags) \
-    inline type##_t prefix##_##type(reg_t addr, bool require_alignment = false) { \
+    type##_t ALWAYS_INLINE prefix##_##type(reg_t addr, bool require_alignment = false) { \
      if (unlikely(addr & (sizeof(type##_t)-1))) { \
        if (require_alignment) load_reserved_address_misaligned(addr); \
        else return misaligned_load(addr, sizeof(type##_t), xlate_flags); \
@@ -162,7 +162,7 @@ public:
 
   // template for functions that store an aligned value to memory
   #define store_func(type, prefix, xlate_flags) \
-    void prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \
+    void ALWAYS_INLINE prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \
      if (unlikely(addr & (sizeof(type##_t)-1))) { \
        if (require_alignment) store_conditional_address_misaligned(addr); \
        else return misaligned_store(addr, val, sizeof(type##_t), xlate_flags, actually_store); \
--
cgit v1.1

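Both hypervisor fixes above apply the same idea: each byte of a misaligned HLV/HLVX/HSV access must be re-issued through an accessor that carries the guest-translation flags, never through the plain current-privilege one. A minimal sketch of that dispatch, assuming hypothetical accessor names in place of spike's load_uint8, guest_load_uint8, and guest_load_x_uint8:

    #include <cstdint>

    // Mirrors RISCV_XLATE_VIRT / RISCV_XLATE_VIRT_HLVX from the diffs above.
    constexpr uint32_t XLATE_VIRT      = 1u << 0;  // use guest translation
    constexpr uint32_t XLATE_VIRT_HLVX = 1u << 1;  // HLVX: execute permission

    // Hypothetical per-byte accessors, declared for self-containment.
    uint8_t load_byte(uint64_t addr);
    uint8_t guest_load_byte(uint64_t addr);
    uint8_t guest_load_x_byte(uint64_t addr);

    // One byte of a misaligned access, routed by translation flags so that a
    // hypervisor load/store never silently uses the current privilege mode.
    uint8_t load_one_byte(uint64_t addr, uint32_t xlate_flags) {
      if (xlate_flags & XLATE_VIRT_HLVX)
        return guest_load_x_byte(addr);  // HLVX checks execute permission
      if (xlate_flags & XLATE_VIRT)
        return guest_load_byte(addr);    // HLV/HSV use guest translation
      return load_byte(addr);            // ordinary access
    }
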
From 2667f611fb258ffdfb7741521e6dd8aac61f30ea Mon Sep 17 00:00:00 2001
From: Scott Johnson
Date: Tue, 23 Aug 2022 10:51:24 -0700
Subject: Separate variables that contain two different things

No reason to use a variable misleadingly named 'paddr' to hold the
virtual address.
---
 riscv/mmu.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index 6f24ec7..db23892 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -263,8 +263,8 @@ public:
 
   void clean_inval(reg_t addr, bool clean, bool inval) {
     convert_load_traps_to_store_traps({
-      reg_t paddr = addr & ~(blocksz - 1);
-      paddr = translate(paddr, blocksz, LOAD, 0);
+      reg_t vaddr = addr & ~(blocksz - 1);
+      reg_t paddr = translate(vaddr, blocksz, LOAD, 0);
       if (auto host_addr = sim->addr_to_mem(paddr)) {
         if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD))
           tracer.clean_invalidate(paddr, blocksz, clean, inval);
--
cgit v1.1

From d3cb2470518cfb25ee205269deb060715df3427e Mon Sep 17 00:00:00 2001
From: Scott Johnson
Date: Tue, 23 Aug 2022 10:53:00 -0700
Subject: Constantize variables

Because it's always better to do so where possible.
---
 riscv/mmu.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index db23892..9a93f16 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -263,8 +263,8 @@ public:
 
   void clean_inval(reg_t addr, bool clean, bool inval) {
     convert_load_traps_to_store_traps({
-      reg_t vaddr = addr & ~(blocksz - 1);
-      reg_t paddr = translate(vaddr, blocksz, LOAD, 0);
+      const reg_t vaddr = addr & ~(blocksz - 1);
+      const reg_t paddr = translate(vaddr, blocksz, LOAD, 0);
       if (auto host_addr = sim->addr_to_mem(paddr)) {
         if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD))
           tracer.clean_invalidate(paddr, blocksz, clean, inval);
--
cgit v1.1

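The two lines these patches touch perform the cache-management address arithmetic: round the virtual address down to its cache-block base, then translate that base. A small worked sketch, assuming (as in the simulator) that blocksz is a power of two:

    #include <cassert>
    #include <cstdint>

    // Round a virtual address down to the base of its cache block, as
    // clean_inval does before translating.  blocksz must be a power of two.
    uint64_t block_base(uint64_t vaddr, uint64_t blocksz) {
      assert((blocksz & (blocksz - 1)) == 0);
      return vaddr & ~(blocksz - 1);
    }
    // e.g. block_base(0x1234, 64) == 0x1200: the whole 64-byte block that
    // contains 0x1234 is the unit being cleaned or invalidated.
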
From ccc9791807b59d848e7c8983cea49a16fb4a2912 Mon Sep 17 00:00:00 2001
From: YenHaoChen <39526191+YenHaoChen@users.noreply.github.com>
Date: Sat, 17 Sep 2022 02:59:57 +0800
Subject: Fix trigger never firing on executing an instruction on plugin devices (#1084)

The trigger matching only checks on TLB hit (after refill_tlb()).
However, instructions on plugin devices will never be filled into the
TLB; thus, triggers cannot fire when executing instructions on plugin
devices.

The PR removes the TLB-hit if-condition for trigger checking.

Co-authored-by: Howard Yen-Hao Chen
---
 riscv/mmu.h | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index 9a93f16..ca8b792 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -477,13 +477,11 @@ private:
     } else {
       result = tlb_data[vpn % TLB_ENTRIES];
     }
-    if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) {
-      target_endian<uint16_t>* ptr = (target_endian<uint16_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr);
-      triggers::action_t action;
-      auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, from_target(*ptr));
-      if (match != triggers::MATCH_NONE) {
-        throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, from_target(*ptr), action);
-      }
+    target_endian<uint16_t>* ptr = (target_endian<uint16_t>*)(result.host_offset + addr);
+    triggers::action_t action;
+    auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, from_target(*ptr));
+    if (match != triggers::MATCH_NONE) {
+      throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, from_target(*ptr), action);
     }
     return result;
   }
--
cgit v1.1

From b724db52f9d7be3e3068e5bf01ac939ece8d032b Mon Sep 17 00:00:00 2001
From: YenHaoChen
Date: Fri, 30 Sep 2022 11:48:44 +0800
Subject: Add has_data argument to trigger checking functions

The mcontrol trigger can select either address or data for checking.
The selection decides the priority of the trigger. For instance, the
address trigger has a higher priority than the page fault, and the page
fault has a higher priority than the data trigger.

The previous implementation only has checking functions for the data
trigger, which results in incorrect priority for the address trigger.
This commit adds a has_data argument to indicate an address-only
trigger and thereby establish the trigger's priority.
---
 riscv/mmu.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index ca8b792..ebe4f9b 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -113,7 +113,7 @@ public:
      if ((xlate_flags) == 0 && unlikely(tlb_load_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
        type##_t data = from_target(*(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \
        if (!matched_trigger) { \
-         matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, data); \
+         matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, true, data); \
          if (matched_trigger) \
            throw *matched_trigger; \
        } \
@@ -178,7 +178,7 @@ public:
      else if ((xlate_flags) == 0 && unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
        if (actually_store) { \
          if (!matched_trigger) { \
-           matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, val); \
+           matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, val); \
            if (matched_trigger) \
              throw *matched_trigger; \
          } \
@@ -479,7 +479,7 @@ private:
     }
     target_endian<uint16_t>* ptr = (target_endian<uint16_t>*)(result.host_offset + addr);
     triggers::action_t action;
-    auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, from_target(*ptr));
+    auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, true, from_target(*ptr));
     if (match != triggers::MATCH_NONE) {
       throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, from_target(*ptr), action);
     }
@@ -491,13 +491,13 @@ private:
   }
 
   inline triggers::matched_t *trigger_exception(triggers::operation_t operation,
-      reg_t address, reg_t data)
+      reg_t address, bool has_data, reg_t data=0)
   {
     if (!proc) {
       return NULL;
     }
     triggers::action_t action;
-    auto match = proc->TM.memory_access_match(&action, operation, address, data);
+    auto match = proc->TM.memory_access_match(&action, operation, address, has_data, data);
     if (match == triggers::MATCH_NONE)
       return NULL;
     if (match == triggers::MATCH_FIRE_BEFORE) {
--
cgit v1.1

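The priority scheme this message describes (address trigger above page fault, page fault above data trigger) is what has_data encodes, and it is what the next three patches enforce for execute, load, and store accesses. A self-contained sketch of the intended ordering, with hypothetical helpers standing in for spike's trigger and translation machinery:

    #include <cstdint>
    #include <stdexcept>

    // Hypothetical helpers, declared only so the sketch is self-contained.
    bool addr_trigger_matches(uint64_t vaddr);                // has_data == false
    bool data_trigger_matches(uint64_t vaddr, uint64_t data); // has_data == true
    uint64_t translate_or_fault(uint64_t vaddr);  // may throw a page fault
    uint64_t read_physical(uint64_t paddr);

    uint64_t access_with_priorities(uint64_t vaddr) {
      // 1. Address-only triggers outrank page faults, so they are checked
      //    before translation can fault.
      if (addr_trigger_matches(vaddr))
        throw std::runtime_error("address trigger fires first");
      // 2. Translation may now raise the page fault.
      const uint64_t paddr = translate_or_fault(vaddr);
      // 3. Data triggers rank below page faults: they need the accessed
      //    value, so they can only be evaluated after the access succeeds.
      const uint64_t data = read_physical(paddr);
      if (data_trigger_matches(vaddr, data))
        throw std::runtime_error("data trigger fires last");
      return data;
    }
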
From a5752cebafa75b8dc539ee327bac8630e3fb84f1 Mon Sep 17 00:00:00 2001
From: YenHaoChen
Date: Fri, 30 Sep 2022 11:55:57 +0800
Subject: Fix priority of mcontrol trigger execute address before

The spec defines that the mcontrol execute address trigger has a higher
priority than the page fault (Debug spec, Table 5.2). Thus, the trigger
check should happen before the translation. The previous implementation
checks the trigger after the translation, resulting in incorrect
priority: when a page fault and a trigger occur on the same
instruction, the page fault is raised, which contradicts the priority
requirement.

This commit adds an address-only trigger check before the translation.
The trigger will now fire on the instruction instead of the page fault
in that case.
---
 riscv/mmu.h | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index ebe4f9b..d076aeb 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -471,6 +471,11 @@ private:
     reg_t vpn = addr >> PGSHIFT;
     if (likely(tlb_insn_tag[vpn % TLB_ENTRIES] == vpn))
       return tlb_data[vpn % TLB_ENTRIES];
+    triggers::action_t action;
+    auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, false);
+    if (match != triggers::MATCH_NONE) {
+      throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, 0, action);
+    }
     tlb_entry_t result;
     if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] != (vpn | TLB_CHECK_TRIGGERS))) {
       result = fetch_slow_path(addr);
@@ -478,8 +483,7 @@ private:
       result = tlb_data[vpn % TLB_ENTRIES];
     }
     target_endian<uint16_t>* ptr = (target_endian<uint16_t>*)(result.host_offset + addr);
-    triggers::action_t action;
-    auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, true, from_target(*ptr));
+    match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, true, from_target(*ptr));
     if (match != triggers::MATCH_NONE) {
       throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, from_target(*ptr), action);
     }
--
cgit v1.1

From 99cb603973b3f9575b411b80934035f3ee7fbcf3 Mon Sep 17 00:00:00 2001
From: YenHaoChen
Date: Fri, 30 Sep 2022 12:01:09 +0800
Subject: Fix priority of mcontrol trigger load address before

The spec defines that the mcontrol load address trigger has a higher
priority than page faults and address-misaligned exceptions (Debug
spec, Table 5.2). Thus, the trigger check should happen before the
translation and alignment checks. The previous implementation checks
the trigger after translation and alignment, resulting in incorrect
priority: when a page fault and a trigger occur on the same
instruction, the page fault is raised, which contradicts the priority
requirement.

This commit adds an address-only trigger check before the misaligned
check and the translation. The trigger will now fire on the instruction
instead of the page fault in that case.
---
 riscv/mmu.h | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index d076aeb..3a06c1e 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -101,6 +101,11 @@ public:
   #define load_func(type, prefix, xlate_flags) \
     type##_t ALWAYS_INLINE prefix##_##type(reg_t addr, bool require_alignment = false) { \
      if (unlikely(addr & (sizeof(type##_t)-1))) { \
+      if (!matched_trigger) { \
+        matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, false); \
+        if (matched_trigger) \
+          throw *matched_trigger; \
+      } \
       if (require_alignment) load_reserved_address_misaligned(addr); \
       else return misaligned_load(addr, sizeof(type##_t), xlate_flags); \
      } \
--
cgit v1.1

From d52858f3a80d3f87bfedffbff1fec34245a63082 Mon Sep 17 00:00:00 2001
From: YenHaoChen
Date: Fri, 30 Sep 2022 12:05:40 +0800
Subject: Fix priority of mcontrol trigger store address/data before

The spec defines that the mcontrol store address/data trigger has a
higher priority than page faults and address-misaligned exceptions
(Debug spec, Table 5.2). Thus, the trigger check should happen before
the translation and alignment checks. The previous implementation
checks the trigger after translation and alignment, resulting in
incorrect priority: when a page fault and a trigger occur on the same
instruction, the page fault is raised, which contradicts the priority
requirement.

This commit moves the trigger check before the misaligned check and the
translation. The trigger will now fire on the instruction instead of
the page fault in that case.
---
 riscv/mmu.h | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index 3a06c1e..40a435f 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -169,6 +169,13 @@ public:
   #define store_func(type, prefix, xlate_flags) \
     void ALWAYS_INLINE prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \
      if (unlikely(addr & (sizeof(type##_t)-1))) { \
+      if (actually_store) { \
+        if (!matched_trigger) { \
+          matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, val); \
+          if (matched_trigger) \
+            throw *matched_trigger; \
+        } \
+      } \
       if (require_alignment) store_conditional_address_misaligned(addr); \
       else return misaligned_store(addr, val, sizeof(type##_t), xlate_flags, actually_store); \
      } \
--
cgit v1.1

From ce69fb5db97ecf240336b7826dd9dddeb32e5dca Mon Sep 17 00:00:00 2001
From: Andrew Waterman
Date: Thu, 22 Sep 2022 17:34:33 -0700
Subject: Suppress most unused variable warnings

---
 riscv/mmu.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index ca8b792..1cd614b 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -52,7 +52,7 @@ public:
 #define RISCV_XLATE_VIRT (1U << 0)
 #define RISCV_XLATE_VIRT_HLVX (1U << 1)
 
-  inline reg_t misaligned_load(reg_t addr, size_t size, uint32_t xlate_flags)
+  inline reg_t misaligned_load(reg_t addr, size_t UNUSED size, uint32_t xlate_flags)
   {
 #ifdef RISCV_ENABLE_MISALIGNED
     reg_t res = 0;
@@ -72,7 +72,7 @@ public:
 #endif
   }
 
-  inline void misaligned_store(reg_t addr, reg_t data, size_t size, uint32_t xlate_flags, bool actually_store=true)
+  inline void misaligned_store(reg_t addr, reg_t UNUSED data, size_t UNUSED size, uint32_t xlate_flags, bool UNUSED actually_store=true)
   {
 #ifdef RISCV_ENABLE_MISALIGNED
     for (size_t i = 0; i < size; i++) {
--
cgit v1.1

From c20c4a2331a6a2f2cf9e170a9b387024ef543b47 Mon Sep 17 00:00:00 2001
From: Andrew Waterman
Date: Thu, 22 Sep 2022 18:01:06 -0700
Subject: Suppress most unused-variable warnings

---
 riscv/mmu.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index 1cd614b..4d52618 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -91,7 +91,7 @@ public:
   }
 
 #ifndef RISCV_ENABLE_COMMITLOG
-# define READ_MEM(addr, size) ({})
+# define READ_MEM(addr, size) ((void)(addr), (void)(size))
 #else
 # define READ_MEM(addr, size) \
   proc->state.log_mem_read.push_back(std::make_tuple(addr, 0, size));
@@ -154,7 +154,7 @@ public:
   load_func(int64, guest_load, RISCV_XLATE_VIRT)
 
 #ifndef RISCV_ENABLE_COMMITLOG
-# define WRITE_MEM(addr, value, size) ({})
+# define WRITE_MEM(addr, value, size) ((void)(addr), (void)(value), (void)(size))
 #else
 # define WRITE_MEM(addr, val, size) \
   proc->state.log_mem_write.push_back(std::make_tuple(addr, val, size));
@@ -265,7 +265,7 @@ public:
     convert_load_traps_to_store_traps({
       const reg_t vaddr = addr & ~(blocksz - 1);
       const reg_t paddr = translate(vaddr, blocksz, LOAD, 0);
-      if (auto host_addr = sim->addr_to_mem(paddr)) {
+      if (sim->addr_to_mem(paddr)) {
        if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD))
          tracer.clean_invalidate(paddr, blocksz, clean, inval);
       } else {
--
cgit v1.1

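A short aside on the idiom the two warning-suppression patches introduce for the commit-log macros: a comma expression of (void) casts 'uses' every macro argument without generating any code, which keeps unused-variable warnings quiet in the no-commit-log build and also retires the non-standard ({}) statement expression. An illustrative example (the macro name here is invented):

    #include <cstdint>

    // Invented macro name; mirrors READ_MEM/WRITE_MEM in the no-logging build.
    #define LOG_MEM_DISABLED(addr, size) ((void)(addr), (void)(size))

    uint64_t demo(uint64_t addr) {
      uint64_t size = 8;             // referenced only by the logging macro
      LOG_MEM_DISABLED(addr, size);  // expands to no code, but counts as a use
      return addr;
    }
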
From 749ead90a5c254ca23d54ef3e0669e51df127a5d Mon Sep 17 00:00:00 2001
From: Andrew Waterman
Date: Wed, 5 Oct 2022 18:09:53 -0700
Subject: Move all uncommon-case load functionality into load_slow_path

As a side effect, misaligned loads now behave the same as aligned loads
with respect to triggers: only the first byte is checked.
---
 riscv/mmu.h | 64 +++++++++++---------------------------------------------------
 1 file changed, 9 insertions(+), 55 deletions(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index 8b9ff9b..5d761dc 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -52,26 +52,6 @@ public:
 #define RISCV_XLATE_VIRT (1U << 0)
 #define RISCV_XLATE_VIRT_HLVX (1U << 1)
 
-  inline reg_t misaligned_load(reg_t addr, size_t UNUSED size, uint32_t xlate_flags)
-  {
-#ifdef RISCV_ENABLE_MISALIGNED
-    reg_t res = 0;
-    for (size_t i = 0; i < size; i++) {
-      const reg_t byteaddr = addr + (target_big_endian? size-1-i : i);
-      const reg_t bytedata
-        = (RISCV_XLATE_VIRT_HLVX & xlate_flags) ? guest_load_x_uint8(byteaddr)
-        : (RISCV_XLATE_VIRT & xlate_flags) ? guest_load_uint8(byteaddr)
-        : load_uint8(byteaddr)
-        ;
-      res += bytedata << (i * 8);
-    }
-    return res;
-#else
-    bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags);
-    throw trap_load_address_misaligned(gva, addr, 0, 0);
-#endif
-  }
-
 #ifndef RISCV_ENABLE_COMMITLOG
 # define READ_MEM(addr, size) ((void)(addr), (void)(size))
 #else
@@ -100,35 +80,19 @@ public:
 
   // template for functions that load an aligned value from memory
   #define load_func(type, prefix, xlate_flags) \
     type##_t ALWAYS_INLINE prefix##_##type(reg_t addr, bool require_alignment = false) { \
-     if (unlikely(addr & (sizeof(type##_t)-1))) { \
-      if (!matched_trigger) { \
-        matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, false); \
-        if (matched_trigger) \
-          throw *matched_trigger; \
-      } \
-      if (require_alignment) load_reserved_address_misaligned(addr); \
-      else return misaligned_load(addr, sizeof(type##_t), xlate_flags); \
-     } \
      reg_t vpn = addr >> PGSHIFT; \
      size_t size = sizeof(type##_t); \
-     if ((xlate_flags) == 0 && likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn)) { \
+     bool aligned = (addr & (size - 1)) == 0; \
+     bool tlb_hit = tlb_load_tag[vpn % TLB_ENTRIES] == vpn; \
+     if (likely((xlate_flags) == 0 && aligned && tlb_hit)) { \
       if (proc) READ_MEM(addr, size); \
       return from_target(*(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \
-     } \
-     if ((xlate_flags) == 0 && unlikely(tlb_load_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
-      type##_t data = from_target(*(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \
-      if (!matched_trigger) { \
-        matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, true, data); \
-        if (matched_trigger) \
-          throw *matched_trigger; \
-      } \
+     } else { \
+      target_endian<type##_t> res; \
+      load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res, (xlate_flags), require_alignment); \
       if (proc) READ_MEM(addr, size); \
-      return data; \
+      return from_target(res); \
      } \
-     target_endian<type##_t> res; \
-     load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res, (xlate_flags)); \
-     if (proc) READ_MEM(addr, size); \
-     return from_target(res); \
    }
 
@@ -300,16 +263,6 @@ public:
       throw trap_load_access_fault((proc) ? proc->state.v : false, vaddr, 0, 0); // disallow LR to I/O space
   }
 
-  inline void load_reserved_address_misaligned(reg_t vaddr)
-  {
-    bool gva = proc ? proc->state.v : false;
-#ifdef RISCV_ENABLE_MISALIGNED
-    throw trap_load_access_fault(gva, vaddr, 0, 0);
-#else
-    throw trap_load_address_misaligned(gva, vaddr, 0, 0);
-#endif
-  }
-
   inline void store_conditional_address_misaligned(reg_t vaddr)
   {
     bool gva = proc ? proc->state.v : false;
@@ -471,7 +424,8 @@ private:
 
   // handle uncommon cases: TLB misses, page faults, MMIO
   tlb_entry_t fetch_slow_path(reg_t addr);
-  void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags);
+  void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags, bool require_alignment);
+  void load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags);
   void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store);
   bool mmio_load(reg_t addr, size_t len, uint8_t* bytes);
   bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes);
--
cgit v1.1

From 6311f7513aa150797f69ecac906978bb9e9fecbd Mon Sep 17 00:00:00 2001
From: Andrew Waterman
Date: Wed, 5 Oct 2022 18:38:02 -0700
Subject: Move all uncommon-case store functionality into store_slow_path

As a side effect, misaligned stores now behave the same as aligned
stores with respect to triggers: only the first byte is checked.
---
 riscv/mmu.h | 68 ++++++++++---------------------------------------------------
 1 file changed, 11 insertions(+), 57 deletions(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index 5d761dc..137b306 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -52,24 +52,6 @@ public:
 #define RISCV_XLATE_VIRT (1U << 0)
 #define RISCV_XLATE_VIRT_HLVX (1U << 1)
 
-  inline void misaligned_store(reg_t addr, reg_t UNUSED data, size_t UNUSED size, uint32_t xlate_flags, bool UNUSED actually_store=true)
-  {
-#ifdef RISCV_ENABLE_MISALIGNED
-    for (size_t i = 0; i < size; i++) {
-      const reg_t byteaddr = addr + (target_big_endian? size-1-i : i);
-      const reg_t bytedata = data >> (i * 8);
-      if (RISCV_XLATE_VIRT & xlate_flags) {
-        guest_store_uint8(byteaddr, bytedata, actually_store);
-      } else {
-        store_uint8(byteaddr, bytedata, actually_store);
-      }
-    }
-#else
-    bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags);
-    throw trap_store_address_misaligned(gva, addr, 0, 0);
-#endif
-  }
-
 #ifndef RISCV_ENABLE_COMMITLOG
 # define READ_MEM(addr, size) ((void)(addr), (void)(size))
 #else
@@ -131,39 +113,18 @@ public:
   // template for functions that store an aligned value to memory
   #define store_func(type, prefix, xlate_flags) \
     void ALWAYS_INLINE prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \
-     if (unlikely(addr & (sizeof(type##_t)-1))) { \
-      if (actually_store) { \
-        if (!matched_trigger) { \
-          matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, val); \
-          if (matched_trigger) \
-            throw *matched_trigger; \
-        } \
-      } \
-      if (require_alignment) store_conditional_address_misaligned(addr); \
-      else return misaligned_store(addr, val, sizeof(type##_t), xlate_flags, actually_store); \
-     } \
      reg_t vpn = addr >> PGSHIFT; \
      size_t size = sizeof(type##_t); \
-     if ((xlate_flags) == 0 && likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) { \
-      if (actually_store) { \
-        if (proc) WRITE_MEM(addr, val, size); \
-        *(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
-      } \
-     } \
-     else if ((xlate_flags) == 0 && unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
+     bool aligned = (addr & (size - 1)) == 0; \
+     bool tlb_hit = tlb_store_tag[vpn % TLB_ENTRIES] == vpn; \
+     if ((xlate_flags) == 0 && likely(aligned && tlb_hit)) { \
       if (actually_store) { \
-        if (!matched_trigger) { \
-          matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, val); \
-          if (matched_trigger) \
-            throw *matched_trigger; \
-        } \
        if (proc) WRITE_MEM(addr, val, size); \
        *(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
-      } \
-     } \
-     else { \
+      } \
+     } else { \
       target_endian<type##_t> target_val = to_target(val); \
-      store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&target_val, (xlate_flags), actually_store); \
+      store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&target_val, (xlate_flags), actually_store, require_alignment); \
       if (actually_store && proc) WRITE_MEM(addr, val, size); \
      } \
    }
@@ -263,20 +224,12 @@ public:
       throw trap_load_access_fault((proc) ? proc->state.v : false, vaddr, 0, 0); // disallow LR to I/O space
   }
 
-  inline void store_conditional_address_misaligned(reg_t vaddr)
-  {
-    bool gva = proc ? proc->state.v : false;
-#ifdef RISCV_ENABLE_MISALIGNED
-    throw trap_store_access_fault(gva, vaddr, 0, 0);
-#else
-    throw trap_store_address_misaligned(gva, vaddr, 0, 0);
-#endif
-  }
-
   inline bool check_load_reservation(reg_t vaddr, size_t size)
   {
-    if (vaddr & (size-1))
-      store_conditional_address_misaligned(vaddr);
+    if (vaddr & (size-1)) {
+      // Raise either access fault or misaligned exception
+      store_slow_path(vaddr, size, nullptr, 0, false, true);
+    }
 
     reg_t paddr = translate(vaddr, 1, STORE, 0);
     if (auto host_addr = sim->addr_to_mem(paddr))
@@ -426,7 +379,8 @@ private:
   tlb_entry_t fetch_slow_path(reg_t addr);
   void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags, bool require_alignment);
   void load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags);
-  void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store);
+  void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool require_alignment);
+  void store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store);
   bool mmio_load(reg_t addr, size_t len, uint8_t* bytes);
   bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes);
--
cgit v1.1

From 14410156b2d796dad0e439b9535057f1e4c5a13c Mon Sep 17 00:00:00 2001
From: Andrew Waterman
Date: Thu, 6 Oct 2022 14:31:07 -0700
Subject: Move uncommon-case fetch functionality into fetch_slow_path

---
 riscv/mmu.h | 18 +-----------------
 1 file changed, 1 insertion(+), 17 deletions(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index 137b306..74c9a71 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -391,23 +391,7 @@ private:
     reg_t vpn = addr >> PGSHIFT;
     if (likely(tlb_insn_tag[vpn % TLB_ENTRIES] == vpn))
       return tlb_data[vpn % TLB_ENTRIES];
-    triggers::action_t action;
-    auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, false);
-    if (match != triggers::MATCH_NONE) {
-      throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, 0, action);
-    }
-    tlb_entry_t result;
-    if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] != (vpn | TLB_CHECK_TRIGGERS))) {
-      result = fetch_slow_path(addr);
-    } else {
-      result = tlb_data[vpn % TLB_ENTRIES];
-    }
-    target_endian<uint16_t>* ptr = (target_endian<uint16_t>*)(result.host_offset + addr);
-    match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, true, from_target(*ptr));
-    if (match != triggers::MATCH_NONE) {
-      throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, from_target(*ptr), action);
-    }
-    return result;
+    return fetch_slow_path(addr);
   }
 
   inline const uint16_t* translate_insn_addr_to_host(reg_t addr) {
     return (uint16_t*)(translate_insn_addr(addr).host_offset + addr);
   }
--
cgit v1.1

From 197f3e2640a182c7734d781bf61f570457cce5b8 Mon Sep 17 00:00:00 2001
From: Andrew Waterman
Date: Thu, 6 Oct 2022 14:35:59 -0700
Subject: DRY in checking triggers

---
 riscv/mmu.h | 17 +----------------
 1 file changed, 1 insertion(+), 16 deletions(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index 74c9a71..8eeea31 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -384,6 +384,7 @@ private:
   bool mmio_load(reg_t addr, size_t len, uint8_t* bytes);
   bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes);
   bool mmio_ok(reg_t addr, access_type type);
+  void check_triggers(triggers::operation_t operation, reg_t address, bool has_data, reg_t data = 0);
   reg_t translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_flags);
 
   // ITLB lookup
@@ -398,22 +399,6 @@ private:
     return (uint16_t*)(translate_insn_addr(addr).host_offset + addr);
   }
 
-  inline triggers::matched_t *trigger_exception(triggers::operation_t operation,
-      reg_t address, bool has_data, reg_t data=0)
-  {
-    if (!proc) {
-      return NULL;
-    }
-    triggers::action_t action;
-    auto match = proc->TM.memory_access_match(&action, operation, address, has_data, data);
-    if (match == triggers::MATCH_NONE)
-      return NULL;
-    if (match == triggers::MATCH_FIRE_BEFORE) {
-      throw triggers::matched_t(operation, address, data, action);
-    }
-    return new triggers::matched_t(operation, address, data, action);
-  }
-
   reg_t pmp_homogeneous(reg_t addr, reg_t len);
   bool pmp_ok(reg_t addr, reg_t len, access_type type, reg_t mode);
--
cgit v1.1

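The slow-path refactors above all converge on one structure: the inlined fast path handles only the aligned, flag-free, TLB-hit case, and every uncommon case (misalignment, TLB miss, MMIO, triggers, faults) funnels into a single out-of-line function. A minimal sketch of that shape, with hypothetical names rather than spike's exact code:

    #include <cstdint>
    #include <cstring>

    constexpr size_t TLB_ENTRIES = 256;
    constexpr unsigned PGSHIFT = 12;

    struct tlb_entry { uintptr_t host_offset; };
    extern uint64_t tlb_load_tag[TLB_ENTRIES];
    extern tlb_entry tlb_data[TLB_ENTRIES];

    // Out-of-line handler for every uncommon case: misalignment, TLB miss,
    // MMIO, triggers, page faults.  Hypothetical, mirroring load_slow_path.
    void load_slow_path(uint64_t addr, size_t len, uint8_t* bytes,
                        uint32_t xlate_flags, bool require_alignment);

    inline uint32_t load_u32(uint64_t addr, uint32_t xlate_flags = 0,
                             bool require_alignment = false) {
      const uint64_t vpn = addr >> PGSHIFT;
      const bool aligned = (addr & 3) == 0;
      const bool tlb_hit = tlb_load_tag[vpn % TLB_ENTRIES] == vpn;
      if (xlate_flags == 0 && aligned && tlb_hit) {
        // Fast path: one tag compare plus a host memory read.
        uint32_t val;
        std::memcpy(&val,
                    (const void*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr),
                    sizeof(val));
        return val;
      }
      uint32_t res;  // everything else takes the one slow path
      load_slow_path(addr, sizeof(res), (uint8_t*)&res, xlate_flags,
                     require_alignment);
      return res;
    }
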
From 7b8114f707a7b2de9fd2d393b9d019180de83025 Mon Sep 17 00:00:00 2001
From: Andrew Waterman
Date: Thu, 6 Oct 2022 17:40:41 -0700
Subject: Don't use reexecution as the means to implement trigger-after

The scheme was based on the notion that memory accesses are idempotent
up until the point the trigger would've been hit, which isn't true in
the case of side-effecting loads and data-value triggers.

Instead, check the trigger on the next instruction fetch. To keep the
perf overhead minimal, perform this check on the I$ refill path, and
ensure that path is taken by flushing the I$.
---
 riscv/mmu.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index 8eeea31..01e74ef 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -247,6 +247,9 @@ public:
 
   inline icache_entry_t* refill_icache(reg_t addr, icache_entry_t* entry)
   {
+    if (matched_trigger)
+      throw *matched_trigger;
+
     auto tlb_entry = translate_insn_addr(addr);
     insn_bits_t insn = from_le(*(uint16_t*)(tlb_entry.host_offset + addr));
     int length = insn_length(insn);
--
cgit v1.1

From 062ef8868033605b56269b185e3584ef2370ac12 Mon Sep 17 00:00:00 2001
From: Andrew Waterman
Date: Thu, 13 Oct 2022 13:57:08 -0700
Subject: In triggers, use optional<reg_t> instead of {has_data, data}

---
 riscv/mmu.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'riscv/mmu.h')

diff --git a/riscv/mmu.h b/riscv/mmu.h
index 01e74ef..da84adc 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -387,7 +387,7 @@ private:
   bool mmio_load(reg_t addr, size_t len, uint8_t* bytes);
   bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes);
   bool mmio_ok(reg_t addr, access_type type);
-  void check_triggers(triggers::operation_t operation, reg_t address, bool has_data, reg_t data = 0);
+  void check_triggers(triggers::operation_t operation, reg_t address, std::optional<reg_t> data = std::nullopt);
   reg_t translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_flags);
 
   // ITLB lookup
--
cgit v1.1
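
A closing note on the last patch: the std::optional collapses the {has_data, data} pair into one argument whose emptiness means 'address-only check'. A hedged sketch of the pattern with a simplified signature, not spike's actual declaration:

    #include <cstdint>
    #include <optional>
    #include <stdexcept>

    // Simplified model of the new interface (not spike's exact declaration).
    bool trigger_matches(uint64_t address, std::optional<uint64_t> data);

    // An empty optional means an address-only check (run before translation);
    // a populated one carries the accessed value for data-match triggers.
    void check_triggers(uint64_t address,
                        std::optional<uint64_t> data = std::nullopt) {
      if (trigger_matches(address, data))
        throw std::runtime_error(data.has_value() ? "data trigger"
                                                  : "address trigger");
    }

    // Call sites:
    //   check_triggers(addr);         // before translation: address-only
    //   check_triggers(addr, value);  // after the access: value available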