 accel/tcg/cputlb.c | 40 +++++++++++++++++++++++++++-------------
 1 file changed, 27 insertions(+), 13 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index dd45e04..f90f431 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1761,7 +1761,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                                MemOpIdx oi, int size, int prot,
                                uintptr_t retaddr)
 {
-    size_t mmu_idx = get_mmuidx(oi);
+    uintptr_t mmu_idx = get_mmuidx(oi);
     MemOp mop = get_memop(oi);
     int a_bits = get_alignment_bits(mop);
     uintptr_t index;
@@ -1769,6 +1769,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     target_ulong tlb_addr;
     void *hostaddr;
 
+    tcg_debug_assert(mmu_idx < NB_MMU_MODES);
+
     /* Adjust the given return address. */
     retaddr -= GETPC_ADJ;
 
@@ -1908,18 +1910,20 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
             uintptr_t retaddr, MemOp op, bool code_read,
             FullLoadHelper *full_load)
 {
-    uintptr_t mmu_idx = get_mmuidx(oi);
-    uintptr_t index = tlb_index(env, mmu_idx, addr);
-    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
-    target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
     const size_t tlb_off = code_read ?
         offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
     const MMUAccessType access_type =
         code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
-    unsigned a_bits = get_alignment_bits(get_memop(oi));
+    const unsigned a_bits = get_alignment_bits(get_memop(oi));
+    const size_t size = memop_size(op);
+    uintptr_t mmu_idx = get_mmuidx(oi);
+    uintptr_t index;
+    CPUTLBEntry *entry;
+    target_ulong tlb_addr;
     void *haddr;
     uint64_t res;
-    size_t size = memop_size(op);
+
+    tcg_debug_assert(mmu_idx < NB_MMU_MODES);
 
     /* Handle CPU specific unaligned behaviour */
     if (addr & ((1 << a_bits) - 1)) {
@@ -1927,6 +1931,10 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
                              mmu_idx, retaddr);
     }
 
+    index = tlb_index(env, mmu_idx, addr);
+    entry = tlb_entry(env, mmu_idx, addr);
+    tlb_addr = code_read ? entry->addr_code : entry->addr_read;
+
     /* If the TLB entry is for a different page, reload and try again. */
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
@@ -2310,14 +2318,16 @@ static inline void QEMU_ALWAYS_INLINE
 store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
              MemOpIdx oi, uintptr_t retaddr, MemOp op)
 {
-    uintptr_t mmu_idx = get_mmuidx(oi);
-    uintptr_t index = tlb_index(env, mmu_idx, addr);
-    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
-    target_ulong tlb_addr = tlb_addr_write(entry);
     const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
-    unsigned a_bits = get_alignment_bits(get_memop(oi));
+    const unsigned a_bits = get_alignment_bits(get_memop(oi));
+    const size_t size = memop_size(op);
+    uintptr_t mmu_idx = get_mmuidx(oi);
+    uintptr_t index;
+    CPUTLBEntry *entry;
+    target_ulong tlb_addr;
     void *haddr;
-    size_t size = memop_size(op);
+
+    tcg_debug_assert(mmu_idx < NB_MMU_MODES);
 
     /* Handle CPU specific unaligned behaviour */
     if (addr & ((1 << a_bits) - 1)) {
@@ -2325,6 +2335,10 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
                              mmu_idx, retaddr);
     }
 
+    index = tlb_index(env, mmu_idx, addr);
+    entry = tlb_entry(env, mmu_idx, addr);
+    tlb_addr = tlb_addr_write(entry);
+
     /* If the TLB entry is for a different page, reload and try again. */
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
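The patch applies the same pattern to all three helpers: decode mmu_idx from the packed MemOpIdx, assert it is in range before any use, and defer the tlb_index()/tlb_entry() lookup until after the alignment check, so the TLB is only indexed once the value is known good. Below is a minimal, self-contained sketch of that pattern; the definitions are simplified stand-ins, not the tree's actual code (the MemOpIdx packing, NB_MMU_MODES value, tlb_table, lookup() and plain assert() in place of tcg_debug_assert() are all assumptions made for illustration).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NB_MMU_MODES 16                 /* upper bound on a valid mmu_idx */

typedef uint32_t MemOpIdx;              /* packed as (memop << 4) | mmu_idx */

static inline unsigned get_mmuidx(MemOpIdx oi)
{
    return oi & 15;                     /* low four bits hold the mmu index */
}

/* Hypothetical per-mode table, standing in for the per-mmu-idx TLB. */
static uint64_t tlb_table[NB_MMU_MODES];

static uint64_t lookup(MemOpIdx oi)
{
    uintptr_t mmu_idx = get_mmuidx(oi);

    /*
     * Validate the decoded index once, before any use: a corrupted
     * MemOpIdx is caught immediately, and every later tlb_table[mmu_idx]
     * access is visibly in bounds.
     */
    assert(mmu_idx < NB_MMU_MODES);

    return tlb_table[mmu_idx];
}

int main(void)
{
    MemOpIdx oi = (2u << 4) | 1;        /* a 4-byte op against mmu_idx 1 */
    printf("tlb_table[%u] = %llu\n", get_mmuidx(oi),
           (unsigned long long)lookup(oi));
    return 0;
}

Moving the tlb_index()/tlb_entry() calls below the unaligned-access hook in the real patch follows the same idea: nothing derived from mmu_idx is dereferenced until the assertion has run.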