Diffstat (limited to 'accel/tcg/cputlb.c')
-rw-r--r-- | accel/tcg/cputlb.c | 209 |
1 file changed, 55 insertions, 154 deletions
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index ae0fbcd..90c72c9 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1896,12 +1896,9 @@ static bool mmu_lookup(CPUArchState *env, target_ulong addr, MemOpIdx oi,
 /*
  * Probe for an atomic operation. Do not allow unaligned operations,
  * or io operations to proceed. Return the host address.
- *
- * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
  */
 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
-                               MemOpIdx oi, int size, int prot,
-                               uintptr_t retaddr)
+                               MemOpIdx oi, int size, uintptr_t retaddr)
 {
     uintptr_t mmu_idx = get_mmuidx(oi);
     MemOp mop = get_memop(oi);
@@ -1937,53 +1934,36 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     tlbe = tlb_entry(env, mmu_idx, addr);
 
     /* Check TLB entry and enforce page permissions. */
-    if (prot & PAGE_WRITE) {
-        tlb_addr = tlb_addr_write(tlbe);
-        if (!tlb_hit(tlb_addr, addr)) {
-            if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_STORE,
-                                addr & TARGET_PAGE_MASK)) {
-                tlb_fill(env_cpu(env), addr, size,
-                         MMU_DATA_STORE, mmu_idx, retaddr);
-                index = tlb_index(env, mmu_idx, addr);
-                tlbe = tlb_entry(env, mmu_idx, addr);
-            }
-            tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
+    tlb_addr = tlb_addr_write(tlbe);
+    if (!tlb_hit(tlb_addr, addr)) {
+        if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_STORE,
+                            addr & TARGET_PAGE_MASK)) {
+            tlb_fill(env_cpu(env), addr, size,
+                     MMU_DATA_STORE, mmu_idx, retaddr);
+            index = tlb_index(env, mmu_idx, addr);
+            tlbe = tlb_entry(env, mmu_idx, addr);
         }
+        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
+    }
 
-        if (prot & PAGE_READ) {
-            /*
-             * Let the guest notice RMW on a write-only page.
-             * We have just verified that the page is writable.
-             * Subpage lookups may have left TLB_INVALID_MASK set,
-             * but addr_read will only be -1 if PAGE_READ was unset.
-             */
-            if (unlikely(tlbe->addr_read == -1)) {
-                tlb_fill(env_cpu(env), addr, size,
-                         MMU_DATA_LOAD, mmu_idx, retaddr);
-                /*
-                 * Since we don't support reads and writes to different
-                 * addresses, and we do have the proper page loaded for
-                 * write, this shouldn't ever return. But just in case,
-                 * handle via stop-the-world.
-                 */
-                goto stop_the_world;
-            }
-            /* Collect TLB_WATCHPOINT for read. */
-            tlb_addr |= tlbe->addr_read;
-        }
-    } else /* if (prot & PAGE_READ) */ {
-        tlb_addr = tlbe->addr_read;
-        if (!tlb_hit(tlb_addr, addr)) {
-            if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_LOAD,
-                                addr & TARGET_PAGE_MASK)) {
-                tlb_fill(env_cpu(env), addr, size,
-                         MMU_DATA_LOAD, mmu_idx, retaddr);
-                index = tlb_index(env, mmu_idx, addr);
-                tlbe = tlb_entry(env, mmu_idx, addr);
-            }
-            tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
-        }
+    /*
+     * Let the guest notice RMW on a write-only page.
+     * We have just verified that the page is writable.
+     * Subpage lookups may have left TLB_INVALID_MASK set,
+     * but addr_read will only be -1 if PAGE_READ was unset.
+     */
+    if (unlikely(tlbe->addr_read == -1)) {
+        tlb_fill(env_cpu(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+        /*
+         * Since we don't support reads and writes to different
+         * addresses, and we do have the proper page loaded for
+         * write, this shouldn't ever return. But just in case,
+         * handle via stop-the-world.
+         */
+        goto stop_the_world;
     }
+    /* Collect TLB_WATCHPOINT for read. */
+    tlb_addr |= tlbe->addr_read;
 
     /* Notice an IO access or a needs-MMU-lookup access */
     if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
@@ -2000,11 +1980,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     }
 
     if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
-        QEMU_BUILD_BUG_ON(PAGE_READ != BP_MEM_READ);
-        QEMU_BUILD_BUG_ON(PAGE_WRITE != BP_MEM_WRITE);
-        /* therefore prot == watchpoint bits */
-        cpu_check_watchpoint(env_cpu(env), addr, size,
-                             full->attrs, prot, retaddr);
+        cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs,
+                             BP_MEM_READ | BP_MEM_WRITE, retaddr);
     }
 
     return hostaddr;
@@ -2575,89 +2552,45 @@ uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
     return ret;
 }
 
-uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
-                        MemOpIdx oi, uintptr_t ra)
-{
-    uint16_t ret;
-
-    tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUW);
-    ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
-    plugin_load_cb(env, addr, oi);
-    return ret;
-}
-
-uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
-                        MemOpIdx oi, uintptr_t ra)
-{
-    uint32_t ret;
-
-    tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUL);
-    ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
-    plugin_load_cb(env, addr, oi);
-    return ret;
-}
-
-uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
-                        MemOpIdx oi, uintptr_t ra)
-{
-    uint64_t ret;
-
-    tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUQ);
-    ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
-    plugin_load_cb(env, addr, oi);
-    return ret;
-}
-
-uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
-                        MemOpIdx oi, uintptr_t ra)
+uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
+                     MemOpIdx oi, uintptr_t ra)
 {
     uint16_t ret;
 
-    tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUW);
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
     ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
     plugin_load_cb(env, addr, oi);
     return ret;
 }
 
-uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
-                        MemOpIdx oi, uintptr_t ra)
+uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
+                     MemOpIdx oi, uintptr_t ra)
 {
     uint32_t ret;
 
-    tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUL);
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
     ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
     plugin_load_cb(env, addr, oi);
     return ret;
 }
 
-uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
-                        MemOpIdx oi, uintptr_t ra)
+uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
+                     MemOpIdx oi, uintptr_t ra)
 {
     uint64_t ret;
 
-    tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUQ);
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
    ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
     plugin_load_cb(env, addr, oi);
     return ret;
 }
 
-Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
-                       MemOpIdx oi, uintptr_t ra)
-{
-    Int128 ret;
-
-    tcg_debug_assert((get_memop(oi) & (MO_BSWAP|MO_SIZE)) == (MO_BE|MO_128));
-    ret = do_ld16_mmu(env, addr, oi, ra);
-    plugin_load_cb(env, addr, oi);
-    return ret;
-}
-
-Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
-                       MemOpIdx oi, uintptr_t ra)
+Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
+                    MemOpIdx oi, uintptr_t ra)
 {
     Int128 ret;
 
-    tcg_debug_assert((get_memop(oi) & (MO_BSWAP|MO_SIZE)) == (MO_LE|MO_128));
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
     ret = do_ld16_mmu(env, addr, oi, ra);
     plugin_load_cb(env, addr, oi);
    return ret;
@@ -2779,7 +2712,7 @@ static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p,
 
     case MO_ATOM_WITHIN16_PAIR:
         /* Since size > 8, this is the half that must be atomic. */
-        if (!HAVE_al16) {
+        if (!HAVE_ATOMIC128_RW) {
             cpu_loop_exit_atomic(env_cpu(env), ra);
         }
         return store_whole_le16(p->haddr, p->size, val_le);
@@ -3045,66 +2978,34 @@ void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
     plugin_store_cb(env, addr, oi);
 }
 
-void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
-                    MemOpIdx oi, uintptr_t retaddr)
-{
-    tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUW);
-    do_st2_mmu(env, addr, val, oi, retaddr);
-    plugin_store_cb(env, addr, oi);
-}
-
-void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
-                    MemOpIdx oi, uintptr_t retaddr)
-{
-    tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUL);
-    do_st4_mmu(env, addr, val, oi, retaddr);
-    plugin_store_cb(env, addr, oi);
-}
-
-void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
-                    MemOpIdx oi, uintptr_t retaddr)
-{
-    tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUQ);
-    do_st8_mmu(env, addr, val, oi, retaddr);
-    plugin_store_cb(env, addr, oi);
-}
-
-void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
-                    MemOpIdx oi, uintptr_t retaddr)
+void cpu_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+                 MemOpIdx oi, uintptr_t retaddr)
 {
-    tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUW);
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
     do_st2_mmu(env, addr, val, oi, retaddr);
     plugin_store_cb(env, addr, oi);
 }
 
-void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+void cpu_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                  MemOpIdx oi, uintptr_t retaddr)
 {
-    tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUL);
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
     do_st4_mmu(env, addr, val, oi, retaddr);
     plugin_store_cb(env, addr, oi);
 }
 
-void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
-                    MemOpIdx oi, uintptr_t retaddr)
+void cpu_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                 MemOpIdx oi, uintptr_t retaddr)
 {
-    tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUQ);
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
     do_st8_mmu(env, addr, val, oi, retaddr);
     plugin_store_cb(env, addr, oi);
 }
 
-void cpu_st16_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
-                     MemOpIdx oi, uintptr_t retaddr)
-{
-    tcg_debug_assert((get_memop(oi) & (MO_BSWAP|MO_SIZE)) == (MO_BE|MO_128));
-    do_st16_mmu(env, addr, val, oi, retaddr);
-    plugin_store_cb(env, addr, oi);
-}
-
-void cpu_st16_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
-                     MemOpIdx oi, uintptr_t retaddr)
+void cpu_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val,
+                  MemOpIdx oi, uintptr_t retaddr)
 {
-    tcg_debug_assert((get_memop(oi) & (MO_BSWAP|MO_SIZE)) == (MO_LE|MO_128));
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
     do_st16_mmu(env, addr, val, oi, retaddr);
     plugin_store_cb(env, addr, oi);
 }
@@ -3137,7 +3038,7 @@ void cpu_st16_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
 #include "atomic_template.h"
 #endif
 
-#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
+#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
 #define DATA_SIZE 16
 #include "atomic_template.h"
 #endif
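
Editorial note (not part of the commit): after this change, endianness travels in the MemOpIdx rather than in the helper name, so the single cpu_ldl_mmu() entry point replaces the cpu_ldl_be_mmu()/cpu_ldl_le_mmu() pair, and likewise for the other sizes and the stores. A minimal caller might look like the sketch below; make_memop_idx(), MO_BEUL and MO_LEUL are existing QEMU definitions, while the wrapper function itself and its parameters are hypothetical.

/*
 * Hypothetical caller of the unified helper introduced above.
 * The MemOp packed into the MemOpIdx selects the endianness;
 * the helper's debug assert only checks the access size (MO_32).
 */
static uint32_t example_load_u32(CPUArchState *env, target_ulong addr,
                                 int mmu_idx, bool big_endian, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(big_endian ? MO_BEUL : MO_LEUL, mmu_idx);

    /* Formerly cpu_ldl_be_mmu()/cpu_ldl_le_mmu(); now one entry point. */
    return cpu_ldl_mmu(env, addr, oi, ra);
}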