Diffstat (limited to 'accel/tcg/cputlb.c')
-rw-r--r--  accel/tcg/cputlb.c | 98
1 file changed, 37 insertions, 61 deletions
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index fb22048..d9fb68d 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -22,12 +22,11 @@
 #include "accel/tcg/cpu-ops.h"
 #include "exec/exec-all.h"
 #include "exec/page-protection.h"
-#include "exec/memory.h"
-#include "exec/cpu_ldst.h"
+#include "system/memory.h"
+#include "accel/tcg/cpu-ldst.h"
 #include "exec/cputlb.h"
 #include "exec/tb-flush.h"
-#include "exec/memory-internal.h"
-#include "exec/ram_addr.h"
+#include "system/ram_addr.h"
 #include "exec/mmu-access-type.h"
 #include "exec/tlb-common.h"
 #include "exec/vaddr.h"
@@ -35,18 +34,22 @@
 #include "qemu/error-report.h"
 #include "exec/log.h"
 #include "exec/helper-proto-common.h"
+#include "exec/tlb-flags.h"
 #include "qemu/atomic.h"
 #include "qemu/atomic128.h"
 #include "tb-internal.h"
 #include "trace.h"
 #include "tb-hash.h"
 #include "tb-internal.h"
+#include "tlb-bounds.h"
 #include "internal-common.h"
 #include "internal-target.h"
 #ifdef CONFIG_PLUGIN
 #include "qemu/plugin-memory.h"
 #endif
 #include "tcg/tcg-ldst.h"
+#include "backend-ldst.h"
+
 
 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
 /* #define DEBUG_TLB */
@@ -882,18 +885,17 @@ void tlb_unprotect_code(ram_addr_t ram_addr)
  *
  * Called with tlb_c.lock held.
  */
-static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
+static void tlb_reset_dirty_range_locked(CPUTLBEntryFull *full, CPUTLBEntry *ent,
                                          uintptr_t start, uintptr_t length)
 {
-    uintptr_t addr = tlb_entry->addr_write;
+    const uintptr_t addr = ent->addr_write;
+    int flags = addr | full->slow_flags[MMU_DATA_STORE];
 
-    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
-                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
-        addr &= TARGET_PAGE_MASK;
-        addr += tlb_entry->addend;
-        if ((addr - start) < length) {
-            qatomic_set(&tlb_entry->addr_write,
-                        tlb_entry->addr_write | TLB_NOTDIRTY);
+    flags &= TLB_INVALID_MASK | TLB_MMIO | TLB_DISCARD_WRITE | TLB_NOTDIRTY;
+    if (flags == 0) {
+        uintptr_t host = (addr & TARGET_PAGE_MASK) + ent->addend;
+        if ((host - start) < length) {
+            qatomic_set(&ent->addr_write, addr | TLB_NOTDIRTY);
         }
     }
 }
@@ -912,23 +914,25 @@ static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
  * thing actually updated is the target TLB entry ->addr_write flags.
  */
-void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
+void tlb_reset_dirty(CPUState *cpu, uintptr_t start, uintptr_t length)
 {
     int mmu_idx;
 
     qemu_spin_lock(&cpu->neg.tlb.c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+        CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
+        CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
+        unsigned int n = tlb_n_entries(fast);
         unsigned int i;
-        unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);
 
         for (i = 0; i < n; i++) {
-            tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
-                                         start1, length);
+            tlb_reset_dirty_range_locked(&desc->fulltlb[i], &fast->table[i],
+                                         start, length);
         }
 
         for (i = 0; i < CPU_VTLB_SIZE; i++) {
-            tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
-                                         start1, length);
+            tlb_reset_dirty_range_locked(&desc->vfulltlb[i], &desc->vtable[i],
+                                         start, length);
         }
     }
     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
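The rework of tlb_reset_dirty_range_locked() above is the functional core of the patch: the dirty scan now consults the per-entry slow-path flags in addition to the fast-path bits kept in addr_write, which is why the function gains the CPUTLBEntryFull argument. A condensed sketch of the new predicate, for illustration only (names taken from the hunk above):

    /* Any of these bits, whether in the fast-path addr_write word or in
     * the per-entry slow_flags for stores, means the page cannot be
     * marked write-clean via the fast path. */
    int flags = ent->addr_write | full->slow_flags[MMU_DATA_STORE];
    if (!(flags & (TLB_INVALID_MASK | TLB_MMIO |
                   TLB_DISCARD_WRITE | TLB_NOTDIRTY))) {
        /* Entry is eligible: tag addr_write with TLB_NOTDIRTY if the
         * host address falls inside [start, start + length). */
    }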
@@ -2321,7 +2325,7 @@ static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     MMULookupLocals l;
     bool crosspage;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
     tcg_debug_assert(!crosspage);
 
@@ -2336,7 +2340,7 @@ static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     uint16_t ret;
     uint8_t a, b;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
     if (likely(!crosspage)) {
         return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2360,7 +2364,7 @@ static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     bool crosspage;
     uint32_t ret;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
     if (likely(!crosspage)) {
         return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2381,7 +2385,7 @@ static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     bool crosspage;
     uint64_t ret;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
     if (likely(!crosspage)) {
         return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2404,7 +2408,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
     Int128 ret;
     int first;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l);
     if (likely(!crosspage)) {
         if (unlikely(l.page[0].flags & TLB_MMIO)) {
@@ -2732,7 +2736,7 @@ static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
     MMULookupLocals l;
     bool crosspage;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     tcg_debug_assert(!crosspage);
 
@@ -2746,7 +2750,7 @@ static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
     bool crosspage;
     uint8_t a, b;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2768,7 +2772,7 @@ static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
     MMULookupLocals l;
     bool crosspage;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
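The only change in these load/store hunks is that cpu_req_mo() now receives the CPUState explicitly. All of the do_ld*_mmu()/do_st*_mmu() helpers take their memory operation and MMU index packed into a MemOpIdx; as a reminder of that packing convention, a caller-side sketch (not part of this diff; retaddr and the mmu-index choice are illustrative):

    /* Pack a little-endian 32-bit data load for the current MMU mode,
     * then hand it to the slow-path helper. */
    MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, false));
    uint32_t val = do_ld4_mmu(cs, addr, oi, retaddr, MMU_DATA_LOAD);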
@@ -2789,7 +2793,7 @@ static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
     MMULookupLocals l;
     bool crosspage;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2812,7 +2816,7 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
     uint64_t a, b;
     int first;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
    crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         if (unlikely(l.page[0].flags & TLB_MMIO)) {
@@ -2897,53 +2901,25 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
 
 /* Code access functions. */
 
-uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
-{
-    CPUState *cs = env_cpu(env);
-    MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(cs, true));
-    return do_ld1_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
-}
-
-uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
-{
-    CPUState *cs = env_cpu(env);
-    MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(cs, true));
-    return do_ld2_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
-}
-
-uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
-{
-    CPUState *cs = env_cpu(env);
-    MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, true));
-    return do_ld4_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
-}
-
-uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
-{
-    CPUState *cs = env_cpu(env);
-    MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(cs, true));
-    return do_ld8_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
-}
-
-uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
+uint8_t cpu_ldb_code_mmu(CPUArchState *env, vaddr addr,
                          MemOpIdx oi, uintptr_t retaddr)
 {
     return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
 }
 
-uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
+uint16_t cpu_ldw_code_mmu(CPUArchState *env, vaddr addr,
                           MemOpIdx oi, uintptr_t retaddr)
 {
     return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
 }
 
-uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
+uint32_t cpu_ldl_code_mmu(CPUArchState *env, vaddr addr,
                           MemOpIdx oi, uintptr_t retaddr)
 {
     return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
 }
 
-uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
+uint64_t cpu_ldq_code_mmu(CPUArchState *env, vaddr addr,
                           MemOpIdx oi, uintptr_t retaddr)
 {
     return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
 }
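The last hunk removes the four cpu_ld*_code() convenience wrappers outright, leaving only the *_code_mmu entry points (now taking vaddr rather than abi_ptr). A caller that still wants the old one-argument behavior can reproduce it on top of what remains; a sketch for the 32-bit case, where the function name is hypothetical:

    /* What the removed cpu_ldl_code() did: build the same MemOpIdx the
     * wrapper built, then fetch through the remaining entry point. */
    static uint32_t ldl_code_compat(CPUArchState *env, vaddr addr)
    {
        CPUState *cs = env_cpu(env);
        MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, true));
        return cpu_ldl_code_mmu(env, addr, oi, 0);
    }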