Diffstat (limited to 'accel/tcg/cputlb.c')
-rw-r--r-- | accel/tcg/cputlb.c | 391
1 file changed, 200 insertions, 191 deletions
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 117b516..87e14bd 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -19,15 +19,17 @@
 #include "qemu/osdep.h"
 #include "qemu/main-loop.h"
-#include "hw/core/tcg-cpu-ops.h"
-#include "exec/exec-all.h"
+#include "qemu/target-info.h"
+#include "accel/tcg/cpu-ops.h"
+#include "accel/tcg/iommu.h"
+#include "accel/tcg/probe.h"
 #include "exec/page-protection.h"
-#include "exec/memory.h"
-#include "exec/cpu_ldst.h"
+#include "system/memory.h"
+#include "accel/tcg/cpu-ldst-common.h"
+#include "accel/tcg/cpu-mmu-index.h"
 #include "exec/cputlb.h"
 #include "exec/tb-flush.h"
-#include "exec/memory-internal.h"
-#include "exec/ram_addr.h"
+#include "system/ram_addr.h"
 #include "exec/mmu-access-type.h"
 #include "exec/tlb-common.h"
 #include "exec/vaddr.h"
@@ -35,18 +37,21 @@
 #include "qemu/error-report.h"
 #include "exec/log.h"
 #include "exec/helper-proto-common.h"
+#include "exec/tlb-flags.h"
 #include "qemu/atomic.h"
 #include "qemu/atomic128.h"
-#include "exec/translate-all.h"
+#include "tb-internal.h"
 #include "trace.h"
 #include "tb-hash.h"
+#include "tb-internal.h"
+#include "tlb-bounds.h"
 #include "internal-common.h"
-#include "internal-target.h"
 #ifdef CONFIG_PLUGIN
 #include "qemu/plugin-memory.h"
 #endif
 #include "tcg/tcg-ldst.h"
-#include "tcg/oversized-guest.h"
+#include "backend-ldst.h"
+
 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
 /* #define DEBUG_TLB */
@@ -104,26 +109,15 @@ static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
 {
     /* Do not rearrange the CPUTLBEntry structure members. */
     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) !=
-                      MMU_DATA_LOAD * sizeof(uint64_t));
+                      MMU_DATA_LOAD * sizeof(uintptr_t));
     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
-                      MMU_DATA_STORE * sizeof(uint64_t));
+                      MMU_DATA_STORE * sizeof(uintptr_t));
     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
-                      MMU_INST_FETCH * sizeof(uint64_t));
+                      MMU_INST_FETCH * sizeof(uintptr_t));
 
-#if TARGET_LONG_BITS == 32
-    /* Use qatomic_read, in case of addr_write; only care about low bits. */
-    const uint32_t *ptr = (uint32_t *)&entry->addr_idx[access_type];
-    ptr += HOST_BIG_ENDIAN;
-    return qatomic_read(ptr);
-#else
-    const uint64_t *ptr = &entry->addr_idx[access_type];
-# if TCG_OVERSIZED_GUEST
-    return *ptr;
-# else
+    const uintptr_t *ptr = &entry->addr_idx[access_type];
     /* ofs might correspond to .addr_write, so use qatomic_read */
     return qatomic_read(ptr);
-# endif
-#endif
 }
 
 static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
@@ -779,19 +773,19 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
 
     assert_cpu_is_self(cpu);
 
+    /* If no page bits are significant, this devolves to tlb_flush. */
+    if (bits < TARGET_PAGE_BITS) {
+        tlb_flush_by_mmuidx(cpu, idxmap);
+        return;
+    }
     /*
      * If all bits are significant, and len is small,
      * this devolves to tlb_flush_page.
      */
-    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
+    if (len <= TARGET_PAGE_SIZE && bits >= target_long_bits()) {
         tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
         return;
     }
-    /* If no page bits are significant, this devolves to tlb_flush. */
-    if (bits < TARGET_PAGE_BITS) {
-        tlb_flush_by_mmuidx(cpu, idxmap);
-        return;
-    }
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
@@ -817,19 +811,19 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
     TLBFlushRangeData d, *p;
     CPUState *dst_cpu;
 
+    /* If no page bits are significant, this devolves to tlb_flush. */
+    if (bits < TARGET_PAGE_BITS) {
+        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
+        return;
+    }
     /*
      * If all bits are significant, and len is small,
      * this devolves to tlb_flush_page.
      */
-    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
+    if (len <= TARGET_PAGE_SIZE && bits >= target_long_bits()) {
         tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
         return;
     }
-    /* If no page bits are significant, this devolves to tlb_flush. */
-    if (bits < TARGET_PAGE_BITS) {
-        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
-        return;
-    }
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
@@ -893,26 +887,17 @@ void tlb_unprotect_code(ram_addr_t ram_addr)
  *
  * Called with tlb_c.lock held.
  */
-static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
+static void tlb_reset_dirty_range_locked(CPUTLBEntryFull *full, CPUTLBEntry *ent,
                                          uintptr_t start, uintptr_t length)
 {
-    uintptr_t addr = tlb_entry->addr_write;
-
-    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
-                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
-        addr &= TARGET_PAGE_MASK;
-        addr += tlb_entry->addend;
-        if ((addr - start) < length) {
-#if TARGET_LONG_BITS == 32
-            uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
-            ptr_write += HOST_BIG_ENDIAN;
-            qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
-#elif TCG_OVERSIZED_GUEST
-            tlb_entry->addr_write |= TLB_NOTDIRTY;
-#else
-            qatomic_set(&tlb_entry->addr_write,
-                        tlb_entry->addr_write | TLB_NOTDIRTY);
-#endif
+    const uintptr_t addr = ent->addr_write;
+    int flags = addr | full->slow_flags[MMU_DATA_STORE];
+
+    flags &= TLB_INVALID_MASK | TLB_MMIO | TLB_DISCARD_WRITE | TLB_NOTDIRTY;
+    if (flags == 0) {
+        uintptr_t host = (addr & TARGET_PAGE_MASK) + ent->addend;
+        if ((host - start) < length) {
+            qatomic_set(&ent->addr_write, addr | TLB_NOTDIRTY);
         }
     }
 }
@@ -931,23 +916,25 @@ static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
  * thing actually updated is the target TLB entry ->addr_write flags.
  */
-void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
+void tlb_reset_dirty(CPUState *cpu, uintptr_t start, uintptr_t length)
 {
     int mmu_idx;
 
     qemu_spin_lock(&cpu->neg.tlb.c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+        CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
+        CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
+        unsigned int n = tlb_n_entries(fast);
         unsigned int i;
-        unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);
 
         for (i = 0; i < n; i++) {
-            tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
-                                         start1, length);
+            tlb_reset_dirty_range_locked(&desc->fulltlb[i], &fast->table[i],
+                                         start, length);
         }
 
         for (i = 0; i < CPU_VTLB_SIZE; i++) {
-            tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
-                                         start1, length);
+            tlb_reset_dirty_range_locked(&desc->vfulltlb[i], &desc->vtable[i],
+                                         start, length);
         }
     }
     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
@@ -1199,7 +1186,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
 void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
                              hwaddr paddr, MemTxAttrs attrs, int prot,
-                             int mmu_idx, uint64_t size)
+                             int mmu_idx, vaddr size)
 {
     CPUTLBEntryFull full = {
         .phys_addr = paddr,
@@ -1214,29 +1201,65 @@ void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
 
 void tlb_set_page(CPUState *cpu, vaddr addr, hwaddr paddr, int prot,
-                  int mmu_idx, uint64_t size)
+                  int mmu_idx, vaddr size)
 {
     tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
                             prot, mmu_idx, size);
 }
 
+/**
+ * tlb_hit_page: return true if page aligned @addr is a hit against the
+ * TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (must be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
+{
+    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
+}
+
+/**
+ * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (need not be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
+{
+    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
+}
+
 /*
- * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
- * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
- * be discarded and looked up again (e.g. via tlb_entry()).
+ * Note: tlb_fill_align() can trigger a resize of the TLB.
+ * This means that all of the caller's prior references to the TLB table
+ * (e.g. CPUTLBEntry pointers) must be discarded and looked up again
+ * (e.g. via tlb_entry()).
  */
-static void tlb_fill(CPUState *cpu, vaddr addr, int size,
-                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+static bool tlb_fill_align(CPUState *cpu, vaddr addr, MMUAccessType type,
+                           int mmu_idx, MemOp memop, int size,
+                           bool probe, uintptr_t ra)
 {
-    bool ok;
+    const TCGCPUOps *ops = cpu->cc->tcg_ops;
+    CPUTLBEntryFull full;
 
-    /*
-     * This is not a probe, so only valid return is success; failure
-     * should result in exception + longjmp to the cpu loop.
-     */
-    ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
-                                    access_type, mmu_idx, false, retaddr);
-    assert(ok);
+    if (ops->tlb_fill_align) {
+        if (ops->tlb_fill_align(cpu, &full, addr, type, mmu_idx,
+                                memop, size, probe, ra)) {
+            tlb_set_page_full(cpu, mmu_idx, addr, &full);
+            return true;
+        }
+    } else {
+        /* Legacy behaviour is alignment before paging. */
+        if (addr & ((1u << memop_alignment_bits(memop)) - 1)) {
+            ops->do_unaligned_access(cpu, addr, type, mmu_idx, ra);
+        }
+        if (ops->tlb_fill(cpu, addr, size, type, mmu_idx, probe, ra)) {
+            return true;
+        }
+    }
+    assert(probe);
+    return false;
 }
 
 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
@@ -1319,7 +1342,7 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
 
     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
-        tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
+        tb_invalidate_phys_range_fast(cpu, ram_addr, size, retaddr);
    }
 
     /*
@@ -1351,22 +1374,22 @@ static int probe_access_internal(CPUState *cpu, vaddr addr,
 
     if (!tlb_hit_page(tlb_addr, page_addr)) {
         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
-            if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
-                                            mmu_idx, nonfault, retaddr)) {
+            if (!tlb_fill_align(cpu, addr, access_type, mmu_idx,
+                                0, fault_size, nonfault, retaddr)) {
                 /* Non-faulting page table read failed. */
                 *phost = NULL;
                 *pfull = NULL;
                 return TLB_INVALID_MASK;
             }
 
-            /* TLB resize via tlb_fill may have moved the entry. */
+            /* TLB resize via tlb_fill_align may have moved the entry. */
             index = tlb_index(cpu, mmu_idx, addr);
             entry = tlb_entry(cpu, mmu_idx, addr);
 
             /*
              * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
-             * to force the next access through tlb_fill.  We've just
-             * called tlb_fill, so we know that this entry *is* valid.
+             * to force the next access through tlb_fill_align.  We've just
+             * called tlb_fill_align, so we know that this entry *is* valid.
              */
             flags &= ~TLB_INVALID_MASK;
         }
@@ -1491,7 +1514,7 @@ void *probe_access(CPUArchState *env, vaddr addr, int size,
     return host;
 }
 
-void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
+void *tlb_vaddr_to_host(CPUArchState *env, vaddr addr,
                         MMUAccessType access_type, int mmu_idx)
 {
     CPUTLBEntryFull *full;
@@ -1607,16 +1630,17 @@ typedef struct MMULookupLocals {
  * mmu_lookup1: translate one page
  * @cpu: generic cpu state
  * @data: lookup parameters
+ * @memop: memory operation for the access, or 0
  * @mmu_idx: virtual address context
  * @access_type: load/store/code
  * @ra: return address into tcg generated code, or 0
  *
  * Resolve the translation for the one page at @data.addr, filling in
  * the rest of @data with the results.  If the translation fails,
- * tlb_fill will longjmp out.  Return true if the softmmu tlb for
+ * tlb_fill_align will longjmp out.  Return true if the softmmu tlb for
  * @mmu_idx may have resized.
  */
-static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
+static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
                         int mmu_idx, MMUAccessType access_type, uintptr_t ra)
 {
     vaddr addr = data->addr;
@@ -1631,7 +1655,8 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
                             addr & TARGET_PAGE_MASK)) {
-            tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
+            tlb_fill_align(cpu, addr, access_type, mmu_idx,
+                           memop, data->size, false, ra);
             maybe_resized = true;
             index = tlb_index(cpu, mmu_idx, addr);
             entry = tlb_entry(cpu, mmu_idx, addr);
@@ -1643,6 +1668,25 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
     flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
     flags |= full->slow_flags[access_type];
 
+    if (likely(!maybe_resized)) {
+        /* Alignment has not been checked by tlb_fill_align. */
+        int a_bits = memop_alignment_bits(memop);
+
+        /*
+         * This alignment check differs from the one above, in that this is
+         * based on the atomicity of the operation. The intended use case is
+         * the ARM memory type field of each PTE, where access to pages with
+         * Device memory type require alignment.
+         */
+        if (unlikely(flags & TLB_CHECK_ALIGNED)) {
+            int at_bits = memop_atomicity_bits(memop);
+            a_bits = MAX(a_bits, at_bits);
+        }
+        if (unlikely(addr & ((1 << a_bits) - 1))) {
+            cpu_unaligned_access(cpu, addr, access_type, mmu_idx, ra);
+        }
+    }
+
     data->full = full;
     data->flags = flags;
     /* Compute haddr speculatively; depending on flags it might be invalid. */
@@ -1699,7 +1743,6 @@ static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
 static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
                        uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
 {
-    unsigned a_bits;
     bool crosspage;
     int flags;
 
@@ -1708,12 +1751,6 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
 
     tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
 
-    /* Handle CPU specific unaligned behaviour */
-    a_bits = get_alignment_bits(l->memop);
-    if (addr & ((1 << a_bits) - 1)) {
-        cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
-    }
-
     l->page[0].addr = addr;
     l->page[0].size = memop_size(l->memop);
     l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
@@ -1721,7 +1758,7 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
     crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
 
     if (likely(!crosspage)) {
-        mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
+        mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
 
         flags = l->page[0].flags;
         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
@@ -1736,12 +1773,15 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
         l->page[1].size = l->page[0].size - size0;
         l->page[0].size = size0;
 
+        l->page[1].addr = cpu->cc->tcg_ops->pointer_wrap(cpu, l->mmu_idx,
+                                                         l->page[1].addr, addr);
+
         /*
          * Lookup both pages, recognizing exceptions from either.  If the
         * second lookup potentially resized, refresh first CPUTLBEntryFull.
          */
-        mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
-        if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) {
+        mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
+        if (mmu_lookup1(cpu, &l->page[1], 0, l->mmu_idx, type, ra)) {
             uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
             l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
         }
@@ -1760,31 +1800,6 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
         tcg_debug_assert((flags & TLB_BSWAP) == 0);
     }
 
-    /*
-     * This alignment check differs from the one above, in that this is
-     * based on the atomicity of the operation. The intended use case is
-     * the ARM memory type field of each PTE, where access to pages with
-     * Device memory type require alignment.
-     */
-    if (unlikely(flags & TLB_CHECK_ALIGNED)) {
-        MemOp size = l->memop & MO_SIZE;
-
-        switch (l->memop & MO_ATOM_MASK) {
-        case MO_ATOM_NONE:
-            size = MO_8;
-            break;
-        case MO_ATOM_IFALIGN_PAIR:
-        case MO_ATOM_WITHIN16_PAIR:
-            size = size ? size - 1 : 0;
-            break;
-        default:
-            break;
-        }
-        if (addr & ((1 << size) - 1)) {
-            cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
-        }
-    }
-
     return crosspage;
 }
 
@@ -1797,34 +1812,18 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
 {
     uintptr_t mmu_idx = get_mmuidx(oi);
     MemOp mop = get_memop(oi);
-    int a_bits = get_alignment_bits(mop);
     uintptr_t index;
     CPUTLBEntry *tlbe;
     vaddr tlb_addr;
     void *hostaddr;
     CPUTLBEntryFull *full;
+    bool did_tlb_fill = false;
 
     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
 
     /* Adjust the given return address. */
     retaddr -= GETPC_ADJ;
 
-    /* Enforce guest required alignment. */
-    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
-        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
-        cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-    }
-
-    /* Enforce qemu required alignment.  */
-    if (unlikely(addr & (size - 1))) {
-        /* We get here if guest alignment was not requested,
-           or was not enforced by cpu_unaligned_access above.
-           We might widen the access and emulate, but for now
-           mark an exception and exit the cpu loop.  */
-        goto stop_the_world;
-    }
-
     index = tlb_index(cpu, mmu_idx, addr);
     tlbe = tlb_entry(cpu, mmu_idx, addr);
 
@@ -1833,8 +1832,9 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
                             addr & TARGET_PAGE_MASK)) {
-            tlb_fill(cpu, addr, size,
-                     MMU_DATA_STORE, mmu_idx, retaddr);
+            tlb_fill_align(cpu, addr, MMU_DATA_STORE, mmu_idx,
+                           mop, size, false, retaddr);
+            did_tlb_fill = true;
             index = tlb_index(cpu, mmu_idx, addr);
             tlbe = tlb_entry(cpu, mmu_idx, addr);
         }
@@ -1848,17 +1848,38 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
      * but addr_read will only be -1 if PAGE_READ was unset.
      */
     if (unlikely(tlbe->addr_read == -1)) {
-        tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+        tlb_fill_align(cpu, addr, MMU_DATA_LOAD, mmu_idx,
+                       0, size, false, retaddr);
         /*
          * Since we don't support reads and writes to different
          * addresses, and we do have the proper page loaded for
-         * write, this shouldn't ever return.  But just in case,
-         * handle via stop-the-world.
+         * write, this shouldn't ever return.
+         */
+        g_assert_not_reached();
+    }
+
+    /* Enforce guest required alignment, if not handled by tlb_fill_align. */
+    if (!did_tlb_fill && (addr & ((1 << memop_alignment_bits(mop)) - 1))) {
+        cpu_unaligned_access(cpu, addr, MMU_DATA_STORE, mmu_idx, retaddr);
+    }
+
+    /* Enforce qemu required alignment.  */
+    if (unlikely(addr & (size - 1))) {
+        /*
+         * We get here if guest alignment was not requested, or was not
+         * enforced by cpu_unaligned_access or tlb_fill_align above.
+         * We might widen the access and emulate, but for now
+         * mark an exception and exit the cpu loop.
          */
         goto stop_the_world;
     }
 
-    /* Collect tlb flags for read. */
+
+    /* Finish collecting tlb flags for both read and write. */
+    full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
     tlb_addr |= tlbe->addr_read;
+    tlb_addr &= TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
+    tlb_addr |= full->slow_flags[MMU_DATA_STORE];
+    tlb_addr |= full->slow_flags[MMU_DATA_LOAD];
 
     /* Notice an IO access or a needs-MMU-lookup access */
     if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
@@ -1868,13 +1889,12 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
     }
 
     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
-    full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
 
     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
         notdirty_write(cpu, addr, size, full, retaddr);
     }
 
-    if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
+    if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
         int wp_flags = 0;
 
         if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
@@ -1883,10 +1903,8 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
         if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
             wp_flags |= BP_MEM_READ;
         }
-        if (wp_flags) {
-            cpu_check_watchpoint(cpu, addr, size,
-                                 full->attrs, wp_flags, retaddr);
-        }
+        cpu_check_watchpoint(cpu, addr, size,
+                             full->attrs, wp_flags, retaddr);
     }
 
     return hostaddr;
@@ -2313,7 +2331,7 @@ static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     MMULookupLocals l;
     bool crosspage;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
     tcg_debug_assert(!crosspage);
 
@@ -2328,7 +2346,7 @@ static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     uint16_t ret;
     uint8_t a, b;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
     if (likely(!crosspage)) {
         return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2352,7 +2370,7 @@ static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     bool crosspage;
     uint32_t ret;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
     if (likely(!crosspage)) {
         return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2373,7 +2391,7 @@ static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     bool crosspage;
     uint64_t ret;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
     if (likely(!crosspage)) {
         return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2396,7 +2414,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
     Int128 ret;
     int first;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l);
     if (likely(!crosspage)) {
         if (unlikely(l.page[0].flags & TLB_MMIO)) {
@@ -2724,7 +2742,7 @@ static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
     MMULookupLocals l;
     bool crosspage;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     tcg_debug_assert(!crosspage);
 
@@ -2738,7 +2756,7 @@ static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
     bool crosspage;
     uint8_t a, b;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2760,7 +2778,7 @@ static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
     MMULookupLocals l;
     bool crosspage;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2781,7 +2799,7 @@ static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
     MMULookupLocals l;
     bool crosspage;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2804,7 +2822,7 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
     uint64_t a, b;
     int first;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         if (unlikely(l.page[0].flags & TLB_MMIO)) {
@@ -2889,54 +2907,45 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
 
 /* Code access functions. */
 
-uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
-{
-    CPUState *cs = env_cpu(env);
-    MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(cs, true));
-    return do_ld1_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
-}
-
-uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
-{
-    CPUState *cs = env_cpu(env);
-    MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(cs, true));
-    return do_ld2_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
-}
-
-uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
-{
-    CPUState *cs = env_cpu(env);
-    MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, true));
-    return do_ld4_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
-}
-
-uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
-{
-    CPUState *cs = env_cpu(env);
-    MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(cs, true));
-    return do_ld8_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
-}
-
-uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
+uint8_t cpu_ldb_code_mmu(CPUArchState *env, vaddr addr,
                          MemOpIdx oi, uintptr_t retaddr)
 {
     return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
 }
 
-uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
+uint16_t cpu_ldw_code_mmu(CPUArchState *env, vaddr addr,
                           MemOpIdx oi, uintptr_t retaddr)
 {
     return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
 }
 
-uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
+uint32_t cpu_ldl_code_mmu(CPUArchState *env, vaddr addr,
                           MemOpIdx oi, uintptr_t retaddr)
 {
     return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
 }
 
-uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
+uint64_t cpu_ldq_code_mmu(CPUArchState *env, vaddr addr,
                           MemOpIdx oi, uintptr_t retaddr)
 {
     return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
 }
+
+/*
+ * Common pointer_wrap implementations.
+ */
+
+/*
+ * To be used for strict alignment targets.
+ * Because no accesses are unaligned, no accesses wrap either.
+ */
+vaddr cpu_pointer_wrap_notreached(CPUState *cs, int idx, vaddr res, vaddr base)
+{
+    g_assert_not_reached();
+}
+
+/* To be used for strict 32-bit targets. */
+vaddr cpu_pointer_wrap_uint32(CPUState *cs, int idx, vaddr res, vaddr base)
+{
+    return (uint32_t)res;
+}
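The alignment tests introduced above (in tlb_fill_align, mmu_lookup1, and atomic_mmu_lookup) all reduce to the same bit trick: an access aligned to 2^a_bits has its low a_bits address bits clear. A standalone sketch of that predicate follows, assuming only that memop_alignment_bits() returns the log2 of the required alignment, as the call sites in the diff suggest; is_misaligned() is a hypothetical helper written for illustration, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/*
 * Mirror of the `addr & ((1 << a_bits) - 1)` test used in the diff:
 * a nonzero result means at least one low bit is set, i.e. the access
 * is not aligned to a 2^a_bits boundary.
 */
static int is_misaligned(uint64_t addr, unsigned a_bits)
{
    return (addr & ((UINT64_C(1) << a_bits) - 1)) != 0;
}

int main(void)
{
    printf("%d\n", is_misaligned(0x1000, 2)); /* 0: 4-byte aligned */
    printf("%d\n", is_misaligned(0x1002, 2)); /* 1: low bits set */
    return 0;
}

This also explains the TLB_CHECK_ALIGNED path in mmu_lookup1: raising a_bits to MAX(a_bits, at_bits) simply widens the mask so the atomicity requirement is enforced by the same test.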
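For the new pointer_wrap hook, the call site in mmu_lookup() above routes the second-page address of a cross-page access through cpu->cc->tcg_ops->pointer_wrap(). A minimal sketch of how a target might wire up one of the common helpers added at the end of this diff; the ops struct shown here is a hypothetical target's, not taken from this patch, and only the .pointer_wrap member is grounded in the code above:

/* In a hypothetical 32-bit target's cpu.c. */
static const TCGCPUOps my_target_tcg_ops = {
    /* ... translation, tlb_fill/tlb_fill_align hooks elided ... */

    /*
     * Guest addresses wrap modulo 2^32, so a cross-page access at the
     * top of the address space folds back to page zero instead of
     * computing a nonsensical 33-bit address.
     */
    .pointer_wrap = cpu_pointer_wrap_uint32,
};

A strict-alignment target would instead use cpu_pointer_wrap_notreached, since an access that never misaligns can never straddle the end of the address space.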