Diffstat (limited to 'accel/tcg/cputlb.c')
-rw-r--r-- | accel/tcg/cputlb.c | 259
1 file changed, 148 insertions(+), 111 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 8fad2d9..6f1c006 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -100,21 +100,14 @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
 
 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
 {
-    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
+    int i, i0 = tb_jmp_cache_hash_page(page_addr);
+    CPUJumpCache *jc = cpu->tb_jmp_cache;
 
     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
-        qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
+        qatomic_set(&jc->array[i0 + i].tb, NULL);
     }
 }
 
-static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
-{
-    /* Discard jump cache entries for any tb which might potentially
-       overlap the flushed page. */
-    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
-    tb_jmp_cache_clear_page(cpu, addr);
-}
-
 /**
  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
  * @desc: The CPUTLBDesc portion of the TLB
@@ -200,13 +193,13 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
     }
 
     g_free(fast->table);
-    g_free(desc->iotlb);
+    g_free(desc->fulltlb);
 
     tlb_window_reset(desc, now, 0);
     /* desc->n_used_entries is cleared by the caller */
     fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
     fast->table = g_try_new(CPUTLBEntry, new_size);
-    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
+    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
 
     /*
      * If the allocations fail, try smaller sizes. We just freed some
@@ -215,7 +208,7 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
      * allocations to fail though, so we progressively reduce the allocation
      * size, aborting if we cannot even allocate the smallest TLB we support.
      */
-    while (fast->table == NULL || desc->iotlb == NULL) {
+    while (fast->table == NULL || desc->fulltlb == NULL) {
         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
             error_report("%s: %s", __func__, strerror(errno));
             abort();
@@ -224,9 +217,9 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
         fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
 
         g_free(fast->table);
-        g_free(desc->iotlb);
+        g_free(desc->fulltlb);
         fast->table = g_try_new(CPUTLBEntry, new_size);
-        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
+        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
     }
 }
 
@@ -258,7 +251,7 @@ static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
     desc->n_used_entries = 0;
     fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
     fast->table = g_new(CPUTLBEntry, n_entries);
-    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
+    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
     tlb_mmu_flush_locked(desc, fast);
 }
 
@@ -299,7 +292,7 @@ void tlb_destroy(CPUState *cpu)
         CPUTLBDescFast *fast = &env_tlb(env)->f[i];
 
         g_free(fast->table);
-        g_free(desc->iotlb);
+        g_free(desc->fulltlb);
     }
 }
 
@@ -364,7 +357,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
 
     qemu_spin_unlock(&env_tlb(env)->c.lock);
 
-    cpu_tb_jmp_cache_clear(cpu);
+    tcg_flush_jmp_cache(cpu);
 
     if (to_clean == ALL_MMUIDX_BITS) {
         qatomic_set(&env_tlb(env)->c.full_flush_count,
@@ -541,7 +534,12 @@ static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
     }
     qemu_spin_unlock(&env_tlb(env)->c.lock);
 
-    tb_flush_jmp_cache(cpu, addr);
+    /*
+     * Discard jump cache entries for any tb which might potentially
+     * overlap the flushed page, which includes the previous.
+     */
+    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
+    tb_jmp_cache_clear_page(cpu, addr);
 }
 
 /**
@@ -788,12 +786,18 @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
      * longer to clear each entry individually than it will to clear it all.
      */
     if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
-        cpu_tb_jmp_cache_clear(cpu);
+        tcg_flush_jmp_cache(cpu);
         return;
     }
 
-    for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
-        tb_flush_jmp_cache(cpu, d.addr + i);
+    /*
+     * Discard jump cache entries for any tb which might potentially
+     * overlap the flushed pages, which includes the previous.
+     */
+    d.addr -= TARGET_PAGE_SIZE;
+    for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
+        tb_jmp_cache_clear_page(cpu, d.addr);
+        d.addr += TARGET_PAGE_SIZE;
     }
 }
 
@@ -951,7 +955,8 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
    can be detected */
 void tlb_protect_code(ram_addr_t ram_addr)
 {
-    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
+    cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
+                                             TARGET_PAGE_SIZE,
                                              DIRTY_MEMORY_CODE);
 }
 
@@ -1095,16 +1100,16 @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
     env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
 }
 
-/* Add a new TLB entry. At most one entry for a given virtual address
+/*
+ * Add a new TLB entry. At most one entry for a given virtual address
  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
  * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
-void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
-                             hwaddr paddr, MemTxAttrs attrs, int prot,
-                             int mmu_idx, target_ulong size)
+void tlb_set_page_full(CPUState *cpu, int mmu_idx,
+                       target_ulong vaddr, CPUTLBEntryFull *full)
 {
     CPUArchState *env = cpu->env_ptr;
     CPUTLB *tlb = env_tlb(env);
@@ -1117,35 +1122,36 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     CPUTLBEntry *te, tn;
     hwaddr iotlb, xlat, sz, paddr_page;
     target_ulong vaddr_page;
-    int asidx = cpu_asidx_from_attrs(cpu, attrs);
-    int wp_flags;
+    int asidx, wp_flags, prot;
     bool is_ram, is_romd;
 
     assert_cpu_is_self(cpu);
 
-    if (size <= TARGET_PAGE_SIZE) {
+    if (full->lg_page_size <= TARGET_PAGE_BITS) {
         sz = TARGET_PAGE_SIZE;
     } else {
-        tlb_add_large_page(env, mmu_idx, vaddr, size);
-        sz = size;
+        sz = (hwaddr)1 << full->lg_page_size;
+        tlb_add_large_page(env, mmu_idx, vaddr, sz);
     }
     vaddr_page = vaddr & TARGET_PAGE_MASK;
-    paddr_page = paddr & TARGET_PAGE_MASK;
+    paddr_page = full->phys_addr & TARGET_PAGE_MASK;
 
+    prot = full->prot;
+    asidx = cpu_asidx_from_attrs(cpu, full->attrs);
     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
-                                                &xlat, &sz, attrs, &prot);
+                                                &xlat, &sz, full->attrs, &prot);
     assert(sz >= TARGET_PAGE_SIZE);
 
     tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
               " prot=%x idx=%d\n",
-              vaddr, paddr, prot, mmu_idx);
+              vaddr, full->phys_addr, prot, mmu_idx);
 
     address = vaddr_page;
-    if (size < TARGET_PAGE_SIZE) {
+    if (full->lg_page_size < TARGET_PAGE_BITS) {
         /* Repeat the MMU check and TLB fill on every access. */
         address |= TLB_INVALID_MASK;
     }
-    if (attrs.byte_swap) {
+    if (full->attrs.byte_swap) {
         address |= TLB_BSWAP;
     }
 
@@ -1219,7 +1225,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
 
         /* Evict the old entry into the victim tlb. */
         copy_tlb_helper_locked(tv, te);
-        desc->viotlb[vidx] = desc->iotlb[index];
+        desc->vfulltlb[vidx] = desc->fulltlb[index];
         tlb_n_used_entries_dec(env, mmu_idx);
     }
 
@@ -1236,8 +1242,10 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
      * subtract here is that of the page base, and not the same as the
      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
      */
-    desc->iotlb[index].addr = iotlb - vaddr_page;
-    desc->iotlb[index].attrs = attrs;
+    desc->fulltlb[index] = *full;
+    desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
+    desc->fulltlb[index].phys_addr = paddr_page;
+    desc->fulltlb[index].prot = prot;
 
     /* Now calculate the new entry */
     tn.addend = addend - vaddr_page;
@@ -1272,9 +1280,21 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     qemu_spin_unlock(&tlb->c.lock);
 }
 
-/* Add a new TLB entry, but without specifying the memory
- * transaction attributes to be used.
- */
+void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
+                             hwaddr paddr, MemTxAttrs attrs, int prot,
+                             int mmu_idx, target_ulong size)
+{
+    CPUTLBEntryFull full = {
+        .phys_addr = paddr,
+        .attrs = attrs,
+        .prot = prot,
+        .lg_page_size = ctz64(size)
+    };
+
+    assert(is_power_of_2(size));
+    tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
+}
+
 void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                   hwaddr paddr, int prot,
                   int mmu_idx, target_ulong size)
@@ -1291,15 +1311,14 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
 static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
 {
-    CPUClass *cc = CPU_GET_CLASS(cpu);
     bool ok;
 
     /*
      * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
-    ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
-                               access_type, mmu_idx, false, retaddr);
+    ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
+                                    access_type, mmu_idx, false, retaddr);
     assert(ok);
 }
 
@@ -1307,9 +1326,8 @@ static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                         MMUAccessType access_type,
                                         int mmu_idx, uintptr_t retaddr)
 {
-    CPUClass *cc = CPU_GET_CLASS(cpu);
-
-    cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
+    cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
+                                          mmu_idx, retaddr);
 }
 
 static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
@@ -1329,7 +1347,7 @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
     }
 }
 
-static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
+static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
                          int mmu_idx, target_ulong addr, uintptr_t retaddr,
                          MMUAccessType access_type, MemOp op)
 {
@@ -1341,9 +1359,9 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     bool locked = false;
     MemTxResult r;
 
-    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
+    section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
     mr = section->mr;
-    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+    mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
     cpu->mem_io_pc = retaddr;
     if (!cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
@@ -1353,14 +1371,14 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
         qemu_mutex_lock_iothread();
         locked = true;
     }
-    r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
+    r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
     if (r != MEMTX_OK) {
         hwaddr physaddr = mr_offset +
             section->offset_within_address_space -
             section->offset_within_region;
 
         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
-                               mmu_idx, iotlbentry->attrs, r, retaddr);
+                               mmu_idx, full->attrs, r, retaddr);
     }
     if (locked) {
         qemu_mutex_unlock_iothread();
@@ -1370,22 +1388,21 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
 }
 
 /*
- * Save a potentially trashed IOTLB entry for later lookup by plugin.
- * This is read by tlb_plugin_lookup if the iotlb entry doesn't match
+ * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
+ * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
  * because of the side effect of io_writex changing memory layout.
 */
-static void save_iotlb_data(CPUState *cs, hwaddr addr,
-                            MemoryRegionSection *section, hwaddr mr_offset)
+static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
+                            hwaddr mr_offset)
 {
 #ifdef CONFIG_PLUGIN
     SavedIOTLB *saved = &cs->saved_iotlb;
-    saved->addr = addr;
     saved->section = section;
     saved->mr_offset = mr_offset;
 #endif
 }
 
-static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
+static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
                       int mmu_idx, uint64_t val, target_ulong addr,
                       uintptr_t retaddr, MemOp op)
 {
@@ -1396,9 +1413,9 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     bool locked = false;
     MemTxResult r;
 
-    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
+    section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
     mr = section->mr;
-    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+    mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
     if (!cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }
@@ -1408,20 +1425,20 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
      * The memory_region_dispatch may trigger a flush/resize
      * so for plugins we save the iotlb_data just in case.
     */
-    save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset);
+    save_iotlb_data(cpu, section, mr_offset);
 
     if (!qemu_mutex_iothread_locked()) {
         qemu_mutex_lock_iothread();
         locked = true;
     }
-    r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
+    r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
     if (r != MEMTX_OK) {
         hwaddr physaddr = mr_offset +
             section->offset_within_address_space -
             section->offset_within_region;
 
         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
-                               MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
+                               MMU_DATA_STORE, mmu_idx, full->attrs, r,
                                retaddr);
     }
     if (locked) {
@@ -1468,9 +1485,10 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
             copy_tlb_helper_locked(vtlb, &tmptlb);
             qemu_spin_unlock(&env_tlb(env)->c.lock);
 
-            CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
-            CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
-            tmpio = *io; *io = *vio; *vio = tmpio;
+            CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+            CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
+            CPUTLBEntryFull tmpf;
+            tmpf = *f1; *f1 = *f2; *f2 = tmpf;
             return true;
         }
     }
@@ -1483,9 +1501,9 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                  (ADDR) & TARGET_PAGE_MASK)
 
 static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
-                           CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
+                           CPUTLBEntryFull *full, uintptr_t retaddr)
 {
-    ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;
+    ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
 
     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
 
@@ -1512,7 +1530,8 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
 static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                  int fault_size, MMUAccessType access_type,
                                  int mmu_idx, bool nonfault,
-                                 void **phost, uintptr_t retaddr)
+                                 void **phost, CPUTLBEntryFull **pfull,
+                                 uintptr_t retaddr)
 {
     uintptr_t index = tlb_index(env, mmu_idx, addr);
     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
@@ -1535,25 +1554,36 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
     }
     tlb_addr = tlb_read_ofs(entry, elt_ofs);
 
+    flags = TLB_FLAGS_MASK;
     page_addr = addr & TARGET_PAGE_MASK;
     if (!tlb_hit_page(tlb_addr, page_addr)) {
         if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
             CPUState *cs = env_cpu(env);
-            CPUClass *cc = CPU_GET_CLASS(cs);
 
-            if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
-                                       mmu_idx, nonfault, retaddr)) {
+            if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
+                                           mmu_idx, nonfault, retaddr)) {
                 /* Non-faulting page table read failed. */
                 *phost = NULL;
+                *pfull = NULL;
                 return TLB_INVALID_MASK;
             }
 
             /* TLB resize via tlb_fill may have moved the entry. */
+            index = tlb_index(env, mmu_idx, addr);
             entry = tlb_entry(env, mmu_idx, addr);
+
+            /*
+             * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
+             * to force the next access through tlb_fill. We've just
+             * called tlb_fill, so we know that this entry *is* valid.
+             */
+            flags &= ~TLB_INVALID_MASK;
         }
         tlb_addr = tlb_read_ofs(entry, elt_ofs);
     }
-    flags = tlb_addr & TLB_FLAGS_MASK;
+    flags &= tlb_addr;
+
+    *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];
 
     /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
     if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
@@ -1566,37 +1596,44 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
     return flags;
 }
 
-int probe_access_flags(CPUArchState *env, target_ulong addr,
-                       MMUAccessType access_type, int mmu_idx,
-                       bool nonfault, void **phost, uintptr_t retaddr)
+int probe_access_full(CPUArchState *env, target_ulong addr,
+                      MMUAccessType access_type, int mmu_idx,
+                      bool nonfault, void **phost, CPUTLBEntryFull **pfull,
+                      uintptr_t retaddr)
 {
-    int flags;
-
-    flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
-                                  nonfault, phost, retaddr);
+    int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
+                                      nonfault, phost, pfull, retaddr);
 
     /* Handle clean RAM pages. */
     if (unlikely(flags & TLB_NOTDIRTY)) {
-        uintptr_t index = tlb_index(env, mmu_idx, addr);
-        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
-
-        notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
+        notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
         flags &= ~TLB_NOTDIRTY;
     }
 
     return flags;
 }
 
+int probe_access_flags(CPUArchState *env, target_ulong addr,
+                       MMUAccessType access_type, int mmu_idx,
+                       bool nonfault, void **phost, uintptr_t retaddr)
+{
+    CPUTLBEntryFull *full;
+
+    return probe_access_full(env, addr, access_type, mmu_idx,
+                             nonfault, phost, &full, retaddr);
+}
+
 void *probe_access(CPUArchState *env, target_ulong addr, int size,
                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
 {
+    CPUTLBEntryFull *full;
     void *host;
     int flags;
 
     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
 
     flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
-                                  false, &host, retaddr);
+                                  false, &host, &full, retaddr);
 
     /* Per the interface, size == 0 merely faults the access. */
     if (size == 0) {
@@ -1604,20 +1641,17 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
     }
 
     if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
-        uintptr_t index = tlb_index(env, mmu_idx, addr);
-        CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
-
         /* Handle watchpoints. */
         if (flags & TLB_WATCHPOINT) {
             int wp_access = (access_type == MMU_DATA_STORE
                              ? BP_MEM_WRITE : BP_MEM_READ);
             cpu_check_watchpoint(env_cpu(env), addr, size,
-                                 iotlbentry->attrs, wp_access, retaddr);
+                                 full->attrs, wp_access, retaddr);
         }
 
         /* Handle clean RAM pages. */
         if (flags & TLB_NOTDIRTY) {
-            notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
+            notdirty_write(env_cpu(env), addr, 1, full, retaddr);
         }
     }
 
@@ -1627,11 +1661,12 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
 void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                         MMUAccessType access_type, int mmu_idx)
 {
+    CPUTLBEntryFull *full;
     void *host;
     int flags;
 
     flags = probe_access_internal(env, addr, 0, access_type,
-                                  mmu_idx, true, &host, 0);
+                                  mmu_idx, true, &host, &full, 0);
 
     /* No combination of flags are expected by the caller. */
     return flags ? NULL : host;
@@ -1650,10 +1685,11 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
 tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                         void **hostp)
 {
+    CPUTLBEntryFull *full;
     void *p;
 
     (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
-                                cpu_mmu_index(env, true), false, &p, 0);
+                                cpu_mmu_index(env, true), false, &p, &full, 0);
     if (p == NULL) {
         return -1;
     }
@@ -1674,7 +1710,7 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
  * should have just filled the TLB. The one corner case is io_writex
  * which can cause TLB flushes and potential resizing of the TLBs
  * losing the information we need. In those cases we need to recover
- * data from a copy of the iotlbentry. As long as this always occurs
+ * data from a copy of the CPUTLBEntryFull. As long as this always occurs
 * from the same thread (which a mem callback will be) this is safe.
 */
 
@@ -1689,11 +1725,12 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
     if (likely(tlb_hit(tlb_addr, addr))) {
         /* We must have an iotlb entry for MMIO */
         if (tlb_addr & TLB_MMIO) {
-            CPUIOTLBEntry *iotlbentry;
-            iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+            CPUTLBEntryFull *full;
+            full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
             data->is_io = true;
-            data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
-            data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
+            data->v.io.section =
+                iotlb_to_section(cpu, full->xlat_section, full->attrs);
+            data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
         } else {
             data->is_io = false;
             data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
@@ -1801,7 +1838,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 
     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
         notdirty_write(env_cpu(env), addr, size,
-                       &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
+                       &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr);
     }
 
     return hostaddr;
@@ -1909,7 +1946,7 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
 
     /* Handle anything that isn't just a straight memory access. */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry;
+        CPUTLBEntryFull *full;
         bool need_swap;
 
         /* For anything that is unaligned, recurse through full_load. */
@@ -1917,20 +1954,20 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
             goto do_unaligned_access;
         }
 
-        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+        full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
 
         /* Handle watchpoints. */
         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
             /* On watchpoint hit, this will longjmp out. */
             cpu_check_watchpoint(env_cpu(env), addr, size,
-                                 iotlbentry->attrs, BP_MEM_READ, retaddr);
+                                 full->attrs, BP_MEM_READ, retaddr);
         }
 
         need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
 
         /* Handle I/O access. */
         if (likely(tlb_addr & TLB_MMIO)) {
-            return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
+            return io_readx(env, full, mmu_idx, addr, retaddr,
                             access_type, op ^ (need_swap * MO_BSWAP));
         }
 
@@ -2245,12 +2282,12 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
      */
     if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
         cpu_check_watchpoint(env_cpu(env), addr, size - size2,
-                             env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
+                             env_tlb(env)->d[mmu_idx].fulltlb[index].attrs,
                              BP_MEM_WRITE, retaddr);
     }
     if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
         cpu_check_watchpoint(env_cpu(env), page2, size2,
-                             env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
+                             env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs,
                              BP_MEM_WRITE, retaddr);
     }
 
@@ -2314,7 +2351,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
 
     /* Handle anything that isn't just a straight memory access. */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry;
+        CPUTLBEntryFull *full;
         bool need_swap;
 
         /* For anything that is unaligned, recurse through byte stores. */
@@ -2322,20 +2359,20 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             goto do_unaligned_access;
         }
 
-        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+        full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
 
         /* Handle watchpoints. */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out. */
            cpu_check_watchpoint(env_cpu(env), addr, size,
-                                 iotlbentry->attrs, BP_MEM_WRITE, retaddr);
+                                 full->attrs, BP_MEM_WRITE, retaddr);
        }
 
        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
 
        /* Handle I/O access. */
        if (tlb_addr & TLB_MMIO) {
-            io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
+            io_writex(env, full, mmu_idx, val, addr, retaddr,
                       op ^ (need_swap * MO_BSWAP));
            return;
        }
@@ -2347,7 +2384,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
 
         /* Handle clean RAM pages. */
         if (tlb_addr & TLB_NOTDIRTY) {
-            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
+            notdirty_write(env_cpu(env), addr, size, full, retaddr);
         }
 
         haddr = (void *)((uintptr_t)addr + entry->addend);
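
Note: the core of this change is that the paired "iotlb" data (xlat_section offset plus MemTxAttrs) becomes a CPUTLBEntryFull carrying phys_addr, attrs, prot and lg_page_size, and that targets can hand such a structure to the new tlb_set_page_full() entry point; tlb_set_page_with_attrs() is now only a wrapper that builds one from its arguments. The sketch below is an illustration only and is not part of this patch: the helper name and the fixed values are hypothetical, while the tlb_set_page_full() signature and the four structure fields are the ones visible in the diff above.

/*
 * Hypothetical example: install one TARGET_PAGE_SIZE mapping via the new
 * tlb_set_page_full() API from a target's TLB-fill path.  Needs the usual
 * softmmu headers ("exec/exec-all.h", "exec/cpu-common.h") in a real target.
 */
static void example_fill_one_page(CPUState *cs, target_ulong vaddr,
                                  hwaddr paddr, int prot, int mmu_idx)
{
    CPUTLBEntryFull full = {
        /* Physical page base; tlb_set_page_full() masks to the page itself. */
        .phys_addr = paddr & TARGET_PAGE_MASK,
        /* No special transaction attributes for this illustration. */
        .attrs = MEMTXATTRS_UNSPECIFIED,
        /* PAGE_READ / PAGE_WRITE / PAGE_EXEC bits computed by the target. */
        .prot = prot,
        /* log2 of the mapping size: a single small page here. */
        .lg_page_size = TARGET_PAGE_BITS,
    };

    tlb_set_page_full(cs, mmu_idx, vaddr & TARGET_PAGE_MASK, &full);
}

Recording the size as lg_page_size (log2) rather than a byte count is what lets the compatibility wrapper simply use ctz64(size) together with the assert(is_power_of_2(size)) seen in the diff.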