| author | Richard Henderson <richard.henderson@linaro.org> | 2019-08-23 15:12:32 -0700 |
| --- | --- | --- |
| committer | Richard Henderson <richard.henderson@linaro.org> | 2019-09-03 08:30:39 -0700 |
| commit | 30d7e098d5c38644359820317fcf72e3e129ec53 (patch) | |
| tree | 55ac8fed388ffa8cb3e92a8c37dea9edce816182 /accel/tcg | |
| parent | 0026348b48fe532279e8c12b100c16c1aa991373 (diff) | |
cputlb: Fold TLB_RECHECK into TLB_INVALID_MASK
We had two different mechanisms to force a recheck of the TLB.
Before TLB_RECHECK was introduced, we had a PAGE_WRITE_INV bit
that would immediately set TLB_INVALID_MASK, which automatically
means that a second check of the TLB entry fails.
We can use the same mechanism to handle small pages.
Conserve TLB_* bits by removing TLB_RECHECK.
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
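The trick being reused here is that QEMU keeps TLB flag bits in the low, sub-page bits of an entry's stored address, and the fast-path hit test masks out every flag except TLB_INVALID_MASK, so an entry with that bit set can never compare equal to a page-aligned lookup address. Below is a minimal sketch of that comparison (not QEMU source; the page size and types are simplified for illustration):

```c
/*
 * Minimal sketch (not QEMU source): why an address with TLB_INVALID_MASK
 * set can never hit.  Constants and types are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_MASK (~(((uint64_t)1 << TARGET_PAGE_BITS) - 1))
/* Flag bits live in the low, sub-page bits of the stored TLB address. */
#define TLB_INVALID_MASK ((uint64_t)1 << (TARGET_PAGE_BITS - 1))

/* Hit test for a page-aligned @addr: mask out every flag bit except
 * TLB_INVALID_MASK, so a set INVALID bit always forces a mismatch. */
static bool tlb_hit_page(uint64_t tlb_addr, uint64_t addr)
{
    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

int main(void)
{
    uint64_t page = 0x7f0000;    /* page-aligned guest address */
    uint64_t entry = page;       /* valid TLB entry */

    printf("valid entry hits:   %d\n", tlb_hit_page(entry, page)); /* 1 */

    entry |= TLB_INVALID_MASK;   /* what PAGE_WRITE_INV, and now small
                                    pages, effectively do */
    printf("invalid entry hits: %d\n", tlb_hit_page(entry, page)); /* 0 */
    return 0;
}
```

Because the failing compare routes the access to the existing slow path, no separate TLB_RECHECK bit or special-case handling is needed.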
Diffstat (limited to 'accel/tcg')
-rw-r--r-- accel/tcg/cputlb.c | 86
1 file changed, 23 insertions(+), 63 deletions(-)
```diff
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index d9787cc..c9576be 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -732,11 +732,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
 
     address = vaddr_page;
     if (size < TARGET_PAGE_SIZE) {
-        /*
-         * Slow-path the TLB entries; we will repeat the MMU check and TLB
-         * fill on every access.
-         */
-        address |= TLB_RECHECK;
+        /* Repeat the MMU check and TLB fill on every access.  */
+        address |= TLB_INVALID_MASK;
     }
     if (attrs.byte_swap) {
         /* Force the access through the I/O slow path.  */
@@ -1026,10 +1023,15 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
     victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                    (ADDR) & TARGET_PAGE_MASK)
 
-/* NOTE: this function can trigger an exception */
-/* NOTE2: the returned address is not exactly the physical address: it
- * is actually a ram_addr_t (in system mode; the user mode emulation
- * version of this function returns a guest virtual address).
+/*
+ * Return a ram_addr_t for the virtual address for execution.
+ *
+ * Return -1 if we can't translate and execute from an entire page
+ * of RAM.  This will force us to execute by loading and translating
+ * one insn at a time, without caching.
+ *
+ * NOTE: This function will trigger an exception if the page is
+ * not executable.
  */
 tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
 {
@@ -1043,19 +1045,20 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
             tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
             index = tlb_index(env, mmu_idx, addr);
             entry = tlb_entry(env, mmu_idx, addr);
+
+            if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
+                /*
+                 * The MMU protection covers a smaller range than a target
+                 * page, so we must redo the MMU check for every insn.
+                 */
+                return -1;
+            }
         }
         assert(tlb_hit(entry->addr_code, addr));
     }
 
-    if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
-        /*
-         * Return -1 if we can't translate and execute from an entire
-         * page of RAM here, which will cause us to execute by loading
-         * and translating one insn at a time, without caching:
-         *  - TLB_RECHECK: means the MMU protection covers a smaller range
-         *    than a target page, so we must redo the MMU check every insn
-         *  - TLB_MMIO: region is not backed by RAM
-         */
+    if (unlikely(entry->addr_code & TLB_MMIO)) {
+        /* The region is not backed by RAM.  */
         return -1;
     }
 
@@ -1180,7 +1183,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     }
 
     /* Notice an IO access or a needs-MMU-lookup access */
-    if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
+    if (unlikely(tlb_addr & TLB_MMIO)) {
         /* There's really nothing that can be done to support this
            apart from stop-the-world.  */
         goto stop_the_world;
@@ -1258,6 +1261,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
             entry = tlb_entry(env, mmu_idx, addr);
         }
         tlb_addr = code_read ? entry->addr_code : entry->addr_read;
+        tlb_addr &= ~TLB_INVALID_MASK;
     }
 
     /* Handle an IO access.  */
@@ -1265,27 +1269,6 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
         if ((addr & (size - 1)) != 0) {
             goto do_unaligned_access;
         }
-
-        if (tlb_addr & TLB_RECHECK) {
-            /*
-             * This is a TLB_RECHECK access, where the MMU protection
-             * covers a smaller range than a target page, and we must
-             * repeat the MMU check here.  This tlb_fill() call might
-             * longjump out if this access should cause a guest exception.
-             */
-            tlb_fill(env_cpu(env), addr, size,
-                     access_type, mmu_idx, retaddr);
-            index = tlb_index(env, mmu_idx, addr);
-            entry = tlb_entry(env, mmu_idx, addr);
-
-            tlb_addr = code_read ? entry->addr_code : entry->addr_read;
-            tlb_addr &= ~TLB_RECHECK;
-            if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
-                /* RAM access */
-                goto do_aligned_access;
-            }
-        }
-
         return io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
                         mmu_idx, addr, retaddr, access_type, op);
     }
@@ -1314,7 +1297,6 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
         return res & MAKE_64BIT_MASK(0, size * 8);
     }
 
- do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
     switch (op) {
     case MO_UB:
@@ -1509,27 +1491,6 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         if ((addr & (size - 1)) != 0) {
             goto do_unaligned_access;
         }
-
-        if (tlb_addr & TLB_RECHECK) {
-            /*
-             * This is a TLB_RECHECK access, where the MMU protection
-             * covers a smaller range than a target page, and we must
-             * repeat the MMU check here.  This tlb_fill() call might
-             * longjump out if this access should cause a guest exception.
-             */
-            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
-                     mmu_idx, retaddr);
-            index = tlb_index(env, mmu_idx, addr);
-            entry = tlb_entry(env, mmu_idx, addr);
-
-            tlb_addr = tlb_addr_write(entry);
-            tlb_addr &= ~TLB_RECHECK;
-            if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
-                /* RAM access */
-                goto do_aligned_access;
-            }
-        }
-
         io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
                   mmu_idx, val, addr, retaddr, op);
         return;
@@ -1579,7 +1540,6 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
         return;
     }
 
- do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
     switch (op) {
     case MO_UB:
```
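For the data-access side, the net effect of the patch can be seen with a self-contained toy model (hypothetical code, not QEMU source: toy_tlb_fill, toy_load, and TLBEntry are simplified stand-ins for tlb_fill, load_helper, and CPUTLBEntry). A sub-page mapping keeps TLB_INVALID_MASK set in the stored entry; the helper strips the bit only in its local copy for the access it just validated, so the next access misses again and repeats the MMU check:

```c
/*
 * Toy model (not QEMU source) of the new small-page behaviour:
 * the fill leaves TLB_INVALID_MASK set in the stored entry, the
 * load helper clears it only in its local copy, so every later
 * access misses again and repeats the MMU check.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_MASK (~(((uint64_t)1 << TARGET_PAGE_BITS) - 1))
#define TLB_INVALID_MASK ((uint64_t)1 << (TARGET_PAGE_BITS - 1))

typedef struct { uint64_t addr_read; } TLBEntry;

static int fill_count;  /* counts simulated MMU checks */

static bool tlb_hit(uint64_t tlb_addr, uint64_t addr)
{
    return (addr & TARGET_PAGE_MASK) ==
           (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

/* Toy fill: a mapping smaller than a page keeps the INVALID bit. */
static void toy_tlb_fill(TLBEntry *e, uint64_t addr, bool small_page)
{
    fill_count++;
    e->addr_read = addr & TARGET_PAGE_MASK;
    if (small_page) {
        e->addr_read |= TLB_INVALID_MASK;
    }
}

/* Toy load helper: fill on miss, strip the bit for this access only. */
static void toy_load(TLBEntry *e, uint64_t addr, bool small_page)
{
    uint64_t tlb_addr = e->addr_read;
    if (!tlb_hit(tlb_addr, addr)) {
        toy_tlb_fill(e, addr, small_page);
        tlb_addr = e->addr_read & ~TLB_INVALID_MASK;
    }
    (void)tlb_addr;  /* ...the access itself would happen here... */
}

int main(void)
{
    TLBEntry e = { UINT64_MAX };  /* empty slot: cannot hit */

    toy_load(&e, 0x1000, true);
    toy_load(&e, 0x1000, true);   /* misses again: MMU check repeated */
    printf("fills for small page: %d\n", fill_count);  /* 2 */

    fill_count = 0;
    e.addr_read = UINT64_MAX;
    toy_load(&e, 0x2000, false);
    toy_load(&e, 0x2000, false);  /* hits: no refill */
    printf("fills for full page:  %d\n", fill_count);  /* 1 */
    return 0;
}
```

The code-fetch path differs by design: rather than clearing the bit locally, get_page_addr_code() returns -1 when TLB_INVALID_MASK survives the fill, forcing the core to load and translate one insn at a time instead of caching a translation block for the page.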