author    Richard Henderson <richard.henderson@linaro.org>  2023-08-27 17:28:16 -0700
committer Richard Henderson <richard.henderson@linaro.org>  2023-09-16 14:57:15 +0000
commit    da6aef48d9a1bcda301a3a922b240a2c1aba8026
tree      6ede686d1ac76b88d38245b1a3483048b88bfb4f /accel/tcg
parent    e8967b6152b2b1fd953943ad03189d8a8b7f637b
accel/tcg: Simplify tlb_plugin_lookup
Now that we defer address space update and tlb_flush until the next
async_run_on_cpu, the plugin run at the end of the instruction no
longer has to contend with a flushed tlb. Therefore, delete SavedIOTLB
entirely.

Properly return false from tlb_plugin_lookup when we do not have a
tlb match.

Fixes a bug in which SavedIOTLB had stale data, because there were
multiple i/o accesses within a single insn.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
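A minimal sketch of the new contract (the wrapper name query_plugin_hwaddr
is hypothetical and this is not the actual plugins/api.c caller; the
tlb_plugin_lookup signature matches the diff below): a false return now
means "no TLB match", never stale SavedIOTLB data.

    /*
     * Illustration only: caller name is an assumption, but the
     * tlb_plugin_lookup() signature is taken from the diff below.
     */
    static bool query_plugin_hwaddr(CPUState *cpu, vaddr addr, int mmu_idx,
                                    bool is_store,
                                    struct qemu_plugin_hwaddr *out)
    {
        if (!tlb_plugin_lookup(cpu, addr, mmu_idx, is_store, out)) {
            return false;   /* no match: report nothing rather than stale data */
        }
        /* out->is_io selects between out->v.io and out->v.ram */
        return true;
    }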
Diffstat (limited to 'accel/tcg')
-rw-r--r--  accel/tcg/cputlb.c  79
1 file changed, 23 insertions(+), 56 deletions(-)
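For reference, the rewritten MMIO branch below unpacks the single packed
full->xlat_section word into a section index and an offset. A self-contained
sketch of that split (the TARGET_PAGE_BITS value and helper names here are
assumptions for illustration; the masking mirrors the diff):

    #include <stdint.h>

    /* Assumption for illustration: 4 KiB target pages. */
    #define TARGET_PAGE_BITS 12
    #define TARGET_PAGE_MASK (~(((uint64_t)1 << TARGET_PAGE_BITS) - 1))

    /* Low bits: index passed to iotlb_to_section() to recover the section. */
    static inline uint64_t xlat_section_index(uint64_t xlat_section)
    {
        return xlat_section & ~TARGET_PAGE_MASK;
    }

    /*
     * Page-aligned high bits plus the guest address: the value that
     * tlb_plugin_lookup() stores in data->v.io.offset.
     */
    static inline uint64_t xlat_io_offset(uint64_t xlat_section, uint64_t addr)
    {
        return (xlat_section & TARGET_PAGE_MASK) + addr;
    }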
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 03e27b2..9cbcd20 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1367,21 +1367,6 @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
}
}
-/*
- * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
- * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
- * because of the side effect of io_writex changing memory layout.
- */
-static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
- hwaddr mr_offset)
-{
-#ifdef CONFIG_PLUGIN
- SavedIOTLB *saved = &cs->saved_iotlb;
- saved->section = section;
- saved->mr_offset = mr_offset;
-#endif
-}
-
static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
int mmu_idx, vaddr addr, uintptr_t retaddr,
MMUAccessType access_type, MemOp op)
@@ -1401,12 +1386,6 @@ static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
cpu_io_recompile(cpu, retaddr);
}
- /*
- * The memory_region_dispatch may trigger a flush/resize
- * so for plugins we save the iotlb_data just in case.
- */
- save_iotlb_data(cpu, section, mr_offset);
-
{
QEMU_IOTHREAD_LOCK_GUARD();
r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
@@ -1441,12 +1420,6 @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
}
cpu->mem_io_pc = retaddr;
- /*
- * The memory_region_dispatch may trigger a flush/resize
- * so for plugins we save the iotlb_data just in case.
- */
- save_iotlb_data(cpu, section, mr_offset);
-
{
QEMU_IOTHREAD_LOCK_GUARD();
r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
@@ -1729,45 +1702,39 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
* in the softmmu lookup code (or helper). We don't handle re-fills or
* checking the victim table. This is purely informational.
*
- * This almost never fails as the memory access being instrumented
- * should have just filled the TLB. The one corner case is io_writex
- * which can cause TLB flushes and potential resizing of the TLBs
- * losing the information we need. In those cases we need to recover
- * data from a copy of the CPUTLBEntryFull. As long as this always occurs
- * from the same thread (which a mem callback will be) this is safe.
+ * The one corner case is i/o write, which can cause changes to the
+ * address space. Those changes, and the corresponding tlb flush,
+ * should be delayed until the next TB, so even then this ought not fail.
+ * But check, Just in Case.
*/
-
bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
bool is_store, struct qemu_plugin_hwaddr *data)
{
CPUArchState *env = cpu->env_ptr;
CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
uintptr_t index = tlb_index(env, mmu_idx, addr);
- uint64_t tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
-
- if (likely(tlb_hit(tlb_addr, addr))) {
- /* We must have an iotlb entry for MMIO */
- if (tlb_addr & TLB_MMIO) {
- CPUTLBEntryFull *full;
- full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
- data->is_io = true;
- data->v.io.section =
- iotlb_to_section(cpu, full->xlat_section, full->attrs);
- data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
- } else {
- data->is_io = false;
- data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
- }
- return true;
- } else {
- SavedIOTLB *saved = &cpu->saved_iotlb;
+ MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
+ uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
+
+ if (unlikely(!tlb_hit(tlb_addr, addr))) {
+ return false;
+ }
+
+ /* We must have an iotlb entry for MMIO */
+ if (tlb_addr & TLB_MMIO) {
+ CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ hwaddr xlat = full->xlat_section;
+
data->is_io = true;
- data->v.io.section = saved->section;
- data->v.io.offset = saved->mr_offset;
- return true;
+ data->v.io.offset = (xlat & TARGET_PAGE_MASK) + addr;
+ data->v.io.section =
+ iotlb_to_section(cpu, xlat & ~TARGET_PAGE_MASK, full->attrs);
+ } else {
+ data->is_io = false;
+ data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
}
+ return true;
}
-
#endif
/*