Diffstat (limited to 'accel/tcg/cputlb.c')
-rw-r--r-- | accel/tcg/cputlb.c | 34
1 file changed, 18 insertions, 16 deletions
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index d9fb68d..5f6d7c6 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -19,11 +19,14 @@
 
 #include "qemu/osdep.h"
 #include "qemu/main-loop.h"
+#include "qemu/target-info.h"
 #include "accel/tcg/cpu-ops.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/iommu.h"
+#include "accel/tcg/probe.h"
 #include "exec/page-protection.h"
 #include "system/memory.h"
-#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/cpu-ldst-common.h"
+#include "accel/tcg/cpu-mmu-index.h"
 #include "exec/cputlb.h"
 #include "exec/tb-flush.h"
 #include "system/ram_addr.h"
@@ -43,7 +46,6 @@
 #include "tb-internal.h"
 #include "tlb-bounds.h"
 #include "internal-common.h"
-#include "internal-target.h"
 #ifdef CONFIG_PLUGIN
 #include "qemu/plugin-memory.h"
 #endif
@@ -771,19 +773,19 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
 
     assert_cpu_is_self(cpu);
 
+    /* If no page bits are significant, this devolves to tlb_flush. */
+    if (bits < TARGET_PAGE_BITS) {
+        tlb_flush_by_mmuidx(cpu, idxmap);
+        return;
+    }
     /*
      * If all bits are significant, and len is small,
      * this devolves to tlb_flush_page.
      */
-    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
+    if (len <= TARGET_PAGE_SIZE && bits >= target_long_bits()) {
         tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
         return;
     }
-    /* If no page bits are significant, this devolves to tlb_flush. */
-    if (bits < TARGET_PAGE_BITS) {
-        tlb_flush_by_mmuidx(cpu, idxmap);
-        return;
-    }
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
@@ -809,19 +811,19 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
     TLBFlushRangeData d, *p;
     CPUState *dst_cpu;
 
+    /* If no page bits are significant, this devolves to tlb_flush. */
+    if (bits < TARGET_PAGE_BITS) {
+        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
+        return;
+    }
     /*
      * If all bits are significant, and len is small,
      * this devolves to tlb_flush_page.
      */
-    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
+    if (len <= TARGET_PAGE_SIZE && bits >= target_long_bits()) {
         tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
         return;
     }
-    /* If no page bits are significant, this devolves to tlb_flush. */
-    if (bits < TARGET_PAGE_BITS) {
-        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
-        return;
-    }
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
@@ -1340,7 +1342,7 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
 
     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
-        tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
+        tb_invalidate_phys_range_fast(cpu, ram_addr, size, retaddr);
     }
 
     /*
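The two range-flush hunks reorder the degenerate-case checks: the "no page bits significant" test now runs first, so the single-page fast path can assume at least page granularity, and the compile-time TARGET_LONG_BITS constant is replaced by the runtime target_long_bits() query. A minimal standalone sketch of that dispatch order follows; the helper names, constants, and classify_flush() wrapper are hypothetical stand-ins for QEMU's internals, not the actual API.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for QEMU's target parameters. */
    #define TARGET_PAGE_BITS 12
    #define TARGET_PAGE_SIZE (1u << TARGET_PAGE_BITS)

    /* In QEMU this is a runtime query; fixed here for the sketch. */
    static unsigned target_long_bits(void) { return 64; }

    enum flush_kind { FLUSH_ALL, FLUSH_PAGE, FLUSH_RANGE };

    /*
     * Mirror of the dispatch order after the patch: the degenerate
     * "no page bits significant" case is tested first, so the
     * single-page fast path never sees bits < TARGET_PAGE_BITS.
     */
    static enum flush_kind classify_flush(uint64_t len, unsigned bits)
    {
        if (bits < TARGET_PAGE_BITS) {
            return FLUSH_ALL;      /* devolves to tlb_flush */
        }
        if (len <= TARGET_PAGE_SIZE && bits >= target_long_bits()) {
            return FLUSH_PAGE;     /* devolves to tlb_flush_page */
        }
        return FLUSH_RANGE;        /* genuine range flush */
    }

    int main(void)
    {
        printf("%d\n", classify_flush(4096, 8));    /* FLUSH_ALL (0) */
        printf("%d\n", classify_flush(4096, 64));   /* FLUSH_PAGE (1) */
        printf("%d\n", classify_flush(65536, 64));  /* FLUSH_RANGE (2) */
        return 0;
    }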