From 1308e0267174daaf557dac8366ea2ba615d3337f Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Wed, 17 Oct 2018 11:48:40 -0700
Subject: cputlb: Split large page tracking per mmu_idx

The set of large pages in the kernel is probably not the same as the
set of large pages in the application.  Forcing one range to cover
both will flush more often than necessary.

This allows tlb_flush_page_async_work to flush just the one mmu_idx
implicated, which in turn allows us to remove
tlb_check_page_and_flush_by_mmuidx_async_work.

Tested-by: Emilio G. Cota
Reviewed-by: Emilio G. Cota
Signed-off-by: Richard Henderson
---
 include/exec/cpu-defs.h | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

(limited to 'include/exec/cpu-defs.h')

diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 659c73d..df8ae18 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -141,6 +141,17 @@ typedef struct CPUIOTLBEntry {
     MemTxAttrs attrs;
 } CPUIOTLBEntry;
 
+typedef struct CPUTLBDesc {
+    /*
+     * Describe a region covering all of the large pages allocated
+     * into the tlb.  When any page within this region is flushed,
+     * we must flush the entire tlb.  The region is matched if
+     * (addr & large_page_mask) == large_page_addr.
+     */
+    target_ulong large_page_addr;
+    target_ulong large_page_mask;
+} CPUTLBDesc;
+
 /*
  * Data elements that are shared between all MMU modes.
  */
@@ -162,13 +173,12 @@ typedef struct CPUTLBCommon {
  */
 #define CPU_COMMON_TLB                                                  \
     CPUTLBCommon tlb_c;                                                 \
+    CPUTLBDesc tlb_d[NB_MMU_MODES];                                     \
     CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                  \
     CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE];               \
     CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE];                    \
     CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE];                 \
     size_t tlb_flush_count;                                             \
-    target_ulong tlb_flush_addr;                                        \
-    target_ulong tlb_flush_mask;                                        \
     target_ulong vtlb_index;                                            \
 
 #else
--
cgit v1.1
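
Note: the cputlb.c half of this commit, which actually reads and updates the
new per-mmu_idx descriptors, is not shown above because this cgit view is
limited to include/exec/cpu-defs.h.  As a rough illustration of what the
CPUTLBDesc comment describes, here is a self-contained sketch -- not QEMU
source.  It models target_ulong as uint64_t, uses made-up helper names
(record_large_page is loosely modeled on cputlb.c's tlb_add_large_page,
flush_needs_full_tlb is invented), and uses "kernel"/"user" mmu_idx roles
purely for illustration; which mmu_idx means what is target-specific.

/*
 * Illustrative sketch only -- not QEMU code.  target_ulong is modeled as
 * uint64_t and the helpers are simplified stand-ins for the real logic in
 * accel/tcg/cputlb.c, which this cgit view does not include.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t target_ulong;

typedef struct CPUTLBDesc {
    target_ulong large_page_addr;   /* base of tracked region, -1 = none */
    target_ulong large_page_mask;   /* mask of tracked region, -1 = none */
} CPUTLBDesc;

/* Record a large page of 'size' bytes at 'addr' into one mmu_idx's
 * descriptor, widening the tracked region until it covers both the
 * previously recorded pages and the new one. */
static void record_large_page(CPUTLBDesc *desc, target_ulong addr,
                              target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (desc->large_page_addr == (target_ulong)-1) {
        /* First large page recorded for this mmu_idx. */
        desc->large_page_addr = addr & mask;
        desc->large_page_mask = mask;
        return;
    }
    mask &= desc->large_page_mask;
    while (((addr ^ desc->large_page_addr) & mask) != 0) {
        mask <<= 1;          /* widen until both addresses agree */
    }
    desc->large_page_addr &= mask;
    desc->large_page_mask = mask;
}

/* The check from the CPUTLBDesc comment: does flushing 'addr' fall inside
 * the tracked region, forcing a full flush of this mmu_idx's TLB? */
static int flush_needs_full_tlb(const CPUTLBDesc *desc, target_ulong addr)
{
    return (addr & desc->large_page_mask) == desc->large_page_addr;
}

int main(void)
{
    /* One descriptor per mmu_idx, initialised to "no large pages". */
    CPUTLBDesc kernel_idx = { (target_ulong)-1, (target_ulong)-1 };
    CPUTLBDesc user_idx   = { (target_ulong)-1, (target_ulong)-1 };

    /* A 2MB kernel mapping dirties only the kernel mmu_idx's tracker. */
    record_large_page(&kernel_idx, 0xffff800000200000ULL, 2 << 20);

    /* Flushing a user page therefore no longer forces a full flush in
     * either mode, whereas a single shared tracker could have. */
    printf("kernel idx needs full flush: %d\n",
           flush_needs_full_tlb(&kernel_idx, 0x00007f0000001000ULL));
    printf("user idx needs full flush:   %d\n",
           flush_needs_full_tlb(&user_idx, 0x00007f0000001000ULL));
    return 0;
}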