author     Emilio G. Cota <cota@braap.org>                    2019-01-16 12:01:13 -0500
committer  Richard Henderson <richard.henderson@linaro.org>   2019-01-28 07:03:34 -0800
commit     86e1eff8bc31a88a089d05b16277eafd7a3cf95b (patch)
tree       5a5b82301e2ee910eda6ced84727312ac6d7d91f /include/exec
parent     3cea94bbc97180dd2de78993982645b11405b3b2 (diff)
tcg: introduce dynamic TLB sizing
Disabled in all TCG backends for now.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Message-Id: <20190116170114.26802-3-cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'include/exec')
 include/exec/cpu-defs.h  (-rw-r--r--)  | 57
 include/exec/cpu_ldst.h  (-rw-r--r--)  | 21
 2 files changed, 76 insertions, 2 deletions
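
The bounds introduced in cpu-defs.h below size the dynamic TLB between 2^6 and, on 64-bit hosts, 2^22 entries per MMU index. As a standalone sketch of what those bounds mean in covered guest address space, assuming 4 KiB target pages (TARGET_PAGE_BITS == 12) as the patch comment does (illustrative C, not QEMU code):

/*
 * Standalone sketch (not QEMU code): guest address space covered by the
 * dynamic TLB size bounds, assuming TARGET_PAGE_BITS == 12 (4 KiB pages).
 */
#include <inttypes.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12

int main(void)
{
    /* CPU_TLB_DYN_MIN_BITS, CPU_TLB_DYN_DEFAULT_BITS, CPU_TLB_DYN_MAX_BITS (64-bit host) */
    static const int bits[] = { 6, 8, 22 };

    for (size_t i = 0; i < sizeof(bits) / sizeof(bits[0]); i++) {
        uint64_t entries = UINT64_C(1) << bits[i];
        /* Each TLB entry maps one page, so coverage is entries * page size. */
        uint64_t covered = entries << TARGET_PAGE_BITS;
        printf("2^%-2d entries -> %11" PRIu64 " bytes (%8.2f MiB)\n",
               bits[i], covered, covered / (1024.0 * 1024.0));
    }
    return 0;
}

With 2^22 entries the table covers 2^34 bytes (16 GiB), matching the figure in the comment below; the default of 2^8 entries covers only 1 MiB, which is why the table is resized at run time (see tlb_mmu_resize_locked(), referenced by the CPUTLBWindow comment).
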
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 6a60f94..191a1e0 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -67,6 +67,28 @@ typedef uint64_t target_ulong;
#define CPU_TLB_ENTRY_BITS 5
#endif
+#if TCG_TARGET_IMPLEMENTS_DYN_TLB
+#define CPU_TLB_DYN_MIN_BITS 6
+#define CPU_TLB_DYN_DEFAULT_BITS 8
+
+
+# if HOST_LONG_BITS == 32
+/* Make sure we do not require a double-word shift for the TLB load */
+# define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
+# else /* HOST_LONG_BITS == 64 */
+/*
+ * Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) ==
+ * 2**34 == 16G of address space. This is roughly what one would expect a
+ * TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel
+ * Skylake's Level-2 STLB has 16 1G entries.
+ * Also, make sure we do not size the TLB past the guest's address space.
+ */
+# define CPU_TLB_DYN_MAX_BITS \
+ MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
+# endif
+
+#else /* !TCG_TARGET_IMPLEMENTS_DYN_TLB */
+
/* TCG_TARGET_TLB_DISPLACEMENT_BITS is used in CPU_TLB_BITS to ensure that
* the TLB is not unnecessarily small, but still small enough for the
* TLB lookup instruction sequence used by the TCG target.
@@ -98,6 +120,7 @@ typedef uint64_t target_ulong;
NB_MMU_MODES <= 8 ? 3 : 4))
#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
+#endif /* TCG_TARGET_IMPLEMENTS_DYN_TLB */
typedef struct CPUTLBEntry {
/* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
@@ -141,6 +164,18 @@ typedef struct CPUIOTLBEntry {
MemTxAttrs attrs;
} CPUIOTLBEntry;
+/**
+ * struct CPUTLBWindow
+ * @begin_ns: host time (in ns) at the beginning of the time window
+ * @max_entries: maximum number of entries observed in the window
+ *
+ * See also: tlb_mmu_resize_locked()
+ */
+typedef struct CPUTLBWindow {
+ int64_t begin_ns;
+ size_t max_entries;
+} CPUTLBWindow;
+
typedef struct CPUTLBDesc {
/*
* Describe a region covering all of the large pages allocated
@@ -152,6 +187,10 @@ typedef struct CPUTLBDesc {
target_ulong large_page_mask;
/* The next index to use in the tlb victim table. */
size_t vindex;
+#if TCG_TARGET_IMPLEMENTS_DYN_TLB
+ CPUTLBWindow window;
+ size_t n_used_entries;
+#endif
} CPUTLBDesc;
/*
@@ -176,6 +215,20 @@ typedef struct CPUTLBCommon {
size_t elide_flush_count;
} CPUTLBCommon;
+#if TCG_TARGET_IMPLEMENTS_DYN_TLB
+# define CPU_TLB \
+ /* tlb_mask[i] contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */ \
+ uintptr_t tlb_mask[NB_MMU_MODES]; \
+ CPUTLBEntry *tlb_table[NB_MMU_MODES];
+# define CPU_IOTLB \
+ CPUIOTLBEntry *iotlb[NB_MMU_MODES];
+#else
+# define CPU_TLB \
+ CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];
+# define CPU_IOTLB \
+ CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE];
+#endif
+
/*
* The meaning of each of the MMU modes is defined in the target code.
* Note that NB_MMU_MODES is not yet defined; we can only reference it
@@ -184,9 +237,9 @@ typedef struct CPUTLBCommon {
#define CPU_COMMON_TLB \
CPUTLBCommon tlb_c; \
CPUTLBDesc tlb_d[NB_MMU_MODES]; \
- CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
+ CPU_TLB \
CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \
- CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
+ CPU_IOTLB \
CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE];
#else
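
Before the cpu_ldst.h half of the patch, a minimal sketch of the two env layouts that the CPU_TLB and CPU_IOTLB macros above select between. Field names mirror the patch, but NB_MMU_MODES, the entry type, and the init helper are stand-ins for illustration, not the real QEMU definitions:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NB_MMU_MODES 4                    /* stand-in; target-specific in QEMU */
#define CPU_TLB_ENTRY_BITS 5              /* log2 of the 32-byte entry assumed here */
#define CPU_TLB_DYN_DEFAULT_BITS 8

typedef struct CPUTLBEntry {
    uint64_t addr_read, addr_write, addr_code, addend;   /* simplified */
} CPUTLBEntry;

/* Dynamic layout: a heap-allocated table per MMU index plus a mask encoding its size. */
typedef struct EnvDyn {
    uintptr_t tlb_mask[NB_MMU_MODES];     /* (n_entries - 1) << CPU_TLB_ENTRY_BITS */
    CPUTLBEntry *tlb_table[NB_MMU_MODES];
} EnvDyn;

/* Fixed layout (the pre-existing scheme): arrays embedded directly in env. */
#define CPU_TLB_SIZE 256                  /* stand-in for 1 << CPU_TLB_BITS */
typedef struct EnvFixed {
    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];
} EnvFixed;

/* Allocate one dynamic table at the default size and record its mask. */
static void env_dyn_init_one(EnvDyn *env, int mmu_idx)
{
    size_t n = (size_t)1 << CPU_TLB_DYN_DEFAULT_BITS;

    env->tlb_table[mmu_idx] = calloc(n, sizeof(CPUTLBEntry));
    env->tlb_mask[mmu_idx] = (uintptr_t)(n - 1) << CPU_TLB_ENTRY_BITS;
}

int main(void)
{
    EnvDyn env = { { 0 }, { NULL } };

    env_dyn_init_one(&env, 0);
    printf("mmu_idx 0: mask = 0x%" PRIxPTR ", fixed layout uses %zu bytes per mode\n",
           env.tlb_mask[0], sizeof(((EnvFixed *)0)->tlb_table[0]));
    free(env.tlb_table[0]);
    return 0;
}

The pre-shifted mask encoding (size minus one, shifted by the entry size) appears chosen so the TCG fast path can mask an already-shifted guest address and use the result directly as a byte offset into tlb_table.
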
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index 9590684..83b2907 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -135,6 +135,21 @@ static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
#endif
}
+#if TCG_TARGET_IMPLEMENTS_DYN_TLB
+/* Find the TLB index corresponding to the mmu_idx + address pair. */
+static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
+ target_ulong addr)
+{
+ uintptr_t size_mask = env->tlb_mask[mmu_idx] >> CPU_TLB_ENTRY_BITS;
+
+ return (addr >> TARGET_PAGE_BITS) & size_mask;
+}
+
+static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx)
+{
+ return (env->tlb_mask[mmu_idx] >> CPU_TLB_ENTRY_BITS) + 1;
+}
+#else
/* Find the TLB index corresponding to the mmu_idx + address pair. */
static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
target_ulong addr)
@@ -142,6 +157,12 @@ static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
return (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
}
+static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx)
+{
+ return CPU_TLB_SIZE;
+}
+#endif /* TCG_TARGET_IMPLEMENTS_DYN_TLB */
+
/* Find the TLB entry corresponding to the mmu_idx + address pair. */
static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
target_ulong addr)
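
To close, a quick standalone check of the mask arithmetic used by the new tlb_index() and tlb_n_entries() helpers above. Here env->tlb_mask is replaced by a local variable, and the constants (TARGET_PAGE_BITS == 12, CPU_TLB_ENTRY_BITS == 5) are assumptions for illustration, not the real per-target values:

#include <inttypes.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define CPU_TLB_ENTRY_BITS 5

int main(void)
{
    /* A table of 2^8 entries, encoded the way the patch stores env->tlb_mask[mmu_idx]. */
    size_t n_entries = (size_t)1 << 8;
    uintptr_t tlb_mask = (uintptr_t)(n_entries - 1) << CPU_TLB_ENTRY_BITS;

    uint64_t addr = UINT64_C(0xdeadbeef000);

    /* tlb_index(): drop the page offset, then mask with (n_entries - 1). */
    uintptr_t size_mask = tlb_mask >> CPU_TLB_ENTRY_BITS;
    size_t index = (size_t)((addr >> TARGET_PAGE_BITS) & size_mask);

    /* tlb_n_entries(): recover the entry count from the stored mask. */
    size_t recovered = (size_t)(tlb_mask >> CPU_TLB_ENTRY_BITS) + 1;

    printf("addr 0x%" PRIx64 " -> index %zu of %zu entries\n",
           addr, index, recovered);
    return 0;
}

Wrapping the index computation behind tlb_index()/tlb_n_entries() keeps the fixed-size and dynamic configurations interchangeable for callers such as tlb_entry().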