aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRichard Henderson <richard.henderson@linaro.org>2019-09-13 12:07:40 -0400
committerRichard Henderson <richard.henderson@linaro.org>2019-10-28 10:35:20 +0100
commitbb8e3ea6fa8f616678133c9e8c8c3bf232c179ec (patch)
treebf2bb8ec6c9416020b6b6ae455af0119e6509e28
parentf048b8a7cefcabb0e55210a76775f9be57c4d3f4 (diff)
downloadqemu-bb8e3ea6fa8f616678133c9e8c8c3bf232c179ec.zip
qemu-bb8e3ea6fa8f616678133c9e8c8c3bf232c179ec.tar.gz
qemu-bb8e3ea6fa8f616678133c9e8c8c3bf232c179ec.tar.bz2
exec: Cache TARGET_PAGE_MASK for TARGET_PAGE_BITS_VARY
This eliminates a set of runtime shifts. It turns out that we require TARGET_PAGE_MASK more often than TARGET_PAGE_SIZE, so redefine TARGET_PAGE_SIZE based on TARGET_PAGE_MASK instead of the other way around.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
-rw-r--r--exec-vary.c1
-rw-r--r--include/exec/cpu-all.h8
2 files changed, 7 insertions, 2 deletions
diff --git a/exec-vary.c b/exec-vary.c
index 8725fd0..ff905f2 100644
--- a/exec-vary.c
+++ b/exec-vary.c
@@ -96,6 +96,7 @@ void finalize_target_page_bits(void)
if (init_target_page.bits == 0) {
init_target_page.bits = TARGET_PAGE_BITS_MIN;
}
+ init_target_page.mask = (target_long)-1 << init_target_page.bits;
init_target_page.decided = true;
/*
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 0543359..e96781a 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -213,6 +213,7 @@ static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val
typedef struct {
bool decided;
int bits;
+ target_long mask;
} TargetPageBits;
#if defined(CONFIG_ATTRIBUTE_ALIAS) || !defined(IN_EXEC_VARY)
extern const TargetPageBits target_page;
@@ -221,15 +222,18 @@ extern TargetPageBits target_page;
#endif
#ifdef CONFIG_DEBUG_TCG
#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; })
+#define TARGET_PAGE_MASK ({ assert(target_page.decided); target_page.mask; })
#else
#define TARGET_PAGE_BITS target_page.bits
+#define TARGET_PAGE_MASK target_page.mask
#endif
+#define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK)
#else
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
+#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
+#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
#endif
-#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
-#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even