author     Richard Henderson <richard.henderson@linaro.org>  2023-03-24 13:02:59 -0700
committer  Richard Henderson <richard.henderson@linaro.org>  2023-06-05 12:04:28 -0700
commit     238f43809a85a47cfbbc2e1d6aff4640fec30328 (patch)
tree       d60c778ef821469bde1ba5ed8fe18169fc2941e6 /include/exec
parent     ff0c61bf35fbeffd5c0f85a0b67b49ccb65e04f5 (diff)
tcg: Widen CPUTLBEntry comparators to 64-bits
This makes CPUTLBEntry agnostic to the address size of the guest.
When 32-bit addresses are in effect, we can simply read the low
32 bits of the 64-bit field. Similarly when we need to update
the field for setting TLB_NOTDIRTY.

For TCG backends that could in theory be big-endian, but in
practice are not (arm, loongarch, riscv), use QEMU_BUILD_BUG_ON
to document and ensure this is not accidentally missed.

For s390x, which is always big-endian, use HOST_BIG_ENDIAN anyway,
to document the reason for the adjustment.

For sparc64 and ppc64, always perform a 64-bit load, and rely on
the following 32-bit comparison to ignore the high bits.

Rearrange mips and ppc if ladders for clarity.

Reviewed-by: Anton Johansson <anjo@rev.ng>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
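As a side note, here is a minimal standalone sketch (not QEMU code; HOST_BIG_ENDIAN is rederived locally from compiler builtins for the demo) of the low-half read described above: on a little-endian host the low 32 bits of a 64-bit comparator sit in the first 32-bit word, on a big-endian host in the second, which is the "+ HOST_BIG_ENDIAN" word adjustment visible in tlb_read_idx() in the diff below.

/*
 * Standalone sketch, not QEMU code: why reading the low 32 bits of a
 * 64-bit comparator needs a word-offset adjustment on big-endian hosts.
 */
#include <inttypes.h>
#include <stdio.h>

#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define HOST_BIG_ENDIAN 1
#else
#define HOST_BIG_ENDIAN 0
#endif

int main(void)
{
    uint64_t comparator = UINT64_C(0x1122334455667788);

    /* View the 64-bit field as two 32-bit words, as the patch does. */
    const uint32_t *p = (const uint32_t *)&comparator;

    /*
     * The low half lives in word 0 on little-endian hosts and in
     * word 1 on big-endian hosts, hence indexing by HOST_BIG_ENDIAN.
     * Prints 0x55667788 on either kind of host.
     */
    printf("low 32 bits: 0x%08" PRIx32 "\n", p[HOST_BIG_ENDIAN]);
    return 0;
}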
Diffstat (limited to 'include/exec')
-rw-r--r--  include/exec/cpu-defs.h  37
-rw-r--r--  include/exec/cpu_ldst.h  19
2 files changed, 26 insertions(+), 30 deletions(-)
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index a6e0cf1..b757d37 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -65,11 +65,7 @@
/* use a fully associative victim tlb of 8 entries */
#define CPU_VTLB_SIZE 8
-#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
-#define CPU_TLB_ENTRY_BITS 4
-#else
#define CPU_TLB_ENTRY_BITS 5
-#endif
#define CPU_TLB_DYN_MIN_BITS 6
#define CPU_TLB_DYN_DEFAULT_BITS 8
@@ -95,33 +91,26 @@
# endif
/* Minimalized TLB entry for use by TCG fast path. */
-typedef struct CPUTLBEntry {
-    /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
-       bit TARGET_PAGE_BITS-1..4  : Nonzero for accesses that should not
-                                    go directly to ram.
-       bit 3                      : indicates that the entry is invalid
-       bit 2..0                   : zero
-    */
-    union {
-        struct {
-            target_ulong addr_read;
-            target_ulong addr_write;
-            target_ulong addr_code;
-            /* Addend to virtual address to get host address. IO accesses
-               use the corresponding iotlb value. */
-            uintptr_t addend;
-        };
+typedef union CPUTLBEntry {
+    struct {
+        uint64_t addr_read;
+        uint64_t addr_write;
+        uint64_t addr_code;
        /*
-         * Padding to get a power of two size, as well as index
-         * access to addr_{read,write,code}.
+         * Addend to virtual address to get host address. IO accesses
+         * use the corresponding iotlb value.
         */
-        target_ulong addr_idx[(1 << CPU_TLB_ENTRY_BITS) / TARGET_LONG_SIZE];
+        uintptr_t addend;
    };
+    /*
+     * Padding to get a power of two size, as well as index
+     * access to addr_{read,write,code}.
+     */
+    uint64_t addr_idx[(1 << CPU_TLB_ENTRY_BITS) / sizeof(uint64_t)];
} CPUTLBEntry;
QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
-
#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */
#if !defined(CONFIG_USER_ONLY)
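As an aside on the new layout, the following compile-time sketch (SketchTLBEntry is a hypothetical stand-in, not the patch itself) spells out the invariants that the QEMU_BUILD_BUG_ON above and the offsetof checks in cpu_ldst.h below rely on: the entry stays a power-of-two 32 bytes on both 32-bit and 64-bit hosts, and addr_idx[] indexed by access type overlays the matching comparator.

/*
 * Compile-time sketch only; SketchTLBEntry is a hypothetical stand-in
 * for the widened CPUTLBEntry, used to illustrate the invariants the
 * build-time checks enforce.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define SKETCH_TLB_ENTRY_BITS 5

typedef union SketchTLBEntry {
    struct {
        uint64_t addr_read;
        uint64_t addr_write;
        uint64_t addr_code;
        uintptr_t addend;
    };
    uint64_t addr_idx[(1 << SKETCH_TLB_ENTRY_BITS) / sizeof(uint64_t)];
} SketchTLBEntry;

/* The entry remains a power-of-two size regardless of host pointer width. */
static_assert(sizeof(SketchTLBEntry) == (1 << SKETCH_TLB_ENTRY_BITS),
              "entry size must be 1 << SKETCH_TLB_ENTRY_BITS");

/* addr_idx[0..2] overlay addr_read, addr_write and addr_code in order. */
static_assert(offsetof(SketchTLBEntry, addr_write) == 1 * sizeof(uint64_t),
              "addr_idx[1] must overlay addr_write");
static_assert(offsetof(SketchTLBEntry, addr_code) == 2 * sizeof(uint64_t),
              "addr_idx[2] must overlay addr_code");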
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index 5939688..a43b34e 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -334,18 +334,25 @@ static inline target_ulong tlb_read_idx(const CPUTLBEntry *entry,
{
    /* Do not rearrange the CPUTLBEntry structure members. */
    QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) !=
-                      MMU_DATA_LOAD * TARGET_LONG_SIZE);
+                      MMU_DATA_LOAD * sizeof(uint64_t));
    QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
-                      MMU_DATA_STORE * TARGET_LONG_SIZE);
+                      MMU_DATA_STORE * sizeof(uint64_t));
    QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
-                      MMU_INST_FETCH * TARGET_LONG_SIZE);
+                      MMU_INST_FETCH * sizeof(uint64_t));
-    const target_ulong *ptr = &entry->addr_idx[access_type];
-#if TCG_OVERSIZED_GUEST
-    return *ptr;
+#if TARGET_LONG_BITS == 32
+    /* Use qatomic_read, in case of addr_write; only care about low bits. */
+    const uint32_t *ptr = (uint32_t *)&entry->addr_idx[access_type];
+    ptr += HOST_BIG_ENDIAN;
+    return qatomic_read(ptr);
#else
+    const uint64_t *ptr = &entry->addr_idx[access_type];
+# if TCG_OVERSIZED_GUEST
+    return *ptr;
+# else
    /* ofs might correspond to .addr_write, so use qatomic_read */
    return qatomic_read(ptr);
+# endif
#endif
}
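Finally, a hedged sketch (not taken from any real tcg backend) of the QEMU_BUILD_BUG_ON pattern the commit message mentions for backends that could in theory be big-endian but in practice are not: the assumption that the low 32-bit half sits at byte offset 0 is documented and enforced at build time instead of silently miscompiling. QEMU_BUILD_BUG_ON is spelled here with C11 _Static_assert for the sake of a self-contained example.

/*
 * Sketch only; the offset helper below is illustrative, not the code
 * generator of any real tcg backend.
 */
#include <stdint.h>

#define QEMU_BUILD_BUG_ON(x) _Static_assert(!(x), #x)

#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define HOST_BIG_ENDIAN 1
#else
#define HOST_BIG_ENDIAN 0
#endif

/*
 * A backend emitting a 32-bit load of the comparator's low half at
 * byte offset 0 only works on little-endian hosts; asserting that at
 * build time documents the assumption.
 */
static inline int comparator_low_half_offset(void)
{
    QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
    return 0;   /* byte offset of the low 32 bits of the uint64_t comparator */
}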