author     Richard Henderson <richard.henderson@linaro.org>  2024-03-01 10:41:08 -1000
committer  Peter Maydell <peter.maydell@linaro.org>          2024-03-05 13:22:56 +0000
commit     49fa457ca5abc8c46910ec777e2c510b428a1648 (patch)
tree       917e43f220f78f6e60d250eff6fae624b775e26d
parent     a0ff4a879cd3198adb4213653d51a39d053ef2d6 (diff)
accel/tcg: Add TLB_CHECK_ALIGNED
This creates a per-page method for checking alignment.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20240301204110.656742-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
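As a usage note, not part of this patch: a target opts a page into the check when it installs the translation. A minimal sketch, assuming the CPUTLBEntryFull.tlb_fill_flags field added earlier in this series; the device_memory parameter is a hypothetical stand-in for a target-specific test such as the Arm PTE memory type mentioned in the new comment in mmu_lookup() below:

/* QEMU-internal sketch (illustrative only, not from this patch). */
static void page_set_alignment_check(CPUTLBEntryFull *full, bool device_memory)
{
    if (device_memory) {
        /* Force the slow path for this page; mmu_lookup() will then
           raise cpu_unaligned_access() for insufficiently aligned ops. */
        full->tlb_fill_flags |= TLB_CHECK_ALIGNED;
    }
}

Because TLB_CHECK_ALIGNED is part of TLB_SLOW_FLAGS_MASK, pages that never set it keep the unmodified fast path.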
 accel/tcg/cputlb.c     | 30 +++++++++++++++++++++++++++---
 include/exec/cpu-all.h |  4 +++-
 2 files changed, 30 insertions(+), 4 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index ac986cb..93b1ca8 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1453,9 +1453,8 @@ static int probe_access_internal(CPUState *cpu, vaddr addr,
     flags |= full->slow_flags[access_type];
 
     /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
-    if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))
-        ||
-        (access_type != MMU_INST_FETCH && force_mmio)) {
+    if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY | TLB_CHECK_ALIGNED))
+        || (access_type != MMU_INST_FETCH && force_mmio)) {
         *phost = NULL;
         return TLB_MMIO;
     }
@@ -1836,6 +1835,31 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
         tcg_debug_assert((flags & TLB_BSWAP) == 0);
     }
 
+    /*
+     * This alignment check differs from the one above, in that this is
+     * based on the atomicity of the operation. The intended use case is
+     * the ARM memory type field of each PTE, where access to pages with
+     * Device memory type requires alignment.
+     */
+    if (unlikely(flags & TLB_CHECK_ALIGNED)) {
+        MemOp size = l->memop & MO_SIZE;
+
+        switch (l->memop & MO_ATOM_MASK) {
+        case MO_ATOM_NONE:
+            size = MO_8;
+            break;
+        case MO_ATOM_IFALIGN_PAIR:
+        case MO_ATOM_WITHIN16_PAIR:
+            size = size ? size - 1 : 0;
+            break;
+        default:
+            break;
+        }
+        if (addr & ((1 << size) - 1)) {
+            cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
+        }
+    }
+
     return crosspage;
 }
 
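For readers tracing the switch above: the alignment enforced is derived from the access's atomicity, not just its nominal size. Below is a standalone sketch of the same mapping using simplified stand-in constants, not QEMU's real memop.h values: MO_ATOM_NONE drops the requirement to byte granularity, and the *_PAIR modes halve it, since the operation may legally split into two half-size accesses.

#include <stdio.h>

/* Simplified stand-ins for QEMU's MemOp encoding; the real values
 * live in include/exec/memop.h and differ from these. */
enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3, MO_128 = 4 }; /* log2(bytes) */
enum {
    ATOM_IFALIGN,        /* atomic only if naturally aligned */
    ATOM_IFALIGN_PAIR,   /* may split into two half-size accesses */
    ATOM_WITHIN16_PAIR,  /* pair variant within a 16-byte window */
    ATOM_NONE            /* no atomicity guaranteed at all */
};

/* Mirrors the switch in mmu_lookup(): enforce alignment to the
 * smallest unit the access is permitted to be broken into. */
static int required_align(int size, int atom)
{
    switch (atom) {
    case ATOM_NONE:
        return MO_8;                 /* byte pieces: no alignment needed */
    case ATOM_IFALIGN_PAIR:
    case ATOM_WITHIN16_PAIR:
        return size ? size - 1 : 0;  /* each half is one size class down */
    default:
        return size;                 /* whole access must be aligned */
    }
}

int main(void)
{
    /* A 16-byte load with pair atomicity needs only 8-byte alignment. */
    int align = required_align(MO_128, ATOM_IFALIGN_PAIR);
    unsigned long addr = 0x1008;

    printf("required alignment: %d bytes\n", 1 << align);
    printf("addr 0x%lx -> %s\n", addr,
           (addr & ((1UL << align) - 1)) ? "unaligned fault" : "ok");
    return 0;
}

Built with a plain cc invocation, this reports that the 16-byte pair-atomic load at 0x1008 passes, matching the size - 1 case in the patch.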
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index bc05dce..1a6510f 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -357,8 +357,10 @@ static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
 #define TLB_BSWAP            (1 << 0)
 /* Set if TLB entry contains a watchpoint. */
 #define TLB_WATCHPOINT       (1 << 1)
+/* Set if TLB entry requires aligned accesses. */
+#define TLB_CHECK_ALIGNED    (1 << 2)
 
-#define TLB_SLOW_FLAGS_MASK  (TLB_BSWAP | TLB_WATCHPOINT)
+#define TLB_SLOW_FLAGS_MASK  (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED)
 
 /* The two sets of flags must not overlap. */
 QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
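The assertion at the end is what keeps the fast-path and slow-path flag spaces disjoint as new bits like TLB_CHECK_ALIGNED are added. A minimal standalone equivalent using C11 _Static_assert, with made-up bit positions rather than QEMU's real assignments (compile with -std=c11 or newer):

#include <stdio.h>

/* Made-up bit assignments for the example; QEMU's real flags differ. */
#define FAST_FLAGS_MASK     (7 << 0)   /* flags kept in the fast path */
#define SLOW_BSWAP          (1 << 3)
#define SLOW_WATCHPOINT     (1 << 4)
#define SLOW_CHECK_ALIGNED  (1 << 5)
#define SLOW_FLAGS_MASK     (SLOW_BSWAP | SLOW_WATCHPOINT | SLOW_CHECK_ALIGNED)

/* Compile-time check in the spirit of QEMU_BUILD_BUG_ON above:
 * the build fails if any bit appears in both masks. */
_Static_assert((FAST_FLAGS_MASK & SLOW_FLAGS_MASK) == 0,
               "fast-path and slow-path TLB flags must not overlap");

int main(void)
{
    printf("masks are disjoint: 0x%x / 0x%x\n",
           FAST_FLAGS_MASK, SLOW_FLAGS_MASK);
    return 0;
}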