aboutsummaryrefslogtreecommitdiff
path: root/accel/tcg
diff options
context:
space:
mode:
authorLIU Zhiwei <zhiwei_liu@linux.alibaba.com>2023-09-01 14:01:18 +0800
committerRichard Henderson <richard.henderson@linaro.org>2023-09-15 05:26:50 -0700
commitdff1ab68d8c5d4703e07018f504fce6944c529a4 (patch)
tree5d47372846025597e5c2581763c6d1d18fb78aeb /accel/tcg
parent0e5903436de712844b0e6cdd862b499c767e09e9 (diff)
downloadqemu-dff1ab68d8c5d4703e07018f504fce6944c529a4.zip
qemu-dff1ab68d8c5d4703e07018f504fce6944c529a4.tar.gz
qemu-dff1ab68d8c5d4703e07018f504fce6944c529a4.tar.bz2
accel/tcg: Fix the comment for CPUTLBEntryFull
When the memory region is RAM, the lower TARGET_PAGE_BITS are not the physical section number. Instead, their value is always 0. Add a comment and an assert to make this clear.

Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Message-Id: <20230901060118.379-1-zhiwei_liu@linux.alibaba.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'accel/tcg')
-rw-r--r--accel/tcg/cputlb.c11
1 files changed, 7 insertions, 4 deletions
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index c643d66..03e27b2 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1193,6 +1193,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
write_flags = read_flags;
if (is_ram) {
iotlb = memory_region_get_ram_addr(section->mr) + xlat;
+ assert(!(iotlb & ~TARGET_PAGE_MASK));
/*
* Computing is_clean is expensive; avoid all that unless
* the page is actually writable.
@@ -1255,10 +1256,12 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
/* refill the tlb */
/*
- * At this point iotlb contains a physical section number in the lower
- * TARGET_PAGE_BITS, and either
- * + the ram_addr_t of the page base of the target RAM (RAM)
- * + the offset within section->mr of the page base (I/O, ROMD)
+ * When memory region is ram, iotlb contains a TARGET_PAGE_BITS
+ * aligned ram_addr_t of the page base of the target RAM.
+ * Otherwise, iotlb contains
+ * - a physical section number in the lower TARGET_PAGE_BITS
+ * - the offset within section->mr of the page base (I/O, ROMD) with the
+ * TARGET_PAGE_BITS masked off.
* We subtract addr_page (which is page aligned and thus won't
* disturb the low bits) to give an offset which can be added to the
* (non-page-aligned) vaddr of the eventual memory access to get