author     Richard Henderson <richard.henderson@linaro.org>    2021-05-09 17:16:15 +0200
committer  Peter Maydell <peter.maydell@linaro.org>            2021-05-25 16:01:43 +0100
commit     c13b27d826797ee12dcf4e4c289a7a6c401e620b (patch)
tree       754edde1988b2e3521654cd46beae3c99645e0cc /accel
parent     600b819f235d6b6eb33fc33e09fe64f53eb9a9a6 (diff)
accel/tlb: Add tlb_flush_range_by_mmuidx_all_cpus_synced()
Forward tlb_flush_page_bits_by_mmuidx_all_cpus_synced to
tlb_flush_range_by_mmuidx_all_cpus_synced passing TARGET_PAGE_SIZE.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20210509151618.2331764-7-f4bug@amsat.org
Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org>
[PMD: Split from bigger patch]
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
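For illustration only (not part of this patch): a minimal sketch of how target code
might call the new entry point. The helper name flush_two_pages_all_cpus_synced, the
idxmap value, and the bits value below are assumptions; only the signature of
tlb_flush_range_by_mmuidx_all_cpus_synced() is taken from this commit.

#include "qemu/osdep.h"
#include "exec/exec-all.h"

/*
 * Hypothetical target helper: flush a two-page virtual range in the TLBs
 * of every CPU and wait for the flush to complete on all of them.
 */
static void flush_two_pages_all_cpus_synced(CPUState *src_cpu, target_ulong addr)
{
    uint16_t idxmap = 1 << 0;          /* assumption: only MMU index 0 is affected */
    unsigned bits = TARGET_LONG_BITS;  /* all address bits are significant */

    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu,
                                              addr & TARGET_PAGE_MASK,
                                              2 * TARGET_PAGE_SIZE,
                                              idxmap, bits);
}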
Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/cputlb.c  27
1 file changed, 20 insertions(+), 7 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index a63cf18..4b3ac70 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -887,16 +887,20 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
idxmap, bits);
}
-void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
- target_ulong addr,
- uint16_t idxmap,
- unsigned bits)
+void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+ target_ulong addr,
+ target_ulong len,
+ uint16_t idxmap,
+ unsigned bits)
{
TLBFlushRangeData d, *p;
CPUState *dst_cpu;
- /* If all bits are significant, this devolves to tlb_flush_page. */
- if (bits >= TARGET_LONG_BITS) {
+ /*
+ * If all bits are significant, and len is small,
+ * this devolves to tlb_flush_page.
+ */
+ if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
return;
}
@@ -908,7 +912,7 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
/* This should already be page aligned */
d.addr = addr & TARGET_PAGE_MASK;
- d.len = TARGET_PAGE_SIZE;
+ d.len = len;
d.idxmap = idxmap;
d.bits = bits;
@@ -926,6 +930,15 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
RUN_ON_CPU_HOST_PTR(p));
}
+void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+ target_ulong addr,
+ uint16_t idxmap,
+ unsigned bits)
+{
+ tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
+ idxmap, bits);
+}
+
/* update the TLBs so that writes to code in the virtual page 'addr'
can be detected */
void tlb_protect_code(ram_addr_t ram_addr)