author    Richard Henderson <richard.henderson@linaro.org>  2023-03-06 03:20:46 +0300
committer Richard Henderson <richard.henderson@linaro.org>  2023-03-28 15:23:10 -0700
commit    f7e2add5fd29ab067ab39c3d31a911ffcf1f7b17 (patch)
tree      1d6db240e5bb345e9ddb071bd493c252a82368bf /accel
parent    10310cbd6298def2dafd40069e27df2d25e233c5 (diff)
accel/tcg: Pass last not end to PAGE_FOR_EACH_TB
Pass the address of the last byte to be changed, rather than the first
address past the last byte. This avoids overflow when the last page of
the address space is involved.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
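To illustrate the overflow that the inclusive "last" avoids, here is a minimal,
self-contained sketch. The uint32_t tb_page_addr_t and the concrete addresses
are illustrative assumptions for the demo, not QEMU's real definitions:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical 32-bit physical address type, for illustration only. */
    typedef uint32_t tb_page_addr_t;
    #define TARGET_PAGE_SIZE 0x1000u

    int main(void)
    {
        /* Invalidating the last page of a 32-bit address space. */
        tb_page_addr_t start = 0xfffff000u;

        /* Old convention: one past the last byte.  The addition wraps to 0,
         * so an end-exclusive range query sees an empty/inverted range. */
        tb_page_addr_t end = start + TARGET_PAGE_SIZE;

        /* New convention: address of the last byte.  No wraparound. */
        tb_page_addr_t last = start + TARGET_PAGE_SIZE - 1;

        printf("end  = 0x%08" PRIx32 " (wrapped)\n", end);
        printf("last = 0x%08" PRIx32 "\n", last);
        return 0;
    }

Built with any C99 compiler, this prints end = 0x00000000 and last = 0xffffffff,
showing why the end-exclusive form cannot describe a range that reaches the top
of the address space.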
Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/tb-maint.c | 28
1 file changed, 16 insertions(+), 12 deletions(-)
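The diff below moves the exclusive-to-inclusive conversion out of the iterator
helpers and into the callers. Sketched roughly from the hunks that follow
(surrounding context simplified), the caller-side pattern is:

    /* Caller that starts from an exclusive end: convert once, up front. */
    tb_page_addr_t last = end - 1;
    PAGE_FOR_EACH_TB(start, last, unused, tb, n) {
        /* ... */
    }

    /* Caller that works on a single page: last byte of addr's page. */
    last = addr | ~TARGET_PAGE_MASK;
    addr &= TARGET_PAGE_MASK;
    PAGE_FOR_EACH_TB(addr, last, unused, tb, n) {
        /* ... */
    }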
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index a173db1..04d2751 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -127,29 +127,29 @@ static void tb_remove(TranslationBlock *tb)
}
/* TODO: For now, still shared with translate-all.c for system mode. */
-#define PAGE_FOR_EACH_TB(start, end, pagedesc, T, N) \
- for (T = foreach_tb_first(start, end), \
- N = foreach_tb_next(T, start, end); \
+#define PAGE_FOR_EACH_TB(start, last, pagedesc, T, N) \
+ for (T = foreach_tb_first(start, last), \
+ N = foreach_tb_next(T, start, last); \
T != NULL; \
- T = N, N = foreach_tb_next(N, start, end))
+ T = N, N = foreach_tb_next(N, start, last))
typedef TranslationBlock *PageForEachNext;
static PageForEachNext foreach_tb_first(tb_page_addr_t start,
- tb_page_addr_t end)
+ tb_page_addr_t last)
{
- IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, end - 1);
+ IntervalTreeNode *n = interval_tree_iter_first(&tb_root, start, last);
return n ? container_of(n, TranslationBlock, itree) : NULL;
}
static PageForEachNext foreach_tb_next(PageForEachNext tb,
tb_page_addr_t start,
- tb_page_addr_t end)
+ tb_page_addr_t last)
{
IntervalTreeNode *n;
if (tb) {
- n = interval_tree_iter_next(&tb->itree, start, end - 1);
+ n = interval_tree_iter_next(&tb->itree, start, last);
if (n) {
return container_of(n, TranslationBlock, itree);
}
@@ -320,7 +320,7 @@ struct page_collection {
};
typedef int PageForEachNext;
-#define PAGE_FOR_EACH_TB(start, end, pagedesc, tb, n) \
+#define PAGE_FOR_EACH_TB(start, last, pagedesc, tb, n) \
TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
#ifdef CONFIG_DEBUG_TCG
@@ -995,10 +995,11 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
TranslationBlock *tb;
PageForEachNext n;
+ tb_page_addr_t last = end - 1;
assert_memory_lock();
- PAGE_FOR_EACH_TB(start, end, unused, tb, n) {
+ PAGE_FOR_EACH_TB(start, last, unused, tb, n) {
tb_phys_invalidate__locked(tb);
}
}
@@ -1030,6 +1031,7 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
bool current_tb_modified;
TranslationBlock *tb;
PageForEachNext n;
+ tb_page_addr_t last;
/*
* Without precise smc semantics, or when outside of a TB,
@@ -1046,10 +1048,11 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
assert_memory_lock();
current_tb = tcg_tb_lookup(pc);
+ last = addr | ~TARGET_PAGE_MASK;
addr &= TARGET_PAGE_MASK;
current_tb_modified = false;
- PAGE_FOR_EACH_TB(addr, addr + TARGET_PAGE_SIZE, unused, tb, n) {
+ PAGE_FOR_EACH_TB(addr, last, unused, tb, n) {
if (current_tb == tb &&
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
/*
@@ -1091,12 +1094,13 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
bool current_tb_modified = false;
TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
#endif /* TARGET_HAS_PRECISE_SMC */
+ tb_page_addr_t last G_GNUC_UNUSED = end - 1;
/*
* We remove all the TBs in the range [start, end[.
* XXX: see if in some cases it could be faster to invalidate all the code
*/
- PAGE_FOR_EACH_TB(start, end, p, tb, n) {
+ PAGE_FOR_EACH_TB(start, last, p, tb, n) {
/* NOTE: this is subtle as a TB may span two physical pages */
if (n == 0) {
/* NOTE: tb_end may be after the end of the page, but