aboutsummaryrefslogtreecommitdiff
path: root/accel/tcg
diff options
context:
space:
mode:
authorRichard Henderson <richard.henderson@linaro.org>2023-03-06 03:42:12 +0300
committerRichard Henderson <richard.henderson@linaro.org>2023-03-28 15:23:10 -0700
commitf6555e3f39ea118754c03e8b5d92fa3aa0dd6093 (patch)
tree09b1e50799ad300fe9e85a5d33df541ec10de95d /accel/tcg
parentf7e2add5fd29ab067ab39c3d31a911ffcf1f7b17 (diff)
downloadqemu-f6555e3f39ea118754c03e8b5d92fa3aa0dd6093.zip
qemu-f6555e3f39ea118754c03e8b5d92fa3aa0dd6093.tar.gz
qemu-f6555e3f39ea118754c03e8b5d92fa3aa0dd6093.tar.bz2
accel/tcg: Pass last not end to page_collection_lock
Pass the address of the last byte to be changed, rather than the first address past the last byte. This avoids overflow when the last page of the address space is involved. Fixes a bug in the loop comparison where "<= end" would lock one more page than required. Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'accel/tcg')
-rw-r--r--accel/tcg/tb-maint.c22
1 file changed, 11 insertions, 11 deletions
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index 04d2751..57da2fe 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -511,20 +511,20 @@ static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
}
/*
- * Lock a range of pages ([@start,@end[) as well as the pages of all
+ * Lock a range of pages ([@start,@last]) as well as the pages of all
* intersecting TBs.
* Locking order: acquire locks in ascending order of page index.
*/
static struct page_collection *page_collection_lock(tb_page_addr_t start,
- tb_page_addr_t end)
+ tb_page_addr_t last)
{
struct page_collection *set = g_malloc(sizeof(*set));
tb_page_addr_t index;
PageDesc *pd;
start >>= TARGET_PAGE_BITS;
- end >>= TARGET_PAGE_BITS;
- g_assert(start <= end);
+ last >>= TARGET_PAGE_BITS;
+ g_assert(start <= last);
set->tree = q_tree_new_full(tb_page_addr_cmp, NULL, NULL,
page_entry_destroy);
@@ -534,7 +534,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
retry:
q_tree_foreach(set->tree, page_entry_lock, NULL);
- for (index = start; index <= end; index++) {
+ for (index = start; index <= last; index++) {
TranslationBlock *tb;
PageForEachNext n;
@@ -1154,7 +1154,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
void tb_invalidate_phys_page(tb_page_addr_t addr)
{
struct page_collection *pages;
- tb_page_addr_t start, end;
+ tb_page_addr_t start, last;
PageDesc *p;
p = page_find(addr >> TARGET_PAGE_BITS);
@@ -1163,9 +1163,9 @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
}
start = addr & TARGET_PAGE_MASK;
- end = start + TARGET_PAGE_SIZE;
- pages = page_collection_lock(start, end);
- tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
+ last = addr | ~TARGET_PAGE_MASK;
+ pages = page_collection_lock(start, last);
+ tb_invalidate_phys_page_range__locked(pages, p, start, last + 1, 0);
page_collection_unlock(pages);
}
@@ -1181,7 +1181,7 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
struct page_collection *pages;
tb_page_addr_t next;
- pages = page_collection_lock(start, end);
+ pages = page_collection_lock(start, end - 1);
for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
start < end;
start = next, next += TARGET_PAGE_SIZE) {
@@ -1226,7 +1226,7 @@ void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
{
struct page_collection *pages;
- pages = page_collection_lock(ram_addr, ram_addr + size);
+ pages = page_collection_lock(ram_addr, ram_addr + size - 1);
tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
page_collection_unlock(pages);
}