author    Richard Henderson <richard.henderson@linaro.org>  2022-10-01 13:36:33 -0700
committer Richard Henderson <richard.henderson@linaro.org>  2022-12-20 17:09:41 -0800
commit a97d5d2c8be2aec5b2e3c81cde33506b3c029033 (patch)
tree   7ebe1d4e77ba9dd9da1850aac687e66563fa154e /include
parent bf590a67dd7a92de89f5297fe87e48ad21f96194 (diff)
accel/tcg: Use interval tree for TBs in user-only mode
Begin weaning user-only away from PageDesc.

Since, for user-only, all TB (and page) manipulation is done with a single mutex, and there is no virtual/physical discontinuity to split a TB across discontinuous pages, place all of the TBs into a single IntervalTree. This makes it trivial to find all of the TBs intersecting a range.

Retain the existing PageDesc + linked list implementation for system mode. Move the portion of the implementation that overlaps the new user-only code behind the common ifdef.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
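With every user-only TB in one IntervalTree, "find all TBs intersecting a range" becomes a plain iterator walk. The sketch below is illustrative only: it assumes the iterator API from qemu/interval-tree.h (interval_tree_iter_first/interval_tree_iter_next) and a hypothetical tree root tb_root plus helper tb_for_each_in_range, none of which are defined by this patch.

    #include "qemu/osdep.h"
    #include "qemu/interval-tree.h"
    #include "exec/exec-all.h"

    /* Hypothetical root holding every user-only TB; not part of this patch. */
    static IntervalTreeRoot tb_root;

    /*
     * Visit each TranslationBlock whose [itree.start, itree.last] interval
     * intersects [start, last].  container_of() recovers the TB from its
     * embedded IntervalTreeNode.
     */
    static void tb_for_each_in_range(tb_page_addr_t start, tb_page_addr_t last)
    {
        IntervalTreeNode *n;

        for (n = interval_tree_iter_first(&tb_root, start, last);
             n != NULL;
             n = interval_tree_iter_next(n, start, last)) {
            TranslationBlock *tb = container_of(n, TranslationBlock, itree);
            /* e.g. invalidate or inspect tb here */
            (void)tb;
        }
    }

Because the tree is keyed on tb_page_addr_t intervals rather than per-page lists, a single query replaces walking two PageDesc linked lists per TB.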
Diffstat (limited to 'include')
-rw-r--r--  include/exec/exec-all.h  43
1 file changed, 40 insertions, 3 deletions
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 9b7bfbf..25e11b0 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -24,6 +24,7 @@
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#endif
+#include "qemu/interval-tree.h"
/* allow to see translation results - the slowdown should be negligible, so we leave it */
#define DEBUG_DISAS
@@ -559,11 +560,20 @@ struct TranslationBlock {
struct tb_tc tc;
- /* first and second physical page containing code. The lower bit
- of the pointer tells the index in page_next[].
- The list is protected by the TB's page('s) lock(s) */
+ /*
+ * Track tb_page_addr_t intervals that intersect this TB.
+ * For user-only, the virtual addresses are always contiguous,
+ * and we use a unified interval tree. For system, we use a
+ * linked list headed in each PageDesc. Within the list, the lsb
+ * of the previous pointer tells the index of page_next[], and the
+ * list is protected by the PageDesc lock(s).
+ */
+#ifdef CONFIG_USER_ONLY
+ IntervalTreeNode itree;
+#else
uintptr_t page_next[2];
tb_page_addr_t page_addr[2];
+#endif
/* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
QemuSpin jmp_lock;
@@ -619,24 +629,51 @@ static inline uint32_t tb_cflags(const TranslationBlock *tb)
static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
{
+#ifdef CONFIG_USER_ONLY
+ return tb->itree.start;
+#else
return tb->page_addr[0];
+#endif
}
static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
{
+#ifdef CONFIG_USER_ONLY
+ tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
+ return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
+#else
return tb->page_addr[1];
+#endif
}
static inline void tb_set_page_addr0(TranslationBlock *tb,
tb_page_addr_t addr)
{
+#ifdef CONFIG_USER_ONLY
+ tb->itree.start = addr;
+ /*
+ * To begin, we record an interval of one byte. When the translation
+ * loop encounters a second page, the interval will be extended to
+ * include the first byte of the second page, which is sufficient to
+ * allow tb_page_addr1() above to work properly. The final corrected
+ * interval will be set by tb_page_add() from tb->size before the
+ * node is added to the interval tree.
+ */
+ tb->itree.last = addr;
+#else
tb->page_addr[0] = addr;
+#endif
}
static inline void tb_set_page_addr1(TranslationBlock *tb,
tb_page_addr_t addr)
{
+#ifdef CONFIG_USER_ONLY
+ /* Extend the interval to the first byte of the second page. See above. */
+ tb->itree.last = addr;
+#else
tb->page_addr[1] = addr;
+#endif
}
/* current cflags for hashing/comparison */
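To see how the one-byte interval recorded by tb_set_page_addr0() interacts with tb_page_addr1() above, the standalone sketch below mirrors the CONFIG_USER_ONLY logic with a simplified stand-in struct and an assumed 4 KiB page size; the names tb_stub, stub_page_addr1 and PAGE_MASK are invented for illustration and do not appear in the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for tb_page_addr_t, TARGET_PAGE_MASK and itree. */
    typedef uint64_t addr_t;
    #define PAGE_MASK ((addr_t)-4096)   /* assumes 4 KiB pages */

    struct tb_stub { addr_t start, last; };

    static addr_t stub_page_addr1(const struct tb_stub *tb)
    {
        addr_t next = tb->last & PAGE_MASK;
        /* Same rule as tb_page_addr1(): -1 means the TB spans a single page. */
        return next == (tb->start & PAGE_MASK) ? (addr_t)-1 : next;
    }

    int main(void)
    {
        /* One-byte interval set by tb_set_page_addr0(): stays on one page. */
        struct tb_stub a = { .start = 0x1000, .last = 0x1000 };
        /* Interval extended by tb_set_page_addr1() into the next page. */
        struct tb_stub b = { .start = 0x1ff0, .last = 0x2000 };

        printf("a: %#llx\n", (unsigned long long)stub_page_addr1(&a)); /* -1 */
        printf("b: %#llx\n", (unsigned long long)stub_page_addr1(&b)); /* 0x2000 */
        return 0;
    }

The design point this illustrates: only the start of the second page needs to be captured during translation, since tb_page_add() later corrects itree.last from tb->size before the node enters the tree.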