author     Richard Henderson <richard.henderson@linaro.org>    2022-10-05 16:22:16 -0700
committer  Richard Henderson <richard.henderson@linaro.org>    2022-12-20 17:09:41 -0800
commit     f88f3ac90f9e98333abf91f23c8547a428cd90fa (patch)
tree       1747f0b368caddb4f874cace6a71c24e5447603c /accel/tcg
parent     a97d5d2c8be2aec5b2e3c81cde33506b3c029033 (diff)
accel/tcg: Use interval tree for TARGET_PAGE_DATA_SIZE
Continue weaning user-only away from PageDesc.

Use an interval tree to record target data.
Chunk the data, to minimize allocation overhead.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
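For illustration only, below is a minimal standalone sketch of the chunk/slot arithmetic this patch introduces. TARGET_PAGE_BITS = 12 and TARGET_PAGE_DATA_SIZE = 16 are placeholder values chosen for the sketch (QEMU derives both per target), and it omits the interval tree that the real code uses to locate the chunk covering an address.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder values for the sketch; QEMU defines these per target. */
#define TARGET_PAGE_BITS      12
#define TARGET_PAGE_SIZE      ((uint64_t)1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK      (~(TARGET_PAGE_SIZE - 1))
#define TARGET_PAGE_DATA_SIZE 16

/* As in the patch: 64 pages of target data share one allocation. */
#define TPD_PAGES 64
#define TBD_MASK  (TARGET_PAGE_MASK * TPD_PAGES)

int main(void)
{
    uint64_t address = 0x555555554abcull;                   /* arbitrary example address */

    uint64_t page   = address & TARGET_PAGE_MASK;           /* page containing the address */
    uint64_t region = address & TBD_MASK;                   /* base of its 64-page chunk   */
    uint64_t slot   = (page - region) >> TARGET_PAGE_BITS;  /* index into data[][]         */

    printf("page   0x%" PRIx64 "\n", page);
    printf("region 0x%" PRIx64 " .. 0x%" PRIx64 "\n", region, region | ~TBD_MASK);
    printf("slot   %" PRIu64 " (byte offset %" PRIu64 ")\n",
           slot, slot * TARGET_PAGE_DATA_SIZE);
    return 0;
}

Because one allocation now backs TPD_PAGES pages, the per-allocation bookkeeping is amortised across the whole chunk, which is what "chunk the data, to minimize allocation overhead" refers to.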
Diffstat (limited to 'accel/tcg')
-rw-r--r--  accel/tcg/internal.h  |   1
-rw-r--r--  accel/tcg/user-exec.c | 101
2 files changed, 75 insertions, 27 deletions
diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
index bf1bf62..0f91ee9 100644
--- a/accel/tcg/internal.h
+++ b/accel/tcg/internal.h
@@ -26,7 +26,6 @@
typedef struct PageDesc {
#ifdef CONFIG_USER_ONLY
unsigned long flags;
- void *target_data;
#else
QemuSpin lock;
/* list of TBs intersecting this ram page */
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index fb7d6ee..42a04bd 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -210,47 +210,96 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
return addr;
}
+#ifdef TARGET_PAGE_DATA_SIZE
+/*
+ * Allocate chunks of target data together. For the only current user,
+ * if we allocate one hunk per page, we have overhead of 40/128 or 40%.
+ * Therefore, allocate memory for 64 pages at a time for overhead < 1%.
+ */
+#define TPD_PAGES 64
+#define TBD_MASK (TARGET_PAGE_MASK * TPD_PAGES)
+
+typedef struct TargetPageDataNode {
+ IntervalTreeNode itree;
+ char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
+} TargetPageDataNode;
+
+static IntervalTreeRoot targetdata_root;
+
void page_reset_target_data(target_ulong start, target_ulong end)
{
-#ifdef TARGET_PAGE_DATA_SIZE
- target_ulong addr, len;
-
- /*
- * This function should never be called with addresses outside the
- * guest address space. If this assert fires, it probably indicates
- * a missing call to h2g_valid.
- */
- assert(end - 1 <= GUEST_ADDR_MAX);
- assert(start < end);
+ IntervalTreeNode *n, *next;
+ target_ulong last;
+
assert_memory_lock();
start = start & TARGET_PAGE_MASK;
- end = TARGET_PAGE_ALIGN(end);
+ last = TARGET_PAGE_ALIGN(end) - 1;
+
+ for (n = interval_tree_iter_first(&targetdata_root, start, last),
+ next = n ? interval_tree_iter_next(n, start, last) : NULL;
+ n != NULL;
+ n = next,
+ next = next ? interval_tree_iter_next(n, start, last) : NULL) {
+ target_ulong n_start, n_last, p_ofs, p_len;
+ TargetPageDataNode *t;
+
+ if (n->start >= start && n->last <= last) {
+ interval_tree_remove(n, &targetdata_root);
+ g_free(n);
+ continue;
+ }
- for (addr = start, len = end - start;
- len != 0;
- len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
- PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
+ if (n->start < start) {
+ n_start = start;
+ p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
+ } else {
+ n_start = n->start;
+ p_ofs = 0;
+ }
+ n_last = MIN(last, n->last);
+ p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;
- g_free(p->target_data);
- p->target_data = NULL;
+ t = container_of(n, TargetPageDataNode, itree);
+ memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
}
-#endif
}
-#ifdef TARGET_PAGE_DATA_SIZE
void *page_get_target_data(target_ulong address)
{
- PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
- void *ret = p->target_data;
+ IntervalTreeNode *n;
+ TargetPageDataNode *t;
+ target_ulong page, region;
+
+ page = address & TARGET_PAGE_MASK;
+ region = address & TBD_MASK;
- if (!ret) {
- ret = g_malloc0(TARGET_PAGE_DATA_SIZE);
- p->target_data = ret;
+ n = interval_tree_iter_first(&targetdata_root, page, page);
+ if (!n) {
+ /*
+ * See util/interval-tree.c re lockless lookups: no false positives
+ * but there are false negatives. If we find nothing, retry with
+ * the mmap lock acquired. We also need the lock for the
+ * allocation + insert.
+ */
+ mmap_lock();
+ n = interval_tree_iter_first(&targetdata_root, page, page);
+ if (!n) {
+ t = g_new0(TargetPageDataNode, 1);
+ n = &t->itree;
+ n->start = region;
+ n->last = region | ~TBD_MASK;
+ interval_tree_insert(n, &targetdata_root);
+ }
+ mmap_unlock();
}
- return ret;
+
+ t = container_of(n, TargetPageDataNode, itree);
+ return t->data[(page - region) >> TARGET_PAGE_BITS];
}
-#endif
+#else
+void page_reset_target_data(target_ulong start, target_ulong end) { }
+#endif /* TARGET_PAGE_DATA_SIZE */
/* The softmmu versions of these helpers are in cputlb.c. */
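As a footnote on the lookup path above: page_get_target_data() first probes the tree without any lock and only takes the mmap lock to re-check and allocate on a miss, relying on the lockless-read guarantees documented in util/interval-tree.c. The sketch below shows the same double-checked pattern in a self-contained form; the interval tree and mmap lock are replaced with an insert-only atomic list and a pthread mutex purely so it can compile on its own, and all names here (Node, find, get_region_data, REGION_MASK, DATA_BYTES) are invented for the sketch.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

/* Sketch-only constants: a 64 x 4 KiB chunk holding 16 bytes per page. */
#define REGION_MASK (~(uint64_t)0x3ffff)
#define DATA_BYTES  (64 * 16)

typedef struct Node {
    uint64_t region;                 /* chunk base address */
    _Atomic(struct Node *) next;
    char data[DATA_BYTES];
} Node;

static _Atomic(Node *) head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Lock-free search.  Like the interval tree's lockless read, it can miss a
 * node that is concurrently being inserted (a false negative), but it never
 * returns a half-initialised node: writers fully populate the node before
 * publishing it with a release store.
 */
static Node *find(uint64_t region)
{
    for (Node *n = atomic_load_explicit(&head, memory_order_acquire);
         n != NULL;
         n = atomic_load_explicit(&n->next, memory_order_acquire)) {
        if (n->region == region) {
            return n;
        }
    }
    return NULL;
}

char *get_region_data(uint64_t address)
{
    uint64_t region = address & REGION_MASK;
    Node *n = find(region);

    if (!n) {
        /* Miss: take the lock, re-check, and only then allocate + insert. */
        pthread_mutex_lock(&lock);
        n = find(region);
        if (!n) {
            n = calloc(1, sizeof(*n));
            if (!n) {
                abort();             /* sketch: no graceful OOM handling */
            }
            n->region = region;
            atomic_store_explicit(&n->next,
                                  atomic_load_explicit(&head,
                                                       memory_order_relaxed),
                                  memory_order_relaxed);
            atomic_store_explicit(&head, n, memory_order_release);
        }
        pthread_mutex_unlock(&lock);
    }
    return n->data;
}

A caller would fetch the chunk with get_region_data(addr) and index the 16-byte slot for its page, mirroring what t->data[(page - region) >> TARGET_PAGE_BITS] does in the patch; the cheap common case (data already allocated) never touches the lock.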