author    Emilio Cota <cota@braap.org>  2023-02-05 11:37:58 -0500
committer Richard Henderson <richard.henderson@linaro.org>  2023-03-28 15:23:10 -0700
commit    1ff4a81bd3efb207992f1da267886fe0c4df764f
tree      4865036b91bf5cd4d4fc7df0764e1e35c1cefcea
parent    e3feb2cc224f61149a27f021042f5a4230bb1008
tcg: use QTree instead of GTree
qemu-user can hang in a multi-threaded fork. One common reason is that when creating a TB, between fork and exec we manipulate a GTree whose memory allocator (GSlice) is not fork-safe. Although POSIX does not mandate it, the system's allocator (e.g. tcmalloc, libc malloc) is probably fork-safe.

Fix some of these hangs by using QTree, which uses the system's allocator regardless of the GLib version that we used at configuration time.

Tested with the test program in the original bug report, i.e.:

```
/* headers needed so the reproducer builds stand-alone (C++, for std::thread) */
#include <cstdlib>
#include <thread>
#include <sys/wait.h>
#include <unistd.h>

void garble() {
    int pid = fork();
    if (pid == 0) {
        exit(0);
    } else {
        int wstatus;
        waitpid(pid, &wstatus, 0);
    }
}

void supragarble(unsigned depth) {
    if (depth == 0) {
        return;
    }

    std::thread a(supragarble, depth - 1);
    std::thread b(supragarble, depth - 1);
    garble();
    a.join();
    b.join();
}

int main() {
    supragarble(10);
}
```

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/285
Reported-by: Valentin David <me@valentindavid.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Emilio Cota <cota@braap.org>
Message-Id: <20230205163758.416992-3-cota@braap.org>
[rth: Add QEMU_DISABLE_CFI for all callback-using functions.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
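For context, the q_tree_* entry points used in the diff below are drop-in replacements for GLib's g_tree_* calls, keeping the same signatures and callback conventions. A minimal sketch of that API, assuming GLib's comparator and traversal callback types carry over unchanged; the qtree_demo(), int_cmp() and visit() helpers are hypothetical illustrations, not part of this patch:

```
#include "qemu/osdep.h"
#include "qemu/qtree.h"

/* GCompareDataFunc-style comparator, as expected by q_tree_new_full() */
static gint int_cmp(gconstpointer a, gconstpointer b, gpointer udata)
{
    int ia = *(const int *)a;
    int ib = *(const int *)b;
    return (ia > ib) - (ia < ib);
}

/* GTraverseFunc-style visitor; returning FALSE continues the traversal */
static gboolean visit(gpointer key, gpointer value, gpointer udata)
{
    return FALSE;
}

/* Hypothetical demo: same call shapes as the g_tree_* code it replaces */
static void qtree_demo(void)
{
    /*
     * Nodes come from the system allocator rather than GSlice, which is
     * the point of the patch: the tree stays usable in a child after fork().
     */
    QTree *t = q_tree_new_full(int_cmp, NULL, NULL, g_free);
    int key = 42;

    q_tree_insert(t, &key, g_strdup("value"));
    if (q_tree_lookup(t, &key)) {
        q_tree_foreach(t, visit, NULL);
    }
    q_tree_destroy(t); /* frees the values via the destroy notify */
}
```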
Diffstat (limited to 'accel/tcg')
 accel/tcg/tb-maint.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index 7246c1c..a173db1 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -19,6 +19,7 @@
 
 #include "qemu/osdep.h"
 #include "qemu/interval-tree.h"
+#include "qemu/qtree.h"
 #include "exec/cputlb.h"
 #include "exec/log.h"
 #include "exec/exec-all.h"
@@ -314,7 +315,7 @@ struct page_entry {
  * See also: page_collection_lock().
  */
 struct page_collection {
-    GTree *tree;
+    QTree *tree;
     struct page_entry *max;
 };
 
@@ -467,7 +468,7 @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
     struct page_entry *pe;
     PageDesc *pd;
 
-    pe = g_tree_lookup(set->tree, &index);
+    pe = q_tree_lookup(set->tree, &index);
     if (pe) {
         return false;
     }
@@ -478,7 +479,7 @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
     }
 
     pe = page_entry_new(pd, index);
-    g_tree_insert(set->tree, &pe->index, pe);
+    q_tree_insert(set->tree, &pe->index, pe);
 
     /*
      * If this is either (1) the first insertion or (2) a page whose index
@@ -525,13 +526,13 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
     end >>= TARGET_PAGE_BITS;
     g_assert(start <= end);
 
-    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
+    set->tree = q_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                 page_entry_destroy);
     set->max = NULL;
     assert_no_pages_locked();
 
 retry:
-    g_tree_foreach(set->tree, page_entry_lock, NULL);
+    q_tree_foreach(set->tree, page_entry_lock, NULL);
 
     for (index = start; index <= end; index++) {
         TranslationBlock *tb;
@@ -542,7 +543,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
             continue;
         }
         if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
-            g_tree_foreach(set->tree, page_entry_unlock, NULL);
+            q_tree_foreach(set->tree, page_entry_unlock, NULL);
             goto retry;
         }
         assert_page_locked(pd);
@@ -551,7 +552,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
                 (tb_page_addr1(tb) != -1 &&
                  page_trylock_add(set, tb_page_addr1(tb)))) {
                 /* drop all locks, and reacquire in order */
-                g_tree_foreach(set->tree, page_entry_unlock, NULL);
+                q_tree_foreach(set->tree, page_entry_unlock, NULL);
                 goto retry;
             }
         }
@@ -562,7 +563,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
 
 static void page_collection_unlock(struct page_collection *set)
 {
     /* entries are unlocked and freed via page_entry_destroy */
-    g_tree_destroy(set->tree);
+    q_tree_destroy(set->tree);
     g_free(set);
 }