author     Peter Maydell <peter.maydell@linaro.org>   2023-04-04 17:02:14 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2023-04-04 17:02:14 +0100
commit     7d0334e49111787ae19fbc8d29ff6e7347f0605e (patch)
tree       e24cefae1bf9689021e12e4c197ab71419bdd1b1
parent     8a712df4d4d736b7fe6441626677bfd271d95b15 (diff)
parent     1ffbe5d681b06ea95b1728fc556899f63834553a (diff)
Merge tag 'pull-tcg-20230404' of https://gitlab.com/rth7680/qemu into staging
Revert "linux-user/arm: Take more care allocating commpage" accel/tcg: Fix jump cache set in cpu_exec_loop accel/tcg: Fix initialization of CF_PCREL in tcg_cflags tcg/sparc64: Disable direct jumps from goto_tb # -----BEGIN PGP SIGNATURE----- # # iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmQsRwAdHHJpY2hhcmQu # aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV8QqQf6AnUwdkp0zEUevshL # gPlns3u5RZIv/i5L1VIkYrLbaFQtwD17CfBACB2MxZI6lbPK4jT++FlDiWJ1ak7i # 4p9Q5KOAbjAxQgQXy51NbEzg5AoIYP5HEg2cnjfEg0PltRVTn0bdbBVbCG/erDXa # NfEOPtHSU+6B8jRjp0MRvFv4Y4CJ3nQ7eZ6TMI3oGOk44DOs22Fn330E8wT2vpt4 # ayJNDN8F0FZ5wGZdJIAsMSgauaGJwY/ZG1KX8TGQb7nsRn5lbpEgoOUCd0WUGdx6 # 3YsoELaZoHZhAlvVNjbOMBp3fZSH2owb08By9vU7ZMjPOnsjZQz6TuxR6NNl+04G # tPi44Q== # =+m7M # -----END PGP SIGNATURE----- # gpg: Signature made Tue 04 Apr 2023 16:49:20 BST # gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F # gpg: issuer "richard.henderson@linaro.org" # gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full] # Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F * tag 'pull-tcg-20230404' of https://gitlab.com/rth7680/qemu: tcg/sparc64: Disable direct linking for goto_tb accel/tcg: Fix jump cache set in cpu_exec_loop accel/tcg: Fix overwrite problems of tcg_cflags Revert "linux-user/arm: Take more care allocating commpage" Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--  accel/tcg/cpu-exec.c          | 17
-rw-r--r--  accel/tcg/tcg-accel-ops.c     |  2
-rw-r--r--  linux-user/elfload.c          | 37
-rw-r--r--  tcg/sparc64/tcg-target.c.inc  | 30
4 files changed, 28 insertions, 58 deletions
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index c815f2d..8370c92 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -257,7 +257,7 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
if (cflags & CF_PCREL) {
/* Use acquire to ensure current load of pc from jc. */
- tb =  qatomic_load_acquire(&jc->array[hash].tb);
+ tb = qatomic_load_acquire(&jc->array[hash].tb);
if (likely(tb &&
jc->array[hash].pc == pc &&
@@ -272,7 +272,7 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
return NULL;
}
jc->array[hash].pc = pc;
- /* Use store_release on tb to ensure pc is written first. */
+ /* Ensure pc is written first. */
qatomic_store_release(&jc->array[hash].tb, tb);
} else {
/* Use rcu_read to ensure current load of pc from *tb. */
@@ -971,18 +971,27 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
+ CPUJumpCache *jc;
uint32_t h;
mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
mmap_unlock();
+
/*
* We add the TB in the virtual pc hash table
* for the fast lookup
*/
h = tb_jmp_cache_hash_func(pc);
- /* Use the pc value already stored in tb->pc. */
- qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb);
+ jc = cpu->tb_jmp_cache;
+ if (cflags & CF_PCREL) {
+ jc->array[h].pc = pc;
+ /* Ensure pc is written first. */
+ qatomic_store_release(&jc->array[h].tb, tb);
+ } else {
+ /* Use the pc value already stored in tb->pc. */
+ qatomic_set(&jc->array[h].tb, tb);
+ }
}
#ifndef CONFIG_USER_ONLY
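The fix above restores the acquire/release pairing between tb_lookup() and
cpu_exec_loop(): with CF_PCREL the guest pc is cached next to the tb pointer,
so the writer must store pc first and publish tb with a release store, and a
reader that acquire-loads a non-NULL tb is then guaranteed to see the matching
pc. A minimal standalone sketch of that pattern (illustrative names, C11
atomics in place of QEMU's qatomic wrappers):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>

    struct entry {
        _Atomic(void *) tb;   /* published last, with a release store */
        uint64_t pc;          /* written first, plain store */
    };

    /* Writer: order pc before tb, as the fixed cpu_exec_loop path does. */
    static void publish(struct entry *e, void *tb, uint64_t pc)
    {
        e->pc = pc;
        atomic_store_explicit(&e->tb, tb, memory_order_release);
    }

    /* Reader: acquire pairs with release; non-NULL tb implies pc is valid. */
    static void *lookup(struct entry *e, uint64_t pc)
    {
        void *tb = atomic_load_explicit(&e->tb, memory_order_acquire);
        return (tb && e->pc == pc) ? tb : NULL;
    }

    int main(void)
    {
        static int dummy_tb;
        struct entry e = { .pc = 0 };
        publish(&e, &dummy_tb, 0x1000);
        assert(lookup(&e, 0x1000) == &dummy_tb);
        assert(lookup(&e, 0x2000) == NULL);
        return 0;
    }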
diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c
index af35e0d..58c8e64 100644
--- a/accel/tcg/tcg-accel-ops.c
+++ b/accel/tcg/tcg-accel-ops.c
@@ -59,7 +59,7 @@ void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
cflags |= parallel ? CF_PARALLEL : 0;
cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
- cpu->tcg_cflags = cflags;
+ cpu->tcg_cflags |= cflags;
}
void tcg_cpus_destroy(CPUState *cpu)
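The one-character change above is the whole of the tcg_cflags fix: by the time
tcg_cpu_init_cflags() runs, cpu->tcg_cflags may already hold CF_PCREL, set
during vCPU realization, and a plain assignment silently discarded it. A toy
illustration of why OR-ing matters (the flag values here are made up for the
example, not QEMU's real CF_* bits):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum {                        /* illustrative values only */
        CF_PCREL      = 1u << 0,
        CF_PARALLEL   = 1u << 1,
        CF_USE_ICOUNT = 1u << 2,
    };

    int main(void)
    {
        uint32_t tcg_cflags = CF_PCREL;   /* set earlier, at vCPU creation */
        bool parallel = true, icount = false;

        uint32_t cflags = 0;
        cflags |= parallel ? CF_PARALLEL : 0;
        cflags |= icount ? CF_USE_ICOUNT : 0;

        tcg_cflags |= cflags;   /* '=' here would drop CF_PCREL */

        printf("tcg_cflags = 0x%x\n", tcg_cflags);   /* prints 0x3 */
        return 0;
    }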
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index b96b3e5..f1370a7 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -423,32 +423,12 @@ enum {
static bool init_guest_commpage(void)
{
- ARMCPU *cpu = ARM_CPU(thread_cpu);
- abi_ptr want = HI_COMMPAGE & TARGET_PAGE_MASK;
- abi_ptr addr;
-
- /*
- * M-profile allocates maximum of 2GB address space, so can never
- * allocate the commpage. Skip it.
- */
- if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
- return true;
- }
-
- /*
- * If reserved_va does not cover the commpage, we get an assert
- * in page_set_flags. Produce an intelligent error instead.
- */
- if (reserved_va != 0 && want + TARGET_PAGE_SIZE - 1 > reserved_va) {
- error_report("Allocating guest commpage: -R 0x%" PRIx64 " too small",
- (uint64_t)reserved_va + 1);
- exit(EXIT_FAILURE);
- }
-
- addr = target_mmap(want, TARGET_PAGE_SIZE, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
+ abi_ptr commpage = HI_COMMPAGE & -qemu_host_page_size;
+ void *want = g2h_untagged(commpage);
+ void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
- if (addr == -1) {
+ if (addr == MAP_FAILED) {
perror("Allocating guest commpage");
exit(EXIT_FAILURE);
}
@@ -457,12 +437,15 @@ static bool init_guest_commpage(void)
}
/* Set kernel helper versions; rest of page is 0. */
- put_user_u32(5, 0xffff0ffcu);
+ __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu));
- if (target_mprotect(addr, qemu_host_page_size, PROT_READ | PROT_EXEC)) {
+ if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
perror("Protecting guest commpage");
exit(EXIT_FAILURE);
}
+
+ page_set_flags(commpage, commpage | ~qemu_host_page_mask,
+ PAGE_READ | PAGE_EXEC | PAGE_VALID);
return true;
}
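The revert returns init_guest_commpage() to the host-side pattern: map the
page with mmap() (checking MAP_FAILED, not -1), seed the kernel-helper version
word, drop write permission with mprotect(), and only then record the guest
page flags. A reduced standalone sketch of that sequence, with the fixed
0xffff0000 placement and the QEMU helpers left out so it runs anywhere:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long psz = sysconf(_SC_PAGESIZE);
        void *addr = mmap(NULL, psz, PROT_READ | PROT_WRITE,
                          MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        if (addr == MAP_FAILED) {      /* mmap's error value, not -1 */
            perror("mmap");
            exit(EXIT_FAILURE);
        }

        /* Seed a word near the end of the page, cf. 0xffff0ffc above. */
        uint32_t version = 5;
        memcpy((char *)addr + psz - sizeof(version), &version, sizeof(version));

        if (mprotect(addr, psz, PROT_READ)) {
            perror("mprotect");
            exit(EXIT_FAILURE);
        }
        return 0;
    }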
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index ccc4144..694f2b9 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -1445,12 +1445,12 @@ static void tcg_out_goto_tb(TCGContext *s, int which)
{
ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));
- /* Direct branch will be patched by tb_target_set_jmp_target. */
+ /* Load link and indirect branch. */
set_jmp_insn_offset(s, which);
- tcg_out32(s, CALL);
- /* delay slot */
- tcg_debug_assert(check_fit_ptr(off, 13));
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
+ tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
+ /* delay slot */
+ tcg_out_nop(s);
set_jmp_reset_offset(s, which);
/*
@@ -1469,28 +1469,6 @@ static void tcg_out_goto_tb(TCGContext *s, int which)
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
- uintptr_t addr = tb->jmp_target_addr[n];
- intptr_t br_disp = (intptr_t)(addr - jmp_rx) >> 2;
- tcg_insn_unit insn;
-
- br_disp >>= 2;
- if (check_fit_ptr(br_disp, 19)) {
- /* ba,pt %icc, addr */
- insn = deposit32(INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
- | BPCC_ICC | BPCC_PT, 0, 19, br_disp);
- } else if (check_fit_ptr(br_disp, 22)) {
- /* ba addr */
- insn = deposit32(INSN_OP(0) | INSN_OP2(2) | INSN_COND(COND_A),
- 0, 22, br_disp);
- } else {
- /* The code_gen_buffer can't be larger than 2GB. */
- tcg_debug_assert(check_fit_ptr(br_disp, 30));
- /* call addr */
- insn = deposit32(CALL, 0, 30, br_disp);
- }
-
- qatomic_set((uint32_t *)jmp_rw, insn);
- flush_idcache_range(jmp_rx, jmp_rw, 4);
}
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
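With direct patching disabled, every goto_tb on sparc64 now emits the same
load-and-branch pair: load the destination out of the TB's jump-target slot,
then jmpl through it (the link result goes to %g0, i.e. is discarded), with a
nop in the delay slot. Re-linking a TB becomes a plain store into that slot,
which is why the body of tb_target_set_jmp_target is removed above. The
control flow, reduced to portable C with a function pointer standing in for
the slot (names are illustrative, not TCG code):

    #include <stdatomic.h>
    #include <stdio.h>

    typedef void (*tb_fn)(void);

    static void epilogue(void) { puts("exit to the main loop"); }
    static void next_tb(void)  { puts("enter the chained TB"); }

    /* Per-exit jump-target slot; starts out pointing at the epilogue. */
    static _Atomic tb_fn jmp_slot = epilogue;

    /* Emitted exit: load the slot, then branch indirectly through it. */
    static void goto_tb(void)
    {
        tb_fn target = atomic_load_explicit(&jmp_slot, memory_order_relaxed);
        target();
    }

    int main(void)
    {
        goto_tb();                 /* unlinked: falls back to the epilogue */
        atomic_store_explicit(&jmp_slot, next_tb, memory_order_relaxed);
        goto_tb();                 /* linked: goes straight to the next TB */
        return 0;
    }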