author     Stefan Hajnoczi <stefanha@redhat.com>  2023-08-30 09:19:57 -0400
committer  Stefan Hajnoczi <stefanha@redhat.com>  2023-08-30 09:19:57 -0400
commit     ef9d20c63b64989ce4941dc48531c50728ddcc21 (patch)
tree       15e1d06992a22b55b8c8e9a9d305080d9c65fdc3
parent     813bac3d8d70d85cb7835f7945eb9eed84c2d8d0 (diff)
parent     669fd6151337fdc81e34f7eb4940ba2f20d89957 (diff)
Merge tag 'pull-tcg-20230829-2' of https://gitlab.com/rth7680/qemu into staging
softmmu: Use async_run_on_cpu in tcg_commit
tcg: Remove vecop_list check from tcg_gen_not_vec
tcg/sparc64: Disable TCG_TARGET_HAS_extr_i64_i32
Revert "include/exec: typedef abi_ptr to vaddr in softmmu"

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmTuOcYdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV9gGwf/QzbRP8MknlCZXrkZ
# ykIZAkE4gRUb64Wzc2Oz1XaueltgBGRCS338oE8umtcLKTngZ8rVFD/LPTIEyAEY
# SOzdHEJLPMSUv54rjAV8W4mVku81E9QvzOgz8PIzFM0mDiPJ/lG6JBTee/IZJHr3
# cW9W/2XMEz2rS2ONPj7WXbVbk/1ao29JFlhcWKNauUfqrNWK+VWOpo2w5qfgJruz
# mjOSiMErU7SijytrKG9GP3Ri1JGskocfGcYYPofz8j6lmQoZrT6aYUj2tJTL8rvQ
# Js+JCP8ZCXFO8/2jJqOivQccBGmLi8wf6Ke777xE0tAqfzXqBOp4tvJbv28e8lja
# p+Lqhg==
# =KPna
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 29 Aug 2023 14:32:38 EDT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20230829-2' of https://gitlab.com/rth7680/qemu:
  Revert "include/exec: typedef abi_ptr to vaddr in softmmu"
  tcg/sparc64: Disable TCG_TARGET_HAS_extr_i64_i32
  tcg: Remove vecop_list check from tcg_gen_not_vec
  softmmu: Use async_run_on_cpu in tcg_commit
  softmmu: Assert data in bounds in iotlb_to_section

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
-rw-r--r--  accel/tcg/cpu-exec-common.c    30
-rw-r--r--  include/exec/cpu-common.h       1
-rw-r--r--  include/exec/cpu_ldst.h         4
-rw-r--r--  softmmu/physmem.c              50
-rw-r--r--  tcg/sparc64/tcg-target.c.inc   11
-rw-r--r--  tcg/sparc64/tcg-target.h        2
-rw-r--r--  tcg/tcg-op-vec.c                7
7 files changed, 43 insertions, 62 deletions
diff --git a/accel/tcg/cpu-exec-common.c b/accel/tcg/cpu-exec-common.c
index 9a5fabf..7e35d7f 100644
--- a/accel/tcg/cpu-exec-common.c
+++ b/accel/tcg/cpu-exec-common.c
@@ -33,36 +33,6 @@ void cpu_loop_exit_noexc(CPUState *cpu)
cpu_loop_exit(cpu);
}
-#if defined(CONFIG_SOFTMMU)
-void cpu_reloading_memory_map(void)
-{
- if (qemu_in_vcpu_thread() && current_cpu->running) {
- /* The guest can in theory prolong the RCU critical section as long
- * as it feels like. The major problem with this is that because it
- * can do multiple reconfigurations of the memory map within the
- * critical section, we could potentially accumulate an unbounded
- * collection of memory data structures awaiting reclamation.
- *
- * Because the only thing we're currently protecting with RCU is the
- * memory data structures, it's sufficient to break the critical section
- * in this callback, which we know will get called every time the
- * memory map is rearranged.
- *
- * (If we add anything else in the system that uses RCU to protect
- * its data structures, we will need to implement some other mechanism
- * to force TCG CPUs to exit the critical section, at which point this
- * part of this callback might become unnecessary.)
- *
- * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
- * only protects cpu->as->dispatch. Since we know our caller is about
- * to reload it, it's safe to split the critical section.
- */
- rcu_read_unlock();
- rcu_read_lock();
- }
-}
-#endif
-
void cpu_loop_exit(CPUState *cpu)
{
/* Undo the setting in cpu_tb_exec. */
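For context on the pattern this removes: the deleted helper simply dropped and immediately re-took the RCU read lock. A minimal standalone sketch of that read-side split, using QEMU's include/qemu/rcu.h API (the helper name here is illustrative, not QEMU's):

#include "qemu/rcu.h"

/* Leaving and immediately re-entering the RCU critical section lets any
 * deferred reclamation run, so repeated memory-map rebuilds cannot pin
 * an unbounded pile of stale dispatch structures under one long section. */
static void split_rcu_critical_section(void)
{
    rcu_read_unlock();  /* end the section: pending reclaimers may run */
    rcu_read_lock();    /* re-enter before touching RCU-protected data */
}

With cpu_reloading_memory_map() gone, the same effect comes from kicking the vCPU back to the main loop via async_run_on_cpu (see the physmem.c change below), which ends the critical section naturally.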
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 87dc9a7..41788c0 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -133,7 +133,6 @@ static inline void cpu_physical_memory_write(hwaddr addr,
{
cpu_physical_memory_rw(addr, (void *)buf, len, true);
}
-void cpu_reloading_memory_map(void);
void *cpu_physical_memory_map(hwaddr addr,
hwaddr *plen,
bool is_write);
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index f3ce4eb..da10ba1 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -121,8 +121,8 @@ static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
h2g_nocheck(x); \
})
#else
-typedef vaddr abi_ptr;
-#define TARGET_ABI_FMT_ptr "%016" VADDR_PRIx
+typedef target_ulong abi_ptr;
+#define TARGET_ABI_FMT_ptr TARGET_FMT_lx
#endif
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
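The revert matters because the two types differ in width: vaddr is always 64 bits, while target_ulong tracks the target's word size, so the two need different format macros. A standalone sketch of the distinction, with widths assumed from QEMU's definitions and the typedef names chosen as illustrative stand-ins:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

typedef uint64_t vaddr_t;     /* stand-in for QEMU's vaddr: always 64-bit */
typedef uint32_t tgt_ulong;   /* stand-in for target_ulong on a 32-bit target */

int main(void)
{
    vaddr_t v = 0x12345678;
    tgt_ulong t = 0x1234;
    /* vaddr always prints 16 hex digits; target_ulong prints 8 or 16
     * depending on the target, which is what TARGET_FMT_lx encodes. */
    printf("%016" PRIx64 " vs %08" PRIx32 "\n", v, t);
    return 0;
}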
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 3df7354..18277dd 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -680,8 +680,7 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
IOMMUTLBEntry iotlb;
int iommu_idx;
hwaddr addr = orig_addr;
- AddressSpaceDispatch *d =
- qatomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
+ AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
for (;;) {
section = address_space_translate_internal(d, addr, &addr, plen, false);
@@ -2412,10 +2411,16 @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,
{
int asidx = cpu_asidx_from_attrs(cpu, attrs);
CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
- AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch);
- MemoryRegionSection *sections = d->map.sections;
+ AddressSpaceDispatch *d = cpuas->memory_dispatch;
+ int section_index = index & ~TARGET_PAGE_MASK;
+ MemoryRegionSection *ret;
+
+ assert(section_index < d->map.sections_nb);
+ ret = d->map.sections + section_index;
+ assert(ret->mr);
+ assert(ret->mr->ops);
- return &sections[index & ~TARGET_PAGE_MASK];
+ return ret;
}
static void io_mem_init(void)
@@ -2481,23 +2486,42 @@ static void tcg_log_global_after_sync(MemoryListener *listener)
}
}
+static void tcg_commit_cpu(CPUState *cpu, run_on_cpu_data data)
+{
+ CPUAddressSpace *cpuas = data.host_ptr;
+
+ cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as);
+ tlb_flush(cpu);
+}
+
static void tcg_commit(MemoryListener *listener)
{
CPUAddressSpace *cpuas;
- AddressSpaceDispatch *d;
+ CPUState *cpu;
assert(tcg_enabled());
/* since each CPU stores ram addresses in its TLB cache, we must
reset the modified entries */
cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
- cpu_reloading_memory_map();
- /* The CPU and TLB are protected by the iothread lock.
- * We reload the dispatch pointer now because cpu_reloading_memory_map()
- * may have split the RCU critical section.
+ cpu = cpuas->cpu;
+
+ /*
+ * Defer changes to as->memory_dispatch until the cpu is quiescent.
+ * Otherwise we race between (1) other cpu threads and (2) ongoing
+ * i/o for the current cpu thread, with data cached by mmu_lookup().
+ *
+ * In addition, queueing the work function will kick the cpu back to
+ * the main loop, which will end the RCU critical section and reclaim
+ * the memory data structures.
+ *
+ * That said, the listener is also called during realize, before
+ * all of the tcg machinery for run-on is initialized: thus halt_cond.
*/
- d = address_space_to_dispatch(cpuas->as);
- qatomic_rcu_set(&cpuas->memory_dispatch, d);
- tlb_flush(cpuas->cpu);
+ if (cpu->halt_cond) {
+ async_run_on_cpu(cpu, tcg_commit_cpu, RUN_ON_CPU_HOST_PTR(cpuas));
+ } else {
+ tcg_commit_cpu(cpu, RUN_ON_CPU_HOST_PTR(cpuas));
+ }
}
static void memory_map_init(void)
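The new asserts in iotlb_to_section() guard an index that is packed into the low bits of the TLB entry. A standalone sketch of that packing and the bounds check, assuming a 4 KiB target page; the function and macro names here are illustrative:

#include <stdint.h>
#include <assert.h>

#define PAGE_MASK  (~(uint64_t)0xfff)   /* 4 KiB pages assumed */

/* The low TARGET_PAGE_BITS of an iotlb value carry a section index;
 * masking with ~PAGE_MASK recovers it, and the bounds assert rejects an
 * index past the dispatch map's section count instead of reading
 * out-of-bounds memory. */
static unsigned iotlb_section_index(uint64_t iotlb, unsigned sections_nb)
{
    unsigned idx = (unsigned)(iotlb & ~PAGE_MASK);  /* low 12 bits */
    assert(idx < sections_nb);
    return idx;
}

int main(void)
{
    /* the page-aligned bits 0xabc000 are ignored; the section index is 5 */
    assert(iotlb_section_index(0xabc005, 16) == 5);
    return 0;
}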
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index f2a346a..81a08bb 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -529,11 +529,6 @@ static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
tcg_out_ext32u(s, rd, rs);
}
-static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
-{
- tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
-}
-
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
return false;
@@ -1444,9 +1439,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_divu_i64:
c = ARITH_UDIVX;
goto gen_arith;
- case INDEX_op_extrh_i64_i32:
- tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
- break;
case INDEX_op_brcond_i64:
tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
@@ -1501,7 +1493,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_ext32u_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
default:
g_assert_not_reached();
}
@@ -1533,8 +1524,6 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_ext32u_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
- case INDEX_op_extrh_i64_i32:
case INDEX_op_qemu_ld_a32_i32:
case INDEX_op_qemu_ld_a64_i32:
case INDEX_op_qemu_ld_a32_i64:
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
index 3d41c96..5cfc4b4 100644
--- a/tcg/sparc64/tcg-target.h
+++ b/tcg/sparc64/tcg-target.h
@@ -115,7 +115,7 @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0
-#define TCG_TARGET_HAS_extr_i64_i32 1
+#define TCG_TARGET_HAS_extr_i64_i32 0
#define TCG_TARGET_HAS_div_i64 1
#define TCG_TARGET_HAS_rem_i64 0
#define TCG_TARGET_HAS_rot_i64 0
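The two ops this flag covers split a 64-bit value into 32-bit halves. With the flag now 0, TCG expands them from generic operations rather than the removed backend code. Their semantics, as a standalone sketch:

#include <stdint.h>
#include <assert.h>

/* extrl_i64_i32 keeps the low 32 bits; extrh_i64_i32 keeps the high 32
 * bits (the removed sparc64 code emitted a 64-bit logical right shift
 * by 32, SHIFT_SRLX, for the latter). */
static uint32_t extrl_i64_i32(uint64_t x) { return (uint32_t)x; }
static uint32_t extrh_i64_i32(uint64_t x) { return (uint32_t)(x >> 32); }

int main(void)
{
    assert(extrl_i64_i32(0x1122334455667788ULL) == 0x55667788);
    assert(extrh_i64_i32(0x1122334455667788ULL) == 0x11223344);
    return 0;
}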
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
index ad8ee08..094298b 100644
--- a/tcg/tcg-op-vec.c
+++ b/tcg/tcg-op-vec.c
@@ -391,12 +391,11 @@ static bool do_op2(unsigned vece, TCGv_vec r, TCGv_vec a, TCGOpcode opc)
void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
{
- const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
-
- if (!TCG_TARGET_HAS_not_vec || !do_op2(vece, r, a, INDEX_op_not_vec)) {
+ if (TCG_TARGET_HAS_not_vec) {
+ vec_gen_op2(INDEX_op_not_vec, 0, r, a);
+ } else {
tcg_gen_xor_vec(0, r, a, tcg_constant_vec_matching(r, 0, -1));
}
- tcg_swap_vecop_list(hold_list);
}
void tcg_gen_neg_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
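The fallback branch in tcg_gen_not_vec() relies on the identity ~x == x ^ -1: xor-ing with an all-ones constant flips every bit, which is why tcg_constant_vec_matching(r, 0, -1) works for any element size. A quick scalar check of the identity:

#include <stdint.h>
#include <assert.h>

int main(void)
{
    uint64_t x = 0x0123456789abcdefULL;
    assert(~x == (x ^ UINT64_MAX));   /* NOT expressed as XOR with all-ones */
    return 0;
}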