Diffstat (limited to 'accel')
-rw-r--r--  accel/hvf/hvf-accel-ops.c      1
-rw-r--r--  accel/hvf/hvf-all.c            8
-rw-r--r--  accel/stubs/hvf-stub.c        12
-rw-r--r--  accel/stubs/meson.build        1
-rw-r--r--  accel/tcg/cpu-exec.c         164
-rw-r--r--  accel/tcg/cputlb.c            34
-rw-r--r--  accel/tcg/internal-common.h   37
-rw-r--r--  accel/tcg/internal-target.h   46
-rw-r--r--  accel/tcg/ldst_common.c.inc    4
-rw-r--r--  accel/tcg/meson.build         20
-rw-r--r--  accel/tcg/tb-hash.h            3
-rw-r--r--  accel/tcg/tb-internal.h        8
-rw-r--r--  accel/tcg/tb-maint.c         100
-rw-r--r--  accel/tcg/tcg-all.c            6
-rw-r--r--  accel/tcg/tlb-bounds.h        21
-rw-r--r--  accel/tcg/translate-all.c     95
-rw-r--r--  accel/tcg/user-exec.c        145
17 files changed, 302 insertions, 403 deletions
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index 5375de7..b8b6116 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -51,7 +51,6 @@
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "system/address-spaces.h"
-#include "exec/exec-all.h"
#include "gdbstub/enums.h"
#include "hw/boards.h"
#include "system/accel-ops.h"
diff --git a/accel/hvf/hvf-all.c b/accel/hvf/hvf-all.c
index d404e01..8c387fd 100644
--- a/accel/hvf/hvf-all.c
+++ b/accel/hvf/hvf-all.c
@@ -12,6 +12,7 @@
#include "qemu/error-report.h"
#include "system/hvf.h"
#include "system/hvf_int.h"
+#include "hw/core/cpu.h"
const char *hvf_return_string(hv_return_t ret)
{
@@ -58,8 +59,13 @@ int hvf_sw_breakpoints_active(CPUState *cpu)
return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints);
}
-int hvf_update_guest_debug(CPUState *cpu)
+static void do_hvf_update_guest_debug(CPUState *cpu, run_on_cpu_data arg)
{
hvf_arch_update_guest_debug(cpu);
+}
+
+int hvf_update_guest_debug(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_hvf_update_guest_debug, RUN_ON_CPU_NULL);
return 0;
}
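The hvf_update_guest_debug() change above defers hvf_arch_update_guest_debug() to the vCPU's own thread via run_on_cpu(), which queues the callback and waits for it to complete. A minimal sketch of the same pattern when an argument must be passed, assuming only the generic run_on_cpu() API from hw/core/cpu.h; the request struct and helper names are illustrative:

    /* Illustrative only: pass a host pointer through run_on_cpu_data.
     * run_on_cpu() blocks until the callback has run on the target vCPU
     * thread, so handing over a stack-allocated request is safe. */
    struct debug_request {
        bool enable;
    };

    static void do_set_debug(CPUState *cpu, run_on_cpu_data arg)
    {
        struct debug_request *req = arg.host_ptr;

        if (req->enable) {
            hvf_arch_update_guest_debug(cpu);
        }
    }

    static void set_debug(CPUState *cpu, bool enable)
    {
        struct debug_request req = { .enable = enable };

        run_on_cpu(cpu, do_set_debug, RUN_ON_CPU_HOST_PTR(&req));
    }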
diff --git a/accel/stubs/hvf-stub.c b/accel/stubs/hvf-stub.c
new file mode 100644
index 0000000..42eadc5
--- /dev/null
+++ b/accel/stubs/hvf-stub.c
@@ -0,0 +1,12 @@
+/*
+ * HVF stubs for QEMU
+ *
+ * Copyright (c) Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "system/hvf.h"
+
+bool hvf_allowed;
diff --git a/accel/stubs/meson.build b/accel/stubs/meson.build
index 91a2d21..8ca1a45 100644
--- a/accel/stubs/meson.build
+++ b/accel/stubs/meson.build
@@ -2,5 +2,6 @@ system_stubs_ss = ss.source_set()
system_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c'))
system_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
system_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c'))
+system_stubs_ss.add(when: 'CONFIG_HVF', if_false: files('hvf-stub.c'))
specific_ss.add_all(when: ['CONFIG_SYSTEM_ONLY'], if_true: system_stubs_ss)
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 87eba83..cc5f362 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -22,8 +22,8 @@
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "hw/core/cpu.h"
-#include "accel/tcg/cpu-ldst.h"
#include "accel/tcg/cpu-ops.h"
+#include "accel/tcg/helper-retaddr.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/cpu-common.h"
@@ -36,7 +36,6 @@
#include "qemu/rcu.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
-#include "cpu.h"
#include "exec/icount.h"
#include "exec/replay-core.h"
#include "system/tcg.h"
@@ -46,7 +45,6 @@
#include "tb-context.h"
#include "tb-internal.h"
#include "internal-common.h"
-#include "internal-target.h"
/* -icount align implementation. */
@@ -151,12 +149,9 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
#endif /* CONFIG USER ONLY */
struct tb_desc {
- vaddr pc;
- uint64_t cs_base;
+ TCGTBCPUState s;
CPUArchState *env;
tb_page_addr_t page_addr0;
- uint32_t flags;
- uint32_t cflags;
};
static bool tb_lookup_cmp(const void *p, const void *d)
@@ -164,11 +159,11 @@ static bool tb_lookup_cmp(const void *p, const void *d)
const TranslationBlock *tb = p;
const struct tb_desc *desc = d;
- if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
+ if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->s.pc) &&
tb_page_addr0(tb) == desc->page_addr0 &&
- tb->cs_base == desc->cs_base &&
- tb->flags == desc->flags &&
- tb_cflags(tb) == desc->cflags) {
+ tb->cs_base == desc->s.cs_base &&
+ tb->flags == desc->s.flags &&
+ tb_cflags(tb) == desc->s.cflags) {
/* check next page if needed */
tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
if (tb_phys_page1 == -1) {
@@ -186,7 +181,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
* is different for the new TB. Therefore any exception raised
* here by the faulting lookup is not premature.
*/
- virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
+ virt_page1 = TARGET_PAGE_ALIGN(desc->s.pc);
phys_page1 = get_page_addr_code(desc->env, virt_page1);
if (tb_phys_page1 == phys_page1) {
return true;
@@ -196,26 +191,21 @@ static bool tb_lookup_cmp(const void *p, const void *d)
return false;
}
-static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
- uint64_t cs_base, uint32_t flags,
- uint32_t cflags)
+static TranslationBlock *tb_htable_lookup(CPUState *cpu, TCGTBCPUState s)
{
tb_page_addr_t phys_pc;
struct tb_desc desc;
uint32_t h;
+ desc.s = s;
desc.env = cpu_env(cpu);
- desc.cs_base = cs_base;
- desc.flags = flags;
- desc.cflags = cflags;
- desc.pc = pc;
- phys_pc = get_page_addr_code(desc.env, pc);
+ phys_pc = get_page_addr_code(desc.env, s.pc);
if (phys_pc == -1) {
return NULL;
}
desc.page_addr0 = phys_pc;
- h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
- flags, cs_base, cflags);
+ h = tb_hash_func(phys_pc, (s.cflags & CF_PCREL ? 0 : s.pc),
+ s.flags, s.cs_base, s.cflags);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
@@ -233,35 +223,33 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
*
* Returns: an existing translation block or NULL.
*/
-static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
- uint64_t cs_base, uint32_t flags,
- uint32_t cflags)
+static inline TranslationBlock *tb_lookup(CPUState *cpu, TCGTBCPUState s)
{
TranslationBlock *tb;
CPUJumpCache *jc;
uint32_t hash;
/* we should never be trying to look up an INVALID tb */
- tcg_debug_assert(!(cflags & CF_INVALID));
+ tcg_debug_assert(!(s.cflags & CF_INVALID));
- hash = tb_jmp_cache_hash_func(pc);
+ hash = tb_jmp_cache_hash_func(s.pc);
jc = cpu->tb_jmp_cache;
tb = qatomic_read(&jc->array[hash].tb);
if (likely(tb &&
- jc->array[hash].pc == pc &&
- tb->cs_base == cs_base &&
- tb->flags == flags &&
- tb_cflags(tb) == cflags)) {
+ jc->array[hash].pc == s.pc &&
+ tb->cs_base == s.cs_base &&
+ tb->flags == s.flags &&
+ tb_cflags(tb) == s.cflags)) {
goto hit;
}
- tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
+ tb = tb_htable_lookup(cpu, s);
if (tb == NULL) {
return NULL;
}
- jc->array[hash].pc = pc;
+ jc->array[hash].pc = s.pc;
qatomic_set(&jc->array[hash].tb, tb);
hit:
@@ -269,7 +257,7 @@ hit:
* As long as tb is not NULL, the contents are consistent. Therefore,
* the virtual PC has to match for non-CF_PCREL translations.
*/
- assert((tb_cflags(tb) & CF_PCREL) || tb->pc == pc);
+ assert((tb_cflags(tb) & CF_PCREL) || tb->pc == s.pc);
return tb;
}
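tb_desc and tb_lookup() above now carry the CPU's translation-relevant state as a single TCGTBCPUState value instead of four scalars. A sketch of the shape that struct takes in this series (accel/tcg/tb-cpu-state.h) together with a hypothetical target implementation of the new TCGCPUOps::get_tb_cpu_state hook; the field layout matches the usage in this diff, but the target side is an assumption:

    typedef struct TCGTBCPUState {
        vaddr pc;
        uint32_t flags;
        uint32_t cflags;
        uint64_t cs_base;
    } TCGTBCPUState;

    /* Hypothetical target hook: report pc/flags/cs_base; cflags is
     * filled in by the caller (curr_cflags() or cflags_next_tb). */
    static TCGTBCPUState foo_get_tb_cpu_state(CPUState *cs)
    {
        CPUFooState *env = cpu_env(cs);

        return (TCGTBCPUState){
            .pc = env->pc,
            .flags = foo_compute_tb_flags(env),  /* assumed helper */
        };
    }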
@@ -286,14 +274,11 @@ static void log_cpu_exec(vaddr pc, CPUState *cpu,
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
FILE *logfile = qemu_log_trylock();
if (logfile) {
- int flags = 0;
+ int flags = CPU_DUMP_CCOP;
if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
flags |= CPU_DUMP_FPU;
}
-#if defined(TARGET_I386)
- flags |= CPU_DUMP_CCOP;
-#endif
if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
flags |= CPU_DUMP_VPU;
}
@@ -389,9 +374,6 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
CPUState *cpu = env_cpu(env);
TranslationBlock *tb;
- vaddr pc;
- uint64_t cs_base;
- uint32_t flags, cflags;
/*
* By definition we've just finished a TB, so I/O is OK.
@@ -401,20 +383,21 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
* The next TB, if we chain to it, will clear the flag again.
*/
cpu->neg.can_do_io = true;
- cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- cflags = curr_cflags(cpu);
- if (check_for_breakpoints(cpu, pc, &cflags)) {
+ TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
+ s.cflags = curr_cflags(cpu);
+
+ if (check_for_breakpoints(cpu, s.pc, &s.cflags)) {
cpu_loop_exit(cpu);
}
- tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+ tb = tb_lookup(cpu, s);
if (tb == NULL) {
return tcg_code_gen_epilogue;
}
if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
- log_cpu_exec(pc, cpu, tb);
+ log_cpu_exec(s.pc, cpu, tb);
}
return tb->tc.ptr;
@@ -564,11 +547,7 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)
void cpu_exec_step_atomic(CPUState *cpu)
{
- CPUArchState *env = cpu_env(cpu);
TranslationBlock *tb;
- vaddr pc;
- uint64_t cs_base;
- uint32_t flags, cflags;
int tb_exit;
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
@@ -577,13 +556,13 @@ void cpu_exec_step_atomic(CPUState *cpu)
g_assert(!cpu->running);
cpu->running = true;
- cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
+ TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
+ s.cflags = curr_cflags(cpu);
- cflags = curr_cflags(cpu);
/* Execute in a serial context. */
- cflags &= ~CF_PARALLEL;
+ s.cflags &= ~CF_PARALLEL;
/* After 1 insn, return and release the exclusive lock. */
- cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
+ s.cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
/*
* No need to check_for_breakpoints here.
* We only arrive in cpu_exec_step_atomic after beginning execution
@@ -591,16 +570,16 @@ void cpu_exec_step_atomic(CPUState *cpu)
* Any breakpoint for this insn will have been recognized earlier.
*/
- tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+ tb = tb_lookup(cpu, s);
if (tb == NULL) {
mmap_lock();
- tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+ tb = tb_gen_code(cpu, s);
mmap_unlock();
}
cpu_exec_enter(cpu);
/* execute the generated code */
- trace_exec_tb(tb, pc);
+ trace_exec_tb(tb, s.pc);
cpu_tb_exec(cpu, tb, &tb_exit);
cpu_exec_exit(cpu);
} else {
@@ -733,10 +712,10 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
* If user mode only, we simulate a fake exception which will be
* handled outside the cpu execution loop.
*/
-#if defined(TARGET_I386)
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
- tcg_ops->fake_user_interrupt(cpu);
-#endif /* TARGET_I386 */
+ if (tcg_ops->fake_user_interrupt) {
+ tcg_ops->fake_user_interrupt(cpu);
+ }
*ret = cpu->exception_index;
cpu->exception_index = -1;
return true;
@@ -823,33 +802,22 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
cpu->exception_index = EXCP_HLT;
bql_unlock();
return true;
- }
-#if defined(TARGET_I386)
- else if (interrupt_request & CPU_INTERRUPT_INIT) {
- X86CPU *x86_cpu = X86_CPU(cpu);
- CPUArchState *env = &x86_cpu->env;
- replay_interrupt();
- cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
- do_cpu_init(x86_cpu);
- cpu->exception_index = EXCP_HALTED;
- bql_unlock();
- return true;
- }
-#else
- else if (interrupt_request & CPU_INTERRUPT_RESET) {
- replay_interrupt();
- cpu_reset(cpu);
- bql_unlock();
- return true;
- }
-#endif /* !TARGET_I386 */
- /* The target hook has 3 exit conditions:
- False when the interrupt isn't processed,
- True when it is, and we should restart on a new TB,
- and via longjmp via cpu_loop_exit. */
- else {
+ } else {
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
+ if (interrupt_request & CPU_INTERRUPT_RESET) {
+ replay_interrupt();
+ tcg_ops->cpu_exec_reset(cpu);
+ bql_unlock();
+ return true;
+ }
+
+ /*
+ * The target hook has 3 exit conditions:
+ * False when the interrupt isn't processed,
+ * True when it is, and we should restart on a new TB,
+ * and via longjmp via cpu_loop_exit.
+ */
if (tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
if (!tcg_ops->need_replay_interrupt ||
tcg_ops->need_replay_interrupt(interrupt_request)) {
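The interrupt path above replaces the TARGET_I386 special cases with TCGCPUOps hooks (cpu_exec_reset for CPU_INTERRUPT_RESET, fake_user_interrupt for the user-mode exception path), so each target declares them in its ops table. A hedged sketch of that wiring; the target name and the non-reset hooks are placeholders, and using cpu_reset() as the reset hook follows the common case rather than any particular target:

    static const TCGCPUOps foo_tcg_ops = {
        .get_tb_cpu_state   = foo_get_tb_cpu_state,
        .translate_code     = foo_translate_code,
        .mmu_index          = foo_mmu_index,
    #ifndef CONFIG_USER_ONLY
        .cpu_exec_interrupt = foo_cpu_exec_interrupt,
        .cpu_exec_halt      = foo_cpu_exec_halt,
        /* Most targets simply reset the CPU on CPU_INTERRUPT_RESET. */
        .cpu_exec_reset     = cpu_reset,
    #endif
    };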
@@ -956,11 +924,8 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
while (!cpu_handle_interrupt(cpu, &last_tb)) {
TranslationBlock *tb;
- vaddr pc;
- uint64_t cs_base;
- uint32_t flags, cflags;
-
- cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags);
+ TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
+ s.cflags = cpu->cflags_next_tb;
/*
* When requested, use an exact setting for cflags for the next
@@ -969,33 +934,32 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
* have CF_INVALID set, -1 is a convenient invalid value that
* does not require tcg headers for cpu_common_reset.
*/
- cflags = cpu->cflags_next_tb;
- if (cflags == -1) {
- cflags = curr_cflags(cpu);
+ if (s.cflags == -1) {
+ s.cflags = curr_cflags(cpu);
} else {
cpu->cflags_next_tb = -1;
}
- if (check_for_breakpoints(cpu, pc, &cflags)) {
+ if (check_for_breakpoints(cpu, s.pc, &s.cflags)) {
break;
}
- tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+ tb = tb_lookup(cpu, s);
if (tb == NULL) {
CPUJumpCache *jc;
uint32_t h;
mmap_lock();
- tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+ tb = tb_gen_code(cpu, s);
mmap_unlock();
/*
* We add the TB in the virtual pc hash table
* for the fast lookup
*/
- h = tb_jmp_cache_hash_func(pc);
+ h = tb_jmp_cache_hash_func(s.pc);
jc = cpu->tb_jmp_cache;
- jc->array[h].pc = pc;
+ jc->array[h].pc = s.pc;
qatomic_set(&jc->array[h].tb, tb);
}
@@ -1015,7 +979,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
tb_add_jump(last_tb, tb_exit, tb);
}
- cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);
+ cpu_loop_exec_tb(cpu, tb, s.pc, &last_tb, &tb_exit);
/* Try to align the host and virtual clocks
if the guest is in advance */
@@ -1074,8 +1038,10 @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
#ifndef CONFIG_USER_ONLY
assert(tcg_ops->cpu_exec_halt);
assert(tcg_ops->cpu_exec_interrupt);
+ assert(tcg_ops->cpu_exec_reset);
#endif /* !CONFIG_USER_ONLY */
assert(tcg_ops->translate_code);
+ assert(tcg_ops->get_tb_cpu_state);
assert(tcg_ops->mmu_index);
tcg_ops->initialize();
tcg_target_initialized = true;
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index d9fb68d..5f6d7c6 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -19,11 +19,14 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
+#include "qemu/target-info.h"
#include "accel/tcg/cpu-ops.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/iommu.h"
+#include "accel/tcg/probe.h"
#include "exec/page-protection.h"
#include "system/memory.h"
-#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/cpu-ldst-common.h"
+#include "accel/tcg/cpu-mmu-index.h"
#include "exec/cputlb.h"
#include "exec/tb-flush.h"
#include "system/ram_addr.h"
@@ -43,7 +46,6 @@
#include "tb-internal.h"
#include "tlb-bounds.h"
#include "internal-common.h"
-#include "internal-target.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
@@ -771,19 +773,19 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
assert_cpu_is_self(cpu);
+ /* If no page bits are significant, this devolves to tlb_flush. */
+ if (bits < TARGET_PAGE_BITS) {
+ tlb_flush_by_mmuidx(cpu, idxmap);
+ return;
+ }
/*
* If all bits are significant, and len is small,
* this devolves to tlb_flush_page.
*/
- if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
+ if (len <= TARGET_PAGE_SIZE && bits >= target_long_bits()) {
tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
return;
}
- /* If no page bits are significant, this devolves to tlb_flush. */
- if (bits < TARGET_PAGE_BITS) {
- tlb_flush_by_mmuidx(cpu, idxmap);
- return;
- }
/* This should already be page aligned */
d.addr = addr & TARGET_PAGE_MASK;
@@ -809,19 +811,19 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
TLBFlushRangeData d, *p;
CPUState *dst_cpu;
+ /* If no page bits are significant, this devolves to tlb_flush. */
+ if (bits < TARGET_PAGE_BITS) {
+ tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
+ return;
+ }
/*
* If all bits are significant, and len is small,
* this devolves to tlb_flush_page.
*/
- if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
+ if (len <= TARGET_PAGE_SIZE && bits >= target_long_bits()) {
tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
return;
}
- /* If no page bits are significant, this devolves to tlb_flush. */
- if (bits < TARGET_PAGE_BITS) {
- tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
- return;
- }
/* This should already be page aligned */
d.addr = addr & TARGET_PAGE_MASK;
@@ -1340,7 +1342,7 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
- tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
+ tb_invalidate_phys_range_fast(cpu, ram_addr, size, retaddr);
}
/*
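The reordered checks in tlb_flush_range_by_mmuidx() above first devolve to a full flush when bits < TARGET_PAGE_BITS, and only then to a single-page flush when the range fits in one page and bits covers the (now runtime-queried) target long. A small usage sketch, assuming nothing beyond the functions already visible in these hunks; the helper name is illustrative:

    /* Flush one guest page in the given MMU modes.  Because
     * len == TARGET_PAGE_SIZE and every address bit is significant,
     * tlb_flush_range_by_mmuidx() devolves to the page-level flush;
     * bits < TARGET_PAGE_BITS would instead devolve to a full flush. */
    static void flush_one_page(CPUState *cpu, vaddr addr, uint16_t idxmap)
    {
        tlb_flush_range_by_mmuidx(cpu, addr & TARGET_PAGE_MASK,
                                  TARGET_PAGE_SIZE, idxmap,
                                  target_long_bits());
    }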
diff --git a/accel/tcg/internal-common.h b/accel/tcg/internal-common.h
index 2f00560..1dbc45d 100644
--- a/accel/tcg/internal-common.h
+++ b/accel/tcg/internal-common.h
@@ -11,6 +11,8 @@
#include "exec/cpu-common.h"
#include "exec/translation-block.h"
+#include "exec/mmap-lock.h"
+#include "accel/tcg/tb-cpu-state.h"
extern int64_t max_delay;
extern int64_t max_advance;
@@ -45,9 +47,7 @@ static inline bool cpu_plugin_mem_cbs_enabled(const CPUState *cpu)
#endif
}
-TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
- uint64_t cs_base, uint32_t flags,
- int cflags);
+TranslationBlock *tb_gen_code(CPUState *cpu, TCGTBCPUState s);
void page_init(void);
void tb_htable_init(void);
void tb_reset_jump(TranslationBlock *tb, int n);
@@ -108,4 +108,35 @@ static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
return get_page_addr_code_hostp(env, addr, NULL);
}
+/*
+ * Access to the various translations structures need to be serialised
+ * via locks for consistency. In user-mode emulation access to the
+ * memory related structures are protected with mmap_lock.
+ * In !user-mode we use per-page locks.
+ */
+#ifdef CONFIG_USER_ONLY
+#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
+#else
+#define assert_memory_lock()
+#endif
+
+#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
+void assert_no_pages_locked(void);
+#else
+static inline void assert_no_pages_locked(void) { }
+#endif
+
+#ifdef CONFIG_USER_ONLY
+static inline void page_table_config_init(void) { }
+#else
+void page_table_config_init(void);
+#endif
+
+#ifndef CONFIG_USER_ONLY
+G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
+#endif /* CONFIG_USER_ONLY */
+
+void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
+void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
+
#endif
diff --git a/accel/tcg/internal-target.h b/accel/tcg/internal-target.h
deleted file mode 100644
index 9a9cef3..0000000
--- a/accel/tcg/internal-target.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Internal execution defines for qemu (target specific)
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * SPDX-License-Identifier: LGPL-2.1-or-later
- */
-
-#ifndef ACCEL_TCG_INTERNAL_TARGET_H
-#define ACCEL_TCG_INTERNAL_TARGET_H
-
-#include "cpu-param.h"
-#include "exec/exec-all.h"
-#include "exec/translation-block.h"
-#include "tb-internal.h"
-#include "exec/mmap-lock.h"
-
-/*
- * Access to the various translations structures need to be serialised
- * via locks for consistency. In user-mode emulation access to the
- * memory related structures are protected with mmap_lock.
- * In !user-mode we use per-page locks.
- */
-#ifdef CONFIG_USER_ONLY
-#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
-#else
-#define assert_memory_lock()
-#endif
-
-#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
-void assert_no_pages_locked(void);
-#else
-static inline void assert_no_pages_locked(void) { }
-#endif
-
-#ifdef CONFIG_USER_ONLY
-static inline void page_table_config_init(void) { }
-#else
-void page_table_config_init(void);
-#endif
-
-#ifndef CONFIG_USER_ONLY
-G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
-#endif /* CONFIG_USER_ONLY */
-
-#endif /* ACCEL_TCG_INTERNAL_H */
diff --git a/accel/tcg/ldst_common.c.inc b/accel/tcg/ldst_common.c.inc
index 9791a4e..57f3e06 100644
--- a/accel/tcg/ldst_common.c.inc
+++ b/accel/tcg/ldst_common.c.inc
@@ -123,7 +123,7 @@ void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
* Load helpers for cpu_ldst.h
*/
-static void plugin_load_cb(CPUArchState *env, abi_ptr addr,
+static void plugin_load_cb(CPUArchState *env, vaddr addr,
uint64_t value_low,
uint64_t value_high,
MemOpIdx oi)
@@ -193,7 +193,7 @@ Int128 cpu_ld16_mmu(CPUArchState *env, vaddr addr,
* Store helpers for cpu_ldst.h
*/
-static void plugin_store_cb(CPUArchState *env, abi_ptr addr,
+static void plugin_store_cb(CPUArchState *env, vaddr addr,
uint64_t value_low,
uint64_t value_high,
MemOpIdx oi)
diff --git a/accel/tcg/meson.build b/accel/tcg/meson.build
index 047afa4..d6f533f 100644
--- a/accel/tcg/meson.build
+++ b/accel/tcg/meson.build
@@ -5,9 +5,13 @@ endif
tcg_ss = ss.source_set()
tcg_ss.add(files(
+ 'cpu-exec.c',
'cpu-exec-common.c',
'tcg-runtime.c',
'tcg-runtime-gvec.c',
+ 'tb-maint.c',
+ 'tcg-all.c',
+ 'translate-all.c',
'translator.c',
))
if get_option('plugins')
@@ -17,25 +21,13 @@ endif
libuser_ss.add_all(tcg_ss)
libsystem_ss.add_all(tcg_ss)
-tcg_specific_ss = ss.source_set()
-tcg_specific_ss.add(files(
- 'tcg-all.c',
- 'cpu-exec.c',
- 'tb-maint.c',
- 'translate-all.c',
-))
-tcg_specific_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c'))
-specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_specific_ss)
-
-specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
- 'cputlb.c',
-))
-
libuser_ss.add(files(
+ 'user-exec.c',
'user-exec-stub.c',
))
libsystem_ss.add(files(
+ 'cputlb.c',
'icount-common.c',
'monitor.c',
'tcg-accel-ops.c',
diff --git a/accel/tcg/tb-hash.h b/accel/tcg/tb-hash.h
index 3bc5042..f7b159f 100644
--- a/accel/tcg/tb-hash.h
+++ b/accel/tcg/tb-hash.h
@@ -20,8 +20,7 @@
#ifndef EXEC_TB_HASH_H
#define EXEC_TB_HASH_H
-#include "exec/cpu-defs.h"
-#include "exec/exec-all.h"
+#include "exec/vaddr.h"
#include "exec/target_page.h"
#include "exec/translation-block.h"
#include "qemu/xxhash.h"
diff --git a/accel/tcg/tb-internal.h b/accel/tcg/tb-internal.h
index 08538e2..40439f0 100644
--- a/accel/tcg/tb-internal.h
+++ b/accel/tcg/tb-internal.h
@@ -45,11 +45,11 @@ void tb_unlock_pages(TranslationBlock *);
#endif
#ifdef CONFIG_SOFTMMU
-void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
- unsigned size,
- uintptr_t retaddr);
+void tb_invalidate_phys_range_fast(CPUState *cpu, ram_addr_t ram_addr,
+ unsigned size, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */
-bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
+bool tb_invalidate_phys_page_unwind(CPUState *cpu, tb_page_addr_t addr,
+ uintptr_t pc);
#endif
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index d479f53..0048316 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -20,14 +20,13 @@
#include "qemu/osdep.h"
#include "qemu/interval-tree.h"
#include "qemu/qtree.h"
-#include "cpu.h"
#include "exec/cputlb.h"
#include "exec/log.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/mmap-lock.h"
#include "exec/tb-flush.h"
#include "exec/target_page.h"
+#include "accel/tcg/cpu-ops.h"
#include "tb-internal.h"
#include "system/tcg.h"
#include "tcg/tcg.h"
@@ -35,7 +34,6 @@
#include "tb-context.h"
#include "tb-internal.h"
#include "internal-common.h"
-#include "internal-target.h"
#ifdef CONFIG_USER_ONLY
#include "user/page-protection.h"
#endif
@@ -159,11 +157,7 @@ static PageForEachNext foreach_tb_next(PageForEachNext tb,
/*
* In system mode we want L1_MAP to be based on ram offsets.
*/
-#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
-# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
-#else
-# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
-#endif
+#define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
/* Size of the L2 (and L3, etc) page tables. */
#define V_L2_BITS 10
@@ -1012,7 +1006,8 @@ TranslationBlock *tb_link_page(TranslationBlock *tb)
* Called with mmap_lock held for user-mode emulation.
* NOTE: this function must not be called while a TB is running.
*/
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
+void tb_invalidate_phys_range(CPUState *cpu, tb_page_addr_t start,
+ tb_page_addr_t last)
{
TranslationBlock *tb;
PageForEachNext n;
@@ -1035,17 +1030,16 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr)
start = addr & TARGET_PAGE_MASK;
last = addr | ~TARGET_PAGE_MASK;
- tb_invalidate_phys_range(start, last);
+ tb_invalidate_phys_range(NULL, start, last);
}
/*
* Called with mmap_lock held. If pc is not 0 then it indicates the
* host PC of the faulting store instruction that caused this invalidate.
- * Returns true if the caller needs to abort execution of the current
- * TB (because it was modified by this store and the guest CPU has
- * precise-SMC semantics).
+ * Returns true if the caller needs to abort execution of the current TB.
*/
-bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
+bool tb_invalidate_phys_page_unwind(CPUState *cpu, tb_page_addr_t addr,
+ uintptr_t pc)
{
TranslationBlock *current_tb;
bool current_tb_modified;
@@ -1057,10 +1051,7 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
* Without precise smc semantics, or when outside of a TB,
* we can skip to invalidate.
*/
-#ifndef TARGET_HAS_PRECISE_SMC
- pc = 0;
-#endif
- if (!pc) {
+ if (!pc || !cpu || !cpu->cc->tcg_ops->precise_smc) {
tb_invalidate_phys_page(addr);
return false;
}
@@ -1083,15 +1074,14 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
* the CPU state.
*/
current_tb_modified = true;
- cpu_restore_state_from_tb(current_cpu, current_tb, pc);
+ cpu_restore_state_from_tb(cpu, current_tb, pc);
}
tb_phys_invalidate__locked(tb);
}
if (current_tb_modified) {
/* Force execution of one insn next time. */
- CPUState *cpu = current_cpu;
- cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
+ cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
return true;
}
return false;
@@ -1100,23 +1090,28 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
/*
* @p must be non-NULL.
* Call with all @pages locked.
+ * (@cpu, @retaddr) may be (NULL, 0) outside of a cpu context,
+ * in which case precise_smc need not be detected.
*/
static void
-tb_invalidate_phys_page_range__locked(struct page_collection *pages,
+tb_invalidate_phys_page_range__locked(CPUState *cpu,
+ struct page_collection *pages,
PageDesc *p, tb_page_addr_t start,
tb_page_addr_t last,
uintptr_t retaddr)
{
TranslationBlock *tb;
PageForEachNext n;
-#ifdef TARGET_HAS_PRECISE_SMC
bool current_tb_modified = false;
- TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
-#endif /* TARGET_HAS_PRECISE_SMC */
+ TranslationBlock *current_tb = NULL;
/* Range may not cross a page. */
tcg_debug_assert(((start ^ last) & TARGET_PAGE_MASK) == 0);
+ if (retaddr && cpu && cpu->cc->tcg_ops->precise_smc) {
+ current_tb = tcg_tb_lookup(retaddr);
+ }
+
/*
* We remove all the TBs in the range [start, last].
* XXX: see if in some cases it could be faster to invalidate all the code
@@ -1134,8 +1129,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK);
}
if (!(tb_last < start || tb_start > last)) {
-#ifdef TARGET_HAS_PRECISE_SMC
- if (current_tb == tb &&
+ if (unlikely(current_tb == tb) &&
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
/*
* If we are modifying the current TB, we must stop
@@ -1145,9 +1139,8 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
* restore the CPU state.
*/
current_tb_modified = true;
- cpu_restore_state_from_tb(current_cpu, current_tb, retaddr);
+ cpu_restore_state_from_tb(cpu, current_tb, retaddr);
}
-#endif /* TARGET_HAS_PRECISE_SMC */
tb_phys_invalidate__locked(tb);
}
}
@@ -1157,15 +1150,13 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
tlb_unprotect_code(start);
}
-#ifdef TARGET_HAS_PRECISE_SMC
- if (current_tb_modified) {
+ if (unlikely(current_tb_modified)) {
page_collection_unlock(pages);
/* Force execution of one insn next time. */
- current_cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
+ cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
mmap_unlock();
- cpu_loop_exit_noexc(current_cpu);
+ cpu_loop_exit_noexc(cpu);
}
-#endif
}
/*
@@ -1175,7 +1166,8 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
* access: the virtual CPU will exit the current TB if code is modified inside
* this TB.
*/
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
+void tb_invalidate_phys_range(CPUState *cpu, tb_page_addr_t start,
+ tb_page_addr_t last)
{
struct page_collection *pages;
tb_page_addr_t index, index_last;
@@ -1194,44 +1186,30 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
page_start = index << TARGET_PAGE_BITS;
page_last = page_start | ~TARGET_PAGE_MASK;
page_last = MIN(page_last, last);
- tb_invalidate_phys_page_range__locked(pages, pd,
+ tb_invalidate_phys_page_range__locked(cpu, pages, pd,
page_start, page_last, 0);
}
page_collection_unlock(pages);
}
/*
- * Call with all @pages in the range [@start, @start + len[ locked.
- */
-static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
- tb_page_addr_t start,
- unsigned len, uintptr_t ra)
-{
- PageDesc *p;
-
- p = page_find(start >> TARGET_PAGE_BITS);
- if (!p) {
- return;
- }
-
- assert_page_locked(p);
- tb_invalidate_phys_page_range__locked(pages, p, start, start + len - 1, ra);
-}
-
-/*
* len must be <= 8 and start must be a multiple of len.
* Called via softmmu_template.h when code areas are written to with
* iothread mutex not held.
*/
-void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
- unsigned size,
- uintptr_t retaddr)
+void tb_invalidate_phys_range_fast(CPUState *cpu, ram_addr_t start,
+ unsigned len, uintptr_t ra)
{
- struct page_collection *pages;
+ PageDesc *p = page_find(start >> TARGET_PAGE_BITS);
- pages = page_collection_lock(ram_addr, ram_addr + size - 1);
- tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
- page_collection_unlock(pages);
+ if (p) {
+ ram_addr_t last = start + len - 1;
+ struct page_collection *pages = page_collection_lock(start, last);
+
+ tb_invalidate_phys_page_range__locked(cpu, pages, p,
+ start, last, ra);
+ page_collection_unlock(pages);
+ }
}
#endif /* CONFIG_USER_ONLY */
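With the TARGET_HAS_PRECISE_SMC ifdefs above removed, precise self-modifying-code handling is requested per target through a TCGCPUOps flag and is only honoured when a CPU context is passed in. A hedged sketch of how a target (historically only i386) would opt in; the struct name and elided fields are placeholders:

    static const TCGCPUOps i386_like_tcg_ops = {
        /* ... other hooks ... */
        /*
         * Ask for precise SMC: when a store hits the page of the TB that
         * is currently executing, unwind to the store, invalidate the TB
         * and retranslate so that stale code is never run.
         */
        .precise_smc = true,
    };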
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
index 0ce34ac..6e5dc33 100644
--- a/accel/tcg/tcg-all.c
+++ b/accel/tcg/tcg-all.c
@@ -36,15 +36,11 @@
#include "qapi/qapi-builtin-visit.h"
#include "qemu/units.h"
#include "qemu/target-info.h"
-#if defined(CONFIG_USER_ONLY)
-#include "hw/qdev-core.h"
-#else
+#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
-#include "system/tcg.h"
#endif
#include "accel/tcg/cpu-ops.h"
#include "internal-common.h"
-#include "cpu-param.h"
struct TCGState {
diff --git a/accel/tcg/tlb-bounds.h b/accel/tcg/tlb-bounds.h
index efd34d4..f83d9ac 100644
--- a/accel/tcg/tlb-bounds.h
+++ b/accel/tcg/tlb-bounds.h
@@ -7,26 +7,7 @@
#define ACCEL_TCG_TLB_BOUNDS_H
#define CPU_TLB_DYN_MIN_BITS 6
+#define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
#define CPU_TLB_DYN_DEFAULT_BITS 8
-# if HOST_LONG_BITS == 32
-/* Make sure we do not require a double-word shift for the TLB load */
-# define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
-# else /* HOST_LONG_BITS == 64 */
-/*
- * Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) ==
- * 2**34 == 16G of address space. This is roughly what one would expect a
- * TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel
- * Skylake's Level-2 STLB has 16 1G entries.
- * Also, make sure we do not size the TLB past the guest's address space.
- */
-# ifdef TARGET_PAGE_BITS_VARY
-# define CPU_TLB_DYN_MAX_BITS \
- MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
-# else
-# define CPU_TLB_DYN_MAX_BITS \
- MIN_CONST(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
-# endif
-# endif
-
#endif /* ACCEL_TCG_TLB_BOUNDS_H */
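The collapsed definition above caps the dynamic TLB at 32 - TARGET_PAGE_BITS index bits on every host, the bound previously applied only to 32-bit hosts so the TLB load never needs a double-word shift. With the usual TARGET_PAGE_BITS == 12 this allows up to 2^20 entries, covering 2^(20+12) = 4 GiB of guest address space, in place of the old 64-bit-host cap of 2^22 entries (16 GiB).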
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index c007b9a..451b383 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -21,55 +21,23 @@
#include "trace.h"
#include "disas/disas.h"
-#include "exec/exec-all.h"
#include "tcg/tcg.h"
-#if defined(CONFIG_USER_ONLY)
-#include "qemu.h"
-#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
-#include <sys/param.h>
-#if __FreeBSD_version >= 700104
-#define HAVE_KINFO_GETVMMAP
-#define sigqueue sigqueue_freebsd /* avoid redefinition */
-#include <sys/proc.h>
-#include <machine/profile.h>
-#define _KERNEL
-#include <sys/user.h>
-#undef _KERNEL
-#undef sigqueue
-#include <libutil.h>
-#endif
-#endif
-#else
-#include "system/ram_addr.h"
-#endif
-
-#include "cpu-param.h"
-#include "exec/cputlb.h"
-#include "exec/page-protection.h"
#include "exec/mmap-lock.h"
#include "tb-internal.h"
#include "tlb-bounds.h"
-#include "exec/translator.h"
#include "exec/tb-flush.h"
-#include "qemu/bitmap.h"
-#include "qemu/qemu-print.h"
-#include "qemu/main-loop.h"
#include "qemu/cacheinfo.h"
-#include "qemu/timer.h"
+#include "qemu/target-info.h"
#include "exec/log.h"
#include "exec/icount.h"
-#include "system/tcg.h"
-#include "qapi/error.h"
#include "accel/tcg/cpu-ops.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "tb-internal.h"
#include "internal-common.h"
-#include "internal-target.h"
#include "tcg/perf.h"
#include "tcg/insn-start-words.h"
-#include "cpu.h"
TBContext tb_ctx;
@@ -110,7 +78,7 @@ static int64_t decode_sleb128(const uint8_t **pp)
val |= (int64_t)(byte & 0x7f) << shift;
shift += 7;
} while (byte & 0x80);
- if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
+ if (shift < 64 && (byte & 0x40)) {
val |= -(int64_t)1 << shift;
}
@@ -121,7 +89,7 @@ static int64_t decode_sleb128(const uint8_t **pp)
/* Encode the data collected about the instructions while compiling TB.
Place the data at BLOCK, and return the number of bytes consumed.
- The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
+ The logical table consists of INSN_START_WORDS uint64_t's,
which come from the target's insn_start data, followed by a uintptr_t
which comes from the host pc of the end of the code implementing the insn.
@@ -141,13 +109,13 @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
for (i = 0, n = tb->icount; i < n; ++i) {
uint64_t prev, curr;
- for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
+ for (j = 0; j < INSN_START_WORDS; ++j) {
if (i == 0) {
prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
} else {
- prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j];
+ prev = insn_data[(i - 1) * INSN_START_WORDS + j];
}
- curr = insn_data[i * TARGET_INSN_START_WORDS + j];
+ curr = insn_data[i * INSN_START_WORDS + j];
p = encode_sleb128(p, curr - prev);
}
prev = (i == 0 ? 0 : insn_end_off[i - 1]);
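encode_search() above emits, per guest instruction, INSN_START_WORDS guest values plus one host-code offset, each stored as a signed LEB128 delta against the previous row; decode_sleb128() shown earlier undoes it. A sketch of the matching encoder, written from the textbook SLEB128 algorithm rather than copied from translate-all.c:

    static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
    {
        bool more;

        do {
            uint8_t byte = val & 0x7f;

            val >>= 7;
            /* Stop once the remaining bits are pure sign extension of bit 6. */
            more = !((val == 0 && !(byte & 0x40)) ||
                     (val == -1 && (byte & 0x40)));
            *p++ = byte | (more ? 0x80 : 0);
        } while (more);

        return p;
    }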
@@ -179,7 +147,7 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
return -1;
}
- memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS);
+ memset(data, 0, sizeof(uint64_t) * INSN_START_WORDS);
if (!(tb_cflags(tb) & CF_PCREL)) {
data[0] = tb->pc;
}
@@ -189,7 +157,7 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
* at which the end of the insn exceeds host_pc.
*/
for (i = 0; i < num_insns; ++i) {
- for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
+ for (j = 0; j < INSN_START_WORDS; ++j) {
data[j] += decode_sleb128(&p);
}
iter_pc += decode_sleb128(&p);
@@ -207,7 +175,7 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t host_pc)
{
- uint64_t data[TARGET_INSN_START_WORDS];
+ uint64_t data[INSN_START_WORDS];
int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);
if (insns_left < 0) {
@@ -291,9 +259,7 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
}
/* Called with mmap_lock held for user mode emulation. */
-TranslationBlock *tb_gen_code(CPUState *cpu,
- vaddr pc, uint64_t cs_base,
- uint32_t flags, int cflags)
+TranslationBlock *tb_gen_code(CPUState *cpu, TCGTBCPUState s)
{
CPUArchState *env = cpu_env(cpu);
TranslationBlock *tb, *existing_tb;
@@ -306,14 +272,14 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
assert_memory_lock();
qemu_thread_jit_write();
- phys_pc = get_page_addr_code_hostp(env, pc, &host_pc);
+ phys_pc = get_page_addr_code_hostp(env, s.pc, &host_pc);
if (phys_pc == -1) {
/* Generate a one-shot TB with 1 insn in it */
- cflags = (cflags & ~CF_COUNT_MASK) | 1;
+ s.cflags = (s.cflags & ~CF_COUNT_MASK) | 1;
}
- max_insns = cflags & CF_COUNT_MASK;
+ max_insns = s.cflags & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = TCG_MAX_INSNS;
}
@@ -333,12 +299,12 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
gen_code_buf = tcg_ctx->code_gen_ptr;
tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
- if (!(cflags & CF_PCREL)) {
- tb->pc = pc;
+ if (!(s.cflags & CF_PCREL)) {
+ tb->pc = s.pc;
}
- tb->cs_base = cs_base;
- tb->flags = flags;
- tb->cflags = cflags;
+ tb->cs_base = s.cs_base;
+ tb->flags = s.flags;
+ tb->cflags = s.cflags;
tb_set_page_addr0(tb, phys_pc);
tb_set_page_addr1(tb, -1);
if (phys_pc != -1) {
@@ -346,19 +312,18 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
}
tcg_ctx->gen_tb = tb;
- tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
+ tcg_ctx->addr_type = target_long_bits() == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
#ifdef CONFIG_SOFTMMU
tcg_ctx->page_bits = TARGET_PAGE_BITS;
tcg_ctx->page_mask = TARGET_PAGE_MASK;
tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
#endif
- tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
tcg_ctx->guest_mo = cpu->cc->tcg_ops->guest_default_memory_order;
restart_translate:
- trace_translate_block(tb, pc, tb->tc.ptr);
+ trace_translate_block(tb, s.pc, tb->tc.ptr);
- gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
+ gen_code_size = setjmp_gen_code(env, tb, s.pc, host_pc, &max_insns, &ti);
if (unlikely(gen_code_size < 0)) {
switch (gen_code_size) {
case -1:
@@ -435,10 +400,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
* For CF_PCREL, attribute all executions of the generated code
* to its first mapping.
*/
- perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));
+ perf_report_code(s.pc, tb, tcg_splitwx_to_rx(gen_code_buf));
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
- qemu_log_in_addr_range(pc)) {
+ qemu_log_in_addr_range(s.pc)) {
FILE *logfile = qemu_log_trylock();
if (logfile) {
int code_size, data_size;
@@ -460,7 +425,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
fprintf(logfile,
" -- guest addr 0x%016" PRIx64 " + tb prologue\n",
- tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
+ tcg_ctx->gen_insn_data[insn * INSN_START_WORDS]);
chunk_start = tcg_ctx->gen_insn_end_off[insn];
disas(logfile, tb->tc.ptr, chunk_start);
@@ -473,7 +438,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
if (chunk_end > chunk_start) {
fprintf(logfile, " -- guest addr 0x%016" PRIx64 "\n",
- tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
+ tcg_ctx->gen_insn_data[insn * INSN_START_WORDS]);
disas(logfile, tb->tc.ptr + chunk_start,
chunk_end - chunk_start);
chunk_start = chunk_end;
@@ -591,15 +556,11 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
/* The exception probably happened in a helper. The CPU state should
have been saved before calling it. Fetch the PC from there. */
CPUArchState *env = cpu_env(cpu);
- vaddr pc;
- uint64_t cs_base;
- tb_page_addr_t addr;
- uint32_t flags;
+ TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
+ tb_page_addr_t addr = get_page_addr_code(env, s.pc);
- cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- addr = get_page_addr_code(env, pc);
if (addr != -1) {
- tb_invalidate_phys_range(addr, addr);
+ tb_invalidate_phys_range(cpu, addr, addr);
}
}
}
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 5eef8e7..f25d80e 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -19,19 +19,20 @@
#include "qemu/osdep.h"
#include "accel/tcg/cpu-ops.h"
#include "disas/disas.h"
-#include "cpu.h"
#include "exec/vaddr.h"
-#include "exec/exec-all.h"
#include "exec/tlb-flags.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "qemu/rcu.h"
-#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/cpu-ldst-common.h"
+#include "accel/tcg/helper-retaddr.h"
+#include "accel/tcg/probe.h"
#include "user/cpu_loop.h"
+#include "user/guest-host.h"
#include "qemu/main-loop.h"
#include "user/page-protection.h"
#include "exec/page-protection.h"
-#include "exec/helper-proto.h"
+#include "exec/helper-proto-common.h"
#include "qemu/atomic128.h"
#include "qemu/bswap.h"
#include "qemu/int128.h"
@@ -39,7 +40,6 @@
#include "tcg/tcg-ldst.h"
#include "backend-ldst.h"
#include "internal-common.h"
-#include "internal-target.h"
#include "tb-internal.h"
__thread uintptr_t helper_retaddr;
@@ -126,9 +126,9 @@ MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
* guest, we'd end up in an infinite loop of retrying the faulting access.
*/
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
- uintptr_t host_pc, abi_ptr guest_addr)
+ uintptr_t host_pc, vaddr guest_addr)
{
- switch (page_unprotect(guest_addr, host_pc)) {
+ switch (page_unprotect(cpu, guest_addr, host_pc)) {
case 0:
/*
* Fault not caused by a page marked unwritable to protect
@@ -162,7 +162,7 @@ typedef struct PageFlagsNode {
static IntervalTreeRoot pageflags_root;
-static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
+static PageFlagsNode *pageflags_find(vaddr start, vaddr last)
{
IntervalTreeNode *n;
@@ -170,8 +170,7 @@ static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
return n ? container_of(n, PageFlagsNode, itree) : NULL;
}
-static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
- target_ulong last)
+static PageFlagsNode *pageflags_next(PageFlagsNode *p, vaddr start, vaddr last)
{
IntervalTreeNode *n;
@@ -200,13 +199,22 @@ int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
return rc;
}
-static int dump_region(void *priv, target_ulong start,
- target_ulong end, unsigned long prot)
+static int dump_region(void *opaque, vaddr start, vaddr end, int prot)
{
- FILE *f = (FILE *)priv;
+ FILE *f = opaque;
+ uint64_t mask;
+ int width;
- fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
- start, end, end - start,
+ if (guest_addr_max <= UINT32_MAX) {
+ mask = UINT32_MAX, width = 8;
+ } else {
+ mask = UINT64_MAX, width = 16;
+ }
+
+ fprintf(f, "%0*" PRIx64 "-%0*" PRIx64 " %0*" PRIx64 " %c%c%c\n",
+ width, start & mask,
+ width, end & mask,
+ width, (end - start) & mask,
((prot & PAGE_READ) ? 'r' : '-'),
((prot & PAGE_WRITE) ? 'w' : '-'),
((prot & PAGE_EXEC) ? 'x' : '-'));
@@ -216,14 +224,14 @@ static int dump_region(void *priv, target_ulong start,
/* dump memory mappings */
void page_dump(FILE *f)
{
- const int length = sizeof(target_ulong) * 2;
+ int width = guest_addr_max <= UINT32_MAX ? 8 : 16;
fprintf(f, "%-*s %-*s %-*s %s\n",
- length, "start", length, "end", length, "size", "prot");
+ width, "start", width, "end", width, "size", "prot");
walk_memory_regions(f, dump_region);
}
-int page_get_flags(target_ulong address)
+int page_get_flags(vaddr address)
{
PageFlagsNode *p = pageflags_find(address, address);
@@ -246,7 +254,7 @@ int page_get_flags(target_ulong address)
}
/* A subroutine of page_set_flags: insert a new node for [start,last]. */
-static void pageflags_create(target_ulong start, target_ulong last, int flags)
+static void pageflags_create(vaddr start, vaddr last, int flags)
{
PageFlagsNode *p = g_new(PageFlagsNode, 1);
@@ -257,13 +265,13 @@ static void pageflags_create(target_ulong start, target_ulong last, int flags)
}
/* A subroutine of page_set_flags: remove everything in [start,last]. */
-static bool pageflags_unset(target_ulong start, target_ulong last)
+static bool pageflags_unset(vaddr start, vaddr last)
{
bool inval_tb = false;
while (true) {
PageFlagsNode *p = pageflags_find(start, last);
- target_ulong p_last;
+ vaddr p_last;
if (!p) {
break;
@@ -302,8 +310,7 @@ static bool pageflags_unset(target_ulong start, target_ulong last)
* A subroutine of page_set_flags: nothing overlaps [start,last],
* but check adjacent mappings and maybe merge into a single range.
*/
-static void pageflags_create_merge(target_ulong start, target_ulong last,
- int flags)
+static void pageflags_create_merge(vaddr start, vaddr last, int flags)
{
PageFlagsNode *next = NULL, *prev = NULL;
@@ -354,11 +361,11 @@ static void pageflags_create_merge(target_ulong start, target_ulong last,
#define PAGE_STICKY (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)
/* A subroutine of page_set_flags: add flags to [start,last]. */
-static bool pageflags_set_clear(target_ulong start, target_ulong last,
+static bool pageflags_set_clear(vaddr start, vaddr last,
int set_flags, int clear_flags)
{
PageFlagsNode *p;
- target_ulong p_start, p_last;
+ vaddr p_start, p_last;
int p_flags, merge_flags;
bool inval_tb = false;
@@ -493,7 +500,7 @@ static bool pageflags_set_clear(target_ulong start, target_ulong last,
return inval_tb;
}
-void page_set_flags(target_ulong start, target_ulong last, int flags)
+void page_set_flags(vaddr start, vaddr last, int flags)
{
bool reset = false;
bool inval_tb = false;
@@ -502,7 +509,7 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
guest address space. If this assert fires, it probably indicates
a missing call to h2g_valid. */
assert(start <= last);
- assert(last <= GUEST_ADDR_MAX);
+ assert(last <= guest_addr_max);
/* Only set PAGE_ANON with new mappings. */
assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
assert_memory_lock();
@@ -529,13 +536,13 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
~(reset ? 0 : PAGE_STICKY));
}
if (inval_tb) {
- tb_invalidate_phys_range(start, last);
+ tb_invalidate_phys_range(NULL, start, last);
}
}
-bool page_check_range(target_ulong start, target_ulong len, int flags)
+bool page_check_range(vaddr start, vaddr len, int flags)
{
- target_ulong last;
+ vaddr last;
int locked; /* tri-state: =0: unlocked, +1: global, -1: local */
bool ret;
@@ -584,7 +591,7 @@ bool page_check_range(target_ulong start, target_ulong len, int flags)
break;
}
/* Asking about writable, but has been protected: undo. */
- if (!page_unprotect(start, 0)) {
+ if (!page_unprotect(NULL, start, 0)) {
ret = false;
break;
}
@@ -611,20 +618,19 @@ bool page_check_range(target_ulong start, target_ulong len, int flags)
return ret;
}
-bool page_check_range_empty(target_ulong start, target_ulong last)
+bool page_check_range_empty(vaddr start, vaddr last)
{
assert(last >= start);
assert_memory_lock();
return pageflags_find(start, last) == NULL;
}
-target_ulong page_find_range_empty(target_ulong min, target_ulong max,
- target_ulong len, target_ulong align)
+vaddr page_find_range_empty(vaddr min, vaddr max, vaddr len, vaddr align)
{
- target_ulong len_m1, align_m1;
+ vaddr len_m1, align_m1;
assert(min <= max);
- assert(max <= GUEST_ADDR_MAX);
+ assert(max <= guest_addr_max);
assert(len != 0);
assert(is_power_of_2(align));
assert_memory_lock();
@@ -662,7 +668,7 @@ target_ulong page_find_range_empty(target_ulong min, target_ulong max,
void tb_lock_page0(tb_page_addr_t address)
{
PageFlagsNode *p;
- target_ulong start, last;
+ vaddr start, last;
int host_page_size = qemu_real_host_page_size();
int prot;
@@ -704,11 +710,13 @@ void tb_lock_page0(tb_page_addr_t address)
* immediately exited. (We can only return 2 if the 'pc' argument is
* non-zero.)
*/
-int page_unprotect(tb_page_addr_t address, uintptr_t pc)
+int page_unprotect(CPUState *cpu, tb_page_addr_t address, uintptr_t pc)
{
PageFlagsNode *p;
bool current_tb_invalidated;
+ assert((cpu == NULL) == (pc == 0));
+
/*
* Technically this isn't safe inside a signal handler. However we
* know this only ever happens in a synchronous SEGV handler, so in
@@ -731,15 +739,15 @@ int page_unprotect(tb_page_addr_t address, uintptr_t pc)
* this thread raced with another one which got here first and
* set the page to PAGE_WRITE and did the TB invalidate for us.
*/
-#ifdef TARGET_HAS_PRECISE_SMC
- TranslationBlock *current_tb = tcg_tb_lookup(pc);
- if (current_tb) {
- current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
+ if (pc && cpu->cc->tcg_ops->precise_smc) {
+ TranslationBlock *current_tb = tcg_tb_lookup(pc);
+ if (current_tb) {
+ current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
+ }
}
-#endif
} else {
int host_page_size = qemu_real_host_page_size();
- target_ulong start, len, i;
+ vaddr start, len, i;
int prot;
if (host_page_size <= TARGET_PAGE_SIZE) {
@@ -747,14 +755,15 @@ int page_unprotect(tb_page_addr_t address, uintptr_t pc)
len = TARGET_PAGE_SIZE;
prot = p->flags | PAGE_WRITE;
pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
- current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
+ current_tb_invalidated =
+ tb_invalidate_phys_page_unwind(cpu, start, pc);
} else {
start = address & -host_page_size;
len = host_page_size;
prot = 0;
for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
- target_ulong addr = start + i;
+ vaddr addr = start + i;
p = pageflags_find(addr, addr);
if (p) {
@@ -770,7 +779,7 @@ int page_unprotect(tb_page_addr_t address, uintptr_t pc)
* the corresponding translated code.
*/
current_tb_invalidated |=
- tb_invalidate_phys_page_unwind(addr, pc);
+ tb_invalidate_phys_page_unwind(cpu, addr, pc);
}
}
if (prot & PAGE_EXEC) {
@@ -850,6 +859,12 @@ void *probe_access(CPUArchState *env, vaddr addr, int size,
return size ? g2h(env_cpu(env), addr) : NULL;
}
+void *tlb_vaddr_to_host(CPUArchState *env, vaddr addr,
+ MMUAccessType access_type, int mmu_idx)
+{
+ return g2h(env_cpu(env), addr);
+}
+
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
void **hostp)
{
@@ -864,7 +879,6 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
return addr;
}
-#ifdef TARGET_PAGE_DATA_SIZE
/*
* Allocate chunks of target data together. For the only current user,
* if we allocate one hunk per page, we have overhead of 40/128 or 40%.
@@ -880,10 +894,16 @@ typedef struct TargetPageDataNode {
} TargetPageDataNode;
static IntervalTreeRoot targetdata_root;
+static size_t target_page_data_size;
-void page_reset_target_data(target_ulong start, target_ulong last)
+void page_reset_target_data(vaddr start, vaddr last)
{
IntervalTreeNode *n, *next;
+ size_t size = target_page_data_size;
+
+ if (likely(size == 0)) {
+ return;
+ }
assert_memory_lock();
@@ -895,7 +915,7 @@ void page_reset_target_data(target_ulong start, target_ulong last)
n != NULL;
n = next,
next = next ? interval_tree_iter_next(n, start, last) : NULL) {
- target_ulong n_start, n_last, p_ofs, p_len;
+ vaddr n_start, n_last, p_ofs, p_len;
TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);
if (n->start >= start && n->last <= last) {
@@ -914,16 +934,21 @@ void page_reset_target_data(target_ulong start, target_ulong last)
n_last = MIN(last, n->last);
p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;
- memset(t->data + p_ofs * TARGET_PAGE_DATA_SIZE, 0,
- p_len * TARGET_PAGE_DATA_SIZE);
+ memset(t->data + p_ofs * size, 0, p_len * size);
}
}
-void *page_get_target_data(target_ulong address)
+void *page_get_target_data(vaddr address, size_t size)
{
IntervalTreeNode *n;
TargetPageDataNode *t;
- target_ulong page, region, p_ofs;
+ vaddr page, region, p_ofs;
+
+ /* Remember the size from the first call, and it should be constant. */
+ if (unlikely(target_page_data_size != size)) {
+ assert(target_page_data_size == 0);
+ target_page_data_size = size;
+ }
page = address & TARGET_PAGE_MASK;
region = address & TBD_MASK;
@@ -939,8 +964,7 @@ void *page_get_target_data(target_ulong address)
mmap_lock();
n = interval_tree_iter_first(&targetdata_root, page, page);
if (!n) {
- t = g_malloc0(sizeof(TargetPageDataNode)
- + TPD_PAGES * TARGET_PAGE_DATA_SIZE);
+ t = g_malloc0(sizeof(TargetPageDataNode) + TPD_PAGES * size);
n = &t->itree;
n->start = region;
n->last = region | ~TBD_MASK;
@@ -951,11 +975,8 @@ void *page_get_target_data(target_ulong address)
t = container_of(n, TargetPageDataNode, itree);
p_ofs = (page - region) >> TARGET_PAGE_BITS;
- return t->data + p_ofs * TARGET_PAGE_DATA_SIZE;
+ return t->data + p_ofs * size;
}
-#else
-void page_reset_target_data(target_ulong start, target_ulong last) { }
-#endif /* TARGET_PAGE_DATA_SIZE */
/* The system-mode versions of these helpers are in cputlb.c. */
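page_reset_target_data() and page_get_target_data() above stop depending on a compile-time TARGET_PAGE_DATA_SIZE: the size from the first call is latched and every later call must pass the same value. A hedged sketch of a caller, loosely modelled on a target that keeps per-page metadata (such as memory tags); the record type and helper are illustrative:

    /* One fixed-size metadata record per guest page; the size is latched
     * by user-exec.c on the first call and asserted on later calls. */
    typedef struct FooPageMeta {
        uint8_t tags[TARGET_PAGE_SIZE / 16];
    } FooPageMeta;

    static FooPageMeta *foo_get_page_meta(vaddr addr)
    {
        return page_get_target_data(addr, sizeof(FooPageMeta));
    }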
@@ -1017,7 +1038,7 @@ int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
* be under mmap_lock() in order to prevent the creation of
* another TranslationBlock in between.
*/
- tb_invalidate_phys_range(addr, addr + l - 1);
+ tb_invalidate_phys_range(NULL, addr, addr + l - 1);
written = pwrite(fd, buf, l,
(off_t)(uintptr_t)g2h_untagged(addr));
if (written != l) {
@@ -1123,7 +1144,7 @@ static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
return ret;
}
-static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr,
+static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;