Diffstat (limited to 'accel')
-rw-r--r--  accel/accel-common.c  142
-rw-r--r--  accel/accel-internal.h (renamed from accel/accel-system.h)  10
-rw-r--r--  accel/accel-system.c  11
-rw-r--r--  accel/accel-target.c  134
-rw-r--r--  accel/accel-user.c  6
-rw-r--r--  accel/dummy-cpus.c  1
-rw-r--r--  accel/dummy-cpus.h  14
-rw-r--r--  accel/hvf/hvf-accel-ops.c  310
-rw-r--r--  accel/hvf/hvf-all.c  267
-rw-r--r--  accel/hvf/trace-events  7
-rw-r--r--  accel/hvf/trace.h  2
-rw-r--r--  accel/kvm/kvm-accel-ops.c  3
-rw-r--r--  accel/kvm/kvm-all.c  130
-rw-r--r--  accel/meson.build  1
-rw-r--r--  accel/qtest/qtest.c  8
-rw-r--r--  accel/stubs/hvf-stub.c  12
-rw-r--r--  accel/stubs/kvm-stub.c  9
-rw-r--r--  accel/stubs/meson.build  3
-rw-r--r--  accel/stubs/nvmm-stub.c  12
-rw-r--r--  accel/stubs/tcg-stub.c  3
-rw-r--r--  accel/stubs/whpx-stub.c  12
-rw-r--r--  accel/tcg/atomic_template.h  16
-rw-r--r--  accel/tcg/backend-ldst.h  41
-rw-r--r--  accel/tcg/cpu-exec.c  170
-rw-r--r--  accel/tcg/cputlb.c  167
-rw-r--r--  accel/tcg/icount-common.c  2
-rw-r--r--  accel/tcg/internal-common.h  73
-rw-r--r--  accel/tcg/internal-target.h  79
-rw-r--r--  accel/tcg/ldst_common.c.inc  339
-rw-r--r--  accel/tcg/meson.build  31
-rw-r--r--  accel/tcg/monitor.c  52
-rw-r--r--  accel/tcg/plugin-gen.c  43
-rw-r--r--  accel/tcg/tb-hash.h  4
-rw-r--r--  accel/tcg/tb-internal.h  46
-rw-r--r--  accel/tcg/tb-maint.c  101
-rw-r--r--  accel/tcg/tcg-accel-ops-icount.c  2
-rw-r--r--  accel/tcg/tcg-accel-ops-mttcg.c  2
-rw-r--r--  accel/tcg/tcg-accel-ops-rr.c  2
-rw-r--r--  accel/tcg/tcg-accel-ops.c  11
-rw-r--r--  accel/tcg/tcg-all.c  109
-rw-r--r--  accel/tcg/tlb-bounds.h  13
-rw-r--r--  accel/tcg/translate-all.c  107
-rw-r--r--  accel/tcg/translator.c  135
-rw-r--r--  accel/tcg/user-exec.c  257
-rw-r--r--  accel/tcg/watchpoint.c  3
-rw-r--r--  accel/xen/xen-all.c  10
46 files changed, 1340 insertions, 1572 deletions
diff --git a/accel/accel-common.c b/accel/accel-common.c
new file mode 100644
index 0000000..591ff4c
--- /dev/null
+++ b/accel/accel-common.c
@@ -0,0 +1,142 @@
+/*
+ * QEMU accel class, components common to system emulation and user mode
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2014 Red Hat Inc.
+ *
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/accel.h"
+#include "qemu/target-info.h"
+#include "accel/accel-cpu.h"
+#include "accel-internal.h"
+
+/* Lookup AccelClass from opt_name. Returns NULL if not found */
+AccelClass *accel_find(const char *opt_name)
+{
+ char *class_name = g_strdup_printf(ACCEL_CLASS_NAME("%s"), opt_name);
+ AccelClass *ac = ACCEL_CLASS(module_object_class_by_name(class_name));
+ g_free(class_name);
+ return ac;
+}
+
+/* Return the name of the current accelerator */
+const char *current_accel_name(void)
+{
+ AccelClass *ac = ACCEL_GET_CLASS(current_accel());
+
+ return ac->name;
+}
+
+static void accel_init_cpu_int_aux(ObjectClass *klass, void *opaque)
+{
+ CPUClass *cc = CPU_CLASS(klass);
+ AccelCPUClass *accel_cpu = opaque;
+
+ /*
+ * The first callback allows accel-cpu to run initializations
+ * for the CPU, customizing CPU behavior according to the accelerator.
+ *
+ * The second one allows the CPU to customize the accel-cpu
+ * behavior according to the CPU.
+ *
+ * The second is currently only used by TCG, to specialize the
+ * TCGCPUOps depending on the CPU type.
+ */
+ cc->accel_cpu = accel_cpu;
+ if (accel_cpu->cpu_class_init) {
+ accel_cpu->cpu_class_init(cc);
+ }
+ if (cc->init_accel_cpu) {
+ cc->init_accel_cpu(accel_cpu, cc);
+ }
+}
+
+/* initialize the arch-specific accel CpuClass interfaces */
+static void accel_init_cpu_interfaces(AccelClass *ac)
+{
+ const char *ac_name; /* AccelClass name */
+ char *acc_name; /* AccelCPUClass name */
+ ObjectClass *acc; /* AccelCPUClass */
+ const char *cpu_resolving_type = target_cpu_type();
+
+ ac_name = object_class_get_name(OBJECT_CLASS(ac));
+ g_assert(ac_name != NULL);
+
+ acc_name = g_strdup_printf("%s-%s", ac_name, cpu_resolving_type);
+ acc = object_class_by_name(acc_name);
+ g_free(acc_name);
+
+ if (acc) {
+ object_class_foreach(accel_init_cpu_int_aux,
+ cpu_resolving_type, false, acc);
+ }
+}
+
+void accel_init_interfaces(AccelClass *ac)
+{
+ accel_init_ops_interfaces(ac);
+ accel_init_cpu_interfaces(ac);
+}
+
+void accel_cpu_instance_init(CPUState *cpu)
+{
+ if (cpu->cc->accel_cpu && cpu->cc->accel_cpu->cpu_instance_init) {
+ cpu->cc->accel_cpu->cpu_instance_init(cpu);
+ }
+}
+
+bool accel_cpu_common_realize(CPUState *cpu, Error **errp)
+{
+ AccelState *accel = current_accel();
+ AccelClass *acc = ACCEL_GET_CLASS(accel);
+
+ /* target specific realization */
+ if (cpu->cc->accel_cpu
+ && cpu->cc->accel_cpu->cpu_target_realize
+ && !cpu->cc->accel_cpu->cpu_target_realize(cpu, errp)) {
+ return false;
+ }
+
+ /* generic realization */
+ if (acc->cpu_common_realize && !acc->cpu_common_realize(cpu, errp)) {
+ return false;
+ }
+
+ return true;
+}
+
+void accel_cpu_common_unrealize(CPUState *cpu)
+{
+ AccelState *accel = current_accel();
+ AccelClass *acc = ACCEL_GET_CLASS(accel);
+
+ /* generic unrealization */
+ if (acc->cpu_common_unrealize) {
+ acc->cpu_common_unrealize(cpu);
+ }
+}
+
+int accel_supported_gdbstub_sstep_flags(void)
+{
+ AccelState *accel = current_accel();
+ AccelClass *acc = ACCEL_GET_CLASS(accel);
+ if (acc->gdbstub_supported_sstep_flags) {
+ return acc->gdbstub_supported_sstep_flags(accel);
+ }
+ return 0;
+}
+
+static const TypeInfo accel_types[] = {
+ {
+ .name = TYPE_ACCEL,
+ .parent = TYPE_OBJECT,
+ .class_size = sizeof(AccelClass),
+ .instance_size = sizeof(AccelState),
+ .abstract = true,
+ },
+};
+
+DEFINE_TYPES(accel_types)
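
Note: the new accel-common.c gathers the QOM plumbing shared by system and user mode: accel_find() resolves an "-accel" option name to its AccelClass, accel_init_interfaces() wires up the ops and per-CPU interfaces, and the TYPE_ACCEL base type is now registered here instead of in accel-target.c. As a rough sketch of how a backend plugs into this common code, an accelerator subclasses TYPE_ACCEL and sets init_machine, which accel_init_machine() now calls with the AccelState as its first argument. The "foo" names below are hypothetical and not part of this patch; the pattern mirrors the HVF registration further down in this series.

/* Illustrative sketch only -- "foo" is a made-up accelerator backend. */
static bool foo_allowed;

static int foo_accel_init_machine(AccelState *as, MachineState *ms)
{
    /* allocate backend state, open device nodes, etc. */
    return 0;
}

static void foo_accel_class_init(ObjectClass *oc, const void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);

    ac->name = "FOO";
    ac->init_machine = foo_accel_init_machine; /* invoked via accel_init_machine() */
    ac->allowed = &foo_allowed;
}

static const TypeInfo foo_accel_type = {
    .name = ACCEL_CLASS_NAME("foo"),           /* what accel_find("foo") looks up */
    .parent = TYPE_ACCEL,
    .class_init = foo_accel_class_init,
};

static void foo_type_init(void)
{
    type_register_static(&foo_accel_type);
}
type_init(foo_type_init);
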
diff --git a/accel/accel-system.h b/accel/accel-internal.h
index 2d37c73..d3a4422 100644
--- a/accel/accel-system.h
+++ b/accel/accel-internal.h
@@ -1,5 +1,5 @@
/*
- * QEMU System Emulation accel internal functions
+ * QEMU accel internal functions
*
* Copyright 2021 SUSE LLC
*
@@ -7,9 +7,11 @@
* See the COPYING file in the top-level directory.
*/
-#ifndef ACCEL_SYSTEM_H
-#define ACCEL_SYSTEM_H
+#ifndef ACCEL_INTERNAL_H
+#define ACCEL_INTERNAL_H
-void accel_system_init_ops_interfaces(AccelClass *ac);
+#include "qemu/accel.h"
+
+void accel_init_ops_interfaces(AccelClass *ac);
#endif /* ACCEL_SYSTEM_H */
diff --git a/accel/accel-system.c b/accel/accel-system.c
index 5df49fb..af713cc 100644
--- a/accel/accel-system.c
+++ b/accel/accel-system.c
@@ -29,7 +29,7 @@
#include "system/accel-ops.h"
#include "system/cpus.h"
#include "qemu/error-report.h"
-#include "accel-system.h"
+#include "accel-internal.h"
int accel_init_machine(AccelState *accel, MachineState *ms)
{
@@ -37,7 +37,7 @@ int accel_init_machine(AccelState *accel, MachineState *ms)
int ret;
ms->accelerator = accel;
*(acc->allowed) = true;
- ret = acc->init_machine(ms);
+ ret = acc->init_machine(accel, ms);
if (ret < 0) {
ms->accelerator = NULL;
*(acc->allowed) = false;
@@ -58,12 +58,12 @@ void accel_setup_post(MachineState *ms)
AccelState *accel = ms->accelerator;
AccelClass *acc = ACCEL_GET_CLASS(accel);
if (acc->setup_post) {
- acc->setup_post(ms, accel);
+ acc->setup_post(accel);
}
}
/* initialize the arch-independent accel operation interfaces */
-void accel_system_init_ops_interfaces(AccelClass *ac)
+void accel_init_ops_interfaces(AccelClass *ac)
{
const char *ac_name;
char *ops_name;
@@ -85,8 +85,9 @@ void accel_system_init_ops_interfaces(AccelClass *ac)
* non-NULL create_vcpu_thread operation.
*/
ops = ACCEL_OPS_CLASS(oc);
+ ac->ops = ops;
if (ops->ops_init) {
- ops->ops_init(ops);
+ ops->ops_init(ac);
}
cpus_register_accel(ops);
}
diff --git a/accel/accel-target.c b/accel/accel-target.c
index 33a539b..7fd392f 100644
--- a/accel/accel-target.c
+++ b/accel/accel-target.c
@@ -24,141 +24,8 @@
*/
#include "qemu/osdep.h"
-#include "qemu/accel.h"
-
-#include "cpu.h"
#include "accel/accel-cpu-target.h"
-#ifndef CONFIG_USER_ONLY
-#include "accel-system.h"
-#endif /* !CONFIG_USER_ONLY */
-
-static const TypeInfo accel_type = {
- .name = TYPE_ACCEL,
- .parent = TYPE_OBJECT,
- .class_size = sizeof(AccelClass),
- .instance_size = sizeof(AccelState),
- .abstract = true,
-};
-
-/* Lookup AccelClass from opt_name. Returns NULL if not found */
-AccelClass *accel_find(const char *opt_name)
-{
- char *class_name = g_strdup_printf(ACCEL_CLASS_NAME("%s"), opt_name);
- AccelClass *ac = ACCEL_CLASS(module_object_class_by_name(class_name));
- g_free(class_name);
- return ac;
-}
-
-/* Return the name of the current accelerator */
-const char *current_accel_name(void)
-{
- AccelClass *ac = ACCEL_GET_CLASS(current_accel());
-
- return ac->name;
-}
-
-static void accel_init_cpu_int_aux(ObjectClass *klass, void *opaque)
-{
- CPUClass *cc = CPU_CLASS(klass);
- AccelCPUClass *accel_cpu = opaque;
-
- /*
- * The first callback allows accel-cpu to run initializations
- * for the CPU, customizing CPU behavior according to the accelerator.
- *
- * The second one allows the CPU to customize the accel-cpu
- * behavior according to the CPU.
- *
- * The second is currently only used by TCG, to specialize the
- * TCGCPUOps depending on the CPU type.
- */
- cc->accel_cpu = accel_cpu;
- if (accel_cpu->cpu_class_init) {
- accel_cpu->cpu_class_init(cc);
- }
- if (cc->init_accel_cpu) {
- cc->init_accel_cpu(accel_cpu, cc);
- }
-}
-
-/* initialize the arch-specific accel CpuClass interfaces */
-static void accel_init_cpu_interfaces(AccelClass *ac)
-{
- const char *ac_name; /* AccelClass name */
- char *acc_name; /* AccelCPUClass name */
- ObjectClass *acc; /* AccelCPUClass */
-
- ac_name = object_class_get_name(OBJECT_CLASS(ac));
- g_assert(ac_name != NULL);
-
- acc_name = g_strdup_printf("%s-%s", ac_name, CPU_RESOLVING_TYPE);
- acc = object_class_by_name(acc_name);
- g_free(acc_name);
-
- if (acc) {
- object_class_foreach(accel_init_cpu_int_aux,
- CPU_RESOLVING_TYPE, false, acc);
- }
-}
-
-void accel_init_interfaces(AccelClass *ac)
-{
-#ifndef CONFIG_USER_ONLY
- accel_system_init_ops_interfaces(ac);
-#endif /* !CONFIG_USER_ONLY */
-
- accel_init_cpu_interfaces(ac);
-}
-
-void accel_cpu_instance_init(CPUState *cpu)
-{
- if (cpu->cc->accel_cpu && cpu->cc->accel_cpu->cpu_instance_init) {
- cpu->cc->accel_cpu->cpu_instance_init(cpu);
- }
-}
-
-bool accel_cpu_common_realize(CPUState *cpu, Error **errp)
-{
- AccelState *accel = current_accel();
- AccelClass *acc = ACCEL_GET_CLASS(accel);
-
- /* target specific realization */
- if (cpu->cc->accel_cpu
- && cpu->cc->accel_cpu->cpu_target_realize
- && !cpu->cc->accel_cpu->cpu_target_realize(cpu, errp)) {
- return false;
- }
-
- /* generic realization */
- if (acc->cpu_common_realize && !acc->cpu_common_realize(cpu, errp)) {
- return false;
- }
-
- return true;
-}
-
-void accel_cpu_common_unrealize(CPUState *cpu)
-{
- AccelState *accel = current_accel();
- AccelClass *acc = ACCEL_GET_CLASS(accel);
-
- /* generic unrealization */
- if (acc->cpu_common_unrealize) {
- acc->cpu_common_unrealize(cpu);
- }
-}
-
-int accel_supported_gdbstub_sstep_flags(void)
-{
- AccelState *accel = current_accel();
- AccelClass *acc = ACCEL_GET_CLASS(accel);
- if (acc->gdbstub_supported_sstep_flags) {
- return acc->gdbstub_supported_sstep_flags();
- }
- return 0;
-}
-
static const TypeInfo accel_cpu_type = {
.name = TYPE_ACCEL_CPU,
.parent = TYPE_OBJECT,
@@ -168,7 +35,6 @@ static const TypeInfo accel_cpu_type = {
static void register_accel_types(void)
{
- type_register_static(&accel_type);
type_register_static(&accel_cpu_type);
}
diff --git a/accel/accel-user.c b/accel/accel-user.c
index 22b6a1a..7d19230 100644
--- a/accel/accel-user.c
+++ b/accel/accel-user.c
@@ -9,6 +9,12 @@
#include "qemu/osdep.h"
#include "qemu/accel.h"
+#include "accel-internal.h"
+
+void accel_init_ops_interfaces(AccelClass *ac)
+{
+ /* nothing */
+}
AccelState *current_accel(void)
{
diff --git a/accel/dummy-cpus.c b/accel/dummy-cpus.c
index 8672761..03cfc0f 100644
--- a/accel/dummy-cpus.c
+++ b/accel/dummy-cpus.c
@@ -17,6 +17,7 @@
#include "qemu/guest-random.h"
#include "qemu/main-loop.h"
#include "hw/core/cpu.h"
+#include "accel/dummy-cpus.h"
static void *dummy_cpu_thread_fn(void *arg)
{
diff --git a/accel/dummy-cpus.h b/accel/dummy-cpus.h
new file mode 100644
index 0000000..d18dd0f
--- /dev/null
+++ b/accel/dummy-cpus.h
@@ -0,0 +1,14 @@
+/*
+ * Dummy cpu thread code
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef ACCEL_DUMMY_CPUS_H
+#define ACCEL_DUMMY_CPUS_H
+
+void dummy_start_vcpu_thread(CPUState *cpu);
+
+#endif
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index 12fc30c..be8724a 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -48,18 +48,16 @@
*/
#include "qemu/osdep.h"
-#include "qemu/error-report.h"
+#include "qemu/guest-random.h"
#include "qemu/main-loop.h"
-#include "exec/address-spaces.h"
-#include "exec/exec-all.h"
+#include "qemu/queue.h"
#include "gdbstub/enums.h"
-#include "hw/boards.h"
+#include "exec/cpu-common.h"
+#include "hw/core/cpu.h"
#include "system/accel-ops.h"
#include "system/cpus.h"
#include "system/hvf.h"
#include "system/hvf_int.h"
-#include "system/runstate.h"
-#include "qemu/guest-random.h"
HVFState *hvf_state;
@@ -79,138 +77,17 @@ hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
return NULL;
}
-struct mac_slot {
- int present;
- uint64_t size;
- uint64_t gpa_start;
- uint64_t gva;
-};
-
-struct mac_slot mac_slots[32];
-
-static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
-{
- struct mac_slot *macslot;
- hv_return_t ret;
-
- macslot = &mac_slots[slot->slot_id];
-
- if (macslot->present) {
- if (macslot->size != slot->size) {
- macslot->present = 0;
- ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
- assert_hvf_ok(ret);
- }
- }
-
- if (!slot->size) {
- return 0;
- }
-
- macslot->present = 1;
- macslot->gpa_start = slot->start;
- macslot->size = slot->size;
- ret = hv_vm_map(slot->mem, slot->start, slot->size, flags);
- assert_hvf_ok(ret);
- return 0;
-}
-
-static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
-{
- hvf_slot *mem;
- MemoryRegion *area = section->mr;
- bool writable = !area->readonly && !area->rom_device;
- hv_memory_flags_t flags;
- uint64_t page_size = qemu_real_host_page_size();
-
- if (!memory_region_is_ram(area)) {
- if (writable) {
- return;
- } else if (!memory_region_is_romd(area)) {
- /*
- * If the memory device is not in romd_mode, then we actually want
- * to remove the hvf memory slot so all accesses will trap.
- */
- add = false;
- }
- }
-
- if (!QEMU_IS_ALIGNED(int128_get64(section->size), page_size) ||
- !QEMU_IS_ALIGNED(section->offset_within_address_space, page_size)) {
- /* Not page aligned, so we can not map as RAM */
- add = false;
- }
-
- mem = hvf_find_overlap_slot(
- section->offset_within_address_space,
- int128_get64(section->size));
-
- if (mem && add) {
- if (mem->size == int128_get64(section->size) &&
- mem->start == section->offset_within_address_space &&
- mem->mem == (memory_region_get_ram_ptr(area) +
- section->offset_within_region)) {
- return; /* Same region was attempted to register, go away. */
- }
- }
-
- /* Region needs to be reset. set the size to 0 and remap it. */
- if (mem) {
- mem->size = 0;
- if (do_hvf_set_memory(mem, 0)) {
- error_report("Failed to reset overlapping slot");
- abort();
- }
- }
-
- if (!add) {
- return;
- }
-
- if (area->readonly ||
- (!memory_region_is_ram(area) && memory_region_is_romd(area))) {
- flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
- } else {
- flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
- }
-
- /* Now make a new slot. */
- int x;
-
- for (x = 0; x < hvf_state->num_slots; ++x) {
- mem = &hvf_state->slots[x];
- if (!mem->size) {
- break;
- }
- }
-
- if (x == hvf_state->num_slots) {
- error_report("No free slots");
- abort();
- }
-
- mem->size = int128_get64(section->size);
- mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
- mem->start = section->offset_within_address_space;
- mem->region = area;
-
- if (do_hvf_set_memory(mem, flags)) {
- error_report("Error registering new memory slot");
- abort();
- }
-}
-
static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
- if (!cpu->accel->dirty) {
+ if (!cpu->vcpu_dirty) {
hvf_get_registers(cpu);
- cpu->accel->dirty = true;
+ cpu->vcpu_dirty = true;
}
}
static void hvf_cpu_synchronize_state(CPUState *cpu)
{
- if (!cpu->accel->dirty) {
+ if (!cpu->vcpu_dirty) {
run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
}
}
@@ -219,7 +96,7 @@ static void do_hvf_cpu_synchronize_set_dirty(CPUState *cpu,
run_on_cpu_data arg)
{
/* QEMU state is the reference, push it to HVF now and on next entry */
- cpu->accel->dirty = true;
+ cpu->vcpu_dirty = true;
}
static void hvf_cpu_synchronize_post_reset(CPUState *cpu)
@@ -237,146 +114,10 @@ static void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
}
-static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
-{
- hvf_slot *slot;
-
- slot = hvf_find_overlap_slot(
- section->offset_within_address_space,
- int128_get64(section->size));
-
- /* protect region against writes; begin tracking it */
- if (on) {
- slot->flags |= HVF_SLOT_LOG;
- hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
- HV_MEMORY_READ | HV_MEMORY_EXEC);
- /* stop tracking region*/
- } else {
- slot->flags &= ~HVF_SLOT_LOG;
- hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
- HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
- }
-}
-
-static void hvf_log_start(MemoryListener *listener,
- MemoryRegionSection *section, int old, int new)
-{
- if (old != 0) {
- return;
- }
-
- hvf_set_dirty_tracking(section, 1);
-}
-
-static void hvf_log_stop(MemoryListener *listener,
- MemoryRegionSection *section, int old, int new)
-{
- if (new != 0) {
- return;
- }
-
- hvf_set_dirty_tracking(section, 0);
-}
-
-static void hvf_log_sync(MemoryListener *listener,
- MemoryRegionSection *section)
-{
- /*
- * sync of dirty pages is handled elsewhere; just make sure we keep
- * tracking the region.
- */
- hvf_set_dirty_tracking(section, 1);
-}
-
-static void hvf_region_add(MemoryListener *listener,
- MemoryRegionSection *section)
-{
- hvf_set_phys_mem(section, true);
-}
-
-static void hvf_region_del(MemoryListener *listener,
- MemoryRegionSection *section)
-{
- hvf_set_phys_mem(section, false);
-}
-
-static MemoryListener hvf_memory_listener = {
- .name = "hvf",
- .priority = MEMORY_LISTENER_PRIORITY_ACCEL,
- .region_add = hvf_region_add,
- .region_del = hvf_region_del,
- .log_start = hvf_log_start,
- .log_stop = hvf_log_stop,
- .log_sync = hvf_log_sync,
-};
-
static void dummy_signal(int sig)
{
}
-bool hvf_allowed;
-
-static int hvf_accel_init(MachineState *ms)
-{
- int x;
- hv_return_t ret;
- HVFState *s;
- int pa_range = 36;
- MachineClass *mc = MACHINE_GET_CLASS(ms);
-
- if (mc->hvf_get_physical_address_range) {
- pa_range = mc->hvf_get_physical_address_range(ms);
- if (pa_range < 0) {
- return -EINVAL;
- }
- }
-
- ret = hvf_arch_vm_create(ms, (uint32_t)pa_range);
- assert_hvf_ok(ret);
-
- s = g_new0(HVFState, 1);
-
- s->num_slots = ARRAY_SIZE(s->slots);
- for (x = 0; x < s->num_slots; ++x) {
- s->slots[x].size = 0;
- s->slots[x].slot_id = x;
- }
-
- QTAILQ_INIT(&s->hvf_sw_breakpoints);
-
- hvf_state = s;
- memory_listener_register(&hvf_memory_listener, &address_space_memory);
-
- return hvf_arch_init();
-}
-
-static inline int hvf_gdbstub_sstep_flags(void)
-{
- return SSTEP_ENABLE | SSTEP_NOIRQ;
-}
-
-static void hvf_accel_class_init(ObjectClass *oc, void *data)
-{
- AccelClass *ac = ACCEL_CLASS(oc);
- ac->name = "HVF";
- ac->init_machine = hvf_accel_init;
- ac->allowed = &hvf_allowed;
- ac->gdbstub_supported_sstep_flags = hvf_gdbstub_sstep_flags;
-}
-
-static const TypeInfo hvf_accel_type = {
- .name = TYPE_HVF_ACCEL,
- .parent = TYPE_ACCEL,
- .class_init = hvf_accel_class_init,
-};
-
-static void hvf_type_init(void)
-{
- type_register_static(&hvf_accel_type);
-}
-
-type_init(hvf_type_init);
-
static void hvf_vcpu_destroy(CPUState *cpu)
{
hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd);
@@ -409,8 +150,8 @@ static int hvf_init_vcpu(CPUState *cpu)
#else
r = hv_vcpu_create(&cpu->accel->fd, HV_VCPU_DEFAULT);
#endif
- cpu->accel->dirty = true;
assert_hvf_ok(r);
+ cpu->vcpu_dirty = true;
cpu->accel->guest_debug_enabled = false;
@@ -476,6 +217,34 @@ static void hvf_start_vcpu_thread(CPUState *cpu)
cpu, QEMU_THREAD_JOINABLE);
}
+struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, vaddr pc)
+{
+ struct hvf_sw_breakpoint *bp;
+
+ QTAILQ_FOREACH(bp, &hvf_state->hvf_sw_breakpoints, entry) {
+ if (bp->pc == pc) {
+ return bp;
+ }
+ }
+ return NULL;
+}
+
+int hvf_sw_breakpoints_active(CPUState *cpu)
+{
+ return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints);
+}
+
+static void do_hvf_update_guest_debug(CPUState *cpu, run_on_cpu_data arg)
+{
+ hvf_arch_update_guest_debug(cpu);
+}
+
+int hvf_update_guest_debug(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_hvf_update_guest_debug, RUN_ON_CPU_NULL);
+ return 0;
+}
+
static int hvf_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
{
struct hvf_sw_breakpoint *bp;
@@ -578,12 +347,13 @@ static void hvf_remove_all_breakpoints(CPUState *cpu)
}
}
-static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
+static void hvf_accel_ops_class_init(ObjectClass *oc, const void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
ops->create_vcpu_thread = hvf_start_vcpu_thread;
ops->kick_vcpu_thread = hvf_kick_vcpu_thread;
+ ops->handle_interrupt = generic_handle_interrupt;
ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
@@ -603,8 +373,10 @@ static const TypeInfo hvf_accel_ops_type = {
.class_init = hvf_accel_ops_class_init,
.abstract = true,
};
+
static void hvf_accel_ops_register_types(void)
{
type_register_static(&hvf_accel_ops_type);
}
+
type_init(hvf_accel_ops_register_types);
diff --git a/accel/hvf/hvf-all.c b/accel/hvf/hvf-all.c
index d404e01..b6075c0 100644
--- a/accel/hvf/hvf-all.c
+++ b/accel/hvf/hvf-all.c
@@ -10,8 +10,24 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
+#include "system/address-spaces.h"
+#include "system/memory.h"
#include "system/hvf.h"
#include "system/hvf_int.h"
+#include "hw/core/cpu.h"
+#include "hw/boards.h"
+#include "trace.h"
+
+bool hvf_allowed;
+
+struct mac_slot {
+ int present;
+ uint64_t size;
+ uint64_t gpa_start;
+ uint64_t gva;
+};
+
+struct mac_slot mac_slots[32];
const char *hvf_return_string(hv_return_t ret)
{
@@ -41,25 +57,254 @@ void assert_hvf_ok_impl(hv_return_t ret, const char *file, unsigned int line,
abort();
}
-struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, vaddr pc)
+static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
{
- struct hvf_sw_breakpoint *bp;
+ struct mac_slot *macslot;
+ hv_return_t ret;
+
+ macslot = &mac_slots[slot->slot_id];
+
+ if (macslot->present) {
+ if (macslot->size != slot->size) {
+ macslot->present = 0;
+ trace_hvf_vm_unmap(macslot->gpa_start, macslot->size);
+ ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
+ assert_hvf_ok(ret);
+ }
+ }
+
+ if (!slot->size) {
+ return 0;
+ }
+
+ macslot->present = 1;
+ macslot->gpa_start = slot->start;
+ macslot->size = slot->size;
+ trace_hvf_vm_map(slot->start, slot->size, slot->mem, flags,
+ flags & HV_MEMORY_READ ? 'R' : '-',
+ flags & HV_MEMORY_WRITE ? 'W' : '-',
+ flags & HV_MEMORY_EXEC ? 'E' : '-');
+ ret = hv_vm_map(slot->mem, slot->start, slot->size, flags);
+ assert_hvf_ok(ret);
+ return 0;
+}
+
+static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
+{
+ hvf_slot *mem;
+ MemoryRegion *area = section->mr;
+ bool writable = !area->readonly && !area->rom_device;
+ hv_memory_flags_t flags;
+ uint64_t page_size = qemu_real_host_page_size();
+
+ if (!memory_region_is_ram(area)) {
+ if (writable) {
+ return;
+ } else if (!memory_region_is_romd(area)) {
+ /*
+ * If the memory device is not in romd_mode, then we actually want
+ * to remove the hvf memory slot so all accesses will trap.
+ */
+ add = false;
+ }
+ }
+
+ if (!QEMU_IS_ALIGNED(int128_get64(section->size), page_size) ||
+ !QEMU_IS_ALIGNED(section->offset_within_address_space, page_size)) {
+ /* Not page aligned, so we can not map as RAM */
+ add = false;
+ }
+
+ mem = hvf_find_overlap_slot(
+ section->offset_within_address_space,
+ int128_get64(section->size));
+
+ if (mem && add) {
+ if (mem->size == int128_get64(section->size) &&
+ mem->start == section->offset_within_address_space &&
+ mem->mem == (memory_region_get_ram_ptr(area) +
+ section->offset_within_region)) {
+ return; /* Same region was attempted to register, go away. */
+ }
+ }
+
+ /* Region needs to be reset. set the size to 0 and remap it. */
+ if (mem) {
+ mem->size = 0;
+ if (do_hvf_set_memory(mem, 0)) {
+ error_report("Failed to reset overlapping slot");
+ abort();
+ }
+ }
+
+ if (!add) {
+ return;
+ }
+
+ if (area->readonly ||
+ (!memory_region_is_ram(area) && memory_region_is_romd(area))) {
+ flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
+ } else {
+ flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
+ }
+
+ /* Now make a new slot. */
+ int x;
- QTAILQ_FOREACH(bp, &hvf_state->hvf_sw_breakpoints, entry) {
- if (bp->pc == pc) {
- return bp;
+ for (x = 0; x < hvf_state->num_slots; ++x) {
+ mem = &hvf_state->slots[x];
+ if (!mem->size) {
+ break;
}
}
- return NULL;
+
+ if (x == hvf_state->num_slots) {
+ error_report("No free slots");
+ abort();
+ }
+
+ mem->size = int128_get64(section->size);
+ mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
+ mem->start = section->offset_within_address_space;
+ mem->region = area;
+
+ if (do_hvf_set_memory(mem, flags)) {
+ error_report("Error registering new memory slot");
+ abort();
+ }
}
-int hvf_sw_breakpoints_active(CPUState *cpu)
+static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
{
- return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints);
+ hvf_slot *slot;
+
+ slot = hvf_find_overlap_slot(
+ section->offset_within_address_space,
+ int128_get64(section->size));
+
+ /* protect region against writes; begin tracking it */
+ if (on) {
+ slot->flags |= HVF_SLOT_LOG;
+ hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
+ HV_MEMORY_READ | HV_MEMORY_EXEC);
+ /* stop tracking region*/
+ } else {
+ slot->flags &= ~HVF_SLOT_LOG;
+ hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
+ HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
+ }
}
-int hvf_update_guest_debug(CPUState *cpu)
+static void hvf_log_start(MemoryListener *listener,
+ MemoryRegionSection *section, int old, int new)
{
- hvf_arch_update_guest_debug(cpu);
- return 0;
+ if (old != 0) {
+ return;
+ }
+
+ hvf_set_dirty_tracking(section, 1);
+}
+
+static void hvf_log_stop(MemoryListener *listener,
+ MemoryRegionSection *section, int old, int new)
+{
+ if (new != 0) {
+ return;
+ }
+
+ hvf_set_dirty_tracking(section, 0);
+}
+
+static void hvf_log_sync(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ /*
+ * sync of dirty pages is handled elsewhere; just make sure we keep
+ * tracking the region.
+ */
+ hvf_set_dirty_tracking(section, 1);
+}
+
+static void hvf_region_add(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ hvf_set_phys_mem(section, true);
+}
+
+static void hvf_region_del(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ hvf_set_phys_mem(section, false);
+}
+
+static MemoryListener hvf_memory_listener = {
+ .name = "hvf",
+ .priority = MEMORY_LISTENER_PRIORITY_ACCEL,
+ .region_add = hvf_region_add,
+ .region_del = hvf_region_del,
+ .log_start = hvf_log_start,
+ .log_stop = hvf_log_stop,
+ .log_sync = hvf_log_sync,
+};
+
+static int hvf_accel_init(AccelState *as, MachineState *ms)
+{
+ int x;
+ hv_return_t ret;
+ HVFState *s;
+ int pa_range = 36;
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+
+ if (mc->hvf_get_physical_address_range) {
+ pa_range = mc->hvf_get_physical_address_range(ms);
+ if (pa_range < 0) {
+ return -EINVAL;
+ }
+ }
+
+ ret = hvf_arch_vm_create(ms, (uint32_t)pa_range);
+ assert_hvf_ok(ret);
+
+ s = g_new0(HVFState, 1);
+
+ s->num_slots = ARRAY_SIZE(s->slots);
+ for (x = 0; x < s->num_slots; ++x) {
+ s->slots[x].size = 0;
+ s->slots[x].slot_id = x;
+ }
+
+ QTAILQ_INIT(&s->hvf_sw_breakpoints);
+
+ hvf_state = s;
+ memory_listener_register(&hvf_memory_listener, &address_space_memory);
+
+ return hvf_arch_init();
}
+
+static int hvf_gdbstub_sstep_flags(AccelState *as)
+{
+ return SSTEP_ENABLE | SSTEP_NOIRQ;
+}
+
+static void hvf_accel_class_init(ObjectClass *oc, const void *data)
+{
+ AccelClass *ac = ACCEL_CLASS(oc);
+ ac->name = "HVF";
+ ac->init_machine = hvf_accel_init;
+ ac->allowed = &hvf_allowed;
+ ac->gdbstub_supported_sstep_flags = hvf_gdbstub_sstep_flags;
+}
+
+static const TypeInfo hvf_accel_type = {
+ .name = TYPE_HVF_ACCEL,
+ .parent = TYPE_ACCEL,
+ .instance_size = sizeof(HVFState),
+ .class_init = hvf_accel_class_init,
+};
+
+static void hvf_type_init(void)
+{
+ type_register_static(&hvf_accel_type);
+}
+
+type_init(hvf_type_init);
diff --git a/accel/hvf/trace-events b/accel/hvf/trace-events
new file mode 100644
index 0000000..2fd3e12
--- /dev/null
+++ b/accel/hvf/trace-events
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# See docs/devel/tracing.rst for syntax documentation.
+
+# hvf-accel-ops.c
+hvf_vm_map(uint64_t paddr, uint64_t size, void *vaddr, uint8_t flags, const char r, const char w, const char e) "paddr:0x%016"PRIx64" size:0x%08"PRIx64" vaddr:%p flags:0x%02x/%c%c%c"
+hvf_vm_unmap(uint64_t paddr, uint64_t size) "paddr:0x%016"PRIx64" size:0x%08"PRIx64
diff --git a/accel/hvf/trace.h b/accel/hvf/trace.h
new file mode 100644
index 0000000..83a1883
--- /dev/null
+++ b/accel/hvf/trace.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#include "trace/trace-accel_hvf.h"
diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c
index 54ea609..0eafc90 100644
--- a/accel/kvm/kvm-accel-ops.c
+++ b/accel/kvm/kvm-accel-ops.c
@@ -90,7 +90,7 @@ static int kvm_update_guest_debug_ops(CPUState *cpu)
}
#endif
-static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
+static void kvm_accel_ops_class_init(ObjectClass *oc, const void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
@@ -101,6 +101,7 @@ static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
ops->synchronize_post_init = kvm_cpu_synchronize_post_init;
ops->synchronize_state = kvm_cpu_synchronize_state;
ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm;
+ ops->handle_interrupt = generic_handle_interrupt;
#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
ops->update_guest_debug = kvm_update_guest_debug_ops;
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index f89568b..a106d1b 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -33,8 +33,9 @@
#include "system/cpus.h"
#include "system/accel-blocker.h"
#include "qemu/bswap.h"
-#include "exec/memory.h"
-#include "exec/ram_addr.h"
+#include "exec/tswap.h"
+#include "system/memory.h"
+#include "system/ram_addr.h"
#include "qemu/event_notifier.h"
#include "qemu/main-loop.h"
#include "trace.h"
@@ -57,6 +58,11 @@
#include <sys/eventfd.h>
#endif
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+# define KVM_HAVE_MCE_INJECTION 1
+#endif
+
+
/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
* need to use the real host PAGE_SIZE, as that's what KVM will use.
*/
@@ -93,6 +99,7 @@ bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_msi_use_devid;
+bool kvm_pre_fault_memory_supported;
static bool kvm_has_guest_debug;
static int kvm_sstep_flags;
static bool kvm_immediate_exit;
@@ -437,9 +444,8 @@ int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id)
return kvm_fd;
}
-static void kvm_reset_parked_vcpus(void *param)
+static void kvm_reset_parked_vcpus(KVMState *s)
{
- KVMState *s = param;
struct KVMParkedVcpu *cpu;
QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
@@ -447,7 +453,13 @@ static void kvm_reset_parked_vcpus(void *param)
}
}
-int kvm_create_vcpu(CPUState *cpu)
+/**
+ * kvm_create_vcpu - Gets a parked KVM vCPU or creates a KVM vCPU
+ * @cpu: QOM CPUState object for which KVM vCPU has to be fetched/created.
+ *
+ * @returns: 0 when success, errno (<0) when failed.
+ */
+static int kvm_create_vcpu(CPUState *cpu)
{
unsigned long vcpu_id = kvm_arch_vcpu_id(cpu);
KVMState *s = kvm_state;
@@ -466,7 +478,9 @@ int kvm_create_vcpu(CPUState *cpu)
cpu->kvm_fd = kvm_fd;
cpu->kvm_state = s;
- cpu->vcpu_dirty = true;
+ if (!s->guest_state_protected) {
+ cpu->vcpu_dirty = true;
+ }
cpu->dirty_pages = 0;
cpu->throttle_us_per_full = 0;
@@ -507,16 +521,23 @@ static int do_kvm_destroy_vcpu(CPUState *cpu)
goto err;
}
+ /* If I am the CPU that created coalesced_mmio_ring, then discard it */
+ if (s->coalesced_mmio_ring == (void *)cpu->kvm_run + PAGE_SIZE) {
+ s->coalesced_mmio_ring = NULL;
+ }
+
ret = munmap(cpu->kvm_run, mmap_size);
if (ret < 0) {
goto err;
}
+ cpu->kvm_run = NULL;
if (cpu->kvm_dirty_gfns) {
ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes);
if (ret < 0) {
goto err;
}
+ cpu->kvm_dirty_gfns = NULL;
}
kvm_park_vcpu(cpu);
@@ -540,6 +561,11 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp)
trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
+ ret = kvm_arch_pre_create_vcpu(cpu, errp);
+ if (ret < 0) {
+ goto err;
+ }
+
ret = kvm_create_vcpu(cpu);
if (ret < 0) {
error_setg_errno(errp, -ret,
@@ -595,6 +621,31 @@ err:
return ret;
}
+void kvm_close(void)
+{
+ CPUState *cpu;
+
+ if (!kvm_state || kvm_state->fd == -1) {
+ return;
+ }
+
+ CPU_FOREACH(cpu) {
+ cpu_remove_sync(cpu);
+ close(cpu->kvm_fd);
+ cpu->kvm_fd = -1;
+ close(cpu->kvm_vcpu_stats_fd);
+ cpu->kvm_vcpu_stats_fd = -1;
+ }
+
+ if (kvm_state && kvm_state->fd != -1) {
+ close(kvm_state->vmfd);
+ kvm_state->vmfd = -1;
+ close(kvm_state->fd);
+ kvm_state->fd = -1;
+ }
+ kvm_state = NULL;
+}
+
/*
* dirty pages logging control
*/
@@ -1314,21 +1365,22 @@ bool kvm_hwpoisoned_mem(void)
static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
{
-#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
- /* The kernel expects ioeventfd values in HOST_BIG_ENDIAN
- * endianness, but the memory core hands them in target endianness.
- * For example, PPC is always treated as big-endian even if running
- * on KVM and on PPC64LE. Correct here.
- */
- switch (size) {
- case 2:
- val = bswap16(val);
- break;
- case 4:
- val = bswap32(val);
- break;
+ if (target_needs_bswap()) {
+ /*
+ * The kernel expects ioeventfd values in HOST_BIG_ENDIAN
+ * endianness, but the memory core hands them in target endianness.
+ * For example, PPC is always treated as big-endian even if running
+ * on KVM and on PPC64LE. Correct here, swapping back.
+ */
+ switch (size) {
+ case 2:
+ val = bswap16(val);
+ break;
+ case 4:
+ val = bswap32(val);
+ break;
+ }
}
-#endif
return val;
}
@@ -2420,7 +2472,7 @@ static int kvm_recommended_vcpus(KVMState *s)
static int kvm_max_vcpus(KVMState *s)
{
- int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
+ int ret = kvm_vm_check_extension(s, KVM_CAP_MAX_VCPUS);
return (ret) ? ret : kvm_recommended_vcpus(s);
}
@@ -2450,13 +2502,10 @@ uint32_t kvm_dirty_ring_size(void)
return kvm_state->kvm_dirty_ring_size;
}
-static int do_kvm_create_vm(MachineState *ms, int type)
+static int do_kvm_create_vm(KVMState *s, int type)
{
- KVMState *s;
int ret;
- s = KVM_STATE(ms->accelerator);
-
do {
ret = kvm_ioctl(s, KVM_CREATE_VM, type);
} while (ret == -EINTR);
@@ -2553,7 +2602,7 @@ static int kvm_setup_dirty_ring(KVMState *s)
return 0;
}
-static int kvm_init(MachineState *ms)
+static int kvm_init(AccelState *as, MachineState *ms)
{
MachineClass *mc = MACHINE_GET_CLASS(ms);
static const char upgrade_note[] =
@@ -2568,15 +2617,13 @@ static int kvm_init(MachineState *ms)
{ /* end of list */ }
}, *nc = num_cpus;
int soft_vcpus_limit, hard_vcpus_limit;
- KVMState *s;
+ KVMState *s = KVM_STATE(as);
const KVMCapabilityInfo *missing_cap;
int ret;
int type;
qemu_mutex_init(&kml_slots_lock);
- s = KVM_STATE(ms->accelerator);
-
/*
* On systems where the kernel can support different base page
* sizes, host page size may be different from TARGET_PAGE_SIZE,
@@ -2628,7 +2675,7 @@ static int kvm_init(MachineState *ms)
goto err;
}
- ret = do_kvm_create_vm(ms, type);
+ ret = do_kvm_create_vm(s, type);
if (ret < 0) {
goto err;
}
@@ -2732,13 +2779,13 @@ static int kvm_init(MachineState *ms)
kvm_check_extension(s, KVM_CAP_GUEST_MEMFD) &&
kvm_check_extension(s, KVM_CAP_USER_MEMORY2) &&
(kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE);
+ kvm_pre_fault_memory_supported = kvm_vm_check_extension(s, KVM_CAP_PRE_FAULT_MEMORY);
if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
}
qemu_register_reset(kvm_unpoison_all, NULL);
- qemu_register_reset(kvm_reset_parked_vcpus, s);
if (s->kernel_irqchip_allowed) {
kvm_irqchip_create(s);
@@ -2908,6 +2955,10 @@ static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg
void kvm_cpu_synchronize_post_reset(CPUState *cpu)
{
run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
+
+ if (cpu == first_cpu) {
+ kvm_reset_parked_vcpus(kvm_state);
+ }
}
static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
@@ -3073,6 +3124,15 @@ int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private)
addr = memory_region_get_ram_ptr(mr) + section.offset_within_region;
rb = qemu_ram_block_from_host(addr, false, &offset);
+ ret = ram_block_attributes_state_change(RAM_BLOCK_ATTRIBUTES(mr->rdm),
+ offset, size, to_private);
+ if (ret) {
+ error_report("Failed to notify the listener the state change of "
+ "(0x%"HWADDR_PRIx" + 0x%"HWADDR_PRIx") to %s",
+ start, size, to_private ? "private" : "shared");
+ goto out_unref;
+ }
+
if (to_private) {
if (rb->page_size != qemu_real_host_page_size()) {
/*
@@ -3758,10 +3818,10 @@ int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
return r;
}
-static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
+static bool kvm_accel_has_memory(AccelState *accel, AddressSpace *as,
hwaddr start_addr, hwaddr size)
{
- KVMState *kvm = KVM_STATE(ms->accelerator);
+ KVMState *kvm = KVM_STATE(accel);
int i;
for (i = 0; i < kvm->nr_as; ++i) {
@@ -3952,12 +4012,12 @@ static void kvm_accel_instance_init(Object *obj)
* Returns: SSTEP_* flags that KVM supports for guest debug. The
* support is probed during kvm_init()
*/
-static int kvm_gdbstub_sstep_flags(void)
+static int kvm_gdbstub_sstep_flags(AccelState *as)
{
return kvm_sstep_flags;
}
-static void kvm_accel_class_init(ObjectClass *oc, void *data)
+static void kvm_accel_class_init(ObjectClass *oc, const void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "KVM";
diff --git a/accel/meson.build b/accel/meson.build
index 5eaeb68..5290931 100644
--- a/accel/meson.build
+++ b/accel/meson.build
@@ -1,3 +1,4 @@
+common_ss.add(files('accel-common.c'))
specific_ss.add(files('accel-target.c'))
system_ss.add(files('accel-system.c', 'accel-blocker.c'))
user_ss.add(files('accel-user.c'))
diff --git a/accel/qtest/qtest.c b/accel/qtest/qtest.c
index 7fae80f..2b83126 100644
--- a/accel/qtest/qtest.c
+++ b/accel/qtest/qtest.c
@@ -24,6 +24,7 @@
#include "qemu/guest-random.h"
#include "qemu/main-loop.h"
#include "hw/core/cpu.h"
+#include "accel/dummy-cpus.h"
static int64_t qtest_clock_counter;
@@ -37,12 +38,12 @@ static void qtest_set_virtual_clock(int64_t count)
qatomic_set_i64(&qtest_clock_counter, count);
}
-static int qtest_init_accel(MachineState *ms)
+static int qtest_init_accel(AccelState *as, MachineState *ms)
{
return 0;
}
-static void qtest_accel_class_init(ObjectClass *oc, void *data)
+static void qtest_accel_class_init(ObjectClass *oc, const void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "QTest";
@@ -59,13 +60,14 @@ static const TypeInfo qtest_accel_type = {
};
module_obj(TYPE_QTEST_ACCEL);
-static void qtest_accel_ops_class_init(ObjectClass *oc, void *data)
+static void qtest_accel_ops_class_init(ObjectClass *oc, const void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
ops->create_vcpu_thread = dummy_start_vcpu_thread;
ops->get_virtual_clock = qtest_get_virtual_clock;
ops->set_virtual_clock = qtest_set_virtual_clock;
+ ops->handle_interrupt = generic_handle_interrupt;
};
static const TypeInfo qtest_accel_ops_type = {
diff --git a/accel/stubs/hvf-stub.c b/accel/stubs/hvf-stub.c
new file mode 100644
index 0000000..42eadc5
--- /dev/null
+++ b/accel/stubs/hvf-stub.c
@@ -0,0 +1,12 @@
+/*
+ * HVF stubs for QEMU
+ *
+ * Copyright (c) Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "system/hvf.h"
+
+bool hvf_allowed;
diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c
index ecfd763..68cd33b 100644
--- a/accel/stubs/kvm-stub.c
+++ b/accel/stubs/kvm-stub.c
@@ -29,10 +29,6 @@ void kvm_flush_coalesced_mmio_buffer(void)
{
}
-void kvm_cpu_synchronize_state(CPUState *cpu)
-{
-}
-
bool kvm_has_sync_mmu(void)
{
return false;
@@ -105,11 +101,6 @@ unsigned int kvm_get_free_memslots(void)
return 0;
}
-void kvm_init_cpu_signals(CPUState *cpu)
-{
- abort();
-}
-
bool kvm_arm_supports_user_irq(void)
{
return false;
diff --git a/accel/stubs/meson.build b/accel/stubs/meson.build
index 91a2d21..9dfc4f9 100644
--- a/accel/stubs/meson.build
+++ b/accel/stubs/meson.build
@@ -2,5 +2,8 @@ system_stubs_ss = ss.source_set()
system_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c'))
system_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
system_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c'))
+system_stubs_ss.add(when: 'CONFIG_HVF', if_false: files('hvf-stub.c'))
+system_stubs_ss.add(when: 'CONFIG_NVMM', if_false: files('nvmm-stub.c'))
+system_stubs_ss.add(when: 'CONFIG_WHPX', if_false: files('whpx-stub.c'))
specific_ss.add_all(when: ['CONFIG_SYSTEM_ONLY'], if_true: system_stubs_ss)
diff --git a/accel/stubs/nvmm-stub.c b/accel/stubs/nvmm-stub.c
new file mode 100644
index 0000000..ec14837
--- /dev/null
+++ b/accel/stubs/nvmm-stub.c
@@ -0,0 +1,12 @@
+/*
+ * NVMM stubs for QEMU
+ *
+ * Copyright (c) Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "system/nvmm.h"
+
+bool nvmm_allowed;
diff --git a/accel/stubs/tcg-stub.c b/accel/stubs/tcg-stub.c
index b2b9881..3b76b8b 100644
--- a/accel/stubs/tcg-stub.c
+++ b/accel/stubs/tcg-stub.c
@@ -11,8 +11,7 @@
*/
#include "qemu/osdep.h"
-#include "exec/tb-flush.h"
-#include "exec/exec-all.h"
+#include "exec/cpu-common.h"
G_NORETURN void cpu_loop_exit(CPUState *cpu)
{
diff --git a/accel/stubs/whpx-stub.c b/accel/stubs/whpx-stub.c
new file mode 100644
index 0000000..c564c89
--- /dev/null
+++ b/accel/stubs/whpx-stub.c
@@ -0,0 +1,12 @@
+/*
+ * WHPX stubs for QEMU
+ *
+ * Copyright (c) Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "system/whpx.h"
+
+bool whpx_allowed;
diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
index 89593b2..08a475c 100644
--- a/accel/tcg/atomic_template.h
+++ b/accel/tcg/atomic_template.h
@@ -77,7 +77,7 @@
# define END _le
#endif
-ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
+ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, vaddr addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
@@ -101,7 +101,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
}
#if DATA_SIZE < 16
-ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
+ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, vaddr addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
@@ -120,7 +120,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
}
#define GEN_ATOMIC_HELPER(X) \
-ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr, ret; \
@@ -156,7 +156,7 @@ GEN_ATOMIC_HELPER(xor_fetch)
* of CF_PARALLEL's value, we'll trace just a read and a write.
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
-ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr, cmp, old, new, val = xval; \
@@ -202,7 +202,7 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
# define END _be
#endif
-ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
+ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, vaddr addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
@@ -226,7 +226,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
}
#if DATA_SIZE < 16
-ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
+ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, vaddr addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
@@ -245,7 +245,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
}
#define GEN_ATOMIC_HELPER(X) \
-ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr, ret; \
@@ -278,7 +278,7 @@ GEN_ATOMIC_HELPER(xor_fetch)
* of CF_PARALLEL's value, we'll trace just a read and a write.
*/
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
-ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \
diff --git a/accel/tcg/backend-ldst.h b/accel/tcg/backend-ldst.h
new file mode 100644
index 0000000..9c3a407
--- /dev/null
+++ b/accel/tcg/backend-ldst.h
@@ -0,0 +1,41 @@
+/*
+ * Internal memory barrier helpers for QEMU (target agnostic)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_BACKEND_LDST_H
+#define ACCEL_TCG_BACKEND_LDST_H
+
+#include "tcg-target-mo.h"
+
+/**
+ * tcg_req_mo:
+ * @guest_mo: Guest default memory order
+ * @type: TCGBar
+ *
+ * Filter @type to the barrier that is required for the guest
+ * memory ordering vs the host memory ordering. A non-zero
+ * result indicates that some barrier is required.
+ */
+#define tcg_req_mo(guest_mo, type) \
+ ((type) & guest_mo & ~TCG_TARGET_DEFAULT_MO)
+
+/**
+ * cpu_req_mo:
+ * @cpu: CPUState
+ * @type: TCGBar
+ *
+ * If tcg_req_mo indicates a barrier for @type is required
+ * for the guest memory model, issue a host memory barrier.
+ */
+#define cpu_req_mo(cpu, type) \
+ do { \
+ if (tcg_req_mo(cpu->cc->tcg_ops->guest_default_memory_order, type)) { \
+ smp_mb(); \
+ } \
+ } while (0)
+
+#endif
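
Note: backend-ldst.h reduces the guest-versus-host memory-ordering decision to two macros: tcg_req_mo() masks the requested barrier with the guest's default memory order and clears whatever the TCG backend already guarantees (TCG_TARGET_DEFAULT_MO), and cpu_req_mo() issues a host smp_mb() only when something is left over. A hedged sketch of the intended call pattern in a load helper follows; the helper itself is illustrative (the real users are the do_ld/do_st paths in cputlb.c and ldst_common.c.inc), only the cpu_req_mo() line reflects this header.

/* Illustrative sketch: ordering check at the top of a guest load helper. */
static uint64_t example_ld8(CPUState *cpu, vaddr addr, MemOpIdx oi, uintptr_t ra)
{
    /*
     * Barrier only if the guest model is stronger than the TCG backend,
     * e.g. an x86 guest (TSO) translated by a weakly-ordered host backend.
     */
    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);

    /* ... TLB lookup and the actual host load would follow here ... */
    return 0;
}
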
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index ef3d967..713bdb2 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -23,18 +23,20 @@
#include "qapi/type-helpers.h"
#include "hw/core/cpu.h"
#include "accel/tcg/cpu-ops.h"
+#include "accel/tcg/helper-retaddr.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/cpu-common.h"
+#include "exec/cpu-interrupt.h"
#include "exec/page-protection.h"
+#include "exec/mmap-lock.h"
#include "exec/translation-block.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
-#include "exec/cpu-all.h"
-#include "system/cpu-timers.h"
+#include "exec/icount.h"
#include "exec/replay-core.h"
#include "system/tcg.h"
#include "exec/helper-proto-common.h"
@@ -43,7 +45,6 @@
#include "tb-context.h"
#include "tb-internal.h"
#include "internal-common.h"
-#include "internal-target.h"
/* -icount align implementation. */
@@ -148,12 +149,9 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
#endif /* CONFIG USER ONLY */
struct tb_desc {
- vaddr pc;
- uint64_t cs_base;
+ TCGTBCPUState s;
CPUArchState *env;
tb_page_addr_t page_addr0;
- uint32_t flags;
- uint32_t cflags;
};
static bool tb_lookup_cmp(const void *p, const void *d)
@@ -161,11 +159,11 @@ static bool tb_lookup_cmp(const void *p, const void *d)
const TranslationBlock *tb = p;
const struct tb_desc *desc = d;
- if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
+ if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->s.pc) &&
tb_page_addr0(tb) == desc->page_addr0 &&
- tb->cs_base == desc->cs_base &&
- tb->flags == desc->flags &&
- tb_cflags(tb) == desc->cflags) {
+ tb->cs_base == desc->s.cs_base &&
+ tb->flags == desc->s.flags &&
+ tb_cflags(tb) == desc->s.cflags) {
/* check next page if needed */
tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
if (tb_phys_page1 == -1) {
@@ -183,7 +181,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
* is different for the new TB. Therefore any exception raised
* here by the faulting lookup is not premature.
*/
- virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
+ virt_page1 = TARGET_PAGE_ALIGN(desc->s.pc);
phys_page1 = get_page_addr_code(desc->env, virt_page1);
if (tb_phys_page1 == phys_page1) {
return true;
@@ -193,26 +191,21 @@ static bool tb_lookup_cmp(const void *p, const void *d)
return false;
}
-static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
- uint64_t cs_base, uint32_t flags,
- uint32_t cflags)
+static TranslationBlock *tb_htable_lookup(CPUState *cpu, TCGTBCPUState s)
{
tb_page_addr_t phys_pc;
struct tb_desc desc;
uint32_t h;
+ desc.s = s;
desc.env = cpu_env(cpu);
- desc.cs_base = cs_base;
- desc.flags = flags;
- desc.cflags = cflags;
- desc.pc = pc;
- phys_pc = get_page_addr_code(desc.env, pc);
+ phys_pc = get_page_addr_code(desc.env, s.pc);
if (phys_pc == -1) {
return NULL;
}
desc.page_addr0 = phys_pc;
- h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
- flags, cs_base, cflags);
+ h = tb_hash_func(phys_pc, (s.cflags & CF_PCREL ? 0 : s.pc),
+ s.flags, s.cs_base, s.cflags);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
@@ -230,35 +223,33 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
*
* Returns: an existing translation block or NULL.
*/
-static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
- uint64_t cs_base, uint32_t flags,
- uint32_t cflags)
+static inline TranslationBlock *tb_lookup(CPUState *cpu, TCGTBCPUState s)
{
TranslationBlock *tb;
CPUJumpCache *jc;
uint32_t hash;
/* we should never be trying to look up an INVALID tb */
- tcg_debug_assert(!(cflags & CF_INVALID));
+ tcg_debug_assert(!(s.cflags & CF_INVALID));
- hash = tb_jmp_cache_hash_func(pc);
+ hash = tb_jmp_cache_hash_func(s.pc);
jc = cpu->tb_jmp_cache;
tb = qatomic_read(&jc->array[hash].tb);
if (likely(tb &&
- jc->array[hash].pc == pc &&
- tb->cs_base == cs_base &&
- tb->flags == flags &&
- tb_cflags(tb) == cflags)) {
+ jc->array[hash].pc == s.pc &&
+ tb->cs_base == s.cs_base &&
+ tb->flags == s.flags &&
+ tb_cflags(tb) == s.cflags)) {
goto hit;
}
- tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
+ tb = tb_htable_lookup(cpu, s);
if (tb == NULL) {
return NULL;
}
- jc->array[hash].pc = pc;
+ jc->array[hash].pc = s.pc;
qatomic_set(&jc->array[hash].tb, tb);
hit:
@@ -266,7 +257,7 @@ hit:
* As long as tb is not NULL, the contents are consistent. Therefore,
* the virtual PC has to match for non-CF_PCREL translations.
*/
- assert((tb_cflags(tb) & CF_PCREL) || tb->pc == pc);
+ assert((tb_cflags(tb) & CF_PCREL) || tb->pc == s.pc);
return tb;
}
@@ -283,14 +274,11 @@ static void log_cpu_exec(vaddr pc, CPUState *cpu,
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
FILE *logfile = qemu_log_trylock();
if (logfile) {
- int flags = 0;
+ int flags = CPU_DUMP_CCOP;
if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
flags |= CPU_DUMP_FPU;
}
-#if defined(TARGET_I386)
- flags |= CPU_DUMP_CCOP;
-#endif
if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
flags |= CPU_DUMP_VPU;
}
@@ -386,9 +374,6 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
CPUState *cpu = env_cpu(env);
TranslationBlock *tb;
- vaddr pc;
- uint64_t cs_base;
- uint32_t flags, cflags;
/*
* By definition we've just finished a TB, so I/O is OK.
@@ -398,20 +383,21 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
* The next TB, if we chain to it, will clear the flag again.
*/
cpu->neg.can_do_io = true;
- cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- cflags = curr_cflags(cpu);
- if (check_for_breakpoints(cpu, pc, &cflags)) {
+ TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
+ s.cflags = curr_cflags(cpu);
+
+ if (check_for_breakpoints(cpu, s.pc, &s.cflags)) {
cpu_loop_exit(cpu);
}
- tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+ tb = tb_lookup(cpu, s);
if (tb == NULL) {
return tcg_code_gen_epilogue;
}
if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
- log_cpu_exec(pc, cpu, tb);
+ log_cpu_exec(s.pc, cpu, tb);
}
return tb->tc.ptr;
@@ -561,11 +547,7 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)
void cpu_exec_step_atomic(CPUState *cpu)
{
- CPUArchState *env = cpu_env(cpu);
TranslationBlock *tb;
- vaddr pc;
- uint64_t cs_base;
- uint32_t flags, cflags;
int tb_exit;
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
@@ -574,13 +556,13 @@ void cpu_exec_step_atomic(CPUState *cpu)
g_assert(!cpu->running);
cpu->running = true;
- cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
+ TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
+ s.cflags = curr_cflags(cpu);
- cflags = curr_cflags(cpu);
/* Execute in a serial context. */
- cflags &= ~CF_PARALLEL;
+ s.cflags &= ~CF_PARALLEL;
/* After 1 insn, return and release the exclusive lock. */
- cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
+ s.cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
/*
* No need to check_for_breakpoints here.
* We only arrive in cpu_exec_step_atomic after beginning execution
@@ -588,16 +570,16 @@ void cpu_exec_step_atomic(CPUState *cpu)
* Any breakpoint for this insn will have been recognized earlier.
*/
- tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+ tb = tb_lookup(cpu, s);
if (tb == NULL) {
mmap_lock();
- tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+ tb = tb_gen_code(cpu, s);
mmap_unlock();
}
cpu_exec_enter(cpu);
/* execute the generated code */
- trace_exec_tb(tb, pc);
+ trace_exec_tb(tb, s.pc);
cpu_tb_exec(cpu, tb, &tb_exit);
cpu_exec_exit(cpu);
} else {
@@ -665,7 +647,6 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
out_unlock_next:
qemu_spin_unlock(&tb_next->jmp_lock);
- return;
}
static inline bool cpu_handle_halt(CPUState *cpu)
@@ -731,10 +712,10 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
* If user mode only, we simulate a fake exception which will be
* handled outside the cpu execution loop.
*/
-#if defined(TARGET_I386)
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
- tcg_ops->fake_user_interrupt(cpu);
-#endif /* TARGET_I386 */
+ if (tcg_ops->fake_user_interrupt) {
+ tcg_ops->fake_user_interrupt(cpu);
+ }
*ret = cpu->exception_index;
cpu->exception_index = -1;
return true;
@@ -821,33 +802,22 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
cpu->exception_index = EXCP_HLT;
bql_unlock();
return true;
- }
-#if defined(TARGET_I386)
- else if (interrupt_request & CPU_INTERRUPT_INIT) {
- X86CPU *x86_cpu = X86_CPU(cpu);
- CPUArchState *env = &x86_cpu->env;
- replay_interrupt();
- cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
- do_cpu_init(x86_cpu);
- cpu->exception_index = EXCP_HALTED;
- bql_unlock();
- return true;
- }
-#else
- else if (interrupt_request & CPU_INTERRUPT_RESET) {
- replay_interrupt();
- cpu_reset(cpu);
- bql_unlock();
- return true;
- }
-#endif /* !TARGET_I386 */
- /* The target hook has 3 exit conditions:
- False when the interrupt isn't processed,
- True when it is, and we should restart on a new TB,
- and via longjmp via cpu_loop_exit. */
- else {
+ } else {
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
+ if (interrupt_request & CPU_INTERRUPT_RESET) {
+ replay_interrupt();
+ tcg_ops->cpu_exec_reset(cpu);
+ bql_unlock();
+ return true;
+ }
+
+ /*
+ * The target hook has 3 exit conditions:
+ * False when the interrupt isn't processed,
+ * True when it is, and we should restart on a new TB,
+ * and via longjmp via cpu_loop_exit.
+ */
if (tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
if (!tcg_ops->need_replay_interrupt ||
tcg_ops->need_replay_interrupt(interrupt_request)) {
@@ -954,11 +924,8 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
while (!cpu_handle_interrupt(cpu, &last_tb)) {
TranslationBlock *tb;
- vaddr pc;
- uint64_t cs_base;
- uint32_t flags, cflags;
-
- cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags);
+ TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
+ s.cflags = cpu->cflags_next_tb;
/*
* When requested, use an exact setting for cflags for the next
@@ -967,33 +934,32 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
* have CF_INVALID set, -1 is a convenient invalid value that
* does not require tcg headers for cpu_common_reset.
*/
- cflags = cpu->cflags_next_tb;
- if (cflags == -1) {
- cflags = curr_cflags(cpu);
+ if (s.cflags == -1) {
+ s.cflags = curr_cflags(cpu);
} else {
cpu->cflags_next_tb = -1;
}
- if (check_for_breakpoints(cpu, pc, &cflags)) {
+ if (check_for_breakpoints(cpu, s.pc, &s.cflags)) {
break;
}
- tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+ tb = tb_lookup(cpu, s);
if (tb == NULL) {
CPUJumpCache *jc;
uint32_t h;
mmap_lock();
- tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+ tb = tb_gen_code(cpu, s);
mmap_unlock();
/*
* We add the TB in the virtual pc hash table
* for the fast lookup
*/
- h = tb_jmp_cache_hash_func(pc);
+ h = tb_jmp_cache_hash_func(s.pc);
jc = cpu->tb_jmp_cache;
- jc->array[h].pc = pc;
+ jc->array[h].pc = s.pc;
qatomic_set(&jc->array[h].tb, tb);
}
@@ -1013,7 +979,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
tb_add_jump(last_tb, tb_exit, tb);
}
- cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);
+ cpu_loop_exec_tb(cpu, tb, s.pc, &last_tb, &tb_exit);
/* Try to align the host and virtual clocks
if the guest is in advance */
@@ -1072,8 +1038,12 @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
#ifndef CONFIG_USER_ONLY
assert(tcg_ops->cpu_exec_halt);
assert(tcg_ops->cpu_exec_interrupt);
+ assert(tcg_ops->cpu_exec_reset);
+ assert(tcg_ops->pointer_wrap);
#endif /* !CONFIG_USER_ONLY */
assert(tcg_ops->translate_code);
+ assert(tcg_ops->get_tb_cpu_state);
+ assert(tcg_ops->mmu_index);
tcg_ops->initialize();
tcg_target_initialized = true;
}
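
The cpu-exec.c hunks above replace the separate pc/cs_base/flags/cflags values with a single TCGTBCPUState returned by the new tcg_ops->get_tb_cpu_state hook, which the caller then completes with cflags. A minimal standalone sketch of the shape of such a hook, assuming the struct carries pc, cs_base, flags and cflags; the ExampleEnv type and its layout are invented for illustration and are not QEMU's:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t vaddr;

/* Field names follow the series; the exact layout here is an assumption. */
typedef struct TCGTBCPUState {
    vaddr pc;
    uint32_t flags;
    uint32_t cflags;
    uint64_t cs_base;
} TCGTBCPUState;

/* Invented example state: a flat target identified only by its pc. */
typedef struct ExampleEnv {
    vaddr pc;
} ExampleEnv;

static TCGTBCPUState example_get_tb_cpu_state(const ExampleEnv *env)
{
    /* cflags is deliberately left 0: the caller fills it in afterwards. */
    return (TCGTBCPUState){ .pc = env->pc };
}

int main(void)
{
    ExampleEnv env = { .pc = 0x400000 };
    TCGTBCPUState s = example_get_tb_cpu_state(&env);

    printf("tb key: pc=%#llx flags=%#x\n",
           (unsigned long long)s.pc, s.flags);
    return 0;
}

The caller then overrides s.cflags with curr_cflags() or cpu->cflags_next_tb, exactly as the hunks above do.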
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index fb22048..87e14bd 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -19,15 +19,17 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
+#include "qemu/target-info.h"
#include "accel/tcg/cpu-ops.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/iommu.h"
+#include "accel/tcg/probe.h"
#include "exec/page-protection.h"
-#include "exec/memory.h"
-#include "exec/cpu_ldst.h"
+#include "system/memory.h"
+#include "accel/tcg/cpu-ldst-common.h"
+#include "accel/tcg/cpu-mmu-index.h"
#include "exec/cputlb.h"
#include "exec/tb-flush.h"
-#include "exec/memory-internal.h"
-#include "exec/ram_addr.h"
+#include "system/ram_addr.h"
#include "exec/mmu-access-type.h"
#include "exec/tlb-common.h"
#include "exec/vaddr.h"
@@ -35,18 +37,21 @@
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto-common.h"
+#include "exec/tlb-flags.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "tb-internal.h"
#include "trace.h"
#include "tb-hash.h"
#include "tb-internal.h"
+#include "tlb-bounds.h"
#include "internal-common.h"
-#include "internal-target.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"
+#include "backend-ldst.h"
+
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
@@ -768,19 +773,19 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
assert_cpu_is_self(cpu);
+ /* If no page bits are significant, this devolves to tlb_flush. */
+ if (bits < TARGET_PAGE_BITS) {
+ tlb_flush_by_mmuidx(cpu, idxmap);
+ return;
+ }
/*
* If all bits are significant, and len is small,
* this devolves to tlb_flush_page.
*/
- if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
+ if (len <= TARGET_PAGE_SIZE && bits >= target_long_bits()) {
tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
return;
}
- /* If no page bits are significant, this devolves to tlb_flush. */
- if (bits < TARGET_PAGE_BITS) {
- tlb_flush_by_mmuidx(cpu, idxmap);
- return;
- }
/* This should already be page aligned */
d.addr = addr & TARGET_PAGE_MASK;
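
The reordered checks above decide, in order, whether a range-flush request devolves to a full flush, a single-page flush, or a true range flush; the series also replaces the compile-time TARGET_LONG_BITS test with a runtime target_long_bits() call. A small sketch of that dispatch, assuming TARGET_PAGE_BITS is 12 (the helper is invented for illustration):

#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_SIZE (1u << TARGET_PAGE_BITS)

enum flush_kind { FLUSH_ALL, FLUSH_ONE_PAGE, FLUSH_RANGE };

static enum flush_kind pick_flush(unsigned bits, unsigned long len,
                                  unsigned target_long_bits)
{
    if (bits < TARGET_PAGE_BITS) {
        return FLUSH_ALL;        /* no page bits are significant */
    }
    if (len <= TARGET_PAGE_SIZE && bits >= target_long_bits) {
        return FLUSH_ONE_PAGE;   /* all bits significant, small range */
    }
    return FLUSH_RANGE;
}

int main(void)
{
    printf("%d %d %d\n",
           pick_flush(8, 4096, 64),     /* FLUSH_ALL */
           pick_flush(64, 4096, 64),    /* FLUSH_ONE_PAGE */
           pick_flush(48, 65536, 64));  /* FLUSH_RANGE */
    return 0;
}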
@@ -806,19 +811,19 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
TLBFlushRangeData d, *p;
CPUState *dst_cpu;
+ /* If no page bits are significant, this devolves to tlb_flush. */
+ if (bits < TARGET_PAGE_BITS) {
+ tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
+ return;
+ }
/*
* If all bits are significant, and len is small,
* this devolves to tlb_flush_page.
*/
- if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
+ if (len <= TARGET_PAGE_SIZE && bits >= target_long_bits()) {
tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
return;
}
- /* If no page bits are significant, this devolves to tlb_flush. */
- if (bits < TARGET_PAGE_BITS) {
- tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
- return;
- }
/* This should already be page aligned */
d.addr = addr & TARGET_PAGE_MASK;
@@ -882,18 +887,17 @@ void tlb_unprotect_code(ram_addr_t ram_addr)
*
* Called with tlb_c.lock held.
*/
-static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
+static void tlb_reset_dirty_range_locked(CPUTLBEntryFull *full, CPUTLBEntry *ent,
uintptr_t start, uintptr_t length)
{
- uintptr_t addr = tlb_entry->addr_write;
+ const uintptr_t addr = ent->addr_write;
+ int flags = addr | full->slow_flags[MMU_DATA_STORE];
- if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
- TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
- addr &= TARGET_PAGE_MASK;
- addr += tlb_entry->addend;
- if ((addr - start) < length) {
- qatomic_set(&tlb_entry->addr_write,
- tlb_entry->addr_write | TLB_NOTDIRTY);
+ flags &= TLB_INVALID_MASK | TLB_MMIO | TLB_DISCARD_WRITE | TLB_NOTDIRTY;
+ if (flags == 0) {
+ uintptr_t host = (addr & TARGET_PAGE_MASK) + ent->addend;
+ if ((host - start) < length) {
+ qatomic_set(&ent->addr_write, addr | TLB_NOTDIRTY);
}
}
}
@@ -912,23 +916,25 @@ static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
* We must take tlb_c.lock to avoid racing with another vCPU update. The only
* thing actually updated is the target TLB entry ->addr_write flags.
*/
-void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
+void tlb_reset_dirty(CPUState *cpu, uintptr_t start, uintptr_t length)
{
int mmu_idx;
qemu_spin_lock(&cpu->neg.tlb.c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+ CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
+ CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
+ unsigned int n = tlb_n_entries(fast);
unsigned int i;
- unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);
for (i = 0; i < n; i++) {
- tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
- start1, length);
+ tlb_reset_dirty_range_locked(&desc->fulltlb[i], &fast->table[i],
+ start, length);
}
for (i = 0; i < CPU_VTLB_SIZE; i++) {
- tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
- start1, length);
+ tlb_reset_dirty_range_locked(&desc->vfulltlb[i], &desc->vtable[i],
+ start, length);
}
}
qemu_spin_unlock(&cpu->neg.tlb.c.lock);
@@ -1336,7 +1342,7 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
- tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
+ tb_invalidate_phys_range_fast(cpu, ram_addr, size, retaddr);
}
/*
@@ -1767,6 +1773,9 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
l->page[1].size = l->page[0].size - size0;
l->page[0].size = size0;
+ l->page[1].addr = cpu->cc->tcg_ops->pointer_wrap(cpu, l->mmu_idx,
+ l->page[1].addr, addr);
+
/*
* Lookup both pages, recognizing exceptions from either. If the
* second lookup potentially resized, refresh first CPUTLBEntryFull.
@@ -1865,8 +1874,12 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
goto stop_the_world;
}
- /* Collect tlb flags for read. */
+ /* Finish collecting tlb flags for both read and write. */
+ full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
tlb_addr |= tlbe->addr_read;
+ tlb_addr &= TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
+ tlb_addr |= full->slow_flags[MMU_DATA_STORE];
+ tlb_addr |= full->slow_flags[MMU_DATA_LOAD];
/* Notice an IO access or a needs-MMU-lookup access */
if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
@@ -1876,13 +1889,12 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
}
hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
- full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
notdirty_write(cpu, addr, size, full, retaddr);
}
- if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
+ if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
int wp_flags = 0;
if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
@@ -1891,10 +1903,8 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
wp_flags |= BP_MEM_READ;
}
- if (wp_flags) {
- cpu_check_watchpoint(cpu, addr, size,
- full->attrs, wp_flags, retaddr);
- }
+ cpu_check_watchpoint(cpu, addr, size,
+ full->attrs, wp_flags, retaddr);
}
return hostaddr;
@@ -2321,7 +2331,7 @@ static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
MMULookupLocals l;
bool crosspage;
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
tcg_debug_assert(!crosspage);
@@ -2336,7 +2346,7 @@ static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uint16_t ret;
uint8_t a, b;
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2360,7 +2370,7 @@ static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
bool crosspage;
uint32_t ret;
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2381,7 +2391,7 @@ static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
bool crosspage;
uint64_t ret;
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2404,7 +2414,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
Int128 ret;
int first;
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l);
if (likely(!crosspage)) {
if (unlikely(l.page[0].flags & TLB_MMIO)) {
@@ -2732,7 +2742,7 @@ static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
MMULookupLocals l;
bool crosspage;
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
tcg_debug_assert(!crosspage);
@@ -2746,7 +2756,7 @@ static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
bool crosspage;
uint8_t a, b;
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2768,7 +2778,7 @@ static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
MMULookupLocals l;
bool crosspage;
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2789,7 +2799,7 @@ static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
MMULookupLocals l;
bool crosspage;
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2812,7 +2822,7 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
uint64_t a, b;
int first;
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
if (unlikely(l.page[0].flags & TLB_MMIO)) {
@@ -2897,54 +2907,45 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
/* Code access functions. */
-uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
-{
- CPUState *cs = env_cpu(env);
- MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(cs, true));
- return do_ld1_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
-}
-
-uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
-{
- CPUState *cs = env_cpu(env);
- MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(cs, true));
- return do_ld2_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
-}
-
-uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
-{
- CPUState *cs = env_cpu(env);
- MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, true));
- return do_ld4_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
-}
-
-uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
-{
- CPUState *cs = env_cpu(env);
- MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(cs, true));
- return do_ld8_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
-}
-
-uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
+uint8_t cpu_ldb_code_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t retaddr)
{
return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}
-uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
+uint16_t cpu_ldw_code_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t retaddr)
{
return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}
-uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
+uint32_t cpu_ldl_code_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t retaddr)
{
return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}
-uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
+uint64_t cpu_ldq_code_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t retaddr)
{
return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}
+
+/*
+ * Common pointer_wrap implementations.
+ */
+
+/*
+ * To be used for strict alignment targets.
+ * Because no accesses are unaligned, no accesses wrap either.
+ */
+vaddr cpu_pointer_wrap_notreached(CPUState *cs, int idx, vaddr res, vaddr base)
+{
+ g_assert_not_reached();
+}
+
+/* To be used for strict 32-bit targets. */
+vaddr cpu_pointer_wrap_uint32(CPUState *cs, int idx, vaddr res, vaddr base)
+{
+ return (uint32_t)res;
+}
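
cpu_pointer_wrap_uint32() above truncates the computed address so that, on a strict 32-bit target, an access crossing the top of the address space wraps back to zero instead of escaping into the full 64-bit vaddr range. A minimal standalone sketch of that effect; the scenario and helper name are illustrative:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t vaddr;

static vaddr pointer_wrap_uint32(vaddr res)
{
    return (uint32_t)res;   /* discard any carry out of bit 31 */
}

int main(void)
{
    vaddr base = 0xfffff000u;           /* last page of a 32-bit space */
    vaddr second_page = base + 0x1000;  /* 0x100000000 if left unwrapped */

    /* The wrapped address is 0: the access lands at the bottom of the
       32-bit address space rather than above it. */
    printf("%#llx\n", (unsigned long long)pointer_wrap_uint32(second_page));
    return 0;
}

cpu_pointer_wrap_notreached() expresses the opposite contract: a strict-alignment target can never split an access across pages, so the hook must never be called.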
diff --git a/accel/tcg/icount-common.c b/accel/tcg/icount-common.c
index 402d3e3..d647117 100644
--- a/accel/tcg/icount-common.c
+++ b/accel/tcg/icount-common.c
@@ -35,7 +35,7 @@
#include "system/replay.h"
#include "system/runstate.h"
#include "hw/core/cpu.h"
-#include "system/cpu-timers.h"
+#include "exec/icount.h"
#include "system/cpu-timers-internal.h"
/*
diff --git a/accel/tcg/internal-common.h b/accel/tcg/internal-common.h
index 9b6ab3a..77a3a06 100644
--- a/accel/tcg/internal-common.h
+++ b/accel/tcg/internal-common.h
@@ -11,6 +11,8 @@
#include "exec/cpu-common.h"
#include "exec/translation-block.h"
+#include "exec/mmap-lock.h"
+#include "accel/tcg/tb-cpu-state.h"
extern int64_t max_delay;
extern int64_t max_advance;
@@ -45,9 +47,7 @@ static inline bool cpu_plugin_mem_cbs_enabled(const CPUState *cpu)
#endif
}
-TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
- uint64_t cs_base, uint32_t flags,
- int cflags);
+TranslationBlock *tb_gen_code(CPUState *cpu, TCGTBCPUState s);
void page_init(void);
void tb_htable_init(void);
void tb_reset_jump(TranslationBlock *tb, int n);
@@ -74,4 +74,71 @@ uint32_t curr_cflags(CPUState *cpu);
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
+/**
+ * get_page_addr_code_hostp()
+ * @env: CPUArchState
+ * @addr: guest virtual address of guest code
+ *
+ * See get_page_addr_code() (full-system version) for documentation on the
+ * return value.
+ *
+ * Sets *@hostp (when @hostp is non-NULL) as follows.
+ * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
+ * to the host address where @addr's content is kept.
+ *
+ * Note: this function can trigger an exception.
+ */
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
+ void **hostp);
+
+/**
+ * get_page_addr_code()
+ * @env: CPUArchState
+ * @addr: guest virtual address of guest code
+ *
+ * If we cannot translate and execute from the entire RAM page, or if
+ * the region is not backed by RAM, returns -1. Otherwise, returns the
+ * ram_addr_t corresponding to the guest code at @addr.
+ *
+ * Note: this function can trigger an exception.
+ */
+static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
+ vaddr addr)
+{
+ return get_page_addr_code_hostp(env, addr, NULL);
+}
+
+/*
+ * Access to the various translation structures needs to be serialised
+ * via locks for consistency. In user-mode emulation, access to the
+ * memory-related structures is protected with mmap_lock.
+ * In !user-mode we use per-page locks.
+ */
+#ifdef CONFIG_USER_ONLY
+#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
+#else
+#define assert_memory_lock()
+#endif
+
+#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
+void assert_no_pages_locked(void);
+#else
+static inline void assert_no_pages_locked(void) { }
+#endif
+
+#ifdef CONFIG_USER_ONLY
+static inline void page_table_config_init(void) { }
+#else
+void page_table_config_init(void);
+#endif
+
+#ifndef CONFIG_USER_ONLY
+G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
+#endif /* CONFIG_USER_ONLY */
+
+void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
+void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
+
+void tcg_dump_stats(GString *buf);
+
#endif
diff --git a/accel/tcg/internal-target.h b/accel/tcg/internal-target.h
deleted file mode 100644
index 2cdf11c..0000000
--- a/accel/tcg/internal-target.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Internal execution defines for qemu (target specific)
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * SPDX-License-Identifier: LGPL-2.1-or-later
- */
-
-#ifndef ACCEL_TCG_INTERNAL_TARGET_H
-#define ACCEL_TCG_INTERNAL_TARGET_H
-
-#include "exec/exec-all.h"
-#include "exec/translation-block.h"
-#include "tb-internal.h"
-#include "tcg-target-mo.h"
-
-/*
- * Access to the various translations structures need to be serialised
- * via locks for consistency. In user-mode emulation access to the
- * memory related structures are protected with mmap_lock.
- * In !user-mode we use per-page locks.
- */
-#ifdef CONFIG_USER_ONLY
-#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
-#else
-#define assert_memory_lock()
-#endif
-
-#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
-void assert_no_pages_locked(void);
-#else
-static inline void assert_no_pages_locked(void) { }
-#endif
-
-#ifdef CONFIG_USER_ONLY
-static inline void page_table_config_init(void) { }
-#else
-void page_table_config_init(void);
-#endif
-
-#ifndef CONFIG_USER_ONLY
-G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
-#endif /* CONFIG_USER_ONLY */
-
-/**
- * tcg_req_mo:
- * @type: TCGBar
- *
- * Filter @type to the barrier that is required for the guest
- * memory ordering vs the host memory ordering. A non-zero
- * result indicates that some barrier is required.
- *
- * If TCG_GUEST_DEFAULT_MO is not defined, assume that the
- * guest requires strict ordering.
- *
- * This is a macro so that it's constant even without optimization.
- */
-#ifdef TCG_GUEST_DEFAULT_MO
-# define tcg_req_mo(type) \
- ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
-#else
-# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
-#endif
-
-/**
- * cpu_req_mo:
- * @type: TCGBar
- *
- * If tcg_req_mo indicates a barrier for @type is required
- * for the guest memory model, issue a host memory barrier.
- */
-#define cpu_req_mo(type) \
- do { \
- if (tcg_req_mo(type)) { \
- smp_mb(); \
- } \
- } while (0)
-
-#endif /* ACCEL_TCG_INTERNAL_H */
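
The deleted header carried the tcg_req_mo()/cpu_req_mo() helpers, which this series reworks into a cpu_req_mo(cpu, type) form (see the cputlb.c hunks above), presumably so the guest memory-ordering mask is obtained at run time rather than from a compile-time TCG_GUEST_DEFAULT_MO. The underlying idea is unchanged: emit a host barrier only for orderings the guest requires and the host does not already provide. A small sketch of that filtering with illustrative stand-in constants, not the real tcg.h definitions:

#include <stdio.h>

/* Illustrative stand-ins for the TCG_MO_* ordering bits. */
#define MO_LD_LD  0x1
#define MO_ST_LD  0x2
#define MO_LD_ST  0x4
#define MO_ST_ST  0x8

/* Orderings the guest ISA guarantees (example: a TSO-like guest). */
#define GUEST_MO (MO_LD_LD | MO_LD_ST | MO_ST_ST)
/* Orderings the host provides for free (example: an x86-like host). */
#define HOST_MO  (MO_LD_LD | MO_LD_ST | MO_ST_ST)

/* Non-zero result: some host barrier is required before the access. */
static int req_mo(int type)
{
    return type & GUEST_MO & ~HOST_MO;
}

int main(void)
{
    /* The ordering requested before loads in the hunks above. */
    printf("barrier needed: %s\n",
           req_mo(MO_LD_LD | MO_ST_LD) ? "yes" : "no");
    return 0;
}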
diff --git a/accel/tcg/ldst_common.c.inc b/accel/tcg/ldst_common.c.inc
index ebbf380..57f3e06 100644
--- a/accel/tcg/ldst_common.c.inc
+++ b/accel/tcg/ldst_common.c.inc
@@ -123,7 +123,7 @@ void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
* Load helpers for cpu_ldst.h
*/
-static void plugin_load_cb(CPUArchState *env, abi_ptr addr,
+static void plugin_load_cb(CPUArchState *env, vaddr addr,
uint64_t value_low,
uint64_t value_high,
MemOpIdx oi)
@@ -135,7 +135,7 @@ static void plugin_load_cb(CPUArchState *env, abi_ptr addr,
}
}
-uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
+uint8_t cpu_ldb_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t ra)
{
uint8_t ret;
@@ -145,7 +145,7 @@ uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
return ret;
}
-uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
+uint16_t cpu_ldw_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
uint16_t ret;
@@ -156,7 +156,7 @@ uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
return ret;
}
-uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
+uint32_t cpu_ldl_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
uint32_t ret;
@@ -167,7 +167,7 @@ uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
return ret;
}
-uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
+uint64_t cpu_ldq_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
uint64_t ret;
@@ -178,7 +178,7 @@ uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
return ret;
}
-Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
+Int128 cpu_ld16_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
Int128 ret;
@@ -193,7 +193,7 @@ Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
* Store helpers for cpu_ldst.h
*/
-static void plugin_store_cb(CPUArchState *env, abi_ptr addr,
+static void plugin_store_cb(CPUArchState *env, vaddr addr,
uint64_t value_low,
uint64_t value_high,
MemOpIdx oi)
@@ -205,14 +205,14 @@ static void plugin_store_cb(CPUArchState *env, abi_ptr addr,
}
}
-void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
+void cpu_stb_mmu(CPUArchState *env, vaddr addr, uint8_t val,
MemOpIdx oi, uintptr_t retaddr)
{
helper_stb_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, val, 0, oi);
}
-void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
+void cpu_stw_mmu(CPUArchState *env, vaddr addr, uint16_t val,
MemOpIdx oi, uintptr_t retaddr)
{
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
@@ -220,7 +220,7 @@ void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
plugin_store_cb(env, addr, val, 0, oi);
}
-void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
+void cpu_stl_mmu(CPUArchState *env, vaddr addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
@@ -228,7 +228,7 @@ void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
plugin_store_cb(env, addr, val, 0, oi);
}
-void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
+void cpu_stq_mmu(CPUArchState *env, vaddr addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
@@ -236,325 +236,10 @@ void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
plugin_store_cb(env, addr, val, 0, oi);
}
-void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
+void cpu_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
MemOpIdx oi, uintptr_t retaddr)
{
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
plugin_store_cb(env, addr, int128_getlo(val), int128_gethi(val), oi);
}
-
-/*
- * Wrappers of the above
- */
-
-uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
- return cpu_ldb_mmu(env, addr, oi, ra);
-}
-
-int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
-}
-
-uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
- return cpu_ldw_mmu(env, addr, oi, ra);
-}
-
-int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
-}
-
-uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
- return cpu_ldl_mmu(env, addr, oi, ra);
-}
-
-uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
- return cpu_ldq_mmu(env, addr, oi, ra);
-}
-
-uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
- return cpu_ldw_mmu(env, addr, oi, ra);
-}
-
-int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
-}
-
-uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
- return cpu_ldl_mmu(env, addr, oi, ra);
-}
-
-uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
- return cpu_ldq_mmu(env, addr, oi, ra);
-}
-
-void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
- cpu_stb_mmu(env, addr, val, oi, ra);
-}
-
-void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
- cpu_stw_mmu(env, addr, val, oi, ra);
-}
-
-void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
- cpu_stl_mmu(env, addr, val, oi, ra);
-}
-
-void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
- cpu_stq_mmu(env, addr, val, oi, ra);
-}
-
-void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
- cpu_stw_mmu(env, addr, val, oi, ra);
-}
-
-void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
- cpu_stl_mmu(env, addr, val, oi, ra);
-}
-
-void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
- int mmu_idx, uintptr_t ra)
-{
- MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
- cpu_stq_mmu(env, addr, val, oi, ra);
-}
-
-/*--------------------------*/
-
-uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- return cpu_ldub_mmuidx_ra(env, addr, mmu_index, ra);
-}
-
-int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- return (int8_t)cpu_ldub_data_ra(env, addr, ra);
-}
-
-uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- return cpu_lduw_be_mmuidx_ra(env, addr, mmu_index, ra);
-}
-
-int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- return (int16_t)cpu_lduw_be_data_ra(env, addr, ra);
-}
-
-uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- return cpu_ldl_be_mmuidx_ra(env, addr, mmu_index, ra);
-}
-
-uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- return cpu_ldq_be_mmuidx_ra(env, addr, mmu_index, ra);
-}
-
-uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- return cpu_lduw_le_mmuidx_ra(env, addr, mmu_index, ra);
-}
-
-int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- return (int16_t)cpu_lduw_le_data_ra(env, addr, ra);
-}
-
-uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- return cpu_ldl_le_mmuidx_ra(env, addr, mmu_index, ra);
-}
-
-uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- return cpu_ldq_le_mmuidx_ra(env, addr, mmu_index, ra);
-}
-
-void cpu_stb_data_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- cpu_stb_mmuidx_ra(env, addr, val, mmu_index, ra);
-}
-
-void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- cpu_stw_be_mmuidx_ra(env, addr, val, mmu_index, ra);
-}
-
-void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- cpu_stl_be_mmuidx_ra(env, addr, val, mmu_index, ra);
-}
-
-void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr,
- uint64_t val, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- cpu_stq_be_mmuidx_ra(env, addr, val, mmu_index, ra);
-}
-
-void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- cpu_stw_le_mmuidx_ra(env, addr, val, mmu_index, ra);
-}
-
-void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- cpu_stl_le_mmuidx_ra(env, addr, val, mmu_index, ra);
-}
-
-void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr,
- uint64_t val, uintptr_t ra)
-{
- int mmu_index = cpu_mmu_index(env_cpu(env), false);
- cpu_stq_le_mmuidx_ra(env, addr, val, mmu_index, ra);
-}
-
-/*--------------------------*/
-
-uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr addr)
-{
- return cpu_ldub_data_ra(env, addr, 0);
-}
-
-int cpu_ldsb_data(CPUArchState *env, abi_ptr addr)
-{
- return (int8_t)cpu_ldub_data(env, addr);
-}
-
-uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr addr)
-{
- return cpu_lduw_be_data_ra(env, addr, 0);
-}
-
-int cpu_ldsw_be_data(CPUArchState *env, abi_ptr addr)
-{
- return (int16_t)cpu_lduw_be_data(env, addr);
-}
-
-uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr addr)
-{
- return cpu_ldl_be_data_ra(env, addr, 0);
-}
-
-uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr addr)
-{
- return cpu_ldq_be_data_ra(env, addr, 0);
-}
-
-uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr addr)
-{
- return cpu_lduw_le_data_ra(env, addr, 0);
-}
-
-int cpu_ldsw_le_data(CPUArchState *env, abi_ptr addr)
-{
- return (int16_t)cpu_lduw_le_data(env, addr);
-}
-
-uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr addr)
-{
- return cpu_ldl_le_data_ra(env, addr, 0);
-}
-
-uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr addr)
-{
- return cpu_ldq_le_data_ra(env, addr, 0);
-}
-
-void cpu_stb_data(CPUArchState *env, abi_ptr addr, uint32_t val)
-{
- cpu_stb_data_ra(env, addr, val, 0);
-}
-
-void cpu_stw_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
-{
- cpu_stw_be_data_ra(env, addr, val, 0);
-}
-
-void cpu_stl_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
-{
- cpu_stl_be_data_ra(env, addr, val, 0);
-}
-
-void cpu_stq_be_data(CPUArchState *env, abi_ptr addr, uint64_t val)
-{
- cpu_stq_be_data_ra(env, addr, val, 0);
-}
-
-void cpu_stw_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
-{
- cpu_stw_le_data_ra(env, addr, val, 0);
-}
-
-void cpu_stl_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
-{
- cpu_stl_le_data_ra(env, addr, val, 0);
-}
-
-void cpu_stq_le_data(CPUArchState *env, abi_ptr addr, uint64_t val)
-{
- cpu_stq_le_data_ra(env, addr, val, 0);
-}
diff --git a/accel/tcg/meson.build b/accel/tcg/meson.build
index 38ff227..575e92b 100644
--- a/accel/tcg/meson.build
+++ b/accel/tcg/meson.build
@@ -1,28 +1,33 @@
-common_ss.add(when: 'CONFIG_TCG', if_true: files(
+if not have_tcg
+ subdir_done()
+endif
+
+tcg_ss = ss.source_set()
+
+tcg_ss.add(files(
+ 'cpu-exec.c',
'cpu-exec-common.c',
'tcg-runtime.c',
'tcg-runtime-gvec.c',
-))
-tcg_specific_ss = ss.source_set()
-tcg_specific_ss.add(files(
- 'tcg-all.c',
- 'cpu-exec.c',
'tb-maint.c',
+ 'tcg-all.c',
'translate-all.c',
'translator.c',
))
-tcg_specific_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c'))
-tcg_specific_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_false: files('user-exec-stub.c'))
if get_option('plugins')
- tcg_specific_ss.add(files('plugin-gen.c'))
+ tcg_ss.add(files('plugin-gen.c'))
endif
-specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_specific_ss)
-specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
- 'cputlb.c',
+user_ss.add_all(tcg_ss)
+system_ss.add_all(tcg_ss)
+
+user_ss.add(files(
+ 'user-exec.c',
+ 'user-exec-stub.c',
))
-system_ss.add(when: ['CONFIG_TCG'], if_true: files(
+system_ss.add(files(
+ 'cputlb.c',
'icount-common.c',
'monitor.c',
'tcg-accel-ops.c',
diff --git a/accel/tcg/monitor.c b/accel/tcg/monitor.c
index eeb38a4..e7ed728 100644
--- a/accel/tcg/monitor.c
+++ b/accel/tcg/monitor.c
@@ -14,6 +14,7 @@
#include "qapi/qapi-commands-machine.h"
#include "monitor/monitor.h"
#include "system/cpu-timers.h"
+#include "exec/icount.h"
#include "system/tcg.h"
#include "tcg/tcg.h"
#include "internal-common.h"
@@ -140,16 +141,26 @@ static void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
*pelide = elide;
}
-static void tcg_dump_info(GString *buf)
+static void tcg_dump_flush_info(GString *buf)
{
- g_string_append_printf(buf, "[TCG profiler not compiled]\n");
+ size_t flush_full, flush_part, flush_elide;
+
+ g_string_append_printf(buf, "TB flush count %u\n",
+ qatomic_read(&tb_ctx.tb_flush_count));
+ g_string_append_printf(buf, "TB invalidate count %u\n",
+ qatomic_read(&tb_ctx.tb_phys_invalidate_count));
+
+ tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
+ g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full);
+ g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part);
+ g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide);
}
static void dump_exec_info(GString *buf)
{
struct tb_tree_stats tst = {};
struct qht_stats hst;
- size_t nb_tbs, flush_full, flush_part, flush_elide;
+ size_t nb_tbs;
tcg_tb_foreach(tb_tree_stats_iter, &tst);
nb_tbs = tst.nb_tbs;
@@ -186,50 +197,26 @@ static void dump_exec_info(GString *buf)
qht_statistics_destroy(&hst);
g_string_append_printf(buf, "\nStatistics:\n");
- g_string_append_printf(buf, "TB flush count %u\n",
- qatomic_read(&tb_ctx.tb_flush_count));
- g_string_append_printf(buf, "TB invalidate count %u\n",
- qatomic_read(&tb_ctx.tb_phys_invalidate_count));
-
- tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
- g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full);
- g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part);
- g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide);
- tcg_dump_info(buf);
+ tcg_dump_flush_info(buf);
}
-HumanReadableText *qmp_x_query_jit(Error **errp)
+void tcg_dump_stats(GString *buf)
{
- g_autoptr(GString) buf = g_string_new("");
-
- if (!tcg_enabled()) {
- error_setg(errp, "JIT information is only available with accel=tcg");
- return NULL;
- }
-
dump_accel_info(buf);
dump_exec_info(buf);
dump_drift_info(buf);
-
- return human_readable_text_from_str(buf);
-}
-
-static void tcg_dump_op_count(GString *buf)
-{
- g_string_append_printf(buf, "[TCG profiler not compiled]\n");
}
-HumanReadableText *qmp_x_query_opcount(Error **errp)
+HumanReadableText *qmp_x_query_jit(Error **errp)
{
g_autoptr(GString) buf = g_string_new("");
if (!tcg_enabled()) {
- error_setg(errp,
- "Opcode count information is only available with accel=tcg");
+ error_setg(errp, "JIT information is only available with accel=tcg");
return NULL;
}
- tcg_dump_op_count(buf);
+ tcg_dump_stats(buf);
return human_readable_text_from_str(buf);
}
@@ -237,7 +224,6 @@ HumanReadableText *qmp_x_query_opcount(Error **errp)
static void hmp_tcg_register(void)
{
monitor_register_hmp_info_hrt("jit", qmp_x_query_jit);
- monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount);
}
type_init(hmp_tcg_register);
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index 7e5f040..9920381 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -22,13 +22,12 @@
#include "qemu/osdep.h"
#include "qemu/plugin.h"
#include "qemu/log.h"
-#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
-#include "tcg/tcg-op.h"
-#include "exec/exec-all.h"
+#include "tcg/tcg-op-common.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"
+#include "exec/translation-block.h"
enum plugin_gen_from {
PLUGIN_GEN_FROM_TB,
@@ -89,15 +88,13 @@ static void gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
qemu_plugin_add_dyn_cb_arr(arr);
tcg_gen_st_ptr(tcg_constant_ptr((intptr_t)arr), tcg_env,
- offsetof(CPUState, neg.plugin_mem_cbs) -
- offsetof(ArchCPU, env));
+ offsetof(CPUState, neg.plugin_mem_cbs) - sizeof(CPUState));
}
static void gen_disable_mem_helper(void)
{
tcg_gen_st_ptr(tcg_constant_ptr(0), tcg_env,
- offsetof(CPUState, neg.plugin_mem_cbs) -
- offsetof(ArchCPU, env));
+ offsetof(CPUState, neg.plugin_mem_cbs) - sizeof(CPUState));
}
static TCGv_i32 gen_cpu_index(void)
@@ -113,17 +110,27 @@ static TCGv_i32 gen_cpu_index(void)
}
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
tcg_gen_ld_i32(cpu_index, tcg_env,
- -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
+ offsetof(CPUState, cpu_index) - sizeof(CPUState));
return cpu_index;
}
static void gen_udata_cb(struct qemu_plugin_regular_cb *cb)
{
TCGv_i32 cpu_index = gen_cpu_index();
+ enum qemu_plugin_cb_flags cb_flags =
+ tcg_call_to_qemu_plugin_cb_flags(cb->info->flags);
+ TCGv_i32 flags = tcg_constant_i32(cb_flags);
+ TCGv_i32 clear_flags = tcg_constant_i32(QEMU_PLUGIN_CB_NO_REGS);
+ tcg_gen_st_i32(flags, tcg_env,
+ offsetof(CPUState, neg.plugin_cb_flags) - sizeof(CPUState));
tcg_gen_call2(cb->f.vcpu_udata, cb->info, NULL,
tcgv_i32_temp(cpu_index),
tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
+ tcg_gen_st_i32(clear_flags, tcg_env,
+ offsetof(CPUState, neg.plugin_cb_flags) - sizeof(CPUState));
tcg_temp_free_i32(cpu_index);
+ tcg_temp_free_i32(flags);
+ tcg_temp_free_i32(clear_flags);
}
static TCGv_ptr gen_plugin_u64_ptr(qemu_plugin_u64 entry)
@@ -176,10 +183,20 @@ static void gen_udata_cond_cb(struct qemu_plugin_conditional_cb *cb)
tcg_gen_ld_i64(val, ptr, 0);
tcg_gen_brcondi_i64(cond, val, cb->imm, after_cb);
TCGv_i32 cpu_index = gen_cpu_index();
+ enum qemu_plugin_cb_flags cb_flags =
+ tcg_call_to_qemu_plugin_cb_flags(cb->info->flags);
+ TCGv_i32 flags = tcg_constant_i32(cb_flags);
+ TCGv_i32 clear_flags = tcg_constant_i32(QEMU_PLUGIN_CB_NO_REGS);
+ tcg_gen_st_i32(flags, tcg_env,
+ offsetof(CPUState, neg.plugin_cb_flags) - sizeof(CPUState));
tcg_gen_call2(cb->f.vcpu_udata, cb->info, NULL,
tcgv_i32_temp(cpu_index),
tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
+ tcg_gen_st_i32(clear_flags, tcg_env,
+ offsetof(CPUState, neg.plugin_cb_flags) - sizeof(CPUState));
tcg_temp_free_i32(cpu_index);
+ tcg_temp_free_i32(flags);
+ tcg_temp_free_i32(clear_flags);
gen_set_label(after_cb);
tcg_temp_free_i64(val);
@@ -213,12 +230,22 @@ static void gen_mem_cb(struct qemu_plugin_regular_cb *cb,
qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
{
TCGv_i32 cpu_index = gen_cpu_index();
+ enum qemu_plugin_cb_flags cb_flags =
+ tcg_call_to_qemu_plugin_cb_flags(cb->info->flags);
+ TCGv_i32 flags = tcg_constant_i32(cb_flags);
+ TCGv_i32 clear_flags = tcg_constant_i32(QEMU_PLUGIN_CB_NO_REGS);
+ tcg_gen_st_i32(flags, tcg_env,
+ offsetof(CPUState, neg.plugin_cb_flags) - sizeof(CPUState));
tcg_gen_call4(cb->f.vcpu_mem, cb->info, NULL,
tcgv_i32_temp(cpu_index),
tcgv_i32_temp(tcg_constant_i32(meminfo)),
tcgv_i64_temp(addr),
tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
+ tcg_gen_st_i32(clear_flags, tcg_env,
+ offsetof(CPUState, neg.plugin_cb_flags) - sizeof(CPUState));
tcg_temp_free_i32(cpu_index);
+ tcg_temp_free_i32(flags);
+ tcg_temp_free_i32(clear_flags);
}
static void inject_cb(struct qemu_plugin_dyn_cb *cb)
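
The plugin-gen.c hunks above switch from addressing CPUState fields as -offsetof(ArchCPU, env) + offsetof(CPUState, field) to offsetof(CPUState, field) - sizeof(CPUState). The two are equal whenever the architectural env is laid out immediately after CPUState, which removes the dependency on the target-specific ArchCPU definition. A toy standalone check of that equivalence; the structures are invented, not QEMU's:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

typedef struct ToyCPUState { int cpu_index; void *plugin_mem_cbs; } ToyCPUState;
typedef struct ToyEnv      { long regs[8]; } ToyEnv;
typedef struct ToyArchCPU  { ToyCPUState parent; ToyEnv env; } ToyArchCPU;

int main(void)
{
    /* Old form: walk back from env to the start of ArchCPU, then forward. */
    ptrdiff_t old_off = -(ptrdiff_t)offsetof(ToyArchCPU, env)
                        + (ptrdiff_t)offsetof(ToyCPUState, cpu_index);
    /* New form: valid whenever env starts exactly at sizeof(CPUState). */
    ptrdiff_t new_off = (ptrdiff_t)offsetof(ToyCPUState, cpu_index)
                        - (ptrdiff_t)sizeof(ToyCPUState);

    assert(offsetof(ToyArchCPU, env) == sizeof(ToyCPUState));
    assert(old_off == new_off);
    printf("offset of cpu_index relative to env: %td\n", new_off);
    return 0;
}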
diff --git a/accel/tcg/tb-hash.h b/accel/tcg/tb-hash.h
index a5382f4..f7b159f 100644
--- a/accel/tcg/tb-hash.h
+++ b/accel/tcg/tb-hash.h
@@ -20,8 +20,8 @@
#ifndef EXEC_TB_HASH_H
#define EXEC_TB_HASH_H
-#include "exec/cpu-defs.h"
-#include "exec/exec-all.h"
+#include "exec/vaddr.h"
+#include "exec/target_page.h"
#include "exec/translation-block.h"
#include "qemu/xxhash.h"
#include "tb-jmp-cache.h"
diff --git a/accel/tcg/tb-internal.h b/accel/tcg/tb-internal.h
index 68aa8d1..40439f0 100644
--- a/accel/tcg/tb-internal.h
+++ b/accel/tcg/tb-internal.h
@@ -9,8 +9,6 @@
#ifndef ACCEL_TCG_TB_INTERNAL_TARGET_H
#define ACCEL_TCG_TB_INTERNAL_TARGET_H
-#include "exec/cpu-all.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
/*
@@ -24,66 +22,34 @@
*/
#define GETPC_ADJ 2
-#ifdef CONFIG_SOFTMMU
-
-#define CPU_TLB_DYN_MIN_BITS 6
-#define CPU_TLB_DYN_DEFAULT_BITS 8
-
-# if HOST_LONG_BITS == 32
-/* Make sure we do not require a double-word shift for the TLB load */
-# define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
-# else /* HOST_LONG_BITS == 64 */
-/*
- * Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) ==
- * 2**34 == 16G of address space. This is roughly what one would expect a
- * TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel
- * Skylake's Level-2 STLB has 16 1G entries.
- * Also, make sure we do not size the TLB past the guest's address space.
- */
-# ifdef TARGET_PAGE_BITS_VARY
-# define CPU_TLB_DYN_MAX_BITS \
- MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
-# else
-# define CPU_TLB_DYN_MAX_BITS \
- MIN_CONST(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
-# endif
-# endif
-
-#endif /* CONFIG_SOFTMMU */
+void tb_lock_page0(tb_page_addr_t);
#ifdef CONFIG_USER_ONLY
-#include "user/page-protection.h"
/*
* For user-only, page_protect sets the page read-only.
* Since most execution is already on read-only pages, and we'd need to
* account for other TBs on the same page, defer undoing any page protection
* until we receive the write fault.
*/
-static inline void tb_lock_page0(tb_page_addr_t p0)
-{
- page_protect(p0);
-}
-
static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
{
- page_protect(p1);
+ tb_lock_page0(p1);
}
static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
static inline void tb_unlock_pages(TranslationBlock *tb) { }
#else
-void tb_lock_page0(tb_page_addr_t);
void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_pages(TranslationBlock *);
#endif
#ifdef CONFIG_SOFTMMU
-void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
- unsigned size,
- uintptr_t retaddr);
+void tb_invalidate_phys_range_fast(CPUState *cpu, ram_addr_t ram_addr,
+ unsigned size, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */
-bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
+bool tb_invalidate_phys_page_unwind(CPUState *cpu, tb_page_addr_t addr,
+ uintptr_t pc);
#endif
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index 3f1bebf..0048316 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -22,9 +22,11 @@
#include "qemu/qtree.h"
#include "exec/cputlb.h"
#include "exec/log.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
+#include "exec/mmap-lock.h"
#include "exec/tb-flush.h"
+#include "exec/target_page.h"
+#include "accel/tcg/cpu-ops.h"
#include "tb-internal.h"
#include "system/tcg.h"
#include "tcg/tcg.h"
@@ -32,7 +34,6 @@
#include "tb-context.h"
#include "tb-internal.h"
#include "internal-common.h"
-#include "internal-target.h"
#ifdef CONFIG_USER_ONLY
#include "user/page-protection.h"
#endif
@@ -156,11 +157,7 @@ static PageForEachNext foreach_tb_next(PageForEachNext tb,
/*
* In system mode we want L1_MAP to be based on ram offsets.
*/
-#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
-# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
-#else
-# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
-#endif
+#define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
/* Size of the L2 (and L3, etc) page tables. */
#define V_L2_BITS 10
@@ -1009,7 +1006,8 @@ TranslationBlock *tb_link_page(TranslationBlock *tb)
* Called with mmap_lock held for user-mode emulation.
* NOTE: this function must not be called while a TB is running.
*/
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
+void tb_invalidate_phys_range(CPUState *cpu, tb_page_addr_t start,
+ tb_page_addr_t last)
{
TranslationBlock *tb;
PageForEachNext n;
@@ -1032,17 +1030,16 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr)
start = addr & TARGET_PAGE_MASK;
last = addr | ~TARGET_PAGE_MASK;
- tb_invalidate_phys_range(start, last);
+ tb_invalidate_phys_range(NULL, start, last);
}
/*
* Called with mmap_lock held. If pc is not 0 then it indicates the
* host PC of the faulting store instruction that caused this invalidate.
- * Returns true if the caller needs to abort execution of the current
- * TB (because it was modified by this store and the guest CPU has
- * precise-SMC semantics).
+ * Returns true if the caller needs to abort execution of the current TB.
*/
-bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
+bool tb_invalidate_phys_page_unwind(CPUState *cpu, tb_page_addr_t addr,
+ uintptr_t pc)
{
TranslationBlock *current_tb;
bool current_tb_modified;
@@ -1054,10 +1051,7 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
* Without precise smc semantics, or when outside of a TB,
* we can skip to invalidate.
*/
-#ifndef TARGET_HAS_PRECISE_SMC
- pc = 0;
-#endif
- if (!pc) {
+ if (!pc || !cpu || !cpu->cc->tcg_ops->precise_smc) {
tb_invalidate_phys_page(addr);
return false;
}
@@ -1080,15 +1074,14 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
* the CPU state.
*/
current_tb_modified = true;
- cpu_restore_state_from_tb(current_cpu, current_tb, pc);
+ cpu_restore_state_from_tb(cpu, current_tb, pc);
}
tb_phys_invalidate__locked(tb);
}
if (current_tb_modified) {
/* Force execution of one insn next time. */
- CPUState *cpu = current_cpu;
- cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
+ cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
return true;
}
return false;
@@ -1097,23 +1090,28 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
/*
* @p must be non-NULL.
* Call with all @pages locked.
+ * (@cpu, @retaddr) may be (NULL, 0) outside of a cpu context,
+ * in which case precise_smc need not be detected.
*/
static void
-tb_invalidate_phys_page_range__locked(struct page_collection *pages,
+tb_invalidate_phys_page_range__locked(CPUState *cpu,
+ struct page_collection *pages,
PageDesc *p, tb_page_addr_t start,
tb_page_addr_t last,
uintptr_t retaddr)
{
TranslationBlock *tb;
PageForEachNext n;
-#ifdef TARGET_HAS_PRECISE_SMC
bool current_tb_modified = false;
- TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
-#endif /* TARGET_HAS_PRECISE_SMC */
+ TranslationBlock *current_tb = NULL;
/* Range may not cross a page. */
tcg_debug_assert(((start ^ last) & TARGET_PAGE_MASK) == 0);
+ if (retaddr && cpu && cpu->cc->tcg_ops->precise_smc) {
+ current_tb = tcg_tb_lookup(retaddr);
+ }
+
/*
* We remove all the TBs in the range [start, last].
* XXX: see if in some cases it could be faster to invalidate all the code
@@ -1131,8 +1129,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK);
}
if (!(tb_last < start || tb_start > last)) {
-#ifdef TARGET_HAS_PRECISE_SMC
- if (current_tb == tb &&
+ if (unlikely(current_tb == tb) &&
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
/*
* If we are modifying the current TB, we must stop
@@ -1142,9 +1139,8 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
* restore the CPU state.
*/
current_tb_modified = true;
- cpu_restore_state_from_tb(current_cpu, current_tb, retaddr);
+ cpu_restore_state_from_tb(cpu, current_tb, retaddr);
}
-#endif /* TARGET_HAS_PRECISE_SMC */
tb_phys_invalidate__locked(tb);
}
}
@@ -1154,15 +1150,13 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
tlb_unprotect_code(start);
}
-#ifdef TARGET_HAS_PRECISE_SMC
- if (current_tb_modified) {
+ if (unlikely(current_tb_modified)) {
page_collection_unlock(pages);
/* Force execution of one insn next time. */
- current_cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
+ cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
mmap_unlock();
- cpu_loop_exit_noexc(current_cpu);
+ cpu_loop_exit_noexc(cpu);
}
-#endif
}
/*
@@ -1172,7 +1166,8 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
* access: the virtual CPU will exit the current TB if code is modified inside
* this TB.
*/
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
+void tb_invalidate_phys_range(CPUState *cpu, tb_page_addr_t start,
+ tb_page_addr_t last)
{
struct page_collection *pages;
tb_page_addr_t index, index_last;
@@ -1191,44 +1186,30 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
page_start = index << TARGET_PAGE_BITS;
page_last = page_start | ~TARGET_PAGE_MASK;
page_last = MIN(page_last, last);
- tb_invalidate_phys_page_range__locked(pages, pd,
+ tb_invalidate_phys_page_range__locked(cpu, pages, pd,
page_start, page_last, 0);
}
page_collection_unlock(pages);
}
/*
- * Call with all @pages in the range [@start, @start + len[ locked.
- */
-static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
- tb_page_addr_t start,
- unsigned len, uintptr_t ra)
-{
- PageDesc *p;
-
- p = page_find(start >> TARGET_PAGE_BITS);
- if (!p) {
- return;
- }
-
- assert_page_locked(p);
- tb_invalidate_phys_page_range__locked(pages, p, start, start + len - 1, ra);
-}
-
-/*
* len must be <= 8 and start must be a multiple of len.
* Called via softmmu_template.h when code areas are written to with
* iothread mutex not held.
*/
-void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
- unsigned size,
- uintptr_t retaddr)
+void tb_invalidate_phys_range_fast(CPUState *cpu, ram_addr_t start,
+ unsigned len, uintptr_t ra)
{
- struct page_collection *pages;
+ PageDesc *p = page_find(start >> TARGET_PAGE_BITS);
- pages = page_collection_lock(ram_addr, ram_addr + size - 1);
- tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
- page_collection_unlock(pages);
+ if (p) {
+ ram_addr_t last = start + len - 1;
+ struct page_collection *pages = page_collection_lock(start, last);
+
+ tb_invalidate_phys_page_range__locked(cpu, pages, p,
+ start, last, ra);
+ page_collection_unlock(pages);
+ }
}
#endif /* CONFIG_USER_ONLY */
diff --git a/accel/tcg/tcg-accel-ops-icount.c b/accel/tcg/tcg-accel-ops-icount.c
index 27cf104..d0f7b41 100644
--- a/accel/tcg/tcg-accel-ops-icount.c
+++ b/accel/tcg/tcg-accel-ops-icount.c
@@ -25,7 +25,7 @@
#include "qemu/osdep.h"
#include "system/replay.h"
-#include "system/cpu-timers.h"
+#include "exec/icount.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "hw/core/cpu.h"
diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
index bdcc385..dfcee30 100644
--- a/accel/tcg/tcg-accel-ops-mttcg.c
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
@@ -26,7 +26,7 @@
#include "qemu/osdep.h"
#include "system/tcg.h"
#include "system/replay.h"
-#include "system/cpu-timers.h"
+#include "exec/icount.h"
#include "qemu/main-loop.h"
#include "qemu/notify.h"
#include "qemu/guest-random.h"
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index f62cf24..6eec5c9 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -27,7 +27,7 @@
#include "qemu/lockable.h"
#include "system/tcg.h"
#include "system/replay.h"
-#include "system/cpu-timers.h"
+#include "exec/icount.h"
#include "qemu/main-loop.h"
#include "qemu/notify.h"
#include "qemu/guest-random.h"
diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c
index d9b662e..37b4b21 100644
--- a/accel/tcg/tcg-accel-ops.c
+++ b/accel/tcg/tcg-accel-ops.c
@@ -29,7 +29,7 @@
#include "system/accel-ops.h"
#include "system/tcg.h"
#include "system/replay.h"
-#include "system/cpu-timers.h"
+#include "exec/icount.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "qemu/timer.h"
@@ -37,6 +37,7 @@
#include "exec/hwaddr.h"
#include "exec/tb-flush.h"
#include "exec/translation-block.h"
+#include "exec/watchpoint.h"
#include "gdbstub/enums.h"
#include "hw/core/cpu.h"
@@ -92,8 +93,6 @@ static void tcg_cpu_reset_hold(CPUState *cpu)
/* mask must never be zero, except for A20 change call */
void tcg_handle_interrupt(CPUState *cpu, int mask)
{
- g_assert(bql_locked());
-
cpu->interrupt_request |= mask;
/*
@@ -197,8 +196,10 @@ static inline void tcg_remove_all_breakpoints(CPUState *cpu)
cpu_watchpoint_remove_all(cpu, BP_GDB);
}
-static void tcg_accel_ops_init(AccelOpsClass *ops)
+static void tcg_accel_ops_init(AccelClass *ac)
{
+ AccelOpsClass *ops = ac->ops;
+
if (qemu_tcg_mttcg_enabled()) {
ops->create_vcpu_thread = mttcg_start_vcpu_thread;
ops->kick_vcpu_thread = mttcg_kick_vcpu_thread;
@@ -223,7 +224,7 @@ static void tcg_accel_ops_init(AccelOpsClass *ops)
ops->remove_all_breakpoints = tcg_remove_all_breakpoints;
}
-static void tcg_accel_ops_class_init(ObjectClass *oc, void *data)
+static void tcg_accel_ops_class_init(ObjectClass *oc, const void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
index c1a30b0..5904582 100644
--- a/accel/tcg/tcg-all.c
+++ b/accel/tcg/tcg-all.c
@@ -26,27 +26,27 @@
#include "qemu/osdep.h"
#include "system/tcg.h"
#include "exec/replay-core.h"
-#include "system/cpu-timers.h"
+#include "exec/icount.h"
#include "tcg/startup.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/accel.h"
#include "qemu/atomic.h"
+#include "qapi/qapi-types-common.h"
#include "qapi/qapi-builtin-visit.h"
#include "qemu/units.h"
-#if defined(CONFIG_USER_ONLY)
-#include "hw/qdev-core.h"
-#else
+#include "qemu/target-info.h"
+#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
#endif
+#include "accel/tcg/cpu-ops.h"
#include "internal-common.h"
-#include "cpu-param.h"
struct TCGState {
AccelState parent_obj;
- bool mttcg_enabled;
+ OnOffAuto mttcg_enabled;
bool one_insn_per_tb;
int splitwx_enabled;
unsigned long tb_size;
@@ -58,40 +58,18 @@ typedef struct TCGState TCGState;
DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE,
TYPE_TCG_ACCEL)
-/*
- * We default to false if we know other options have been enabled
- * which are currently incompatible with MTTCG. Otherwise when each
- * guest (target) has been updated to support:
- * - atomic instructions
- * - memory ordering primitives (barriers)
- * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
- *
- * Once a guest architecture has been converted to the new primitives
- * there is one remaining limitation to check:
- * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
- */
-
-static bool default_mttcg_enabled(void)
+#ifndef CONFIG_USER_ONLY
+bool qemu_tcg_mttcg_enabled(void)
{
- if (icount_enabled()) {
- return false;
- }
-#ifdef TARGET_SUPPORTS_MTTCG
-# ifndef TCG_GUEST_DEFAULT_MO
-# error "TARGET_SUPPORTS_MTTCG without TCG_GUEST_DEFAULT_MO"
-# endif
- return true;
-#else
- return false;
-#endif
+ TCGState *s = TCG_STATE(current_accel());
+ return s->mttcg_enabled == ON_OFF_AUTO_ON;
}
+#endif /* !CONFIG_USER_ONLY */
static void tcg_accel_instance_init(Object *obj)
{
TCGState *s = TCG_STATE(obj);
- s->mttcg_enabled = default_mttcg_enabled();
-
/* If debugging enabled, default "auto on", otherwise off. */
#if defined(CONFIG_DEBUG_TCG) && !defined(CONFIG_USER_ONLY)
s->splitwx_enabled = -1;
@@ -100,24 +78,57 @@ static void tcg_accel_instance_init(Object *obj)
#endif
}
-bool mttcg_enabled;
bool one_insn_per_tb;
-static int tcg_init_machine(MachineState *ms)
+static int tcg_init_machine(AccelState *as, MachineState *ms)
{
- TCGState *s = TCG_STATE(current_accel());
-#ifdef CONFIG_USER_ONLY
- unsigned max_cpus = 1;
-#else
- unsigned max_cpus = ms->smp.max_cpus;
+ TCGState *s = TCG_STATE(as);
+ unsigned max_threads = 1;
+
+#ifndef CONFIG_USER_ONLY
+ CPUClass *cc = CPU_CLASS(object_class_by_name(target_cpu_type()));
+ bool mttcg_supported = cc->tcg_ops->mttcg_supported;
+
+ switch (s->mttcg_enabled) {
+ case ON_OFF_AUTO_AUTO:
+ /*
+ * We default to false if we know other options have been enabled
+ * which are currently incompatible with MTTCG. Otherwise when each
+ * guest (target) has been updated to support:
+ * - atomic instructions
+ * - memory ordering primitives (barriers)
+ * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
+ *
+ * Once a guest architecture has been converted to the new primitives
+ * there is one remaining limitation to check:
+ * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
+ */
+ if (mttcg_supported && !icount_enabled()) {
+ s->mttcg_enabled = ON_OFF_AUTO_ON;
+ max_threads = ms->smp.max_cpus;
+ } else {
+ s->mttcg_enabled = ON_OFF_AUTO_OFF;
+ }
+ break;
+ case ON_OFF_AUTO_ON:
+ if (!mttcg_supported) {
+ warn_report("Guest not yet converted to MTTCG - "
+ "you may get unexpected results");
+ }
+ max_threads = ms->smp.max_cpus;
+ break;
+ case ON_OFF_AUTO_OFF:
+ break;
+ default:
+ g_assert_not_reached();
+ }
#endif
tcg_allowed = true;
- mttcg_enabled = s->mttcg_enabled;
page_init();
tb_htable_init();
- tcg_init(s->tb_size * MiB, s->splitwx_enabled, max_cpus);
+ tcg_init(s->tb_size * MiB, s->splitwx_enabled, max_threads);
#if defined(CONFIG_SOFTMMU)
/*
@@ -138,7 +149,7 @@ static char *tcg_get_thread(Object *obj, Error **errp)
{
TCGState *s = TCG_STATE(obj);
- return g_strdup(s->mttcg_enabled ? "multi" : "single");
+ return g_strdup(s->mttcg_enabled == ON_OFF_AUTO_ON ? "multi" : "single");
}
static void tcg_set_thread(Object *obj, const char *value, Error **errp)
@@ -149,14 +160,10 @@ static void tcg_set_thread(Object *obj, const char *value, Error **errp)
if (icount_enabled()) {
error_setg(errp, "No MTTCG when icount is enabled");
} else {
-#ifndef TARGET_SUPPORTS_MTTCG
- warn_report("Guest not yet converted to MTTCG - "
- "you may get unexpected results");
-#endif
- s->mttcg_enabled = true;
+ s->mttcg_enabled = ON_OFF_AUTO_ON;
}
} else if (strcmp(value, "single") == 0) {
- s->mttcg_enabled = false;
+ s->mttcg_enabled = ON_OFF_AUTO_OFF;
} else {
error_setg(errp, "Invalid 'thread' setting %s", value);
}
@@ -212,7 +219,7 @@ static void tcg_set_one_insn_per_tb(Object *obj, bool value, Error **errp)
qatomic_set(&one_insn_per_tb, value);
}
-static int tcg_gdbstub_supported_sstep_flags(void)
+static int tcg_gdbstub_supported_sstep_flags(AccelState *as)
{
/*
* In replay mode all events will come from the log and can't be
@@ -227,7 +234,7 @@ static int tcg_gdbstub_supported_sstep_flags(void)
}
}
-static void tcg_accel_class_init(ObjectClass *oc, void *data)
+static void tcg_accel_class_init(ObjectClass *oc, const void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "tcg";
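The tcg_init_machine() hunk above replaces the old boolean mttcg_enabled with an OnOffAuto tri-state: AUTO enables MTTCG only when the CPU class reports tcg_ops->mttcg_supported and icount is off, an explicit "multi" is honoured (with a warning if unsupported), and "single" forces it off. A minimal standalone sketch of that resolution pattern; resolve_mttcg() and its parameters are illustrative only and not part of this patch:

    #include <stdbool.h>
    #include <assert.h>

    typedef enum { ON_OFF_AUTO_AUTO, ON_OFF_AUTO_ON, ON_OFF_AUTO_OFF } OnOffAuto;

    /* Resolve the tri-state: AUTO picks MTTCG only when the CPU class
     * supports it and icount is not in use. */
    static bool resolve_mttcg(OnOffAuto requested, bool mttcg_supported,
                              bool icount_enabled)
    {
        switch (requested) {
        case ON_OFF_AUTO_AUTO:
            return mttcg_supported && !icount_enabled;
        case ON_OFF_AUTO_ON:
            return true;   /* honoured even if unsupported, after a warning */
        case ON_OFF_AUTO_OFF:
            return false;
        default:
            assert(0);
            return false;
        }
    }
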
diff --git a/accel/tcg/tlb-bounds.h b/accel/tcg/tlb-bounds.h
new file mode 100644
index 0000000..f83d9ac
--- /dev/null
+++ b/accel/tcg/tlb-bounds.h
@@ -0,0 +1,13 @@
+/*
+ * softmmu size bounds
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_TLB_BOUNDS_H
+#define ACCEL_TCG_TLB_BOUNDS_H
+
+#define CPU_TLB_DYN_MIN_BITS 6
+#define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
+#define CPU_TLB_DYN_DEFAULT_BITS 8
+
+#endif /* ACCEL_TCG_TLB_BOUNDS_H */
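tlb-bounds.h only pins the legal window for the dynamic softmmu TLB index width; the entry count is 1 << bits, so with 4 KiB target pages the window runs from 64 entries up to 2^20, with a 256-entry default. A throwaway sketch of clamping a resize request into that window; clamp_tlb_bits() is illustrative only, the real resize logic lives in cputlb.c:

    /* Illustrative only: force a requested index width into the legal range.
     * The real upper bound is 32 - TARGET_PAGE_BITS, passed in by the caller. */
    #define CPU_TLB_DYN_MIN_BITS     6
    #define CPU_TLB_DYN_DEFAULT_BITS 8

    static inline unsigned clamp_tlb_bits(unsigned want, unsigned max_bits)
    {
        if (want < CPU_TLB_DYN_MIN_BITS) {
            return CPU_TLB_DYN_MIN_BITS;
        }
        if (want > max_bits) {
            return max_bits;
        }
        return want;
    }
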
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 82bc16b..d468667 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -21,49 +21,20 @@
#include "trace.h"
#include "disas/disas.h"
-#include "exec/exec-all.h"
#include "tcg/tcg.h"
-#if defined(CONFIG_USER_ONLY)
-#include "qemu.h"
-#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
-#include <sys/param.h>
-#if __FreeBSD_version >= 700104
-#define HAVE_KINFO_GETVMMAP
-#define sigqueue sigqueue_freebsd /* avoid redefinition */
-#include <sys/proc.h>
-#include <machine/profile.h>
-#define _KERNEL
-#include <sys/user.h>
-#undef _KERNEL
-#undef sigqueue
-#include <libutil.h>
-#endif
-#endif
-#else
-#include "exec/ram_addr.h"
-#endif
-
-#include "exec/cputlb.h"
-#include "exec/page-protection.h"
+#include "exec/mmap-lock.h"
#include "tb-internal.h"
-#include "exec/translator.h"
#include "exec/tb-flush.h"
-#include "qemu/bitmap.h"
-#include "qemu/qemu-print.h"
-#include "qemu/main-loop.h"
#include "qemu/cacheinfo.h"
-#include "qemu/timer.h"
+#include "qemu/target-info.h"
#include "exec/log.h"
-#include "system/cpu-timers.h"
-#include "system/tcg.h"
-#include "qapi/error.h"
+#include "exec/icount.h"
#include "accel/tcg/cpu-ops.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "tb-internal.h"
#include "internal-common.h"
-#include "internal-target.h"
#include "tcg/perf.h"
#include "tcg/insn-start-words.h"
@@ -106,7 +77,7 @@ static int64_t decode_sleb128(const uint8_t **pp)
val |= (int64_t)(byte & 0x7f) << shift;
shift += 7;
} while (byte & 0x80);
- if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
+ if (shift < 64 && (byte & 0x40)) {
val |= -(int64_t)1 << shift;
}
@@ -117,7 +88,7 @@ static int64_t decode_sleb128(const uint8_t **pp)
/* Encode the data collected about the instructions while compiling TB.
Place the data at BLOCK, and return the number of bytes consumed.
- The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
+ The logical table consists of INSN_START_WORDS uint64_t's,
which come from the target's insn_start data, followed by a uintptr_t
which comes from the host pc of the end of the code implementing the insn.
@@ -137,13 +108,13 @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
for (i = 0, n = tb->icount; i < n; ++i) {
uint64_t prev, curr;
- for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
+ for (j = 0; j < INSN_START_WORDS; ++j) {
if (i == 0) {
prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
} else {
- prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j];
+ prev = insn_data[(i - 1) * INSN_START_WORDS + j];
}
- curr = insn_data[i * TARGET_INSN_START_WORDS + j];
+ curr = insn_data[i * INSN_START_WORDS + j];
p = encode_sleb128(p, curr - prev);
}
prev = (i == 0 ? 0 : insn_end_off[i - 1]);
@@ -175,7 +146,7 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
return -1;
}
- memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS);
+ memset(data, 0, sizeof(uint64_t) * INSN_START_WORDS);
if (!(tb_cflags(tb) & CF_PCREL)) {
data[0] = tb->pc;
}
@@ -185,7 +156,7 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
* at which the end of the insn exceeds host_pc.
*/
for (i = 0; i < num_insns; ++i) {
- for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
+ for (j = 0; j < INSN_START_WORDS; ++j) {
data[j] += decode_sleb128(&p);
}
iter_pc += decode_sleb128(&p);
@@ -203,7 +174,7 @@ static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t host_pc)
{
- uint64_t data[TARGET_INSN_START_WORDS];
+ uint64_t data[INSN_START_WORDS];
int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);
if (insns_left < 0) {
@@ -287,9 +258,7 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
}
/* Called with mmap_lock held for user mode emulation. */
-TranslationBlock *tb_gen_code(CPUState *cpu,
- vaddr pc, uint64_t cs_base,
- uint32_t flags, int cflags)
+TranslationBlock *tb_gen_code(CPUState *cpu, TCGTBCPUState s)
{
CPUArchState *env = cpu_env(cpu);
TranslationBlock *tb, *existing_tb;
@@ -302,14 +271,14 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
assert_memory_lock();
qemu_thread_jit_write();
- phys_pc = get_page_addr_code_hostp(env, pc, &host_pc);
+ phys_pc = get_page_addr_code_hostp(env, s.pc, &host_pc);
if (phys_pc == -1) {
/* Generate a one-shot TB with 1 insn in it */
- cflags = (cflags & ~CF_COUNT_MASK) | 1;
+ s.cflags = (s.cflags & ~CF_COUNT_MASK) | 1;
}
- max_insns = cflags & CF_COUNT_MASK;
+ max_insns = s.cflags & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = TCG_MAX_INSNS;
}
@@ -329,12 +298,12 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
gen_code_buf = tcg_ctx->code_gen_ptr;
tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
- if (!(cflags & CF_PCREL)) {
- tb->pc = pc;
+ if (!(s.cflags & CF_PCREL)) {
+ tb->pc = s.pc;
}
- tb->cs_base = cs_base;
- tb->flags = flags;
- tb->cflags = cflags;
+ tb->cs_base = s.cs_base;
+ tb->flags = s.flags;
+ tb->cflags = s.cflags;
tb_set_page_addr0(tb, phys_pc);
tb_set_page_addr1(tb, -1);
if (phys_pc != -1) {
@@ -342,23 +311,13 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
}
tcg_ctx->gen_tb = tb;
- tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
-#ifdef CONFIG_SOFTMMU
- tcg_ctx->page_bits = TARGET_PAGE_BITS;
- tcg_ctx->page_mask = TARGET_PAGE_MASK;
- tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
-#endif
- tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
-#ifdef TCG_GUEST_DEFAULT_MO
- tcg_ctx->guest_mo = TCG_GUEST_DEFAULT_MO;
-#else
- tcg_ctx->guest_mo = TCG_MO_ALL;
-#endif
+ tcg_ctx->addr_type = target_long_bits() == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
+ tcg_ctx->guest_mo = cpu->cc->tcg_ops->guest_default_memory_order;
restart_translate:
- trace_translate_block(tb, pc, tb->tc.ptr);
+ trace_translate_block(tb, s.pc, tb->tc.ptr);
- gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
+ gen_code_size = setjmp_gen_code(env, tb, s.pc, host_pc, &max_insns, &ti);
if (unlikely(gen_code_size < 0)) {
switch (gen_code_size) {
case -1:
@@ -435,10 +394,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
* For CF_PCREL, attribute all executions of the generated code
* to its first mapping.
*/
- perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));
+ perf_report_code(s.pc, tb, tcg_splitwx_to_rx(gen_code_buf));
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
- qemu_log_in_addr_range(pc)) {
+ qemu_log_in_addr_range(s.pc)) {
FILE *logfile = qemu_log_trylock();
if (logfile) {
int code_size, data_size;
@@ -460,7 +419,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
fprintf(logfile,
" -- guest addr 0x%016" PRIx64 " + tb prologue\n",
- tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
+ tcg_ctx->gen_insn_data[insn * INSN_START_WORDS]);
chunk_start = tcg_ctx->gen_insn_end_off[insn];
disas(logfile, tb->tc.ptr, chunk_start);
@@ -473,7 +432,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
if (chunk_end > chunk_start) {
fprintf(logfile, " -- guest addr 0x%016" PRIx64 "\n",
- tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
+ tcg_ctx->gen_insn_data[insn * INSN_START_WORDS]);
disas(logfile, tb->tc.ptr + chunk_start,
chunk_end - chunk_start);
chunk_start = chunk_end;
@@ -591,15 +550,11 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
/* The exception probably happened in a helper. The CPU state should
have been saved before calling it. Fetch the PC from there. */
CPUArchState *env = cpu_env(cpu);
- vaddr pc;
- uint64_t cs_base;
- tb_page_addr_t addr;
- uint32_t flags;
+ TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
+ tb_page_addr_t addr = get_page_addr_code(env, s.pc);
- cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- addr = get_page_addr_code(env, pc);
if (addr != -1) {
- tb_invalidate_phys_range(addr, addr);
+ tb_invalidate_phys_range(cpu, addr, addr);
}
}
}
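The encode_search()/decode_sleb128() hunks above switch the per-insn unwind table from TARGET_INSN_START_WORDS target_ulong values to a fixed INSN_START_WORDS of uint64_t, but the encoding itself is unchanged: every word is stored as a signed-LEB128 delta from the previous instruction's word, so most entries fit in one byte. A self-contained sketch of that round trip, mirroring the helpers in translate-all.c and shown here only for illustration:

    #include <stdint.h>
    #include <stdbool.h>

    static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
    {
        bool more;

        do {
            uint8_t byte = val & 0x7f;

            val >>= 7;                  /* arithmetic shift, keeps the sign */
            more = !((val == 0 && !(byte & 0x40)) ||
                     (val == -1 && (byte & 0x40)));
            if (more) {
                byte |= 0x80;           /* continuation bit */
            }
            *p++ = byte;
        } while (more);

        return p;
    }

    static int64_t decode_sleb128(const uint8_t **pp)
    {
        const uint8_t *p = *pp;
        int64_t val = 0;
        int shift = 0;
        uint8_t byte;

        do {
            byte = *p++;
            val |= (int64_t)(byte & 0x7f) << shift;
            shift += 7;
        } while (byte & 0x80);
        if (shift < 64 && (byte & 0x40)) {
            val |= -(int64_t)1 << shift;   /* sign-extend the final byte */
        }

        *pp = p;
        return val;
    }
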
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
index ef1538b..034f2f3 100644
--- a/accel/tcg/translator.c
+++ b/accel/tcg/translator.c
@@ -8,16 +8,16 @@
*/
#include "qemu/osdep.h"
+#include "qemu/bswap.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/cpu-ldst-common.h"
+#include "accel/tcg/cpu-mmu-index.h"
+#include "exec/target_page.h"
#include "exec/translator.h"
-#include "exec/cpu_ldst.h"
#include "exec/plugin-gen.h"
-#include "exec/cpu_ldst.h"
-#include "exec/tswap.h"
#include "tcg/tcg-op-common.h"
-#include "internal-target.h"
+#include "internal-common.h"
#include "disas/disas.h"
#include "tb-internal.h"
@@ -25,8 +25,7 @@ static void set_can_do_io(DisasContextBase *db, bool val)
{
QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1);
tcg_gen_st8_i32(tcg_constant_i32(val), tcg_env,
- offsetof(ArchCPU, parent_obj.neg.can_do_io) -
- offsetof(ArchCPU, env));
+ offsetof(CPUState, neg.can_do_io) - sizeof(CPUState));
}
bool translator_io_start(DisasContextBase *db)
@@ -49,8 +48,8 @@ static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
if ((cflags & CF_USE_ICOUNT) || !(cflags & CF_NOIRQ)) {
count = tcg_temp_new_i32();
tcg_gen_ld_i32(count, tcg_env,
- offsetof(ArchCPU, parent_obj.neg.icount_decr.u32)
- - offsetof(ArchCPU, env));
+ offsetof(CPUState, neg.icount_decr.u32) -
+ sizeof(CPUState));
}
if (cflags & CF_USE_ICOUNT) {
@@ -79,8 +78,8 @@ static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
if (cflags & CF_USE_ICOUNT) {
tcg_gen_st16_i32(count, tcg_env,
- offsetof(ArchCPU, parent_obj.neg.icount_decr.u16.low)
- - offsetof(ArchCPU, env));
+ offsetof(CPUState, neg.icount_decr.u16.low) -
+ sizeof(CPUState));
}
return icount_start_insn;
@@ -142,6 +141,7 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
db->host_addr[1] = NULL;
db->record_start = 0;
db->record_len = 0;
+ db->code_mmuidx = cpu_mmu_index(cpu, true);
ops->init_disas_context(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
@@ -265,12 +265,14 @@ static bool translator_ld(CPUArchState *env, DisasContextBase *db,
if (likely(((base ^ last) & TARGET_PAGE_MASK) == 0)) {
/* Entire read is from the first page. */
- memcpy(dest, host + (pc - base), len);
- return true;
+ goto do_read;
}
if (unlikely(((base ^ pc) & TARGET_PAGE_MASK) == 0)) {
- /* Read begins on the first page and extends to the second. */
+ /*
+ * Read begins on the first page and extends to the second.
+ * The unaligned read is never atomic.
+ */
size_t len0 = -(pc | TARGET_PAGE_MASK);
memcpy(dest, host + (pc - base), len0);
pc += len0;
@@ -329,7 +331,39 @@ static bool translator_ld(CPUArchState *env, DisasContextBase *db,
host = db->host_addr[1];
}
- memcpy(dest, host + (pc - base), len);
+ do_read:
+ /*
+ * Assume aligned reads should be atomic, if possible.
+ * We're not in a position to jump out with EXCP_ATOMIC.
+ */
+ host += pc - base;
+ switch (len) {
+ case 2:
+ if (QEMU_IS_ALIGNED(pc, 2)) {
+ uint16_t t = qatomic_read((uint16_t *)host);
+ stw_he_p(dest, t);
+ return true;
+ }
+ break;
+ case 4:
+ if (QEMU_IS_ALIGNED(pc, 4)) {
+ uint32_t t = qatomic_read((uint32_t *)host);
+ stl_he_p(dest, t);
+ return true;
+ }
+ break;
+#ifdef CONFIG_ATOMIC64
+ case 8:
+ if (QEMU_IS_ALIGNED(pc, 8)) {
+ uint64_t t = qatomic_read__nocheck((uint64_t *)host);
+ stq_he_p(dest, t);
+ return true;
+ }
+ break;
+#endif
+ }
+ /* Unaligned or partial read from the second page is not atomic. */
+ memcpy(dest, host, len);
return true;
}
@@ -423,55 +457,62 @@ bool translator_st(const DisasContextBase *db, void *dest,
uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, vaddr pc)
{
- uint8_t raw;
+ uint8_t val;
- if (!translator_ld(env, db, &raw, pc, sizeof(raw))) {
- raw = cpu_ldub_code(env, pc);
- record_save(db, pc, &raw, sizeof(raw));
+ if (!translator_ld(env, db, &val, pc, sizeof(val))) {
+ MemOpIdx oi = make_memop_idx(MO_UB, db->code_mmuidx);
+ val = cpu_ldb_code_mmu(env, pc, oi, 0);
+ record_save(db, pc, &val, sizeof(val));
}
- return raw;
+ return val;
}
-uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, vaddr pc)
+uint16_t translator_lduw_end(CPUArchState *env, DisasContextBase *db,
+ vaddr pc, MemOp endian)
{
- uint16_t raw, tgt;
+ uint16_t val;
- if (translator_ld(env, db, &raw, pc, sizeof(raw))) {
- tgt = tswap16(raw);
- } else {
- tgt = cpu_lduw_code(env, pc);
- raw = tswap16(tgt);
- record_save(db, pc, &raw, sizeof(raw));
+ if (!translator_ld(env, db, &val, pc, sizeof(val))) {
+ MemOpIdx oi = make_memop_idx(MO_UW, db->code_mmuidx);
+ val = cpu_ldw_code_mmu(env, pc, oi, 0);
+ record_save(db, pc, &val, sizeof(val));
+ }
+ if (endian & MO_BSWAP) {
+ val = bswap16(val);
}
- return tgt;
+ return val;
}
-uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, vaddr pc)
+uint32_t translator_ldl_end(CPUArchState *env, DisasContextBase *db,
+ vaddr pc, MemOp endian)
{
- uint32_t raw, tgt;
+ uint32_t val;
- if (translator_ld(env, db, &raw, pc, sizeof(raw))) {
- tgt = tswap32(raw);
- } else {
- tgt = cpu_ldl_code(env, pc);
- raw = tswap32(tgt);
- record_save(db, pc, &raw, sizeof(raw));
+ if (!translator_ld(env, db, &val, pc, sizeof(val))) {
+ MemOpIdx oi = make_memop_idx(MO_UL, db->code_mmuidx);
+ val = cpu_ldl_code_mmu(env, pc, oi, 0);
+ record_save(db, pc, &val, sizeof(val));
+ }
+ if (endian & MO_BSWAP) {
+ val = bswap32(val);
}
- return tgt;
+ return val;
}
-uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc)
+uint64_t translator_ldq_end(CPUArchState *env, DisasContextBase *db,
+ vaddr pc, MemOp endian)
{
- uint64_t raw, tgt;
+ uint64_t val;
- if (translator_ld(env, db, &raw, pc, sizeof(raw))) {
- tgt = tswap64(raw);
- } else {
- tgt = cpu_ldq_code(env, pc);
- raw = tswap64(tgt);
- record_save(db, pc, &raw, sizeof(raw));
+ if (!translator_ld(env, db, &val, pc, sizeof(val))) {
+ MemOpIdx oi = make_memop_idx(MO_UQ, db->code_mmuidx);
+ val = cpu_ldq_code_mmu(env, pc, oi, 0);
+ record_save(db, pc, &val, sizeof(val));
+ }
+ if (endian & MO_BSWAP) {
+ val = bswap64(val);
}
- return tgt;
+ return val;
}
void translator_fake_ld(DisasContextBase *db, const void *data, size_t len)
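The do_read path added above prefers an atomic load for naturally aligned 2/4/8-byte code fetches and falls back to memcpy otherwise, and the new translator_ld*_end() variants byte-swap only when the requested MemOp carries MO_BSWAP. A minimal user-space sketch of the same idea for the 4-byte case, assuming the host mapping preserves the guest's low address bits (true for page-aligned mappings); the GCC/Clang builtins stand in for qemu's qatomic_read() and bswap32():

    #include <stdint.h>
    #include <stdbool.h>
    #include <string.h>

    /* Illustrative only: fetch 4 bytes of guest code at host address 'host'.
     * An aligned fetch uses an atomic load so a racing writer cannot produce
     * a torn value; an unaligned or page-crossing fetch is never atomic and
     * falls back to memcpy.  Swap only if the caller asked for the non-host
     * byte order. */
    static uint32_t code_fetch4(const void *host, uint64_t guest_pc, bool bswap)
    {
        uint32_t val;

        if ((guest_pc & 3) == 0) {
            val = __atomic_load_n((const uint32_t *)host, __ATOMIC_RELAXED);
        } else {
            memcpy(&val, host, sizeof(val));
        }
        return bswap ? __builtin_bswap32(val) : val;
    }
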
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 2322181..f25d80e 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -20,23 +20,26 @@
#include "accel/tcg/cpu-ops.h"
#include "disas/disas.h"
#include "exec/vaddr.h"
-#include "exec/exec-all.h"
+#include "exec/tlb-flags.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "qemu/rcu.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst-common.h"
+#include "accel/tcg/helper-retaddr.h"
+#include "accel/tcg/probe.h"
#include "user/cpu_loop.h"
+#include "user/guest-host.h"
#include "qemu/main-loop.h"
#include "user/page-protection.h"
#include "exec/page-protection.h"
-#include "exec/helper-proto.h"
+#include "exec/helper-proto-common.h"
#include "qemu/atomic128.h"
#include "qemu/bswap.h"
#include "qemu/int128.h"
#include "trace.h"
#include "tcg/tcg-ldst.h"
+#include "backend-ldst.h"
#include "internal-common.h"
-#include "internal-target.h"
#include "tb-internal.h"
__thread uintptr_t helper_retaddr;
@@ -123,9 +126,9 @@ MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
* guest, we'd end up in an infinite loop of retrying the faulting access.
*/
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
- uintptr_t host_pc, abi_ptr guest_addr)
+ uintptr_t host_pc, vaddr guest_addr)
{
- switch (page_unprotect(guest_addr, host_pc)) {
+ switch (page_unprotect(cpu, guest_addr, host_pc)) {
case 0:
/*
* Fault not caused by a page marked unwritable to protect
@@ -159,7 +162,7 @@ typedef struct PageFlagsNode {
static IntervalTreeRoot pageflags_root;
-static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
+static PageFlagsNode *pageflags_find(vaddr start, vaddr last)
{
IntervalTreeNode *n;
@@ -167,8 +170,7 @@ static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
return n ? container_of(n, PageFlagsNode, itree) : NULL;
}
-static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
- target_ulong last)
+static PageFlagsNode *pageflags_next(PageFlagsNode *p, vaddr start, vaddr last)
{
IntervalTreeNode *n;
@@ -197,13 +199,22 @@ int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
return rc;
}
-static int dump_region(void *priv, target_ulong start,
- target_ulong end, unsigned long prot)
+static int dump_region(void *opaque, vaddr start, vaddr end, int prot)
{
- FILE *f = (FILE *)priv;
+ FILE *f = opaque;
+ uint64_t mask;
+ int width;
- fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
- start, end, end - start,
+ if (guest_addr_max <= UINT32_MAX) {
+ mask = UINT32_MAX, width = 8;
+ } else {
+ mask = UINT64_MAX, width = 16;
+ }
+
+ fprintf(f, "%0*" PRIx64 "-%0*" PRIx64 " %0*" PRIx64 " %c%c%c\n",
+ width, start & mask,
+ width, end & mask,
+ width, (end - start) & mask,
((prot & PAGE_READ) ? 'r' : '-'),
((prot & PAGE_WRITE) ? 'w' : '-'),
((prot & PAGE_EXEC) ? 'x' : '-'));
@@ -213,14 +224,14 @@ static int dump_region(void *priv, target_ulong start,
/* dump memory mappings */
void page_dump(FILE *f)
{
- const int length = sizeof(target_ulong) * 2;
+ int width = guest_addr_max <= UINT32_MAX ? 8 : 16;
fprintf(f, "%-*s %-*s %-*s %s\n",
- length, "start", length, "end", length, "size", "prot");
+ width, "start", width, "end", width, "size", "prot");
walk_memory_regions(f, dump_region);
}
-int page_get_flags(target_ulong address)
+int page_get_flags(vaddr address)
{
PageFlagsNode *p = pageflags_find(address, address);
@@ -243,7 +254,7 @@ int page_get_flags(target_ulong address)
}
/* A subroutine of page_set_flags: insert a new node for [start,last]. */
-static void pageflags_create(target_ulong start, target_ulong last, int flags)
+static void pageflags_create(vaddr start, vaddr last, int flags)
{
PageFlagsNode *p = g_new(PageFlagsNode, 1);
@@ -254,13 +265,13 @@ static void pageflags_create(target_ulong start, target_ulong last, int flags)
}
/* A subroutine of page_set_flags: remove everything in [start,last]. */
-static bool pageflags_unset(target_ulong start, target_ulong last)
+static bool pageflags_unset(vaddr start, vaddr last)
{
bool inval_tb = false;
while (true) {
PageFlagsNode *p = pageflags_find(start, last);
- target_ulong p_last;
+ vaddr p_last;
if (!p) {
break;
@@ -299,8 +310,7 @@ static bool pageflags_unset(target_ulong start, target_ulong last)
* A subroutine of page_set_flags: nothing overlaps [start,last],
* but check adjacent mappings and maybe merge into a single range.
*/
-static void pageflags_create_merge(target_ulong start, target_ulong last,
- int flags)
+static void pageflags_create_merge(vaddr start, vaddr last, int flags)
{
PageFlagsNode *next = NULL, *prev = NULL;
@@ -351,11 +361,11 @@ static void pageflags_create_merge(target_ulong start, target_ulong last,
#define PAGE_STICKY (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)
/* A subroutine of page_set_flags: add flags to [start,last]. */
-static bool pageflags_set_clear(target_ulong start, target_ulong last,
+static bool pageflags_set_clear(vaddr start, vaddr last,
int set_flags, int clear_flags)
{
PageFlagsNode *p;
- target_ulong p_start, p_last;
+ vaddr p_start, p_last;
int p_flags, merge_flags;
bool inval_tb = false;
@@ -490,7 +500,7 @@ static bool pageflags_set_clear(target_ulong start, target_ulong last,
return inval_tb;
}
-void page_set_flags(target_ulong start, target_ulong last, int flags)
+void page_set_flags(vaddr start, vaddr last, int flags)
{
bool reset = false;
bool inval_tb = false;
@@ -499,7 +509,7 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
guest address space. If this assert fires, it probably indicates
a missing call to h2g_valid. */
assert(start <= last);
- assert(last <= GUEST_ADDR_MAX);
+ assert(last <= guest_addr_max);
/* Only set PAGE_ANON with new mappings. */
assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
assert_memory_lock();
@@ -526,13 +536,13 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
~(reset ? 0 : PAGE_STICKY));
}
if (inval_tb) {
- tb_invalidate_phys_range(start, last);
+ tb_invalidate_phys_range(NULL, start, last);
}
}
-bool page_check_range(target_ulong start, target_ulong len, int flags)
+bool page_check_range(vaddr start, vaddr len, int flags)
{
- target_ulong last;
+ vaddr last;
int locked; /* tri-state: =0: unlocked, +1: global, -1: local */
bool ret;
@@ -581,7 +591,7 @@ bool page_check_range(target_ulong start, target_ulong len, int flags)
break;
}
/* Asking about writable, but has been protected: undo. */
- if (!page_unprotect(start, 0)) {
+ if (!page_unprotect(NULL, start, 0)) {
ret = false;
break;
}
@@ -608,20 +618,19 @@ bool page_check_range(target_ulong start, target_ulong len, int flags)
return ret;
}
-bool page_check_range_empty(target_ulong start, target_ulong last)
+bool page_check_range_empty(vaddr start, vaddr last)
{
assert(last >= start);
assert_memory_lock();
return pageflags_find(start, last) == NULL;
}
-target_ulong page_find_range_empty(target_ulong min, target_ulong max,
- target_ulong len, target_ulong align)
+vaddr page_find_range_empty(vaddr min, vaddr max, vaddr len, vaddr align)
{
- target_ulong len_m1, align_m1;
+ vaddr len_m1, align_m1;
assert(min <= max);
- assert(max <= GUEST_ADDR_MAX);
+ assert(max <= guest_addr_max);
assert(len != 0);
assert(is_power_of_2(align));
assert_memory_lock();
@@ -656,10 +665,10 @@ target_ulong page_find_range_empty(target_ulong min, target_ulong max,
}
}
-void page_protect(tb_page_addr_t address)
+void tb_lock_page0(tb_page_addr_t address)
{
PageFlagsNode *p;
- target_ulong start, last;
+ vaddr start, last;
int host_page_size = qemu_real_host_page_size();
int prot;
@@ -701,11 +710,13 @@ void page_protect(tb_page_addr_t address)
* immediately exited. (We can only return 2 if the 'pc' argument is
* non-zero.)
*/
-int page_unprotect(tb_page_addr_t address, uintptr_t pc)
+int page_unprotect(CPUState *cpu, tb_page_addr_t address, uintptr_t pc)
{
PageFlagsNode *p;
bool current_tb_invalidated;
+ assert((cpu == NULL) == (pc == 0));
+
/*
* Technically this isn't safe inside a signal handler. However we
* know this only ever happens in a synchronous SEGV handler, so in
@@ -728,15 +739,15 @@ int page_unprotect(tb_page_addr_t address, uintptr_t pc)
* this thread raced with another one which got here first and
* set the page to PAGE_WRITE and did the TB invalidate for us.
*/
-#ifdef TARGET_HAS_PRECISE_SMC
- TranslationBlock *current_tb = tcg_tb_lookup(pc);
- if (current_tb) {
- current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
+ if (pc && cpu->cc->tcg_ops->precise_smc) {
+ TranslationBlock *current_tb = tcg_tb_lookup(pc);
+ if (current_tb) {
+ current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
+ }
}
-#endif
} else {
int host_page_size = qemu_real_host_page_size();
- target_ulong start, len, i;
+ vaddr start, len, i;
int prot;
if (host_page_size <= TARGET_PAGE_SIZE) {
@@ -744,14 +755,15 @@ int page_unprotect(tb_page_addr_t address, uintptr_t pc)
len = TARGET_PAGE_SIZE;
prot = p->flags | PAGE_WRITE;
pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
- current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
+ current_tb_invalidated =
+ tb_invalidate_phys_page_unwind(cpu, start, pc);
} else {
start = address & -host_page_size;
len = host_page_size;
prot = 0;
for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
- target_ulong addr = start + i;
+ vaddr addr = start + i;
p = pageflags_find(addr, addr);
if (p) {
@@ -767,7 +779,7 @@ int page_unprotect(tb_page_addr_t address, uintptr_t pc)
* the corresponding translated code.
*/
current_tb_invalidated |=
- tb_invalidate_phys_page_unwind(addr, pc);
+ tb_invalidate_phys_page_unwind(cpu, addr, pc);
}
}
if (prot & PAGE_EXEC) {
@@ -847,6 +859,12 @@ void *probe_access(CPUArchState *env, vaddr addr, int size,
return size ? g2h(env_cpu(env), addr) : NULL;
}
+void *tlb_vaddr_to_host(CPUArchState *env, vaddr addr,
+ MMUAccessType access_type, int mmu_idx)
+{
+ return g2h(env_cpu(env), addr);
+}
+
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
void **hostp)
{
@@ -861,7 +879,6 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
return addr;
}
-#ifdef TARGET_PAGE_DATA_SIZE
/*
* Allocate chunks of target data together. For the only current user,
* if we allocate one hunk per page, we have overhead of 40/128 or 40%.
@@ -877,10 +894,16 @@ typedef struct TargetPageDataNode {
} TargetPageDataNode;
static IntervalTreeRoot targetdata_root;
+static size_t target_page_data_size;
-void page_reset_target_data(target_ulong start, target_ulong last)
+void page_reset_target_data(vaddr start, vaddr last)
{
IntervalTreeNode *n, *next;
+ size_t size = target_page_data_size;
+
+ if (likely(size == 0)) {
+ return;
+ }
assert_memory_lock();
@@ -892,7 +915,7 @@ void page_reset_target_data(target_ulong start, target_ulong last)
n != NULL;
n = next,
next = next ? interval_tree_iter_next(n, start, last) : NULL) {
- target_ulong n_start, n_last, p_ofs, p_len;
+ vaddr n_start, n_last, p_ofs, p_len;
TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);
if (n->start >= start && n->last <= last) {
@@ -911,16 +934,21 @@ void page_reset_target_data(target_ulong start, target_ulong last)
n_last = MIN(last, n->last);
p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;
- memset(t->data + p_ofs * TARGET_PAGE_DATA_SIZE, 0,
- p_len * TARGET_PAGE_DATA_SIZE);
+ memset(t->data + p_ofs * size, 0, p_len * size);
}
}
-void *page_get_target_data(target_ulong address)
+void *page_get_target_data(vaddr address, size_t size)
{
IntervalTreeNode *n;
TargetPageDataNode *t;
- target_ulong page, region, p_ofs;
+ vaddr page, region, p_ofs;
+
+ /* Remember the size from the first call, and it should be constant. */
+ if (unlikely(target_page_data_size != size)) {
+ assert(target_page_data_size == 0);
+ target_page_data_size = size;
+ }
page = address & TARGET_PAGE_MASK;
region = address & TBD_MASK;
@@ -936,8 +964,7 @@ void *page_get_target_data(target_ulong address)
mmap_lock();
n = interval_tree_iter_first(&targetdata_root, page, page);
if (!n) {
- t = g_malloc0(sizeof(TargetPageDataNode)
- + TPD_PAGES * TARGET_PAGE_DATA_SIZE);
+ t = g_malloc0(sizeof(TargetPageDataNode) + TPD_PAGES * size);
n = &t->itree;
n->start = region;
n->last = region | ~TBD_MASK;
@@ -948,11 +975,8 @@ void *page_get_target_data(target_ulong address)
t = container_of(n, TargetPageDataNode, itree);
p_ofs = (page - region) >> TARGET_PAGE_BITS;
- return t->data + p_ofs * TARGET_PAGE_DATA_SIZE;
+ return t->data + p_ofs * size;
}
-#else
-void page_reset_target_data(target_ulong start, target_ulong last) { }
-#endif /* TARGET_PAGE_DATA_SIZE */
/* The system-mode versions of these helpers are in cputlb.c. */
@@ -1014,7 +1038,7 @@ int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
* be under mmap_lock() in order to prevent the creation of
* another TranslationBlock in between.
*/
- tb_invalidate_phys_range(addr, addr + l - 1);
+ tb_invalidate_phys_range(NULL, addr, addr + l - 1);
written = pwrite(fd, buf, l,
(off_t)(uintptr_t)g2h_untagged(addr));
if (written != l) {
@@ -1059,7 +1083,7 @@ static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
void *haddr;
uint8_t ret;
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, access_type);
ret = ldub_p(haddr);
clear_helper_retaddr();
@@ -1073,7 +1097,7 @@ static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uint16_t ret;
MemOp mop = get_memop(oi);
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
ret = load_atom_2(cpu, ra, haddr, mop);
clear_helper_retaddr();
@@ -1091,7 +1115,7 @@ static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uint32_t ret;
MemOp mop = get_memop(oi);
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
ret = load_atom_4(cpu, ra, haddr, mop);
clear_helper_retaddr();
@@ -1109,7 +1133,7 @@ static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uint64_t ret;
MemOp mop = get_memop(oi);
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
ret = load_atom_8(cpu, ra, haddr, mop);
clear_helper_retaddr();
@@ -1120,7 +1144,7 @@ static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
return ret;
}
-static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr,
+static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
void *haddr;
@@ -1128,7 +1152,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr,
MemOp mop = get_memop(oi);
tcg_debug_assert((mop & MO_SIZE) == MO_128);
- cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+ cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_16(cpu, ra, haddr, mop);
clear_helper_retaddr();
@@ -1144,7 +1168,7 @@ static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
{
void *haddr;
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, MMU_DATA_STORE);
stb_p(haddr, val);
clear_helper_retaddr();
@@ -1156,7 +1180,7 @@ static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
void *haddr;
MemOp mop = get_memop(oi);
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
@@ -1172,7 +1196,7 @@ static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
void *haddr;
MemOp mop = get_memop(oi);
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
@@ -1188,7 +1212,7 @@ static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
void *haddr;
MemOp mop = get_memop(oi);
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
@@ -1204,7 +1228,7 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
void *haddr;
MemOpIdx mop = get_memop(oi);
- cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+ cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
if (mop & MO_BSWAP) {
@@ -1214,101 +1238,28 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
clear_helper_retaddr();
}
-uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
-{
- uint32_t ret;
-
- set_helper_retaddr(1);
- ret = ldub_p(g2h_untagged(ptr));
- clear_helper_retaddr();
- return ret;
-}
-
-uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
-{
- uint32_t ret;
-
- set_helper_retaddr(1);
- ret = lduw_p(g2h_untagged(ptr));
- clear_helper_retaddr();
- return ret;
-}
-
-uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
-{
- uint32_t ret;
-
- set_helper_retaddr(1);
- ret = ldl_p(g2h_untagged(ptr));
- clear_helper_retaddr();
- return ret;
-}
-
-uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
-{
- uint64_t ret;
-
- set_helper_retaddr(1);
- ret = ldq_p(g2h_untagged(ptr));
- clear_helper_retaddr();
- return ret;
-}
-
-uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
+uint8_t cpu_ldb_code_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
- void *haddr;
- uint8_t ret;
-
- haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
- ret = ldub_p(haddr);
- clear_helper_retaddr();
- return ret;
+ return do_ld1_mmu(env_cpu(env), addr, oi, ra ? ra : 1, MMU_INST_FETCH);
}
-uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
+uint16_t cpu_ldw_code_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
- void *haddr;
- uint16_t ret;
-
- haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
- ret = lduw_p(haddr);
- clear_helper_retaddr();
- if (get_memop(oi) & MO_BSWAP) {
- ret = bswap16(ret);
- }
- return ret;
+ return do_ld2_mmu(env_cpu(env), addr, oi, ra ? ra : 1, MMU_INST_FETCH);
}
-uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
+uint32_t cpu_ldl_code_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
- void *haddr;
- uint32_t ret;
-
- haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
- ret = ldl_p(haddr);
- clear_helper_retaddr();
- if (get_memop(oi) & MO_BSWAP) {
- ret = bswap32(ret);
- }
- return ret;
+ return do_ld4_mmu(env_cpu(env), addr, oi, ra ? ra : 1, MMU_INST_FETCH);
}
-uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
+uint64_t cpu_ldq_code_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
- void *haddr;
- uint64_t ret;
-
- haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
- ret = ldq_p(haddr);
- clear_helper_retaddr();
- if (get_memop(oi) & MO_BSWAP) {
- ret = bswap64(ret);
- }
- return ret;
+ return do_ld8_mmu(env_cpu(env), addr, oi, ra ? ra : 1, MMU_INST_FETCH);
}
#include "ldst_common.c.inc"
diff --git a/accel/tcg/watchpoint.c b/accel/tcg/watchpoint.c
index 65b2188..cfb37a4 100644
--- a/accel/tcg/watchpoint.c
+++ b/accel/tcg/watchpoint.c
@@ -124,17 +124,14 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
}
cpu->watchpoint_hit = wp;
- mmap_lock();
/* This call also restores vCPU state */
tb_check_watchpoint(cpu, ra);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
cpu->exception_index = EXCP_DEBUG;
- mmap_unlock();
cpu_loop_exit(cpu);
} else {
/* Force execution of one insn next time. */
cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
- mmap_unlock();
cpu_loop_exit_noexc(cpu);
}
} else {
diff --git a/accel/xen/xen-all.c b/accel/xen/xen-all.c
index 7aa28b9..bd0ff64 100644
--- a/accel/xen/xen-all.c
+++ b/accel/xen/xen-all.c
@@ -18,6 +18,7 @@
#include "hw/xen/xen_igd.h"
#include "chardev/char.h"
#include "qemu/accel.h"
+#include "accel/dummy-cpus.h"
#include "system/accel-ops.h"
#include "system/cpus.h"
#include "system/xen.h"
@@ -63,7 +64,7 @@ static void xen_set_igd_gfx_passthru(Object *obj, bool value, Error **errp)
xen_igd_gfx_pt_set(value, errp);
}
-static void xen_setup_post(MachineState *ms, AccelState *accel)
+static void xen_setup_post(AccelState *as)
{
int rc;
@@ -76,7 +77,7 @@ static void xen_setup_post(MachineState *ms, AccelState *accel)
}
}
-static int xen_init(MachineState *ms)
+static int xen_init(AccelState *as, MachineState *ms)
{
MachineClass *mc = MACHINE_GET_CLASS(ms);
@@ -116,7 +117,7 @@ static int xen_init(MachineState *ms)
return 0;
}
-static void xen_accel_class_init(ObjectClass *oc, void *data)
+static void xen_accel_class_init(ObjectClass *oc, const void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
static GlobalProperty compat[] = {
@@ -147,11 +148,12 @@ static const TypeInfo xen_accel_type = {
.class_init = xen_accel_class_init,
};
-static void xen_accel_ops_class_init(ObjectClass *oc, void *data)
+static void xen_accel_ops_class_init(ObjectClass *oc, const void *data)
{
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
ops->create_vcpu_thread = dummy_start_vcpu_thread;
+ ops->handle_interrupt = generic_handle_interrupt;
}
static const TypeInfo xen_accel_ops_type = {