author     Stefan Hajnoczi <stefanha@redhat.com>  2025-07-04 08:58:49 -0400
committer  Stefan Hajnoczi <stefanha@redhat.com>  2025-07-04 08:58:49 -0400
commit     989dd906ed5556563a57b32ae7abf9db5e1f38ba
tree       ba40805a2e5fb2438e8ef8fe47862ef72baaf70f
parent     563ac3d18129a2770a285cc16c20ad50c8adc7c0
parent     c8beb901be15c57d166574ecf660261f0f23209f
Merge tag 'accel-20250704' of https://github.com/philmd/qemu into staging
Accelerators patches
- Generic API consolidation, cleanups (dead code removal, documentation added)
- Remove monitor TCG 'info opcount' and @x-query-opcount
- Have HVF / NVMM / WHPX use generic CPUState::vcpu_dirty field
- Expose nvmm_enabled() and whpx_enabled() to common code
- Have hmp_info_registers() dump vector registers
# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEE+qvnXhKRciHc/Wuy4+MsLN6twN4FAmhnql4ACgkQ4+MsLN6t
# wN6Lfg//R4h6dyAg02hyopwb/DSI97hAsD9kap15ro1qszYrIOkJcEPoE37HDi6d
# O0Ls+8NPpJcnMwdghHvVaRGoIH2OY5ogXKo6UK1BbOn8iAGxRrT/IPVCyFbPmQoe
# Bk78Z/wne/YgCXiW4HGHSJO5sL04AQqcFYnwjisHHf3Ox8RR85LbhWqthZluta4i
# a/Y8W5UO7jfwhAl1/Zb2cU+Rv75I6xcaLQAfmbt4j+wHP52I2cjLpIYo4sCn+ULJ
# AVX4q4MKrkDrr6CYPXxdGJzYEzVn9evynVcQoRzL6bLZFMpa284AzVd3kQg9NWAb
# p1hvKJTA57q4XDoD50qVGLhP207VVSUcdm0r2ZJA2jag5ddoT+x2talz8/f6In1b
# 7BrSM/pla8x9KvTne/ko0wSL0o2dOWyig8mBxARLZWPxk+LBVs1PBZfvn+3j1pYA
# rWV25Ht4QJlUYMbe3NvEIomsVThKg8Fh3b4mEuyPM+LZ1brgmhrzJG1SF+G4fH8A
# aig/RVqgNHtajSnG4A723k2/QzlvnAiT7E3dKB5FogjTcVzFRaWFKsUb4ORqsCAz
# c/AheCJY4PP3pAnb0ODISSVviXwAXqCLbtZhDGhHNYl3C69EyGPPMiVxCaIxKDxU
# bF7AIYhRTTMyNSbnkcRS3UDO/gZS7x5/K+/YAM9akQEYADIodYM=
# =Vb39
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 04 Jul 2025 06:18:06 EDT
# gpg: using RSA key FAABE75E12917221DCFD6BB2E3E32C2CDEADC0DE
# gpg: Good signature from "Philippe Mathieu-Daudé (F4BUG) <f4bug@amsat.org>" [full]
# Primary key fingerprint: FAAB E75E 1291 7221 DCFD 6BB2 E3E3 2C2C DEAD C0DE
* tag 'accel-20250704' of https://github.com/philmd/qemu: (31 commits)
MAINTAINERS: Add me as reviewer of overall accelerators section
monitor/hmp-cmds-target: add CPU_DUMP_VPU in hmp_info_registers()
accel: Pass AccelState argument to gdbstub_supported_sstep_flags()
accel: Remove unused MachineState argument of AccelClass::setup_post()
accel: Directly pass AccelState argument to AccelClass::has_memory()
accel/kvm: Directly pass KVMState argument to do_kvm_create_vm()
accel/kvm: Prefer local AccelState over global MachineState::accel
accel/tcg: Prefer local AccelState over global current_accel()
accel: Propagate AccelState to AccelClass::init_machine()
accel: Keep reference to AccelOpsClass in AccelClass
accel: Expose and register generic_handle_interrupt()
accel/dummy: Extract 'dummy-cpus.h' header from 'system/cpus.h'
accel/whpx: Expose whpx_enabled() to common code
accel/nvmm: Expose nvmm_enabled() to common code
accel/system: Document cpu_synchronize_state_post_init/reset()
accel/system: Document cpu_synchronize_state()
accel/kvm: Remove kvm_cpu_synchronize_state() stub
accel/whpx: Replace @dirty field by generic CPUState::vcpu_dirty field
accel/nvmm: Replace @dirty field by generic CPUState::vcpu_dirty field
accel/hvf: Replace @dirty field by generic CPUState::vcpu_dirty field
...
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
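
The @dirty consolidation listed above amounts to each accelerator dropping its private AccelCPUState dirty flag in favour of the generic CPUState::vcpu_dirty field. Below is a condensed sketch of the resulting synchronize pattern, taken from the HVF hunks in this diff (NVMM and WHPX follow the same shape with their own get/put helpers); headers are assumed to match the post-series tree layout shown in the hunks.

    /* Condensed sketch: CPUState::vcpu_dirty lives in hw/core/cpu.h and
     * hvf_get_registers() is declared in system/hvf_int.h, as in the
     * hunks below. */
    #include "qemu/osdep.h"
    #include "hw/core/cpu.h"
    #include "system/hvf.h"
    #include "system/hvf_int.h"

    static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
    {
        if (!cpu->vcpu_dirty) {       /* generic field, formerly cpu->accel->dirty */
            hvf_get_registers(cpu);   /* accelerator state is the reference */
            cpu->vcpu_dirty = true;
        }
    }

    static void do_hvf_cpu_synchronize_set_dirty(CPUState *cpu, run_on_cpu_data arg)
    {
        /* QEMU state is the reference; push it to the accelerator on next entry */
        cpu->vcpu_dirty = true;
    }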
44 files changed, 539 insertions, 523 deletions
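
The AccelState plumbing in the series boils down to the system-level AccelClass hooks receiving the accelerator object directly instead of reaching through MachineState or current_accel(). Condensed excerpts from the include/qemu/accel.h and accel/kvm/kvm-all.c hunks below (struct fields and the KVM body are abridged):

    typedef struct AccelClass {
        /* ... */
        const char *name;

        /* Cached by accel_init_ops_interfaces() when created */
        AccelOpsClass *ops;

        int (*init_machine)(AccelState *as, MachineState *ms);
        /* system related hooks */
        void (*setup_post)(AccelState *as);
        bool (*has_memory)(AccelState *accel, AddressSpace *as,
                           hwaddr start_addr, hwaddr size);
        /* gdbstub related hooks */
        int (*gdbstub_supported_sstep_flags)(AccelState *as);
        /* ... */
    } AccelClass;

    static int kvm_init(AccelState *as, MachineState *ms)
    {
        KVMState *s = KVM_STATE(as);   /* was: s = KVM_STATE(ms->accelerator); */
        /* ... initialization unchanged, abridged ... */
        return 0;
    }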
diff --git a/MAINTAINERS b/MAINTAINERS index b1cbfe1..bfd59f6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -495,6 +495,7 @@ Guest CPU Cores (other accelerators) Overall M: Richard Henderson <richard.henderson@linaro.org> R: Paolo Bonzini <pbonzini@redhat.com> +R: Philippe Mathieu-Daudé <philmd@linaro.org> S: Maintained F: include/exec/cpu*.h F: include/exec/target_long.h @@ -503,6 +504,7 @@ F: include/system/accel-*.h F: include/system/cpus.h F: include/accel/accel-cpu*.h F: accel/accel-*.? +F: accel/dummy-cpus.? F: accel/Makefile.objs F: accel/stubs/Makefile.objs F: cpu-common.c @@ -540,6 +542,7 @@ WHPX CPUs M: Sunil Muthuswamy <sunilmut@microsoft.com> S: Supported F: target/i386/whpx/ +F: accel/stubs/whpx-stub.c F: include/system/whpx.h X86 Instruction Emulator @@ -586,6 +589,7 @@ NetBSD Virtual Machine Monitor (NVMM) CPU support M: Reinoud Zandijk <reinoud@netbsd.org> S: Maintained F: include/system/nvmm.h +F: accel/stubs/nvmm-stub.c F: target/i386/nvmm/ Hosts diff --git a/accel/accel-common.c b/accel/accel-common.c index 4894b98..591ff4c 100644 --- a/accel/accel-common.c +++ b/accel/accel-common.c @@ -124,7 +124,7 @@ int accel_supported_gdbstub_sstep_flags(void) AccelState *accel = current_accel(); AccelClass *acc = ACCEL_GET_CLASS(accel); if (acc->gdbstub_supported_sstep_flags) { - return acc->gdbstub_supported_sstep_flags(); + return acc->gdbstub_supported_sstep_flags(accel); } return 0; } diff --git a/accel/accel-system.c b/accel/accel-system.c index a0f562a..af713cc 100644 --- a/accel/accel-system.c +++ b/accel/accel-system.c @@ -37,7 +37,7 @@ int accel_init_machine(AccelState *accel, MachineState *ms) int ret; ms->accelerator = accel; *(acc->allowed) = true; - ret = acc->init_machine(ms); + ret = acc->init_machine(accel, ms); if (ret < 0) { ms->accelerator = NULL; *(acc->allowed) = false; @@ -58,7 +58,7 @@ void accel_setup_post(MachineState *ms) AccelState *accel = ms->accelerator; AccelClass *acc = ACCEL_GET_CLASS(accel); if (acc->setup_post) { - acc->setup_post(ms, accel); + acc->setup_post(accel); } } @@ -85,8 +85,9 @@ void accel_init_ops_interfaces(AccelClass *ac) * non-NULL create_vcpu_thread operation. */ ops = ACCEL_OPS_CLASS(oc); + ac->ops = ops; if (ops->ops_init) { - ops->ops_init(ops); + ops->ops_init(ac); } cpus_register_accel(ops); } diff --git a/accel/dummy-cpus.c b/accel/dummy-cpus.c index 8672761..03cfc0f 100644 --- a/accel/dummy-cpus.c +++ b/accel/dummy-cpus.c @@ -17,6 +17,7 @@ #include "qemu/guest-random.h" #include "qemu/main-loop.h" #include "hw/core/cpu.h" +#include "accel/dummy-cpus.h" static void *dummy_cpu_thread_fn(void *arg) { diff --git a/accel/dummy-cpus.h b/accel/dummy-cpus.h new file mode 100644 index 0000000..d18dd0f --- /dev/null +++ b/accel/dummy-cpus.h @@ -0,0 +1,14 @@ +/* + * Dummy cpu thread code + * + * Copyright IBM, Corp. 
2011 + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef ACCEL_DUMMY_CPUS_H +#define ACCEL_DUMMY_CPUS_H + +void dummy_start_vcpu_thread(CPUState *cpu); + +#endif diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c index b389772..be8724a 100644 --- a/accel/hvf/hvf-accel-ops.c +++ b/accel/hvf/hvf-accel-ops.c @@ -48,18 +48,16 @@ */ #include "qemu/osdep.h" -#include "qemu/error-report.h" +#include "qemu/guest-random.h" #include "qemu/main-loop.h" -#include "system/address-spaces.h" +#include "qemu/queue.h" #include "gdbstub/enums.h" -#include "hw/boards.h" +#include "exec/cpu-common.h" +#include "hw/core/cpu.h" #include "system/accel-ops.h" #include "system/cpus.h" #include "system/hvf.h" #include "system/hvf_int.h" -#include "system/runstate.h" -#include "qemu/guest-random.h" -#include "trace.h" HVFState *hvf_state; @@ -79,143 +77,17 @@ hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size) return NULL; } -struct mac_slot { - int present; - uint64_t size; - uint64_t gpa_start; - uint64_t gva; -}; - -struct mac_slot mac_slots[32]; - -static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags) -{ - struct mac_slot *macslot; - hv_return_t ret; - - macslot = &mac_slots[slot->slot_id]; - - if (macslot->present) { - if (macslot->size != slot->size) { - macslot->present = 0; - trace_hvf_vm_unmap(macslot->gpa_start, macslot->size); - ret = hv_vm_unmap(macslot->gpa_start, macslot->size); - assert_hvf_ok(ret); - } - } - - if (!slot->size) { - return 0; - } - - macslot->present = 1; - macslot->gpa_start = slot->start; - macslot->size = slot->size; - trace_hvf_vm_map(slot->start, slot->size, slot->mem, flags, - flags & HV_MEMORY_READ ? 'R' : '-', - flags & HV_MEMORY_WRITE ? 'W' : '-', - flags & HV_MEMORY_EXEC ? 'E' : '-'); - ret = hv_vm_map(slot->mem, slot->start, slot->size, flags); - assert_hvf_ok(ret); - return 0; -} - -static void hvf_set_phys_mem(MemoryRegionSection *section, bool add) -{ - hvf_slot *mem; - MemoryRegion *area = section->mr; - bool writable = !area->readonly && !area->rom_device; - hv_memory_flags_t flags; - uint64_t page_size = qemu_real_host_page_size(); - - if (!memory_region_is_ram(area)) { - if (writable) { - return; - } else if (!memory_region_is_romd(area)) { - /* - * If the memory device is not in romd_mode, then we actually want - * to remove the hvf memory slot so all accesses will trap. - */ - add = false; - } - } - - if (!QEMU_IS_ALIGNED(int128_get64(section->size), page_size) || - !QEMU_IS_ALIGNED(section->offset_within_address_space, page_size)) { - /* Not page aligned, so we can not map as RAM */ - add = false; - } - - mem = hvf_find_overlap_slot( - section->offset_within_address_space, - int128_get64(section->size)); - - if (mem && add) { - if (mem->size == int128_get64(section->size) && - mem->start == section->offset_within_address_space && - mem->mem == (memory_region_get_ram_ptr(area) + - section->offset_within_region)) { - return; /* Same region was attempted to register, go away. */ - } - } - - /* Region needs to be reset. set the size to 0 and remap it. */ - if (mem) { - mem->size = 0; - if (do_hvf_set_memory(mem, 0)) { - error_report("Failed to reset overlapping slot"); - abort(); - } - } - - if (!add) { - return; - } - - if (area->readonly || - (!memory_region_is_ram(area) && memory_region_is_romd(area))) { - flags = HV_MEMORY_READ | HV_MEMORY_EXEC; - } else { - flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC; - } - - /* Now make a new slot. 
*/ - int x; - - for (x = 0; x < hvf_state->num_slots; ++x) { - mem = &hvf_state->slots[x]; - if (!mem->size) { - break; - } - } - - if (x == hvf_state->num_slots) { - error_report("No free slots"); - abort(); - } - - mem->size = int128_get64(section->size); - mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region; - mem->start = section->offset_within_address_space; - mem->region = area; - - if (do_hvf_set_memory(mem, flags)) { - error_report("Error registering new memory slot"); - abort(); - } -} - static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg) { - if (!cpu->accel->dirty) { + if (!cpu->vcpu_dirty) { hvf_get_registers(cpu); - cpu->accel->dirty = true; + cpu->vcpu_dirty = true; } } static void hvf_cpu_synchronize_state(CPUState *cpu) { - if (!cpu->accel->dirty) { + if (!cpu->vcpu_dirty) { run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL); } } @@ -224,7 +96,7 @@ static void do_hvf_cpu_synchronize_set_dirty(CPUState *cpu, run_on_cpu_data arg) { /* QEMU state is the reference, push it to HVF now and on next entry */ - cpu->accel->dirty = true; + cpu->vcpu_dirty = true; } static void hvf_cpu_synchronize_post_reset(CPUState *cpu) @@ -242,147 +114,10 @@ static void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu) run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL); } -static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on) -{ - hvf_slot *slot; - - slot = hvf_find_overlap_slot( - section->offset_within_address_space, - int128_get64(section->size)); - - /* protect region against writes; begin tracking it */ - if (on) { - slot->flags |= HVF_SLOT_LOG; - hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size, - HV_MEMORY_READ | HV_MEMORY_EXEC); - /* stop tracking region*/ - } else { - slot->flags &= ~HVF_SLOT_LOG; - hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size, - HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC); - } -} - -static void hvf_log_start(MemoryListener *listener, - MemoryRegionSection *section, int old, int new) -{ - if (old != 0) { - return; - } - - hvf_set_dirty_tracking(section, 1); -} - -static void hvf_log_stop(MemoryListener *listener, - MemoryRegionSection *section, int old, int new) -{ - if (new != 0) { - return; - } - - hvf_set_dirty_tracking(section, 0); -} - -static void hvf_log_sync(MemoryListener *listener, - MemoryRegionSection *section) -{ - /* - * sync of dirty pages is handled elsewhere; just make sure we keep - * tracking the region. 
- */ - hvf_set_dirty_tracking(section, 1); -} - -static void hvf_region_add(MemoryListener *listener, - MemoryRegionSection *section) -{ - hvf_set_phys_mem(section, true); -} - -static void hvf_region_del(MemoryListener *listener, - MemoryRegionSection *section) -{ - hvf_set_phys_mem(section, false); -} - -static MemoryListener hvf_memory_listener = { - .name = "hvf", - .priority = MEMORY_LISTENER_PRIORITY_ACCEL, - .region_add = hvf_region_add, - .region_del = hvf_region_del, - .log_start = hvf_log_start, - .log_stop = hvf_log_stop, - .log_sync = hvf_log_sync, -}; - static void dummy_signal(int sig) { } -bool hvf_allowed; - -static int hvf_accel_init(MachineState *ms) -{ - int x; - hv_return_t ret; - HVFState *s; - int pa_range = 36; - MachineClass *mc = MACHINE_GET_CLASS(ms); - - if (mc->hvf_get_physical_address_range) { - pa_range = mc->hvf_get_physical_address_range(ms); - if (pa_range < 0) { - return -EINVAL; - } - } - - ret = hvf_arch_vm_create(ms, (uint32_t)pa_range); - assert_hvf_ok(ret); - - s = g_new0(HVFState, 1); - - s->num_slots = ARRAY_SIZE(s->slots); - for (x = 0; x < s->num_slots; ++x) { - s->slots[x].size = 0; - s->slots[x].slot_id = x; - } - - QTAILQ_INIT(&s->hvf_sw_breakpoints); - - hvf_state = s; - memory_listener_register(&hvf_memory_listener, &address_space_memory); - - return hvf_arch_init(); -} - -static inline int hvf_gdbstub_sstep_flags(void) -{ - return SSTEP_ENABLE | SSTEP_NOIRQ; -} - -static void hvf_accel_class_init(ObjectClass *oc, const void *data) -{ - AccelClass *ac = ACCEL_CLASS(oc); - ac->name = "HVF"; - ac->init_machine = hvf_accel_init; - ac->allowed = &hvf_allowed; - ac->gdbstub_supported_sstep_flags = hvf_gdbstub_sstep_flags; -} - -static const TypeInfo hvf_accel_type = { - .name = TYPE_HVF_ACCEL, - .parent = TYPE_ACCEL, - .instance_size = sizeof(HVFState), - .class_init = hvf_accel_class_init, -}; - -static void hvf_type_init(void) -{ - type_register_static(&hvf_accel_type); -} - -type_init(hvf_type_init); - static void hvf_vcpu_destroy(CPUState *cpu) { hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd); @@ -415,8 +150,8 @@ static int hvf_init_vcpu(CPUState *cpu) #else r = hv_vcpu_create(&cpu->accel->fd, HV_VCPU_DEFAULT); #endif - cpu->accel->dirty = true; assert_hvf_ok(r); + cpu->vcpu_dirty = true; cpu->accel->guest_debug_enabled = false; @@ -482,6 +217,34 @@ static void hvf_start_vcpu_thread(CPUState *cpu) cpu, QEMU_THREAD_JOINABLE); } +struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, vaddr pc) +{ + struct hvf_sw_breakpoint *bp; + + QTAILQ_FOREACH(bp, &hvf_state->hvf_sw_breakpoints, entry) { + if (bp->pc == pc) { + return bp; + } + } + return NULL; +} + +int hvf_sw_breakpoints_active(CPUState *cpu) +{ + return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints); +} + +static void do_hvf_update_guest_debug(CPUState *cpu, run_on_cpu_data arg) +{ + hvf_arch_update_guest_debug(cpu); +} + +int hvf_update_guest_debug(CPUState *cpu) +{ + run_on_cpu(cpu, do_hvf_update_guest_debug, RUN_ON_CPU_NULL); + return 0; +} + static int hvf_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len) { struct hvf_sw_breakpoint *bp; @@ -590,6 +353,7 @@ static void hvf_accel_ops_class_init(ObjectClass *oc, const void *data) ops->create_vcpu_thread = hvf_start_vcpu_thread; ops->kick_vcpu_thread = hvf_kick_vcpu_thread; + ops->handle_interrupt = generic_handle_interrupt; ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset; ops->synchronize_post_init = hvf_cpu_synchronize_post_init; @@ -609,8 +373,10 @@ static const TypeInfo hvf_accel_ops_type = { 
.class_init = hvf_accel_ops_class_init, .abstract = true, }; + static void hvf_accel_ops_register_types(void) { type_register_static(&hvf_accel_ops_type); } + type_init(hvf_accel_ops_register_types); diff --git a/accel/hvf/hvf-all.c b/accel/hvf/hvf-all.c index 8c387fd..b6075c0 100644 --- a/accel/hvf/hvf-all.c +++ b/accel/hvf/hvf-all.c @@ -10,9 +10,24 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" +#include "system/address-spaces.h" +#include "system/memory.h" #include "system/hvf.h" #include "system/hvf_int.h" #include "hw/core/cpu.h" +#include "hw/boards.h" +#include "trace.h" + +bool hvf_allowed; + +struct mac_slot { + int present; + uint64_t size; + uint64_t gpa_start; + uint64_t gva; +}; + +struct mac_slot mac_slots[32]; const char *hvf_return_string(hv_return_t ret) { @@ -42,30 +57,254 @@ void assert_hvf_ok_impl(hv_return_t ret, const char *file, unsigned int line, abort(); } -struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, vaddr pc) +static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags) { - struct hvf_sw_breakpoint *bp; + struct mac_slot *macslot; + hv_return_t ret; - QTAILQ_FOREACH(bp, &hvf_state->hvf_sw_breakpoints, entry) { - if (bp->pc == pc) { - return bp; + macslot = &mac_slots[slot->slot_id]; + + if (macslot->present) { + if (macslot->size != slot->size) { + macslot->present = 0; + trace_hvf_vm_unmap(macslot->gpa_start, macslot->size); + ret = hv_vm_unmap(macslot->gpa_start, macslot->size); + assert_hvf_ok(ret); } } - return NULL; + + if (!slot->size) { + return 0; + } + + macslot->present = 1; + macslot->gpa_start = slot->start; + macslot->size = slot->size; + trace_hvf_vm_map(slot->start, slot->size, slot->mem, flags, + flags & HV_MEMORY_READ ? 'R' : '-', + flags & HV_MEMORY_WRITE ? 'W' : '-', + flags & HV_MEMORY_EXEC ? 'E' : '-'); + ret = hv_vm_map(slot->mem, slot->start, slot->size, flags); + assert_hvf_ok(ret); + return 0; } -int hvf_sw_breakpoints_active(CPUState *cpu) +static void hvf_set_phys_mem(MemoryRegionSection *section, bool add) { - return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints); + hvf_slot *mem; + MemoryRegion *area = section->mr; + bool writable = !area->readonly && !area->rom_device; + hv_memory_flags_t flags; + uint64_t page_size = qemu_real_host_page_size(); + + if (!memory_region_is_ram(area)) { + if (writable) { + return; + } else if (!memory_region_is_romd(area)) { + /* + * If the memory device is not in romd_mode, then we actually want + * to remove the hvf memory slot so all accesses will trap. + */ + add = false; + } + } + + if (!QEMU_IS_ALIGNED(int128_get64(section->size), page_size) || + !QEMU_IS_ALIGNED(section->offset_within_address_space, page_size)) { + /* Not page aligned, so we can not map as RAM */ + add = false; + } + + mem = hvf_find_overlap_slot( + section->offset_within_address_space, + int128_get64(section->size)); + + if (mem && add) { + if (mem->size == int128_get64(section->size) && + mem->start == section->offset_within_address_space && + mem->mem == (memory_region_get_ram_ptr(area) + + section->offset_within_region)) { + return; /* Same region was attempted to register, go away. */ + } + } + + /* Region needs to be reset. set the size to 0 and remap it. 
*/ + if (mem) { + mem->size = 0; + if (do_hvf_set_memory(mem, 0)) { + error_report("Failed to reset overlapping slot"); + abort(); + } + } + + if (!add) { + return; + } + + if (area->readonly || + (!memory_region_is_ram(area) && memory_region_is_romd(area))) { + flags = HV_MEMORY_READ | HV_MEMORY_EXEC; + } else { + flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC; + } + + /* Now make a new slot. */ + int x; + + for (x = 0; x < hvf_state->num_slots; ++x) { + mem = &hvf_state->slots[x]; + if (!mem->size) { + break; + } + } + + if (x == hvf_state->num_slots) { + error_report("No free slots"); + abort(); + } + + mem->size = int128_get64(section->size); + mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region; + mem->start = section->offset_within_address_space; + mem->region = area; + + if (do_hvf_set_memory(mem, flags)) { + error_report("Error registering new memory slot"); + abort(); + } } -static void do_hvf_update_guest_debug(CPUState *cpu, run_on_cpu_data arg) +static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on) { - hvf_arch_update_guest_debug(cpu); + hvf_slot *slot; + + slot = hvf_find_overlap_slot( + section->offset_within_address_space, + int128_get64(section->size)); + + /* protect region against writes; begin tracking it */ + if (on) { + slot->flags |= HVF_SLOT_LOG; + hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size, + HV_MEMORY_READ | HV_MEMORY_EXEC); + /* stop tracking region*/ + } else { + slot->flags &= ~HVF_SLOT_LOG; + hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size, + HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC); + } } -int hvf_update_guest_debug(CPUState *cpu) +static void hvf_log_start(MemoryListener *listener, + MemoryRegionSection *section, int old, int new) { - run_on_cpu(cpu, do_hvf_update_guest_debug, RUN_ON_CPU_NULL); - return 0; + if (old != 0) { + return; + } + + hvf_set_dirty_tracking(section, 1); } + +static void hvf_log_stop(MemoryListener *listener, + MemoryRegionSection *section, int old, int new) +{ + if (new != 0) { + return; + } + + hvf_set_dirty_tracking(section, 0); +} + +static void hvf_log_sync(MemoryListener *listener, + MemoryRegionSection *section) +{ + /* + * sync of dirty pages is handled elsewhere; just make sure we keep + * tracking the region. 
+ */ + hvf_set_dirty_tracking(section, 1); +} + +static void hvf_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + hvf_set_phys_mem(section, true); +} + +static void hvf_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + hvf_set_phys_mem(section, false); +} + +static MemoryListener hvf_memory_listener = { + .name = "hvf", + .priority = MEMORY_LISTENER_PRIORITY_ACCEL, + .region_add = hvf_region_add, + .region_del = hvf_region_del, + .log_start = hvf_log_start, + .log_stop = hvf_log_stop, + .log_sync = hvf_log_sync, +}; + +static int hvf_accel_init(AccelState *as, MachineState *ms) +{ + int x; + hv_return_t ret; + HVFState *s; + int pa_range = 36; + MachineClass *mc = MACHINE_GET_CLASS(ms); + + if (mc->hvf_get_physical_address_range) { + pa_range = mc->hvf_get_physical_address_range(ms); + if (pa_range < 0) { + return -EINVAL; + } + } + + ret = hvf_arch_vm_create(ms, (uint32_t)pa_range); + assert_hvf_ok(ret); + + s = g_new0(HVFState, 1); + + s->num_slots = ARRAY_SIZE(s->slots); + for (x = 0; x < s->num_slots; ++x) { + s->slots[x].size = 0; + s->slots[x].slot_id = x; + } + + QTAILQ_INIT(&s->hvf_sw_breakpoints); + + hvf_state = s; + memory_listener_register(&hvf_memory_listener, &address_space_memory); + + return hvf_arch_init(); +} + +static int hvf_gdbstub_sstep_flags(AccelState *as) +{ + return SSTEP_ENABLE | SSTEP_NOIRQ; +} + +static void hvf_accel_class_init(ObjectClass *oc, const void *data) +{ + AccelClass *ac = ACCEL_CLASS(oc); + ac->name = "HVF"; + ac->init_machine = hvf_accel_init; + ac->allowed = &hvf_allowed; + ac->gdbstub_supported_sstep_flags = hvf_gdbstub_sstep_flags; +} + +static const TypeInfo hvf_accel_type = { + .name = TYPE_HVF_ACCEL, + .parent = TYPE_ACCEL, + .instance_size = sizeof(HVFState), + .class_init = hvf_accel_class_init, +}; + +static void hvf_type_init(void) +{ + type_register_static(&hvf_accel_type); +} + +type_init(hvf_type_init); diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c index e5c1544..0eafc90 100644 --- a/accel/kvm/kvm-accel-ops.c +++ b/accel/kvm/kvm-accel-ops.c @@ -101,6 +101,7 @@ static void kvm_accel_ops_class_init(ObjectClass *oc, const void *data) ops->synchronize_post_init = kvm_cpu_synchronize_post_init; ops->synchronize_state = kvm_cpu_synchronize_state; ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm; + ops->handle_interrupt = generic_handle_interrupt; #ifdef TARGET_KVM_HAVE_GUEST_DEBUG ops->update_guest_debug = kvm_update_guest_debug_ops; diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index 8141854..a106d1b 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -453,7 +453,13 @@ static void kvm_reset_parked_vcpus(KVMState *s) } } -int kvm_create_vcpu(CPUState *cpu) +/** + * kvm_create_vcpu - Gets a parked KVM vCPU or creates a KVM vCPU + * @cpu: QOM CPUState object for which KVM vCPU has to be fetched/created. + * + * @returns: 0 when success, errno (<0) when failed. 
+ */ +static int kvm_create_vcpu(CPUState *cpu) { unsigned long vcpu_id = kvm_arch_vcpu_id(cpu); KVMState *s = kvm_state; @@ -2496,13 +2502,10 @@ uint32_t kvm_dirty_ring_size(void) return kvm_state->kvm_dirty_ring_size; } -static int do_kvm_create_vm(MachineState *ms, int type) +static int do_kvm_create_vm(KVMState *s, int type) { - KVMState *s; int ret; - s = KVM_STATE(ms->accelerator); - do { ret = kvm_ioctl(s, KVM_CREATE_VM, type); } while (ret == -EINTR); @@ -2599,7 +2602,7 @@ static int kvm_setup_dirty_ring(KVMState *s) return 0; } -static int kvm_init(MachineState *ms) +static int kvm_init(AccelState *as, MachineState *ms) { MachineClass *mc = MACHINE_GET_CLASS(ms); static const char upgrade_note[] = @@ -2614,15 +2617,13 @@ static int kvm_init(MachineState *ms) { /* end of list */ } }, *nc = num_cpus; int soft_vcpus_limit, hard_vcpus_limit; - KVMState *s; + KVMState *s = KVM_STATE(as); const KVMCapabilityInfo *missing_cap; int ret; int type; qemu_mutex_init(&kml_slots_lock); - s = KVM_STATE(ms->accelerator); - /* * On systems where the kernel can support different base page * sizes, host page size may be different from TARGET_PAGE_SIZE, @@ -2674,7 +2675,7 @@ static int kvm_init(MachineState *ms) goto err; } - ret = do_kvm_create_vm(ms, type); + ret = do_kvm_create_vm(s, type); if (ret < 0) { goto err; } @@ -3817,10 +3818,10 @@ int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target) return r; } -static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as, +static bool kvm_accel_has_memory(AccelState *accel, AddressSpace *as, hwaddr start_addr, hwaddr size) { - KVMState *kvm = KVM_STATE(ms->accelerator); + KVMState *kvm = KVM_STATE(accel); int i; for (i = 0; i < kvm->nr_as; ++i) { @@ -4011,7 +4012,7 @@ static void kvm_accel_instance_init(Object *obj) * Returns: SSTEP_* flags that KVM supports for guest debug. 
The * support is probed during kvm_init() */ -static int kvm_gdbstub_sstep_flags(void) +static int kvm_gdbstub_sstep_flags(AccelState *as) { return kvm_sstep_flags; } diff --git a/accel/qtest/qtest.c b/accel/qtest/qtest.c index 92bed92..2b83126 100644 --- a/accel/qtest/qtest.c +++ b/accel/qtest/qtest.c @@ -24,6 +24,7 @@ #include "qemu/guest-random.h" #include "qemu/main-loop.h" #include "hw/core/cpu.h" +#include "accel/dummy-cpus.h" static int64_t qtest_clock_counter; @@ -37,7 +38,7 @@ static void qtest_set_virtual_clock(int64_t count) qatomic_set_i64(&qtest_clock_counter, count); } -static int qtest_init_accel(MachineState *ms) +static int qtest_init_accel(AccelState *as, MachineState *ms) { return 0; } @@ -66,6 +67,7 @@ static void qtest_accel_ops_class_init(ObjectClass *oc, const void *data) ops->create_vcpu_thread = dummy_start_vcpu_thread; ops->get_virtual_clock = qtest_get_virtual_clock; ops->set_virtual_clock = qtest_set_virtual_clock; + ops->handle_interrupt = generic_handle_interrupt; }; static const TypeInfo qtest_accel_ops_type = { diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c index ecfd763..68cd33b 100644 --- a/accel/stubs/kvm-stub.c +++ b/accel/stubs/kvm-stub.c @@ -29,10 +29,6 @@ void kvm_flush_coalesced_mmio_buffer(void) { } -void kvm_cpu_synchronize_state(CPUState *cpu) -{ -} - bool kvm_has_sync_mmu(void) { return false; @@ -105,11 +101,6 @@ unsigned int kvm_get_free_memslots(void) return 0; } -void kvm_init_cpu_signals(CPUState *cpu) -{ - abort(); -} - bool kvm_arm_supports_user_irq(void) { return false; diff --git a/accel/stubs/meson.build b/accel/stubs/meson.build index 8ca1a45..9dfc4f9 100644 --- a/accel/stubs/meson.build +++ b/accel/stubs/meson.build @@ -3,5 +3,7 @@ system_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c')) system_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c')) system_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c')) system_stubs_ss.add(when: 'CONFIG_HVF', if_false: files('hvf-stub.c')) +system_stubs_ss.add(when: 'CONFIG_NVMM', if_false: files('nvmm-stub.c')) +system_stubs_ss.add(when: 'CONFIG_WHPX', if_false: files('whpx-stub.c')) specific_ss.add_all(when: ['CONFIG_SYSTEM_ONLY'], if_true: system_stubs_ss) diff --git a/accel/stubs/nvmm-stub.c b/accel/stubs/nvmm-stub.c new file mode 100644 index 0000000..ec14837 --- /dev/null +++ b/accel/stubs/nvmm-stub.c @@ -0,0 +1,12 @@ +/* + * NVMM stubs for QEMU + * + * Copyright (c) Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "system/nvmm.h" + +bool nvmm_allowed; diff --git a/accel/stubs/whpx-stub.c b/accel/stubs/whpx-stub.c new file mode 100644 index 0000000..c564c89 --- /dev/null +++ b/accel/stubs/whpx-stub.c @@ -0,0 +1,12 @@ +/* + * WHPX stubs for QEMU + * + * Copyright (c) Linaro + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "system/whpx.h" + +bool whpx_allowed; diff --git a/accel/tcg/internal-common.h b/accel/tcg/internal-common.h index 1dbc45d..77a3a06 100644 --- a/accel/tcg/internal-common.h +++ b/accel/tcg/internal-common.h @@ -139,4 +139,6 @@ G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr); void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr); void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr); +void tcg_dump_stats(GString *buf); + #endif diff --git a/accel/tcg/monitor.c b/accel/tcg/monitor.c index 1c182b6..e7ed728 100644 --- a/accel/tcg/monitor.c +++ b/accel/tcg/monitor.c @@ -141,16 +141,26 @@ 
static void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide) *pelide = elide; } -static void tcg_dump_info(GString *buf) +static void tcg_dump_flush_info(GString *buf) { - g_string_append_printf(buf, "[TCG profiler not compiled]\n"); + size_t flush_full, flush_part, flush_elide; + + g_string_append_printf(buf, "TB flush count %u\n", + qatomic_read(&tb_ctx.tb_flush_count)); + g_string_append_printf(buf, "TB invalidate count %u\n", + qatomic_read(&tb_ctx.tb_phys_invalidate_count)); + + tlb_flush_counts(&flush_full, &flush_part, &flush_elide); + g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full); + g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part); + g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide); } static void dump_exec_info(GString *buf) { struct tb_tree_stats tst = {}; struct qht_stats hst; - size_t nb_tbs, flush_full, flush_part, flush_elide; + size_t nb_tbs; tcg_tb_foreach(tb_tree_stats_iter, &tst); nb_tbs = tst.nb_tbs; @@ -187,50 +197,26 @@ static void dump_exec_info(GString *buf) qht_statistics_destroy(&hst); g_string_append_printf(buf, "\nStatistics:\n"); - g_string_append_printf(buf, "TB flush count %u\n", - qatomic_read(&tb_ctx.tb_flush_count)); - g_string_append_printf(buf, "TB invalidate count %u\n", - qatomic_read(&tb_ctx.tb_phys_invalidate_count)); - - tlb_flush_counts(&flush_full, &flush_part, &flush_elide); - g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full); - g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part); - g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide); - tcg_dump_info(buf); + tcg_dump_flush_info(buf); } -HumanReadableText *qmp_x_query_jit(Error **errp) +void tcg_dump_stats(GString *buf) { - g_autoptr(GString) buf = g_string_new(""); - - if (!tcg_enabled()) { - error_setg(errp, "JIT information is only available with accel=tcg"); - return NULL; - } - dump_accel_info(buf); dump_exec_info(buf); dump_drift_info(buf); - - return human_readable_text_from_str(buf); -} - -static void tcg_dump_op_count(GString *buf) -{ - g_string_append_printf(buf, "[TCG profiler not compiled]\n"); } -HumanReadableText *qmp_x_query_opcount(Error **errp) +HumanReadableText *qmp_x_query_jit(Error **errp) { g_autoptr(GString) buf = g_string_new(""); if (!tcg_enabled()) { - error_setg(errp, - "Opcode count information is only available with accel=tcg"); + error_setg(errp, "JIT information is only available with accel=tcg"); return NULL; } - tcg_dump_op_count(buf); + tcg_dump_stats(buf); return human_readable_text_from_str(buf); } @@ -238,7 +224,6 @@ HumanReadableText *qmp_x_query_opcount(Error **errp) static void hmp_tcg_register(void) { monitor_register_hmp_info_hrt("jit", qmp_x_query_jit); - monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount); } type_init(hmp_tcg_register); diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c index b24d6a7..37b4b21 100644 --- a/accel/tcg/tcg-accel-ops.c +++ b/accel/tcg/tcg-accel-ops.c @@ -93,8 +93,6 @@ static void tcg_cpu_reset_hold(CPUState *cpu) /* mask must never be zero, except for A20 change call */ void tcg_handle_interrupt(CPUState *cpu, int mask) { - g_assert(bql_locked()); - cpu->interrupt_request |= mask; /* @@ -198,8 +196,10 @@ static inline void tcg_remove_all_breakpoints(CPUState *cpu) cpu_watchpoint_remove_all(cpu, BP_GDB); } -static void tcg_accel_ops_init(AccelOpsClass *ops) +static void tcg_accel_ops_init(AccelClass *ac) { + AccelOpsClass *ops = ac->ops; + if (qemu_tcg_mttcg_enabled()) { 
ops->create_vcpu_thread = mttcg_start_vcpu_thread; ops->kick_vcpu_thread = mttcg_kick_vcpu_thread; diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c index 6e5dc33..5904582 100644 --- a/accel/tcg/tcg-all.c +++ b/accel/tcg/tcg-all.c @@ -80,9 +80,9 @@ static void tcg_accel_instance_init(Object *obj) bool one_insn_per_tb; -static int tcg_init_machine(MachineState *ms) +static int tcg_init_machine(AccelState *as, MachineState *ms) { - TCGState *s = TCG_STATE(current_accel()); + TCGState *s = TCG_STATE(as); unsigned max_threads = 1; #ifndef CONFIG_USER_ONLY @@ -219,7 +219,7 @@ static void tcg_set_one_insn_per_tb(Object *obj, bool value, Error **errp) qatomic_set(&one_insn_per_tb, value); } -static int tcg_gdbstub_supported_sstep_flags(void) +static int tcg_gdbstub_supported_sstep_flags(AccelState *as) { /* * In replay mode all events will come from the log and can't be diff --git a/accel/xen/xen-all.c b/accel/xen/xen-all.c index de52a8f..bd0ff64 100644 --- a/accel/xen/xen-all.c +++ b/accel/xen/xen-all.c @@ -18,6 +18,7 @@ #include "hw/xen/xen_igd.h" #include "chardev/char.h" #include "qemu/accel.h" +#include "accel/dummy-cpus.h" #include "system/accel-ops.h" #include "system/cpus.h" #include "system/xen.h" @@ -63,7 +64,7 @@ static void xen_set_igd_gfx_passthru(Object *obj, bool value, Error **errp) xen_igd_gfx_pt_set(value, errp); } -static void xen_setup_post(MachineState *ms, AccelState *accel) +static void xen_setup_post(AccelState *as) { int rc; @@ -76,7 +77,7 @@ static void xen_setup_post(MachineState *ms, AccelState *accel) } } -static int xen_init(MachineState *ms) +static int xen_init(AccelState *as, MachineState *ms) { MachineClass *mc = MACHINE_GET_CLASS(ms); @@ -152,6 +153,7 @@ static void xen_accel_ops_class_init(ObjectClass *oc, const void *data) AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); ops->create_vcpu_thread = dummy_start_vcpu_thread; + ops->handle_interrupt = generic_handle_interrupt; } static const TypeInfo xen_accel_ops_type = { diff --git a/bsd-user/main.c b/bsd-user/main.c index 7c0a059..d0cc8e0 100644 --- a/bsd-user/main.c +++ b/bsd-user/main.c @@ -474,7 +474,7 @@ int main(int argc, char **argv) opt_one_insn_per_tb, &error_abort); object_property_set_int(OBJECT(accel), "tb-size", opt_tb_size, &error_abort); - ac->init_machine(NULL); + ac->init_machine(accel, NULL); } /* diff --git a/hmp-commands-info.hx b/hmp-commands-info.hx index 639a450..d797922 100644 --- a/hmp-commands-info.hx +++ b/hmp-commands-info.hx @@ -256,20 +256,6 @@ SRST Show dynamic compiler info. ERST -#if defined(CONFIG_TCG) - { - .name = "opcount", - .args_type = "", - .params = "", - .help = "show dynamic compiler opcode counters", - }, -#endif - -SRST - ``info opcount`` - Show dynamic compiler opcode counters -ERST - { .name = "sync-profile", .args_type = "mean:-m,no_coalesce:-n,max:i?", diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 162a56a..5eaf41a 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -442,6 +442,7 @@ struct qemu_work_item; * @opaque: User data. * @mem_io_pc: Host Program Counter at which the memory was accessed. * @accel: Pointer to accelerator specific state. + * @vcpu_dirty: Hardware accelerator is not synchronized with QEMU state * @kvm_fd: vCPU file descriptor for KVM. * @work_mutex: Lock to prevent multiple access to @work_list. * @work_list: List of pending asynchronous work. 
@@ -538,7 +539,6 @@ struct CPUState { uint32_t kvm_fetch_index; uint64_t dirty_pages; int kvm_vcpu_stats_fd; - bool vcpu_dirty; /* Use by accel-block: CPU is executing an ioctl() */ QemuLockCnt in_ioctl_lock; @@ -554,6 +554,7 @@ struct CPUState { uint32_t halted; int32_t exception_index; + bool vcpu_dirty; AccelCPUState *accel; /* Used to keep track of an outstanding cpu throttle thread for migration diff --git a/include/qemu/accel.h b/include/qemu/accel.h index fbd3d89..1c097ac 100644 --- a/include/qemu/accel.h +++ b/include/qemu/accel.h @@ -37,17 +37,20 @@ typedef struct AccelClass { /*< public >*/ const char *name; - int (*init_machine)(MachineState *ms); + /* Cached by accel_init_ops_interfaces() when created */ + AccelOpsClass *ops; + + int (*init_machine)(AccelState *as, MachineState *ms); bool (*cpu_common_realize)(CPUState *cpu, Error **errp); void (*cpu_common_unrealize)(CPUState *cpu); /* system related hooks */ - void (*setup_post)(MachineState *ms, AccelState *accel); - bool (*has_memory)(MachineState *ms, AddressSpace *as, + void (*setup_post)(AccelState *as); + bool (*has_memory)(AccelState *accel, AddressSpace *as, hwaddr start_addr, hwaddr size); /* gdbstub related hooks */ - int (*gdbstub_supported_sstep_flags)(void); + int (*gdbstub_supported_sstep_flags)(AccelState *as); bool *allowed; /* diff --git a/include/system/accel-ops.h b/include/system/accel-ops.h index 4c99d25..a786c7d 100644 --- a/include/system/accel-ops.h +++ b/include/system/accel-ops.h @@ -10,6 +10,7 @@ #ifndef ACCEL_OPS_H #define ACCEL_OPS_H +#include "qemu/accel.h" #include "exec/vaddr.h" #include "qom/object.h" @@ -31,7 +32,7 @@ struct AccelOpsClass { /*< public >*/ /* initialization function called when accel is chosen */ - void (*ops_init)(AccelOpsClass *ops); + void (*ops_init)(AccelClass *ac); bool (*cpus_are_resettable)(void); void (*cpu_reset_hold)(CPUState *cpu); @@ -40,12 +41,29 @@ struct AccelOpsClass { void (*kick_vcpu_thread)(CPUState *cpu); bool (*cpu_thread_is_idle)(CPUState *cpu); + /** + * synchronize_post_reset: + * synchronize_post_init: + * @cpu: The vCPU to synchronize. + * + * Request to synchronize QEMU vCPU registers to the hardware accelerator + * (QEMU is the reference). + */ void (*synchronize_post_reset)(CPUState *cpu); void (*synchronize_post_init)(CPUState *cpu); + /** + * synchronize_state: + * synchronize_pre_loadvm: + * @cpu: The vCPU to synchronize. + * + * Request to synchronize QEMU vCPU registers from the hardware accelerator + * (the hardware accelerator is the reference). + */ void (*synchronize_state)(CPUState *cpu); void (*synchronize_pre_loadvm)(CPUState *cpu); void (*synchronize_pre_resume)(bool step_pending); + /* handle_interrupt is mandatory. 
*/ void (*handle_interrupt)(CPUState *cpu, int mask); /** @@ -70,4 +88,6 @@ struct AccelOpsClass { void (*remove_all_breakpoints)(CPUState *cpu); }; +void generic_handle_interrupt(CPUState *cpu, int mask); + #endif /* ACCEL_OPS_H */ diff --git a/include/system/cpus.h b/include/system/cpus.h index 3226c76..69be6a7 100644 --- a/include/system/cpus.h +++ b/include/system/cpus.h @@ -7,11 +7,6 @@ void cpus_register_accel(const AccelOpsClass *i); /* return registers ops */ const AccelOpsClass *cpus_get_accel(void); -/* accel/dummy-cpus.c */ - -/* Create a dummy vcpu for AccelOpsClass->create_vcpu_thread */ -void dummy_start_vcpu_thread(CPUState *); - /* interface available for cpus accelerator threads */ /* For temporary buffers for forming a name */ diff --git a/include/system/hvf.h b/include/system/hvf.h index a9a502f..d3dcf08 100644 --- a/include/system/hvf.h +++ b/include/system/hvf.h @@ -14,10 +14,6 @@ #define HVF_H #include "qemu/accel.h" -#include "qemu/queue.h" -#include "exec/vaddr.h" -#include "qom/object.h" -#include "exec/vaddr.h" #ifdef COMPILING_PER_TARGET # ifdef CONFIG_HVF @@ -40,38 +36,4 @@ typedef struct HVFState HVFState; DECLARE_INSTANCE_CHECKER(HVFState, HVF_STATE, TYPE_HVF_ACCEL) -#ifdef COMPILING_PER_TARGET -struct hvf_sw_breakpoint { - vaddr pc; - vaddr saved_insn; - int use_count; - QTAILQ_ENTRY(hvf_sw_breakpoint) entry; -}; - -struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, - vaddr pc); -int hvf_sw_breakpoints_active(CPUState *cpu); - -int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp); -int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp); -int hvf_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type); -int hvf_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type); -void hvf_arch_remove_all_hw_breakpoints(void); - -/* - * hvf_update_guest_debug: - * @cs: CPUState for the CPU to update - * - * Update guest to enable or disable debugging. Per-arch specifics will be - * handled by calling down to hvf_arch_update_guest_debug. - */ -int hvf_update_guest_debug(CPUState *cpu); -void hvf_arch_update_guest_debug(CPUState *cpu); - -/* - * Return whether the guest supports debugging. 
- */ -bool hvf_arch_supports_guest_debug(void); -#endif /* COMPILING_PER_TARGET */ - #endif diff --git a/include/system/hvf_int.h b/include/system/hvf_int.h index d774e58..5150c7d 100644 --- a/include/system/hvf_int.h +++ b/include/system/hvf_int.h @@ -12,6 +12,8 @@ #define HVF_INT_H #include "qemu/queue.h" +#include "exec/vaddr.h" +#include "qom/object.h" #ifdef __aarch64__ #include <Hypervisor/Hypervisor.h> @@ -60,7 +62,6 @@ struct AccelCPUState { bool vtimer_masked; sigset_t unblock_ipi_mask; bool guest_debug_enabled; - bool dirty; }; void assert_hvf_ok_impl(hv_return_t ret, const char *file, unsigned int line, @@ -77,4 +78,36 @@ int hvf_put_registers(CPUState *); int hvf_get_registers(CPUState *); void hvf_kick_vcpu_thread(CPUState *cpu); +struct hvf_sw_breakpoint { + vaddr pc; + vaddr saved_insn; + int use_count; + QTAILQ_ENTRY(hvf_sw_breakpoint) entry; +}; + +struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, + vaddr pc); +int hvf_sw_breakpoints_active(CPUState *cpu); + +int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp); +int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp); +int hvf_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type); +int hvf_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type); +void hvf_arch_remove_all_hw_breakpoints(void); + +/* + * hvf_update_guest_debug: + * @cs: CPUState for the CPU to update + * + * Update guest to enable or disable debugging. Per-arch specifics will be + * handled by calling down to hvf_arch_update_guest_debug. + */ +int hvf_update_guest_debug(CPUState *cpu); +void hvf_arch_update_guest_debug(CPUState *cpu); + +/* + * Return whether the guest supports debugging. + */ +bool hvf_arch_supports_guest_debug(void); + #endif diff --git a/include/system/hw_accel.h b/include/system/hw_accel.h index 380e9e6..fa9228d 100644 --- a/include/system/hw_accel.h +++ b/include/system/hw_accel.h @@ -17,9 +17,26 @@ #include "system/whpx.h" #include "system/nvmm.h" +/** + * cpu_synchronize_state: + * cpu_synchronize_pre_loadvm: + * @cpu: The vCPU to synchronize. + * + * Request to synchronize QEMU vCPU registers from the hardware accelerator + * (the hardware accelerator is the reference). + */ void cpu_synchronize_state(CPUState *cpu); +void cpu_synchronize_pre_loadvm(CPUState *cpu); + +/** + * cpu_synchronize_post_reset: + * cpu_synchronize_post_init: + * @cpu: The vCPU to synchronize. + * + * Request to synchronize QEMU vCPU registers to the hardware accelerator + * (QEMU is the reference). + */ void cpu_synchronize_post_reset(CPUState *cpu); void cpu_synchronize_post_init(CPUState *cpu); -void cpu_synchronize_pre_loadvm(CPUState *cpu); #endif /* QEMU_HW_ACCEL_H */ diff --git a/include/system/kvm.h b/include/system/kvm.h index 4896a3c..3c7d314 100644 --- a/include/system/kvm.h +++ b/include/system/kvm.h @@ -318,14 +318,6 @@ int kvm_create_device(KVMState *s, uint64_t type, bool test); bool kvm_device_supported(int vmfd, uint64_t type); /** - * kvm_create_vcpu - Gets a parked KVM vCPU or creates a KVM vCPU - * @cpu: QOM CPUState object for which KVM vCPU has to be fetched/created. - * - * @returns: 0 when success, errno (<0) when failed. - */ -int kvm_create_vcpu(CPUState *cpu); - -/** * kvm_park_vcpu - Park QEMU KVM vCPU context * @cpu: QOM CPUState object for which QEMU KVM vCPU context has to be parked. 
* diff --git a/include/system/nvmm.h b/include/system/nvmm.h index 6971ddb..7390def 100644 --- a/include/system/nvmm.h +++ b/include/system/nvmm.h @@ -13,17 +13,18 @@ #define QEMU_NVMM_H #ifdef COMPILING_PER_TARGET - -#ifdef CONFIG_NVMM - -int nvmm_enabled(void); - -#else /* CONFIG_NVMM */ - -#define nvmm_enabled() (0) - -#endif /* CONFIG_NVMM */ - +# ifdef CONFIG_NVMM +# define CONFIG_NVMM_IS_POSSIBLE +# endif /* !CONFIG_NVMM */ +#else +# define CONFIG_NVMM_IS_POSSIBLE #endif /* COMPILING_PER_TARGET */ +#ifdef CONFIG_NVMM_IS_POSSIBLE +extern bool nvmm_allowed; +#define nvmm_enabled() (nvmm_allowed) +#else /* !CONFIG_NVMM_IS_POSSIBLE */ +#define nvmm_enabled() 0 +#endif /* !CONFIG_NVMM_IS_POSSIBLE */ + #endif /* QEMU_NVMM_H */ diff --git a/include/system/whpx.h b/include/system/whpx.h index 00ff409..00f6a3e 100644 --- a/include/system/whpx.h +++ b/include/system/whpx.h @@ -16,19 +16,20 @@ #define QEMU_WHPX_H #ifdef COMPILING_PER_TARGET +# ifdef CONFIG_WHPX +# define CONFIG_WHPX_IS_POSSIBLE +# endif /* !CONFIG_WHPX */ +#else +# define CONFIG_WHPX_IS_POSSIBLE +#endif /* COMPILING_PER_TARGET */ -#ifdef CONFIG_WHPX - -int whpx_enabled(void); +#ifdef CONFIG_WHPX_IS_POSSIBLE +extern bool whpx_allowed; +#define whpx_enabled() (whpx_allowed) bool whpx_apic_in_platform(void); - -#else /* CONFIG_WHPX */ - -#define whpx_enabled() (0) +#else /* !CONFIG_WHPX_IS_POSSIBLE */ +#define whpx_enabled() 0 #define whpx_apic_in_platform() (0) - -#endif /* CONFIG_WHPX */ - -#endif /* COMPILING_PER_TARGET */ +#endif /* !CONFIG_WHPX_IS_POSSIBLE */ #endif /* QEMU_WHPX_H */ diff --git a/linux-user/main.c b/linux-user/main.c index 5ac5b55..a9142ee 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -820,7 +820,7 @@ int main(int argc, char **argv, char **envp) opt_one_insn_per_tb, &error_abort); object_property_set_int(OBJECT(accel), "tb-size", opt_tb_size, &error_abort); - ac->init_machine(NULL); + ac->init_machine(accel, NULL); } /* diff --git a/monitor/hmp-cmds-target.c b/monitor/hmp-cmds-target.c index 8eaf70d..e982061 100644 --- a/monitor/hmp-cmds-target.c +++ b/monitor/hmp-cmds-target.c @@ -102,7 +102,7 @@ void hmp_info_registers(Monitor *mon, const QDict *qdict) if (all_cpus) { CPU_FOREACH(cs) { monitor_printf(mon, "\nCPU#%d\n", cs->cpu_index); - cpu_dump_state(cs, NULL, CPU_DUMP_FPU); + cpu_dump_state(cs, NULL, CPU_DUMP_FPU | CPU_DUMP_VPU); } } else { cs = vcpu >= 0 ? qemu_get_cpu(vcpu) : mon_get_cpu(mon); @@ -117,7 +117,7 @@ void hmp_info_registers(Monitor *mon, const QDict *qdict) } monitor_printf(mon, "\nCPU#%d\n", cs->cpu_index); - cpu_dump_state(cs, NULL, CPU_DUMP_FPU); + cpu_dump_state(cs, NULL, CPU_DUMP_FPU | CPU_DUMP_VPU); } } diff --git a/qapi/machine.json b/qapi/machine.json index 0650b8d..f712e7d 100644 --- a/qapi/machine.json +++ b/qapi/machine.json @@ -1762,24 +1762,6 @@ 'features': [ 'unstable' ] } ## -# @x-query-opcount: -# -# Query TCG opcode counters -# -# Features: -# -# @unstable: This command is meant for debugging. 
-# -# Returns: TCG opcode counters -# -# Since: 6.2 -## -{ 'command': 'x-query-opcount', - 'returns': 'HumanReadableText', - 'if': 'CONFIG_TCG', - 'features': [ 'unstable' ] } - -## # @x-query-ramblock: # # Query system ramblock information diff --git a/system/cpus.c b/system/cpus.c index d16b0df..0d0eec8 100644 --- a/system/cpus.c +++ b/system/cpus.c @@ -254,7 +254,7 @@ int64_t cpus_get_elapsed_ticks(void) return cpu_get_ticks(); } -static void generic_handle_interrupt(CPUState *cpu, int mask) +void generic_handle_interrupt(CPUState *cpu, int mask) { cpu->interrupt_request |= mask; @@ -265,11 +265,9 @@ static void generic_handle_interrupt(CPUState *cpu, int mask) void cpu_interrupt(CPUState *cpu, int mask) { - if (cpus_accel->handle_interrupt) { - cpus_accel->handle_interrupt(cpu, mask); - } else { - generic_handle_interrupt(cpu, mask); - } + g_assert(bql_locked()); + + cpus_accel->handle_interrupt(cpu, mask); } /* @@ -678,6 +676,8 @@ void cpus_register_accel(const AccelOpsClass *ops) { assert(ops != NULL); assert(ops->create_vcpu_thread != NULL); /* mandatory */ + assert(ops->handle_interrupt); + cpus_accel = ops; } diff --git a/system/memory.c b/system/memory.c index 76b44b8..e8d9b15 100644 --- a/system/memory.c +++ b/system/memory.c @@ -3501,7 +3501,7 @@ static void mtree_print_flatview(gpointer key, gpointer value, if (fvi->ac) { for (i = 0; i < fv_address_spaces->len; ++i) { as = g_array_index(fv_address_spaces, AddressSpace*, i); - if (fvi->ac->has_memory(current_machine, as, + if (fvi->ac->has_memory(current_machine->accelerator, as, int128_get64(range->addr.start), MR_SIZE(range->addr.size) + 1)) { qemu_printf(" %s", fvi->ac->name); diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c index 7b6d291..c9cfcdc 100644 --- a/target/arm/hvf/hvf.c +++ b/target/arm/hvf/hvf.c @@ -813,9 +813,9 @@ int hvf_put_registers(CPUState *cpu) static void flush_cpu_state(CPUState *cpu) { - if (cpu->accel->dirty) { + if (cpu->vcpu_dirty) { hvf_put_registers(cpu); - cpu->accel->dirty = false; + cpu->vcpu_dirty = false; } } diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c index 99e37a3..818b504 100644 --- a/target/i386/hvf/hvf.c +++ b/target/i386/hvf/hvf.c @@ -733,9 +733,9 @@ int hvf_vcpu_exec(CPUState *cpu) } do { - if (cpu->accel->dirty) { + if (cpu->vcpu_dirty) { hvf_put_registers(cpu); - cpu->accel->dirty = false; + cpu->vcpu_dirty = false; } if (hvf_inject_interrupts(cpu)) { diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c index 2057314..17fce1d 100644 --- a/target/i386/hvf/x86hvf.c +++ b/target/i386/hvf/x86hvf.c @@ -427,7 +427,7 @@ int hvf_process_events(CPUState *cs) X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; - if (!cs->accel->dirty) { + if (!cs->vcpu_dirty) { /* light weight sync for CPU_INTERRUPT_HARD and IF_MASK */ env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS); } diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c index 2144307..a5517b0 100644 --- a/target/i386/nvmm/nvmm-accel-ops.c +++ b/target/i386/nvmm/nvmm-accel-ops.c @@ -87,6 +87,7 @@ static void nvmm_accel_ops_class_init(ObjectClass *oc, const void *data) ops->create_vcpu_thread = nvmm_start_vcpu_thread; ops->kick_vcpu_thread = nvmm_kick_vcpu_thread; + ops->handle_interrupt = generic_handle_interrupt; ops->synchronize_post_reset = nvmm_cpu_synchronize_post_reset; ops->synchronize_post_init = nvmm_cpu_synchronize_post_init; diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c index f1c6120..b4a4d50 100644 --- a/target/i386/nvmm/nvmm-all.c +++ 
b/target/i386/nvmm/nvmm-all.c @@ -30,7 +30,6 @@ struct AccelCPUState { struct nvmm_vcpu vcpu; uint8_t tpr; bool stop; - bool dirty; /* Window-exiting for INTs/NMIs. */ bool int_window_exit; @@ -47,7 +46,7 @@ struct qemu_machine { /* -------------------------------------------------------------------------- */ -static bool nvmm_allowed; +bool nvmm_allowed; static struct qemu_machine qemu_mach; static struct nvmm_machine * @@ -508,7 +507,7 @@ nvmm_io_callback(struct nvmm_io *io) } /* Needed, otherwise infinite loop. */ - current_cpu->accel->dirty = false; + current_cpu->vcpu_dirty = false; } static void @@ -517,7 +516,7 @@ nvmm_mem_callback(struct nvmm_mem *mem) cpu_physical_memory_rw(mem->gpa, mem->data, mem->size, mem->write); /* Needed, otherwise infinite loop. */ - current_cpu->accel->dirty = false; + current_cpu->vcpu_dirty = false; } static struct nvmm_assist_callbacks nvmm_callbacks = { @@ -727,9 +726,9 @@ nvmm_vcpu_loop(CPUState *cpu) * Inner VCPU loop. */ do { - if (cpu->accel->dirty) { + if (cpu->vcpu_dirty) { nvmm_set_registers(cpu); - cpu->accel->dirty = false; + cpu->vcpu_dirty = false; } if (qcpu->stop) { @@ -827,32 +826,32 @@ static void do_nvmm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg) { nvmm_get_registers(cpu); - cpu->accel->dirty = true; + cpu->vcpu_dirty = true; } static void do_nvmm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg) { nvmm_set_registers(cpu); - cpu->accel->dirty = false; + cpu->vcpu_dirty = false; } static void do_nvmm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg) { nvmm_set_registers(cpu); - cpu->accel->dirty = false; + cpu->vcpu_dirty = false; } static void do_nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg) { - cpu->accel->dirty = true; + cpu->vcpu_dirty = true; } void nvmm_cpu_synchronize_state(CPUState *cpu) { - if (!cpu->accel->dirty) { + if (!cpu->vcpu_dirty) { run_on_cpu(cpu, do_nvmm_cpu_synchronize_state, RUN_ON_CPU_NULL); } } @@ -982,7 +981,7 @@ nvmm_init_vcpu(CPUState *cpu) } } - qcpu->dirty = true; + qcpu->vcpu_dirty = true; cpu->accel = qcpu; return 0; @@ -1153,7 +1152,7 @@ static struct RAMBlockNotifier nvmm_ram_notifier = { /* -------------------------------------------------------------------------- */ static int -nvmm_accel_init(MachineState *ms) +nvmm_accel_init(AccelState *as, MachineState *ms) { int ret, err; @@ -1193,12 +1192,6 @@ nvmm_accel_init(MachineState *ms) return 0; } -int -nvmm_enabled(void) -{ - return nvmm_allowed; -} - static void nvmm_accel_class_init(ObjectClass *oc, const void *data) { diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c index b8bebe4..31cf15f 100644 --- a/target/i386/whpx/whpx-accel-ops.c +++ b/target/i386/whpx/whpx-accel-ops.c @@ -90,6 +90,7 @@ static void whpx_accel_ops_class_init(ObjectClass *oc, const void *data) ops->create_vcpu_thread = whpx_start_vcpu_thread; ops->kick_vcpu_thread = whpx_kick_vcpu_thread; ops->cpu_thread_is_idle = whpx_vcpu_thread_is_idle; + ops->handle_interrupt = generic_handle_interrupt; ops->synchronize_post_reset = whpx_cpu_synchronize_post_reset; ops->synchronize_post_init = whpx_cpu_synchronize_post_init; diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index cf6d3e4..721c478 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -237,13 +237,12 @@ struct AccelCPUState { uint64_t tpr; uint64_t apic_base; bool interruption_pending; - bool dirty; /* Must be the last field as it may have a tail */ WHV_RUN_VP_EXIT_CONTEXT exit_ctx; }; 
-static bool whpx_allowed; +bool whpx_allowed; static bool whp_dispatch_initialized; static HMODULE hWinHvPlatform, hWinHvEmulation; static uint32_t max_vcpu_index; @@ -836,7 +835,7 @@ static HRESULT CALLBACK whpx_emu_setreg_callback( * The emulator just successfully wrote the register state. We clear the * dirty state so we avoid the double write on resume of the VP. */ - cpu->accel->dirty = false; + cpu->vcpu_dirty = false; return hr; } @@ -1391,7 +1390,7 @@ static int whpx_last_vcpu_stopping(CPUState *cpu) /* Returns the address of the next instruction that is about to be executed. */ static vaddr whpx_vcpu_get_pc(CPUState *cpu, bool exit_context_valid) { - if (cpu->accel->dirty) { + if (cpu->vcpu_dirty) { /* The CPU registers have been modified by other parts of QEMU. */ return cpu_env(cpu)->eip; } else if (exit_context_valid) { @@ -1704,9 +1703,9 @@ static int whpx_vcpu_run(CPUState *cpu) } do { - if (cpu->accel->dirty) { + if (cpu->vcpu_dirty) { whpx_set_registers(cpu, WHPX_SET_RUNTIME_STATE); - cpu->accel->dirty = false; + cpu->vcpu_dirty = false; } if (exclusive_step_mode == WHPX_STEP_NONE) { @@ -2054,9 +2053,9 @@ static int whpx_vcpu_run(CPUState *cpu) static void do_whpx_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg) { - if (!cpu->accel->dirty) { + if (!cpu->vcpu_dirty) { whpx_get_registers(cpu); - cpu->accel->dirty = true; + cpu->vcpu_dirty = true; } } @@ -2064,20 +2063,20 @@ static void do_whpx_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg) { whpx_set_registers(cpu, WHPX_SET_RESET_STATE); - cpu->accel->dirty = false; + cpu->vcpu_dirty = false; } static void do_whpx_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg) { whpx_set_registers(cpu, WHPX_SET_FULL_STATE); - cpu->accel->dirty = false; + cpu->vcpu_dirty = false; } static void do_whpx_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg) { - cpu->accel->dirty = true; + cpu->vcpu_dirty = true; } /* @@ -2086,7 +2085,7 @@ static void do_whpx_cpu_synchronize_pre_loadvm(CPUState *cpu, void whpx_cpu_synchronize_state(CPUState *cpu) { - if (!cpu->accel->dirty) { + if (!cpu->vcpu_dirty) { run_on_cpu(cpu, do_whpx_cpu_synchronize_state, RUN_ON_CPU_NULL); } } @@ -2226,7 +2225,7 @@ int whpx_init_vcpu(CPUState *cpu) } vcpu->interruptable = true; - vcpu->dirty = true; + cpu->vcpu_dirty = true; cpu->accel = vcpu; max_vcpu_index = max(max_vcpu_index, cpu->cpu_index); qemu_add_vm_change_state_handler(whpx_cpu_update_state, env); @@ -2505,7 +2504,7 @@ static void whpx_set_kernel_irqchip(Object *obj, Visitor *v, * Partition support */ -static int whpx_accel_init(MachineState *ms) +static int whpx_accel_init(AccelState *as, MachineState *ms) { struct whpx_state *whpx; int ret; @@ -2689,11 +2688,6 @@ error: return ret; } -int whpx_enabled(void) -{ - return whpx_allowed; -} - bool whpx_apic_in_platform(void) { return whpx_global.apic_in_platform; } diff --git a/tests/qtest/qmp-cmd-test.c b/tests/qtest/qmp-cmd-test.c index 040d042..cf71876 100644 --- a/tests/qtest/qmp-cmd-test.c +++ b/tests/qtest/qmp-cmd-test.c @@ -51,7 +51,6 @@ static int query_error_class(const char *cmd) { "x-query-usb", ERROR_CLASS_GENERIC_ERROR }, /* Only valid with accel=tcg */ { "x-query-jit", ERROR_CLASS_GENERIC_ERROR }, - { "x-query-opcount", ERROR_CLASS_GENERIC_ERROR }, { "xen-event-list", ERROR_CLASS_GENERIC_ERROR }, { NULL, -1 } }; |