From 641b841722836cd08d39701f6e4932543afcf41a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?=
Date: Sat, 24 Jun 2023 15:31:44 +0200
Subject: accel: Re-enable WHPX cross-build on case-sensitive filesystems
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Since MinGW commit 395dcfdea ("rename hyper-v headers and def files to
lower case") [*], WinHvPlatform.h and WinHvEmulation.h were renamed to
winhvplatform.h and winhvemulation.h respectively.

The mingw64-headers package included in the Fedora version we use for
CI already contains this commit, so meson fails to detect the
present-but-renamed headers while cross-building (on case-sensitive
filesystems).

Use the renamed headers in order to detect them and successfully
cross-build with the WHPX accelerator.

Note: on Windows hosts the libraries are still named WinHvPlatform.dll
and WinHvEmulation.dll, so we don't bother renaming the definitions
used by load_whp_dispatch_fns() in target/i386/whpx/whpx-all.c.

[*] https://sourceforge.net/p/mingw-w64/mingw-w64/ci/395dcfdea

Signed-off-by: Philippe Mathieu-Daudé
Reviewed-by: Daniel P. Berrangé
Message-Id: <20230624142211.8888-3-philmd@linaro.org>
---
 target/i386/whpx/whpx-all.c      | 4 ++--
 target/i386/whpx/whpx-internal.h | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

(limited to 'target')

diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index 52af816..7fa68d4 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -31,8 +31,8 @@
 #include "whpx-internal.h"
 #include "whpx-accel-ops.h"
 
-#include <WinHvPlatform.h>
-#include <WinHvEmulation.h>
+#include <winhvplatform.h>
+#include <winhvemulation.h>
 
 #define HYPERV_APIC_BUS_FREQUENCY      (200000000ULL)
 
diff --git a/target/i386/whpx/whpx-internal.h b/target/i386/whpx/whpx-internal.h
index 06429d8..6633e9c 100644
--- a/target/i386/whpx/whpx-internal.h
+++ b/target/i386/whpx/whpx-internal.h
@@ -2,8 +2,8 @@
 #define TARGET_I386_WHPX_INTERNAL_H
 
 #include <windows.h>
-#include <WinHvPlatform.h>
-#include <WinHvEmulation.h>
+#include <winhvplatform.h>
+#include <winhvemulation.h>
 
 typedef enum WhpxBreakpointState {
     WHPX_BP_CLEARED = 0,
-- 
cgit v1.1


From af03d22a0a2aec52f8e6959e8bc5e318ecc72e6c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?=
Date: Wed, 29 Mar 2023 18:46:12 +0200
Subject: accel: Remove unused hThread variable on TCG/WHPX
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

On Windows hosts, cpu->hThread is assigned but never accessed:
remove it.

Signed-off-by: Philippe Mathieu-Daudé
Reviewed-by: Richard Henderson
Message-Id: <20230624174121.11508-4-philmd@linaro.org>
---
 target/i386/whpx/whpx-accel-ops.c | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'target')

diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c
index e8dc4b3..67cad86 100644
--- a/target/i386/whpx/whpx-accel-ops.c
+++ b/target/i386/whpx/whpx-accel-ops.c
@@ -71,9 +71,6 @@ static void whpx_start_vcpu_thread(CPUState *cpu)
              cpu->cpu_index);
     qemu_thread_create(cpu->thread, thread_name, whpx_cpu_thread_fn,
                        cpu, QEMU_THREAD_JOINABLE);
-#ifdef _WIN32
-    cpu->hThread = qemu_thread_get_handle(cpu->thread);
-#endif
 }
 
 static void whpx_kick_vcpu_thread(CPUState *cpu)
-- 
cgit v1.1


From 43477340c38c7980d4f4d5bfedf1bd0146ccb577 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?=
Date: Wed, 5 Apr 2023 09:29:59 +0200
Subject: accel: Fix a leak on Windows HAX
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

hThread is only used on the error path in hax_kick_vcpu_thread().
Fixes: b0cb0a66d6 ("Plumb the HAXM-based hardware acceleration support") Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: <20230624174121.11508-5-philmd@linaro.org> --- target/i386/hax/hax-all.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'target') diff --git a/target/i386/hax/hax-all.c b/target/i386/hax/hax-all.c index 3e5992a..a2321a1 100644 --- a/target/i386/hax/hax-all.c +++ b/target/i386/hax/hax-all.c @@ -205,6 +205,9 @@ int hax_vcpu_destroy(CPUState *cpu) */ hax_close_fd(vcpu->fd); hax_global.vm->vcpus[vcpu->vcpu_id] = NULL; +#ifdef _WIN32 + CloseHandle(cpu->hThread); +#endif g_free(vcpu); return 0; } -- cgit v1.1 From 83d0f7f95cc88bbf31ba000d1d2f05e8563f9d97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 5 Apr 2023 09:15:26 +0200 Subject: accel: Destroy HAX vCPU threads once done MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the vCPU thread finished its processing, destroy it and signal its destruction to generic vCPU management layer. Add a sanity check for the vCPU accelerator context. Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: <20230624174121.11508-6-philmd@linaro.org> --- target/i386/hax/hax-accel-ops.c | 3 +++ target/i386/hax/hax-all.c | 1 + 2 files changed, 4 insertions(+) (limited to 'target') diff --git a/target/i386/hax/hax-accel-ops.c b/target/i386/hax/hax-accel-ops.c index 18114fe..0157a62 100644 --- a/target/i386/hax/hax-accel-ops.c +++ b/target/i386/hax/hax-accel-ops.c @@ -53,6 +53,8 @@ static void *hax_cpu_thread_fn(void *arg) qemu_wait_io_event(cpu); } while (!cpu->unplug || cpu_can_run(cpu)); + hax_vcpu_destroy(cpu); + cpu_thread_signal_destroyed(cpu); rcu_unregister_thread(); return NULL; } @@ -69,6 +71,7 @@ static void hax_start_vcpu_thread(CPUState *cpu) cpu->cpu_index); qemu_thread_create(cpu->thread, thread_name, hax_cpu_thread_fn, cpu, QEMU_THREAD_JOINABLE); + assert(cpu->hax_vcpu); #ifdef _WIN32 cpu->hThread = qemu_thread_get_handle(cpu->thread); #endif diff --git a/target/i386/hax/hax-all.c b/target/i386/hax/hax-all.c index a2321a1..38a4323 100644 --- a/target/i386/hax/hax-all.c +++ b/target/i386/hax/hax-all.c @@ -209,6 +209,7 @@ int hax_vcpu_destroy(CPUState *cpu) CloseHandle(cpu->hThread); #endif g_free(vcpu); + cpu->hax_vcpu = NULL; return 0; } -- cgit v1.1 From 6ecd2cd0dcfa733bdad7e97ee913ff44fc7681aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 29 Mar 2023 18:56:35 +0200 Subject: accel: Rename 'hax_vcpu' as 'accel' in CPUState MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All accelerators will share a single opaque context in CPUState. Start by renaming 'hax_vcpu' as 'accel'. 
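As an illustration of where the series is heading, here is a simplified
sketch (not the exact declaration in include/hw/core/cpu.h) of the
renamed per-vCPU pointer:

    struct CPUState {
        /* ... */
        /* Accelerator-specific per-vCPU state; still carries the
         * HAX-specific type at this point in the series. */
        struct hax_vcpu_state *accel;   /* was: hax_vcpu */
        /* ... */
    };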
Reviewed-by: Richard Henderson Signed-off-by: Philippe Mathieu-Daudé Message-Id: <20230624174121.11508-7-philmd@linaro.org> --- target/i386/hax/hax-accel-ops.c | 2 +- target/i386/hax/hax-all.c | 18 +++++++++--------- target/i386/nvmm/nvmm-all.c | 6 +++--- target/i386/whpx/whpx-all.c | 6 +++--- 4 files changed, 16 insertions(+), 16 deletions(-) (limited to 'target') diff --git a/target/i386/hax/hax-accel-ops.c b/target/i386/hax/hax-accel-ops.c index 0157a62..a8512ef 100644 --- a/target/i386/hax/hax-accel-ops.c +++ b/target/i386/hax/hax-accel-ops.c @@ -71,7 +71,7 @@ static void hax_start_vcpu_thread(CPUState *cpu) cpu->cpu_index); qemu_thread_create(cpu->thread, thread_name, hax_cpu_thread_fn, cpu, QEMU_THREAD_JOINABLE); - assert(cpu->hax_vcpu); + assert(cpu->accel); #ifdef _WIN32 cpu->hThread = qemu_thread_get_handle(cpu->thread); #endif diff --git a/target/i386/hax/hax-all.c b/target/i386/hax/hax-all.c index 38a4323..3865ff9 100644 --- a/target/i386/hax/hax-all.c +++ b/target/i386/hax/hax-all.c @@ -62,7 +62,7 @@ int valid_hax_tunnel_size(uint16_t size) hax_fd hax_vcpu_get_fd(CPUArchState *env) { - struct hax_vcpu_state *vcpu = env_cpu(env)->hax_vcpu; + struct hax_vcpu_state *vcpu = env_cpu(env)->accel; if (!vcpu) { return HAX_INVALID_FD; } @@ -188,7 +188,7 @@ int hax_vcpu_create(int id) int hax_vcpu_destroy(CPUState *cpu) { - struct hax_vcpu_state *vcpu = cpu->hax_vcpu; + struct hax_vcpu_state *vcpu = cpu->accel; if (!hax_global.vm) { fprintf(stderr, "vcpu %x destroy failed, vm is null\n", vcpu->vcpu_id); @@ -209,7 +209,7 @@ int hax_vcpu_destroy(CPUState *cpu) CloseHandle(cpu->hThread); #endif g_free(vcpu); - cpu->hax_vcpu = NULL; + cpu->accel = NULL; return 0; } @@ -223,7 +223,7 @@ int hax_init_vcpu(CPUState *cpu) exit(-1); } - cpu->hax_vcpu = hax_global.vm->vcpus[cpu->cpu_index]; + cpu->accel = hax_global.vm->vcpus[cpu->cpu_index]; cpu->vcpu_dirty = true; qemu_register_reset(hax_reset_vcpu_state, cpu->env_ptr); @@ -415,7 +415,7 @@ static int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port, static int hax_vcpu_interrupt(CPUArchState *env) { CPUState *cpu = env_cpu(env); - struct hax_vcpu_state *vcpu = cpu->hax_vcpu; + struct hax_vcpu_state *vcpu = cpu->accel; struct hax_tunnel *ht = vcpu->tunnel; /* @@ -447,7 +447,7 @@ static int hax_vcpu_interrupt(CPUArchState *env) void hax_raise_event(CPUState *cpu) { - struct hax_vcpu_state *vcpu = cpu->hax_vcpu; + struct hax_vcpu_state *vcpu = cpu->accel; if (!vcpu) { return; @@ -468,7 +468,7 @@ static int hax_vcpu_hax_exec(CPUArchState *env) int ret = 0; CPUState *cpu = env_cpu(env); X86CPU *x86_cpu = X86_CPU(cpu); - struct hax_vcpu_state *vcpu = cpu->hax_vcpu; + struct hax_vcpu_state *vcpu = cpu->accel; struct hax_tunnel *ht = vcpu->tunnel; if (!hax_enabled()) { @@ -1114,8 +1114,8 @@ void hax_reset_vcpu_state(void *opaque) { CPUState *cpu; for (cpu = first_cpu; cpu != NULL; cpu = CPU_NEXT(cpu)) { - cpu->hax_vcpu->tunnel->user_event_pending = 0; - cpu->hax_vcpu->tunnel->ready_for_interrupt_injection = 0; + cpu->accel->tunnel->user_event_pending = 0; + cpu->accel->tunnel->ready_for_interrupt_injection = 0; } } diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c index b75738e..cf4f0af 100644 --- a/target/i386/nvmm/nvmm-all.c +++ b/target/i386/nvmm/nvmm-all.c @@ -52,7 +52,7 @@ static struct qemu_machine qemu_mach; static struct qemu_vcpu * get_qemu_vcpu(CPUState *cpu) { - return (struct qemu_vcpu *)cpu->hax_vcpu; + return (struct qemu_vcpu *)cpu->accel; } static struct nvmm_machine * @@ -995,7 +995,7 @@ 
nvmm_init_vcpu(CPUState *cpu)
 }
 
     cpu->vcpu_dirty = true;
-    cpu->hax_vcpu = (struct hax_vcpu_state *)qcpu;
+    cpu->accel = (struct hax_vcpu_state *)qcpu;
 
     return 0;
 }
 
@@ -1030,7 +1030,7 @@ nvmm_destroy_vcpu(CPUState *cpu)
     struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
 
     nvmm_vcpu_destroy(mach, &qcpu->vcpu);
-    g_free(cpu->hax_vcpu);
+    g_free(cpu->accel);
 }
 
 /* -------------------------------------------------------------------------- */
 
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index 7fa68d4..98dfa5c 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -262,7 +262,7 @@ static bool whpx_has_xsave(void)
 
 static struct whpx_vcpu *get_whpx_vcpu(CPUState *cpu)
 {
-    return (struct whpx_vcpu *)cpu->hax_vcpu;
+    return (struct whpx_vcpu *)cpu->accel;
 }
 
 static WHV_X64_SEGMENT_REGISTER whpx_seg_q2h(const SegmentCache *qs, int v86,
@@ -2258,7 +2258,7 @@ int whpx_init_vcpu(CPUState *cpu)
 
     vcpu->interruptable = true;
     cpu->vcpu_dirty = true;
-    cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu;
+    cpu->accel = (struct hax_vcpu_state *)vcpu;
     max_vcpu_index = max(max_vcpu_index, cpu->cpu_index);
     qemu_add_vm_change_state_handler(whpx_cpu_update_state, cpu->env_ptr);
 
@@ -2300,7 +2300,7 @@ void whpx_destroy_vcpu(CPUState *cpu)
 
     whp_dispatch.WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index);
     whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator);
-    g_free(cpu->hax_vcpu);
+    g_free(cpu->accel);
     return;
 }
-- 
cgit v1.1


From f861b3f39063619b2d169cbacf455a00c22a1584 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?=
Date: Wed, 29 Mar 2023 19:01:49 +0200
Subject: accel: Rename HAX 'struct hax_vcpu_state' -> AccelCPUState
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We want all accelerators to share the same opaque pointer in
CPUState. Start with the HAX context, renaming its forward-declared
structure 'hax_vcpu_state' to 'AccelCPUState'. Document the CPUState
field. Directly use the typedef. Remove the amusing but now
unnecessary casts in NVMM / WHPX.
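The resulting pattern, sketched under the assumption that common code
only ever handles the pointer opaquely while the accelerator supplies
the definition:

    /* Common code: an opaque forward declaration is enough. */
    typedef struct AccelCPUState AccelCPUState;

    /* Accelerator code (here target/i386/hax/hax-i386.h): the body. */
    struct AccelCPUState {
        hax_fd fd;
        int vcpu_id;
        struct hax_tunnel *tunnel;
        /* ... */
    };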
Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: <20230624174121.11508-8-philmd@linaro.org> --- target/i386/hax/hax-all.c | 16 ++++++++-------- target/i386/hax/hax-i386.h | 9 +++++---- target/i386/hax/hax-posix.c | 4 ++-- target/i386/hax/hax-windows.c | 4 ++-- target/i386/nvmm/nvmm-all.c | 2 +- target/i386/whpx/whpx-all.c | 2 +- 6 files changed, 19 insertions(+), 18 deletions(-) (limited to 'target') diff --git a/target/i386/hax/hax-all.c b/target/i386/hax/hax-all.c index 3865ff9..9d9011c 100644 --- a/target/i386/hax/hax-all.c +++ b/target/i386/hax/hax-all.c @@ -62,7 +62,7 @@ int valid_hax_tunnel_size(uint16_t size) hax_fd hax_vcpu_get_fd(CPUArchState *env) { - struct hax_vcpu_state *vcpu = env_cpu(env)->accel; + AccelCPUState *vcpu = env_cpu(env)->accel; if (!vcpu) { return HAX_INVALID_FD; } @@ -136,7 +136,7 @@ static int hax_version_support(struct hax_state *hax) int hax_vcpu_create(int id) { - struct hax_vcpu_state *vcpu = NULL; + AccelCPUState *vcpu = NULL; int ret; if (!hax_global.vm) { @@ -149,7 +149,7 @@ int hax_vcpu_create(int id) return 0; } - vcpu = g_new0(struct hax_vcpu_state, 1); + vcpu = g_new0(AccelCPUState, 1); ret = hax_host_create_vcpu(hax_global.vm->fd, id); if (ret) { @@ -188,7 +188,7 @@ int hax_vcpu_create(int id) int hax_vcpu_destroy(CPUState *cpu) { - struct hax_vcpu_state *vcpu = cpu->accel; + AccelCPUState *vcpu = cpu->accel; if (!hax_global.vm) { fprintf(stderr, "vcpu %x destroy failed, vm is null\n", vcpu->vcpu_id); @@ -263,7 +263,7 @@ struct hax_vm *hax_vm_create(struct hax_state *hax, int max_cpus) } vm->numvcpus = max_cpus; - vm->vcpus = g_new0(struct hax_vcpu_state *, vm->numvcpus); + vm->vcpus = g_new0(AccelCPUState *, vm->numvcpus); for (i = 0; i < vm->numvcpus; i++) { vm->vcpus[i] = NULL; } @@ -415,7 +415,7 @@ static int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port, static int hax_vcpu_interrupt(CPUArchState *env) { CPUState *cpu = env_cpu(env); - struct hax_vcpu_state *vcpu = cpu->accel; + AccelCPUState *vcpu = cpu->accel; struct hax_tunnel *ht = vcpu->tunnel; /* @@ -447,7 +447,7 @@ static int hax_vcpu_interrupt(CPUArchState *env) void hax_raise_event(CPUState *cpu) { - struct hax_vcpu_state *vcpu = cpu->accel; + AccelCPUState *vcpu = cpu->accel; if (!vcpu) { return; @@ -468,7 +468,7 @@ static int hax_vcpu_hax_exec(CPUArchState *env) int ret = 0; CPUState *cpu = env_cpu(env); X86CPU *x86_cpu = X86_CPU(cpu); - struct hax_vcpu_state *vcpu = cpu->accel; + AccelCPUState *vcpu = cpu->accel; struct hax_tunnel *ht = vcpu->tunnel; if (!hax_enabled()) { diff --git a/target/i386/hax/hax-i386.h b/target/i386/hax/hax-i386.h index 409ebdb..4372ee5 100644 --- a/target/i386/hax/hax-i386.h +++ b/target/i386/hax/hax-i386.h @@ -25,7 +25,8 @@ typedef HANDLE hax_fd; #endif extern struct hax_state hax_global; -struct hax_vcpu_state { + +struct AccelCPUState { hax_fd fd; int vcpu_id; struct hax_tunnel *tunnel; @@ -46,7 +47,7 @@ struct hax_vm { hax_fd fd; int id; int numvcpus; - struct hax_vcpu_state **vcpus; + AccelCPUState **vcpus; }; /* Functions exported to host specific mode */ @@ -57,7 +58,7 @@ int valid_hax_tunnel_size(uint16_t size); int hax_mod_version(struct hax_state *hax, struct hax_module_version *version); int hax_inject_interrupt(CPUArchState *env, int vector); struct hax_vm *hax_vm_create(struct hax_state *hax, int max_cpus); -int hax_vcpu_run(struct hax_vcpu_state *vcpu); +int hax_vcpu_run(AccelCPUState *vcpu); int hax_vcpu_create(int id); void hax_kick_vcpu_thread(CPUState *cpu); @@ -76,7 +77,7 @@ int 
hax_host_create_vm(struct hax_state *hax, int *vm_id); hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id); int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid); hax_fd hax_host_open_vcpu(int vmid, int vcpuid); -int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu); +int hax_host_setup_vcpu_channel(AccelCPUState *vcpu); hax_fd hax_mod_open(void); void hax_memory_init(void); diff --git a/target/i386/hax/hax-posix.c b/target/i386/hax/hax-posix.c index ac1a510..a057a5b 100644 --- a/target/i386/hax/hax-posix.c +++ b/target/i386/hax/hax-posix.c @@ -205,7 +205,7 @@ hax_fd hax_host_open_vcpu(int vmid, int vcpuid) return fd; } -int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu) +int hax_host_setup_vcpu_channel(AccelCPUState *vcpu) { int ret; struct hax_tunnel_info info; @@ -227,7 +227,7 @@ int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu) return 0; } -int hax_vcpu_run(struct hax_vcpu_state *vcpu) +int hax_vcpu_run(AccelCPUState *vcpu) { return ioctl(vcpu->fd, HAX_VCPU_IOCTL_RUN, NULL); } diff --git a/target/i386/hax/hax-windows.c b/target/i386/hax/hax-windows.c index 59afa21..bf4b0ad 100644 --- a/target/i386/hax/hax-windows.c +++ b/target/i386/hax/hax-windows.c @@ -301,7 +301,7 @@ hax_fd hax_host_open_vcpu(int vmid, int vcpuid) return hDeviceVCPU; } -int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu) +int hax_host_setup_vcpu_channel(AccelCPUState *vcpu) { hax_fd hDeviceVCPU = vcpu->fd; int ret; @@ -327,7 +327,7 @@ int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu) return 0; } -int hax_vcpu_run(struct hax_vcpu_state *vcpu) +int hax_vcpu_run(AccelCPUState *vcpu) { int ret; HANDLE hDeviceVCPU = vcpu->fd; diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c index cf4f0af..b3c3adc 100644 --- a/target/i386/nvmm/nvmm-all.c +++ b/target/i386/nvmm/nvmm-all.c @@ -995,7 +995,7 @@ nvmm_init_vcpu(CPUState *cpu) } cpu->vcpu_dirty = true; - cpu->accel = (struct hax_vcpu_state *)qcpu; + cpu->accel = qcpu; return 0; } diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index 98dfa5c..7fc8d4f 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -2258,7 +2258,7 @@ int whpx_init_vcpu(CPUState *cpu) vcpu->interruptable = true; cpu->vcpu_dirty = true; - cpu->accel = (struct hax_vcpu_state *)vcpu; + cpu->accel = vcpu; max_vcpu_index = max(max_vcpu_index, cpu->cpu_index); qemu_add_vm_change_state_handler(whpx_cpu_update_state, cpu->env_ptr); -- cgit v1.1 From 642ce52d8ea9e1f80aa6013ad55bb66e51f1c76e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 29 Mar 2023 19:13:09 +0200 Subject: accel: Move HAX hThread to accelerator context MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit hThread variable is only used by the HAX accelerator, so move it to the accelerator specific context. 
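For context, the handle is what lets hax_kick_vcpu_thread() interrupt a
vCPU thread blocked in the HAX run ioctl, by queueing a no-op user-mode
APC on it (a sketch based on the existing code in hax-windows.c, also
visible in the hunk below):

    static void CALLBACK dummy_apc_func(ULONG_PTR arg)
    {
        /* queueing the APC is enough to make the alertable wait return */
    }

    if (!QueueUserAPC(dummy_apc_func, cpu->accel->hThread, 0)) {
        fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
                __func__, GetLastError());
        exit(1);
    }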
Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: <20230624174121.11508-9-philmd@linaro.org> --- target/i386/hax/hax-accel-ops.c | 2 +- target/i386/hax/hax-all.c | 2 +- target/i386/hax/hax-i386.h | 3 +++ target/i386/hax/hax-windows.c | 2 +- 4 files changed, 6 insertions(+), 3 deletions(-) (limited to 'target') diff --git a/target/i386/hax/hax-accel-ops.c b/target/i386/hax/hax-accel-ops.c index a8512ef..5031096 100644 --- a/target/i386/hax/hax-accel-ops.c +++ b/target/i386/hax/hax-accel-ops.c @@ -73,7 +73,7 @@ static void hax_start_vcpu_thread(CPUState *cpu) cpu, QEMU_THREAD_JOINABLE); assert(cpu->accel); #ifdef _WIN32 - cpu->hThread = qemu_thread_get_handle(cpu->thread); + cpu->accel->hThread = qemu_thread_get_handle(cpu->thread); #endif } diff --git a/target/i386/hax/hax-all.c b/target/i386/hax/hax-all.c index 9d9011c..18d78e5 100644 --- a/target/i386/hax/hax-all.c +++ b/target/i386/hax/hax-all.c @@ -206,7 +206,7 @@ int hax_vcpu_destroy(CPUState *cpu) hax_close_fd(vcpu->fd); hax_global.vm->vcpus[vcpu->vcpu_id] = NULL; #ifdef _WIN32 - CloseHandle(cpu->hThread); + CloseHandle(vcpu->hThread); #endif g_free(vcpu); cpu->accel = NULL; diff --git a/target/i386/hax/hax-i386.h b/target/i386/hax/hax-i386.h index 4372ee5..87153f4 100644 --- a/target/i386/hax/hax-i386.h +++ b/target/i386/hax/hax-i386.h @@ -27,6 +27,9 @@ typedef HANDLE hax_fd; extern struct hax_state hax_global; struct AccelCPUState { +#ifdef _WIN32 + HANDLE hThread; +#endif hax_fd fd; int vcpu_id; struct hax_tunnel *tunnel; diff --git a/target/i386/hax/hax-windows.c b/target/i386/hax/hax-windows.c index bf4b0ad..4bf6cc0 100644 --- a/target/i386/hax/hax-windows.c +++ b/target/i386/hax/hax-windows.c @@ -476,7 +476,7 @@ void hax_kick_vcpu_thread(CPUState *cpu) */ cpu->exit_request = 1; if (!qemu_cpu_is_self(cpu)) { - if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) { + if (!QueueUserAPC(dummy_apc_func, cpu->accel->hThread, 0)) { fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n", __func__, GetLastError()); exit(1); -- cgit v1.1 From 8c12c76df2373b61764c2cacb4a6c505f874fa5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 5 Apr 2023 10:04:07 +0200 Subject: accel: Remove NVMM unreachable error path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit g_malloc0() can not fail. Remove the unreachable error path. 
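A minimal illustration of the GLib contract relied upon here (not part
of the patch):

    buf = g_malloc0(size);      /* aborts the process on failure,
                                 * so it never returns NULL */
    buf = g_try_malloc0(size);  /* may return NULL; only this variant
                                 * needs a caller-side error path */

See: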
https://developer-old.gnome.org/glib/stable/glib-Memory-Allocation.html#glib-Memory-Allocation.description

Signed-off-by: Philippe Mathieu-Daudé
Reviewed-by: Richard Henderson
Message-Id: <20230624174121.11508-10-philmd@linaro.org>
---
 target/i386/nvmm/nvmm-all.c | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'target')

diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c
index b3c3adc..90e9e0a 100644
--- a/target/i386/nvmm/nvmm-all.c
+++ b/target/i386/nvmm/nvmm-all.c
@@ -943,10 +943,6 @@ nvmm_init_vcpu(CPUState *cpu)
     }
 
     qcpu = g_malloc0(sizeof(*qcpu));
-    if (qcpu == NULL) {
-        error_report("NVMM: Failed to allocate VCPU context.");
-        return -ENOMEM;
-    }
 
     ret = nvmm_vcpu_create(mach, cpu->cpu_index, &qcpu->vcpu);
     if (ret == -1) {
-- 
cgit v1.1


From c5beb26a2f5a641a3c4ef9556f48ae05ea86e30d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?=
Date: Thu, 23 Mar 2023 13:38:34 +0100
Subject: accel: Rename NVMM 'struct qemu_vcpu' -> AccelCPUState
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We want all accelerators to share the same opaque pointer in
CPUState. Rename NVMM 'qemu_vcpu' as 'AccelCPUState'; directly use
the typedef, remove unnecessary casts.

Reviewed-by: Richard Henderson
Signed-off-by: Philippe Mathieu-Daudé
Message-Id: <20230624174121.11508-11-philmd@linaro.org>
---
 target/i386/nvmm/nvmm-all.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

(limited to 'target')

diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c
index 90e9e0a..e5ee4af 100644
--- a/target/i386/nvmm/nvmm-all.c
+++ b/target/i386/nvmm/nvmm-all.c
@@ -26,7 +26,7 @@
 
 #include <nvmm.h>
 
-struct qemu_vcpu {
+struct AccelCPUState {
     struct nvmm_vcpu vcpu;
     uint8_t tpr;
     bool stop;
@@ -49,10 +49,10 @@ struct qemu_machine {
 static bool nvmm_allowed;
 static struct qemu_machine qemu_mach;
 
-static struct qemu_vcpu *
+static AccelCPUState *
 get_qemu_vcpu(CPUState *cpu)
 {
-    return (struct qemu_vcpu *)cpu->accel;
+    return cpu->accel;
 }
 
 static struct nvmm_machine *
@@ -86,7 +86,7 @@ nvmm_set_registers(CPUState *cpu)
 {
     CPUX86State *env = cpu->env_ptr;
     struct nvmm_machine *mach = get_nvmm_mach();
-    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
     struct nvmm_vcpu *vcpu = &qcpu->vcpu;
     struct nvmm_x64_state *state = vcpu->state;
     uint64_t bitmap;
@@ -223,7 +223,7 @@ nvmm_get_registers(CPUState *cpu)
 {
     CPUX86State *env = cpu->env_ptr;
     struct nvmm_machine *mach = get_nvmm_mach();
-    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
     struct nvmm_vcpu *vcpu = &qcpu->vcpu;
     X86CPU *x86_cpu = X86_CPU(cpu);
     struct nvmm_x64_state *state = vcpu->state;
@@ -347,7 +347,7 @@
 static bool
 nvmm_can_take_int(CPUState *cpu)
 {
     CPUX86State *env = cpu->env_ptr;
-    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
     struct nvmm_vcpu *vcpu = &qcpu->vcpu;
     struct nvmm_machine *mach = get_nvmm_mach();
 
@@ -372,7 +372,7 @@ nvmm_can_take_nmi(CPUState *cpu)
 {
-    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
 
     /*
      * Contrary to INTs, NMIs always schedule an exit when they are
@@ -395,7 +395,7 @@ nvmm_vcpu_pre_run(CPUState *cpu)
 {
     CPUX86State *env = cpu->env_ptr;
     struct nvmm_machine *mach = get_nvmm_mach();
-    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
     struct nvmm_vcpu *vcpu = &qcpu->vcpu;
     X86CPU *x86_cpu 
= X86_CPU(cpu); struct nvmm_x64_state *state = vcpu->state; @@ -478,7 +478,7 @@ nvmm_vcpu_pre_run(CPUState *cpu) static void nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_vcpu_exit *exit) { - struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = get_qemu_vcpu(cpu); CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); uint64_t tpr; @@ -565,7 +565,7 @@ static int nvmm_handle_rdmsr(struct nvmm_machine *mach, CPUState *cpu, struct nvmm_vcpu_exit *exit) { - struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = get_qemu_vcpu(cpu); struct nvmm_vcpu *vcpu = &qcpu->vcpu; X86CPU *x86_cpu = X86_CPU(cpu); struct nvmm_x64_state *state = vcpu->state; @@ -610,7 +610,7 @@ static int nvmm_handle_wrmsr(struct nvmm_machine *mach, CPUState *cpu, struct nvmm_vcpu_exit *exit) { - struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = get_qemu_vcpu(cpu); struct nvmm_vcpu *vcpu = &qcpu->vcpu; X86CPU *x86_cpu = X86_CPU(cpu); struct nvmm_x64_state *state = vcpu->state; @@ -686,7 +686,7 @@ nvmm_vcpu_loop(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; struct nvmm_machine *mach = get_nvmm_mach(); - struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = get_qemu_vcpu(cpu); struct nvmm_vcpu *vcpu = &qcpu->vcpu; X86CPU *x86_cpu = X86_CPU(cpu); struct nvmm_vcpu_exit *exit = vcpu->exit; @@ -892,7 +892,7 @@ static void nvmm_ipi_signal(int sigcpu) { if (current_cpu) { - struct qemu_vcpu *qcpu = get_qemu_vcpu(current_cpu); + AccelCPUState *qcpu = get_qemu_vcpu(current_cpu); #if NVMM_USER_VERSION >= 2 struct nvmm_vcpu *vcpu = &qcpu->vcpu; nvmm_vcpu_stop(vcpu); @@ -926,7 +926,7 @@ nvmm_init_vcpu(CPUState *cpu) struct nvmm_vcpu_conf_cpuid cpuid; struct nvmm_vcpu_conf_tpr tpr; Error *local_error = NULL; - struct qemu_vcpu *qcpu; + AccelCPUState *qcpu; int ret, err; nvmm_init_cpu_signals(); @@ -942,7 +942,7 @@ nvmm_init_vcpu(CPUState *cpu) } } - qcpu = g_malloc0(sizeof(*qcpu)); + qcpu = g_new0(AccelCPUState, 1); ret = nvmm_vcpu_create(mach, cpu->cpu_index, &qcpu->vcpu); if (ret == -1) { @@ -1023,7 +1023,7 @@ void nvmm_destroy_vcpu(CPUState *cpu) { struct nvmm_machine *mach = get_nvmm_mach(); - struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = get_qemu_vcpu(cpu); nvmm_vcpu_destroy(mach, &qcpu->vcpu); g_free(cpu->accel); -- cgit v1.1 From 2f642b1c2cfa32a1d5af6cbe66c16b5d1c5fd69c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Thu, 23 Mar 2023 13:39:26 +0100 Subject: accel: Inline NVMM get_qemu_vcpu() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need for this helper to access the CPUState::accel field. 
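The shape of the change, sketched:

    /* before: a helper that merely wraps a field access */
    AccelCPUState *qcpu = get_qemu_vcpu(cpu);

    /* after: read the field directly */
    AccelCPUState *qcpu = cpu->accel;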
Reviewed-by: Richard Henderson Signed-off-by: Philippe Mathieu-Daudé Message-Id: <20230624174121.11508-12-philmd@linaro.org> --- target/i386/nvmm/nvmm-all.c | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) (limited to 'target') diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c index e5ee4af..72a3a9e 100644 --- a/target/i386/nvmm/nvmm-all.c +++ b/target/i386/nvmm/nvmm-all.c @@ -49,12 +49,6 @@ struct qemu_machine { static bool nvmm_allowed; static struct qemu_machine qemu_mach; -static AccelCPUState * -get_qemu_vcpu(CPUState *cpu) -{ - return cpu->accel; -} - static struct nvmm_machine * get_nvmm_mach(void) { @@ -86,7 +80,7 @@ nvmm_set_registers(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; struct nvmm_machine *mach = get_nvmm_mach(); - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; struct nvmm_vcpu *vcpu = &qcpu->vcpu; struct nvmm_x64_state *state = vcpu->state; uint64_t bitmap; @@ -223,7 +217,7 @@ nvmm_get_registers(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; struct nvmm_machine *mach = get_nvmm_mach(); - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; struct nvmm_vcpu *vcpu = &qcpu->vcpu; X86CPU *x86_cpu = X86_CPU(cpu); struct nvmm_x64_state *state = vcpu->state; @@ -347,7 +341,7 @@ static bool nvmm_can_take_int(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; struct nvmm_vcpu *vcpu = &qcpu->vcpu; struct nvmm_machine *mach = get_nvmm_mach(); @@ -372,7 +366,7 @@ nvmm_can_take_int(CPUState *cpu) static bool nvmm_can_take_nmi(CPUState *cpu) { - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; /* * Contrary to INTs, NMIs always schedule an exit when they are @@ -395,7 +389,7 @@ nvmm_vcpu_pre_run(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; struct nvmm_machine *mach = get_nvmm_mach(); - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; struct nvmm_vcpu *vcpu = &qcpu->vcpu; X86CPU *x86_cpu = X86_CPU(cpu); struct nvmm_x64_state *state = vcpu->state; @@ -478,7 +472,7 @@ nvmm_vcpu_pre_run(CPUState *cpu) static void nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_vcpu_exit *exit) { - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); uint64_t tpr; @@ -565,7 +559,7 @@ static int nvmm_handle_rdmsr(struct nvmm_machine *mach, CPUState *cpu, struct nvmm_vcpu_exit *exit) { - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; struct nvmm_vcpu *vcpu = &qcpu->vcpu; X86CPU *x86_cpu = X86_CPU(cpu); struct nvmm_x64_state *state = vcpu->state; @@ -610,7 +604,7 @@ static int nvmm_handle_wrmsr(struct nvmm_machine *mach, CPUState *cpu, struct nvmm_vcpu_exit *exit) { - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; struct nvmm_vcpu *vcpu = &qcpu->vcpu; X86CPU *x86_cpu = X86_CPU(cpu); struct nvmm_x64_state *state = vcpu->state; @@ -686,7 +680,7 @@ nvmm_vcpu_loop(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; struct nvmm_machine *mach = get_nvmm_mach(); - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; struct nvmm_vcpu *vcpu = &qcpu->vcpu; X86CPU *x86_cpu = X86_CPU(cpu); struct nvmm_vcpu_exit *exit = vcpu->exit; @@ -892,7 +886,7 @@ static void nvmm_ipi_signal(int sigcpu) { if (current_cpu) { - AccelCPUState *qcpu = get_qemu_vcpu(current_cpu); + AccelCPUState *qcpu = 
current_cpu->accel; #if NVMM_USER_VERSION >= 2 struct nvmm_vcpu *vcpu = &qcpu->vcpu; nvmm_vcpu_stop(vcpu); @@ -1023,7 +1017,7 @@ void nvmm_destroy_vcpu(CPUState *cpu) { struct nvmm_machine *mach = get_nvmm_mach(); - AccelCPUState *qcpu = get_qemu_vcpu(cpu); + AccelCPUState *qcpu = cpu->accel; nvmm_vcpu_destroy(mach, &qcpu->vcpu); g_free(cpu->accel); -- cgit v1.1 From 50830fea89ceefefd455d2489c4f8914df2d68ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 21 Jun 2023 15:52:51 +0200 Subject: accel: Remove WHPX unreachable error path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit g_new0() can not fail. Remove the unreachable error path. https://developer-old.gnome.org/glib/stable/glib-Memory-Allocation.html#glib-Memory-Allocation.description Reported-by: Richard Henderson Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: <20230624174121.11508-13-philmd@linaro.org> --- target/i386/whpx/whpx-all.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'target') diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index 7fc8d4f..63be5fb 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -2179,12 +2179,6 @@ int whpx_init_vcpu(CPUState *cpu) vcpu = g_new0(struct whpx_vcpu, 1); - if (!vcpu) { - error_report("WHPX: Failed to allocte VCPU context."); - ret = -ENOMEM; - goto error; - } - hr = whp_dispatch.WHvEmulatorCreateEmulator( &whpx_emu_callbacks, &vcpu->emulator); -- cgit v1.1 From b4f879a4ed039f73f61ee9ae7142abd84cdc0f27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Thu, 23 Mar 2023 13:43:54 +0100 Subject: accel: Rename WHPX 'struct whpx_vcpu' -> AccelCPUState MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We want all accelerators to share the same opaque pointer in CPUState. Rename WHPX 'whpx_vcpu' as 'AccelCPUState'; use the typedef. 
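Note that each accelerator now provides its own definition of
struct AccelCPUState (HAX in hax-i386.h, NVMM and WHPX in their
respective -all.c files), which works because no translation unit ever
sees more than one of the definitions. The WHPX body, as visible in the
hunk below:

    struct AccelCPUState {
        WHV_EMULATOR_HANDLE emulator;
        bool window_registered;
        bool interruptable;
        /* ... */
    };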
Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: <20230624174121.11508-14-philmd@linaro.org> --- target/i386/whpx/whpx-all.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) (limited to 'target') diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index 63be5fb..185f2a2 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -229,7 +229,7 @@ typedef enum WhpxStepMode { WHPX_STEP_EXCLUSIVE, } WhpxStepMode; -struct whpx_vcpu { +struct AccelCPUState { WHV_EMULATOR_HANDLE emulator; bool window_registered; bool interruptable; @@ -260,9 +260,9 @@ static bool whpx_has_xsave(void) * VP support */ -static struct whpx_vcpu *get_whpx_vcpu(CPUState *cpu) +static AccelCPUState *get_whpx_vcpu(CPUState *cpu) { - return (struct whpx_vcpu *)cpu->accel; + return (AccelCPUState *)cpu->accel; } static WHV_X64_SEGMENT_REGISTER whpx_seg_q2h(const SegmentCache *qs, int v86, @@ -390,7 +390,7 @@ static uint64_t whpx_cr8_to_apic_tpr(uint64_t cr8) static void whpx_set_registers(CPUState *cpu, int level) { struct whpx_state *whpx = &whpx_global; - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); struct whpx_register_set vcxt; @@ -609,7 +609,7 @@ static void whpx_get_xcrs(CPUState *cpu) static void whpx_get_registers(CPUState *cpu) { struct whpx_state *whpx = &whpx_global; - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); struct whpx_register_set vcxt; @@ -892,7 +892,7 @@ static const WHV_EMULATOR_CALLBACKS whpx_emu_callbacks = { static int whpx_handle_mmio(CPUState *cpu, WHV_MEMORY_ACCESS_CONTEXT *ctx) { HRESULT hr; - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); WHV_EMULATOR_STATUS emu_status; hr = whp_dispatch.WHvEmulatorTryMmioEmulation( @@ -917,7 +917,7 @@ static int whpx_handle_portio(CPUState *cpu, WHV_X64_IO_PORT_ACCESS_CONTEXT *ctx) { HRESULT hr; - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); WHV_EMULATOR_STATUS emu_status; hr = whp_dispatch.WHvEmulatorTryIoEmulation( @@ -1417,7 +1417,7 @@ static vaddr whpx_vcpu_get_pc(CPUState *cpu, bool exit_context_valid) * of QEMU, nor this port by calling WHvSetVirtualProcessorRegisters(). * This is the most common case. 
*/ - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); return vcpu->exit_ctx.VpContext.Rip; } else { /* @@ -1468,7 +1468,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu) { HRESULT hr; struct whpx_state *whpx = &whpx_global; - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); int irq; @@ -1590,7 +1590,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu) static void whpx_vcpu_post_run(CPUState *cpu) { - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); @@ -1617,7 +1617,7 @@ static void whpx_vcpu_process_async_events(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) && !(env->hflags & HF_SMM_MASK)) { @@ -1656,7 +1656,7 @@ static int whpx_vcpu_run(CPUState *cpu) { HRESULT hr; struct whpx_state *whpx = &whpx_global; - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); struct whpx_breakpoint *stepped_over_bp = NULL; WhpxStepMode exclusive_step_mode = WHPX_STEP_NONE; int ret; @@ -2154,7 +2154,7 @@ int whpx_init_vcpu(CPUState *cpu) { HRESULT hr; struct whpx_state *whpx = &whpx_global; - struct whpx_vcpu *vcpu = NULL; + AccelCPUState *vcpu = NULL; Error *local_error = NULL; CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); @@ -2177,7 +2177,7 @@ int whpx_init_vcpu(CPUState *cpu) } } - vcpu = g_new0(struct whpx_vcpu, 1); + vcpu = g_new0(AccelCPUState, 1); hr = whp_dispatch.WHvEmulatorCreateEmulator( &whpx_emu_callbacks, @@ -2290,7 +2290,7 @@ int whpx_vcpu_exec(CPUState *cpu) void whpx_destroy_vcpu(CPUState *cpu) { struct whpx_state *whpx = &whpx_global; - struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = get_whpx_vcpu(cpu); whp_dispatch.WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index); whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator); -- cgit v1.1 From 441f2449111b276df7fad81dcaa2f3f93ac7cce8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 21 Jun 2023 16:03:17 +0200 Subject: accel: Inline WHPX get_whpx_vcpu() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need for this helper to access the CPUState::accel field. 
Reviewed-by: Richard Henderson Signed-off-by: Philippe Mathieu-Daudé Message-Id: <20230624174121.11508-15-philmd@linaro.org> --- target/i386/whpx/whpx-all.c | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-) (limited to 'target') diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index 185f2a2..9ee04ee 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -256,15 +256,6 @@ static bool whpx_has_xsave(void) return whpx_xsave_cap.XsaveSupport; } -/* - * VP support - */ - -static AccelCPUState *get_whpx_vcpu(CPUState *cpu) -{ - return (AccelCPUState *)cpu->accel; -} - static WHV_X64_SEGMENT_REGISTER whpx_seg_q2h(const SegmentCache *qs, int v86, int r86) { @@ -390,7 +381,7 @@ static uint64_t whpx_cr8_to_apic_tpr(uint64_t cr8) static void whpx_set_registers(CPUState *cpu, int level) { struct whpx_state *whpx = &whpx_global; - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); struct whpx_register_set vcxt; @@ -609,7 +600,7 @@ static void whpx_get_xcrs(CPUState *cpu) static void whpx_get_registers(CPUState *cpu) { struct whpx_state *whpx = &whpx_global; - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); struct whpx_register_set vcxt; @@ -892,7 +883,7 @@ static const WHV_EMULATOR_CALLBACKS whpx_emu_callbacks = { static int whpx_handle_mmio(CPUState *cpu, WHV_MEMORY_ACCESS_CONTEXT *ctx) { HRESULT hr; - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; WHV_EMULATOR_STATUS emu_status; hr = whp_dispatch.WHvEmulatorTryMmioEmulation( @@ -917,7 +908,7 @@ static int whpx_handle_portio(CPUState *cpu, WHV_X64_IO_PORT_ACCESS_CONTEXT *ctx) { HRESULT hr; - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; WHV_EMULATOR_STATUS emu_status; hr = whp_dispatch.WHvEmulatorTryIoEmulation( @@ -1417,7 +1408,7 @@ static vaddr whpx_vcpu_get_pc(CPUState *cpu, bool exit_context_valid) * of QEMU, nor this port by calling WHvSetVirtualProcessorRegisters(). * This is the most common case. 
*/ - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; return vcpu->exit_ctx.VpContext.Rip; } else { /* @@ -1468,7 +1459,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu) { HRESULT hr; struct whpx_state *whpx = &whpx_global; - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); int irq; @@ -1590,7 +1581,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu) static void whpx_vcpu_post_run(CPUState *cpu) { - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); @@ -1617,7 +1608,7 @@ static void whpx_vcpu_process_async_events(CPUState *cpu) { CPUX86State *env = cpu->env_ptr; X86CPU *x86_cpu = X86_CPU(cpu); - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) && !(env->hflags & HF_SMM_MASK)) { @@ -1656,7 +1647,7 @@ static int whpx_vcpu_run(CPUState *cpu) { HRESULT hr; struct whpx_state *whpx = &whpx_global; - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; struct whpx_breakpoint *stepped_over_bp = NULL; WhpxStepMode exclusive_step_mode = WHPX_STEP_NONE; int ret; @@ -2290,7 +2281,7 @@ int whpx_vcpu_exec(CPUState *cpu) void whpx_destroy_vcpu(CPUState *cpu) { struct whpx_state *whpx = &whpx_global; - AccelCPUState *vcpu = get_whpx_vcpu(cpu); + AccelCPUState *vcpu = cpu->accel; whp_dispatch.WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index); whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator); -- cgit v1.1 From a7159244285058c049ad53a42b3dc7b24809faaa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 21 Jun 2023 13:14:06 +0200 Subject: accel: Rename 'cpu_state' -> 'cs' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Most of the codebase uses 'CPUState *cpu' or 'CPUState *cs'. While 'cpu_state' is kind of explicit, it makes the code harder to review. Simply rename as 'cs'. 
Acked-by: Richard Henderson
Signed-off-by: Philippe Mathieu-Daudé
Tested-by: Peter Maydell
Message-Id: <20230624174121.11508-16-philmd@linaro.org>
---
 target/i386/hvf/x86hvf.c | 372 +++++++++++++++++++++++------------------------
 target/i386/hvf/x86hvf.h |  18 +--
 2 files changed, 195 insertions(+), 195 deletions(-)

(limited to 'target')

diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
index 69d4fb8..92dfd26 100644
--- a/target/i386/hvf/x86hvf.c
+++ b/target/i386/hvf/x86hvf.c
@@ -32,14 +32,14 @@
 #include <Hypervisor/hv.h>
 #include <Hypervisor/hv_vmx.h>
 
-void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
+void hvf_set_segment(CPUState *cs, struct vmx_segment *vmx_seg,
                      SegmentCache *qseg, bool is_tr)
 {
     vmx_seg->sel = qseg->selector;
     vmx_seg->base = qseg->base;
     vmx_seg->limit = qseg->limit;
 
-    if (!qseg->selector && !x86_is_real(cpu) && !is_tr) {
+    if (!qseg->selector && !x86_is_real(cs) && !is_tr) {
         /* the TR register is usable after processor reset despite
          * having a null selector */
         vmx_seg->ar = 1 << 16;
@@ -70,279 +70,279 @@ void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg)
         (((vmx_seg->ar >> 15) & 1) << DESC_G_SHIFT);
 }
 
-void hvf_put_xsave(CPUState *cpu_state)
+void hvf_put_xsave(CPUState *cs)
 {
-    void *xsave = X86_CPU(cpu_state)->env.xsave_buf;
-    uint32_t xsave_len = X86_CPU(cpu_state)->env.xsave_buf_len;
+    void *xsave = X86_CPU(cs)->env.xsave_buf;
+    uint32_t xsave_len = X86_CPU(cs)->env.xsave_buf_len;
 
-    x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave, xsave_len);
+    x86_cpu_xsave_all_areas(X86_CPU(cs), xsave, xsave_len);
 
-    if (hv_vcpu_write_fpstate(cpu_state->hvf->fd, xsave, xsave_len)) {
+    if (hv_vcpu_write_fpstate(cs->hvf->fd, xsave, xsave_len)) {
         abort();
     }
 }
 
-static void hvf_put_segments(CPUState *cpu_state)
+static void hvf_put_segments(CPUState *cs)
 {
-    CPUX86State *env = &X86_CPU(cpu_state)->env;
+    CPUX86State *env = &X86_CPU(cs)->env;
     struct vmx_segment seg;
 
-    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
-    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_BASE, env->idt.base);
+    wvmcs(cs->hvf->fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
+    wvmcs(cs->hvf->fd, VMCS_GUEST_IDTR_BASE, env->idt.base);
 
-    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
-    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);
+    wvmcs(cs->hvf->fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
+    wvmcs(cs->hvf->fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);
 
-    /* wvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR2, env->cr[2]); */
-    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR3, env->cr[3]);
-    vmx_update_tpr(cpu_state);
-    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IA32_EFER, env->efer);
+    /* wvmcs(cs->hvf->fd, VMCS_GUEST_CR2, env->cr[2]); */
+    wvmcs(cs->hvf->fd, VMCS_GUEST_CR3, env->cr[3]);
+    vmx_update_tpr(cs);
+    wvmcs(cs->hvf->fd, VMCS_GUEST_IA32_EFER, env->efer);
 
-    macvm_set_cr4(cpu_state->hvf->fd, env->cr[4]);
-    macvm_set_cr0(cpu_state->hvf->fd, env->cr[0]);
+    macvm_set_cr4(cs->hvf->fd, env->cr[4]);
+    macvm_set_cr0(cs->hvf->fd, env->cr[0]);
 
-    hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false);
-    vmx_write_segment_descriptor(cpu_state, &seg, R_CS);
+    hvf_set_segment(cs, &seg, &env->segs[R_CS], false);
+    vmx_write_segment_descriptor(cs, &seg, R_CS);
 
-    hvf_set_segment(cpu_state, &seg, &env->segs[R_DS], false);
-    vmx_write_segment_descriptor(cpu_state, &seg, R_DS);
+    hvf_set_segment(cs, &seg, &env->segs[R_DS], false);
+    vmx_write_segment_descriptor(cs, &seg, R_DS);
 
-    hvf_set_segment(cpu_state, &seg, &env->segs[R_ES], false);
-    vmx_write_segment_descriptor(cpu_state, &seg, R_ES);
+ hvf_set_segment(cs, &seg, &env->segs[R_ES], false); + vmx_write_segment_descriptor(cs, &seg, R_ES); - hvf_set_segment(cpu_state, &seg, &env->segs[R_SS], false); - vmx_write_segment_descriptor(cpu_state, &seg, R_SS); + hvf_set_segment(cs, &seg, &env->segs[R_SS], false); + vmx_write_segment_descriptor(cs, &seg, R_SS); - hvf_set_segment(cpu_state, &seg, &env->segs[R_FS], false); - vmx_write_segment_descriptor(cpu_state, &seg, R_FS); + hvf_set_segment(cs, &seg, &env->segs[R_FS], false); + vmx_write_segment_descriptor(cs, &seg, R_FS); - hvf_set_segment(cpu_state, &seg, &env->segs[R_GS], false); - vmx_write_segment_descriptor(cpu_state, &seg, R_GS); + hvf_set_segment(cs, &seg, &env->segs[R_GS], false); + vmx_write_segment_descriptor(cs, &seg, R_GS); - hvf_set_segment(cpu_state, &seg, &env->tr, true); - vmx_write_segment_descriptor(cpu_state, &seg, R_TR); + hvf_set_segment(cs, &seg, &env->tr, true); + vmx_write_segment_descriptor(cs, &seg, R_TR); - hvf_set_segment(cpu_state, &seg, &env->ldt, false); - vmx_write_segment_descriptor(cpu_state, &seg, R_LDTR); + hvf_set_segment(cs, &seg, &env->ldt, false); + vmx_write_segment_descriptor(cs, &seg, R_LDTR); } -void hvf_put_msrs(CPUState *cpu_state) +void hvf_put_msrs(CPUState *cs) { - CPUX86State *env = &X86_CPU(cpu_state)->env; + CPUX86State *env = &X86_CPU(cs)->env; - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_CS, + hv_vcpu_write_msr(cs->hvf->fd, MSR_IA32_SYSENTER_CS, env->sysenter_cs); - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_ESP, + hv_vcpu_write_msr(cs->hvf->fd, MSR_IA32_SYSENTER_ESP, env->sysenter_esp); - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_EIP, + hv_vcpu_write_msr(cs->hvf->fd, MSR_IA32_SYSENTER_EIP, env->sysenter_eip); - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_STAR, env->star); + hv_vcpu_write_msr(cs->hvf->fd, MSR_STAR, env->star); #ifdef TARGET_X86_64 - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_CSTAR, env->cstar); - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_KERNELGSBASE, env->kernelgsbase); - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_FMASK, env->fmask); - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_LSTAR, env->lstar); + hv_vcpu_write_msr(cs->hvf->fd, MSR_CSTAR, env->cstar); + hv_vcpu_write_msr(cs->hvf->fd, MSR_KERNELGSBASE, env->kernelgsbase); + hv_vcpu_write_msr(cs->hvf->fd, MSR_FMASK, env->fmask); + hv_vcpu_write_msr(cs->hvf->fd, MSR_LSTAR, env->lstar); #endif - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_GSBASE, env->segs[R_GS].base); - hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_FSBASE, env->segs[R_FS].base); + hv_vcpu_write_msr(cs->hvf->fd, MSR_GSBASE, env->segs[R_GS].base); + hv_vcpu_write_msr(cs->hvf->fd, MSR_FSBASE, env->segs[R_FS].base); } -void hvf_get_xsave(CPUState *cpu_state) +void hvf_get_xsave(CPUState *cs) { - void *xsave = X86_CPU(cpu_state)->env.xsave_buf; - uint32_t xsave_len = X86_CPU(cpu_state)->env.xsave_buf_len; + void *xsave = X86_CPU(cs)->env.xsave_buf; + uint32_t xsave_len = X86_CPU(cs)->env.xsave_buf_len; - if (hv_vcpu_read_fpstate(cpu_state->hvf->fd, xsave, xsave_len)) { + if (hv_vcpu_read_fpstate(cs->hvf->fd, xsave, xsave_len)) { abort(); } - x86_cpu_xrstor_all_areas(X86_CPU(cpu_state), xsave, xsave_len); + x86_cpu_xrstor_all_areas(X86_CPU(cs), xsave, xsave_len); } -static void hvf_get_segments(CPUState *cpu_state) +static void hvf_get_segments(CPUState *cs) { - CPUX86State *env = &X86_CPU(cpu_state)->env; + CPUX86State *env = &X86_CPU(cs)->env; struct vmx_segment seg; env->interrupt_injected = -1; - vmx_read_segment_descriptor(cpu_state, &seg, R_CS); + 
vmx_read_segment_descriptor(cs, &seg, R_CS); hvf_get_segment(&env->segs[R_CS], &seg); - vmx_read_segment_descriptor(cpu_state, &seg, R_DS); + vmx_read_segment_descriptor(cs, &seg, R_DS); hvf_get_segment(&env->segs[R_DS], &seg); - vmx_read_segment_descriptor(cpu_state, &seg, R_ES); + vmx_read_segment_descriptor(cs, &seg, R_ES); hvf_get_segment(&env->segs[R_ES], &seg); - vmx_read_segment_descriptor(cpu_state, &seg, R_FS); + vmx_read_segment_descriptor(cs, &seg, R_FS); hvf_get_segment(&env->segs[R_FS], &seg); - vmx_read_segment_descriptor(cpu_state, &seg, R_GS); + vmx_read_segment_descriptor(cs, &seg, R_GS); hvf_get_segment(&env->segs[R_GS], &seg); - vmx_read_segment_descriptor(cpu_state, &seg, R_SS); + vmx_read_segment_descriptor(cs, &seg, R_SS); hvf_get_segment(&env->segs[R_SS], &seg); - vmx_read_segment_descriptor(cpu_state, &seg, R_TR); + vmx_read_segment_descriptor(cs, &seg, R_TR); hvf_get_segment(&env->tr, &seg); - vmx_read_segment_descriptor(cpu_state, &seg, R_LDTR); + vmx_read_segment_descriptor(cs, &seg, R_LDTR); hvf_get_segment(&env->ldt, &seg); - env->idt.limit = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_LIMIT); - env->idt.base = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_BASE); - env->gdt.limit = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_LIMIT); - env->gdt.base = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_BASE); + env->idt.limit = rvmcs(cs->hvf->fd, VMCS_GUEST_IDTR_LIMIT); + env->idt.base = rvmcs(cs->hvf->fd, VMCS_GUEST_IDTR_BASE); + env->gdt.limit = rvmcs(cs->hvf->fd, VMCS_GUEST_GDTR_LIMIT); + env->gdt.base = rvmcs(cs->hvf->fd, VMCS_GUEST_GDTR_BASE); - env->cr[0] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR0); + env->cr[0] = rvmcs(cs->hvf->fd, VMCS_GUEST_CR0); env->cr[2] = 0; - env->cr[3] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR3); - env->cr[4] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR4); + env->cr[3] = rvmcs(cs->hvf->fd, VMCS_GUEST_CR3); + env->cr[4] = rvmcs(cs->hvf->fd, VMCS_GUEST_CR4); - env->efer = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IA32_EFER); + env->efer = rvmcs(cs->hvf->fd, VMCS_GUEST_IA32_EFER); } -void hvf_get_msrs(CPUState *cpu_state) +void hvf_get_msrs(CPUState *cs) { - CPUX86State *env = &X86_CPU(cpu_state)->env; + CPUX86State *env = &X86_CPU(cs)->env; uint64_t tmp; - hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_CS, &tmp); + hv_vcpu_read_msr(cs->hvf->fd, MSR_IA32_SYSENTER_CS, &tmp); env->sysenter_cs = tmp; - hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_ESP, &tmp); + hv_vcpu_read_msr(cs->hvf->fd, MSR_IA32_SYSENTER_ESP, &tmp); env->sysenter_esp = tmp; - hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_EIP, &tmp); + hv_vcpu_read_msr(cs->hvf->fd, MSR_IA32_SYSENTER_EIP, &tmp); env->sysenter_eip = tmp; - hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_STAR, &env->star); + hv_vcpu_read_msr(cs->hvf->fd, MSR_STAR, &env->star); #ifdef TARGET_X86_64 - hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_CSTAR, &env->cstar); - hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_KERNELGSBASE, &env->kernelgsbase); - hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_FMASK, &env->fmask); - hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_LSTAR, &env->lstar); + hv_vcpu_read_msr(cs->hvf->fd, MSR_CSTAR, &env->cstar); + hv_vcpu_read_msr(cs->hvf->fd, MSR_KERNELGSBASE, &env->kernelgsbase); + hv_vcpu_read_msr(cs->hvf->fd, MSR_FMASK, &env->fmask); + hv_vcpu_read_msr(cs->hvf->fd, MSR_LSTAR, &env->lstar); #endif - hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_APICBASE, &tmp); + hv_vcpu_read_msr(cs->hvf->fd, MSR_IA32_APICBASE, &tmp); - env->tsc = rdtscp() + rvmcs(cpu_state->hvf->fd, VMCS_TSC_OFFSET); + env->tsc = 
rdtscp() + rvmcs(cs->hvf->fd, VMCS_TSC_OFFSET); } -int hvf_put_registers(CPUState *cpu_state) +int hvf_put_registers(CPUState *cs) { - X86CPU *x86cpu = X86_CPU(cpu_state); + X86CPU *x86cpu = X86_CPU(cs); CPUX86State *env = &x86cpu->env; - wreg(cpu_state->hvf->fd, HV_X86_RAX, env->regs[R_EAX]); - wreg(cpu_state->hvf->fd, HV_X86_RBX, env->regs[R_EBX]); - wreg(cpu_state->hvf->fd, HV_X86_RCX, env->regs[R_ECX]); - wreg(cpu_state->hvf->fd, HV_X86_RDX, env->regs[R_EDX]); - wreg(cpu_state->hvf->fd, HV_X86_RBP, env->regs[R_EBP]); - wreg(cpu_state->hvf->fd, HV_X86_RSP, env->regs[R_ESP]); - wreg(cpu_state->hvf->fd, HV_X86_RSI, env->regs[R_ESI]); - wreg(cpu_state->hvf->fd, HV_X86_RDI, env->regs[R_EDI]); - wreg(cpu_state->hvf->fd, HV_X86_R8, env->regs[8]); - wreg(cpu_state->hvf->fd, HV_X86_R9, env->regs[9]); - wreg(cpu_state->hvf->fd, HV_X86_R10, env->regs[10]); - wreg(cpu_state->hvf->fd, HV_X86_R11, env->regs[11]); - wreg(cpu_state->hvf->fd, HV_X86_R12, env->regs[12]); - wreg(cpu_state->hvf->fd, HV_X86_R13, env->regs[13]); - wreg(cpu_state->hvf->fd, HV_X86_R14, env->regs[14]); - wreg(cpu_state->hvf->fd, HV_X86_R15, env->regs[15]); - wreg(cpu_state->hvf->fd, HV_X86_RFLAGS, env->eflags); - wreg(cpu_state->hvf->fd, HV_X86_RIP, env->eip); + wreg(cs->hvf->fd, HV_X86_RAX, env->regs[R_EAX]); + wreg(cs->hvf->fd, HV_X86_RBX, env->regs[R_EBX]); + wreg(cs->hvf->fd, HV_X86_RCX, env->regs[R_ECX]); + wreg(cs->hvf->fd, HV_X86_RDX, env->regs[R_EDX]); + wreg(cs->hvf->fd, HV_X86_RBP, env->regs[R_EBP]); + wreg(cs->hvf->fd, HV_X86_RSP, env->regs[R_ESP]); + wreg(cs->hvf->fd, HV_X86_RSI, env->regs[R_ESI]); + wreg(cs->hvf->fd, HV_X86_RDI, env->regs[R_EDI]); + wreg(cs->hvf->fd, HV_X86_R8, env->regs[8]); + wreg(cs->hvf->fd, HV_X86_R9, env->regs[9]); + wreg(cs->hvf->fd, HV_X86_R10, env->regs[10]); + wreg(cs->hvf->fd, HV_X86_R11, env->regs[11]); + wreg(cs->hvf->fd, HV_X86_R12, env->regs[12]); + wreg(cs->hvf->fd, HV_X86_R13, env->regs[13]); + wreg(cs->hvf->fd, HV_X86_R14, env->regs[14]); + wreg(cs->hvf->fd, HV_X86_R15, env->regs[15]); + wreg(cs->hvf->fd, HV_X86_RFLAGS, env->eflags); + wreg(cs->hvf->fd, HV_X86_RIP, env->eip); - wreg(cpu_state->hvf->fd, HV_X86_XCR0, env->xcr0); + wreg(cs->hvf->fd, HV_X86_XCR0, env->xcr0); - hvf_put_xsave(cpu_state); + hvf_put_xsave(cs); - hvf_put_segments(cpu_state); + hvf_put_segments(cs); - hvf_put_msrs(cpu_state); + hvf_put_msrs(cs); - wreg(cpu_state->hvf->fd, HV_X86_DR0, env->dr[0]); - wreg(cpu_state->hvf->fd, HV_X86_DR1, env->dr[1]); - wreg(cpu_state->hvf->fd, HV_X86_DR2, env->dr[2]); - wreg(cpu_state->hvf->fd, HV_X86_DR3, env->dr[3]); - wreg(cpu_state->hvf->fd, HV_X86_DR4, env->dr[4]); - wreg(cpu_state->hvf->fd, HV_X86_DR5, env->dr[5]); - wreg(cpu_state->hvf->fd, HV_X86_DR6, env->dr[6]); - wreg(cpu_state->hvf->fd, HV_X86_DR7, env->dr[7]); + wreg(cs->hvf->fd, HV_X86_DR0, env->dr[0]); + wreg(cs->hvf->fd, HV_X86_DR1, env->dr[1]); + wreg(cs->hvf->fd, HV_X86_DR2, env->dr[2]); + wreg(cs->hvf->fd, HV_X86_DR3, env->dr[3]); + wreg(cs->hvf->fd, HV_X86_DR4, env->dr[4]); + wreg(cs->hvf->fd, HV_X86_DR5, env->dr[5]); + wreg(cs->hvf->fd, HV_X86_DR6, env->dr[6]); + wreg(cs->hvf->fd, HV_X86_DR7, env->dr[7]); return 0; } -int hvf_get_registers(CPUState *cpu_state) +int hvf_get_registers(CPUState *cs) { - X86CPU *x86cpu = X86_CPU(cpu_state); + X86CPU *x86cpu = X86_CPU(cs); CPUX86State *env = &x86cpu->env; - env->regs[R_EAX] = rreg(cpu_state->hvf->fd, HV_X86_RAX); - env->regs[R_EBX] = rreg(cpu_state->hvf->fd, HV_X86_RBX); - env->regs[R_ECX] = rreg(cpu_state->hvf->fd, HV_X86_RCX); - env->regs[R_EDX] = 
rreg(cpu_state->hvf->fd, HV_X86_RDX); - env->regs[R_EBP] = rreg(cpu_state->hvf->fd, HV_X86_RBP); - env->regs[R_ESP] = rreg(cpu_state->hvf->fd, HV_X86_RSP); - env->regs[R_ESI] = rreg(cpu_state->hvf->fd, HV_X86_RSI); - env->regs[R_EDI] = rreg(cpu_state->hvf->fd, HV_X86_RDI); - env->regs[8] = rreg(cpu_state->hvf->fd, HV_X86_R8); - env->regs[9] = rreg(cpu_state->hvf->fd, HV_X86_R9); - env->regs[10] = rreg(cpu_state->hvf->fd, HV_X86_R10); - env->regs[11] = rreg(cpu_state->hvf->fd, HV_X86_R11); - env->regs[12] = rreg(cpu_state->hvf->fd, HV_X86_R12); - env->regs[13] = rreg(cpu_state->hvf->fd, HV_X86_R13); - env->regs[14] = rreg(cpu_state->hvf->fd, HV_X86_R14); - env->regs[15] = rreg(cpu_state->hvf->fd, HV_X86_R15); + env->regs[R_EAX] = rreg(cs->hvf->fd, HV_X86_RAX); + env->regs[R_EBX] = rreg(cs->hvf->fd, HV_X86_RBX); + env->regs[R_ECX] = rreg(cs->hvf->fd, HV_X86_RCX); + env->regs[R_EDX] = rreg(cs->hvf->fd, HV_X86_RDX); + env->regs[R_EBP] = rreg(cs->hvf->fd, HV_X86_RBP); + env->regs[R_ESP] = rreg(cs->hvf->fd, HV_X86_RSP); + env->regs[R_ESI] = rreg(cs->hvf->fd, HV_X86_RSI); + env->regs[R_EDI] = rreg(cs->hvf->fd, HV_X86_RDI); + env->regs[8] = rreg(cs->hvf->fd, HV_X86_R8); + env->regs[9] = rreg(cs->hvf->fd, HV_X86_R9); + env->regs[10] = rreg(cs->hvf->fd, HV_X86_R10); + env->regs[11] = rreg(cs->hvf->fd, HV_X86_R11); + env->regs[12] = rreg(cs->hvf->fd, HV_X86_R12); + env->regs[13] = rreg(cs->hvf->fd, HV_X86_R13); + env->regs[14] = rreg(cs->hvf->fd, HV_X86_R14); + env->regs[15] = rreg(cs->hvf->fd, HV_X86_R15); - env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS); - env->eip = rreg(cpu_state->hvf->fd, HV_X86_RIP); + env->eflags = rreg(cs->hvf->fd, HV_X86_RFLAGS); + env->eip = rreg(cs->hvf->fd, HV_X86_RIP); - hvf_get_xsave(cpu_state); - env->xcr0 = rreg(cpu_state->hvf->fd, HV_X86_XCR0); + hvf_get_xsave(cs); + env->xcr0 = rreg(cs->hvf->fd, HV_X86_XCR0); - hvf_get_segments(cpu_state); - hvf_get_msrs(cpu_state); + hvf_get_segments(cs); + hvf_get_msrs(cs); - env->dr[0] = rreg(cpu_state->hvf->fd, HV_X86_DR0); - env->dr[1] = rreg(cpu_state->hvf->fd, HV_X86_DR1); - env->dr[2] = rreg(cpu_state->hvf->fd, HV_X86_DR2); - env->dr[3] = rreg(cpu_state->hvf->fd, HV_X86_DR3); - env->dr[4] = rreg(cpu_state->hvf->fd, HV_X86_DR4); - env->dr[5] = rreg(cpu_state->hvf->fd, HV_X86_DR5); - env->dr[6] = rreg(cpu_state->hvf->fd, HV_X86_DR6); - env->dr[7] = rreg(cpu_state->hvf->fd, HV_X86_DR7); + env->dr[0] = rreg(cs->hvf->fd, HV_X86_DR0); + env->dr[1] = rreg(cs->hvf->fd, HV_X86_DR1); + env->dr[2] = rreg(cs->hvf->fd, HV_X86_DR2); + env->dr[3] = rreg(cs->hvf->fd, HV_X86_DR3); + env->dr[4] = rreg(cs->hvf->fd, HV_X86_DR4); + env->dr[5] = rreg(cs->hvf->fd, HV_X86_DR5); + env->dr[6] = rreg(cs->hvf->fd, HV_X86_DR6); + env->dr[7] = rreg(cs->hvf->fd, HV_X86_DR7); x86_update_hflags(env); return 0; } -static void vmx_set_int_window_exiting(CPUState *cpu) +static void vmx_set_int_window_exiting(CPUState *cs) { uint64_t val; - val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); - wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val | + val = rvmcs(cs->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); + wvmcs(cs->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val | VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING); } -void vmx_clear_int_window_exiting(CPUState *cpu) +void vmx_clear_int_window_exiting(CPUState *cs) { uint64_t val; - val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); - wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val & + val = rvmcs(cs->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); + wvmcs(cs->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val & 
~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING); } -bool hvf_inject_interrupts(CPUState *cpu_state) +bool hvf_inject_interrupts(CPUState *cs) { - X86CPU *x86cpu = X86_CPU(cpu_state); + X86CPU *x86cpu = X86_CPU(cs); CPUX86State *env = &x86cpu->env; uint8_t vector; @@ -372,89 +372,89 @@ bool hvf_inject_interrupts(CPUState *cpu_state) uint64_t info = 0; if (have_event) { info = vector | intr_type | VMCS_INTR_VALID; - uint64_t reason = rvmcs(cpu_state->hvf->fd, VMCS_EXIT_REASON); + uint64_t reason = rvmcs(cs->hvf->fd, VMCS_EXIT_REASON); if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) { - vmx_clear_nmi_blocking(cpu_state); + vmx_clear_nmi_blocking(cs); } if (!(env->hflags2 & HF2_NMI_MASK) || intr_type != VMCS_INTR_T_NMI) { info &= ~(1 << 12); /* clear undefined bit */ if (intr_type == VMCS_INTR_T_SWINTR || intr_type == VMCS_INTR_T_SWEXCEPTION) { - wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INST_LENGTH, env->ins_len); + wvmcs(cs->hvf->fd, VMCS_ENTRY_INST_LENGTH, env->ins_len); } if (env->has_error_code) { - wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_EXCEPTION_ERROR, + wvmcs(cs->hvf->fd, VMCS_ENTRY_EXCEPTION_ERROR, env->error_code); /* Indicate that VMCS_ENTRY_EXCEPTION_ERROR is valid */ info |= VMCS_INTR_DEL_ERRCODE; } /*printf("reinject %lx err %d\n", info, err);*/ - wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, info); + wvmcs(cs->hvf->fd, VMCS_ENTRY_INTR_INFO, info); }; } - if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) { + if (cs->interrupt_request & CPU_INTERRUPT_NMI) { if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) { - cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI; + cs->interrupt_request &= ~CPU_INTERRUPT_NMI; info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI; - wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, info); + wvmcs(cs->hvf->fd, VMCS_ENTRY_INTR_INFO, info); } else { - vmx_set_nmi_window_exiting(cpu_state); + vmx_set_nmi_window_exiting(cs); } } if (!(env->hflags & HF_INHIBIT_IRQ_MASK) && - (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) && + (cs->interrupt_request & CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK) && !(info & VMCS_INTR_VALID)) { int line = cpu_get_pic_interrupt(&x86cpu->env); - cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD; + cs->interrupt_request &= ~CPU_INTERRUPT_HARD; if (line >= 0) { - wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, line | + wvmcs(cs->hvf->fd, VMCS_ENTRY_INTR_INFO, line | VMCS_INTR_VALID | VMCS_INTR_T_HWINTR); } } - if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) { - vmx_set_int_window_exiting(cpu_state); + if (cs->interrupt_request & CPU_INTERRUPT_HARD) { + vmx_set_int_window_exiting(cs); } - return (cpu_state->interrupt_request + return (cs->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)); } -int hvf_process_events(CPUState *cpu_state) +int hvf_process_events(CPUState *cs) { - X86CPU *cpu = X86_CPU(cpu_state); + X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; - if (!cpu_state->vcpu_dirty) { + if (!cs->vcpu_dirty) { /* light weight sync for CPU_INTERRUPT_HARD and IF_MASK */ - env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS); + env->eflags = rreg(cs->hvf->fd, HV_X86_RFLAGS); } - if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) { - cpu_synchronize_state(cpu_state); + if (cs->interrupt_request & CPU_INTERRUPT_INIT) { + cpu_synchronize_state(cs); do_cpu_init(cpu); } - if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) { - cpu_state->interrupt_request &= ~CPU_INTERRUPT_POLL; + if (cs->interrupt_request & CPU_INTERRUPT_POLL) { + cs->interrupt_request &= 
~CPU_INTERRUPT_POLL; apic_poll_irq(cpu->apic_state); } - if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) && + if (((cs->interrupt_request & CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK)) || - (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) { - cpu_state->halted = 0; + (cs->interrupt_request & CPU_INTERRUPT_NMI)) { + cs->halted = 0; } - if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) { - cpu_synchronize_state(cpu_state); + if (cs->interrupt_request & CPU_INTERRUPT_SIPI) { + cpu_synchronize_state(cs); do_cpu_sipi(cpu); } - if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) { - cpu_state->interrupt_request &= ~CPU_INTERRUPT_TPR; - cpu_synchronize_state(cpu_state); + if (cs->interrupt_request & CPU_INTERRUPT_TPR) { + cs->interrupt_request &= ~CPU_INTERRUPT_TPR; + cpu_synchronize_state(cs); apic_handle_tpr_access_report(cpu->apic_state, env->eip, env->tpr_access_type); } - return cpu_state->halted; + return cs->halted; } diff --git a/target/i386/hvf/x86hvf.h b/target/i386/hvf/x86hvf.h index db6003d..423a89b 100644 --- a/target/i386/hvf/x86hvf.h +++ b/target/i386/hvf/x86hvf.h @@ -20,15 +20,15 @@ #include "cpu.h" #include "x86_descr.h" -int hvf_process_events(CPUState *); -bool hvf_inject_interrupts(CPUState *); -void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg, +int hvf_process_events(CPUState *cs); +bool hvf_inject_interrupts(CPUState *cs); +void hvf_set_segment(CPUState *cs, struct vmx_segment *vmx_seg, SegmentCache *qseg, bool is_tr); void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg); -void hvf_put_xsave(CPUState *cpu_state); -void hvf_put_msrs(CPUState *cpu_state); -void hvf_get_xsave(CPUState *cpu_state); -void hvf_get_msrs(CPUState *cpu_state); -void vmx_clear_int_window_exiting(CPUState *cpu); -void vmx_update_tpr(CPUState *cpu); +void hvf_put_xsave(CPUState *cs); +void hvf_put_msrs(CPUState *cs); +void hvf_get_xsave(CPUState *cs); +void hvf_get_msrs(CPUState *cs); +void vmx_clear_int_window_exiting(CPUState *cs); +void vmx_update_tpr(CPUState *cs); #endif -- cgit v1.1 From 3b295bcb3289afec09508786032f4ba5d657a934 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 21 Jun 2023 13:15:27 +0200 Subject: accel: Rename HVF 'struct hvf_vcpu_state' -> AccelCPUState MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We want all accelerators to share the same opaque pointer in CPUState. Rename the 'hvf_vcpu_state' structure as 'AccelCPUState'. Use the generic 'accel' field of CPUState instead of 'hvf'. Replace g_malloc0() by g_new0() for readability. 
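As a minimal sketch of the pattern this commit applies (the struct members, the hv_vcpu_t type and the call sites below are assumptions for illustration; only the AccelCPUState name, the generic 'accel' field and g_new0() come from the patch itself):

    typedef struct AccelCPUState {   /* was: struct hvf_vcpu_state */
        hv_vcpu_t fd;                /* member set assumed for the sketch */
        bool vtimer_masked;
    } AccelCPUState;

    /* was: cpu->hvf = g_malloc0(sizeof(struct hvf_vcpu_state)); */
    cpu->accel = g_new0(AccelCPUState, 1);

    /* was: wreg(cpu->hvf->fd, HV_X86_RIP, rip); */
    wreg(cpu->accel->fd, HV_X86_RIP, rip);

Every cpu->hvf-> access in the diffs below becomes cpu->accel-> accordingly.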
Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Tested-by: Peter Maydell Message-Id: <20230624174121.11508-17-philmd@linaro.org> --- target/arm/hvf/hvf.c | 108 +++++++++++------------ target/i386/hvf/hvf.c | 106 +++++++++++----------- target/i386/hvf/vmx.h | 22 ++--- target/i386/hvf/x86.c | 28 +++--- target/i386/hvf/x86_descr.c | 26 +++--- target/i386/hvf/x86_emu.c | 62 ++++++------- target/i386/hvf/x86_mmu.c | 4 +- target/i386/hvf/x86_task.c | 10 +-- target/i386/hvf/x86hvf.c | 208 ++++++++++++++++++++++---------------------- 9 files changed, 287 insertions(+), 287 deletions(-) (limited to 'target') diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c index 8f72624..8fce64b 100644 --- a/target/arm/hvf/hvf.c +++ b/target/arm/hvf/hvf.c @@ -544,29 +544,29 @@ int hvf_get_registers(CPUState *cpu) int i; for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) { - ret = hv_vcpu_get_reg(cpu->hvf->fd, hvf_reg_match[i].reg, &val); + ret = hv_vcpu_get_reg(cpu->accel->fd, hvf_reg_match[i].reg, &val); *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val; assert_hvf_ok(ret); } for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) { - ret = hv_vcpu_get_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg, + ret = hv_vcpu_get_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg, &fpval); memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval)); assert_hvf_ok(ret); } val = 0; - ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPCR, &val); + ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPCR, &val); assert_hvf_ok(ret); vfp_set_fpcr(env, val); val = 0; - ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPSR, &val); + ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPSR, &val); assert_hvf_ok(ret); vfp_set_fpsr(env, val); - ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_CPSR, &val); + ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_CPSR, &val); assert_hvf_ok(ret); pstate_write(env, val); @@ -575,7 +575,7 @@ int hvf_get_registers(CPUState *cpu) continue; } - if (cpu->hvf->guest_debug_enabled) { + if (cpu->accel->guest_debug_enabled) { /* Handle debug registers */ switch (hvf_sreg_match[i].reg) { case HV_SYS_REG_DBGBVR0_EL1: @@ -661,7 +661,7 @@ int hvf_get_registers(CPUState *cpu) } } - ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, &val); + ret = hv_vcpu_get_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, &val); assert_hvf_ok(ret); arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val; @@ -684,24 +684,24 @@ int hvf_put_registers(CPUState *cpu) for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) { val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset); - ret = hv_vcpu_set_reg(cpu->hvf->fd, hvf_reg_match[i].reg, val); + ret = hv_vcpu_set_reg(cpu->accel->fd, hvf_reg_match[i].reg, val); assert_hvf_ok(ret); } for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) { memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval)); - ret = hv_vcpu_set_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg, + ret = hv_vcpu_set_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg, fpval); assert_hvf_ok(ret); } - ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPCR, vfp_get_fpcr(env)); + ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPCR, vfp_get_fpcr(env)); assert_hvf_ok(ret); - ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPSR, vfp_get_fpsr(env)); + ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPSR, vfp_get_fpsr(env)); assert_hvf_ok(ret); - ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_CPSR, pstate_read(env)); + ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_CPSR, pstate_read(env)); assert_hvf_ok(ret); 
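/* Annotation, not a line of this patch: hv_vcpu_get_reg() and hv_vcpu_set_reg() return an hv_return_t, and assert_hvf_ok() logs the error code and aborts on anything other than HV_SUCCESS, so every register transfer in this function is individually checked. */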
aarch64_save_sp(env, arm_current_el(env)); @@ -712,7 +712,7 @@ int hvf_put_registers(CPUState *cpu) continue; } - if (cpu->hvf->guest_debug_enabled) { + if (cpu->accel->guest_debug_enabled) { /* Handle debug registers */ switch (hvf_sreg_match[i].reg) { case HV_SYS_REG_DBGBVR0_EL1: @@ -789,11 +789,11 @@ int hvf_put_registers(CPUState *cpu) } val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx]; - ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, val); + ret = hv_vcpu_set_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, val); assert_hvf_ok(ret); } - ret = hv_vcpu_set_vtimer_offset(cpu->hvf->fd, hvf_state->vtimer_offset); + ret = hv_vcpu_set_vtimer_offset(cpu->accel->fd, hvf_state->vtimer_offset); assert_hvf_ok(ret); return 0; @@ -814,7 +814,7 @@ static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val) flush_cpu_state(cpu); if (rt < 31) { - r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_X0 + rt, val); + r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_X0 + rt, val); assert_hvf_ok(r); } } @@ -827,7 +827,7 @@ static uint64_t hvf_get_reg(CPUState *cpu, int rt) flush_cpu_state(cpu); if (rt < 31) { - r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_X0 + rt, &val); + r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_X0 + rt, &val); assert_hvf_ok(r); } @@ -969,22 +969,22 @@ int hvf_arch_init_vcpu(CPUState *cpu) assert(write_cpustate_to_list(arm_cpu, false)); /* Set CP_NO_RAW system registers on init */ - ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MIDR_EL1, + ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MIDR_EL1, arm_cpu->midr); assert_hvf_ok(ret); - ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MPIDR_EL1, + ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MPIDR_EL1, arm_cpu->mp_affinity); assert_hvf_ok(ret); - ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr); + ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr); assert_hvf_ok(ret); pfr |= env->gicv3state ? 
(1 << 24) : 0; - ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr); + ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr); assert_hvf_ok(ret); /* We're limited to underlying hardware caps, override internal versions */ - ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64MMFR0_EL1, + ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1, &arm_cpu->isar.id_aa64mmfr0); assert_hvf_ok(ret); @@ -994,7 +994,7 @@ int hvf_arch_init_vcpu(CPUState *cpu) void hvf_kick_vcpu_thread(CPUState *cpu) { cpus_kick_thread(cpu); - hv_vcpus_exit(&cpu->hvf->fd, 1); + hv_vcpus_exit(&cpu->accel->fd, 1); } static void hvf_raise_exception(CPUState *cpu, uint32_t excp, @@ -1678,13 +1678,13 @@ static int hvf_inject_interrupts(CPUState *cpu) { if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) { trace_hvf_inject_fiq(); - hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_FIQ, + hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_FIQ, true); } if (cpu->interrupt_request & CPU_INTERRUPT_HARD) { trace_hvf_inject_irq(); - hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_IRQ, + hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_IRQ, true); } @@ -1718,7 +1718,7 @@ static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts) */ qatomic_set_mb(&cpu->thread_kicked, false); qemu_mutex_unlock_iothread(); - pselect(0, 0, 0, 0, ts, &cpu->hvf->unblock_ipi_mask); + pselect(0, 0, 0, 0, ts, &cpu->accel->unblock_ipi_mask); qemu_mutex_lock_iothread(); } @@ -1739,7 +1739,7 @@ static void hvf_wfi(CPUState *cpu) return; } - r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl); + r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl); assert_hvf_ok(r); if (!(ctl & 1) || (ctl & 2)) { @@ -1748,7 +1748,7 @@ static void hvf_wfi(CPUState *cpu) return; } - r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval); + r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval); assert_hvf_ok(r); ticks_to_sleep = cval - hvf_vtimer_val(); @@ -1781,12 +1781,12 @@ static void hvf_sync_vtimer(CPUState *cpu) uint64_t ctl; bool irq_state; - if (!cpu->hvf->vtimer_masked) { + if (!cpu->accel->vtimer_masked) { /* We will get notified on vtimer changes by hvf, nothing to do */ return; } - r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl); + r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl); assert_hvf_ok(r); irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) == @@ -1795,8 +1795,8 @@ static void hvf_sync_vtimer(CPUState *cpu) if (!irq_state) { /* Timer no longer asserting, we can unmask it */ - hv_vcpu_set_vtimer_mask(cpu->hvf->fd, false); - cpu->hvf->vtimer_masked = false; + hv_vcpu_set_vtimer_mask(cpu->accel->fd, false); + cpu->accel->vtimer_masked = false; } } @@ -1805,7 +1805,7 @@ int hvf_vcpu_exec(CPUState *cpu) ARMCPU *arm_cpu = ARM_CPU(cpu); CPUARMState *env = &arm_cpu->env; int ret; - hv_vcpu_exit_t *hvf_exit = cpu->hvf->exit; + hv_vcpu_exit_t *hvf_exit = cpu->accel->exit; hv_return_t r; bool advance_pc = false; @@ -1821,7 +1821,7 @@ int hvf_vcpu_exec(CPUState *cpu) flush_cpu_state(cpu); qemu_mutex_unlock_iothread(); - assert_hvf_ok(hv_vcpu_run(cpu->hvf->fd)); + assert_hvf_ok(hv_vcpu_run(cpu->accel->fd)); /* handle VMEXIT */ uint64_t exit_reason = hvf_exit->reason; @@ -1836,7 +1836,7 @@ int hvf_vcpu_exec(CPUState *cpu) break; case HV_EXIT_REASON_VTIMER_ACTIVATED: qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1); - 
cpu->hvf->vtimer_masked = true; + cpu->accel->vtimer_masked = true; return 0; case HV_EXIT_REASON_CANCELED: /* we got kicked, no exit to process */ @@ -1990,10 +1990,10 @@ int hvf_vcpu_exec(CPUState *cpu) flush_cpu_state(cpu); - r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_PC, &pc); + r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_PC, &pc); assert_hvf_ok(r); pc += 4; - r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_PC, pc); + r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_PC, pc); assert_hvf_ok(r); /* Handle single-stepping over instructions which trigger a VM exit */ @@ -2113,29 +2113,29 @@ static void hvf_put_gdbstub_debug_registers(CPUState *cpu) for (i = 0; i < cur_hw_bps; i++) { HWBreakpoint *bp = get_hw_bp(i); - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbcr_regs[i], bp->bcr); + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], bp->bcr); assert_hvf_ok(r); - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbvr_regs[i], bp->bvr); + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], bp->bvr); assert_hvf_ok(r); } for (i = cur_hw_bps; i < max_hw_bps; i++) { - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbcr_regs[i], 0); + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], 0); assert_hvf_ok(r); - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbvr_regs[i], 0); + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], 0); assert_hvf_ok(r); } for (i = 0; i < cur_hw_wps; i++) { HWWatchpoint *wp = get_hw_wp(i); - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwcr_regs[i], wp->wcr); + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], wp->wcr); assert_hvf_ok(r); - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwvr_regs[i], wp->wvr); + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], wp->wvr); assert_hvf_ok(r); } for (i = cur_hw_wps; i < max_hw_wps; i++) { - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwcr_regs[i], 0); + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], 0); assert_hvf_ok(r); - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwvr_regs[i], 0); + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], 0); assert_hvf_ok(r); } } @@ -2152,19 +2152,19 @@ static void hvf_put_guest_debug_registers(CPUState *cpu) int i; for (i = 0; i < max_hw_bps; i++) { - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbcr_regs[i], + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbcr_regs[i], env->cp15.dbgbcr[i]); assert_hvf_ok(r); - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgbvr_regs[i], + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgbvr_regs[i], env->cp15.dbgbvr[i]); assert_hvf_ok(r); } for (i = 0; i < max_hw_wps; i++) { - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwcr_regs[i], + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwcr_regs[i], env->cp15.dbgwcr[i]); assert_hvf_ok(r); - r = hv_vcpu_set_sys_reg(cpu->hvf->fd, dbgwvr_regs[i], + r = hv_vcpu_set_sys_reg(cpu->accel->fd, dbgwvr_regs[i], env->cp15.dbgwvr[i]); assert_hvf_ok(r); } @@ -2184,16 +2184,16 @@ static void hvf_arch_set_traps(void) /* Check whether guest debugging is enabled for at least one vCPU; if it * is, enable exiting the guest on all vCPUs */ CPU_FOREACH(cpu) { - should_enable_traps |= cpu->hvf->guest_debug_enabled; + should_enable_traps |= cpu->accel->guest_debug_enabled; } CPU_FOREACH(cpu) { /* Set whether debug exceptions exit the guest */ - r = hv_vcpu_set_trap_debug_exceptions(cpu->hvf->fd, + r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd, should_enable_traps); assert_hvf_ok(r); /* Set whether accesses to debug registers exit the guest */ - r = hv_vcpu_set_trap_debug_reg_accesses(cpu->hvf->fd, + r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd, should_enable_traps); 
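/* Annotation, not a line of this patch: both hv_vcpu_set_trap_debug_* calls run under CPU_FOREACH(), so enabling guest debug on any single vCPU makes debug exceptions and debug-register accesses exit to QEMU on every vCPU. */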
assert_hvf_ok(r); } @@ -2205,12 +2205,12 @@ void hvf_arch_update_guest_debug(CPUState *cpu) CPUARMState *env = &arm_cpu->env; /* Check whether guest debugging is enabled */ - cpu->hvf->guest_debug_enabled = cpu->singlestep_enabled || + cpu->accel->guest_debug_enabled = cpu->singlestep_enabled || hvf_sw_breakpoints_active(cpu) || hvf_arm_hw_debug_active(cpu); /* Update debug registers */ - if (cpu->hvf->guest_debug_enabled) { + if (cpu->accel->guest_debug_enabled) { hvf_put_gdbstub_debug_registers(cpu); } else { hvf_put_guest_debug_registers(cpu); diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c index f6775c9..b9cbcc0 100644 --- a/target/i386/hvf/hvf.c +++ b/target/i386/hvf/hvf.c @@ -81,11 +81,11 @@ void vmx_update_tpr(CPUState *cpu) int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4; int irr = apic_get_highest_priority_irr(x86_cpu->apic_state); - wreg(cpu->hvf->fd, HV_X86_TPR, tpr); + wreg(cpu->accel->fd, HV_X86_TPR, tpr); if (irr == -1) { - wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0); + wvmcs(cpu->accel->fd, VMCS_TPR_THRESHOLD, 0); } else { - wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 : + wvmcs(cpu->accel->fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 : irr >> 4); } } @@ -93,7 +93,7 @@ void vmx_update_tpr(CPUState *cpu) static void update_apic_tpr(CPUState *cpu) { X86CPU *x86_cpu = X86_CPU(cpu); - int tpr = rreg(cpu->hvf->fd, HV_X86_TPR) >> 4; + int tpr = rreg(cpu->accel->fd, HV_X86_TPR) >> 4; cpu_set_apic_tpr(x86_cpu->apic_state, tpr); } @@ -256,12 +256,12 @@ int hvf_arch_init_vcpu(CPUState *cpu) } /* set VMCS control fields */ - wvmcs(cpu->hvf->fd, VMCS_PIN_BASED_CTLS, + wvmcs(cpu->accel->fd, VMCS_PIN_BASED_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased, VMCS_PIN_BASED_CTLS_EXTINT | VMCS_PIN_BASED_CTLS_NMI | VMCS_PIN_BASED_CTLS_VNMI)); - wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, + wvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased, VMCS_PRI_PROC_BASED_CTLS_HLT | VMCS_PRI_PROC_BASED_CTLS_MWAIT | @@ -276,14 +276,14 @@ int hvf_arch_init_vcpu(CPUState *cpu) reqCap |= VMCS_PRI_PROC_BASED2_CTLS_RDTSCP; } - wvmcs(cpu->hvf->fd, VMCS_SEC_PROC_BASED_CTLS, + wvmcs(cpu->accel->fd, VMCS_SEC_PROC_BASED_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2, reqCap)); - wvmcs(cpu->hvf->fd, VMCS_ENTRY_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry, - 0)); - wvmcs(cpu->hvf->fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */ + wvmcs(cpu->accel->fd, VMCS_ENTRY_CTLS, + cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry, 0)); + wvmcs(cpu->accel->fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */ - wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0); + wvmcs(cpu->accel->fd, VMCS_TPR_THRESHOLD, 0); x86cpu = X86_CPU(cpu); x86cpu->env.xsave_buf_len = 4096; @@ -295,18 +295,18 @@ int hvf_arch_init_vcpu(CPUState *cpu) */ assert(hvf_get_supported_cpuid(0xd, 0, R_ECX) <= x86cpu->env.xsave_buf_len); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_STAR, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_LSTAR, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_CSTAR, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FMASK, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FSBASE, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_GSBASE, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_KERNELGSBASE, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_TSC_AUX, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_TSC, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_CS, 1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_EIP, 
1); - hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_ESP, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_STAR, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_LSTAR, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_CSTAR, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_FMASK, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_FSBASE, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_GSBASE, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_KERNELGSBASE, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_TSC_AUX, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_TSC, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_SYSENTER_CS, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_SYSENTER_EIP, 1); + hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_SYSENTER_ESP, 1); return 0; } @@ -347,16 +347,16 @@ static void hvf_store_events(CPUState *cpu, uint32_t ins_len, uint64_t idtvec_in } if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { env->has_error_code = true; - env->error_code = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_ERROR); + env->error_code = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_ERROR); } } - if ((rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) & + if ((rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY) & VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) { env->hflags2 |= HF2_NMI_MASK; } else { env->hflags2 &= ~HF2_NMI_MASK; } - if (rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) & + if (rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY) & (VMCS_INTERRUPTIBILITY_STI_BLOCKING | VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) { env->hflags |= HF_INHIBIT_IRQ_MASK; @@ -435,20 +435,20 @@ int hvf_vcpu_exec(CPUState *cpu) return EXCP_HLT; } - hv_return_t r = hv_vcpu_run(cpu->hvf->fd); + hv_return_t r = hv_vcpu_run(cpu->accel->fd); assert_hvf_ok(r); /* handle VMEXIT */ - uint64_t exit_reason = rvmcs(cpu->hvf->fd, VMCS_EXIT_REASON); - uint64_t exit_qual = rvmcs(cpu->hvf->fd, VMCS_EXIT_QUALIFICATION); - uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf->fd, + uint64_t exit_reason = rvmcs(cpu->accel->fd, VMCS_EXIT_REASON); + uint64_t exit_qual = rvmcs(cpu->accel->fd, VMCS_EXIT_QUALIFICATION); + uint32_t ins_len = (uint32_t)rvmcs(cpu->accel->fd, VMCS_EXIT_INSTRUCTION_LENGTH); - uint64_t idtvec_info = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO); + uint64_t idtvec_info = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_INFO); hvf_store_events(cpu, ins_len, idtvec_info); - rip = rreg(cpu->hvf->fd, HV_X86_RIP); - env->eflags = rreg(cpu->hvf->fd, HV_X86_RFLAGS); + rip = rreg(cpu->accel->fd, HV_X86_RIP); + env->eflags = rreg(cpu->accel->fd, HV_X86_RFLAGS); qemu_mutex_lock_iothread(); @@ -478,7 +478,7 @@ int hvf_vcpu_exec(CPUState *cpu) case EXIT_REASON_EPT_FAULT: { hvf_slot *slot; - uint64_t gpa = rvmcs(cpu->hvf->fd, VMCS_GUEST_PHYSICAL_ADDRESS); + uint64_t gpa = rvmcs(cpu->accel->fd, VMCS_GUEST_PHYSICAL_ADDRESS); if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) && ((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) { @@ -523,7 +523,7 @@ int hvf_vcpu_exec(CPUState *cpu) store_regs(cpu); break; } else if (!string && !in) { - RAX(env) = rreg(cpu->hvf->fd, HV_X86_RAX); + RAX(env) = rreg(cpu->accel->fd, HV_X86_RAX); hvf_handle_io(env, port, &RAX(env), 1, size, 1); macvm_set_rip(cpu, rip + ins_len); break; @@ -539,21 +539,21 @@ int hvf_vcpu_exec(CPUState *cpu) break; } case EXIT_REASON_CPUID: { - uint32_t rax = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX); - uint32_t rbx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RBX); - uint32_t rcx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX); - uint32_t rdx = 
(uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX); + uint32_t rax = (uint32_t)rreg(cpu->accel->fd, HV_X86_RAX); + uint32_t rbx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RBX); + uint32_t rcx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RCX); + uint32_t rdx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RDX); if (rax == 1) { /* CPUID1.ecx.OSXSAVE needs to know CR4 */ - env->cr[4] = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4); + env->cr[4] = rvmcs(cpu->accel->fd, VMCS_GUEST_CR4); } hvf_cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx); - wreg(cpu->hvf->fd, HV_X86_RAX, rax); - wreg(cpu->hvf->fd, HV_X86_RBX, rbx); - wreg(cpu->hvf->fd, HV_X86_RCX, rcx); - wreg(cpu->hvf->fd, HV_X86_RDX, rdx); + wreg(cpu->accel->fd, HV_X86_RAX, rax); + wreg(cpu->accel->fd, HV_X86_RBX, rbx); + wreg(cpu->accel->fd, HV_X86_RCX, rcx); + wreg(cpu->accel->fd, HV_X86_RDX, rdx); macvm_set_rip(cpu, rip + ins_len); break; @@ -561,16 +561,16 @@ int hvf_vcpu_exec(CPUState *cpu) case EXIT_REASON_XSETBV: { X86CPU *x86_cpu = X86_CPU(cpu); CPUX86State *env = &x86_cpu->env; - uint32_t eax = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX); - uint32_t ecx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX); - uint32_t edx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX); + uint32_t eax = (uint32_t)rreg(cpu->accel->fd, HV_X86_RAX); + uint32_t ecx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RCX); + uint32_t edx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RDX); if (ecx) { macvm_set_rip(cpu, rip + ins_len); break; } env->xcr0 = ((uint64_t)edx << 32) | eax; - wreg(cpu->hvf->fd, HV_X86_XCR0, env->xcr0 | 1); + wreg(cpu->accel->fd, HV_X86_XCR0, env->xcr0 | 1); macvm_set_rip(cpu, rip + ins_len); break; } @@ -609,11 +609,11 @@ int hvf_vcpu_exec(CPUState *cpu) switch (cr) { case 0x0: { - macvm_set_cr0(cpu->hvf->fd, RRX(env, reg)); + macvm_set_cr0(cpu->accel->fd, RRX(env, reg)); break; } case 4: { - macvm_set_cr4(cpu->hvf->fd, RRX(env, reg)); + macvm_set_cr4(cpu->accel->fd, RRX(env, reg)); break; } case 8: { @@ -649,7 +649,7 @@ int hvf_vcpu_exec(CPUState *cpu) break; } case EXIT_REASON_TASK_SWITCH: { - uint64_t vinfo = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO); + uint64_t vinfo = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_INFO); x68_segment_selector sel = {.sel = exit_qual & 0xffff}; vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3, vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo @@ -662,8 +662,8 @@ int hvf_vcpu_exec(CPUState *cpu) break; } case EXIT_REASON_RDPMC: - wreg(cpu->hvf->fd, HV_X86_RAX, 0); - wreg(cpu->hvf->fd, HV_X86_RDX, 0); + wreg(cpu->accel->fd, HV_X86_RAX, 0); + wreg(cpu->accel->fd, HV_X86_RDX, 0); macvm_set_rip(cpu, rip + ins_len); break; case VMX_REASON_VMCALL: diff --git a/target/i386/hvf/vmx.h b/target/i386/hvf/vmx.h index fcd9a95..0fffcfa 100644 --- a/target/i386/hvf/vmx.h +++ b/target/i386/hvf/vmx.h @@ -180,15 +180,15 @@ static inline void macvm_set_rip(CPUState *cpu, uint64_t rip) uint64_t val; /* BUG, should take considering overlap.. 
*/ - wreg(cpu->hvf->fd, HV_X86_RIP, rip); + wreg(cpu->accel->fd, HV_X86_RIP, rip); env->eip = rip; /* after moving forward in rip, we need to clean INTERRUPTABILITY */ - val = rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY); + val = rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY); if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING | VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) { env->hflags &= ~HF_INHIBIT_IRQ_MASK; - wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, + wvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY, val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING | VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)); } @@ -200,9 +200,9 @@ static inline void vmx_clear_nmi_blocking(CPUState *cpu) CPUX86State *env = &x86_cpu->env; env->hflags2 &= ~HF2_NMI_MASK; - uint32_t gi = (uint32_t) rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY); + uint32_t gi = (uint32_t) rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY); gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING; - wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi); + wvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY, gi); } static inline void vmx_set_nmi_blocking(CPUState *cpu) @@ -211,16 +211,16 @@ static inline void vmx_set_nmi_blocking(CPUState *cpu) CPUX86State *env = &x86_cpu->env; env->hflags2 |= HF2_NMI_MASK; - uint32_t gi = (uint32_t)rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY); + uint32_t gi = (uint32_t)rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY); gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING; - wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi); + wvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY, gi); } static inline void vmx_set_nmi_window_exiting(CPUState *cpu) { uint64_t val; - val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); - wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val | + val = rvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS); + wvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS, val | VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING); } @@ -229,8 +229,8 @@ static inline void vmx_clear_nmi_window_exiting(CPUState *cpu) { uint64_t val; - val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); - wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val & + val = rvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS); + wvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS, val & ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING); } diff --git a/target/i386/hvf/x86.c b/target/i386/hvf/x86.c index d086584..8ceea63 100644 --- a/target/i386/hvf/x86.c +++ b/target/i386/hvf/x86.c @@ -61,11 +61,11 @@ bool x86_read_segment_descriptor(struct CPUState *cpu, } if (GDT_SEL == sel.ti) { - base = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE); - limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT); + base = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_BASE); + limit = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_LIMIT); } else { - base = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE); - limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT); + base = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_BASE); + limit = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_LIMIT); } if (sel.index * 8 >= limit) { @@ -84,11 +84,11 @@ bool x86_write_segment_descriptor(struct CPUState *cpu, uint32_t limit; if (GDT_SEL == sel.ti) { - base = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE); - limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT); + base = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_BASE); + limit = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_LIMIT); } else { - base = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE); - limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT); + base = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_BASE); + limit = rvmcs(cpu->accel->fd, 
VMCS_GUEST_LDTR_LIMIT); } if (sel.index * 8 >= limit) { @@ -102,8 +102,8 @@ bool x86_write_segment_descriptor(struct CPUState *cpu, bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc, int gate) { - target_ulong base = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_BASE); - uint32_t limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_LIMIT); + target_ulong base = rvmcs(cpu->accel->fd, VMCS_GUEST_IDTR_BASE); + uint32_t limit = rvmcs(cpu->accel->fd, VMCS_GUEST_IDTR_LIMIT); memset(idt_desc, 0, sizeof(*idt_desc)); if (gate * 8 >= limit) { @@ -117,7 +117,7 @@ bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc, bool x86_is_protected(struct CPUState *cpu) { - uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0); + uint64_t cr0 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR0); return cr0 & CR0_PE_MASK; } @@ -135,7 +135,7 @@ bool x86_is_v8086(struct CPUState *cpu) bool x86_is_long_mode(struct CPUState *cpu) { - return rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA; + return rvmcs(cpu->accel->fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA; } bool x86_is_long64_mode(struct CPUState *cpu) @@ -148,13 +148,13 @@ bool x86_is_long64_mode(struct CPUState *cpu) bool x86_is_paging_mode(struct CPUState *cpu) { - uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0); + uint64_t cr0 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR0); return cr0 & CR0_PG_MASK; } bool x86_is_pae_enabled(struct CPUState *cpu) { - uint64_t cr4 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4); + uint64_t cr4 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR4); return cr4 & CR4_PAE_MASK; } diff --git a/target/i386/hvf/x86_descr.c b/target/i386/hvf/x86_descr.c index a484942..c2d2e9e 100644 --- a/target/i386/hvf/x86_descr.c +++ b/target/i386/hvf/x86_descr.c @@ -47,47 +47,47 @@ static const struct vmx_segment_field { uint32_t vmx_read_segment_limit(CPUState *cpu, X86Seg seg) { - return (uint32_t)rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].limit); + return (uint32_t)rvmcs(cpu->accel->fd, vmx_segment_fields[seg].limit); } uint32_t vmx_read_segment_ar(CPUState *cpu, X86Seg seg) { - return (uint32_t)rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].ar_bytes); + return (uint32_t)rvmcs(cpu->accel->fd, vmx_segment_fields[seg].ar_bytes); } uint64_t vmx_read_segment_base(CPUState *cpu, X86Seg seg) { - return rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].base); + return rvmcs(cpu->accel->fd, vmx_segment_fields[seg].base); } x68_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg) { x68_segment_selector sel; - sel.sel = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector); + sel.sel = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector); return sel; } void vmx_write_segment_selector(struct CPUState *cpu, x68_segment_selector selector, X86Seg seg) { - wvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector, selector.sel); + wvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector, selector.sel); } void vmx_read_segment_descriptor(struct CPUState *cpu, struct vmx_segment *desc, X86Seg seg) { - desc->sel = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].selector); - desc->base = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].base); - desc->limit = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].limit); - desc->ar = rvmcs(cpu->hvf->fd, vmx_segment_fields[seg].ar_bytes); + desc->sel = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector); + desc->base = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].base); + desc->limit = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].limit); + desc->ar = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].ar_bytes); } void 
vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, X86Seg seg) { const struct vmx_segment_field *sf = &vmx_segment_fields[seg]; - wvmcs(cpu->hvf->fd, sf->base, desc->base); - wvmcs(cpu->hvf->fd, sf->limit, desc->limit); - wvmcs(cpu->hvf->fd, sf->selector, desc->sel); - wvmcs(cpu->hvf->fd, sf->ar_bytes, desc->ar); + wvmcs(cpu->accel->fd, sf->base, desc->base); + wvmcs(cpu->accel->fd, sf->limit, desc->limit); + wvmcs(cpu->accel->fd, sf->selector, desc->sel); + wvmcs(cpu->accel->fd, sf->ar_bytes, desc->ar); } void x86_segment_descriptor_to_vmx(struct CPUState *cpu, x68_segment_selector selector, struct x86_segment_descriptor *desc, struct vmx_segment *vmx_desc) diff --git a/target/i386/hvf/x86_emu.c b/target/i386/hvf/x86_emu.c index f5704f6..ccda568 100644 --- a/target/i386/hvf/x86_emu.c +++ b/target/i386/hvf/x86_emu.c @@ -673,7 +673,7 @@ void simulate_rdmsr(struct CPUState *cpu) switch (msr) { case MSR_IA32_TSC: - val = rdtscp() + rvmcs(cpu->hvf->fd, VMCS_TSC_OFFSET); + val = rdtscp() + rvmcs(cpu->accel->fd, VMCS_TSC_OFFSET); break; case MSR_IA32_APICBASE: val = cpu_get_apic_base(X86_CPU(cpu)->apic_state); @@ -682,16 +682,16 @@ void simulate_rdmsr(struct CPUState *cpu) val = x86_cpu->ucode_rev; break; case MSR_EFER: - val = rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER); + val = rvmcs(cpu->accel->fd, VMCS_GUEST_IA32_EFER); break; case MSR_FSBASE: - val = rvmcs(cpu->hvf->fd, VMCS_GUEST_FS_BASE); + val = rvmcs(cpu->accel->fd, VMCS_GUEST_FS_BASE); break; case MSR_GSBASE: - val = rvmcs(cpu->hvf->fd, VMCS_GUEST_GS_BASE); + val = rvmcs(cpu->accel->fd, VMCS_GUEST_GS_BASE); break; case MSR_KERNELGSBASE: - val = rvmcs(cpu->hvf->fd, VMCS_HOST_FS_BASE); + val = rvmcs(cpu->accel->fd, VMCS_HOST_FS_BASE); break; case MSR_STAR: abort(); @@ -779,13 +779,13 @@ void simulate_wrmsr(struct CPUState *cpu) cpu_set_apic_base(X86_CPU(cpu)->apic_state, data); break; case MSR_FSBASE: - wvmcs(cpu->hvf->fd, VMCS_GUEST_FS_BASE, data); + wvmcs(cpu->accel->fd, VMCS_GUEST_FS_BASE, data); break; case MSR_GSBASE: - wvmcs(cpu->hvf->fd, VMCS_GUEST_GS_BASE, data); + wvmcs(cpu->accel->fd, VMCS_GUEST_GS_BASE, data); break; case MSR_KERNELGSBASE: - wvmcs(cpu->hvf->fd, VMCS_HOST_FS_BASE, data); + wvmcs(cpu->accel->fd, VMCS_HOST_FS_BASE, data); break; case MSR_STAR: abort(); @@ -798,9 +798,9 @@ void simulate_wrmsr(struct CPUState *cpu) break; case MSR_EFER: /*printf("new efer %llx\n", EFER(cpu));*/ - wvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER, data); + wvmcs(cpu->accel->fd, VMCS_GUEST_IA32_EFER, data); if (data & MSR_EFER_NXE) { - hv_vcpu_invalidate_tlb(cpu->hvf->fd); + hv_vcpu_invalidate_tlb(cpu->accel->fd); } break; case MSR_MTRRphysBase(0): @@ -1424,21 +1424,21 @@ void load_regs(struct CPUState *cpu) CPUX86State *env = &x86_cpu->env; int i = 0; - RRX(env, R_EAX) = rreg(cpu->hvf->fd, HV_X86_RAX); - RRX(env, R_EBX) = rreg(cpu->hvf->fd, HV_X86_RBX); - RRX(env, R_ECX) = rreg(cpu->hvf->fd, HV_X86_RCX); - RRX(env, R_EDX) = rreg(cpu->hvf->fd, HV_X86_RDX); - RRX(env, R_ESI) = rreg(cpu->hvf->fd, HV_X86_RSI); - RRX(env, R_EDI) = rreg(cpu->hvf->fd, HV_X86_RDI); - RRX(env, R_ESP) = rreg(cpu->hvf->fd, HV_X86_RSP); - RRX(env, R_EBP) = rreg(cpu->hvf->fd, HV_X86_RBP); + RRX(env, R_EAX) = rreg(cpu->accel->fd, HV_X86_RAX); + RRX(env, R_EBX) = rreg(cpu->accel->fd, HV_X86_RBX); + RRX(env, R_ECX) = rreg(cpu->accel->fd, HV_X86_RCX); + RRX(env, R_EDX) = rreg(cpu->accel->fd, HV_X86_RDX); + RRX(env, R_ESI) = rreg(cpu->accel->fd, HV_X86_RSI); + RRX(env, R_EDI) = rreg(cpu->accel->fd, HV_X86_RDI); + RRX(env, R_ESP) = rreg(cpu->accel->fd, 
HV_X86_RSP); + RRX(env, R_EBP) = rreg(cpu->accel->fd, HV_X86_RBP); for (i = 8; i < 16; i++) { - RRX(env, i) = rreg(cpu->hvf->fd, HV_X86_RAX + i); + RRX(env, i) = rreg(cpu->accel->fd, HV_X86_RAX + i); } - env->eflags = rreg(cpu->hvf->fd, HV_X86_RFLAGS); + env->eflags = rreg(cpu->accel->fd, HV_X86_RFLAGS); rflags_to_lflags(env); - env->eip = rreg(cpu->hvf->fd, HV_X86_RIP); + env->eip = rreg(cpu->accel->fd, HV_X86_RIP); } void store_regs(struct CPUState *cpu) @@ -1447,20 +1447,20 @@ void store_regs(struct CPUState *cpu) CPUX86State *env = &x86_cpu->env; int i = 0; - wreg(cpu->hvf->fd, HV_X86_RAX, RAX(env)); - wreg(cpu->hvf->fd, HV_X86_RBX, RBX(env)); - wreg(cpu->hvf->fd, HV_X86_RCX, RCX(env)); - wreg(cpu->hvf->fd, HV_X86_RDX, RDX(env)); - wreg(cpu->hvf->fd, HV_X86_RSI, RSI(env)); - wreg(cpu->hvf->fd, HV_X86_RDI, RDI(env)); - wreg(cpu->hvf->fd, HV_X86_RBP, RBP(env)); - wreg(cpu->hvf->fd, HV_X86_RSP, RSP(env)); + wreg(cpu->accel->fd, HV_X86_RAX, RAX(env)); + wreg(cpu->accel->fd, HV_X86_RBX, RBX(env)); + wreg(cpu->accel->fd, HV_X86_RCX, RCX(env)); + wreg(cpu->accel->fd, HV_X86_RDX, RDX(env)); + wreg(cpu->accel->fd, HV_X86_RSI, RSI(env)); + wreg(cpu->accel->fd, HV_X86_RDI, RDI(env)); + wreg(cpu->accel->fd, HV_X86_RBP, RBP(env)); + wreg(cpu->accel->fd, HV_X86_RSP, RSP(env)); for (i = 8; i < 16; i++) { - wreg(cpu->hvf->fd, HV_X86_RAX + i, RRX(env, i)); + wreg(cpu->accel->fd, HV_X86_RAX + i, RRX(env, i)); } lflags_to_rflags(env); - wreg(cpu->hvf->fd, HV_X86_RFLAGS, env->eflags); + wreg(cpu->accel->fd, HV_X86_RFLAGS, env->eflags); macvm_set_rip(cpu, env->eip); } diff --git a/target/i386/hvf/x86_mmu.c b/target/i386/hvf/x86_mmu.c index 96d1175..8cd0862 100644 --- a/target/i386/hvf/x86_mmu.c +++ b/target/i386/hvf/x86_mmu.c @@ -126,7 +126,7 @@ static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt, pt->err_code |= MMU_PAGE_PT; } - uint32_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0); + uint32_t cr0 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR0); /* check protection */ if (cr0 & CR0_WP_MASK) { if (pt->write_access && !pte_write_access(pte)) { @@ -171,7 +171,7 @@ static bool walk_gpt(struct CPUState *cpu, target_ulong addr, int err_code, { int top_level, level; bool is_large = false; - target_ulong cr3 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR3); + target_ulong cr3 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR3); uint64_t page_mask = pae ? 
PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK; memset(pt, 0, sizeof(*pt)); diff --git a/target/i386/hvf/x86_task.c b/target/i386/hvf/x86_task.c index beaeec0..f09bfbd 100644 --- a/target/i386/hvf/x86_task.c +++ b/target/i386/hvf/x86_task.c @@ -61,7 +61,7 @@ static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss) X86CPU *x86_cpu = X86_CPU(cpu); CPUX86State *env = &x86_cpu->env; - wvmcs(cpu->hvf->fd, VMCS_GUEST_CR3, tss->cr3); + wvmcs(cpu->accel->fd, VMCS_GUEST_CR3, tss->cr3); env->eip = tss->eip; env->eflags = tss->eflags | 2; @@ -110,11 +110,11 @@ static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segme void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type) { - uint64_t rip = rreg(cpu->hvf->fd, HV_X86_RIP); + uint64_t rip = rreg(cpu->accel->fd, HV_X86_RIP); if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION && gate_type != VMCS_INTR_T_HWINTR && gate_type != VMCS_INTR_T_NMI)) { - int ins_len = rvmcs(cpu->hvf->fd, VMCS_EXIT_INSTRUCTION_LENGTH); + int ins_len = rvmcs(cpu->accel->fd, VMCS_EXIT_INSTRUCTION_LENGTH); macvm_set_rip(cpu, rip + ins_len); return; } @@ -173,12 +173,12 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc); VM_PANIC("task_switch_16"); - macvm_set_cr0(cpu->hvf->fd, rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0) | + macvm_set_cr0(cpu->accel->fd, rvmcs(cpu->accel->fd, VMCS_GUEST_CR0) | CR0_TS_MASK); x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg); vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR); store_regs(cpu); - hv_vcpu_invalidate_tlb(cpu->hvf->fd); + hv_vcpu_invalidate_tlb(cpu->accel->fd); } diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c index 92dfd26..3b1ef5f 100644 --- a/target/i386/hvf/x86hvf.c +++ b/target/i386/hvf/x86hvf.c @@ -77,7 +77,7 @@ void hvf_put_xsave(CPUState *cs) x86_cpu_xsave_all_areas(X86_CPU(cs), xsave, xsave_len); - if (hv_vcpu_write_fpstate(cs->hvf->fd, xsave, xsave_len)) { + if (hv_vcpu_write_fpstate(cs->accel->fd, xsave, xsave_len)) { abort(); } } @@ -87,19 +87,19 @@ static void hvf_put_segments(CPUState *cs) CPUX86State *env = &X86_CPU(cs)->env; struct vmx_segment seg; - wvmcs(cs->hvf->fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit); - wvmcs(cs->hvf->fd, VMCS_GUEST_IDTR_BASE, env->idt.base); + wvmcs(cs->accel->fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit); + wvmcs(cs->accel->fd, VMCS_GUEST_IDTR_BASE, env->idt.base); - wvmcs(cs->hvf->fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit); - wvmcs(cs->hvf->fd, VMCS_GUEST_GDTR_BASE, env->gdt.base); + wvmcs(cs->accel->fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit); + wvmcs(cs->accel->fd, VMCS_GUEST_GDTR_BASE, env->gdt.base); - /* wvmcs(cs->hvf->fd, VMCS_GUEST_CR2, env->cr[2]); */ - wvmcs(cs->hvf->fd, VMCS_GUEST_CR3, env->cr[3]); + /* wvmcs(cs->accel->fd, VMCS_GUEST_CR2, env->cr[2]); */ + wvmcs(cs->accel->fd, VMCS_GUEST_CR3, env->cr[3]); vmx_update_tpr(cs); - wvmcs(cs->hvf->fd, VMCS_GUEST_IA32_EFER, env->efer); + wvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER, env->efer); - macvm_set_cr4(cs->hvf->fd, env->cr[4]); - macvm_set_cr0(cs->hvf->fd, env->cr[0]); + macvm_set_cr4(cs->accel->fd, env->cr[4]); + macvm_set_cr0(cs->accel->fd, env->cr[0]); hvf_set_segment(cs, &seg, &env->segs[R_CS], false); vmx_write_segment_descriptor(cs, &seg, R_CS); @@ -130,24 +130,24 @@ void hvf_put_msrs(CPUState *cs) { CPUX86State *env = &X86_CPU(cs)->env; - hv_vcpu_write_msr(cs->hvf->fd, 
MSR_IA32_SYSENTER_CS, + hv_vcpu_write_msr(cs->accel->fd, MSR_IA32_SYSENTER_CS, env->sysenter_cs); - hv_vcpu_write_msr(cs->hvf->fd, MSR_IA32_SYSENTER_ESP, + hv_vcpu_write_msr(cs->accel->fd, MSR_IA32_SYSENTER_ESP, env->sysenter_esp); - hv_vcpu_write_msr(cs->hvf->fd, MSR_IA32_SYSENTER_EIP, + hv_vcpu_write_msr(cs->accel->fd, MSR_IA32_SYSENTER_EIP, env->sysenter_eip); - hv_vcpu_write_msr(cs->hvf->fd, MSR_STAR, env->star); + hv_vcpu_write_msr(cs->accel->fd, MSR_STAR, env->star); #ifdef TARGET_X86_64 - hv_vcpu_write_msr(cs->hvf->fd, MSR_CSTAR, env->cstar); - hv_vcpu_write_msr(cs->hvf->fd, MSR_KERNELGSBASE, env->kernelgsbase); - hv_vcpu_write_msr(cs->hvf->fd, MSR_FMASK, env->fmask); - hv_vcpu_write_msr(cs->hvf->fd, MSR_LSTAR, env->lstar); + hv_vcpu_write_msr(cs->accel->fd, MSR_CSTAR, env->cstar); + hv_vcpu_write_msr(cs->accel->fd, MSR_KERNELGSBASE, env->kernelgsbase); + hv_vcpu_write_msr(cs->accel->fd, MSR_FMASK, env->fmask); + hv_vcpu_write_msr(cs->accel->fd, MSR_LSTAR, env->lstar); #endif - hv_vcpu_write_msr(cs->hvf->fd, MSR_GSBASE, env->segs[R_GS].base); - hv_vcpu_write_msr(cs->hvf->fd, MSR_FSBASE, env->segs[R_FS].base); + hv_vcpu_write_msr(cs->accel->fd, MSR_GSBASE, env->segs[R_GS].base); + hv_vcpu_write_msr(cs->accel->fd, MSR_FSBASE, env->segs[R_FS].base); } @@ -156,7 +156,7 @@ void hvf_get_xsave(CPUState *cs) void *xsave = X86_CPU(cs)->env.xsave_buf; uint32_t xsave_len = X86_CPU(cs)->env.xsave_buf_len; - if (hv_vcpu_read_fpstate(cs->hvf->fd, xsave, xsave_len)) { + if (hv_vcpu_read_fpstate(cs->accel->fd, xsave, xsave_len)) { abort(); } @@ -195,17 +195,17 @@ static void hvf_get_segments(CPUState *cs) vmx_read_segment_descriptor(cs, &seg, R_LDTR); hvf_get_segment(&env->ldt, &seg); - env->idt.limit = rvmcs(cs->hvf->fd, VMCS_GUEST_IDTR_LIMIT); - env->idt.base = rvmcs(cs->hvf->fd, VMCS_GUEST_IDTR_BASE); - env->gdt.limit = rvmcs(cs->hvf->fd, VMCS_GUEST_GDTR_LIMIT); - env->gdt.base = rvmcs(cs->hvf->fd, VMCS_GUEST_GDTR_BASE); + env->idt.limit = rvmcs(cs->accel->fd, VMCS_GUEST_IDTR_LIMIT); + env->idt.base = rvmcs(cs->accel->fd, VMCS_GUEST_IDTR_BASE); + env->gdt.limit = rvmcs(cs->accel->fd, VMCS_GUEST_GDTR_LIMIT); + env->gdt.base = rvmcs(cs->accel->fd, VMCS_GUEST_GDTR_BASE); - env->cr[0] = rvmcs(cs->hvf->fd, VMCS_GUEST_CR0); + env->cr[0] = rvmcs(cs->accel->fd, VMCS_GUEST_CR0); env->cr[2] = 0; - env->cr[3] = rvmcs(cs->hvf->fd, VMCS_GUEST_CR3); - env->cr[4] = rvmcs(cs->hvf->fd, VMCS_GUEST_CR4); + env->cr[3] = rvmcs(cs->accel->fd, VMCS_GUEST_CR3); + env->cr[4] = rvmcs(cs->accel->fd, VMCS_GUEST_CR4); - env->efer = rvmcs(cs->hvf->fd, VMCS_GUEST_IA32_EFER); + env->efer = rvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER); } void hvf_get_msrs(CPUState *cs) @@ -213,27 +213,27 @@ void hvf_get_msrs(CPUState *cs) CPUX86State *env = &X86_CPU(cs)->env; uint64_t tmp; - hv_vcpu_read_msr(cs->hvf->fd, MSR_IA32_SYSENTER_CS, &tmp); + hv_vcpu_read_msr(cs->accel->fd, MSR_IA32_SYSENTER_CS, &tmp); env->sysenter_cs = tmp; - hv_vcpu_read_msr(cs->hvf->fd, MSR_IA32_SYSENTER_ESP, &tmp); + hv_vcpu_read_msr(cs->accel->fd, MSR_IA32_SYSENTER_ESP, &tmp); env->sysenter_esp = tmp; - hv_vcpu_read_msr(cs->hvf->fd, MSR_IA32_SYSENTER_EIP, &tmp); + hv_vcpu_read_msr(cs->accel->fd, MSR_IA32_SYSENTER_EIP, &tmp); env->sysenter_eip = tmp; - hv_vcpu_read_msr(cs->hvf->fd, MSR_STAR, &env->star); + hv_vcpu_read_msr(cs->accel->fd, MSR_STAR, &env->star); #ifdef TARGET_X86_64 - hv_vcpu_read_msr(cs->hvf->fd, MSR_CSTAR, &env->cstar); - hv_vcpu_read_msr(cs->hvf->fd, MSR_KERNELGSBASE, &env->kernelgsbase); - hv_vcpu_read_msr(cs->hvf->fd, MSR_FMASK, &env->fmask); - 
hv_vcpu_read_msr(cs->hvf->fd, MSR_LSTAR, &env->lstar); + hv_vcpu_read_msr(cs->accel->fd, MSR_CSTAR, &env->cstar); + hv_vcpu_read_msr(cs->accel->fd, MSR_KERNELGSBASE, &env->kernelgsbase); + hv_vcpu_read_msr(cs->accel->fd, MSR_FMASK, &env->fmask); + hv_vcpu_read_msr(cs->accel->fd, MSR_LSTAR, &env->lstar); #endif - hv_vcpu_read_msr(cs->hvf->fd, MSR_IA32_APICBASE, &tmp); + hv_vcpu_read_msr(cs->accel->fd, MSR_IA32_APICBASE, &tmp); - env->tsc = rdtscp() + rvmcs(cs->hvf->fd, VMCS_TSC_OFFSET); + env->tsc = rdtscp() + rvmcs(cs->accel->fd, VMCS_TSC_OFFSET); } int hvf_put_registers(CPUState *cs) @@ -241,26 +241,26 @@ int hvf_put_registers(CPUState *cs) X86CPU *x86cpu = X86_CPU(cs); CPUX86State *env = &x86cpu->env; - wreg(cs->hvf->fd, HV_X86_RAX, env->regs[R_EAX]); - wreg(cs->hvf->fd, HV_X86_RBX, env->regs[R_EBX]); - wreg(cs->hvf->fd, HV_X86_RCX, env->regs[R_ECX]); - wreg(cs->hvf->fd, HV_X86_RDX, env->regs[R_EDX]); - wreg(cs->hvf->fd, HV_X86_RBP, env->regs[R_EBP]); - wreg(cs->hvf->fd, HV_X86_RSP, env->regs[R_ESP]); - wreg(cs->hvf->fd, HV_X86_RSI, env->regs[R_ESI]); - wreg(cs->hvf->fd, HV_X86_RDI, env->regs[R_EDI]); - wreg(cs->hvf->fd, HV_X86_R8, env->regs[8]); - wreg(cs->hvf->fd, HV_X86_R9, env->regs[9]); - wreg(cs->hvf->fd, HV_X86_R10, env->regs[10]); - wreg(cs->hvf->fd, HV_X86_R11, env->regs[11]); - wreg(cs->hvf->fd, HV_X86_R12, env->regs[12]); - wreg(cs->hvf->fd, HV_X86_R13, env->regs[13]); - wreg(cs->hvf->fd, HV_X86_R14, env->regs[14]); - wreg(cs->hvf->fd, HV_X86_R15, env->regs[15]); - wreg(cs->hvf->fd, HV_X86_RFLAGS, env->eflags); - wreg(cs->hvf->fd, HV_X86_RIP, env->eip); + wreg(cs->accel->fd, HV_X86_RAX, env->regs[R_EAX]); + wreg(cs->accel->fd, HV_X86_RBX, env->regs[R_EBX]); + wreg(cs->accel->fd, HV_X86_RCX, env->regs[R_ECX]); + wreg(cs->accel->fd, HV_X86_RDX, env->regs[R_EDX]); + wreg(cs->accel->fd, HV_X86_RBP, env->regs[R_EBP]); + wreg(cs->accel->fd, HV_X86_RSP, env->regs[R_ESP]); + wreg(cs->accel->fd, HV_X86_RSI, env->regs[R_ESI]); + wreg(cs->accel->fd, HV_X86_RDI, env->regs[R_EDI]); + wreg(cs->accel->fd, HV_X86_R8, env->regs[8]); + wreg(cs->accel->fd, HV_X86_R9, env->regs[9]); + wreg(cs->accel->fd, HV_X86_R10, env->regs[10]); + wreg(cs->accel->fd, HV_X86_R11, env->regs[11]); + wreg(cs->accel->fd, HV_X86_R12, env->regs[12]); + wreg(cs->accel->fd, HV_X86_R13, env->regs[13]); + wreg(cs->accel->fd, HV_X86_R14, env->regs[14]); + wreg(cs->accel->fd, HV_X86_R15, env->regs[15]); + wreg(cs->accel->fd, HV_X86_RFLAGS, env->eflags); + wreg(cs->accel->fd, HV_X86_RIP, env->eip); - wreg(cs->hvf->fd, HV_X86_XCR0, env->xcr0); + wreg(cs->accel->fd, HV_X86_XCR0, env->xcr0); hvf_put_xsave(cs); @@ -268,14 +268,14 @@ int hvf_put_registers(CPUState *cs) hvf_put_msrs(cs); - wreg(cs->hvf->fd, HV_X86_DR0, env->dr[0]); - wreg(cs->hvf->fd, HV_X86_DR1, env->dr[1]); - wreg(cs->hvf->fd, HV_X86_DR2, env->dr[2]); - wreg(cs->hvf->fd, HV_X86_DR3, env->dr[3]); - wreg(cs->hvf->fd, HV_X86_DR4, env->dr[4]); - wreg(cs->hvf->fd, HV_X86_DR5, env->dr[5]); - wreg(cs->hvf->fd, HV_X86_DR6, env->dr[6]); - wreg(cs->hvf->fd, HV_X86_DR7, env->dr[7]); + wreg(cs->accel->fd, HV_X86_DR0, env->dr[0]); + wreg(cs->accel->fd, HV_X86_DR1, env->dr[1]); + wreg(cs->accel->fd, HV_X86_DR2, env->dr[2]); + wreg(cs->accel->fd, HV_X86_DR3, env->dr[3]); + wreg(cs->accel->fd, HV_X86_DR4, env->dr[4]); + wreg(cs->accel->fd, HV_X86_DR5, env->dr[5]); + wreg(cs->accel->fd, HV_X86_DR6, env->dr[6]); + wreg(cs->accel->fd, HV_X86_DR7, env->dr[7]); return 0; } @@ -285,40 +285,40 @@ int hvf_get_registers(CPUState *cs) X86CPU *x86cpu = X86_CPU(cs); CPUX86State *env = 
&x86cpu->env; - env->regs[R_EAX] = rreg(cs->hvf->fd, HV_X86_RAX); - env->regs[R_EBX] = rreg(cs->hvf->fd, HV_X86_RBX); - env->regs[R_ECX] = rreg(cs->hvf->fd, HV_X86_RCX); - env->regs[R_EDX] = rreg(cs->hvf->fd, HV_X86_RDX); - env->regs[R_EBP] = rreg(cs->hvf->fd, HV_X86_RBP); - env->regs[R_ESP] = rreg(cs->hvf->fd, HV_X86_RSP); - env->regs[R_ESI] = rreg(cs->hvf->fd, HV_X86_RSI); - env->regs[R_EDI] = rreg(cs->hvf->fd, HV_X86_RDI); - env->regs[8] = rreg(cs->hvf->fd, HV_X86_R8); - env->regs[9] = rreg(cs->hvf->fd, HV_X86_R9); - env->regs[10] = rreg(cs->hvf->fd, HV_X86_R10); - env->regs[11] = rreg(cs->hvf->fd, HV_X86_R11); - env->regs[12] = rreg(cs->hvf->fd, HV_X86_R12); - env->regs[13] = rreg(cs->hvf->fd, HV_X86_R13); - env->regs[14] = rreg(cs->hvf->fd, HV_X86_R14); - env->regs[15] = rreg(cs->hvf->fd, HV_X86_R15); + env->regs[R_EAX] = rreg(cs->accel->fd, HV_X86_RAX); + env->regs[R_EBX] = rreg(cs->accel->fd, HV_X86_RBX); + env->regs[R_ECX] = rreg(cs->accel->fd, HV_X86_RCX); + env->regs[R_EDX] = rreg(cs->accel->fd, HV_X86_RDX); + env->regs[R_EBP] = rreg(cs->accel->fd, HV_X86_RBP); + env->regs[R_ESP] = rreg(cs->accel->fd, HV_X86_RSP); + env->regs[R_ESI] = rreg(cs->accel->fd, HV_X86_RSI); + env->regs[R_EDI] = rreg(cs->accel->fd, HV_X86_RDI); + env->regs[8] = rreg(cs->accel->fd, HV_X86_R8); + env->regs[9] = rreg(cs->accel->fd, HV_X86_R9); + env->regs[10] = rreg(cs->accel->fd, HV_X86_R10); + env->regs[11] = rreg(cs->accel->fd, HV_X86_R11); + env->regs[12] = rreg(cs->accel->fd, HV_X86_R12); + env->regs[13] = rreg(cs->accel->fd, HV_X86_R13); + env->regs[14] = rreg(cs->accel->fd, HV_X86_R14); + env->regs[15] = rreg(cs->accel->fd, HV_X86_R15); - env->eflags = rreg(cs->hvf->fd, HV_X86_RFLAGS); - env->eip = rreg(cs->hvf->fd, HV_X86_RIP); + env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS); + env->eip = rreg(cs->accel->fd, HV_X86_RIP); hvf_get_xsave(cs); - env->xcr0 = rreg(cs->hvf->fd, HV_X86_XCR0); + env->xcr0 = rreg(cs->accel->fd, HV_X86_XCR0); hvf_get_segments(cs); hvf_get_msrs(cs); - env->dr[0] = rreg(cs->hvf->fd, HV_X86_DR0); - env->dr[1] = rreg(cs->hvf->fd, HV_X86_DR1); - env->dr[2] = rreg(cs->hvf->fd, HV_X86_DR2); - env->dr[3] = rreg(cs->hvf->fd, HV_X86_DR3); - env->dr[4] = rreg(cs->hvf->fd, HV_X86_DR4); - env->dr[5] = rreg(cs->hvf->fd, HV_X86_DR5); - env->dr[6] = rreg(cs->hvf->fd, HV_X86_DR6); - env->dr[7] = rreg(cs->hvf->fd, HV_X86_DR7); + env->dr[0] = rreg(cs->accel->fd, HV_X86_DR0); + env->dr[1] = rreg(cs->accel->fd, HV_X86_DR1); + env->dr[2] = rreg(cs->accel->fd, HV_X86_DR2); + env->dr[3] = rreg(cs->accel->fd, HV_X86_DR3); + env->dr[4] = rreg(cs->accel->fd, HV_X86_DR4); + env->dr[5] = rreg(cs->accel->fd, HV_X86_DR5); + env->dr[6] = rreg(cs->accel->fd, HV_X86_DR6); + env->dr[7] = rreg(cs->accel->fd, HV_X86_DR7); x86_update_hflags(env); return 0; @@ -327,16 +327,16 @@ int hvf_get_registers(CPUState *cs) static void vmx_set_int_window_exiting(CPUState *cs) { uint64_t val; - val = rvmcs(cs->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); - wvmcs(cs->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val | + val = rvmcs(cs->accel->fd, VMCS_PRI_PROC_BASED_CTLS); + wvmcs(cs->accel->fd, VMCS_PRI_PROC_BASED_CTLS, val | VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING); } void vmx_clear_int_window_exiting(CPUState *cs) { uint64_t val; - val = rvmcs(cs->hvf->fd, VMCS_PRI_PROC_BASED_CTLS); - wvmcs(cs->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val & + val = rvmcs(cs->accel->fd, VMCS_PRI_PROC_BASED_CTLS); + wvmcs(cs->accel->fd, VMCS_PRI_PROC_BASED_CTLS, val & ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING); } @@ -372,7 +372,7 @@ bool 
hvf_inject_interrupts(CPUState *cs) uint64_t info = 0; if (have_event) { info = vector | intr_type | VMCS_INTR_VALID; - uint64_t reason = rvmcs(cs->hvf->fd, VMCS_EXIT_REASON); + uint64_t reason = rvmcs(cs->accel->fd, VMCS_EXIT_REASON); if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) { vmx_clear_nmi_blocking(cs); } @@ -381,17 +381,17 @@ bool hvf_inject_interrupts(CPUState *cs) info &= ~(1 << 12); /* clear undefined bit */ if (intr_type == VMCS_INTR_T_SWINTR || intr_type == VMCS_INTR_T_SWEXCEPTION) { - wvmcs(cs->hvf->fd, VMCS_ENTRY_INST_LENGTH, env->ins_len); + wvmcs(cs->accel->fd, VMCS_ENTRY_INST_LENGTH, env->ins_len); } if (env->has_error_code) { - wvmcs(cs->hvf->fd, VMCS_ENTRY_EXCEPTION_ERROR, + wvmcs(cs->accel->fd, VMCS_ENTRY_EXCEPTION_ERROR, env->error_code); /* Indicate that VMCS_ENTRY_EXCEPTION_ERROR is valid */ info |= VMCS_INTR_DEL_ERRCODE; } /*printf("reinject %lx err %d\n", info, err);*/ - wvmcs(cs->hvf->fd, VMCS_ENTRY_INTR_INFO, info); + wvmcs(cs->accel->fd, VMCS_ENTRY_INTR_INFO, info); }; } @@ -399,7 +399,7 @@ bool hvf_inject_interrupts(CPUState *cs) if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) { cs->interrupt_request &= ~CPU_INTERRUPT_NMI; info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI; - wvmcs(cs->hvf->fd, VMCS_ENTRY_INTR_INFO, info); + wvmcs(cs->accel->fd, VMCS_ENTRY_INTR_INFO, info); } else { vmx_set_nmi_window_exiting(cs); } @@ -411,7 +411,7 @@ bool hvf_inject_interrupts(CPUState *cs) int line = cpu_get_pic_interrupt(&x86cpu->env); cs->interrupt_request &= ~CPU_INTERRUPT_HARD; if (line >= 0) { - wvmcs(cs->hvf->fd, VMCS_ENTRY_INTR_INFO, line | + wvmcs(cs->accel->fd, VMCS_ENTRY_INTR_INFO, line | VMCS_INTR_VALID | VMCS_INTR_T_HWINTR); } } @@ -429,7 +429,7 @@ int hvf_process_events(CPUState *cs) if (!cs->vcpu_dirty) { /* light weight sync for CPU_INTERRUPT_HARD and IF_MASK */ - env->eflags = rreg(cs->hvf->fd, HV_X86_RFLAGS); + env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS); } if (cs->interrupt_request & CPU_INTERRUPT_INIT) { -- cgit v1.1 From 0c40daf03810ed56535c40f6debdcc62ce4c5e6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?= Date: Wed, 5 Apr 2023 13:48:26 +0200 Subject: hw/intc/arm_gic: Un-inline GIC*/ITS class_name() helpers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit "kvm_arm.h" contains external and internal prototype declarations. Files under the hw/ directory should only access the KVM external API. To keep machine / device models from including "kvm_arm.h" simply to get the QOM GIC/ITS class name, un-inline each class name getter into the proper device model file. Signed-off-by: Philippe Mathieu-Daudé Reviewed-by: Richard Henderson Message-Id: <20230405160454.97436-4-philmd@linaro.org> --- target/arm/kvm_arm.h | 45 --------------------------------------------- 1 file changed, 45 deletions(-) (limited to 'target') diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h index 330fbe5..051a0da 100644 --- a/target/arm/kvm_arm.h +++ b/target/arm/kvm_arm.h @@ -453,32 +453,6 @@ static inline uint32_t kvm_arm_sve_get_vls(CPUState *cs) #endif -static inline const char *gic_class_name(void) -{ - return kvm_irqchip_in_kernel() ? "kvm-arm-gic" : "arm_gic"; -} - -/** - * gicv3_class_name - * - * Return name of GICv3 class to use depending on whether KVM acceleration is - * in use. May throw an error if the chosen implementation is not available.
- *
- * Returns: class name to use
- */
-static inline const char *gicv3_class_name(void)
-{
-    if (kvm_irqchip_in_kernel()) {
-        return "kvm-arm-gicv3";
-    } else {
-        if (kvm_enabled()) {
-            error_report("Userspace GICv3 is not supported with KVM");
-            exit(1);
-        }
-        return "arm-gicv3";
-    }
-}
-
 /**
  * kvm_arm_handle_debug:
  * @cs: CPUState
@@ -516,23 +490,4 @@ void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr);
  */
 bool kvm_arm_verify_ext_dabt_pending(CPUState *cs);

-/**
- * its_class_name:
- *
- * Return the ITS class name to use depending on whether KVM acceleration
- * and KVM CAP_SIGNAL_MSI are supported
- *
- * Returns: class name to use or NULL
- */
-static inline const char *its_class_name(void)
-{
-    if (kvm_irqchip_in_kernel()) {
-        /* KVM implementation requires this capability */
-        return kvm_direct_msi_enabled() ? "arm-its-kvm" : NULL;
-    } else {
-        /* Software emulation based model */
-        return "arm-gicv3-its";
-    }
-}
-
 #endif
-- cgit v1.1

From cf43b5b69c0f4acee52e3648b88f4a0bf3de770b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?=
Date: Tue, 4 Apr 2023 11:12:38 +0200
Subject: target/arm: Restrict KVM-specific fields from ArchCPU
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

These fields shouldn't be accessed when KVM is not available.

Signed-off-by: Philippe Mathieu-Daudé
Reviewed-by: Richard Henderson
Message-Id: <20230405160454.97436-8-philmd@linaro.org>
---
 target/arm/cpu.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'target')

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 00e675f..4d6c0f9 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -961,6 +961,7 @@ struct ArchCPU {
      */
     uint32_t kvm_target;

+#ifdef CONFIG_KVM
     /* KVM init features for this CPU */
     uint32_t kvm_init_features[7];

@@ -973,6 +974,7 @@ struct ArchCPU {

     /* KVM steal time */
     OnOffAuto kvm_steal_time;
+#endif /* CONFIG_KVM */

     /* Uniprocessor system with MP extensions */
     bool mp_is_up;
-- cgit v1.1

From 0573997713eb0a37ab98b545794e18b868b471ea Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?=
Date: Tue, 4 Apr 2023 11:14:58 +0200
Subject: target/ppc: Restrict KVM-specific fields from ArchCPU
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The 'kvm_sw_tlb' and 'tlb_dirty' fields introduced in commit 93dd5e852c
("kvm: ppc: booke206: use MMU API") are specific to KVM and shouldn't
be accessed when it is not available.

Signed-off-by: Philippe Mathieu-Daudé
Reviewed-by: Nicholas Piggin
Message-Id: <20230624192645.13680-1-philmd@linaro.org>
---
 target/ppc/cpu.h        | 2 ++
 target/ppc/mmu_common.c | 4 ++++
 2 files changed, 6 insertions(+)

(limited to 'target')

diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 94497aa..af12c93 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1149,8 +1149,10 @@ struct CPUArchState {
     int nb_pids;     /* Number of available PID registers */
     int tlb_type;    /* Type of TLB we're dealing with */
     ppc_tlb_t tlb;   /* TLB is optional. Allocate them only if needed */
+#ifdef CONFIG_KVM
     bool tlb_dirty;  /* Set to non-zero when modifying TLB */
     bool kvm_sw_tlb; /* non-zero if KVM SW TLB API is active */
+#endif /* CONFIG_KVM */
     uint32_t tlb_need_flush; /* Delayed flush needed */
 #define TLB_NEED_LOCAL_FLUSH 0x1
 #define TLB_NEED_GLOBAL_FLUSH 0x2
diff --git a/target/ppc/mmu_common.c b/target/ppc/mmu_common.c
index ae1db6e..8c000e2 100644
--- a/target/ppc/mmu_common.c
+++ b/target/ppc/mmu_common.c
@@ -930,10 +930,12 @@ static void mmubooke_dump_mmu(CPUPPCState *env)
     ppcemb_tlb_t *entry;
     int i;

+#ifdef CONFIG_KVM
     if (kvm_enabled() && !env->kvm_sw_tlb) {
         qemu_printf("Cannot access KVM TLB\n");
         return;
     }
+#endif

     qemu_printf("\nTLB:\n");
     qemu_printf("Effective Physical Size PID Prot "
@@ -1021,10 +1023,12 @@ static void mmubooke206_dump_mmu(CPUPPCState *env)
     int offset = 0;
     int i;

+#ifdef CONFIG_KVM
     if (kvm_enabled() && !env->kvm_sw_tlb) {
         qemu_printf("Cannot access KVM TLB\n");
         return;
     }
+#endif

     for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
         int size = booke206_tlb_size(env, i);
-- cgit v1.1

From 9638cbde6c4f433b157672c0fdee783c10be45fe Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philippe=20Mathieu-Daud=C3=A9?=
Date: Tue, 4 Apr 2023 11:15:05 +0200
Subject: target/riscv: Restrict KVM-specific fields from ArchCPU
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

These fields shouldn't be accessed when KVM is not available. Restrict
the KVM timer migration state. Rename the KVM timer post_load() handler
accordingly, because cpu_post_load() is too generic.

Signed-off-by: Philippe Mathieu-Daudé
Reviewed-by: Daniel Henrique Barboza
Message-Id: <20230626232007.8933-3-philmd@linaro.org>
---
 target/riscv/cpu.c     | 2 +-
 target/riscv/cpu.h     | 2 ++
 target/riscv/machine.c | 8 ++++++--
 3 files changed, 9 insertions(+), 3 deletions(-)

(limited to 'target')

diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 881bddf..4035fe0 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -584,7 +584,7 @@ static void riscv_host_cpu_init(Object *obj)
 #endif
     riscv_cpu_add_user_properties(obj);
 }
-#endif
+#endif /* CONFIG_KVM */

 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
 {
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 7bff1d4..7adb870 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -363,12 +363,14 @@ struct CPUArchState {
     hwaddr kernel_addr;
     hwaddr fdt_addr;

+#ifdef CONFIG_KVM
     /* kvm timer */
     bool kvm_timer_dirty;
     uint64_t kvm_timer_time;
     uint64_t kvm_timer_compare;
     uint64_t kvm_timer_state;
     uint64_t kvm_timer_frequency;
+#endif /* CONFIG_KVM */
 };

 /*
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
index 3ce2970..c7c862c 100644
--- a/target/riscv/machine.c
+++ b/target/riscv/machine.c
@@ -194,12 +194,13 @@ static const VMStateDescription vmstate_rv128 = {
     }
 };

+#ifdef CONFIG_KVM
 static bool kvmtimer_needed(void *opaque)
 {
     return kvm_enabled();
 }

-static int cpu_post_load(void *opaque, int version_id)
+static int cpu_kvmtimer_post_load(void *opaque, int version_id)
 {
     RISCVCPU *cpu = opaque;
     CPURISCVState *env = &cpu->env;
@@ -213,7 +214,7 @@ static const VMStateDescription vmstate_kvmtimer = {
     .version_id = 1,
     .minimum_version_id = 1,
     .needed = kvmtimer_needed,
-    .post_load = cpu_post_load,
+    .post_load = cpu_kvmtimer_post_load,
     .fields = (VMStateField[]) {
         VMSTATE_UINT64(env.kvm_timer_time, RISCVCPU),
         VMSTATE_UINT64(env.kvm_timer_compare, RISCVCPU),
@@ -221,6 +222,7 @@ static const VMStateDescription vmstate_kvmtimer = {
         VMSTATE_END_OF_LIST()
     }
 };
+#endif
 static bool debug_needed(void *opaque)
 {
@@ -409,7 +411,9 @@ const VMStateDescription vmstate_riscv_cpu = {
         &vmstate_vector,
         &vmstate_pointermasking,
         &vmstate_rv128,
+#ifdef CONFIG_KVM
         &vmstate_kvmtimer,
+#endif
         &vmstate_envcfg,
         &vmstate_debug,
         &vmstate_smstateen,
-- cgit v1.1

From db5a06b3a21239ffa9424f89f98ee9afa14e955b Mon Sep 17 00:00:00 2001
From: Zhao Liu
Date: Mon, 29 May 2023 20:43:31 +0800
Subject: target/i386/WHPX: Fix error message when failing to set ProcessorCount
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Commit 003f230e37d7 ("machine: Tweak the order of topology members in
struct CpuTopology") changed the meaning of MachineState.smp.cores from
"the number of cores in one package" to "the number of cores in one
die", without fixing the other uses of MachineState.smp.cores. With the
later introduction of clusters, smp.cores now means "the number of
cores in one cluster", which clearly does not fit the semantics here.

Since WHvSetPartitionProperty() is called just before this error
message to set prop.ProcessorCount, the error message should report
prop.ProcessorCount rather than "cores per cluster" or "cores per
package".

Cc: Sunil Muthuswamy
Signed-off-by: Zhao Liu
Reviewed-by: Philippe Mathieu-Daudé
Message-Id: <20230529124331.412822-1-zhao1.liu@linux.intel.com>
[PMD: Use '%u' format for ProcessorCount]
Signed-off-by: Philippe Mathieu-Daudé
---
 target/i386/whpx/whpx-all.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'target')

diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index 9ee04ee..57580ca 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -2598,8 +2598,8 @@ static int whpx_accel_init(MachineState *ms)
                                      sizeof(WHV_PARTITION_PROPERTY));

     if (FAILED(hr)) {
-        error_report("WHPX: Failed to set partition core count to %d,"
-                     " hr=%08lx", ms->smp.cores, hr);
+        error_report("WHPX: Failed to set partition processor count to %u,"
+                     " hr=%08lx", prop.ProcessorCount, hr);
         ret = -EINVAL;
         goto error;
     }
-- cgit v1.1

From 5369a36c4fe5a70e8831059bcd2a2eef834aae21 Mon Sep 17 00:00:00 2001
From: Isaku Yamahata
Date: Tue, 20 Jun 2023 09:50:47 -0700
Subject: exec/memory: Add symbolic value for memory listener priority for accel
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add MEMORY_LISTENER_PRIORITY_ACCEL as the symbolic value for the memory
listener priority used by accelerators, replacing the hard-coded
value 10.

No functional change intended.
Signed-off-by: Isaku Yamahata
Reviewed-by: Philippe Mathieu-Daudé
Message-Id:
Signed-off-by: Philippe Mathieu-Daudé
---
 target/i386/hax/hax-mem.c   | 2 +-
 target/i386/nvmm/nvmm-all.c | 2 +-
 target/i386/whpx/whpx-all.c | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

(limited to 'target')

diff --git a/target/i386/hax/hax-mem.c b/target/i386/hax/hax-mem.c
index 05dbe8c..bb5ffbc 100644
--- a/target/i386/hax/hax-mem.c
+++ b/target/i386/hax/hax-mem.c
@@ -291,7 +291,7 @@ static MemoryListener hax_memory_listener = {
     .region_add = hax_region_add,
     .region_del = hax_region_del,
     .log_sync = hax_log_sync,
-    .priority = 10,
+    .priority = MEMORY_LISTENER_PRIORITY_ACCEL,
 };

 static void hax_ram_block_added(RAMBlockNotifier *n, void *host, size_t size,
diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c
index 72a3a9e..066a173 100644
--- a/target/i386/nvmm/nvmm-all.c
+++ b/target/i386/nvmm/nvmm-all.c
@@ -1128,7 +1128,7 @@ static MemoryListener nvmm_memory_listener = {
     .region_add = nvmm_region_add,
     .region_del = nvmm_region_del,
     .log_sync = nvmm_log_sync,
-    .priority = 10,
+    .priority = MEMORY_LISTENER_PRIORITY_ACCEL,
 };

 static void
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index 57580ca..3de0dc1 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -2412,7 +2412,7 @@ static MemoryListener whpx_memory_listener = {
     .region_add = whpx_region_add,
     .region_del = whpx_region_del,
     .log_sync = whpx_log_sync,
-    .priority = 10,
+    .priority = MEMORY_LISTENER_PRIORITY_ACCEL,
 };

 static void whpx_memory_init(void)
-- cgit v1.1

From 14a868c626e99eea063ecbf6ef86002f6a314f0a Mon Sep 17 00:00:00 2001
From: Isaku Yamahata
Date: Tue, 20 Jun 2023 09:50:49 -0700
Subject: exec/memory: Add symbol for the min value of memory listener priority
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add MEMORY_LISTENER_PRIORITY_MIN as the symbolic value for the minimum
memory listener priority, replacing the hard-coded magic value 0. Add
explicit initialization.

No functional change intended.

Signed-off-by: Isaku Yamahata
Reviewed-by: Philippe Mathieu-Daudé
Message-Id: <29f88477fe82eb774bcfcae7f65ea21995f865f2.1687279702.git.isaku.yamahata@intel.com>
Signed-off-by: Philippe Mathieu-Daudé
---
 target/arm/kvm.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'target')

diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index 84da493..b4c7654 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -341,6 +341,7 @@ static MemoryListener devlistener = {
     .name = "kvm-arm",
     .region_add = kvm_arm_devlistener_add,
     .region_del = kvm_arm_devlistener_del,
+    .priority = MEMORY_LISTENER_PRIORITY_MIN,
 };

 static void kvm_arm_set_device_addr(KVMDevice *kd)
-- cgit v1.1
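
The two memory-listener patches above only give names to the existing
priorities (0 for the minimum, 10 for accelerators); behaviour is
unchanged. To make the ordering semantics concrete, here is a minimal
standalone C sketch of priority-sorted listener registration. It is
illustrative only, not QEMU code: the Listener type, listener_register()
and the list handling are invented for this sketch, and only the two
priority values mirror the symbolic constants above. The assumption that
listeners are kept sorted so lower-priority listeners are visited first
on "region add" reflects one reading of the memory API and may not
capture every detail.

    #include <stdio.h>

    #define MEMORY_LISTENER_PRIORITY_MIN    0
    #define MEMORY_LISTENER_PRIORITY_ACCEL  10

    typedef struct Listener {
        const char *name;
        int priority;
        struct Listener *next;
    } Listener;

    static Listener *listeners;

    /* Insert while keeping the list sorted by ascending priority, so a
     * walk over the list visits lower-priority listeners first. */
    static void listener_register(Listener *l)
    {
        Listener **p = &listeners;

        while (*p && (*p)->priority <= l->priority) {
            p = &(*p)->next;
        }
        l->next = *p;
        *p = l;
    }

    int main(void)
    {
        /* Hypothetical listeners standing in for whpx_memory_listener
         * and the kvm-arm devlistener patched above. */
        Listener accel = { "accel", MEMORY_LISTENER_PRIORITY_ACCEL };
        Listener dev = { "kvm-arm-dev", MEMORY_LISTENER_PRIORITY_MIN };

        listener_register(&accel);
        listener_register(&dev);

        for (Listener *l = listeners; l; l = l->next) {
            printf("%s (priority %d)\n", l->name, l->priority);
        }
        return 0;
    }

Whichever order the two listeners are registered in, the walk prints the
MIN-priority listener before the accelerator one, which is why replacing
the magic numbers 0 and 10 with named constants cannot change behaviour
as long as the underlying values stay the same.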