Diffstat (limited to 'target/i386/whpx/whpx-all.c')
 target/i386/whpx/whpx-all.c | 142
 1 file changed, 77 insertions(+), 65 deletions(-)
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index 41fb8c5..2567618 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -10,10 +10,11 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/address-spaces.h"
-#include "exec/ioport.h"
+#include "system/address-spaces.h"
+#include "system/ioport.h"
#include "gdbstub/helpers.h"
#include "qemu/accel.h"
+#include "accel/accel-ops.h"
#include "system/whpx.h"
#include "system/cpus.h"
#include "system/runstate.h"
@@ -26,6 +27,8 @@
#include "qapi/qapi-types-common.h"
#include "qapi/qapi-visit-common.h"
#include "migration/blocker.h"
+#include "host-cpu.h"
+#include "accel/accel-cpu-target.h"
#include <winerror.h>
#include "whpx-internal.h"
@@ -237,13 +240,12 @@ struct AccelCPUState {
uint64_t tpr;
uint64_t apic_base;
bool interruption_pending;
- bool dirty;
/* Must be the last field as it may have a tail */
WHV_RUN_VP_EXIT_CONTEXT exit_ctx;
};
-static bool whpx_allowed;
+bool whpx_allowed;
static bool whp_dispatch_initialized;
static HMODULE hWinHvPlatform, hWinHvEmulation;
static uint32_t max_vcpu_index;
@@ -549,8 +551,6 @@ static void whpx_set_registers(CPUState *cpu, int level)
error_report("WHPX: Failed to set virtual processor context, hr=%08lx",
hr);
}
-
- return;
}
static int whpx_get_tsc(CPUState *cpu)
@@ -771,8 +771,6 @@ static void whpx_get_registers(CPUState *cpu)
}
x86_update_hflags(env);
-
- return;
}
static HRESULT CALLBACK whpx_emu_ioport_callback(
@@ -790,8 +788,11 @@ static HRESULT CALLBACK whpx_emu_mmio_callback(
void *ctx,
WHV_EMULATOR_MEMORY_ACCESS_INFO *ma)
{
- cpu_physical_memory_rw(ma->GpaAddress, ma->Data, ma->AccessSize,
- ma->Direction);
+ CPUState *cs = (CPUState *)ctx;
+ AddressSpace *as = cpu_addressspace(cs, MEMTXATTRS_UNSPECIFIED);
+
+ address_space_rw(as, ma->GpaAddress, MEMTXATTRS_UNSPECIFIED,
+ ma->Data, ma->AccessSize, ma->Direction);
return S_OK;
}
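The hunk above stops routing emulated MMIO through the global cpu_physical_memory_rw() helper and instead resolves the address space of the vCPU that took the exit before issuing the access. A minimal sketch of the same pattern follows; the helper name example_guest_phys_rw is made up for illustration, and the code assumes the QEMU-internal headers this file already includes ("qemu/osdep.h", "cpu.h", "system/address-spaces.h"), so it only builds inside the QEMU tree.

/* Sketch only: mirrors the pattern above, not the committed code. */
static MemTxResult example_guest_phys_rw(CPUState *cs, hwaddr gpa,
                                         void *buf, hwaddr len, bool is_write)
{
    /* Resolve the address space this vCPU sees for ordinary accesses. */
    AddressSpace *as = cpu_addressspace(cs, MEMTXATTRS_UNSPECIFIED);

    /* Unlike cpu_physical_memory_rw(), which returns void, this yields a
     * MemTxResult the caller may choose to inspect. */
    return address_space_rw(as, gpa, MEMTXATTRS_UNSPECIFIED,
                            buf, len, is_write);
}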
@@ -840,7 +841,7 @@ static HRESULT CALLBACK whpx_emu_setreg_callback(
* The emulator just successfully wrote the register state. We clear the
* dirty state so we avoid the double write on resume of the VP.
*/
- cpu->accel->dirty = false;
+ cpu->vcpu_dirty = false;
return hr;
}
@@ -1395,7 +1396,7 @@ static int whpx_last_vcpu_stopping(CPUState *cpu)
/* Returns the address of the next instruction that is about to be executed. */
static vaddr whpx_vcpu_get_pc(CPUState *cpu, bool exit_context_valid)
{
- if (cpu->accel->dirty) {
+ if (cpu->vcpu_dirty) {
/* The CPU registers have been modified by other parts of QEMU. */
return cpu_env(cpu)->eip;
} else if (exit_context_valid) {
@@ -1438,9 +1439,9 @@ static int whpx_handle_halt(CPUState *cpu)
int ret = 0;
bql_lock();
- if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ if (!(cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
(cpu_env(cpu)->eflags & IF_MASK)) &&
- !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+ !cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
cpu->exception_index = EXCP_HLT;
cpu->halted = true;
ret = 1;
@@ -1471,16 +1472,16 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
/* Inject NMI */
if (!vcpu->interruption_pending &&
- cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
- if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_NMI);
vcpu->interruptable = false;
new_int.InterruptionType = WHvX64PendingNmi;
new_int.InterruptionPending = 1;
new_int.InterruptionVector = 2;
}
- if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) {
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_SMI);
}
}
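The interrupt-handling hunks replace open-coded reads and bit clears of cpu->interrupt_request with the cpu_test_interrupt()/cpu_reset_interrupt() helpers, centralizing how the flag word is read and cleared. Below is a standalone C11 model of that test/reset pattern; the names, bit values, and memory orderings are chosen for the model and are not taken from the QEMU implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_IRQ_HARD (1u << 0)   /* placeholder bit values */
#define TOY_IRQ_NMI  (1u << 1)

typedef struct { _Atomic unsigned interrupt_request; } ToyCPU;

/* Model of cpu_interrupt(): set bits atomically. */
static void toy_interrupt(ToyCPU *cpu, unsigned mask)
{
    atomic_fetch_or_explicit(&cpu->interrupt_request, mask,
                             memory_order_release);
}

/* Model of cpu_test_interrupt(): read without clearing. */
static bool toy_test_interrupt(ToyCPU *cpu, unsigned mask)
{
    return atomic_load_explicit(&cpu->interrupt_request,
                                memory_order_acquire) & mask;
}

/* Model of cpu_reset_interrupt(): clear bits atomically. */
static void toy_reset_interrupt(ToyCPU *cpu, unsigned mask)
{
    atomic_fetch_and_explicit(&cpu->interrupt_request, ~mask,
                              memory_order_acq_rel);
}

int main(void)
{
    ToyCPU cpu = { 0 };

    toy_interrupt(&cpu, TOY_IRQ_NMI);
    if (toy_test_interrupt(&cpu, TOY_IRQ_NMI | TOY_IRQ_HARD)) {
        toy_reset_interrupt(&cpu, TOY_IRQ_NMI);
        printf("NMI taken\n");
    }
    return 0;
}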
@@ -1488,13 +1489,13 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
* Force the VCPU out of its inner loop to process any INIT requests or
* commit pending TPR access.
*/
- if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
- if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT) &&
!(env->hflags & HF_SMM_MASK)) {
- cpu->exit_request = 1;
+ qatomic_set(&cpu->exit_request, true);
}
- if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
- cpu->exit_request = 1;
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
+ qatomic_set(&cpu->exit_request, true);
}
}
@@ -1503,8 +1504,8 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
if (!vcpu->interruption_pending &&
vcpu->interruptable && (env->eflags & IF_MASK)) {
assert(!new_int.InterruptionPending);
- if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
irq = cpu_get_pic_interrupt(env);
if (irq >= 0) {
new_int.InterruptionType = WHvX64PendingInterrupt;
@@ -1521,8 +1522,8 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
reg_count += 1;
}
} else if (vcpu->ready_for_pic_interrupt &&
- (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
irq = cpu_get_pic_interrupt(env);
if (irq >= 0) {
reg_names[reg_count] = WHvRegisterPendingEvent;
@@ -1541,14 +1542,14 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
if (tpr != vcpu->tpr) {
vcpu->tpr = tpr;
reg_values[reg_count].Reg64 = tpr;
- cpu->exit_request = 1;
+ qatomic_set(&cpu->exit_request, true);
reg_names[reg_count] = WHvX64RegisterCr8;
reg_count += 1;
}
/* Update the state of the interrupt delivery notification */
if (!vcpu->window_registered &&
- cpu->interrupt_request & CPU_INTERRUPT_HARD) {
+ cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
reg_values[reg_count].DeliverabilityNotifications =
(WHV_X64_DELIVERABILITY_NOTIFICATIONS_REGISTER) {
.InterruptNotification = 1
@@ -1570,8 +1571,6 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
" hr=%08lx", hr);
}
}
-
- return;
}
static void whpx_vcpu_post_run(CPUState *cpu)
@@ -1595,8 +1594,6 @@ static void whpx_vcpu_post_run(CPUState *cpu)
vcpu->interruptable =
!vcpu->exit_ctx.VpContext.ExecutionState.InterruptShadow;
-
- return;
}
static void whpx_vcpu_process_async_events(CPUState *cpu)
@@ -1605,37 +1602,35 @@ static void whpx_vcpu_process_async_events(CPUState *cpu)
CPUX86State *env = &x86_cpu->env;
AccelCPUState *vcpu = cpu->accel;
- if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT) &&
!(env->hflags & HF_SMM_MASK)) {
whpx_cpu_synchronize_state(cpu);
do_cpu_init(x86_cpu);
vcpu->interruptable = true;
}
- if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_POLL)) {
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
apic_poll_irq(x86_cpu->apic_state);
}
- if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ if ((cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) ||
- (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+ cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
cpu->halted = false;
}
- if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SIPI)) {
whpx_cpu_synchronize_state(cpu);
do_cpu_sipi(x86_cpu);
}
- if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
+ if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_TPR);
whpx_cpu_synchronize_state(cpu);
apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
env->tpr_access_type);
}
-
- return;
}
static int whpx_vcpu_run(CPUState *cpu)
@@ -1714,15 +1709,16 @@ static int whpx_vcpu_run(CPUState *cpu)
}
do {
- if (cpu->accel->dirty) {
+ if (cpu->vcpu_dirty) {
whpx_set_registers(cpu, WHPX_SET_RUNTIME_STATE);
- cpu->accel->dirty = false;
+ cpu->vcpu_dirty = false;
}
if (exclusive_step_mode == WHPX_STEP_NONE) {
whpx_vcpu_pre_run(cpu);
- if (qatomic_read(&cpu->exit_request)) {
+ /* Corresponding store-release is in cpu_exit. */
+ if (qatomic_load_acquire(&cpu->exit_request)) {
whpx_vcpu_kick(cpu);
}
}
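The run loop now reads exit_request with qatomic_load_acquire(), pairing with the store-release in cpu_exit() noted in the new comment: everything the requester wrote before raising the flag is guaranteed visible once the vCPU thread observes it. Below is a standalone C11 model of that pairing; all names are illustrative and the spin loop stands in for the real entry into WHvRunVirtualProcessor().

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static int exit_reason;             /* payload written before the flag */
static _Atomic bool exit_request;   /* models cpu->exit_request */

/* Requester side, modelling cpu_exit(): publish data, then store-release. */
static void *requester(void *arg)
{
    (void)arg;
    exit_reason = 42;
    atomic_store_explicit(&exit_request, true, memory_order_release);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, requester, NULL);

    /* vCPU side, modelling the load-acquire in the run loop above. Once the
     * flag is seen, the acquire guarantees exit_reason is visible too. */
    while (!atomic_load_explicit(&exit_request, memory_order_acquire)) {
        /* spin */
    }
    printf("exit requested, reason=%d\n", exit_reason);

    pthread_join(t, NULL);
    return 0;
}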
@@ -2057,16 +2053,14 @@ static int whpx_vcpu_run(CPUState *cpu)
whpx_last_vcpu_stopping(cpu);
}
- qatomic_set(&cpu->exit_request, false);
-
return ret < 0;
}
static void do_whpx_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
- if (!cpu->accel->dirty) {
+ if (!cpu->vcpu_dirty) {
whpx_get_registers(cpu);
- cpu->accel->dirty = true;
+ cpu->vcpu_dirty = true;
}
}
@@ -2074,20 +2068,20 @@ static void do_whpx_cpu_synchronize_post_reset(CPUState *cpu,
run_on_cpu_data arg)
{
whpx_set_registers(cpu, WHPX_SET_RESET_STATE);
- cpu->accel->dirty = false;
+ cpu->vcpu_dirty = false;
}
static void do_whpx_cpu_synchronize_post_init(CPUState *cpu,
run_on_cpu_data arg)
{
whpx_set_registers(cpu, WHPX_SET_FULL_STATE);
- cpu->accel->dirty = false;
+ cpu->vcpu_dirty = false;
}
static void do_whpx_cpu_synchronize_pre_loadvm(CPUState *cpu,
run_on_cpu_data arg)
{
- cpu->accel->dirty = true;
+ cpu->vcpu_dirty = true;
}
/*
@@ -2096,7 +2090,7 @@ static void do_whpx_cpu_synchronize_pre_loadvm(CPUState *cpu,
void whpx_cpu_synchronize_state(CPUState *cpu)
{
- if (!cpu->accel->dirty) {
+ if (!cpu->vcpu_dirty) {
run_on_cpu(cpu, do_whpx_cpu_synchronize_state, RUN_ON_CPU_NULL);
}
}
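Throughout the patch the accelerator-private dirty flag is dropped in favour of the generic cpu->vcpu_dirty: registers are fetched from WHPX only on the first synchronize request, QEMU-side modifications leave the flag set, and the run loop writes the registers back (clearing the flag) before resuming the vCPU. A standalone sketch of that protocol follows, with stand-in functions replacing the WHPX get/set calls.

#include <stdbool.h>
#include <stdio.h>

typedef struct {
    unsigned long eip;   /* one register is enough for the model */
    bool vcpu_dirty;     /* models cpu->vcpu_dirty */
} ToyVCPU;

/* Stand-ins for whpx_get_registers()/whpx_set_registers(). */
static void hv_get_registers(ToyVCPU *cpu) { (void)cpu; }
static void hv_set_registers(ToyVCPU *cpu) { (void)cpu; }

/* Models whpx_cpu_synchronize_state(): fetch lazily, then mark dirty so the
 * hypervisor copy is treated as stale until it is written back. */
static void synchronize_state(ToyVCPU *cpu)
{
    if (!cpu->vcpu_dirty) {
        hv_get_registers(cpu);
        cpu->vcpu_dirty = true;
    }
}

/* Models the top of whpx_vcpu_run(): flush QEMU-side changes exactly once. */
static void vcpu_run(ToyVCPU *cpu)
{
    if (cpu->vcpu_dirty) {
        hv_set_registers(cpu);
        cpu->vcpu_dirty = false;
    }
    /* ... enter the hypervisor ... */
}

int main(void)
{
    ToyVCPU cpu = { .eip = 0, .vcpu_dirty = true }; /* dirty after init */

    vcpu_run(&cpu);           /* initial state pushed to the hypervisor */
    synchronize_state(&cpu);  /* QEMU wants to inspect or modify registers */
    cpu.eip = 0x1000;         /* modification stays local for now... */
    vcpu_run(&cpu);           /* ...and is written back before resuming */
    printf("eip=0x%lx dirty=%d\n", cpu.eip, cpu.vcpu_dirty);
    return 0;
}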
@@ -2116,7 +2110,7 @@ void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu)
run_on_cpu(cpu, do_whpx_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}
-void whpx_cpu_synchronize_pre_resume(bool step_pending)
+static void whpx_pre_resume_vm(AccelState *as, bool step_pending)
{
whpx_global.step_pending = step_pending;
}
@@ -2236,7 +2230,7 @@ int whpx_init_vcpu(CPUState *cpu)
}
vcpu->interruptable = true;
- vcpu->dirty = true;
+ cpu->vcpu_dirty = true;
cpu->accel = vcpu;
max_vcpu_index = max(max_vcpu_index, cpu->cpu_index);
qemu_add_vm_change_state_handler(whpx_cpu_update_state, env);
@@ -2280,7 +2274,6 @@ void whpx_destroy_vcpu(CPUState *cpu)
whp_dispatch.WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index);
whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator);
g_free(cpu->accel);
- return;
}
void whpx_vcpu_kick(CPUState *cpu)
@@ -2512,11 +2505,33 @@ static void whpx_set_kernel_irqchip(Object *obj, Visitor *v,
}
}
+static void whpx_cpu_instance_init(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+
+ host_cpu_instance_init(cpu);
+}
+
+static void whpx_cpu_accel_class_init(ObjectClass *oc, const void *data)
+{
+ AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);
+
+ acc->cpu_instance_init = whpx_cpu_instance_init;
+}
+
+static const TypeInfo whpx_cpu_accel_type = {
+ .name = ACCEL_CPU_NAME("whpx"),
+
+ .parent = TYPE_ACCEL_CPU,
+ .class_init = whpx_cpu_accel_class_init,
+ .abstract = true,
+};
+
/*
* Partition support
*/
-static int whpx_accel_init(MachineState *ms)
+static int whpx_accel_init(AccelState *as, MachineState *ms)
{
struct whpx_state *whpx;
int ret;
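The new whpx_cpu_accel_type above registers an accel-specific CPU class whose cpu_instance_init hook the accelerator framework runs when each CPU object is created, which is how WHPX now applies host CPU defaults via host_cpu_instance_init(). The standalone sketch below models only that hook-dispatch shape; the structures and names are invented for illustration and do not reflect QEMU's QOM machinery.

#include <stdio.h>

typedef struct { const char *model; } ToyCPUState;

/* Model of AccelCPUClass: an optional per-instance init hook. */
typedef struct {
    void (*cpu_instance_init)(ToyCPUState *cs);
} ToyAccelCPUClass;

/* WHPX-style hook: stand-in for host_cpu_instance_init(). */
static void toy_whpx_cpu_instance_init(ToyCPUState *cs)
{
    cs->model = "host";
}

/* CPU creation path: apply the generic default, then let the active
 * accelerator override it if it installed a hook. */
static void toy_cpu_create(ToyCPUState *cs, const ToyAccelCPUClass *acc)
{
    cs->model = "qemu64";
    if (acc && acc->cpu_instance_init) {
        acc->cpu_instance_init(cs);
    }
}

int main(void)
{
    ToyAccelCPUClass whpx_acc = {
        .cpu_instance_init = toy_whpx_cpu_instance_init,
    };
    ToyCPUState cs;

    toy_cpu_create(&cs, &whpx_acc);
    printf("cpu model: %s\n", cs.model);
    return 0;
}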
@@ -2700,20 +2715,16 @@ error:
return ret;
}
-int whpx_enabled(void)
-{
- return whpx_allowed;
-}
-
bool whpx_apic_in_platform(void) {
return whpx_global.apic_in_platform;
}
-static void whpx_accel_class_init(ObjectClass *oc, void *data)
+static void whpx_accel_class_init(ObjectClass *oc, const void *data)
{
AccelClass *ac = ACCEL_CLASS(oc);
ac->name = "WHPX";
ac->init_machine = whpx_accel_init;
+ ac->pre_resume_vm = whpx_pre_resume_vm;
ac->allowed = &whpx_allowed;
object_class_property_add(oc, "kernel-irqchip", "on|off|split",
@@ -2742,6 +2753,7 @@ static const TypeInfo whpx_accel_type = {
static void whpx_type_init(void)
{
type_register_static(&whpx_accel_type);
+ type_register_static(&whpx_cpu_accel_type);
}
bool init_whp_dispatch(void)