-rw-r--r--   docs/hyperv.txt   | 10
-rw-r--r--   target/i386/cpu.c |  1
-rw-r--r--   target/i386/cpu.h |  1
-rw-r--r--   target/i386/kvm.c | 89
4 files changed, 87 insertions(+), 14 deletions(-)
diff --git a/docs/hyperv.txt b/docs/hyperv.txt
index c423e0f..beadb2d 100644
--- a/docs/hyperv.txt
+++ b/docs/hyperv.txt
@@ -175,6 +175,16 @@ without the feature to find out if enabling it is beneficial.
Requires: hv-vapic
+4. Development features
+========================
+In some cases (e.g. during development) it may make sense to use QEMU in
+'pass-through' mode and give Windows guests all enlightenments currently
+supported by KVM. This pass-through mode is enabled with the "hv-passthrough"
+CPU flag.
+Note: enabling this flag effectively prevents migration as the set of
+supported features may differ between the source and destination hosts.
+
+
4. Useful links
================
Hyper-V Top Level Functional specification and other information:
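
As a sketch of how the pass-through mode documented above might be invoked (the machine and disk options are placeholders, and the explicit flag list is only an example built from existing hv-* flags, not a recommended or exhaustive set):

  # expose every enlightenment the running kernel supports; not migratable
  qemu-system-x86_64 -enable-kvm -cpu host,hv-passthrough ...

  # migration-friendly alternative: request an explicit set of enlightenments
  qemu-system-x86_64 -enable-kvm \
    -cpu host,hv-relaxed,hv-vapic,hv-spinlocks=0x1fff,hv-time,hv-vpindex,hv-synic,hv-stimer ...

The second form corresponds to the explicit hv-* flags that the migration blocker added later in this patch points users towards.
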
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index e90c1ac..e07996c 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -5883,6 +5883,7 @@ static Property x86_cpu_properties[] = {
HYPERV_FEAT_EVMCS, 0),
DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
HYPERV_FEAT_IPI, 0),
+ DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 30cd1a0..86edbf5 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1390,6 +1390,7 @@ struct X86CPU {
char *hyperv_vendor_id;
bool hyperv_synic_kvm_only;
uint64_t hyperv_features;
+ bool hyperv_passthrough;
bool check_cpuid;
bool enforce_cpuid;
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index a041b4d..93ac6ba 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -639,7 +639,7 @@ static bool hyperv_enabled(X86CPU *cpu)
CPUState *cs = CPU(cpu);
return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY) ||
- cpu->hyperv_features);
+ cpu->hyperv_features || cpu->hyperv_passthrough);
}
static int kvm_arch_set_tsc_khz(CPUState *cs)
@@ -985,10 +985,10 @@ static int hv_cpuid_check_and_set(CPUState *cs, struct kvm_cpuid2 *cpuid,
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
- uint32_t r, fw, bits;;
+ uint32_t r, fw, bits;
int i;
- if (!hyperv_feat_enabled(cpu, feature)) {
+ if (!hyperv_feat_enabled(cpu, feature) && !cpu->hyperv_passthrough) {
return 0;
}
@@ -1001,15 +1001,23 @@ static int hv_cpuid_check_and_set(CPUState *cs, struct kvm_cpuid2 *cpuid,
}
if (hv_cpuid_get_fw(cpuid, fw, &r) || (r & bits) != bits) {
- fprintf(stderr,
- "Hyper-V %s is not supported by kernel\n",
- kvm_hyperv_properties[feature].desc);
- return 1;
+ if (hyperv_feat_enabled(cpu, feature)) {
+ fprintf(stderr,
+ "Hyper-V %s is not supported by kernel\n",
+ kvm_hyperv_properties[feature].desc);
+ return 1;
+ } else {
+ return 0;
+ }
}
env->features[fw] |= bits;
}
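+    /*
+     * Pass-through mode: the feature is supported by the kernel, so record
+     * it as enabled even though it was not requested explicitly.
+     */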
+ if (cpu->hyperv_passthrough) {
+ cpu->hyperv_features |= BIT(feature);
+ }
+
return 0;
}
@@ -1027,22 +1035,29 @@ static int hyperv_handle_properties(CPUState *cs,
struct kvm_cpuid_entry2 *c;
uint32_t signature[3];
uint32_t cpuid_i = 0;
- int r = 0;
+ int r;
if (!hyperv_enabled(cpu))
return 0;
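+    /*
+     * In pass-through mode, attempt to enable eVMCS as well; unlike the
+     * explicit hv-evmcs case, failure to enable it is not fatal.
+     */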
- if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ||
+ cpu->hyperv_passthrough) {
uint16_t evmcs_version;
- if (kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
- (uintptr_t)&evmcs_version)) {
+ r = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
+ (uintptr_t)&evmcs_version);
+
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) && r) {
fprintf(stderr, "Hyper-V %s is not supported by kernel\n",
kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
return -ENOSYS;
}
- env->features[FEAT_HV_RECOMM_EAX] |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
- env->features[FEAT_HV_NESTED_EAX] = evmcs_version;
+
+ if (!r) {
+ env->features[FEAT_HV_RECOMM_EAX] |=
+ HV_ENLIGHTENED_VMCS_RECOMMENDED;
+ env->features[FEAT_HV_NESTED_EAX] = evmcs_version;
+ }
}
if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
@@ -1051,8 +1066,33 @@ static int hyperv_handle_properties(CPUState *cs,
cpuid = get_supported_hv_cpuid_legacy(cs);
}
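+    /*
+     * Pass-through mode: copy KVM's Hyper-V CPUID leaves verbatim and fill
+     * the feature words directly from what the kernel reports.
+     */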
+ if (cpu->hyperv_passthrough) {
+ memcpy(cpuid_ent, &cpuid->entries[0],
+ cpuid->nent * sizeof(cpuid->entries[0]));
+
+ c = cpuid_find_entry(cpuid, HV_CPUID_FEATURES, 0);
+ if (c) {
+ env->features[FEAT_HYPERV_EAX] = c->eax;
+ env->features[FEAT_HYPERV_EBX] = c->ebx;
+            env->features[FEAT_HYPERV_EDX] = c->edx;
+ }
+ c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0);
+ if (c) {
+ env->features[FEAT_HV_RECOMM_EAX] = c->eax;
+
+            /* hv-spinlocks may have been overridden */
+ if (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY) {
+ c->ebx = cpu->hyperv_spinlock_attempts;
+ }
+ }
+ c = cpuid_find_entry(cpuid, HV_CPUID_NESTED_FEATURES, 0);
+ if (c) {
+ env->features[FEAT_HV_NESTED_EAX] = c->eax;
+ }
+ }
+
/* Features */
- r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RELAXED);
+ r = hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RELAXED);
r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VAPIC);
r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TIME);
r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_CRASH);
@@ -1085,6 +1125,12 @@ static int hyperv_handle_properties(CPUState *cs,
goto free;
}
+ if (cpu->hyperv_passthrough) {
+ /* We already copied all feature words from KVM as is */
+ r = cpuid->nent;
+ goto free;
+ }
+
c = &cpuid_ent[cpuid_i++];
c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
if (!cpu->hyperv_vendor_id) {
@@ -1156,11 +1202,26 @@ free:
return r;
}
+static Error *hv_passthrough_mig_blocker;
+
static int hyperv_init_vcpu(X86CPU *cpu)
{
CPUState *cs = CPU(cpu);
+ Error *local_err = NULL;
int ret;
+ if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
+ error_setg(&hv_passthrough_mig_blocker,
+ "'hv-passthrough' CPU flag prevents migration, use explicit"
+ " set of hv-* flags instead");
+ ret = migrate_add_blocker(hv_passthrough_mig_blocker, &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ error_free(hv_passthrough_mig_blocker);
+ return ret;
+ }
+ }
+
if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
/*
* the kernel doesn't support setting vp_index; assert that its value