author    | Paolo Bonzini <pbonzini@redhat.com> | 2015-06-18 18:28:42 +0200
committer | Paolo Bonzini <pbonzini@redhat.com> | 2015-07-06 17:59:43 +0200
commit    | fc12d72e10828ca6ff75f2ad432b741f07a10cef
tree      | 6340f3a7faa68a9bc2ffc3b7a6ed060a197b2b39 /target-i386/kvm.c
parent    | afd6895b45f20eb43b7ff95f7a76cc5af8d36cd7
target-i386: add support for SMBASE MSR and SMIs
Apart from the MSR, the smi field of struct kvm_vcpu_events has to be
translated into the corresponding CPUX86State fields. Also,
memory transaction flags depend on SMM state, so pull it from struct
kvm_run on every exit from KVM to userspace.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
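The translation described above is symmetric: four fields in KVM's `struct kvm_vcpu_events` smi member map onto `env->hflags`, `env->hflags2` and `cs->interrupt_request`. Below is a minimal, self-contained sketch of that mapping, distilled from the diff that follows. The types `struct smi_events` and `struct cpu` and the bit values are stand-ins invented for this sketch; the real definitions live in `linux/kvm.h` and `target-i386/cpu.h`.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real constants differ in value. */
#define HF_SMM_MASK              (1u << 19)
#define HF2_SMM_INSIDE_NMI_MASK  (1u << 4)
#define CPU_INTERRUPT_SMI        (1u << 6)
#define CPU_INTERRUPT_INIT       (1u << 9)

/* Simplified slices of struct kvm_vcpu_events.smi and of QEMU's
 * CPUState/CPUX86State, just enough to show the translation. */
struct smi_events {
    uint8_t smm;             /* CPU is in SMM right now */
    uint8_t pending;         /* an SMI is waiting for delivery */
    uint8_t smm_inside_nmi;  /* SMM was entered inside an NMI handler */
    uint8_t latched_init;    /* INIT latched until SMM exit */
};

struct cpu {
    uint32_t hflags, hflags2, interrupt_request;
};

/* QEMU -> KVM direction, as kvm_put_vcpu_events() does in the diff
 * (this sketch always takes the in-kernel irqchip branch, where the
 * pending SMI/INIT move out of interrupt_request and into the kernel). */
static struct smi_events put_smi_state(struct cpu *c)
{
    struct smi_events e = {
        .smm            = !!(c->hflags & HF_SMM_MASK),
        .smm_inside_nmi = !!(c->hflags2 & HF2_SMM_INSIDE_NMI_MASK),
        .pending        = !!(c->interrupt_request & CPU_INTERRUPT_SMI),
        .latched_init   = !!(c->interrupt_request & CPU_INTERRUPT_INIT),
    };
    c->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
    return e;
}

/* KVM -> QEMU direction, as kvm_get_vcpu_events() does in the diff. */
static void get_smi_state(struct cpu *c, const struct smi_events *e)
{
    c->hflags  = e->smm ? (c->hflags | HF_SMM_MASK)
                        : (c->hflags & ~HF_SMM_MASK);
    c->hflags2 = e->smm_inside_nmi
        ? (c->hflags2 | HF2_SMM_INSIDE_NMI_MASK)
        : (c->hflags2 & ~HF2_SMM_INSIDE_NMI_MASK);
    c->interrupt_request = e->pending
        ? (c->interrupt_request | CPU_INTERRUPT_SMI)
        : (c->interrupt_request & ~CPU_INTERRUPT_SMI);
    c->interrupt_request = e->latched_init
        ? (c->interrupt_request | CPU_INTERRUPT_INIT)
        : (c->interrupt_request & ~CPU_INTERRUPT_INIT);
}

int main(void)
{
    /* Round trip: in SMM with an SMI pending. */
    struct cpu c = { .hflags = HF_SMM_MASK,
                     .interrupt_request = CPU_INTERRUPT_SMI };
    struct smi_events e = put_smi_state(&c);
    get_smi_state(&c, &e);
    printf("smm=%d interrupt_request=%#x\n",
           !!(c.hflags & HF_SMM_MASK), c.interrupt_request);
    return 0;
}
```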
Diffstat (limited to 'target-i386/kvm.c')
-rw-r--r-- | target-i386/kvm.c | 105
1 file changed, 93 insertions(+), 12 deletions(-)
```diff
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 6426600..b42b56c 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -73,6 +73,7 @@ static bool has_msr_feature_control;
 static bool has_msr_async_pf_en;
 static bool has_msr_pv_eoi_en;
 static bool has_msr_misc_enable;
+static bool has_msr_smbase;
 static bool has_msr_bndcfgs;
 static bool has_msr_kvm_steal_time;
 static int lm_capable_kernel;
@@ -819,6 +820,10 @@ static int kvm_get_supported_msrs(KVMState *s)
                     has_msr_tsc_deadline = true;
                     continue;
                 }
+                if (kvm_msr_list->indices[i] == MSR_IA32_SMBASE) {
+                    has_msr_smbase = true;
+                    continue;
+                }
                 if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
                     has_msr_misc_enable = true;
                     continue;
@@ -1245,6 +1250,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
         kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
                           env->msr_ia32_misc_enable);
     }
+    if (has_msr_smbase) {
+        kvm_msr_entry_set(&msrs[n++], MSR_IA32_SMBASE, env->smbase);
+    }
     if (has_msr_bndcfgs) {
         kvm_msr_entry_set(&msrs[n++], MSR_IA32_BNDCFGS, env->msr_bndcfgs);
     }
@@ -1606,6 +1614,9 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (has_msr_misc_enable) {
         msrs[n++].index = MSR_IA32_MISC_ENABLE;
     }
+    if (has_msr_smbase) {
+        msrs[n++].index = MSR_IA32_SMBASE;
+    }
     if (has_msr_feature_control) {
         msrs[n++].index = MSR_IA32_FEATURE_CONTROL;
     }
@@ -1760,6 +1771,9 @@ static int kvm_get_msrs(X86CPU *cpu)
         case MSR_IA32_MISC_ENABLE:
             env->msr_ia32_misc_enable = msrs[i].data;
             break;
+        case MSR_IA32_SMBASE:
+            env->smbase = msrs[i].data;
+            break;
         case MSR_IA32_FEATURE_CONTROL:
             env->msr_ia32_feature_control = msrs[i].data;
             break;
@@ -1923,6 +1937,7 @@ static int kvm_put_apic(X86CPU *cpu)
 
 static int kvm_put_vcpu_events(X86CPU *cpu, int level)
 {
+    CPUState *cs = CPU(cpu);
     CPUX86State *env = &cpu->env;
     struct kvm_vcpu_events events = {};
 
@@ -1947,6 +1962,24 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level)
 
     events.sipi_vector = env->sipi_vector;
 
+    if (has_msr_smbase) {
+        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
+        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
+        if (kvm_irqchip_in_kernel()) {
+            /* As soon as these are moved to the kernel, remove them
+             * from cs->interrupt_request.
+             */
+            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
+            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
+            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
+        } else {
+            /* Keep these in cs->interrupt_request.  */
+            events.smi.pending = 0;
+            events.smi.latched_init = 0;
+        }
+        events.flags |= KVM_VCPUEVENT_VALID_SMM;
+    }
+
     events.flags = 0;
     if (level >= KVM_PUT_RESET_STATE) {
         events.flags |=
@@ -1966,6 +1999,7 @@ static int kvm_get_vcpu_events(X86CPU *cpu)
         return 0;
     }
 
+    memset(&events, 0, sizeof(events));
     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
     if (ret < 0) {
         return ret;
@@ -1987,6 +2021,29 @@ static int kvm_get_vcpu_events(X86CPU *cpu)
         env->hflags2 &= ~HF2_NMI_MASK;
     }
 
+    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
+        if (events.smi.smm) {
+            env->hflags |= HF_SMM_MASK;
+        } else {
+            env->hflags &= ~HF_SMM_MASK;
+        }
+        if (events.smi.pending) {
+            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
+        } else {
+            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
+        }
+        if (events.smi.smm_inside_nmi) {
+            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
+        } else {
+            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
+        }
+        if (events.smi.latched_init) {
+            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
+        } else {
+            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
+        }
+    }
+
     env->sipi_vector = events.sipi_vector;
 
     return 0;
@@ -2190,16 +2247,28 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
     int ret;
 
     /* Inject NMI */
-    if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
-        qemu_mutex_lock_iothread();
-        cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
-        qemu_mutex_unlock_iothread();
-
-        DPRINTF("injected NMI\n");
-        ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
-        if (ret < 0) {
-            fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
-                    strerror(-ret));
+    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
+        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
+            qemu_mutex_lock_iothread();
+            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+            qemu_mutex_unlock_iothread();
+            DPRINTF("injected NMI\n");
+            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
+            if (ret < 0) {
+                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
+                        strerror(-ret));
+            }
+        }
+        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+            qemu_mutex_lock_iothread();
+            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+            qemu_mutex_unlock_iothread();
+            DPRINTF("injected SMI\n");
+            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
+            if (ret < 0) {
+                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
+                        strerror(-ret));
+            }
         }
     }
 
@@ -2212,7 +2281,13 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
      * pending TPR access reports.
      */
     if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
-        cpu->exit_request = 1;
+        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+            !(env->hflags & HF_SMM_MASK)) {
+            cpu->exit_request = 1;
+        }
+        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+            cpu->exit_request = 1;
+        }
     }
 
     if (!kvm_irqchip_in_kernel()) {
@@ -2260,6 +2335,11 @@ MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
     X86CPU *x86_cpu = X86_CPU(cpu);
     CPUX86State *env = &x86_cpu->env;
 
+    if (run->flags & KVM_RUN_X86_SMM) {
+        env->hflags |= HF_SMM_MASK;
+    } else {
+        env->hflags &= ~HF_SMM_MASK;
+    }
     if (run->if_flag) {
         env->eflags |= IF_MASK;
     } else {
@@ -2307,7 +2387,8 @@ int kvm_arch_process_async_events(CPUState *cs)
         }
     }
 
-    if (cs->interrupt_request & CPU_INTERRUPT_INIT) {
+    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
+        !(env->hflags & HF_SMM_MASK)) {
         kvm_cpu_synchronize_state(cs);
         do_cpu_init(cpu);
     }
```
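For context on the last sentence of the commit message: `kvm_arch_post_run()` returns the `MemTxAttrs` used for memory transactions performed while the exit is handled, which is why `HF_SMM_MASK` must be refreshed from `struct kvm_run` on every exit. The sketch below shows that dependency in one place; `struct x86_env`, `struct kvm_run_slice` and the constant values are trimmed stand-ins for this sketch, and the `.secure` derivation matches what QEMU's `cpu_get_mem_attrs()` helper does elsewhere in the same patch series, not code from this diff.

```c
#include <stdbool.h>
#include <stdint.h>

#define HF_SMM_MASK      (1u << 19)  /* stand-in; see target-i386/cpu.h */
#define KVM_RUN_X86_SMM  (1u << 0)   /* stand-in; see linux/kvm.h */

typedef struct MemTxAttrs { bool secure; } MemTxAttrs;

struct x86_env       { uint32_t hflags; };
struct kvm_run_slice { uint16_t flags; };

/* Mirrors the kvm_arch_post_run() hunk above: latch the SMM state that
 * KVM reports for this exit, then derive the transaction attributes
 * from it.  While .secure is set, memory and MMIO accesses made on the
 * vCPU's behalf resolve against the SMRAM view of the address space. */
static MemTxAttrs post_run_attrs(struct x86_env *env,
                                 const struct kvm_run_slice *run)
{
    if (run->flags & KVM_RUN_X86_SMM) {
        env->hflags |= HF_SMM_MASK;
    } else {
        env->hflags &= ~HF_SMM_MASK;
    }
    return (MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 };
}
```

Pulling the flag on every exit, rather than only at synchronize points, matters because even a single MMIO access handled between two KVM_RUN calls already needs the correct SMRAM-or-normal view.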