author     Peter Maydell <peter.maydell@linaro.org>  2014-06-05 19:16:28 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2014-06-05 19:16:28 +0100
commit     9f0355b590ac523d0c4e67c416c3f9cf7af3d574 (patch)
tree       59be39e43a8014ec6031a0afe60dec29b6b3e032 /target-i386
parent     d4f005db9b90b3df6945c709867ab62d8b772a94 (diff)
parent     79b6f2f651d64a122dd647c1456635d5a6a176ac (diff)
Merge remote-tracking branch 'remotes/kvm/uq/master' into staging
* remotes/kvm/uq/master:
  kvm: Fix eax for cpuid leaf 0x40000000
  kvmclock: Ensure proper env->tsc value for kvmclock_current_nsec calculation
  kvm: Enable -cpu option to hide KVM
  kvm: Ensure negative return value on kvm_init() error handling path
  target-i386: set CC_OP to CC_OP_EFLAGS in cpu_load_eflags
  target-i386: get CPL from SS.DPL
  target-i386: rework CPL checks during task switch, preparing for next patch
  target-i386: fix segment flags for SMM and VM86 mode
  target-i386: Fix vm86 mode regression introduced in fd460606fd6f.
  kvm_stat: allow choosing between tracepoints and old stats
  kvmclock: Ensure time in migration never goes backward

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target-i386')
-rw-r--r--target-i386/cpu-qom.h1
-rw-r--r--target-i386/cpu.c1
-rw-r--r--target-i386/cpu.h13
-rw-r--r--target-i386/gdbstub.c4
-rw-r--r--target-i386/kvm.c30
-rw-r--r--target-i386/machine.c8
-rw-r--r--target-i386/seg_helper.c47
-rw-r--r--target-i386/smm_helper.c26
-rw-r--r--target-i386/svm_helper.c2
9 files changed, 77 insertions, 55 deletions
diff --git a/target-i386/cpu-qom.h b/target-i386/cpu-qom.h
index e9b3d57..0808cfc 100644
--- a/target-i386/cpu-qom.h
+++ b/target-i386/cpu-qom.h
@@ -87,6 +87,7 @@ typedef struct X86CPU {
bool hyperv_time;
bool check_cpuid;
bool enforce_cpuid;
+ bool expose_kvm;
/* if true the CPUID code directly forwards host cache leaves to the guest */
bool cache_info_passthrough;
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index 042a48d..cbf1d97 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -2792,6 +2792,7 @@ static Property x86_cpu_properties[] = {
DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
+ DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
DEFINE_PROP_END_OF_LIST()
};
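With the "kvm" property defaulting to true, the KVM CPUID signature can be suppressed from the guest on the command line, for example (illustrative invocation, other options elided):

    qemu-system-x86_64 -enable-kvm -cpu host,kvm=off ...

Guests that refuse to run when they detect a hypervisor (some proprietary drivers, for instance) then see no KVM leaves at 0x40000000.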
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index e9cbdab..ee410af 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -986,7 +986,6 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env,
/* update the hidden flags */
{
if (seg_reg == R_CS) {
- int cpl = selector & 3;
#ifdef TARGET_X86_64
if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
/* long mode */
@@ -996,15 +995,14 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env,
#endif
{
/* legacy / compatibility case */
- if (!(env->cr[0] & CR0_PE_MASK))
- cpl = 0;
- else if (env->eflags & VM_MASK)
- cpl = 3;
new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
>> (DESC_B_SHIFT - HF_CS32_SHIFT);
env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
new_hflags;
}
+ }
+ if (seg_reg == R_SS) {
+ int cpl = (flags >> DESC_DPL_SHIFT) & 3;
#if HF_CPL_MASK != 3
#error HF_CPL_MASK is hardcoded
#endif
@@ -1234,11 +1232,14 @@ static inline uint32_t cpu_compute_eflags(CPUX86State *env)
return env->eflags | cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
}
-/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */
+/* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS
+ * after generating a call to a helper that uses this.
+ */
static inline void cpu_load_eflags(CPUX86State *env, int eflags,
int update_mask)
{
CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+ CC_OP = CC_OP_EFLAGS;
env->df = 1 - (2 * ((eflags >> 10) & 1));
env->eflags = (env->eflags & ~update_mask) |
(eflags & update_mask) | 0x2;
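The hunks above and the matching changes in kvm.c and machine.c all converge on one rule: the CPL is read from SS.DPL. A minimal sketch of that derivation (hypothetical helper name, using QEMU's existing macros), assuming the synthesized real-mode/vm86 flags installed elsewhere in this series:

    /* Sketch, not part of the patch: SS.DPL always equals the CPL.
     * Unlike CS.RPL or CS.DPL, it stays correct across transfers
     * through conforming code segments, and it is 3 in vm86 mode
     * and 0 in real mode.
     */
    static inline int x86_current_cpl(CPUX86State *env)
    {
        return (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    }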
diff --git a/target-i386/gdbstub.c b/target-i386/gdbstub.c
index d34e535..19fe9ad 100644
--- a/target-i386/gdbstub.c
+++ b/target-i386/gdbstub.c
@@ -127,9 +127,11 @@ static int x86_cpu_gdb_load_seg(X86CPU *cpu, int sreg, uint8_t *mem_buf)
target_ulong base;
if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
+ int dpl = (env->eflags & VM_MASK) ? 3 : 0;
base = selector << 4;
limit = 0xffff;
- flags = 0;
+ flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK | (dpl << DESC_DPL_SHIFT);
} else {
if (!cpu_x86_get_descr_debug(env, selector, &base, &limit,
&flags)) {
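The flags word synthesized here for the debugger mirrors what the other patches in this series install for real-mode and vm86 segments. A sketch of the construction (hypothetical helper name):

    /* Sketch: descriptor flags for a real-mode/vm86 segment.  The
     * segment is marked present (P), non-system (S), writable (W)
     * and accessed (A); its DPL is 3 in vm86 mode so that SS.DPL
     * keeps reporting CPL 3, and 0 in real mode.
     */
    static uint32_t realmode_seg_flags(int vm86_active)
    {
        int dpl = vm86_active ? 3 : 0;
        return DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK |
               (dpl << DESC_DPL_SHIFT);
    }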
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 0d894ef..4bf0ac9 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -528,23 +528,25 @@ int kvm_arch_init_vcpu(CPUState *cs)
has_msr_hv_hypercall = true;
}
- memcpy(signature, "KVMKVMKVM\0\0\0", 12);
- c = &cpuid_data.entries[cpuid_i++];
- c->function = KVM_CPUID_SIGNATURE | kvm_base;
- c->eax = 0;
- c->ebx = signature[0];
- c->ecx = signature[1];
- c->edx = signature[2];
+ if (cpu->expose_kvm) {
+ memcpy(signature, "KVMKVMKVM\0\0\0", 12);
+ c = &cpuid_data.entries[cpuid_i++];
+ c->function = KVM_CPUID_SIGNATURE | kvm_base;
+ c->eax = KVM_CPUID_FEATURES | kvm_base;
+ c->ebx = signature[0];
+ c->ecx = signature[1];
+ c->edx = signature[2];
- c = &cpuid_data.entries[cpuid_i++];
- c->function = KVM_CPUID_FEATURES | kvm_base;
- c->eax = env->features[FEAT_KVM];
+ c = &cpuid_data.entries[cpuid_i++];
+ c->function = KVM_CPUID_FEATURES | kvm_base;
+ c->eax = env->features[FEAT_KVM];
- has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);
+ has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);
- has_msr_pv_eoi_en = c->eax & (1 << KVM_FEATURE_PV_EOI);
+ has_msr_pv_eoi_en = c->eax & (1 << KVM_FEATURE_PV_EOI);
- has_msr_kvm_steal_time = c->eax & (1 << KVM_FEATURE_STEAL_TIME);
+ has_msr_kvm_steal_time = c->eax & (1 << KVM_FEATURE_STEAL_TIME);
+ }
cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
@@ -1430,7 +1432,7 @@ static int kvm_get_sregs(X86CPU *cpu)
HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
- hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+ hflags = (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
diff --git a/target-i386/machine.c b/target-i386/machine.c
index 168cab6..bdff447 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -312,6 +312,14 @@ static int cpu_post_load(void *opaque, int version_id)
env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
}
+ /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
+ * running under KVM. This is wrong for conforming code segments.
+ * Luckily, in our implementation the CPL field of hflags is redundant
+ * and we can get the right value from the SS descriptor privilege level.
+ */
+ env->hflags &= ~HF_CPL_MASK;
+ env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+
/* XXX: restore FPU round state */
env->fpstt = (env->fpus_vmstate >> 11) & 7;
env->fpus = env->fpus_vmstate & ~0x3800;
diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c
index 258aae8..51c2833 100644
--- a/target-i386/seg_helper.c
+++ b/target-i386/seg_helper.c
@@ -88,8 +88,10 @@ static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
selector &= 0xffff;
- cpu_x86_load_seg_cache(env, seg, selector,
- (selector << 4), 0xffff, 0);
+
+ cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
@@ -133,11 +135,10 @@ static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
}
}
-/* XXX: merge with load_seg() */
-static void tss_load_seg(CPUX86State *env, int seg_reg, int selector)
+static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl)
{
uint32_t e1, e2;
- int rpl, dpl, cpl;
+ int rpl, dpl;
if ((selector & 0xfffc) != 0) {
if (load_segment(env, &e1, &e2, selector) != 0) {
@@ -148,18 +149,13 @@ static void tss_load_seg(CPUX86State *env, int seg_reg, int selector)
}
rpl = selector & 3;
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- cpl = env->hflags & HF_CPL_MASK;
if (seg_reg == R_CS) {
if (!(e2 & DESC_CS_MASK)) {
raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
}
- /* XXX: is it correct? */
if (dpl != rpl) {
raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
}
- if ((e2 & DESC_C_MASK) && dpl > rpl) {
- raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
- }
} else if (seg_reg == R_SS) {
/* SS must be writable data */
if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
@@ -446,12 +442,13 @@ static void switch_tss(CPUX86State *env, int tss_selector,
/* load the segments */
if (!(new_eflags & VM_MASK)) {
- tss_load_seg(env, R_CS, new_segs[R_CS]);
- tss_load_seg(env, R_SS, new_segs[R_SS]);
- tss_load_seg(env, R_ES, new_segs[R_ES]);
- tss_load_seg(env, R_DS, new_segs[R_DS]);
- tss_load_seg(env, R_FS, new_segs[R_FS]);
- tss_load_seg(env, R_GS, new_segs[R_GS]);
+ int cpl = new_segs[R_CS] & 3;
+ tss_load_seg(env, R_CS, new_segs[R_CS], cpl);
+ tss_load_seg(env, R_SS, new_segs[R_SS], cpl);
+ tss_load_seg(env, R_ES, new_segs[R_ES], cpl);
+ tss_load_seg(env, R_DS, new_segs[R_DS], cpl);
+ tss_load_seg(env, R_FS, new_segs[R_FS], cpl);
+ tss_load_seg(env, R_GS, new_segs[R_GS], cpl);
}
/* check that env->eip is in the CS segment limits */
@@ -558,6 +555,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
int has_error_code, new_stack, shift;
uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
uint32_t old_eip, sp_mask;
+ int vm86 = env->eflags & VM_MASK;
has_error_code = 0;
if (!is_int && !is_hw) {
@@ -673,7 +671,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
ssp = get_seg_base(ss_e1, ss_e2);
} else if ((e2 & DESC_C_MASK) || dpl == cpl) {
/* to same privilege */
- if (env->eflags & VM_MASK) {
+ if (vm86) {
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
}
new_stack = 0;
@@ -694,14 +692,14 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
#if 0
/* XXX: check that enough room is available */
push_size = 6 + (new_stack << 2) + (has_error_code << 1);
- if (env->eflags & VM_MASK) {
+ if (vm86) {
push_size += 8;
}
push_size <<= shift;
#endif
if (shift == 1) {
if (new_stack) {
- if (env->eflags & VM_MASK) {
+ if (vm86) {
PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
@@ -718,7 +716,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
}
} else {
if (new_stack) {
- if (env->eflags & VM_MASK) {
+ if (vm86) {
PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
@@ -742,7 +740,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
if (new_stack) {
- if (env->eflags & VM_MASK) {
+ if (vm86) {
cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
@@ -1600,7 +1598,6 @@ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
}
next_eip = env->eip + next_eip_addend;
switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
- CC_OP = CC_OP_EFLAGS;
break;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
@@ -1769,7 +1766,6 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
}
switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
- CC_OP = CC_OP_EFLAGS;
return;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
@@ -2464,9 +2460,12 @@ void helper_verw(CPUX86State *env, target_ulong selector1)
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
+ int dpl = (env->eflags & VM_MASK) ? 3 : 0;
selector &= 0xffff;
cpu_x86_load_seg_cache(env, seg_reg, selector,
- (selector << 4), 0xffff, 0);
+ (selector << 4), 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
} else {
helper_load_seg(env, seg_reg, selector);
}
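The vm86 latch introduced above guards against an ordering hazard: do_interrupt_protected() clears VM_MASK from env->eflags partway through, so a later test of env->eflags would no longer see that the CPU entered from vm86 mode. A condensed sketch of the pattern (hypothetical helper mirroring the real function):

    /* Condensed sketch of the latch used in do_interrupt_protected().
     * VM_MASK is cleared from env->eflags before the new-stack
     * handling, so the vm86 status must be captured first.
     */
    static void enter_handler_from_vm86(CPUX86State *env, int new_stack)
    {
        int vm86 = env->eflags & VM_MASK;  /* latch before the clear */

        env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

        if (new_stack && vm86) {
            /* leaving vm86 mode: the data segment registers are zeroed */
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
    }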
diff --git a/target-i386/smm_helper.c b/target-i386/smm_helper.c
index 5d7697c..58051d3 100644
--- a/target-i386/smm_helper.c
+++ b/target-i386/smm_helper.c
@@ -168,15 +168,26 @@ void do_smm_enter(X86CPU *cpu)
CR0_PG_MASK));
cpu_x86_update_cr4(env, 0);
env->dr[7] = 0x00000400;
- CC_OP = CC_OP_EFLAGS;
cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
- 0xffffffff, 0);
- cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
- cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
- cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
- cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
- cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
+ 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK);
}
void helper_rsm(CPUX86State *env)
@@ -296,7 +307,6 @@ void helper_rsm(CPUX86State *env)
env->smbase = ldl_phys(cs->as, sm_state + 0x7ef8) & ~0x7fff;
}
#endif
- CC_OP = CC_OP_EFLAGS;
env->hflags &= ~HF_SMM_MASK;
cpu_smm_update(env);
diff --git a/target-i386/svm_helper.c b/target-i386/svm_helper.c
index 852e2ba..ea5de6f 100644
--- a/target-i386/svm_helper.c
+++ b/target-i386/svm_helper.c
@@ -260,7 +260,6 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
env->vm_vmcb + offsetof(struct vmcb,
save.rflags)),
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
- CC_OP = CC_OP_EFLAGS;
svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
R_ES);
@@ -702,7 +701,6 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
save.rflags)),
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
VM_MASK));
- CC_OP = CC_OP_EFLAGS;
svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
R_ES);
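All of the CC_OP = CC_OP_EFLAGS removals in this merge (ljmp/lcall, SMM enter/resume, vmrun/vmexit) rely on the cpu_load_eflags() change in cpu.h above: the helper now records the flags state itself, so the callers' explicit assignments were redundant. A sketch of the resulting invariant (hypothetical caller, using QEMU's CC_* macros):

    /* After this merge, any helper that restores RFLAGS through
     * cpu_load_eflags() can rely on CC_OP already being
     * CC_OP_EFLAGS when the call returns.
     */
    static void restore_flags_example(CPUX86State *env, uint32_t rflags)
    {
        cpu_load_eflags(env, rflags,
                        ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
        /* CC_OP == CC_OP_EFLAGS here; no explicit assignment needed. */
    }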