author     Paolo Bonzini <pbonzini@redhat.com>  2014-05-14 10:38:18 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>  2014-05-21 18:02:08 +0200
commit     7125c937c97d9ec4a41b3cb6d1b3e805ec53e255
tree       9708833c2a12eecd0a52c9c85a0853d0a54784a5
parent     d3b5491897456739c6dc21c604ef8bc28e294bfc
target-i386: get CPL from SS.DPL
CS.RPL is not equal to the CPL in the few instructions between setting CR0.PE
and reloading CS.  We get this right in the common case, because writes to CR0
do not modify the CPL, but it would not be enough if an SMI comes exactly
during that brief period.  Were this to happen, the RSM instruction would
erroneously set CPL to the low two bits of the real-mode selector; and if they
are not 00, the next instruction fetch cannot access the code segment and
causes a triple fault.

However, SS.DPL *is* always equal to the CPL.  In real processors (AMD only)
there is a weird case of SYSRET setting SS.DPL=SS.RPL from the STAR register
while forcing CPL=3, but we do not emulate that.

Tested-by: Kevin O'Connor <kevin@koconnor.net>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
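For illustration only, a small standalone C sketch (not part of the patch) contrasting the two derivations of the CPL that the message describes. DESC_DPL_SHIFT mirrors QEMU's cached segment-flags layout, where the flags word holds the upper 32 bits of the descriptor and DPL occupies bits 13-14; the selector and flags values below are made up.

#include <stdint.h>
#include <stdio.h>

#define DESC_DPL_SHIFT 13   /* DPL position in the cached descriptor flags */

/* Old scheme: CPL taken from the low two bits (RPL) of the CS selector.
 * Wrong in the window between setting CR0.PE and the far jump that reloads
 * CS, where CS still contains a real-mode value. */
unsigned cpl_from_cs_rpl(uint16_t cs_selector)
{
    return cs_selector & 3;
}

/* New scheme: CPL taken from SS.DPL, which always matches the CPL
 * (ignoring the unemulated AMD SYSRET corner case mentioned above). */
unsigned cpl_from_ss_dpl(uint32_t ss_flags)
{
    return (ss_flags >> DESC_DPL_SHIFT) & 3;
}

int main(void)
{
    uint16_t real_mode_cs = 0xf002;                   /* hypothetical real-mode CS value */
    uint32_t kernel_ss_flags = 0u << DESC_DPL_SHIFT;  /* SS.DPL = 0 */

    printf("CPL from CS.RPL: %u (bogus during the transition)\n",
           cpl_from_cs_rpl(real_mode_cs));
    printf("CPL from SS.DPL: %u\n", cpl_from_ss_dpl(kernel_ss_flags));
    return 0;
}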
Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/cpu.h     | 8
-rw-r--r--  target-i386/kvm.c     | 2
-rw-r--r--  target-i386/machine.c | 8
3 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index e9cbdab..65a44d9 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -986,7 +986,6 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env,
     /* update the hidden flags */
     {
         if (seg_reg == R_CS) {
-            int cpl = selector & 3;
 #ifdef TARGET_X86_64
             if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
                 /* long mode */
@@ -996,15 +995,14 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env,
 #endif
             {
                 /* legacy / compatibility case */
-                if (!(env->cr[0] & CR0_PE_MASK))
-                    cpl = 0;
-                else if (env->eflags & VM_MASK)
-                    cpl = 3;
                 new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                     >> (DESC_B_SHIFT - HF_CS32_SHIFT);
                 env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                     new_hflags;
             }
+        }
+        if (seg_reg == R_SS) {
+            int cpl = (flags >> DESC_DPL_SHIFT) & 3;
 #if HF_CPL_MASK != 3
 #error HF_CPL_MASK is hardcoded
 #endif
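(Aside, not from the patch: the #if/#error guard exists because the DPL bits extracted in the new R_SS branch are OR'd directly into the low bits of hflags. A minimal self-contained sketch of that mask-and-or update, with names modelled on QEMU's but not copied from the file:)

#include <stdint.h>

#define DESC_DPL_SHIFT 13
#define HF_CPL_MASK    3   /* CPL occupies bits 0-1 of hflags */

/* Sketch: replace the CPL bits of hflags with the DPL of the just-loaded SS.
 * Only valid because HF_CPL_MASK is 3, i.e. the CPL sits in the low two
 * bits -- exactly what the #error guard above enforces at build time. */
uint32_t update_cpl(uint32_t hflags, uint32_t ss_flags)
{
    uint32_t cpl = (ss_flags >> DESC_DPL_SHIFT) & 3;
    return (hflags & ~HF_CPL_MASK) | cpl;
}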
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 0d894ef..3931d4c 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -1430,7 +1430,7 @@ static int kvm_get_sregs(X86CPU *cpu)
        HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
        HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

-    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+    hflags = (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
     hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
     hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
               (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
diff --git a/target-i386/machine.c b/target-i386/machine.c
index 168cab6..bdff447 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -312,6 +312,14 @@ static int cpu_post_load(void *opaque, int version_id)
         env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
     }

+    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
+     * running under KVM. This is wrong for conforming code segments.
+     * Luckily, in our implementation the CPL field of hflags is redundant
+     * and we can get the right value from the SS descriptor privilege level.
+     */
+    env->hflags &= ~HF_CPL_MASK;
+    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+
     /* XXX: restore FPU round state */
     env->fpstt = (env->fpus_vmstate >> 11) & 7;
     env->fpus = env->fpus_vmstate & ~0x3800;
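(Illustration only, not from the tree: the comment in the hunk above claims the CPL bits of hflags are redundant, because they can always be recomputed from SS.DPL. A standalone sketch of that post-load style fixup, with structures invented for the example:)

#include <stdint.h>

#define DESC_DPL_SHIFT 13
#define HF_CPL_MASK    3

struct seg_cache { uint32_t flags; };                       /* stand-in for SegmentCache */
struct cpu_state { struct seg_cache ss; uint32_t hflags; }; /* stand-in for CPUX86State */

/* Whatever CPL an older sender stored in hflags (possibly CS.DPL),
 * overwrite it with the value derived from SS.DPL, which is authoritative. */
void fixup_cpl_after_load(struct cpu_state *env)
{
    env->hflags &= ~HF_CPL_MASK;
    env->hflags |= (env->ss.flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
}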