author     Peter Maydell <peter.maydell@linaro.org>    2014-05-15 15:38:39 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2014-05-15 15:38:40 +0100
commit     ef3cb5ca82c341e575ee5cb9a9dd8edc6aa96b1b (patch)
tree       1ec45abf7d4bcadc6bfa97e2be4d23e549d02eec /target-i386
parent     06e33c1c3c193074574eb3813eded91bff0fc86f (diff)
parent     4700a316df7d2cdcd256dcd64a10cec643f4dfa1 (diff)
Merge remote-tracking branch 'remotes/kvm/uq/master' into staging
* remotes/kvm/uq/master:
  pc: port 92 reset requires a low->high transition
  cpu: make CPU_INTERRUPT_RESET available on all targets
  apic: do not accept SIPI on the bootstrap processor
  target-i386: preserve FPU and MSR state on INIT
  target-i386: fix set of registers zeroed on reset
  kvm: forward INIT signals coming from the chipset
  kvm: reset state from the CPU's reset method
  target-i386: the x86 CPL is stored in CS.selector - auto update hflags accordingly.
  target-i386: set eflags prior to calling cpu_x86_load_seg_cache() in seg_helper.c
  target-i386: set eflags and cr0 prior to calling cpu_x86_load_seg_cache() in smm_helper.c
  target-i386: set eflags prior to calling svm_load_seg_cache() in svm_helper.c
  pci-assign: limit # of msix vectors
  pci-assign: Fix a bug when map MSI-X table memory failed
  kvm: make one_reg helpers available for everyone
  target-i386: Remove unused data from local array

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/cpu.c         11
-rw-r--r--  target-i386/cpu.h         85
-rw-r--r--  target-i386/helper.c      14
-rw-r--r--  target-i386/kvm.c         42
-rw-r--r--  target-i386/kvm_i386.h     2
-rw-r--r--  target-i386/seg_helper.c  53
-rw-r--r--  target-i386/smm_helper.c  34
-rw-r--r--  target-i386/svm_helper.c  11
8 files changed, 134 insertions(+), 118 deletions(-)
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index 8f193a9..042a48d 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -24,6 +24,7 @@
#include "cpu.h"
#include "sysemu/kvm.h"
#include "sysemu/cpus.h"
+#include "kvm_i386.h"
#include "topology.h"
#include "qemu/option.h"
@@ -2417,8 +2418,7 @@ static void x86_cpu_reset(CPUState *s)
xcc->parent_reset(s);
-
- memset(env, 0, offsetof(CPUX86State, pat));
+ memset(env, 0, offsetof(CPUX86State, cpuid_level));
tlb_flush(s, 1);
@@ -2484,8 +2484,7 @@ static void x86_cpu_reset(CPUState *s)
cpu_breakpoint_remove_all(s, BP_CPU);
cpu_watchpoint_remove_all(s, BP_CPU);
- env->tsc_adjust = 0;
- env->tsc = 0;
+ env->xcr0 = 1;
#if !defined(CONFIG_USER_ONLY)
/* We hard-wire the BSP to the first CPU. */
@@ -2494,6 +2493,10 @@ static void x86_cpu_reset(CPUState *s)
}
s->halted = !cpu_is_bsp(cpu);
+
+ if (kvm_enabled()) {
+ kvm_arch_reset_vcpu(cpu);
+ }
#endif
}
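Note on the reset hunk above: widening the memset bound from offsetof(CPUX86State, pat) to offsetof(CPUX86State, cpuid_level) works only because the struct now groups every reset-clearable field ahead of cpuid_level. A minimal sketch of the offsetof idiom, using a hypothetical struct rather than the real CPUX86State:

    #include <stddef.h>
    #include <string.h>

    struct demo_state {
        int scratch_a;          /* zeroed on reset */
        int scratch_b;          /* zeroed on reset */
        int preserved;          /* first field that must survive reset */
    };

    static void demo_reset(struct demo_state *s)
    {
        /* Clear only the fields declared before 'preserved'. */
        memset(s, 0, offsetof(struct demo_state, preserved));
    }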
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 2a22a7d..e9cbdab 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -124,9 +124,9 @@
#define ID_MASK 0x00200000
/* hidden flags - used internally by qemu to represent additional cpu
- states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not
- redundant. We avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK
- bit positions to ease oring with eflags. */
+ states. Only the INHIBIT_IRQ, SMM and SVMI are not redundant. We
+ avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit
+ positions to ease oring with eflags. */
/* current cpl */
#define HF_CPL_SHIFT 0
/* true if soft mmu is being used */
@@ -606,10 +606,11 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3
#define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4
#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0
-#define CPU_INTERRUPT_INIT CPU_INTERRUPT_TGT_INT_1
-#define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_2
-#define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_3
+#define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1
+#define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_2
+/* Use a clearer name for this. */
+#define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET
typedef enum {
CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
@@ -797,6 +798,13 @@ typedef struct CPUX86State {
target_ulong cr[5]; /* NOTE: cr1 is unused */
int32_t a20_mask;
+ BNDReg bnd_regs[4];
+ BNDCSReg bndcs_regs;
+ uint64_t msr_bndcfgs;
+
+ /* Beginning of state preserved by INIT (dummy marker). */
+ struct {} start_init_save;
+
/* FPU state */
unsigned int fpstt; /* top of stack index */
uint16_t fpus;
@@ -819,6 +827,8 @@ typedef struct CPUX86State {
XMMReg xmm_t0;
MMXReg mmx_t0;
+ XMMReg ymmh_regs[CPU_NB_REGS];
+
/* sysenter registers */
uint32_t sysenter_cs;
target_ulong sysenter_esp;
@@ -827,15 +837,6 @@ typedef struct CPUX86State {
uint64_t star;
uint64_t vm_hsave;
- uint64_t vm_vmcb;
- uint64_t tsc_offset;
- uint64_t intercept;
- uint16_t intercept_cr_read;
- uint16_t intercept_cr_write;
- uint16_t intercept_dr_read;
- uint16_t intercept_dr_write;
- uint32_t intercept_exceptions;
- uint8_t v_tpr;
#ifdef TARGET_X86_64
target_ulong lstar;
@@ -843,11 +844,6 @@ typedef struct CPUX86State {
target_ulong fmask;
target_ulong kernelgsbase;
#endif
- uint64_t system_time_msr;
- uint64_t wall_clock_msr;
- uint64_t steal_time_msr;
- uint64_t async_pf_en_msr;
- uint64_t pv_eoi_en_msr;
uint64_t tsc;
uint64_t tsc_adjust;
@@ -864,6 +860,19 @@ typedef struct CPUX86State {
uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS];
uint64_t msr_gp_counters[MAX_GP_COUNTERS];
uint64_t msr_gp_evtsel[MAX_GP_COUNTERS];
+
+ uint64_t pat;
+ uint32_t smbase;
+
+ /* End of state preserved by INIT (dummy marker). */
+ struct {} end_init_save;
+
+ uint64_t system_time_msr;
+ uint64_t wall_clock_msr;
+ uint64_t steal_time_msr;
+ uint64_t async_pf_en_msr;
+ uint64_t pv_eoi_en_msr;
+
uint64_t msr_hv_hypercall;
uint64_t msr_hv_guest_os_id;
uint64_t msr_hv_vapic;
@@ -878,9 +887,18 @@ typedef struct CPUX86State {
struct CPUBreakpoint *cpu_breakpoint[4];
struct CPUWatchpoint *cpu_watchpoint[4];
}; /* break/watchpoints for dr[0..3] */
- uint32_t smbase;
int old_exception; /* exception in flight */
+ uint64_t vm_vmcb;
+ uint64_t tsc_offset;
+ uint64_t intercept;
+ uint16_t intercept_cr_read;
+ uint16_t intercept_cr_write;
+ uint16_t intercept_dr_read;
+ uint16_t intercept_dr_write;
+ uint32_t intercept_exceptions;
+ uint8_t v_tpr;
+
/* KVM states, automatically cleared on reset */
uint8_t nmi_injected;
uint8_t nmi_pending;
@@ -888,7 +906,6 @@ typedef struct CPUX86State {
CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
- uint64_t pat;
/* processor features (e.g. for CPUID insn) */
uint32_t cpuid_level;
@@ -928,12 +945,7 @@ typedef struct CPUX86State {
uint16_t fpus_vmstate;
uint16_t fptag_vmstate;
uint16_t fpregs_format_vmstate;
-
uint64_t xstate_bv;
- XMMReg ymmh_regs[CPU_NB_REGS];
- BNDReg bnd_regs[4];
- BNDCSReg bndcs_regs;
- uint64_t msr_bndcfgs;
uint64_t xcr0;
@@ -974,6 +986,7 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env,
/* update the hidden flags */
{
if (seg_reg == R_CS) {
+ int cpl = selector & 3;
#ifdef TARGET_X86_64
if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
/* long mode */
@@ -983,11 +996,19 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env,
#endif
{
/* legacy / compatibility case */
+ if (!(env->cr[0] & CR0_PE_MASK))
+ cpl = 0;
+ else if (env->eflags & VM_MASK)
+ cpl = 3;
new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
>> (DESC_B_SHIFT - HF_CS32_SHIFT);
env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
new_hflags;
}
+#if HF_CPL_MASK != 3
+#error HF_CPL_MASK is hardcoded
+#endif
+ env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl;
}
new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
>> (DESC_B_SHIFT - HF_SS32_SHIFT);
@@ -1031,16 +1052,6 @@ int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
target_ulong *base, unsigned int *limit,
unsigned int *flags);
-/* wrapper, just in case memory mappings must be changed */
-static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
-{
-#if HF_CPL_MASK == 3
- s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
-#else
-#error HF_CPL_MASK is hardcoded
-#endif
-}
-
/* op_helper.c */
/* used for debug or cpu save/restore */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f);
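The cpu_x86_load_seg_cache() change above implements "the x86 CPL is stored in CS.selector": CPL is 0 in real mode, 3 in vm86 mode, and the RPL of CS otherwise. A standalone sketch of just that derivation, assuming the CR0_PE_MASK and VM_MASK definitions from this header:

    static inline int derive_cpl(uint32_t cr0, uint32_t eflags,
                                 uint16_t cs_selector)
    {
        if (!(cr0 & CR0_PE_MASK)) {
            return 0;               /* real mode runs at CPL 0 */
        }
        if (eflags & VM_MASK) {
            return 3;               /* vm86 mode runs at CPL 3 */
        }
        return cs_selector & 3;     /* otherwise CPL is the RPL of CS */
    }

This is why the later hunks can delete cpu_x86_set_cpl(): every caller that loads CS now gets the CPL updated as a side effect, provided EFLAGS and CR0 already hold their post-transition values at that point.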
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 372f0e3..46d20e4 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -19,6 +19,7 @@
#include "cpu.h"
#include "sysemu/kvm.h"
+#include "kvm_i386.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
@@ -1329,12 +1330,21 @@ void do_cpu_init(X86CPU *cpu)
{
CPUState *cs = CPU(cpu);
CPUX86State *env = &cpu->env;
+ CPUX86State *save = g_new(CPUX86State, 1);
int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
- uint64_t pat = env->pat;
+
+ *save = *env;
cpu_reset(cs);
cs->interrupt_request = sipi;
- env->pat = pat;
+ memcpy(&env->start_init_save, &save->start_init_save,
+ offsetof(CPUX86State, end_init_save) -
+ offsetof(CPUX86State, start_init_save));
+ g_free(save);
+
+ if (kvm_enabled()) {
+ kvm_arch_do_init_vcpu(cpu);
+ }
apic_init_reset(cpu->apic_state);
}
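do_cpu_init() now snapshots the whole CPUX86State and copies back only the slice between the two zero-size markers declared in cpu.h. Empty struct members are a GNU C extension: they occupy no storage yet have a well-defined offset, so offsetof() can delimit the preserved range. A self-contained sketch (illustrative field names, builds with GCC):

    #include <stddef.h>
    #include <stdio.h>

    struct s {
        int a;                      /* cleared by INIT */
        struct {} start_marker;     /* zero-size range delimiter */
        int b;                      /* preserved across INIT */
        int c;                      /* preserved across INIT */
        struct {} end_marker;       /* zero-size range delimiter */
        int d;                      /* cleared by INIT */
    };

    int main(void)
    {
        printf("preserved range spans %zu bytes\n",
               offsetof(struct s, end_marker) -
               offsetof(struct s, start_marker));
        return 0;
    }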
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 4389959..0d894ef 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -30,6 +30,8 @@
#include "qemu/config-file.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
+#include "hw/i386/apic_internal.h"
+#include "hw/i386/apic-msidef.h"
#include "exec/ioport.h"
#include <asm/hyperv.h>
#include "hw/pci/pci.h"
@@ -130,14 +132,13 @@ static const struct kvm_para_features {
{ KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
{ KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
{ KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
- { -1, -1 }
};
static int get_para_features(KVMState *s)
{
int i, features = 0;
- for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
+ for (i = 0; i < ARRAY_SIZE(para_features); i++) {
if (kvm_check_extension(s, para_features[i].cap)) {
features |= (1 << para_features[i].feature);
}
@@ -724,9 +725,8 @@ int kvm_arch_init_vcpu(CPUState *cs)
return 0;
}
-void kvm_arch_reset_vcpu(CPUState *cs)
+void kvm_arch_reset_vcpu(X86CPU *cpu)
{
- X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
env->exception_injected = -1;
@@ -740,6 +740,16 @@ void kvm_arch_reset_vcpu(CPUState *cs)
}
}
+void kvm_arch_do_init_vcpu(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+
+ /* APs get directly into wait-for-SIPI state. */
+ if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
+ env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
+ }
+}
+
static int kvm_get_supported_msrs(KVMState *s)
{
static int kvm_supported_msrs;
@@ -2005,14 +2015,15 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
}
}
- if (!kvm_irqchip_in_kernel()) {
- /* Force the VCPU out of its inner loop to process any INIT requests
- * or pending TPR access reports. */
- if (cpu->interrupt_request &
- (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
- cpu->exit_request = 1;
- }
+ /* Force the VCPU out of its inner loop to process any INIT requests
+ * or (for userspace APIC, but it is cheap to combine the checks here)
+ * pending TPR access reports.
+ */
+ if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+ cpu->exit_request = 1;
+ }
+ if (!kvm_irqchip_in_kernel()) {
/* Try to inject an interrupt if the guest can accept it */
if (run->ready_for_interrupt_injection &&
(cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -2092,6 +2103,11 @@ int kvm_arch_process_async_events(CPUState *cs)
}
}
+ if (cs->interrupt_request & CPU_INTERRUPT_INIT) {
+ kvm_cpu_synchronize_state(cs);
+ do_cpu_init(cpu);
+ }
+
if (kvm_irqchip_in_kernel()) {
return 0;
}
@@ -2105,10 +2121,6 @@ int kvm_arch_process_async_events(CPUState *cs)
(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
cs->halted = 0;
}
- if (cs->interrupt_request & CPU_INTERRUPT_INIT) {
- kvm_cpu_synchronize_state(cs);
- do_cpu_init(cpu);
- }
if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
kvm_cpu_synchronize_state(cs);
do_cpu_sipi(cpu);
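The para_features hunk pairs with "target-i386: Remove unused data from local array": with the { -1, -1 } sentinel entry gone, the loop bound becomes ARRAY_SIZE() instead of ARRAY_SIZE() - 1. A sketch of the sentinel-free table walk, with made-up entries:

    #include <stdio.h>

    #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

    static const struct { int cap; int feature; } table[] = {
        { 10, 0 },      /* illustrative cap/feature pairs */
        { 11, 2 },
    };

    int main(void)
    {
        unsigned i;
        int features = 0;

        for (i = 0; i < ARRAY_SIZE(table); i++) {
            features |= 1 << table[i].feature;
        }
        printf("features = 0x%x\n", features);   /* prints 0x5 */
        return 0;
    }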
diff --git a/target-i386/kvm_i386.h b/target-i386/kvm_i386.h
index 4392ab4..cac30fd 100644
--- a/target-i386/kvm_i386.h
+++ b/target-i386/kvm_i386.h
@@ -14,6 +14,8 @@
#include "sysemu/kvm.h"
bool kvm_allows_irq0_override(void);
+void kvm_arch_reset_vcpu(X86CPU *cs);
+void kvm_arch_do_init_vcpu(X86CPU *cs);
int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
uint32_t flags, uint32_t *dev_id);
diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c
index 8c3f92c..3cf862e 100644
--- a/target-i386/seg_helper.c
+++ b/target-i386/seg_helper.c
@@ -409,11 +409,7 @@ static void switch_tss(CPUX86State *env, int tss_selector,
for (i = 0; i < 6; i++) {
load_seg_vm(env, i, new_segs[i]);
}
- /* in vm86, CPL is always 3 */
- cpu_x86_set_cpl(env, 3);
} else {
- /* CPL is set the RPL of CS */
- cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
/* first just selectors as the rest may trigger exceptions */
for (i = 0; i < 6; i++) {
cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
@@ -739,6 +735,12 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
}
}
+ /* interrupt gate clear IF mask */
+ if ((type & 1) == 0) {
+ env->eflags &= ~IF_MASK;
+ }
+ env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
+
if (new_stack) {
if (env->eflags & VM_MASK) {
cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
@@ -757,14 +759,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
- cpu_x86_set_cpl(env, dpl);
env->eip = offset;
-
- /* interrupt gate clear IF mask */
- if ((type & 1) == 0) {
- env->eflags &= ~IF_MASK;
- }
- env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#ifdef TARGET_X86_64
@@ -911,6 +906,12 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
PUSHQ(esp, error_code);
}
+ /* interrupt gate clear IF mask */
+ if ((type & 1) == 0) {
+ env->eflags &= ~IF_MASK;
+ }
+ env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
+
if (new_stack) {
ss = 0 | dpl;
cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
@@ -922,14 +923,7 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
- cpu_x86_set_cpl(env, dpl);
env->eip = offset;
-
- /* interrupt gate clear IF mask */
- if ((type & 1) == 0) {
- env->eflags &= ~IF_MASK;
- }
- env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif
@@ -960,7 +954,8 @@ void helper_syscall(CPUX86State *env, int next_eip_addend)
code64 = env->hflags & HF_CS64_MASK;
- cpu_x86_set_cpl(env, 0);
+ env->eflags &= ~env->fmask;
+ cpu_load_eflags(env, env->eflags, 0);
cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_P_MASK |
@@ -972,8 +967,6 @@ void helper_syscall(CPUX86State *env, int next_eip_addend)
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK |
DESC_W_MASK | DESC_A_MASK);
- env->eflags &= ~env->fmask;
- cpu_load_eflags(env, env->eflags, 0);
if (code64) {
env->eip = env->lstar;
} else {
@@ -982,7 +975,7 @@ void helper_syscall(CPUX86State *env, int next_eip_addend)
} else {
env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
- cpu_x86_set_cpl(env, 0);
+ env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
@@ -993,7 +986,6 @@ void helper_syscall(CPUX86State *env, int next_eip_addend)
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK |
DESC_W_MASK | DESC_A_MASK);
- env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
env->eip = (uint32_t)env->star;
}
}
@@ -1014,6 +1006,9 @@ void helper_sysret(CPUX86State *env, int dflag)
}
selector = (env->star >> 48) & 0xffff;
if (env->hflags & HF_LMA_MASK) {
+ cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
+ | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
+ NT_MASK);
if (dflag == 2) {
cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
0, 0xffffffff,
@@ -1035,11 +1030,8 @@ void helper_sysret(CPUX86State *env, int dflag)
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
DESC_W_MASK | DESC_A_MASK);
- cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
- | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
- NT_MASK);
- cpu_x86_set_cpl(env, 3);
} else {
+ env->eflags |= IF_MASK;
cpu_x86_load_seg_cache(env, R_CS, selector | 3,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
@@ -1051,8 +1043,6 @@ void helper_sysret(CPUX86State *env, int dflag)
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
DESC_W_MASK | DESC_A_MASK);
- env->eflags |= IF_MASK;
- cpu_x86_set_cpl(env, 3);
}
}
#endif
@@ -1905,7 +1895,6 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
- cpu_x86_set_cpl(env, dpl);
SET_ESP(sp, sp_mask);
env->eip = offset;
}
@@ -2134,7 +2123,6 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
- cpu_x86_set_cpl(env, rpl);
sp = new_esp;
#ifdef TARGET_X86_64
if (env->hflags & HF_CS64_MASK) {
@@ -2185,7 +2173,6 @@ static inline void helper_ret_protected(CPUX86State *env, int shift,
IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
VIP_MASK);
load_seg_vm(env, R_CS, new_cs & 0xffff);
- cpu_x86_set_cpl(env, 3);
load_seg_vm(env, R_SS, new_ss & 0xffff);
load_seg_vm(env, R_ES, new_es & 0xffff);
load_seg_vm(env, R_DS, new_ds & 0xffff);
@@ -2238,7 +2225,6 @@ void helper_sysenter(CPUX86State *env)
raise_exception_err(env, EXCP0D_GPF, 0);
}
env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
- cpu_x86_set_cpl(env, 0);
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
@@ -2274,7 +2260,6 @@ void helper_sysexit(CPUX86State *env, int dflag)
if (env->sysenter_cs == 0 || cpl != 0) {
raise_exception_err(env, EXCP0D_GPF, 0);
}
- cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
if (dflag == 2) {
cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
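All the seg_helper.c moves follow one pattern: once cpu_x86_set_cpl() is gone, EFLAGS must already hold its post-transition value when CS is loaded, because cpu_x86_load_seg_cache() now derives the CPL from CR0.PE, EFLAGS.VM and the selector. Condensed from the legacy syscall path above, the required ordering is:

    /* 1. Update eflags first, so VM_MASK is already clear... */
    env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
    /* 2. ...then load CS; the hidden-flags update sees the new eflags
     *    and takes CPL 0 from the selector's RPL. */
    cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK |
                           DESC_A_MASK);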
diff --git a/target-i386/smm_helper.c b/target-i386/smm_helper.c
index 35901c9..4841d53 100644
--- a/target-i386/smm_helper.c
+++ b/target-i386/smm_helper.c
@@ -163,6 +163,13 @@ void do_smm_enter(X86CPU *cpu)
cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
DF_MASK));
env->eip = 0x00008000;
+ cpu_x86_update_cr0(env,
+ env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
+ CR0_PG_MASK));
+ cpu_x86_update_cr4(env, 0);
+ env->dr[7] = 0x00000400;
+ CC_OP = CC_OP_EFLAGS;
+
cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
0xffffffff, 0);
cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
@@ -170,13 +177,6 @@ void do_smm_enter(X86CPU *cpu)
cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
-
- cpu_x86_update_cr0(env,
- env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
- CR0_PG_MASK));
- cpu_x86_update_cr4(env, 0);
- env->dr[7] = 0x00000400;
- CC_OP = CC_OP_EFLAGS;
}
void helper_rsm(CPUX86State *env)
@@ -191,16 +191,6 @@ void helper_rsm(CPUX86State *env)
#ifdef TARGET_X86_64
cpu_load_efer(env, ldq_phys(cs->as, sm_state + 0x7ed0));
- for (i = 0; i < 6; i++) {
- offset = 0x7e00 + i * 16;
- cpu_x86_load_seg_cache(env, i,
- lduw_phys(cs->as, sm_state + offset),
- ldq_phys(cs->as, sm_state + offset + 8),
- ldl_phys(cs->as, sm_state + offset + 4),
- (lduw_phys(cs->as, sm_state + offset + 2) &
- 0xf0ff) << 8);
- }
-
env->gdt.base = ldq_phys(cs->as, sm_state + 0x7e68);
env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7e64);
@@ -238,6 +228,16 @@ void helper_rsm(CPUX86State *env)
cpu_x86_update_cr3(env, ldl_phys(cs->as, sm_state + 0x7f50));
cpu_x86_update_cr0(env, ldl_phys(cs->as, sm_state + 0x7f58));
+ for (i = 0; i < 6; i++) {
+ offset = 0x7e00 + i * 16;
+ cpu_x86_load_seg_cache(env, i,
+ lduw_phys(cs->as, sm_state + offset),
+ ldq_phys(cs->as, sm_state + offset + 8),
+ ldl_phys(cs->as, sm_state + offset + 4),
+ (lduw_phys(cs->as, sm_state + offset + 2) &
+ 0xf0ff) << 8);
+ }
+
val = ldl_phys(cs->as, sm_state + 0x7efc); /* revision ID */
if (val & 0x20000) {
env->smbase = ldl_phys(cs->as, sm_state + 0x7f00) & ~0x7fff;
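The smm_helper.c reordering applies the same rule in both directions: do_smm_enter() updates CR0/CR4 before loading the SMM segment cache, and helper_rsm() defers the segment-cache loads until after CR0 has been restored, so the CPL computed inside cpu_x86_load_seg_cache() matches the mode actually being entered. Schematically (condensed from do_smm_enter above):

    /* 1. Leave protected/paged mode first... */
    cpu_x86_update_cr0(env, env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK |
                                           CR0_TS_MASK | CR0_PG_MASK));
    /* 2. ...then load CS, so the derived CPL is the real-mode value 0. */
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff,
                           env->smbase, 0xffffffff, 0);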
diff --git a/target-i386/svm_helper.c b/target-i386/svm_helper.c
index aa17ecd..846eaa5 100644
--- a/target-i386/svm_helper.c
+++ b/target-i386/svm_helper.c
@@ -282,9 +282,6 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
env->vm_vmcb + offsetof(struct vmcb, save.dr7));
env->dr[6] = ldq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, save.dr6));
- cpu_x86_set_cpl(env, ldub_phys(cs->as,
- env->vm_vmcb + offsetof(struct vmcb,
- save.cpl)));
/* FIXME: guest state consistency checks */
@@ -703,7 +700,8 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
cpu_load_eflags(env, ldq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb,
save.rflags)),
- ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
+ VM_MASK));
CC_OP = CC_OP_EFLAGS;
svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
@@ -728,7 +726,6 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
env->vm_hsave + offsetof(struct vmcb, save.dr7));
/* other setups */
- cpu_x86_set_cpl(env, 0);
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
exit_code);
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
@@ -756,10 +753,6 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
from the page table indicated the host's CR3. If the PDPEs contain
illegal state, the processor causes a shutdown. */
- /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
- env->cr[0] |= CR0_PE_MASK;
- env->eflags &= ~VM_MASK;
-
/* Disables all breakpoints in the host DR7 register. */
/* Checks the reloaded host state for consistency. */
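Finally, the svm_helper.c hunks fold the old manual fixups into the normal load paths: the CPL is reloaded implicitly when the segment caches are loaded, and the "RFLAGS.VM = 0" forcing becomes part of the eflags load by excluding VM_MASK from the update mask (in the surrounding code, helper_vmexit() zeroes env->eflags before this load, so a masked-out bit stays clear). A sketch of the update-mask semantics as an illustrative helper; the real cpu_load_eflags() also maintains CC_SRC and env->df separately:

    static inline uint32_t masked_load_eflags(uint32_t old_eflags,
                                              uint32_t new_eflags,
                                              uint32_t update_mask)
    {
        /* Bits inside update_mask come from new_eflags; bits outside it
         * keep their previous value. */
        return (old_eflags & ~update_mask) | (new_eflags & update_mask);
    }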