Diffstat (limited to 'target')
-rw-r--r--  target/i386/Makefile.objs  |   1
-rw-r--r--  target/i386/cpu.c          |  13
-rw-r--r--  target/i386/cpu.h          |  10
-rw-r--r--  target/i386/excp_helper.c  |   4
-rw-r--r--  target/i386/fpu_helper.c   |  37
-rw-r--r--  target/i386/gdbstub.c      |   1
-rw-r--r--  target/i386/helper.c       |   6
-rw-r--r--  target/i386/helper.h       |   1
-rw-r--r--  target/i386/hvf/hvf.c      | 137
-rw-r--r--  target/i386/hvf/vmx.h      |  17
-rw-r--r--  target/i386/kvm.c          |  53
-rw-r--r--  target/i386/kvm_i386.h     |   1
-rw-r--r--  target/i386/machine.c      |  31
-rw-r--r--  target/i386/monitor.c      |  10
-rw-r--r--  target/i386/ops_sse.h      |  28
-rw-r--r--  target/i386/sev-stub.c     |   3
-rw-r--r--  target/i386/sev.c          |  27
-rw-r--r--  target/i386/sev_i386.h     |   2
-rw-r--r--  target/i386/svm.h          |   1
-rw-r--r--  target/i386/svm_helper.c   |   7
-rw-r--r--  target/i386/tcg-stub.c     |  25
-rw-r--r--  target/i386/translate.c    |  36
22 files changed, 258 insertions, 193 deletions
diff --git a/target/i386/Makefile.objs b/target/i386/Makefile.objs
index 48e0c28..0b93143 100644
--- a/target/i386/Makefile.objs
+++ b/target/i386/Makefile.objs
@@ -3,6 +3,7 @@ obj-$(CONFIG_TCG) += translate.o
obj-$(CONFIG_TCG) += bpt_helper.o cc_helper.o excp_helper.o fpu_helper.o
obj-$(CONFIG_TCG) += int_helper.o mem_helper.o misc_helper.o mpx_helper.o
obj-$(CONFIG_TCG) += seg_helper.o smm_helper.o svm_helper.o
+obj-$(call lnot,$(CONFIG_TCG)) += tcg-stub.o
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
ifeq ($(CONFIG_SOFTMMU),y)
obj-y += machine.o arch_memory_mapping.o arch_dump.o monitor.o
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index e46ab8f..1e51232 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -986,8 +986,8 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
NULL, NULL, NULL, NULL,
"avx512-vp2intersect", NULL, "md-clear", NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL /* pconfig */, NULL,
+ NULL, NULL, "serialize", NULL,
+ "tsx-ldtrk", NULL, NULL /* pconfig */, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, "spec-ctrl", "stibp",
NULL, "arch-capabilities", "core-capability", "ssbd",
@@ -5968,6 +5968,7 @@ static void x86_cpu_reset(DeviceState *dev)
/* init to reset state */
env->hflags2 |= HF2_GIF_MASK;
+ env->hflags &= ~HF_GUEST_MASK;
cpu_x86_update_cr0(env, 0x60000010);
env->a20_mask = ~0x0;
@@ -6079,9 +6080,6 @@ static void x86_cpu_reset(DeviceState *dev)
if (kvm_enabled()) {
kvm_arch_reset_vcpu(cpu);
}
- else if (hvf_enabled()) {
- hvf_reset_vcpu(s);
- }
#endif
}
@@ -6400,7 +6398,7 @@ static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
} else if (cpu->env.cpuid_min_level < 0x14) {
mark_unavailable_features(cpu, FEAT_7_0_EBX,
CPUID_7_0_EBX_INTEL_PT,
- "Intel PT need CPUID leaf 0x14, please set by \"-cpu ...,+intel-pt,level=0x14\"");
+ "Intel PT need CPUID leaf 0x14, please set by \"-cpu ...,+intel-pt,min-level=0x14\"");
}
}
@@ -6511,6 +6509,9 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
&cpu->mwait.ecx, &cpu->mwait.edx);
env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
+ if (kvm_enabled() && kvm_has_waitpkg()) {
+ env->features[FEAT_7_0_ECX] |= CPUID_7_0_ECX_WAITPKG;
+ }
}
if (kvm_enabled() && cpu->ucode_rev == 0) {
cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state,
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 7d77efd..37fffa5c 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -777,6 +777,10 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3)
/* AVX512 Vector Pair Intersection to a Pair of Mask Registers */
#define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8)
+/* SERIALIZE instruction */
+#define CPUID_7_0_EDX_SERIALIZE (1U << 14)
+/* TSX Suspend Load Address Tracking instruction */
+#define CPUID_7_0_EDX_TSX_LDTRK (1U << 16)
/* Speculation Control */
#define CPUID_7_0_EDX_SPEC_CTRL (1U << 26)
/* Single Thread Indirect Branch Predictors */
@@ -2118,6 +2122,11 @@ static inline bool cpu_has_vmx(CPUX86State *env)
return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
}
+static inline bool cpu_has_svm(CPUX86State *env)
+{
+ return env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM;
+}
+
/*
* In order for a vCPU to enter VMX operation it must have CR4.VMXE set.
* Since it was set, CR4.VMXE must remain set as long as vCPU is in
@@ -2143,6 +2152,7 @@ static inline bool cpu_vmx_maybe_enabled(CPUX86State *env)
/* fpu_helper.c */
void update_fp_status(CPUX86State *env);
void update_mxcsr_status(CPUX86State *env);
+void update_mxcsr_from_sse_status(CPUX86State *env);
static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
{
diff --git a/target/i386/excp_helper.c b/target/i386/excp_helper.c
index 1447bda..b10c7ec 100644
--- a/target/i386/excp_helper.c
+++ b/target/i386/excp_helper.c
@@ -262,8 +262,8 @@ static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
}
ptep = pde | PG_NX_MASK;
- /* if PSE bit is set, then we use a 4MB page */
- if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+ /* if host cr4 PSE bit is set, then we use a 4MB page */
+ if ((pde & PG_PSE_MASK) && (env->nested_pg_mode & SVM_NPT_PSE)) {
page_size = 4096 * 1024;
pte_addr = pde_addr;
diff --git a/target/i386/fpu_helper.c b/target/i386/fpu_helper.c
index 71cec39..f5e6c4b 100644
--- a/target/i386/fpu_helper.c
+++ b/target/i386/fpu_helper.c
@@ -2539,6 +2539,7 @@ static void do_xsave_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
static void do_xsave_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
{
+ update_mxcsr_from_sse_status(env);
cpu_stl_data_ra(env, ptr + XO(legacy.mxcsr), env->mxcsr, ra);
cpu_stl_data_ra(env, ptr + XO(legacy.mxcsr_mask), 0x0000ffff, ra);
}
@@ -2968,11 +2969,45 @@ void update_mxcsr_status(CPUX86State *env)
}
set_float_rounding_mode(rnd_type, &env->sse_status);
+ /* Set exception flags. */
+ set_float_exception_flags((mxcsr & FPUS_IE ? float_flag_invalid : 0) |
+ (mxcsr & FPUS_ZE ? float_flag_divbyzero : 0) |
+ (mxcsr & FPUS_OE ? float_flag_overflow : 0) |
+ (mxcsr & FPUS_UE ? float_flag_underflow : 0) |
+ (mxcsr & FPUS_PE ? float_flag_inexact : 0),
+ &env->sse_status);
+
/* set denormals are zero */
set_flush_inputs_to_zero((mxcsr & SSE_DAZ) ? 1 : 0, &env->sse_status);
/* set flush to zero */
- set_flush_to_zero((mxcsr & SSE_FZ) ? 1 : 0, &env->fp_status);
+ set_flush_to_zero((mxcsr & SSE_FZ) ? 1 : 0, &env->sse_status);
+}
+
+void update_mxcsr_from_sse_status(CPUX86State *env)
+{
+ if (tcg_enabled()) {
+ uint8_t flags = get_float_exception_flags(&env->sse_status);
+ /*
+ * The MXCSR denormal flag has opposite semantics to
+ * float_flag_input_denormal (the softfloat code sets that flag
+ * only when flushing input denormals to zero, but SSE sets it
+ * only when not flushing them to zero), so is not converted
+ * here.
+ */
+ env->mxcsr |= ((flags & float_flag_invalid ? FPUS_IE : 0) |
+ (flags & float_flag_divbyzero ? FPUS_ZE : 0) |
+ (flags & float_flag_overflow ? FPUS_OE : 0) |
+ (flags & float_flag_underflow ? FPUS_UE : 0) |
+ (flags & float_flag_inexact ? FPUS_PE : 0) |
+ (flags & float_flag_output_denormal ? FPUS_UE | FPUS_PE :
+ 0));
+ }
+}
+
+void helper_update_mxcsr(CPUX86State *env)
+{
+ update_mxcsr_from_sse_status(env);
}
void helper_ldmxcsr(CPUX86State *env, uint32_t val)
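
The new update_mxcsr_from_sse_status() above folds softfloat's accumulated exception flags into the architectural MXCSR status bits (IE, DE, ZE, OE, UE, PE occupy bits 0-5), and deliberately skips the denormal flag because its sense differs between MXCSR and softfloat. A rough, self-contained sketch of that one-way mapping, using locally defined constants rather than QEMU's real float_flag_*/FPUS_* macros:

#include <stdint.h>
#include <stdio.h>

/* Architectural MXCSR status-flag positions: bits 0..5. */
enum { MX_IE = 1 << 0, MX_DE = 1 << 1, MX_ZE = 1 << 2,
       MX_OE = 1 << 3, MX_UE = 1 << 4, MX_PE = 1 << 5 };

/* Stand-ins for softfloat's accumulated flags (values are illustrative). */
enum { FLAG_INVALID = 1 << 0, FLAG_DIVBYZERO = 1 << 1, FLAG_OVERFLOW = 1 << 2,
       FLAG_UNDERFLOW = 1 << 3, FLAG_INEXACT = 1 << 4, FLAG_OUT_DENORMAL = 1 << 5 };

/* One-way fold of accumulated flags into MXCSR, in the spirit of
 * update_mxcsr_from_sse_status(): flags only ever set status bits,
 * and an output denormal reports as underflow + precision. */
static uint32_t mxcsr_from_flags(uint32_t mxcsr, uint8_t flags)
{
    mxcsr |= (flags & FLAG_INVALID      ? MX_IE : 0) |
             (flags & FLAG_DIVBYZERO    ? MX_ZE : 0) |
             (flags & FLAG_OVERFLOW     ? MX_OE : 0) |
             (flags & FLAG_UNDERFLOW    ? MX_UE : 0) |
             (flags & FLAG_INEXACT      ? MX_PE : 0) |
             (flags & FLAG_OUT_DENORMAL ? MX_UE | MX_PE : 0);
    return mxcsr;
}

int main(void)
{
    /* 0x1f80 is the reset MXCSR value (all exceptions masked, no flags set). */
    uint32_t mxcsr = mxcsr_from_flags(0x1f80, FLAG_OVERFLOW | FLAG_INEXACT);
    printf("MXCSR = 0x%04x\n", mxcsr);   /* 0x1fa8: OE and PE now set */
    return 0;
}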
diff --git a/target/i386/gdbstub.c b/target/i386/gdbstub.c
index b98a995..9ae43bd 100644
--- a/target/i386/gdbstub.c
+++ b/target/i386/gdbstub.c
@@ -184,6 +184,7 @@ int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
return gdb_get_reg32(mem_buf, 0); /* fop */
case IDX_MXCSR_REG:
+ update_mxcsr_from_sse_status(env);
return gdb_get_reg32(mem_buf, env->mxcsr);
case IDX_CTL_CR0_REG:
diff --git a/target/i386/helper.c b/target/i386/helper.c
index c3a6e4f..70be53e 100644
--- a/target/i386/helper.c
+++ b/target/i386/helper.c
@@ -370,10 +370,11 @@ void x86_cpu_dump_local_apic_state(CPUState *cs, int flags)
dump_apic_lvt("LVTTHMR", lvt[APIC_LVT_THERMAL], false);
dump_apic_lvt("LVTT", lvt[APIC_LVT_TIMER], true);
- qemu_printf("Timer\t DCR=0x%x (divide by %u) initial_count = %u\n",
+ qemu_printf("Timer\t DCR=0x%x (divide by %u) initial_count = %u"
+ " current_count = %u\n",
s->divide_conf & APIC_DCR_MASK,
divider_conf(s->divide_conf),
- s->initial_count);
+ s->initial_count, apic_get_current_count(s));
qemu_printf("SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
s->spurious_vec,
@@ -544,6 +545,7 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags)
for(i = 0; i < 8; i++) {
fptag |= ((!env->fptags[i]) << i);
}
+ update_mxcsr_from_sse_status(env);
qemu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
env->fpuc,
(env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
diff --git a/target/i386/helper.h b/target/i386/helper.h
index 8f9e190..c2ae2f7 100644
--- a/target/i386/helper.h
+++ b/target/i386/helper.h
@@ -207,6 +207,7 @@ DEF_HELPER_FLAGS_2(pext, TCG_CALL_NO_RWG_SE, tl, tl, tl)
/* MMX/SSE */
DEF_HELPER_2(ldmxcsr, void, env, i32)
+DEF_HELPER_1(update_mxcsr, void, env)
DEF_HELPER_1(enter_mmx, void, env)
DEF_HELPER_1(emms, void, env)
DEF_HELPER_3(movq, void, env, ptr, ptr)
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index be016b9..d81f569 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -282,47 +282,54 @@ void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,
}
}
-/* TODO: synchronize vcpu state */
static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
- CPUState *cpu_state = cpu;
- if (cpu_state->vcpu_dirty == 0) {
- hvf_get_registers(cpu_state);
+ if (!cpu->vcpu_dirty) {
+ hvf_get_registers(cpu);
+ cpu->vcpu_dirty = true;
}
-
- cpu_state->vcpu_dirty = 1;
}
-void hvf_cpu_synchronize_state(CPUState *cpu_state)
+void hvf_cpu_synchronize_state(CPUState *cpu)
{
- if (cpu_state->vcpu_dirty == 0) {
- run_on_cpu(cpu_state, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
+ if (!cpu->vcpu_dirty) {
+ run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
}
}
-static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
+static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu,
+ run_on_cpu_data arg)
{
- CPUState *cpu_state = cpu;
- hvf_put_registers(cpu_state);
- cpu_state->vcpu_dirty = false;
+ hvf_put_registers(cpu);
+ cpu->vcpu_dirty = false;
}
-void hvf_cpu_synchronize_post_reset(CPUState *cpu_state)
+void hvf_cpu_synchronize_post_reset(CPUState *cpu)
{
- run_on_cpu(cpu_state, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
+ run_on_cpu(cpu, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}
static void do_hvf_cpu_synchronize_post_init(CPUState *cpu,
run_on_cpu_data arg)
{
- CPUState *cpu_state = cpu;
- hvf_put_registers(cpu_state);
- cpu_state->vcpu_dirty = false;
+ hvf_put_registers(cpu);
+ cpu->vcpu_dirty = false;
+}
+
+void hvf_cpu_synchronize_post_init(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
+}
+
+static void do_hvf_cpu_synchronize_pre_loadvm(CPUState *cpu,
+ run_on_cpu_data arg)
+{
+ cpu->vcpu_dirty = true;
}
-void hvf_cpu_synchronize_post_init(CPUState *cpu_state)
+void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
- run_on_cpu(cpu_state, do_hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
+ run_on_cpu(cpu, do_hvf_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}
static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
@@ -441,96 +448,6 @@ static MemoryListener hvf_memory_listener = {
.log_sync = hvf_log_sync,
};
-void hvf_reset_vcpu(CPUState *cpu) {
- uint64_t pdpte[4] = {0, 0, 0, 0};
- int i;
-
- /* TODO: this shouldn't be needed; there is already a call to
- * cpu_synchronize_all_post_reset in vl.c
- */
- wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, 0);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, 0);
-
- /* Initialize PDPTE */
- for (i = 0; i < 4; i++) {
- wvmcs(cpu->hvf_fd, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
- }
-
- macvm_set_cr0(cpu->hvf_fd, 0x60000010);
-
- wvmcs(cpu->hvf_fd, VMCS_CR4_MASK, CR4_VMXE_MASK);
- wvmcs(cpu->hvf_fd, VMCS_CR4_SHADOW, 0x0);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_CR4, CR4_VMXE_MASK);
-
- /* set VMCS guest state fields */
- wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_SELECTOR, 0xf000);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_LIMIT, 0xffff);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_ACCESS_RIGHTS, 0x9b);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_BASE, 0xffff0000);
-
- wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_SELECTOR, 0);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_LIMIT, 0xffff);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_ACCESS_RIGHTS, 0x93);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_BASE, 0);
-
- wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_SELECTOR, 0);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_LIMIT, 0xffff);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_ACCESS_RIGHTS, 0x93);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_BASE, 0);
-
- wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_SELECTOR, 0);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_LIMIT, 0xffff);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_ACCESS_RIGHTS, 0x93);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, 0);
-
- wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_SELECTOR, 0);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_LIMIT, 0xffff);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_ACCESS_RIGHTS, 0x93);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, 0);
-
- wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_SELECTOR, 0);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_LIMIT, 0xffff);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_ACCESS_RIGHTS, 0x93);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_BASE, 0);
-
- wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_SELECTOR, 0);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT, 0);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_ACCESS_RIGHTS, 0x10000);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE, 0);
-
- wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_SELECTOR, 0);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_LIMIT, 0);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_ACCESS_RIGHTS, 0x83);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_BASE, 0);
-
- wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT, 0);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE, 0);
-
- wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT, 0);
- wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE, 0);
-
- /*wvmcs(cpu->hvf_fd, VMCS_GUEST_CR2, 0x0);*/
- wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, 0x0);
-
- wreg(cpu->hvf_fd, HV_X86_RIP, 0xfff0);
- wreg(cpu->hvf_fd, HV_X86_RDX, 0x623);
- wreg(cpu->hvf_fd, HV_X86_RFLAGS, 0x2);
- wreg(cpu->hvf_fd, HV_X86_RSP, 0x0);
- wreg(cpu->hvf_fd, HV_X86_RAX, 0x0);
- wreg(cpu->hvf_fd, HV_X86_RBX, 0x0);
- wreg(cpu->hvf_fd, HV_X86_RCX, 0x0);
- wreg(cpu->hvf_fd, HV_X86_RSI, 0x0);
- wreg(cpu->hvf_fd, HV_X86_RDI, 0x0);
- wreg(cpu->hvf_fd, HV_X86_RBP, 0x0);
-
- for (int i = 0; i < 8; i++) {
- wreg(cpu->hvf_fd, HV_X86_R8 + i, 0x0);
- }
-
- hv_vcpu_invalidate_tlb(cpu->hvf_fd);
- hv_vcpu_flush(cpu->hvf_fd);
-}
-
void hvf_vcpu_destroy(CPUState *cpu)
{
X86CPU *x86_cpu = X86_CPU(cpu);
diff --git a/target/i386/hvf/vmx.h b/target/i386/hvf/vmx.h
index ce2a153..75ba1e2 100644
--- a/target/i386/hvf/vmx.h
+++ b/target/i386/hvf/vmx.h
@@ -121,7 +121,9 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
uint64_t pdpte[4] = {0, 0, 0, 0};
uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);
+ uint64_t changed_cr0 = old_cr0 ^ cr0;
uint64_t mask = CR0_PG | CR0_CD | CR0_NW | CR0_NE | CR0_ET;
+ uint64_t entry_ctls;
if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&
!(efer & MSR_EFER_LME)) {
@@ -138,12 +140,16 @@ static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);
if (efer & MSR_EFER_LME) {
- if (!(old_cr0 & CR0_PG) && (cr0 & CR0_PG)) {
- enter_long_mode(vcpu, cr0, efer);
- }
- if (/*(old_cr0 & CR0_PG) &&*/ !(cr0 & CR0_PG)) {
- exit_long_mode(vcpu, cr0, efer);
+ if (changed_cr0 & CR0_PG) {
+ if (cr0 & CR0_PG) {
+ enter_long_mode(vcpu, cr0, efer);
+ } else {
+ exit_long_mode(vcpu, cr0, efer);
+ }
}
+ } else {
+ entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
+ wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);
}
/* Filter new CR0 after we are finished examining it above. */
@@ -173,6 +179,7 @@ static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
/* BUG, should take considering overlap.. */
wreg(cpu->hvf_fd, HV_X86_RIP, rip);
+ env->eip = rip;
/* after moving forward in rip, we need to clean INTERRUPTABILITY */
val = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
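
In the macvm_set_cr0() hunk above, the two separate CR0.PG tests are replaced by a single XOR of old and new CR0, so the long-mode entry/exit paths run only on an actual paging transition. A minimal sketch of that transition test with plain integers (CR0.PG is architecturally bit 31; the real code reads and writes the VMCS instead):

#include <stdint.h>
#include <stdio.h>

#define CR0_PG (1u << 31)   /* paging enable, architectural bit 31 */

static const char *pg_transition(uint32_t old_cr0, uint32_t new_cr0)
{
    uint32_t changed = old_cr0 ^ new_cr0;      /* bits that actually flipped */

    if (!(changed & CR0_PG)) {
        return "PG unchanged: neither path taken";
    }
    return (new_cr0 & CR0_PG) ? "PG 0->1: enter long mode (if EFER.LME)"
                              : "PG 1->0: exit long mode";
}

int main(void)
{
    uint32_t old_cr0 = 0x60000010;             /* the x86 reset value of CR0 */
    printf("%s\n", pg_transition(old_cr0, old_cr0 | CR0_PG));
    return 0;
}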
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 6adbff3..b8455c8 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -411,12 +411,6 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
if (host_tsx_blacklisted()) {
ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
}
- } else if (function == 7 && index == 0 && reg == R_ECX) {
- if (enable_cpu_pm) {
- ret |= CPUID_7_0_ECX_WAITPKG;
- } else {
- ret &= ~CPUID_7_0_ECX_WAITPKG;
- }
} else if (function == 7 && index == 0 && reg == R_EDX) {
/*
* Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
@@ -1840,16 +1834,18 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (max_nested_state_len > 0) {
assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
- if (cpu_has_vmx(env)) {
+ if (cpu_has_vmx(env) || cpu_has_svm(env)) {
struct kvm_vmx_nested_state_hdr *vmx_hdr;
env->nested_state = g_malloc0(max_nested_state_len);
env->nested_state->size = max_nested_state_len;
env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
- vmx_hdr = &env->nested_state->hdr.vmx;
- vmx_hdr->vmxon_pa = -1ull;
- vmx_hdr->vmcs12_pa = -1ull;
+ if (cpu_has_vmx(env)) {
+ vmx_hdr = &env->nested_state->hdr.vmx;
+ vmx_hdr->vmxon_pa = -1ull;
+ vmx_hdr->vmcs12_pa = -1ull;
+ }
}
}
@@ -3873,6 +3869,20 @@ static int kvm_put_nested_state(X86CPU *cpu)
return 0;
}
+ /*
+ * Copy flags that are affected by reset from env->hflags and env->hflags2.
+ */
+ if (env->hflags & HF_GUEST_MASK) {
+ env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE;
+ } else {
+ env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE;
+ }
+ if (env->hflags2 & HF2_GIF_MASK) {
+ env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET;
+ } else {
+ env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET;
+ }
+
assert(env->nested_state->size <= max_nested_state_len);
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
}
@@ -3901,11 +3911,19 @@ static int kvm_get_nested_state(X86CPU *cpu)
return ret;
}
+ /*
+ * Copy flags that are affected by reset to env->hflags and env->hflags2.
+ */
if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
env->hflags |= HF_GUEST_MASK;
} else {
env->hflags &= ~HF_GUEST_MASK;
}
+ if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) {
+ env->hflags2 |= HF2_GIF_MASK;
+ } else {
+ env->hflags2 &= ~HF2_GIF_MASK;
+ }
return ret;
}
@@ -3917,6 +3935,12 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
+ /* must be before kvm_put_nested_state so that EFER.SVME is set */
+ ret = kvm_put_sregs(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+
if (level >= KVM_PUT_RESET_STATE) {
ret = kvm_put_nested_state(x86_cpu);
if (ret < 0) {
@@ -3950,10 +3974,6 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
if (ret < 0) {
return ret;
}
- ret = kvm_put_sregs(x86_cpu);
- if (ret < 0) {
- return ret;
- }
/* must be before kvm_put_msrs */
ret = kvm_inject_mce_oldstyle(x86_cpu);
if (ret < 0) {
@@ -4704,3 +4724,8 @@ int kvm_arch_msi_data_to_gsi(uint32_t data)
{
abort();
}
+
+bool kvm_has_waitpkg(void)
+{
+ return has_msr_umwait;
+}
diff --git a/target/i386/kvm_i386.h b/target/i386/kvm_i386.h
index 00bde7a..064b879 100644
--- a/target/i386/kvm_i386.h
+++ b/target/i386/kvm_i386.h
@@ -44,6 +44,7 @@ void kvm_put_apicbase(X86CPU *cpu, uint64_t value);
bool kvm_enable_x2apic(void);
bool kvm_has_x2apic_api(void);
+bool kvm_has_waitpkg(void);
bool kvm_hv_vpindex_settable(void);
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 0c96531..b1acf7d 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -1071,13 +1071,41 @@ static const VMStateDescription vmstate_vmx_nested_state = {
}
};
+static bool svm_nested_state_needed(void *opaque)
+{
+ struct kvm_nested_state *nested_state = opaque;
+
+ /*
+ * HF_GUEST_MASK and HF2_GIF_MASK are already serialized
+ * via hflags and hflags2, all that's left is the opaque
+ * nested state blob.
+ */
+ return (nested_state->format == KVM_STATE_NESTED_FORMAT_SVM &&
+ nested_state->size > offsetof(struct kvm_nested_state, data));
+}
+
+static const VMStateDescription vmstate_svm_nested_state = {
+ .name = "cpu/kvm_nested_state/svm",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = svm_nested_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_U64(hdr.svm.vmcb_pa, struct kvm_nested_state),
+ VMSTATE_UINT8_ARRAY(data.svm[0].vmcb12,
+ struct kvm_nested_state,
+ KVM_STATE_NESTED_SVM_VMCB_SIZE),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static bool nested_state_needed(void *opaque)
{
X86CPU *cpu = opaque;
CPUX86State *env = &cpu->env;
return (env->nested_state &&
- vmx_nested_state_needed(env->nested_state));
+ (vmx_nested_state_needed(env->nested_state) ||
+ svm_nested_state_needed(env->nested_state)));
}
static int nested_state_post_load(void *opaque, int version_id)
@@ -1139,6 +1167,7 @@ static const VMStateDescription vmstate_kvm_nested_state = {
},
.subsections = (const VMStateDescription*[]) {
&vmstate_vmx_nested_state,
+ &vmstate_svm_nested_state,
NULL
}
};
diff --git a/target/i386/monitor.c b/target/i386/monitor.c
index 27ebfa3..7abae3c 100644
--- a/target/i386/monitor.c
+++ b/target/i386/monitor.c
@@ -726,13 +726,5 @@ SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)
SevCapability *qmp_query_sev_capabilities(Error **errp)
{
- SevCapability *data;
-
- data = sev_get_capabilities();
- if (!data) {
- error_setg(errp, "SEV feature is not available");
- return NULL;
- }
-
- return data;
+ return sev_get_capabilities(errp);
}
diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h
index 14f2b16..c7614f8 100644
--- a/target/i386/ops_sse.h
+++ b/target/i386/ops_sse.h
@@ -843,6 +843,7 @@ int64_t helper_cvttsd2sq(CPUX86State *env, ZMMReg *s)
void helper_rsqrtps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
+ uint8_t old_flags = get_float_exception_flags(&env->sse_status);
d->ZMM_S(0) = float32_div(float32_one,
float32_sqrt(s->ZMM_S(0), &env->sse_status),
&env->sse_status);
@@ -855,26 +856,33 @@ void helper_rsqrtps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
d->ZMM_S(3) = float32_div(float32_one,
float32_sqrt(s->ZMM_S(3), &env->sse_status),
&env->sse_status);
+ set_float_exception_flags(old_flags, &env->sse_status);
}
void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
+ uint8_t old_flags = get_float_exception_flags(&env->sse_status);
d->ZMM_S(0) = float32_div(float32_one,
float32_sqrt(s->ZMM_S(0), &env->sse_status),
&env->sse_status);
+ set_float_exception_flags(old_flags, &env->sse_status);
}
void helper_rcpps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
+ uint8_t old_flags = get_float_exception_flags(&env->sse_status);
d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status);
d->ZMM_S(1) = float32_div(float32_one, s->ZMM_S(1), &env->sse_status);
d->ZMM_S(2) = float32_div(float32_one, s->ZMM_S(2), &env->sse_status);
d->ZMM_S(3) = float32_div(float32_one, s->ZMM_S(3), &env->sse_status);
+ set_float_exception_flags(old_flags, &env->sse_status);
}
void helper_rcpss(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
+ uint8_t old_flags = get_float_exception_flags(&env->sse_status);
d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status);
+ set_float_exception_flags(old_flags, &env->sse_status);
}
static inline uint64_t helper_extrq(uint64_t src, int shift, int len)
@@ -1764,6 +1772,7 @@ void glue(helper_phminposuw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
uint32_t mode)
{
+ uint8_t old_flags = get_float_exception_flags(&env->sse_status);
signed char prev_rounding_mode;
prev_rounding_mode = env->sse_status.float_rounding_mode;
@@ -1789,19 +1798,18 @@ void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
d->ZMM_S(2) = float32_round_to_int(s->ZMM_S(2), &env->sse_status);
d->ZMM_S(3) = float32_round_to_int(s->ZMM_S(3), &env->sse_status);
-#if 0 /* TODO */
- if (mode & (1 << 3)) {
+ if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) {
set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
~float_flag_inexact,
&env->sse_status);
}
-#endif
env->sse_status.float_rounding_mode = prev_rounding_mode;
}
void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
uint32_t mode)
{
+ uint8_t old_flags = get_float_exception_flags(&env->sse_status);
signed char prev_rounding_mode;
prev_rounding_mode = env->sse_status.float_rounding_mode;
@@ -1825,19 +1833,18 @@ void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status);
d->ZMM_D(1) = float64_round_to_int(s->ZMM_D(1), &env->sse_status);
-#if 0 /* TODO */
- if (mode & (1 << 3)) {
+ if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) {
set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
~float_flag_inexact,
&env->sse_status);
}
-#endif
env->sse_status.float_rounding_mode = prev_rounding_mode;
}
void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
uint32_t mode)
{
+ uint8_t old_flags = get_float_exception_flags(&env->sse_status);
signed char prev_rounding_mode;
prev_rounding_mode = env->sse_status.float_rounding_mode;
@@ -1860,19 +1867,18 @@ void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
d->ZMM_S(0) = float32_round_to_int(s->ZMM_S(0), &env->sse_status);
-#if 0 /* TODO */
- if (mode & (1 << 3)) {
+ if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) {
set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
~float_flag_inexact,
&env->sse_status);
}
-#endif
env->sse_status.float_rounding_mode = prev_rounding_mode;
}
void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
uint32_t mode)
{
+ uint8_t old_flags = get_float_exception_flags(&env->sse_status);
signed char prev_rounding_mode;
prev_rounding_mode = env->sse_status.float_rounding_mode;
@@ -1895,13 +1901,11 @@ void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status);
-#if 0 /* TODO */
- if (mode & (1 << 3)) {
+ if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) {
set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
~float_flag_inexact,
&env->sse_status);
}
-#endif
env->sse_status.float_rounding_mode = prev_rounding_mode;
}
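
The ops_sse.h changes above all follow one idiom: snapshot the accumulated softfloat exception flags before the operation, then either restore the snapshot wholesale (rsqrt/rcp, which architecturally signal no SIMD floating-point exceptions) or strip a newly raised inexact flag when bit 3 of the rounding immediate asks for precision-exception suppression. A compact sketch of both variants, with a made-up status type standing in for QEMU's float_status:

#include <stdint.h>

typedef struct { uint8_t exception_flags; } fp_status;  /* toy float_status */

enum { FLAG_INEXACT = 1 << 0 };   /* illustrative bit assignment only */

/* Variant 1: run op() without letting it disturb the accumulated flags,
 * as helper_rsqrtps()/helper_rcpps() now do. */
static float run_quiet(float (*op)(float, fp_status *), float x, fp_status *st)
{
    uint8_t saved = st->exception_flags;
    float r = op(x, st);
    st->exception_flags = saved;
    return r;
}

/* Variant 2: drop a newly raised inexact flag, as the round helpers do
 * when bit 3 of the immediate is set and inexact was not already pending. */
static float run_suppress_inexact(float (*op)(float, fp_status *), float x,
                                  fp_status *st)
{
    uint8_t old = st->exception_flags;
    float r = op(x, st);
    if (!(old & FLAG_INEXACT)) {
        st->exception_flags &= ~FLAG_INEXACT;
    }
    return r;
}

static float add_half(float x, fp_status *st)
{
    st->exception_flags |= FLAG_INEXACT;       /* pretend the result rounded */
    return x + 0.5f;
}

int main(void)
{
    fp_status st = { 0 };
    run_quiet(add_half, 1.0f, &st);            /* flags stay clear */
    run_suppress_inexact(add_half, 1.0f, &st); /* inexact raised, then dropped */
    return st.exception_flags;                 /* exits with 0 */
}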
diff --git a/target/i386/sev-stub.c b/target/i386/sev-stub.c
index e5ee133..88e3f39 100644
--- a/target/i386/sev-stub.c
+++ b/target/i386/sev-stub.c
@@ -44,7 +44,8 @@ char *sev_get_launch_measurement(void)
return NULL;
}
-SevCapability *sev_get_capabilities(void)
+SevCapability *sev_get_capabilities(Error **errp)
{
+ error_setg(errp, "SEV is not available in this QEMU");
return NULL;
}
diff --git a/target/i386/sev.c b/target/i386/sev.c
index f100a53..c3ecf86 100644
--- a/target/i386/sev.c
+++ b/target/i386/sev.c
@@ -399,7 +399,7 @@ sev_get_info(void)
static int
sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain,
- size_t *cert_chain_len)
+ size_t *cert_chain_len, Error **errp)
{
guchar *pdh_data = NULL;
guchar *cert_chain_data = NULL;
@@ -410,8 +410,8 @@ sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain,
r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err);
if (r < 0) {
if (err != SEV_RET_INVALID_LEN) {
- error_report("failed to export PDH cert ret=%d fw_err=%d (%s)",
- r, err, fw_error_to_str(err));
+ error_setg(errp, "failed to export PDH cert ret=%d fw_err=%d (%s)",
+ r, err, fw_error_to_str(err));
return 1;
}
}
@@ -423,8 +423,8 @@ sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain,
r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err);
if (r < 0) {
- error_report("failed to export PDH cert ret=%d fw_err=%d (%s)",
- r, err, fw_error_to_str(err));
+ error_setg(errp, "failed to export PDH cert ret=%d fw_err=%d (%s)",
+ r, err, fw_error_to_str(err));
goto e_free;
}
@@ -441,7 +441,7 @@ e_free:
}
SevCapability *
-sev_get_capabilities(void)
+sev_get_capabilities(Error **errp)
{
SevCapability *cap = NULL;
guchar *pdh_data = NULL;
@@ -450,15 +450,24 @@ sev_get_capabilities(void)
uint32_t ebx;
int fd;
+ if (!kvm_enabled()) {
+ error_setg(errp, "KVM not enabled");
+ return NULL;
+ }
+ if (kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, NULL) < 0) {
+ error_setg(errp, "SEV is not enabled in KVM");
+ return NULL;
+ }
+
fd = open(DEFAULT_SEV_DEVICE, O_RDWR);
if (fd < 0) {
- error_report("%s: Failed to open %s '%s'", __func__,
- DEFAULT_SEV_DEVICE, strerror(errno));
+ error_setg_errno(errp, errno, "Failed to open %s",
+ DEFAULT_SEV_DEVICE);
return NULL;
}
if (sev_get_pdh_info(fd, &pdh_data, &pdh_len,
- &cert_chain_data, &cert_chain_len)) {
+ &cert_chain_data, &cert_chain_len, errp)) {
goto out;
}
diff --git a/target/i386/sev_i386.h b/target/i386/sev_i386.h
index 8eb7de1..4db6960 100644
--- a/target/i386/sev_i386.h
+++ b/target/i386/sev_i386.h
@@ -34,6 +34,6 @@ extern SevInfo *sev_get_info(void);
extern uint32_t sev_get_cbit_position(void);
extern uint32_t sev_get_reduced_phys_bits(void);
extern char *sev_get_launch_measurement(void);
-extern SevCapability *sev_get_capabilities(void);
+extern SevCapability *sev_get_capabilities(Error **errp);
#endif
diff --git a/target/i386/svm.h b/target/i386/svm.h
index 23a3a04..ae30fc6 100644
--- a/target/i386/svm.h
+++ b/target/i386/svm.h
@@ -135,6 +135,7 @@
#define SVM_NPT_PAE (1 << 0)
#define SVM_NPT_LMA (1 << 1)
#define SVM_NPT_NXE (1 << 2)
+#define SVM_NPT_PSE (1 << 3)
#define SVM_NPTEXIT_P (1ULL << 0)
#define SVM_NPTEXIT_RW (1ULL << 1)
diff --git a/target/i386/svm_helper.c b/target/i386/svm_helper.c
index 7b8105a..6224387 100644
--- a/target/i386/svm_helper.c
+++ b/target/i386/svm_helper.c
@@ -209,16 +209,21 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
control.nested_ctl));
+
+ env->nested_pg_mode = 0;
+
if (nested_ctl & SVM_NPT_ENABLED) {
env->nested_cr3 = x86_ldq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb,
control.nested_cr3));
env->hflags2 |= HF2_NPT_MASK;
- env->nested_pg_mode = 0;
if (env->cr[4] & CR4_PAE_MASK) {
env->nested_pg_mode |= SVM_NPT_PAE;
}
+ if (env->cr[4] & CR4_PSE_MASK) {
+ env->nested_pg_mode |= SVM_NPT_PSE;
+ }
if (env->hflags & HF_LMA_MASK) {
env->nested_pg_mode |= SVM_NPT_LMA;
}
diff --git a/target/i386/tcg-stub.c b/target/i386/tcg-stub.c
new file mode 100644
index 0000000..b00e23d
--- /dev/null
+++ b/target/i386/tcg-stub.c
@@ -0,0 +1,25 @@
+/*
+ * x86 FPU, MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+
+void update_mxcsr_from_sse_status(CPUX86State *env)
+{
+}
diff --git a/target/i386/translate.c b/target/i386/translate.c
index 5e5dbb4..a1d31f0 100644
--- a/target/i386/translate.c
+++ b/target/i386/translate.c
@@ -1128,9 +1128,6 @@ static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
static inline void gen_ins(DisasContext *s, MemOp ot)
{
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_start();
- }
gen_string_movl_A0_EDI(s);
/* Note: we must do this dummy write first to be restartable in
case of page fault. */
@@ -1143,16 +1140,10 @@ static inline void gen_ins(DisasContext *s, MemOp ot)
gen_op_movl_T0_Dshift(s, ot);
gen_op_add_reg_T0(s, s->aflag, R_EDI);
gen_bpt_io(s, s->tmp2_i32, ot);
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
}
static inline void gen_outs(DisasContext *s, MemOp ot)
{
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_start();
- }
gen_string_movl_A0_ESI(s);
gen_op_ld_v(s, ot, s->T0, s->A0);
@@ -1163,9 +1154,6 @@ static inline void gen_outs(DisasContext *s, MemOp ot)
gen_op_movl_T0_Dshift(s, ot);
gen_op_add_reg_T0(s, s->aflag, R_ESI);
gen_bpt_io(s, s->tmp2_i32, ot);
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
- }
}
/* same method as Valgrind : we generate jumps to current or next
@@ -6400,8 +6388,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+ /* jump generated by gen_repz_ins */
} else {
gen_ins(s, ot);
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
@@ -6415,8 +6407,12 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes) | 4);
+ if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+ /* jump generated by gen_repz_outs */
} else {
gen_outs(s, ot);
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
@@ -7583,12 +7579,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
CASE_MODRM_OP(4): /* smsw */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, cr[0]));
- if (CODE64(s)) {
- mod = (modrm >> 6) & 3;
- ot = (mod != 3 ? MO_16 : s->dflag);
- } else {
- ot = MO_16;
- }
+ /*
+ * In 32-bit mode, the higher 16 bits of the destination
+ * register are undefined. In practice CR0[31:0] is stored
+ * just like in 64-bit mode.
+ */
+ mod = (modrm >> 6) & 3;
+ ot = (mod != 3 ? MO_16 : s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 0xee: /* rdpkru */
@@ -8039,7 +8036,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
gen_helper_read_crN(s->T0, cpu_env, tcg_const_i32(reg));
gen_op_mov_reg_v(s, ot, rm, s->T0);
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
- gen_io_end();
+ gen_jmp(s, s->pc - s->cs_base);
}
}
break;
@@ -8157,6 +8154,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
+ gen_helper_update_mxcsr(cpu_env);
gen_lea_modrm(env, s, modrm);
tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, mxcsr));
gen_op_st_v(s, MO_32, s->T0, s->A0);