author    Peter Maydell <peter.maydell@linaro.org>  2021-07-22 18:32:02 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2021-07-22 18:32:02 +0100
commit    7b7ca8ebde4ee6fba171004b2726ae1ff5489c03 (patch)
tree      f3327a0520948327ac540ad77792aee2df8a60d2 /target
parent    beb191385882a2a283ce777d76b1a77e71813d14 (diff)
parent    0848f8aca6f7b13f2a755c2593b0a1cbb39f658e (diff)
Merge remote-tracking branch 'remotes/bonzini-gitlab/tags/for-upstream' into staging
Bugfixes.

# gpg: Signature made Thu 22 Jul 2021 14:11:27 BST
# gpg:                using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg:                issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini-gitlab/tags/for-upstream:
  configure: Let --without-default-features disable vhost-kernel and vhost-vdpa
  configure: Fix the default setting of the "xen" feature
  configure: Allow vnc to get disabled with --without-default-features
  configure: Fix --without-default-features propagation to meson
  meson: fix dependencies for modinfo
  configure: Drop obsolete check for the alloc_size attribute
  target/i386: Added consistency checks for EFER
  target/i386: Added consistency checks for CR4
  target/i386: Added V_INTR_PRIO check to virtual interrupts
  qemu-config: restore "machine" in qmp_query_command_line_options()
  usb: fix usb-host dependency check
  chardev-spice: add missing module_obj directive
  vl: Parse legacy default_machine_opts
  qemu-config: fix memory leak on ferror()
  qemu-config: never call the callback after an error, fix leak

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target')
-rw-r--r--  target/i386/cpu.h                     | 44
-rw-r--r--  target/i386/tcg/sysemu/misc_helper.c  |  3
-rw-r--r--  target/i386/tcg/sysemu/svm_helper.c   | 60

3 files changed, 103 insertions, 4 deletions
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 5d98a4e..6c50d3a 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -240,6 +240,7 @@ typedef enum X86Seg {
#define CR4_OSFXSR_SHIFT 9
#define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK (1U << 10)
+#define CR4_UMIP_MASK (1U << 11)
#define CR4_LA57_MASK (1U << 12)
#define CR4_VMXE_MASK (1U << 13)
#define CR4_SMXE_MASK (1U << 14)
@@ -251,6 +252,14 @@ typedef enum X86Seg {
#define CR4_PKE_MASK (1U << 22)
#define CR4_PKS_MASK (1U << 24)
+#define CR4_RESERVED_MASK \
+(~(target_ulong)(CR4_VME_MASK | CR4_PVI_MASK | CR4_TSD_MASK \
+                | CR4_DE_MASK | CR4_PSE_MASK | CR4_PAE_MASK \
+                | CR4_MCE_MASK | CR4_PGE_MASK | CR4_PCE_MASK \
+                | CR4_OSFXSR_MASK | CR4_OSXMMEXCPT_MASK | CR4_UMIP_MASK \
+                | CR4_FSGSBASE_MASK | CR4_PCIDE_MASK | CR4_OSXSAVE_MASK \
+                | CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_PKE_MASK | CR4_PKS_MASK))
+
#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
#define DR6_BT (1 << 15)
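
Note that CR4_RESERVED_MASK is built as the complement of every CR4 bit the emulator implements, so a guest write that sets any unknown bit can be rejected with a single AND; the same pattern is reused for MSR_EFER_RESERVED below. A minimal standalone sketch of the technique, using hypothetical bit names rather than QEMU's headers:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical control-register bits -- illustration only. */
#define XR_PAE (1u << 5)
#define XR_PGE (1u << 7)
#define XR_PKE (1u << 22)

/* Reserved = everything that is not an implemented bit. */
#define XR_RESERVED (~(uint32_t)(XR_PAE | XR_PGE | XR_PKE))

static int write_is_valid(uint32_t new_val)
{
    return (new_val & XR_RESERVED) == 0;
}

int main(void)
{
    printf("%d\n", write_is_valid(XR_PAE | XR_PGE)); /* 1: only known bits */
    printf("%d\n", write_is_valid(1u << 13));        /* 0: reserved bit set */
    return 0;
}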
@@ -466,6 +475,11 @@ typedef enum X86Seg {
#define MSR_EFER_SVME (1 << 12)
#define MSR_EFER_FFXSR (1 << 14)
+#define MSR_EFER_RESERVED \
+        (~(target_ulong)(MSR_EFER_SCE | MSR_EFER_LME \
+                         | MSR_EFER_LMA | MSR_EFER_NXE | MSR_EFER_SVME \
+                         | MSR_EFER_FFXSR))
+
#define MSR_STAR 0xc0000081
#define MSR_LSTAR 0xc0000082
#define MSR_CSTAR 0xc0000083
@@ -2196,6 +2210,36 @@ static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat)
return !!(cpu->hyperv_features & BIT(feat));
}
+static inline uint64_t cr4_reserved_bits(CPUX86State *env)
+{
+    uint64_t reserved_bits = CR4_RESERVED_MASK;
+    if (!env->features[FEAT_XSAVE]) {
+        reserved_bits |= CR4_OSXSAVE_MASK;
+    }
+    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMEP)) {
+        reserved_bits |= CR4_SMEP_MASK;
+    }
+    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
+        reserved_bits |= CR4_SMAP_MASK;
+    }
+    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE)) {
+        reserved_bits |= CR4_FSGSBASE_MASK;
+    }
+    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
+        reserved_bits |= CR4_PKE_MASK;
+    }
+    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57)) {
+        reserved_bits |= CR4_LA57_MASK;
+    }
+    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_UMIP)) {
+        reserved_bits |= CR4_UMIP_MASK;
+    }
+    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) {
+        reserved_bits |= CR4_PKS_MASK;
+    }
+    return reserved_bits;
+}
+
#if defined(TARGET_X86_64) && \
defined(CONFIG_USER_ONLY) && \
defined(CONFIG_LINUX)
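
cr4_reserved_bits() starts from the static CR4_RESERVED_MASK and then additionally reserves any bit whose backing CPUID feature is absent on the configured vCPU. A hedged standalone sketch of that feature-gating pattern, with hypothetical flag and bit names:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical feature flags and register bits -- illustration only. */
#define F_XSAVE (1u << 0)
#define F_SMEP  (1u << 1)

#define B_OSXSAVE (1u << 18)
#define B_SMEP    (1u << 20)

/* Static part: bits that are never implemented. */
#define B_STATIC_RESERVED (~(uint32_t)(B_OSXSAVE | B_SMEP))

/* Dynamic part: also reserve bits whose backing feature is missing,
 * mirroring the shape of cr4_reserved_bits() above. */
static uint32_t reserved_bits(uint32_t features)
{
    uint32_t r = B_STATIC_RESERVED;
    if (!(features & F_XSAVE)) {
        r |= B_OSXSAVE;
    }
    if (!(features & F_SMEP)) {
        r |= B_SMEP;
    }
    return r;
}

int main(void)
{
    /* With only XSAVE present, B_SMEP joins the reserved set. */
    printf("%d\n", (reserved_bits(F_XSAVE) & B_SMEP) != 0); /* 1 */
    return 0;
}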
diff --git a/target/i386/tcg/sysemu/misc_helper.c b/target/i386/tcg/sysemu/misc_helper.c
index db0d8a9..a2af2c9 100644
--- a/target/i386/tcg/sysemu/misc_helper.c
+++ b/target/i386/tcg/sysemu/misc_helper.c
@@ -99,6 +99,9 @@ void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
cpu_x86_update_cr3(env, t0);
break;
case 4:
+        if (t0 & cr4_reserved_bits(env)) {
+            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+        }
if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) &&
(env->hflags & HF_CS64_MASK)) {
raise_exception_ra(env, EXCP0D_GPF, GETPC());
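
The helper_write_crN() change validates the incoming value before any CPU state is touched: reserved bits are checked first, then the rule that LA57 cannot be toggled while in 64-bit mode. A minimal sketch of that validate-then-commit ordering, with hypothetical helpers rather than QEMU's API:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical register write with an injected fault callback. */
bool write_cr(uint32_t *reg, uint32_t new_val, uint32_t reserved,
              void (*fault)(void))
{
    if (new_val & reserved) {
        fault();          /* reject before mutating anything */
        return false;
    }
    *reg = new_val;       /* commit only after all checks pass */
    return true;
}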
diff --git a/target/i386/tcg/sysemu/svm_helper.c b/target/i386/tcg/sysemu/svm_helper.c
index 00618cf..4d64ec3 100644
--- a/target/i386/tcg/sysemu/svm_helper.c
+++ b/target/i386/tcg/sysemu/svm_helper.c
@@ -65,6 +65,51 @@ static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
sc->base, sc->limit, sc->flags);
}
+static inline bool ctl_has_irq(uint32_t int_ctl)
+{
+    uint32_t int_prio;
+    uint32_t tpr;
+
+    int_prio = (int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT;
+    tpr = int_ctl & V_TPR_MASK;
+    return (int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
+}
+
+static inline bool is_efer_invalid_state(CPUX86State *env)
+{
+    if (!(env->efer & MSR_EFER_SVME)) {
+        return true;
+    }
+
+    if (env->efer & MSR_EFER_RESERVED) {
+        return true;
+    }
+
+    if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
+            !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
+        return true;
+    }
+
+    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
+            && !(env->cr[4] & CR4_PAE_MASK)) {
+        return true;
+    }
+
+    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
+            && !(env->cr[0] & CR0_PE_MASK)) {
+        return true;
+    }
+
+    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
+            && (env->cr[4] & CR4_PAE_MASK)
+            && (env->segs[R_CS].flags & DESC_L_MASK)
+            && (env->segs[R_CS].flags & DESC_B_MASK)) {
+        return true;
+    }
+
+    return false;
+}
+
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
CPUState *cs = env_cpu(env);
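
ctl_has_irq() implements the V_INTR_PRIO check: a pending virtual interrupt is only injectable when its priority is at least the guest's task priority. A worked standalone example, assuming the VMCB int_ctl layout from the AMD APM (V_TPR in bits 3:0, V_IRQ at bit 8, V_INTR_PRIO in bits 19:16); the names here are local stand-ins, not QEMU's:

#include <stdint.h>
#include <stdio.h>

#define TPR_MASK   0x0fu
#define IRQ_MASK   (1u << 8)
#define PRIO_SHIFT 16
#define PRIO_MASK  (0x0fu << PRIO_SHIFT)

static int has_irq(uint32_t int_ctl)
{
    uint32_t prio = (int_ctl & PRIO_MASK) >> PRIO_SHIFT;
    uint32_t tpr  = int_ctl & TPR_MASK;
    return (int_ctl & IRQ_MASK) && prio >= tpr;
}

int main(void)
{
    /* V_IRQ set, priority 5, TPR 3 -> injectable. */
    printf("%d\n", has_irq(IRQ_MASK | (5u << PRIO_SHIFT) | 3u)); /* 1 */
    /* V_IRQ set, priority 2, TPR 3 -> masked by the task priority. */
    printf("%d\n", has_irq(IRQ_MASK | (2u << PRIO_SHIFT) | 3u)); /* 0 */
    return 0;
}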
@@ -75,6 +120,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
uint32_t int_ctl;
uint32_t asid;
uint64_t new_cr0;
+    uint64_t new_cr4;
cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());
@@ -215,14 +261,16 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
}
+    new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4));
+    if (new_cr4 & cr4_reserved_bits(env)) {
+        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+    }
/* clear exit_info_2 so we behave like the real hardware */
x86_stq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
cpu_x86_update_cr0(env, new_cr0);
- cpu_x86_update_cr4(env, x86_ldq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb,
- save.cr4)));
+    cpu_x86_update_cr4(env, new_cr4);
cpu_x86_update_cr3(env, x86_ldq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb,
save.cr3)));
@@ -278,6 +326,10 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
}
#endif
+    if (is_efer_invalid_state(env)) {
+        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+    }
+
switch (x86_ldub_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
case TLB_CONTROL_DO_NOTHING:
@@ -290,7 +342,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
env->hflags2 |= HF2_GIF_MASK;
- if (int_ctl & V_IRQ_MASK) {
+    if (ctl_has_irq(int_ctl)) {
CPUState *cs = env_cpu(env);
cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
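
The EFER consistency check runs after the rest of the VMCB state has been loaded, following the VMRUN canonicalization checks in the AMD APM: SVME must be set, reserved bits must be clear, and long mode (LME/LMA) must be consistent with CPUID long-mode support, paging, PAE, and the CS attributes. A hedged standalone sketch of the long-mode subset, with hypothetical flag names rather than QEMU's:

#include <stdbool.h>
#include <stdint.h>

#define EFER_LME (1u << 8)
#define CR0_PE   (1u << 0)
#define CR0_PG   (1u << 31)
#define CR4_PAE  (1u << 5)

/* Long mode with paging enabled requires PAE and protected mode,
 * mirroring two of the is_efer_invalid_state() checks above. */
bool efer_long_mode_invalid(uint32_t efer, uint32_t cr0, uint32_t cr4)
{
    if ((efer & EFER_LME) && (cr0 & CR0_PG) && !(cr4 & CR4_PAE)) {
        return true;
    }
    if ((efer & EFER_LME) && (cr0 & CR0_PG) && !(cr0 & CR0_PE)) {
        return true;
    }
    return false;
}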