author     Richard Henderson <richard.henderson@linaro.org>  2024-07-04 09:16:07 -0700
committer  Richard Henderson <richard.henderson@linaro.org>  2024-07-04 09:16:07 -0700
commit  5915139aba1646220630596de30c673528e047c9 (patch)
tree    23192bb450ca10835b65562ebb516a45b6903723 /target
parent  1406b7fc4bbaab42133b1ef03270179746e91723 (diff)
parent  188569c10d5dc6996bde90ce25645083e9661ecb (diff)
Merge tag 'for-upstream' of https://gitlab.com/bonzini/qemu into staging
* meson: Pass objects and dependencies to declare_dependency(), not static_library()
* meson: Drop the .fa library suffix
* target/i386: drop AMD machine check bits from Intel CPUID
* target/i386: add avx-vnni-int16 feature
* target/i386: SEV bugfixes
* target/i386: SEV-SNP -cpu host support
* char: fix exit issues

# -----BEGIN PGP SIGNATURE-----
#
# iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmaGceoUHHBib256aW5p
# QHJlZGhhdC5jb20ACgkQv/vSX3jHroNcpgf/XziKojGOTvYsE7xMijOUswYjCG5m
# ZVLqxTug8Q0zO/9mGvluKBTWmh8KhRWOovX5iZL8+F0gPoYPG4ONpNhh3wpA9+S7
# H7ph4V6sDJBX4l3OrOK6htD8dO5D9kns1iKGnE0lY60PkcHl+pU8BNWfK1zYp5US
# geiyzuRFRRtDmoNx5+o+w+D+W5msPZsnlj5BnPWM+O/ykeFfSrk2ztfdwHKXUhCB
# 5FJcu2sWVx+wsdVzdjgT8USi5+VTK4vabq3SfccmNRxBRnJOCU5MrR63stMDceo4
# TswSB88I0WRV1848AudcGZRkjvKaXLyHJ+QTjg2dp7itEARJ3MGsvOpS5A==
# =3kv7
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 04 Jul 2024 02:56:58 AM PDT
# gpg: using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg: issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg: aka "Paolo Bonzini <pbonzini@redhat.com>" [full]

* tag 'for-upstream' of https://gitlab.com/bonzini/qemu:
  target/i386/SEV: implement mask_cpuid_features
  target/i386: add support for masking CPUID features in confidential guests
  char-stdio: Restore blocking mode of stdout on exit
  target/i386: add avx-vnni-int16 feature
  i386/sev: Fallback to the default SEV device if none provided in sev_get_capabilities()
  i386/sev: Fix error message in sev_get_capabilities()
  target/i386: do not include undefined bits in the AMD topoext leaf
  target/i386: SEV: fix formatting of CPUID mismatch message
  target/i386: drop AMD machine check bits from Intel CPUID
  target/i386: pass X86CPU to x86_cpu_get_supported_feature_word
  meson: Drop the .fa library suffix
  Revert "meson: Propagate gnutls dependency"
  meson: Pass objects and dependencies to declare_dependency()
  meson: merge plugin_ldflags into emulator_link_args
  meson: move block.syms dependency out of libblock
  meson: move shared_module() calls where modules are already walked

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
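For context, the CPUID-masking change in this pull request amounts to an optional per-class hook that the KVM CPUID reporting code consults before handing feature bits to a confidential guest. The following self-contained C sketch only models that dispatch pattern; GuestClass, the R_EBX constant and main() are simplified stand-ins, not QEMU's X86ConfidentialGuestClass or its callers, and the real wiring is what the diff below adds.

/*
 * Simplified illustration (not QEMU code) of the mask_cpuid_features
 * class-hook pattern: the caller asks an optional per-guest-type hook to
 * clear CPUID bits that the confidential-computing firmware cannot honor.
 */
#include <stdint.h>
#include <stdio.h>

#define R_EBX 3                              /* stand-in register index */
#define CPUID_7_0_EBX_TSC_ADJUST (1u << 1)   /* value matches the diff below */

typedef struct GuestClass {
    /* Optional hook: return @value with unsupported bits cleared. */
    uint32_t (*mask_cpuid_features)(uint32_t feature, uint32_t index,
                                    int reg, uint32_t value);
} GuestClass;

/* Dispatcher: if no hook is installed, the value passes through unchanged. */
static uint32_t mask_cpuid_features(const GuestClass *klass, uint32_t feature,
                                    uint32_t index, int reg, uint32_t value)
{
    if (klass->mask_cpuid_features) {
        return klass->mask_cpuid_features(feature, index, reg, value);
    }
    return value;
}

/* Example hook: hide TSC_ADJUST in CPUID leaf 7, subleaf 0, EBX. */
static uint32_t snp_like_mask(uint32_t feature, uint32_t index,
                              int reg, uint32_t value)
{
    if (feature == 7 && index == 0 && reg == R_EBX) {
        return value & ~CPUID_7_0_EBX_TSC_ADJUST;
    }
    return value;
}

int main(void)
{
    GuestClass snp = { .mask_cpuid_features = snp_like_mask };
    uint32_t host_ebx = CPUID_7_0_EBX_TSC_ADJUST | (1u << 0);
    uint32_t guest_ebx = mask_cpuid_features(&snp, 7, 0, R_EBX, host_ebx);

    printf("leaf 7 EBX: host 0x%x -> guest 0x%x\n",
           (unsigned)host_ebx, (unsigned)guest_ebx);
    return 0;
}

The fall-through-when-unset behavior in the sketch mirrors the inline dispatcher added to confidential-guest.h, and the leaf-7 filtering mirrors part of sev_snp_mask_cpuid_features in sev.c.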
Diffstat (limited to 'target')
-rw-r--r--  target/i386/confidential-guest.h  24
-rw-r--r--  target/i386/cpu.c                 40
-rw-r--r--  target/i386/cpu.h                 10
-rw-r--r--  target/i386/kvm/kvm-cpu.c          2
-rw-r--r--  target/i386/kvm/kvm.c              5
-rw-r--r--  target/i386/sev.c                 51
6 files changed, 109 insertions, 23 deletions
diff --git a/target/i386/confidential-guest.h b/target/i386/confidential-guest.h
index 532e172..7342d28 100644
--- a/target/i386/confidential-guest.h
+++ b/target/i386/confidential-guest.h
@@ -39,6 +39,8 @@ struct X86ConfidentialGuestClass {
/* <public> */
int (*kvm_type)(X86ConfidentialGuest *cg);
+ uint32_t (*mask_cpuid_features)(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index,
+ int reg, uint32_t value);
};
/**
@@ -56,4 +58,26 @@ static inline int x86_confidential_guest_kvm_type(X86ConfidentialGuest *cg)
return 0;
}
}
+
+/**
+ * x86_confidential_guest_mask_cpuid_features:
+ *
+ * Removes unsupported features from a confidential guest's CPUID values, returns
+ * the value with the bits removed. The bits removed should be those that KVM
+ * provides independent of host-supported CPUID features, but are not supported by
+ * the confidential computing firmware.
+ */
+static inline int x86_confidential_guest_mask_cpuid_features(X86ConfidentialGuest *cg,
+ uint32_t feature, uint32_t index,
+ int reg, uint32_t value)
+{
+ X86ConfidentialGuestClass *klass = X86_CONFIDENTIAL_GUEST_GET_CLASS(cg);
+
+ if (klass->mask_cpuid_features) {
+ return klass->mask_cpuid_features(cg, feature, index, reg, value);
+ } else {
+ return value;
+ }
+}
+
#endif
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 4c2e6f3..c05765e 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -1131,7 +1131,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.feat_names = {
NULL, NULL, NULL, NULL,
"avx-vnni-int8", "avx-ne-convert", NULL, NULL,
- "amx-complex", NULL, NULL, NULL,
+ "amx-complex", NULL, "avx-vnni-int16", NULL,
NULL, NULL, "prefetchiti", NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
@@ -6035,11 +6035,11 @@ CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
#endif /* !CONFIG_USER_ONLY */
-uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
- bool migratable_only)
+uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w)
{
FeatureWordInfo *wi = &feature_word_info[w];
uint64_t r = 0;
+ uint32_t unavail = 0;
if (kvm_enabled()) {
switch (wi->type) {
@@ -6065,20 +6065,34 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
} else {
return ~0;
}
+
+ switch (w) {
#ifndef TARGET_X86_64
- if (w == FEAT_8000_0001_EDX) {
+ case FEAT_8000_0001_EDX:
/*
* 32-bit TCG can emulate 64-bit compatibility mode. If there is no
* way for userspace to get out of its 32-bit jail, we can leave
* the LM bit set.
*/
- uint32_t unavail = tcg_enabled()
+ unavail = tcg_enabled()
? CPUID_EXT2_LM & ~CPUID_EXT2_KERNEL_FEATURES
: CPUID_EXT2_LM;
- r &= ~unavail;
- }
+ break;
#endif
- if (migratable_only) {
+
+ case FEAT_8000_0007_EBX:
+ if (cpu && !IS_AMD_CPU(&cpu->env)) {
+ /* Disable AMD machine check architecture for Intel CPU. */
+ unavail = ~0;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ r &= ~unavail;
+ if (cpu && cpu->migratable) {
r &= x86_cpu_get_migratable_flags(w);
}
return r;
@@ -6968,6 +6982,9 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*eax = *ebx = *ecx = *edx = 0;
break;
}
+ if (cpu->amd_topoext_features_only) {
+ *edx &= CACHE_NO_INVD_SHARING | CACHE_INCLUSIVE;
+ }
break;
case 0x8000001E:
if (cpu->core_id <= 255) {
@@ -7371,7 +7388,7 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
* by the user.
*/
env->features[w] |=
- x86_cpu_get_supported_feature_word(w, cpu->migratable) &
+ x86_cpu_get_supported_feature_word(cpu, w) &
~env->user_features[w] &
~feature_word_info[w].no_autoenable_flags;
}
@@ -7497,7 +7514,7 @@ static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
for (w = 0; w < FEATURE_WORDS; w++) {
uint64_t host_feat =
- x86_cpu_get_supported_feature_word(w, false);
+ x86_cpu_get_supported_feature_word(NULL, w);
uint64_t requested_features = env->features[w];
uint64_t unavailable_features = requested_features & ~host_feat;
mark_unavailable_features(cpu, w, unavailable_features, prefix);
@@ -7617,7 +7634,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
env->features[FEAT_PERF_CAPABILITIES] & PERF_CAP_LBR_FMT;
if (requested_lbr_fmt && kvm_enabled()) {
uint64_t host_perf_cap =
- x86_cpu_get_supported_feature_word(FEAT_PERF_CAPABILITIES, false);
+ x86_cpu_get_supported_feature_word(NULL, FEAT_PERF_CAPABILITIES);
unsigned host_lbr_fmt = host_perf_cap & PERF_CAP_LBR_FMT;
if (!cpu->enable_pmu) {
@@ -8279,6 +8296,7 @@ static Property x86_cpu_properties[] = {
DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor),
DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
DEFINE_PROP_BOOL("x-vendor-cpuid-only", X86CPU, vendor_cpuid_only, true),
+ DEFINE_PROP_BOOL("x-amd-topoext-features-only", X86CPU, amd_topoext_features_only, true),
DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
DEFINE_PROP_BOOL("kvm-pv-enforce-cpuid", X86CPU, kvm_pv_enforce_cpuid,
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 29daf37..c43ac01 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -666,8 +666,7 @@ typedef enum FeatureWord {
} FeatureWord;
typedef uint64_t FeatureWordArray[FEATURE_WORDS];
-uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
- bool migratable_only);
+uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
/* cpuid_features bits */
#define CPUID_FP87 (1U << 0)
@@ -813,6 +812,8 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
/* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */
#define CPUID_7_0_EBX_FSGSBASE (1U << 0)
+/* Support TSC adjust MSR */
+#define CPUID_7_0_EBX_TSC_ADJUST (1U << 1)
/* Support SGX */
#define CPUID_7_0_EBX_SGX (1U << 2)
/* 1st Group of Advanced Bit Manipulation Extensions */
@@ -1003,6 +1004,8 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
#define CPUID_8000_0008_EBX_STIBP_ALWAYS_ON (1U << 17)
/* Speculative Store Bypass Disable */
#define CPUID_8000_0008_EBX_AMD_SSBD (1U << 24)
+/* Paravirtualized Speculative Store Bypass Disable MSR */
+#define CPUID_8000_0008_EBX_VIRT_SSBD (1U << 25)
/* Predictive Store Forwarding Disable */
#define CPUID_8000_0008_EBX_AMD_PSFD (1U << 28)
@@ -2105,6 +2108,9 @@ struct ArchCPU {
/* Only advertise CPUID leaves defined by the vendor */
bool vendor_cpuid_only;
+ /* Only advertise TOPOEXT features that AMD defines */
+ bool amd_topoext_features_only;
+
/* Enable auto level-increase for Intel Processor Trace leave */
bool intel_pt_auto_level;
diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c
index d57a68a..6bf8dcf 100644
--- a/target/i386/kvm/kvm-cpu.c
+++ b/target/i386/kvm/kvm-cpu.c
@@ -143,7 +143,7 @@ static void kvm_cpu_xsave_init(void)
if (!esa->size) {
continue;
}
- if ((x86_cpu_get_supported_feature_word(esa->feature, false) & esa->bits)
+ if ((x86_cpu_get_supported_feature_word(NULL, esa->feature) & esa->bits)
!= esa->bits) {
continue;
}
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index bf18257..becca2e 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -548,6 +548,11 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
ret |= 1U << KVM_HINTS_REALTIME;
}
+ if (current_machine->cgs) {
+ ret = x86_confidential_guest_mask_cpuid_features(
+ X86_CONFIDENTIAL_GUEST(current_machine->cgs),
+ function, index, reg, ret);
+ }
return ret;
}
diff --git a/target/i386/sev.c b/target/i386/sev.c
index 3ab8b3c..2ba5f51 100644
--- a/target/i386/sev.c
+++ b/target/i386/sev.c
@@ -587,17 +587,17 @@ static SevCapability *sev_get_capabilities(Error **errp)
}
sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
- if (!sev_common) {
- error_setg(errp, "SEV is not configured");
- return NULL;
+ if (sev_common) {
+ sev_device = object_property_get_str(OBJECT(sev_common), "sev-device",
+ &error_abort);
+ } else {
+ sev_device = g_strdup(DEFAULT_SEV_DEVICE);
}
- sev_device = object_property_get_str(OBJECT(sev_common), "sev-device",
- &error_abort);
fd = open(sev_device, O_RDWR);
if (fd < 0) {
error_setg_errno(errp, errno, "SEV: Failed to open %s",
- DEFAULT_SEV_DEVICE);
+ sev_device);
g_free(sev_device);
return NULL;
}
@@ -841,7 +841,7 @@ sev_snp_cpuid_report_mismatches(SnpCpuidInfo *old,
size_t i;
if (old->count != new->count) {
- error_report("SEV-SNP: CPUID validation failed due to count mismatch,"
+ error_report("SEV-SNP: CPUID validation failed due to count mismatch, "
"provided: %d, expected: %d", old->count, new->count);
return;
}
@@ -853,8 +853,8 @@ sev_snp_cpuid_report_mismatches(SnpCpuidInfo *old,
new_func = &new->entries[i];
if (memcmp(old_func, new_func, sizeof(SnpCpuidFunc))) {
- error_report("SEV-SNP: CPUID validation failed for function 0x%x, index: 0x%x"
- "provided: eax:0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x"
+ error_report("SEV-SNP: CPUID validation failed for function 0x%x, index: 0x%x, "
+ "provided: eax:0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x, "
"expected: eax:0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x",
old_func->eax_in, old_func->ecx_in,
old_func->eax, old_func->ebx, old_func->ecx, old_func->edx,
@@ -945,6 +945,38 @@ out:
return ret;
}
+static uint32_t
+sev_snp_mask_cpuid_features(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index,
+ int reg, uint32_t value)
+{
+ switch (feature) {
+ case 1:
+ if (reg == R_ECX) {
+ return value & ~CPUID_EXT_TSC_DEADLINE_TIMER;
+ }
+ break;
+ case 7:
+ if (index == 0 && reg == R_EBX) {
+ return value & ~CPUID_7_0_EBX_TSC_ADJUST;
+ }
+ if (index == 0 && reg == R_EDX) {
+ return value & ~(CPUID_7_0_EDX_SPEC_CTRL |
+ CPUID_7_0_EDX_STIBP |
+ CPUID_7_0_EDX_FLUSH_L1D |
+ CPUID_7_0_EDX_ARCH_CAPABILITIES |
+ CPUID_7_0_EDX_CORE_CAPABILITY |
+ CPUID_7_0_EDX_SPEC_CTRL_SSBD);
+ }
+ break;
+ case 0x80000008:
+ if (reg == R_EBX) {
+ return value & ~CPUID_8000_0008_EBX_VIRT_SSBD;
+ }
+ break;
+ }
+ return value;
+}
+
static int
sev_launch_update_data(SevCommonState *sev_common, hwaddr gpa,
uint8_t *addr, size_t len)
@@ -2315,6 +2347,7 @@ sev_snp_guest_class_init(ObjectClass *oc, void *data)
klass->launch_finish = sev_snp_launch_finish;
klass->launch_update_data = sev_snp_launch_update_data;
klass->kvm_init = sev_snp_kvm_init;
+ x86_klass->mask_cpuid_features = sev_snp_mask_cpuid_features;
x86_klass->kvm_type = sev_snp_kvm_type;
object_class_property_add(oc, "policy", "uint64",