aboutsummaryrefslogtreecommitdiff
path: root/target/i386/cpu.c
diff options
context:
space:
mode:
Diffstat (limited to 'target/i386/cpu.c')
-rw-r--r--target/i386/cpu.c2904
1 files changed, 2349 insertions, 555 deletions
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 3fb1ec6..b5e483e 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -28,20 +28,25 @@
#include "system/hvf.h"
#include "hvf/hvf-i386.h"
#include "kvm/kvm_i386.h"
+#include "kvm/tdx.h"
#include "sev.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qapi/qapi-visit-machine.h"
#include "standard-headers/asm-x86/kvm_para.h"
-#include "hw/qdev-properties.h"
+#include "hw/core/qdev-properties.h"
#include "hw/i386/topology.h"
+#include "exec/watchpoint.h"
#ifndef CONFIG_USER_ONLY
+#include "confidential-guest.h"
#include "system/reset.h"
-#include "qapi/qapi-commands-machine-target.h"
-#include "exec/address-spaces.h"
-#include "hw/boards.h"
+#include "qapi/qapi-commands-machine.h"
+#include "system/address-spaces.h"
+#include "hw/core/boards.h"
#include "hw/i386/sgx-epc.h"
#endif
+#include "system/qtest.h"
+#include "tcg/tcg-cpu.h"
#include "disas/capstone.h"
#include "cpu-internal.h"
@@ -63,6 +68,7 @@ struct CPUID2CacheDescriptorInfo {
/*
* Known CPUID 2 cache descriptors.
+ * TLB, prefetch and sectored cache related descriptors are not included.
* From Intel SDM Volume 2A, CPUID instruction
*/
struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
@@ -84,18 +90,29 @@ struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
.associativity = 2, .line_size = 64, },
[0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
.associativity = 8, .line_size = 64, },
- /* lines per sector is not supported cpuid2_cache_descriptor(),
- * so descriptors 0x22, 0x23 are not included
- */
+ /*
+ * lines per sector is not supported by cpuid2_cache_descriptor(),
+ * so descriptors 0x22, 0x23 are not included
+ */
[0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
.associativity = 16, .line_size = 64, },
- /* lines per sector is not supported cpuid2_cache_descriptor(),
- * so descriptors 0x25, 0x20 are not included
- */
+ /*
+ * lines per sector is not supported by cpuid2_cache_descriptor(),
+ * so descriptors 0x25, 0x29 are not included
+ */
[0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
.associativity = 8, .line_size = 64, },
[0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
.associativity = 8, .line_size = 64, },
+ /*
+ * Newer Intel CPUs (with cores that have no L3, e.g., Intel MTL, ARL)
+ * use CPUID 0x4 leaf to describe cache topology, by encoding CPUID 0x2
+ * leaf with 0xFF. For older CPUs (without 0x4 leaf), it's also valid
+ * to just ignore L3's code if there's no L3.
+ *
+ * This already covers all the cases in QEMU, so code 0x40 is not
+ * included.
+ */
[0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
.associativity = 4, .line_size = 32, },
[0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
@@ -112,7 +129,18 @@ struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
.associativity = 8, .line_size = 64, },
[0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
.associativity = 12, .line_size = 64, },
- /* Descriptor 0x49 depends on CPU family/model, so it is not included */
+ /*
+ * Descriptor 0x49 has 2 cases:
+ * - 2nd-level cache: 4 MByte, 16-way set associative, 64 byte line size.
+ * - 3rd-level cache: 4MB, 16-way set associative, 64-byte line size
+ * (Intel Xeon processor MP, Family 0FH, Model 06H).
+ *
+ * When it represents L3, then it depends on CPU family/model. Fortunately,
+ * the legacy cache/CPU models don't have such special L3. So, just add it
+ * to represent the general L2 case.
+ */
+ [0x49] = { .level = 2, .type = UNIFIED_CACHE, .size = 4 * MiB,
+ .associativity = 16, .line_size = 64, },
[0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
.associativity = 12, .line_size = 64, },
[0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
@@ -133,9 +161,10 @@ struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
.associativity = 4, .line_size = 64, },
[0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
.associativity = 4, .line_size = 64, },
- /* lines per sector is not supported cpuid2_cache_descriptor(),
- * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
- */
+ /*
+ * lines per sector is not supported by cpuid2_cache_descriptor(),
+ * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
+ */
[0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
.associativity = 8, .line_size = 64, },
[0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
@@ -196,7 +225,7 @@ struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
* Return a CPUID 2 cache descriptor for a given cache.
* If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
*/
-static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
+static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache, bool *unmatched)
{
int i;
@@ -213,9 +242,46 @@ static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
}
}
+ *unmatched |= true;
return CACHE_DESCRIPTOR_UNAVAILABLE;
}
+static const CPUCaches legacy_intel_cpuid2_cache_info;
+
+/* Encode cache info for CPUID[2] */
+static void encode_cache_cpuid2(X86CPU *cpu,
+ const CPUCaches *caches,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ CPUX86State *env = &cpu->env;
+ int l1d, l1i, l2, l3;
+ bool unmatched = false;
+
+ *eax = 1; /* Number of CPUID[EAX=2] calls required */
+ *ebx = *ecx = *edx = 0;
+
+ l1d = cpuid2_cache_descriptor(caches->l1d_cache, &unmatched);
+ l1i = cpuid2_cache_descriptor(caches->l1i_cache, &unmatched);
+ l2 = cpuid2_cache_descriptor(caches->l2_cache, &unmatched);
+ l3 = cpuid2_cache_descriptor(caches->l3_cache, &unmatched);
+
+ if (!cpu->consistent_cache ||
+ (env->cpuid_min_level < 0x4 && !unmatched)) {
+ /*
+ * Though the SDM defines code 0x40 for cases with no L2 or L3,
+ * it's also valid to just ignore l3's code if there's no l2.
+ */
+ if (cpu->enable_l3_cache) {
+ *ecx = l3;
+ }
+ *edx = (l1d << 16) | (l1i << 8) | l2;
+ } else {
+ *ecx = 0;
+ *edx = CACHE_DESCRIPTOR_UNAVAILABLE;
+ }
+}
+
/* CPUID Leaf 4 constants: */
/* EAX: */
@@ -238,33 +304,30 @@ static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
0 /* Invalid value */)
-static uint32_t max_thread_ids_for_cache(X86CPUTopoInfo *topo_info,
- enum CpuTopologyLevel share_level)
+static uint32_t apicid_offset_by_topo_level(X86CPUTopoInfo *topo_info,
+ enum CpuTopologyLevel topo_level)
{
- uint32_t num_ids = 0;
-
- switch (share_level) {
+ switch (topo_level) {
+ case CPU_TOPOLOGY_LEVEL_THREAD:
+ return 0;
case CPU_TOPOLOGY_LEVEL_CORE:
- num_ids = 1 << apicid_core_offset(topo_info);
- break;
+ return apicid_core_offset(topo_info);
case CPU_TOPOLOGY_LEVEL_MODULE:
- num_ids = 1 << apicid_module_offset(topo_info);
- break;
+ return apicid_module_offset(topo_info);
case CPU_TOPOLOGY_LEVEL_DIE:
- num_ids = 1 << apicid_die_offset(topo_info);
- break;
+ return apicid_die_offset(topo_info);
case CPU_TOPOLOGY_LEVEL_SOCKET:
- num_ids = 1 << apicid_pkg_offset(topo_info);
- break;
+ return apicid_pkg_offset(topo_info);
default:
- /*
- * Currently there is no use case for THREAD, so use
- * assert directly to facilitate debugging.
- */
g_assert_not_reached();
}
+ return 0;
+}
- return num_ids - 1;
+static uint32_t max_thread_ids_for_cache(X86CPUTopoInfo *topo_info,
+ enum CpuTopologyLevel share_level)
+{
+ return (1 << apicid_offset_by_topo_level(topo_info, share_level)) - 1;
}
static uint32_t max_core_ids_in_package(X86CPUTopoInfo *topo_info)
@@ -283,11 +346,17 @@ static void encode_cache_cpuid4(CPUCacheInfo *cache,
assert(cache->size == cache->line_size * cache->associativity *
cache->partitions * cache->sets);
+ /*
+ * The following fields have bit-width limitations, so consider the
+ * maximum values to avoid overflow:
+ * Bits 25-14: maximum 4095.
+ * Bits 31-26: maximum 63.
+ */
*eax = CACHE_TYPE(cache->type) |
CACHE_LEVEL(cache->level) |
(cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
- (max_core_ids_in_package(topo_info) << 26) |
- (max_thread_ids_for_cache(topo_info, cache->share_level) << 14);
+ (MIN(max_core_ids_in_package(topo_info), 63) << 26) |
+ (MIN(max_thread_ids_for_cache(topo_info, cache->share_level), 4095) << 14);
assert(cache->line_size > 0);
assert(cache->partitions > 0);
@@ -326,26 +395,6 @@ static uint32_t num_threads_by_topo_level(X86CPUTopoInfo *topo_info,
return 0;
}
-static uint32_t apicid_offset_by_topo_level(X86CPUTopoInfo *topo_info,
- enum CpuTopologyLevel topo_level)
-{
- switch (topo_level) {
- case CPU_TOPOLOGY_LEVEL_THREAD:
- return 0;
- case CPU_TOPOLOGY_LEVEL_CORE:
- return apicid_core_offset(topo_info);
- case CPU_TOPOLOGY_LEVEL_MODULE:
- return apicid_module_offset(topo_info);
- case CPU_TOPOLOGY_LEVEL_DIE:
- return apicid_die_offset(topo_info);
- case CPU_TOPOLOGY_LEVEL_SOCKET:
- return apicid_pkg_offset(topo_info);
- default:
- g_assert_not_reached();
- }
- return 0;
-}
-
static uint32_t cpuid1f_topo_type(enum CpuTopologyLevel topo_level)
{
switch (topo_level) {
@@ -427,7 +476,6 @@ static void encode_topo_cpuid1f(CPUX86State *env, uint32_t count,
static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
{
assert(cache->size % 1024 == 0);
- assert(cache->lines_per_tag > 0);
assert(cache->associativity > 0);
assert(cache->line_size > 0);
return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
@@ -436,8 +484,8 @@ static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
#define ASSOC_FULL 0xFF
-/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
-#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
+/* x86 associativity encoding used on CPUID Leaf 0x80000006: */
+#define X86_ENC_ASSOC(a) (a <= 1 ? a : \
a == 2 ? 0x2 : \
a == 4 ? 0x4 : \
a == 8 ? 0x6 : \
@@ -460,19 +508,18 @@ static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
{
assert(l2->size % 1024 == 0);
assert(l2->associativity > 0);
- assert(l2->lines_per_tag > 0);
assert(l2->line_size > 0);
*ecx = ((l2->size / 1024) << 16) |
- (AMD_ENC_ASSOC(l2->associativity) << 12) |
+ (X86_ENC_ASSOC(l2->associativity) << 12) |
(l2->lines_per_tag << 8) | (l2->line_size);
+ /* For Intel, EDX is reserved. */
if (l3) {
assert(l3->size % (512 * 1024) == 0);
assert(l3->associativity > 0);
- assert(l3->lines_per_tag > 0);
assert(l3->line_size > 0);
*edx = ((l3->size / (512 * 1024)) << 18) |
- (AMD_ENC_ASSOC(l3->associativity) << 12) |
+ (X86_ENC_ASSOC(l3->associativity) << 12) |
(l3->lines_per_tag << 8) | (l3->line_size);
} else {
*edx = 0;
@@ -490,7 +537,8 @@ static void encode_cache_cpuid8000001d(CPUCacheInfo *cache,
*eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
(cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
- *eax |= max_thread_ids_for_cache(topo_info, cache->share_level) << 14;
+ /* Bits 25:14 - NumSharingCache: maximum 4095. */
+ *eax |= MIN(max_thread_ids_for_cache(topo_info, cache->share_level), 4095) << 14;
assert(cache->line_size > 0);
assert(cache->partitions > 0);
@@ -570,117 +618,172 @@ static void encode_topo_cpuid8000001e(X86CPU *cpu, X86CPUTopoInfo *topo_info,
* These are legacy cache values. If there is a need to change any
* of these values please use builtin_x86_defs
*/
-
-/* L1 data cache: */
-static CPUCacheInfo legacy_l1d_cache = {
- .type = DATA_CACHE,
- .level = 1,
- .size = 32 * KiB,
- .self_init = 1,
- .line_size = 64,
- .associativity = 8,
- .sets = 64,
- .partitions = 1,
- .no_invd_sharing = true,
- .share_level = CPU_TOPOLOGY_LEVEL_CORE,
-};
-
-/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
-static CPUCacheInfo legacy_l1d_cache_amd = {
- .type = DATA_CACHE,
- .level = 1,
- .size = 64 * KiB,
- .self_init = 1,
- .line_size = 64,
- .associativity = 2,
- .sets = 512,
- .partitions = 1,
- .lines_per_tag = 1,
- .no_invd_sharing = true,
- .share_level = CPU_TOPOLOGY_LEVEL_CORE,
-};
-
-/* L1 instruction cache: */
-static CPUCacheInfo legacy_l1i_cache = {
- .type = INSTRUCTION_CACHE,
- .level = 1,
- .size = 32 * KiB,
- .self_init = 1,
- .line_size = 64,
- .associativity = 8,
- .sets = 64,
- .partitions = 1,
- .no_invd_sharing = true,
- .share_level = CPU_TOPOLOGY_LEVEL_CORE,
-};
-
-/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
-static CPUCacheInfo legacy_l1i_cache_amd = {
- .type = INSTRUCTION_CACHE,
- .level = 1,
- .size = 64 * KiB,
- .self_init = 1,
- .line_size = 64,
- .associativity = 2,
- .sets = 512,
- .partitions = 1,
- .lines_per_tag = 1,
- .no_invd_sharing = true,
- .share_level = CPU_TOPOLOGY_LEVEL_CORE,
-};
-
-/* Level 2 unified cache: */
-static CPUCacheInfo legacy_l2_cache = {
- .type = UNIFIED_CACHE,
- .level = 2,
- .size = 4 * MiB,
- .self_init = 1,
- .line_size = 64,
- .associativity = 16,
- .sets = 4096,
- .partitions = 1,
- .no_invd_sharing = true,
- .share_level = CPU_TOPOLOGY_LEVEL_CORE,
-};
-
-/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
-static CPUCacheInfo legacy_l2_cache_cpuid2 = {
- .type = UNIFIED_CACHE,
- .level = 2,
- .size = 2 * MiB,
- .line_size = 64,
- .associativity = 8,
- .share_level = CPU_TOPOLOGY_LEVEL_INVALID,
+static const CPUCaches legacy_amd_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 64 * KiB,
+ .self_init = 1,
+ .line_size = 64,
+ .associativity = 2,
+ .sets = 512,
+ .partitions = 1,
+ .lines_per_tag = 1,
+ .no_invd_sharing = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 64 * KiB,
+ .self_init = 1,
+ .line_size = 64,
+ .associativity = 2,
+ .sets = 512,
+ .partitions = 1,
+ .lines_per_tag = 1,
+ .no_invd_sharing = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 512 * KiB,
+ .line_size = 64,
+ .lines_per_tag = 1,
+ .associativity = 16,
+ .sets = 512,
+ .partitions = 1,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 16 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .sets = 16384,
+ .partitions = 1,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .complex_indexing = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
};
-
-/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
-static CPUCacheInfo legacy_l2_cache_amd = {
- .type = UNIFIED_CACHE,
- .level = 2,
- .size = 512 * KiB,
- .line_size = 64,
- .lines_per_tag = 1,
- .associativity = 16,
- .sets = 512,
- .partitions = 1,
- .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+/*
+ * Only used for the CPU models with CPUID level < 4.
+ * These CPUs (CPUID level < 4) only use CPUID leaf 2 to present
+ * cache information.
+ *
+ * Note: This cache model is just a default one, and is not
+ * guaranteed to match real hardware.
+ */
+static const CPUCaches legacy_intel_cpuid2_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .self_init = 1,
+ .line_size = 64,
+ .associativity = 8,
+ .sets = 64,
+ .partitions = 1,
+ .no_invd_sharing = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .self_init = 1,
+ .line_size = 64,
+ .associativity = 8,
+ .sets = 64,
+ .partitions = 1,
+ .no_invd_sharing = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 2 * MiB,
+ .self_init = 1,
+ .line_size = 64,
+ .associativity = 8,
+ .sets = 4096,
+ .partitions = 1,
+ .no_invd_sharing = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 16 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .sets = 16384,
+ .partitions = 1,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .complex_indexing = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
};
-/* Level 3 unified cache: */
-static CPUCacheInfo legacy_l3_cache = {
- .type = UNIFIED_CACHE,
- .level = 3,
- .size = 16 * MiB,
- .line_size = 64,
- .associativity = 16,
- .sets = 16384,
- .partitions = 1,
- .lines_per_tag = 1,
- .self_init = true,
- .inclusive = true,
- .complex_indexing = true,
- .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+static const CPUCaches legacy_intel_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .self_init = 1,
+ .line_size = 64,
+ .associativity = 8,
+ .sets = 64,
+ .partitions = 1,
+ .no_invd_sharing = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .self_init = 1,
+ .line_size = 64,
+ .associativity = 8,
+ .sets = 64,
+ .partitions = 1,
+ .no_invd_sharing = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 4 * MiB,
+ .self_init = 1,
+ .line_size = 64,
+ .associativity = 16,
+ .sets = 4096,
+ .partitions = 1,
+ .no_invd_sharing = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 16 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .sets = 16384,
+ .partitions = 1,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .complex_indexing = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
};
/* TLB definitions: */
@@ -774,11 +877,12 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
- CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
+ CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE | \
+ CPUID_HT)
/* partly implemented:
CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
/* missing:
- CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
+ CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_TM, CPUID_PBE */
/*
* Kernel-only features that can be shown to usermode programs even if
@@ -846,7 +950,8 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A | \
- CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_KERNEL_FEATURES)
+ CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_KERNEL_FEATURES | \
+ CPUID_EXT3_CMP_LEG)
#define TCG_EXT4_FEATURES 0
@@ -895,6 +1000,7 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
#define TCG_7_1_EAX_FEATURES (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | \
CPUID_7_1_EAX_FSRC | CPUID_7_1_EAX_CMPCCXADD)
+#define TCG_7_1_ECX_FEATURES 0
#define TCG_7_1_EDX_FEATURES 0
#define TCG_7_2_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
@@ -907,6 +1013,9 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
#define TCG_SGX_12_0_EBX_FEATURES 0
#define TCG_SGX_12_1_EAX_FEATURES 0
#define TCG_24_0_EBX_FEATURES 0
+#define TCG_29_0_EBX_FEATURES 0
+#define TCG_1E_1_EAX_FEATURES 0
+#define TCG_24_1_ECX_FEATURES 0
#if defined CONFIG_USER_ONLY
#define CPUID_8000_0008_EBX_KERNEL_FEATURES (CPUID_8000_0008_EBX_IBPB | \
@@ -920,6 +1029,17 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
#define TCG_8000_0008_EBX (CPUID_8000_0008_EBX_XSAVEERPTR | \
CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_KERNEL_FEATURES)
+#if defined CONFIG_USER_ONLY
+#define CPUID_8000_0021_EAX_KERNEL_FEATURES CPUID_8000_0021_EAX_AUTO_IBRS
+#else
+#define CPUID_8000_0021_EAX_KERNEL_FEATURES 0
+#endif
+
+#define TCG_8000_0021_EAX_FEATURES ( \
+ CPUID_8000_0021_EAX_NO_NESTED_DATA_BP | \
+ CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE | \
+ CPUID_8000_0021_EAX_KERNEL_FEATURES)
+
FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
[FEAT_1_EDX] = {
.type = CPUID_FEATURE_WORD,
@@ -1081,7 +1201,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
NULL, "avx512vbmi", "umip", "pku",
- NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL,
+ NULL /* ospke */, "waitpkg", "avx512vbmi2", "cet-ss",
"gfni", "vaes", "vpclmulqdq", "avx512vnni",
"avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
"la57", NULL, NULL, NULL,
@@ -1104,7 +1224,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
"avx512-vp2intersect", NULL, "md-clear", NULL,
NULL, NULL, "serialize", NULL,
"tsx-ldtrk", NULL, NULL /* pconfig */, "arch-lbr",
- NULL, NULL, "amx-bf16", "avx512-fp16",
+ "cet-ibt", NULL, "amx-bf16", "avx512-fp16",
"amx-tile", "amx-int8", "spec-ctrl", "stibp",
"flush-l1d", "arch-capabilities", "core-capability", "ssbd",
},
@@ -1125,7 +1245,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
NULL, "fred", "lkgs", "wrmsrns",
NULL, "amx-fp16", NULL, "avx-ifma",
NULL, NULL, "lam", NULL,
- NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, "movrs",
},
.cpuid = {
.eax = 7,
@@ -1134,6 +1254,25 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
},
.tcg_features = TCG_7_1_EAX_FEATURES,
},
+ [FEAT_7_1_ECX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ NULL, NULL, NULL, NULL,
+ NULL, "msr-imm", NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = {
+ .eax = 7,
+ .needs_ecx = true, .ecx = 1,
+ .reg = R_ECX,
+ },
+ .tcg_features = TCG_7_1_ECX_FEATURES,
+ },
[FEAT_7_1_EDX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
@@ -1142,7 +1281,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
"amx-complex", NULL, "avx-vnni-int16", NULL,
NULL, NULL, "prefetchiti", NULL,
NULL, NULL, NULL, "avx10",
- NULL, NULL, NULL, NULL,
+ NULL, "apxf", NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
@@ -1172,6 +1311,25 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
},
.tcg_features = TCG_7_2_EDX_FEATURES,
},
+ [FEAT_1E_1_EAX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ "amx-int8-alias", "amx-bf16-alias", "amx-complex-alias", "amx-fp16-alias",
+ "amx-fp8", NULL, "amx-tf32", "amx-avx512",
+ "amx-movrs", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = {
+ .eax = 0x1e,
+ .needs_ecx = true, .ecx = 1,
+ .reg = R_EAX,
+ },
+ .tcg_features = TCG_1E_1_EAX_FEATURES,
+ },
[FEAT_24_0_EBX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
@@ -1186,6 +1344,37 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
},
.tcg_features = TCG_24_0_EBX_FEATURES,
},
+ [FEAT_29_0_EBX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ "apx-nci-ndd-nf", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = {
+ .eax = 0x29,
+ .needs_ecx = true, .ecx = 0,
+ .reg = R_EBX,
+ },
+ .tcg_features = TCG_29_0_EBX_FEATURES,
+ },
+ [FEAT_24_1_ECX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ [2] = "avx10-vnni-int",
+ },
+ .cpuid = {
+ .eax = 0x24,
+ .needs_ecx = true, .ecx = 1,
+ .reg = R_ECX,
+ },
+ .tcg_features = TCG_24_1_ECX_FEATURES,
+ },
[FEAT_8000_0007_EDX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
@@ -1237,17 +1426,17 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
[FEAT_8000_0021_EAX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
- "no-nested-data-bp", NULL, "lfence-always-serializing", NULL,
- NULL, NULL, "null-sel-clr-base", NULL,
+ "no-nested-data-bp", "fs-gs-base-ns", "lfence-always-serializing", NULL,
+ NULL, "verw-clear", "null-sel-clr-base", NULL,
"auto-ibrs", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
+ "prefetchi", NULL, NULL, NULL,
"eraps", NULL, NULL, "sbpb",
"ibpb-brtype", "srso-no", "srso-user-kernel-no", NULL,
},
.cpuid = { .eax = 0x80000021, .reg = R_EAX, },
- .tcg_features = 0,
+ .tcg_features = TCG_8000_0021_EAX_FEATURES,
.unmigratable_flags = 0,
},
[FEAT_8000_0021_EBX] = {
@@ -1256,6 +1445,22 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.tcg_features = 0,
.unmigratable_flags = 0,
},
+ [FEAT_8000_0021_ECX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ NULL, "tsa-sq-no", "tsa-l1-no", NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = { .eax = 0x80000021, .reg = R_ECX, },
+ .tcg_features = 0,
+ .unmigratable_flags = 0,
+ },
[FEAT_8000_0022_EAX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
@@ -1309,6 +1514,8 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.ecx = 1,
.reg = R_ECX,
},
+ .migratable_flags = XSTATE_CET_U_MASK | XSTATE_CET_S_MASK |
+ XSTATE_ARCH_LBR_MASK,
},
[FEAT_XSAVE_XSS_HI] = {
.type = CPUID_FEATURE_WORD,
@@ -1347,7 +1554,8 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
- XSTATE_PKRU_MASK,
+ XSTATE_PKRU_MASK | XSTATE_XTILE_CFG_MASK | XSTATE_XTILE_DATA_MASK |
+ XSTATE_APX_MASK,
},
[FEAT_XSAVE_XCR0_HI] = {
.type = CPUID_FEATURE_WORD,
@@ -1370,6 +1578,14 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
"bhi-no", NULL, NULL, NULL,
"pbrsb-no", NULL, "gds-no", "rfds-no",
"rfds-clear", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, "its-no", NULL,
},
.msr = {
.index = MSR_IA32_ARCH_CAPABILITIES,
@@ -1440,7 +1656,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
"vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit",
"vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit",
"vmx-rdseed-exit", "vmx-pml", NULL, NULL,
- "vmx-xsaves", NULL, NULL, NULL,
+ "vmx-xsaves", NULL, "vmx-mbec", NULL,
NULL, "vmx-tsc-scaling", "vmx-enable-user-wait-pause", NULL,
NULL, NULL, NULL, NULL,
},
@@ -1481,7 +1697,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
"vmx-exit-save-efer", "vmx-exit-load-efer",
"vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs",
NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL,
- NULL, "vmx-exit-load-pkrs", NULL, "vmx-exit-secondary-ctls",
+ "vmx-exit-save-cet", "vmx-exit-load-pkrs", NULL, "vmx-exit-secondary-ctls",
},
.msr = {
.index = MSR_IA32_VMX_TRUE_EXIT_CTLS,
@@ -1496,7 +1712,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
NULL, "vmx-entry-ia32e-mode", NULL, NULL,
NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer",
"vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL,
- NULL, NULL, "vmx-entry-load-pkrs", "vmx-entry-load-fred",
+ "vmx-entry-load-cet", NULL, "vmx-entry-load-pkrs", "vmx-entry-load-fred",
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
@@ -1654,14 +1870,21 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
},
};
-typedef struct FeatureMask {
- FeatureWord index;
- uint64_t mask;
-} FeatureMask;
+bool is_feature_word_cpuid(uint32_t feature, uint32_t index, int reg)
+{
+ FeatureWordInfo *wi;
+ FeatureWord w;
-typedef struct FeatureDep {
- FeatureMask from, to;
-} FeatureDep;
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ wi = &feature_word_info[w];
+ if (wi->type == CPUID_FEATURE_WORD && wi->cpuid.eax == feature &&
+ (!wi->cpuid.needs_ecx || wi->cpuid.ecx == index) &&
+ wi->cpuid.reg == reg) {
+ return true;
+ }
+ }
+ return false;
+}
static FeatureDep feature_dependencies[] = {
{
@@ -1749,6 +1972,10 @@ static FeatureDep feature_dependencies[] = {
.to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST },
},
{
+ .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
+ .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_MODE_BASED_EPT_EXEC },
+ },
+ {
.from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID },
.to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 },
},
@@ -1773,10 +2000,6 @@ static FeatureDep feature_dependencies[] = {
.to = { FEAT_7_1_EAX, CPUID_7_1_EAX_FRED },
},
{
- .from = { FEAT_7_1_EAX, CPUID_7_1_EAX_WRMSRNS },
- .to = { FEAT_7_1_EAX, CPUID_7_1_EAX_FRED },
- },
- {
.from = { FEAT_7_0_EBX, CPUID_7_0_EBX_SGX },
.to = { FEAT_7_0_ECX, CPUID_7_0_ECX_SGX_LC },
},
@@ -1808,6 +2031,19 @@ static FeatureDep feature_dependencies[] = {
.from = { FEAT_7_1_EDX, CPUID_7_1_EDX_AVX10 },
.to = { FEAT_24_0_EBX, ~0ull },
},
+ {
+ .from = { FEAT_7_1_EDX, CPUID_7_1_EDX_APXF },
+ .to = { FEAT_29_0_EBX, ~0ull },
+ },
+
+ {
+ .from = { FEAT_7_1_EDX, CPUID_7_1_EDX_AVX10 },
+ .to = { FEAT_24_1_ECX, ~0ull },
+ },
+ {
+ .from = { FEAT_7_1_EAX, CPUID_7_1_EAX_FRED },
+ .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_LOAD_IA32_FRED },
+ },
};
typedef struct X86RegisterInfo32 {
@@ -1831,52 +2067,103 @@ static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
};
#undef REGISTER
-/* CPUID feature bits available in XSS */
-#define CPUID_XSTATE_XSS_MASK (XSTATE_ARCH_LBR_MASK)
-
ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
[XSTATE_FP_BIT] = {
/* x87 FP state component is always enabled if XSAVE is supported */
- .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
.size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
+ .features = {
+ { FEAT_1_ECX, CPUID_EXT_XSAVE },
+ },
},
[XSTATE_SSE_BIT] = {
/* SSE state component is always enabled if XSAVE is supported */
- .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
.size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
+ .features = {
+ { FEAT_1_ECX, CPUID_EXT_XSAVE },
+ },
+ },
+ [XSTATE_YMM_BIT] = {
+ .size = sizeof(XSaveAVX),
+ .features = {
+ { FEAT_1_ECX, CPUID_EXT_AVX },
+ },
+ },
+ [XSTATE_BNDREGS_BIT] = {
+ .size = sizeof(XSaveBNDREG),
+ .features = {
+ { FEAT_7_0_EBX, CPUID_7_0_EBX_MPX },
+ },
+ },
+ [XSTATE_BNDCSR_BIT] = {
+ .size = sizeof(XSaveBNDCSR),
+ .features = {
+ { FEAT_7_0_EBX, CPUID_7_0_EBX_MPX },
+ },
+ },
+ [XSTATE_OPMASK_BIT] = {
+ .size = sizeof(XSaveOpmask),
+ .features = {
+ { FEAT_7_0_EBX, CPUID_7_0_EBX_AVX512F },
+ { FEAT_7_1_EDX, CPUID_7_1_EDX_AVX10 },
+ },
+ },
+ [XSTATE_ZMM_Hi256_BIT] = {
+ .size = sizeof(XSaveZMM_Hi256),
+ .features = {
+ { FEAT_7_0_EBX, CPUID_7_0_EBX_AVX512F },
+ { FEAT_7_1_EDX, CPUID_7_1_EDX_AVX10 },
+ },
+ },
+ [XSTATE_Hi16_ZMM_BIT] = {
+ .size = sizeof(XSaveHi16_ZMM),
+ .features = {
+ { FEAT_7_0_EBX, CPUID_7_0_EBX_AVX512F },
+ { FEAT_7_1_EDX, CPUID_7_1_EDX_AVX10 },
+ },
+ },
+ [XSTATE_PKRU_BIT] = {
+ .size = sizeof(XSavePKRU),
+ .features = {
+ { FEAT_7_0_ECX, CPUID_7_0_ECX_PKU },
+ },
+ },
+ [XSTATE_CET_U_BIT] = {
+ .size = sizeof(XSaveCETU),
+ .features = {
+ { FEAT_7_0_ECX, CPUID_7_0_ECX_CET_SHSTK },
+ { FEAT_7_0_EDX, CPUID_7_0_EDX_CET_IBT },
+ },
+ },
+ [XSTATE_CET_S_BIT] = {
+ .size = sizeof(XSaveCETS),
+ .features = {
+ { FEAT_7_0_ECX, CPUID_7_0_ECX_CET_SHSTK },
+ { FEAT_7_0_EDX, CPUID_7_0_EDX_CET_IBT },
+ },
},
- [XSTATE_YMM_BIT] =
- { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
- .size = sizeof(XSaveAVX) },
- [XSTATE_BNDREGS_BIT] =
- { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
- .size = sizeof(XSaveBNDREG) },
- [XSTATE_BNDCSR_BIT] =
- { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
- .size = sizeof(XSaveBNDCSR) },
- [XSTATE_OPMASK_BIT] =
- { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
- .size = sizeof(XSaveOpmask) },
- [XSTATE_ZMM_Hi256_BIT] =
- { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
- .size = sizeof(XSaveZMM_Hi256) },
- [XSTATE_Hi16_ZMM_BIT] =
- { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
- .size = sizeof(XSaveHi16_ZMM) },
- [XSTATE_PKRU_BIT] =
- { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
- .size = sizeof(XSavePKRU) },
[XSTATE_ARCH_LBR_BIT] = {
- .feature = FEAT_7_0_EDX, .bits = CPUID_7_0_EDX_ARCH_LBR,
- .offset = 0 /*supervisor mode component, offset = 0 */,
- .size = sizeof(XSavesArchLBR) },
+ .size = sizeof(XSaveArchLBR),
+ .features = {
+ { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_LBR },
+ },
+ },
[XSTATE_XTILE_CFG_BIT] = {
- .feature = FEAT_7_0_EDX, .bits = CPUID_7_0_EDX_AMX_TILE,
.size = sizeof(XSaveXTILECFG),
+ .features = {
+ { FEAT_7_0_EDX, CPUID_7_0_EDX_AMX_TILE },
+ },
},
[XSTATE_XTILE_DATA_BIT] = {
- .feature = FEAT_7_0_EDX, .bits = CPUID_7_0_EDX_AMX_TILE,
- .size = sizeof(XSaveXTILEDATA)
+ .size = sizeof(XSaveXTILEDATA),
+ .features = {
+ { FEAT_7_0_EDX, CPUID_7_0_EDX_AMX_TILE },
+ },
+ },
+ [XSTATE_APX_BIT] = {
+ .size = sizeof(XSaveAPX),
+ .features = {
+ { FEAT_7_1_EDX, CPUID_7_1_EDX_APXF },
+ },
},
};
@@ -1899,7 +2186,7 @@ uint32_t xsave_area_size(uint64_t mask, bool compacted)
static inline bool accel_uses_host_cpuid(void)
{
- return kvm_enabled() || hvf_enabled();
+ return !tcg_enabled() && !qtest_enabled();
}
static inline uint64_t x86_cpu_xsave_xcr0_components(X86CPU *cpu)
@@ -1937,8 +2224,13 @@ static uint64_t x86_cpu_get_migratable_flags(X86CPU *cpu, FeatureWord w)
for (i = 0; i < 64; i++) {
uint64_t f = 1ULL << i;
- /* If the feature name is known, it is implicitly considered migratable,
- * unless it is explicitly set in unmigratable_flags */
+ /*
+ * If the feature name is known, it is implicitly considered migratable,
+ * unless it is explicitly set in unmigratable_flags.
+ *
+ * TODO: Make the behavior of x86_cpu_enable_xsave_components() align
+ * with migratable_flags masking.
+ */
if ((wi->migratable_flags & f) ||
(wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
r |= f;
@@ -1963,16 +2255,6 @@ void host_cpuid(uint32_t function, uint32_t count,
: "=a"(vec[0]), "=b"(vec[1]),
"=c"(vec[2]), "=d"(vec[3])
: "0"(function), "c"(count) : "cc");
-#elif defined(__i386__)
- asm volatile("pusha \n\t"
- "cpuid \n\t"
- "mov %%eax, 0(%2) \n\t"
- "mov %%ebx, 4(%2) \n\t"
- "mov %%ecx, 8(%2) \n\t"
- "mov %%edx, 12(%2) \n\t"
- "popa"
- : : "a"(function), "c"(count), "S"(vec)
- : "memory", "cc");
#else
abort();
#endif
@@ -2029,6 +2311,12 @@ typedef struct X86CPUDefinition {
int model;
int stepping;
uint8_t avx10_version;
+ /*
+ * Whether to present CPUID 0x1f by default.
+ * If true, encode CPU topology in 0x1f leaf even if there's no
+ * extended topology levels.
+ */
+ bool cpuid_0x1f;
FeatureWordArray features;
const char *model_id;
const CPUCaches *const cache_info;
@@ -2075,6 +2363,40 @@ x86_cpu_def_get_versions(const X86CPUDefinition *def)
return def->versions ?: default_version_list;
}
+/* CPUID 0x24.0x0 (EAX, EBX, ECX, EDX) and 0x24.0x1 (EAX, EBX, ECX, EDX) */
+#define AVX10_FEATURE_WORDS 8
+
+typedef struct AVX10VersionDefinition {
+ const char *name;
+ /* AVX10 version */
+ uint8_t version;
+ /* AVX10 (CPUID 0x24) maximum supported sub-leaf. */
+ uint8_t max_subleaf;
+ FeatureMask *features;
+} AVX10VersionDefinition;
+
+static const AVX10VersionDefinition builtin_avx10_defs[] = {
+ {
+ .name = "avx10.1",
+ .version = 1,
+ .max_subleaf = 0,
+ .features = (FeatureMask[]) {
+ { FEAT_7_1_EDX, CPUID_7_1_EDX_AVX10 },
+ { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_VL_MASK },
+ { /* end of list */ }
+ }
+ },
+ {
+ .name = "avx10.2",
+ .version = 2,
+ .max_subleaf = 1,
+ .features = (FeatureMask[]) {
+ { FEAT_24_1_ECX, CPUID_24_1_ECX_AVX10_VNNI_INT },
+ { /* end of list */ }
+ }
+ },
+};
+
static const CPUCaches epyc_cache_info = {
.l1d_cache = &(CPUCacheInfo) {
.type = DATA_CACHE,
@@ -2183,6 +2505,60 @@ static CPUCaches epyc_v4_cache_info = {
},
};
+static CPUCaches epyc_v5_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 64 * KiB,
+ .line_size = 64,
+ .associativity = 4,
+ .partitions = 1,
+ .sets = 256,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 512 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 8 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 8192,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
static const CPUCaches epyc_rome_cache_info = {
.l1d_cache = &(CPUCacheInfo) {
.type = DATA_CACHE,
@@ -2291,6 +2667,60 @@ static const CPUCaches epyc_rome_v3_cache_info = {
},
};
+static const CPUCaches epyc_rome_v5_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 512 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 16 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 16384,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
static const CPUCaches epyc_milan_cache_info = {
.l1d_cache = &(CPUCacheInfo) {
.type = DATA_CACHE,
@@ -2399,6 +2829,60 @@ static const CPUCaches epyc_milan_v2_cache_info = {
},
};
+static const CPUCaches epyc_milan_v3_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 512 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 32 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 32768,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
static const CPUCaches epyc_genoa_cache_info = {
.l1d_cache = &(CPUCacheInfo) {
.type = DATA_CACHE,
@@ -2453,6 +2937,577 @@ static const CPUCaches epyc_genoa_cache_info = {
},
};
+static const CPUCaches epyc_genoa_v2_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 1 * MiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 2048,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 32 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 32768,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
+static const CPUCaches epyc_turin_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 48 * KiB,
+ .line_size = 64,
+ .associativity = 12,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 1 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 32 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 32768,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ }
+};
+
+static const CPUCaches xeon_spr_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x0.EAX */
+ .type = DATA_CACHE,
+ .level = 1,
+ .self_init = true,
+
+ /* CPUID 0x4.0x0.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 12,
+
+ /* CPUID 0x4.0x0.ECX */
+ .sets = 64,
+
+ /* CPUID 0x4.0x0.EDX */
+ .no_invd_sharing = false,
+ .inclusive = false,
+ .complex_indexing = false,
+
+ .size = 48 * KiB,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x1.EAX */
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .self_init = true,
+
+ /* CPUID 0x4.0x1.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 8,
+
+ /* CPUID 0x4.0x1.ECX */
+ .sets = 64,
+
+ /* CPUID 0x4.0x1.EDX */
+ .no_invd_sharing = false,
+ .inclusive = false,
+ .complex_indexing = false,
+
+ .size = 32 * KiB,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x2.EAX */
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .self_init = true,
+
+ /* CPUID 0x4.0x2.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 16,
+
+ /* CPUID 0x4.0x2.ECX */
+ .sets = 2048,
+
+ /* CPUID 0x4.0x2.EDX */
+ .no_invd_sharing = false,
+ .inclusive = false,
+ .complex_indexing = false,
+
+ .size = 2 * MiB,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x3.EAX */
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .self_init = true,
+
+ /* CPUID 0x4.0x3.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 15,
+
+ /* CPUID 0x4.0x3.ECX */
+ .sets = 65536,
+
+ /* CPUID 0x4.0x3.EDX */
+ .no_invd_sharing = false,
+ .inclusive = false,
+ .complex_indexing = true,
+
+ .size = 60 * MiB,
+ .share_level = CPU_TOPOLOGY_LEVEL_SOCKET,
+ },
+};
+
+static const CPUCaches xeon_gnr_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x0.EAX */
+ .type = DATA_CACHE,
+ .level = 1,
+ .self_init = true,
+
+ /* CPUID 0x4.0x0.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 12,
+
+ /* CPUID 0x4.0x0.ECX */
+ .sets = 64,
+
+ /* CPUID 0x4.0x0.EDX */
+ .no_invd_sharing = false,
+ .inclusive = false,
+ .complex_indexing = false,
+
+ .size = 48 * KiB,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x1.EAX */
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .self_init = true,
+
+ /* CPUID 0x4.0x1.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 16,
+
+ /* CPUID 0x4.0x1.ECX */
+ .sets = 64,
+
+ /* CPUID 0x4.0x1.EDX */
+ .no_invd_sharing = false,
+ .inclusive = false,
+ .complex_indexing = false,
+
+ .size = 64 * KiB,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x2.EAX */
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .self_init = true,
+
+ /* CPUID 0x4.0x2.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 16,
+
+ /* CPUID 0x4.0x2.ECX */
+ .sets = 2048,
+
+ /* CPUID 0x4.0x2.EDX */
+ .no_invd_sharing = false,
+ .inclusive = false,
+ .complex_indexing = false,
+
+ .size = 2 * MiB,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x3.EAX */
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .self_init = true,
+
+ /* CPUID 0x4.0x3.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 16,
+
+ /* CPUID 0x4.0x3.ECX */
+ .sets = 294912,
+
+ /* CPUID 0x4.0x3.EDX */
+ .no_invd_sharing = false,
+ .inclusive = false,
+ .complex_indexing = true,
+
+ .size = 288 * MiB,
+ .share_level = CPU_TOPOLOGY_LEVEL_SOCKET,
+ },
+};
+
+static const CPUCaches xeon_srf_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x0.EAX */
+ .type = DATA_CACHE,
+ .level = 1,
+ .self_init = true,
+
+ /* CPUID 0x4.0x0.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 8,
+
+ /* CPUID 0x4.0x0.ECX */
+ .sets = 64,
+
+ /* CPUID 0x4.0x0.EDX */
+ .no_invd_sharing = false,
+ .inclusive = false,
+ .complex_indexing = false,
+
+ .size = 32 * KiB,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x1.EAX */
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .self_init = true,
+
+ /* CPUID 0x4.0x1.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 8,
+
+ /* CPUID 0x4.0x1.ECX */
+ .sets = 128,
+
+ /* CPUID 0x4.0x1.EDX */
+ .no_invd_sharing = false,
+ .inclusive = false,
+ .complex_indexing = false,
+
+ .size = 64 * KiB,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x2.EAX */
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .self_init = true,
+
+ /* CPUID 0x4.0x2.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 16,
+
+ /* CPUID 0x4.0x2.ECX */
+ .sets = 4096,
+
+ /* CPUID 0x4.0x2.EDX */
+ .no_invd_sharing = false,
+ .inclusive = false,
+ .complex_indexing = false,
+
+ .size = 4 * MiB,
+ .share_level = CPU_TOPOLOGY_LEVEL_MODULE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x3.EAX */
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .self_init = true,
+
+ /* CPUID 0x4.0x3.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 12,
+
+ /* CPUID 0x4.0x3.ECX */
+ .sets = 147456,
+
+ /* CPUID 0x4.0x3.EDX */
+ .no_invd_sharing = false,
+ .inclusive = false,
+ .complex_indexing = true,
+
+ .size = 108 * MiB,
+ .share_level = CPU_TOPOLOGY_LEVEL_SOCKET,
+ },
+};
+
+static const CPUCaches xeon_cwf_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x0.EAX */
+ .type = DATA_CACHE,
+ .level = 1,
+ .self_init = true,
+
+ /* CPUID 0x4.0x0.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 8,
+
+ /* CPUID 0x4.0x0.ECX */
+ .sets = 64,
+
+ /* CPUID 0x4.0x0.EDX */
+ .no_invd_sharing = false,
+ .inclusive = false,
+ .complex_indexing = false,
+
+ .size = 32 * KiB,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x1.EAX */
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .self_init = true,
+
+ /* CPUID 0x4.0x1.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 8,
+
+ /* CPUID 0x4.0x1.ECX */
+ .sets = 128,
+
+ /* CPUID 0x4.0x1.EDX */
+ .no_invd_sharing = false,
+ .inclusive = false,
+ .complex_indexing = false,
+
+ .size = 64 * KiB,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x2.EAX */
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .self_init = true,
+
+ /* CPUID 0x4.0x2.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 16,
+
+ /* CPUID 0x4.0x2.ECX */
+ .sets = 4096,
+
+ /* CPUID 0x4.0x2.EDX */
+ .no_invd_sharing = false,
+ .inclusive = false,
+ .complex_indexing = false,
+
+ .size = 4 * MiB,
+ .share_level = CPU_TOPOLOGY_LEVEL_MODULE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x3.EAX */
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .self_init = true,
+
+ /* CPUID 0x4.0x3.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 16,
+
+ /* CPUID 0x4.0x3.ECX */
+ .sets = 540672,
+
+ /* CPUID 0x4.0x3.EDX */
+ .no_invd_sharing = false,
+ .inclusive = false,
+ .complex_indexing = true,
+
+ .size = 528 * MiB,
+ .share_level = CPU_TOPOLOGY_LEVEL_SOCKET,
+ },
+};
+
+static const CPUCaches yongfeng_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x0.EAX */
+ .type = DATA_CACHE,
+ .level = 1,
+ .self_init = true,
+
+ /* CPUID 0x4.0x0.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 8,
+
+ /* CPUID 0x4.0x0.ECX */
+ .sets = 64,
+
+ /* CPUID 0x4.0x0.EDX */
+ .no_invd_sharing = false,
+ .inclusive = false,
+ .complex_indexing = false,
+
+ /* CPUID 0x80000005.ECX */
+ .lines_per_tag = 1,
+ .size = 32 * KiB,
+
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x1.EAX */
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .self_init = true,
+
+ /* CPUID 0x4.0x1.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 16,
+
+ /* CPUID 0x4.0x1.ECX */
+ .sets = 64,
+
+ /* CPUID 0x4.0x1.EDX */
+ .no_invd_sharing = false,
+ .inclusive = false,
+ .complex_indexing = false,
+
+ /* CPUID 0x80000005.EDX */
+ .lines_per_tag = 1,
+ .size = 64 * KiB,
+
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x2.EAX */
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .self_init = true,
+
+ /* CPUID 0x4.0x2.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 8,
+
+ /* CPUID 0x4.0x2.ECX */
+ .sets = 512,
+
+ /* CPUID 0x4.0x2.EDX */
+ .no_invd_sharing = false,
+ .inclusive = true,
+ .complex_indexing = false,
+
+ /* CPUID 0x80000006.ECX */
+ .size = 256 * KiB,
+
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ /* CPUID 0x4.0x3.EAX */
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .self_init = true,
+
+ /* CPUID 0x4.0x3.EBX */
+ .line_size = 64,
+ .partitions = 1,
+ .associativity = 16,
+
+ /* CPUID 0x4.0x3.ECX */
+ .sets = 8192,
+
+ /* CPUID 0x4.0x3.EDX */
+ .no_invd_sharing = true,
+ .inclusive = true,
+ .complex_indexing = false,
+
+ .size = 8 * MiB,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
/* The following VMX features are not supported by KVM and are left out in the
* CPU definitions:
*
@@ -2705,6 +3760,7 @@ static const X86CPUDefinition builtin_x86_defs[] = {
I486_FEATURES,
.xlevel = 0,
.model_id = "",
+ .cache_info = &legacy_intel_cpuid2_cache_info,
},
{
.name = "pentium",
@@ -2717,6 +3773,7 @@ static const X86CPUDefinition builtin_x86_defs[] = {
PENTIUM_FEATURES,
.xlevel = 0,
.model_id = "",
+ .cache_info = &legacy_intel_cpuid2_cache_info,
},
{
.name = "pentium2",
@@ -2729,6 +3786,7 @@ static const X86CPUDefinition builtin_x86_defs[] = {
PENTIUM2_FEATURES,
.xlevel = 0,
.model_id = "",
+ .cache_info = &legacy_intel_cpuid2_cache_info,
},
{
.name = "pentium3",
@@ -2741,6 +3799,7 @@ static const X86CPUDefinition builtin_x86_defs[] = {
PENTIUM3_FEATURES,
.xlevel = 0,
.model_id = "",
+ .cache_info = &legacy_intel_cpuid2_cache_info,
},
{
.name = "athlon",
@@ -4273,6 +5332,34 @@ static const X86CPUDefinition builtin_x86_defs[] = {
{ /* end of list */ }
}
},
+ {
+ .version = 4,
+ .note = "with spr-sp cache model and 0x1f leaf",
+ .cache_info = &xeon_spr_cache_info,
+ .props = (PropValue[]) {
+ { "x-force-cpuid-0x1f", "on" },
+ { /* end of list */ },
+ }
+ },
+ {
+ .version = 5,
+ .note = "with cet-ss and cet-ibt",
+ .props = (PropValue[]) {
+ { "cet-ss", "on" },
+ { "cet-ibt", "on" },
+ { "vmx-exit-save-cet", "on" },
+ { "vmx-entry-load-cet", "on" },
+ { /* end of list */ },
+ }
+ },
+ {
+ .version = 6,
+ .note = "with its-no",
+ .props = (PropValue[]) {
+ { "its-no", "on" },
+ { /* end of list */ },
+ }
+ },
{ /* end of list */ }
}
},
@@ -4426,10 +5513,230 @@ static const X86CPUDefinition builtin_x86_defs[] = {
{ /* end of list */ }
}
},
+ {
+ .version = 3,
+ .note = "with gnr-sp cache model and 0x1f leaf",
+ .cache_info = &xeon_gnr_cache_info,
+ .props = (PropValue[]) {
+ { "x-force-cpuid-0x1f", "on" },
+ { /* end of list */ },
+ }
+ },
+ {
+ .version = 4,
+ .note = "with cet-ss and cet-ibt",
+ .props = (PropValue[]) {
+ { "cet-ss", "on" },
+ { "cet-ibt", "on" },
+ { "vmx-exit-save-cet", "on" },
+ { "vmx-entry-load-cet", "on" },
+ { /* end of list */ },
+ }
+ },
+ {
+ .version = 5,
+ .note = "with its-no",
+ .props = (PropValue[]) {
+ { "its-no", "on" },
+ { /* end of list */ },
+ }
+ },
{ /* end of list */ },
},
},
{
+ .name = "DiamondRapids",
+ .level = 0x29,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 0x13, /* family: 0xf, extended family: 0x4 */
+ .model = 0x1, /* model: 0x1, extended model: 0x0 */
+ .stepping = 0,
+ .avx10_version = 2, /* avx10.2 */
+ .cpuid_0x1f = true,
+ /*
+ * Please keep the ascending order so that we can have a clear view of
+ * bit position of each feature.
+ *
+ * Missing: CPUID_EXT_DTES64, CPUID_EXT_MONITOR, CPUID_EXT_DSCPL,
+ * CPUID_EXT_VMX, CPUID_EXT_SMX, CPUID_EXT_EST, CPUID_EXT_TM2,
+ * CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_DCA, CPUID_EXT_OSXSAVE
+ */
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSSE3 |
+ CPUID_EXT_FMA | CPUID_EXT_CX16 | CPUID_EXT_PCID | CPUID_EXT_SSE41 |
+ CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
+ CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES |
+ CPUID_EXT_XSAVE | CPUID_EXT_AVX | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ /* Missing: CPUID_DTS, CPUID_ACPI, CPUID_HT, CPUID_TM, CPUID_PBE */
+ .features[FEAT_1_EDX] =
+ CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
+ CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
+ CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
+ CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR |
+ CPUID_SSE | CPUID_SSE2 | CPUID_SS,
+ .features[FEAT_6_EAX] = CPUID_6_EAX_ARAT,
+ /*
+ * Missing: CPUID_7_0_EBX_SGX, "cqm" Cache QoS Monitoring,
+ * "rdt_a" Resource Director Technology Allocation,
+ * CPUID_7_0_EBX_INTEL_PT,
+ */
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_TSC_ADJUST |
+ CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 |
+ CPUID_7_0_EBX_FDP_EXCPTN_ONLY | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_ZERO_FCS_FDS |
+ CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
+ CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP |
+ CPUID_7_0_EBX_AVX512IFMA | CPUID_7_0_EBX_CLFLUSHOPT |
+ CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_AVX512CD |
+ CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_AVX512BW |
+ CPUID_7_0_EBX_AVX512VL,
+ /*
+ * Missing: CPUID_7_0_ECX_OSPKE, CPUID_7_0_ECX_WAITPKG, TME, ENQCMD,
+ * CPUID_7_0_ECX_SGX_LC, CPUID_7_0_ECX_PKS
+ */
+ .features[FEAT_7_0_ECX] =
+ CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP |
+ CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_AVX512_VBMI2 |
+ CPUID_7_0_ECX_CET_SHSTK | CPUID_7_0_ECX_GFNI | CPUID_7_0_ECX_VAES |
+ CPUID_7_0_ECX_VPCLMULQDQ | CPUID_7_0_ECX_AVX512VNNI |
+ CPUID_7_0_ECX_AVX512BITALG | CPUID_7_0_ECX_AVX512_VPOPCNTDQ |
+ CPUID_7_0_ECX_LA57 | CPUID_7_0_ECX_RDPID |
+ CPUID_7_0_ECX_BUS_LOCK_DETECT | CPUID_7_0_ECX_CLDEMOTE |
+ CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_MOVDIR64B,
+ /*
+ * Missing: SGX-KEYS, UINTR, PCONFIG, ARCH LBR,
+ * CPUID_7_0_EDX_CORE_CAPABILITY
+ */
+ .features[FEAT_7_0_EDX] =
+ CPUID_7_0_EDX_FSRM | CPUID_7_0_EDX_MD_CLEAR |
+ CPUID_7_0_EDX_SERIALIZE | CPUID_7_0_EDX_TSX_LDTRK |
+ CPUID_7_0_EDX_CET_IBT | CPUID_7_0_EDX_AMX_BF16 |
+ CPUID_7_0_EDX_AVX512_FP16 | CPUID_7_0_EDX_AMX_TILE |
+ CPUID_7_0_EDX_AMX_INT8 | CPUID_7_0_EDX_SPEC_CTRL |
+ CPUID_7_0_EDX_STIBP | CPUID_7_0_EDX_FLUSH_L1D |
+ CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
+ /* Missing: CPUID_7_1_EAX_LASS, ArchPerfmonExt (0x23 leaf), MSRLIST */
+ .features[FEAT_7_1_EAX] =
+ CPUID_7_1_EAX_AVX_VNNI | CPUID_7_1_EAX_AVX512_BF16 |
+ CPUID_7_1_EAX_CMPCCXADD | CPUID_7_1_EAX_FZRM |
+ CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_FSRC | CPUID_7_1_EAX_FRED |
+ CPUID_7_1_EAX_LKGS | CPUID_7_1_EAX_WRMSRNS |
+ CPUID_7_1_EAX_AMX_FP16 | CPUID_7_1_EAX_AVX_IFMA |
+ CPUID_7_1_EAX_LAM | CPUID_7_1_EAX_MOVRS,
+ /* Missing: CET_SSS */
+ .features[FEAT_7_1_EDX] =
+ CPUID_7_1_EDX_AVX_VNNI_INT8 | CPUID_7_1_EDX_AVX_NE_CONVERT |
+ CPUID_7_1_EDX_AMX_COMPLEX | CPUID_7_1_EDX_PREFETCHITI |
+ CPUID_7_1_EDX_AVX10 | CPUID_7_1_EDX_APXF,
+ /* Missing: UC-lock disable */
+ .features[FEAT_7_2_EDX] =
+ CPUID_7_2_EDX_PSFD | CPUID_7_2_EDX_IPRED_CTRL |
+ CPUID_7_2_EDX_RRSBA_CTRL | CPUID_7_2_EDX_DDPD_U |
+ CPUID_7_2_EDX_BHI_CTRL | CPUID_7_2_EDX_MCDT_NO,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES | CPUID_D_1_EAX_XFD,
+ .features[FEAT_1E_1_EAX] =
+ CPUID_1E_1_EAX_AMX_INT8_ALIAS | CPUID_1E_1_EAX_AMX_BF16_ALIAS |
+ CPUID_1E_1_EAX_AMX_COMPLEX_ALIAS |
+ CPUID_1E_1_EAX_AMX_FP16_ALIAS | CPUID_1E_1_EAX_AMX_FP8 |
+ CPUID_1E_1_EAX_AMX_TF32 | CPUID_1E_1_EAX_AMX_AVX512 |
+ CPUID_1E_1_EAX_AMX_MOVRS,
+ .features[FEAT_29_0_EBX] = CPUID_29_0_EBX_APX_NCI_NDD_NF,
+ /*
+ * Though this bit will be set when avx10_version=2, it's better to
+ * explicitly enumerate this feature here.
+ */
+ .features[FEAT_24_1_ECX] = CPUID_24_1_ECX_AVX10_VNNI_INT,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM | CPUID_EXT3_ABM | CPUID_EXT3_3DNOWPREFETCH,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB |
+ CPUID_EXT2_RDTSCP | CPUID_EXT2_LM,
+ .features[FEAT_8000_0008_EBX] = CPUID_8000_0008_EBX_WBNOINVD,
+ /*
+ * Missing: ARCH_CAP_RRSBA (KVM bit 19), ARCH_CAP_RFDS_CLEAR (KVM bit
+ * 28), MCU_CONTROL (bit 9), MISC_PACKAGE_CTLS (bit 10),
+ * ENERGY_FILTERING_CTL (bit 11), DOITM (bit 12), MCU_ENUMERATION (bit
+ * 16), RRSBA (bit 19), XAPIC_DISABLE_STATUS (bit 21),
+ * OVERCLOCKING_STATUS (bit 23).
+ */
+ .features[FEAT_ARCH_CAPABILITIES] =
+ MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL |
+ MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO |
+ MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO |
+ MSR_ARCH_CAP_SBDR_SSDP_NO | MSR_ARCH_CAP_FBSDP_NO |
+ MSR_ARCH_CAP_PSDP_NO | MSR_ARCH_CAP_BHI_NO |
+ MSR_ARCH_CAP_PBRSB_NO | MSR_ARCH_CAP_GDS_NO |
+ MSR_ARCH_CAP_RFDS_NO,
+ .features[FEAT_VMX_BASIC] =
+ MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS |
+ MSR_VMX_BASIC_NESTED_EXCEPTION,
+ .features[FEAT_VMX_ENTRY_CTLS] =
+ VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_IA32E_MODE |
+ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_IA32_EFER |
+ VMX_VM_ENTRY_LOAD_CET | VMX_VM_ENTRY_LOAD_IA32_FRED,
+ .features[FEAT_VMX_EPT_VPID_CAPS] =
+ MSR_VMX_EPT_EXECONLY |
+ MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_PAGE_WALK_LENGTH_5 |
+ MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB |
+ MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_AD_BITS |
+ MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT |
+ MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
+ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
+ .features[FEAT_VMX_EXIT_CTLS] =
+ VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE |
+ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_IA32_PAT |
+ VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
+ VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER |
+ VMX_VM_EXIT_SAVE_CET | VMX_VM_EXIT_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_MISC] =
+ MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_ACTIVITY_HLT |
+ MSR_VMX_MISC_ACTIVITY_SHUTDOWN | MSR_VMX_MISC_ACTIVITY_WAIT_SIPI |
+ MSR_VMX_MISC_VMWRITE_VMEXIT,
+ .features[FEAT_VMX_PINBASED_CTLS] =
+ VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING |
+ VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER |
+ VMX_PIN_BASED_POSTED_INTR,
+ .features[FEAT_VMX_PROCBASED_CTLS] =
+ VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
+ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
+ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
+ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
+ VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
+ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
+ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_VIRTUAL_NMI_PENDING |
+ VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
+ VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_TRAP_FLAG |
+ VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
+ VMX_CPU_BASED_PAUSE_EXITING |
+ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
+ .features[FEAT_VMX_SECONDARY_CTLS] =
+ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC |
+ VMX_SECONDARY_EXEC_RDTSCP |
+ VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_WBINVD_EXITING |
+ VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
+ VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+ VMX_SECONDARY_EXEC_RDRAND_EXITING |
+ VMX_SECONDARY_EXEC_ENABLE_INVPCID |
+ VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
+ VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML |
+ VMX_SECONDARY_EXEC_XSAVES | VMX_SECONDARY_EXEC_TSC_SCALING |
+ VMX_SECONDARY_EXEC_ENABLE_USER_WAIT_PAUSE,
+ .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Xeon Processor (DiamondRapids)",
+ },
+ {
.name = "SierraForest",
.level = 0x23,
.vendor = CPUID_VENDOR_INTEL,
@@ -4571,6 +5878,34 @@ static const X86CPUDefinition builtin_x86_defs[] = {
{ /* end of list */ }
}
},
+ {
+ .version = 3,
+ .note = "with srf-sp cache model and 0x1f leaf",
+ .cache_info = &xeon_srf_cache_info,
+ .props = (PropValue[]) {
+ { "x-force-cpuid-0x1f", "on" },
+ { /* end of list */ },
+ }
+ },
+ {
+ .version = 4,
+ .note = "with cet-ss and cet-ibt",
+ .props = (PropValue[]) {
+ { "cet-ss", "on" },
+ { "cet-ibt", "on" },
+ { "vmx-exit-save-cet", "on" },
+ { "vmx-entry-load-cet", "on" },
+ { /* end of list */ },
+ }
+ },
+ {
+ .version = 5,
+ .note = "with its-no",
+ .props = (PropValue[]) {
+ { "its-no", "on" },
+ { /* end of list */ },
+ }
+ },
{ /* end of list */ },
},
},
@@ -4706,6 +6041,27 @@ static const X86CPUDefinition builtin_x86_defs[] = {
.model_id = "Intel Xeon Processor (ClearwaterForest)",
.versions = (X86CPUVersionDefinition[]) {
{ .version = 1 },
+ {
+ .version = 2,
+ .note = "with cet-ss and cet-ibt",
+ .props = (PropValue[]) {
+ { "cet-ss", "on" },
+ { "cet-ibt", "on" },
+ { "vmx-exit-save-cet", "on" },
+ { "vmx-entry-load-cet", "on" },
+ { /* end of list */ },
+ }
+ },
+ {
+ .version = 3,
+ .note = "with its-no, cwf-ap cache model and 0x1f leaf",
+ .cache_info = &xeon_cwf_cache_info,
+ .props = (PropValue[]) {
+ { "its-no", "on" },
+ { "x-force-cpuid-0x1f", "on" },
+ { /* end of list */ },
+ }
+ },
{ /* end of list */ },
},
},
@@ -5210,6 +6566,25 @@ static const X86CPUDefinition builtin_x86_defs[] = {
},
.cache_info = &epyc_v4_cache_info
},
+ {
+ .version = 5,
+ .props = (PropValue[]) {
+ { "overflow-recov", "on" },
+ { "succor", "on" },
+ { "lbrv", "on" },
+ { "tsc-scale", "on" },
+ { "vmcb-clean", "on" },
+ { "flushbyasid", "on" },
+ { "pause-filter", "on" },
+ { "pfthreshold", "on" },
+ { "v-vmsave-vmload", "on" },
+ { "vgif", "on" },
+ { "model-id",
+ "AMD EPYC-v5 Processor" },
+ { /* end of list */ }
+ },
+ .cache_info = &epyc_v5_cache_info
+ },
{ /* end of list */ }
}
},
@@ -5348,6 +6723,25 @@ static const X86CPUDefinition builtin_x86_defs[] = {
{ /* end of list */ }
},
},
+ {
+ .version = 5,
+ .props = (PropValue[]) {
+ { "overflow-recov", "on" },
+ { "succor", "on" },
+ { "lbrv", "on" },
+ { "tsc-scale", "on" },
+ { "vmcb-clean", "on" },
+ { "flushbyasid", "on" },
+ { "pause-filter", "on" },
+ { "pfthreshold", "on" },
+ { "v-vmsave-vmload", "on" },
+ { "vgif", "on" },
+ { "model-id",
+ "AMD EPYC-Rome-v5 Processor" },
+ { /* end of list */ }
+ },
+ .cache_info = &epyc_rome_v5_cache_info
+ },
{ /* end of list */ }
}
},
@@ -5423,6 +6817,25 @@ static const X86CPUDefinition builtin_x86_defs[] = {
},
.cache_info = &epyc_milan_v2_cache_info
},
+ {
+ .version = 3,
+ .props = (PropValue[]) {
+ { "overflow-recov", "on" },
+ { "succor", "on" },
+ { "lbrv", "on" },
+ { "tsc-scale", "on" },
+ { "vmcb-clean", "on" },
+ { "flushbyasid", "on" },
+ { "pause-filter", "on" },
+ { "pfthreshold", "on" },
+ { "v-vmsave-vmload", "on" },
+ { "vgif", "on" },
+ { "model-id",
+ "AMD EPYC-Milan-v3 Processor" },
+ { /* end of list */ }
+ },
+ .cache_info = &epyc_milan_v3_cache_info
+ },
{ /* end of list */ }
}
},
@@ -5497,6 +6910,31 @@ static const X86CPUDefinition builtin_x86_defs[] = {
.xlevel = 0x80000022,
.model_id = "AMD EPYC-Genoa Processor",
.cache_info = &epyc_genoa_cache_info,
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ {
+ .version = 2,
+ .props = (PropValue[]) {
+ { "overflow-recov", "on" },
+ { "succor", "on" },
+ { "lbrv", "on" },
+ { "tsc-scale", "on" },
+ { "vmcb-clean", "on" },
+ { "flushbyasid", "on" },
+ { "pause-filter", "on" },
+ { "pfthreshold", "on" },
+ { "v-vmsave-vmload", "on" },
+ { "vgif", "on" },
+ { "fs-gs-base-ns", "on" },
+ { "perfmon-v2", "on" },
+ { "model-id",
+ "AMD EPYC-Genoa-v2 Processor" },
+ { /* end of list */ }
+ },
+ .cache_info = &epyc_genoa_v2_cache_info
+ },
+ { /* end of list */ }
+ }
},
{
.name = "YongFeng",
@@ -5631,9 +7069,101 @@ static const X86CPUDefinition builtin_x86_defs[] = {
{ /* end of list */ }
}
},
+ {
+ .version = 3,
+ .note = "with the cache model and 0x1f leaf",
+ .cache_info = &yongfeng_cache_info,
+ .props = (PropValue[]) {
+ { "x-force-cpuid-0x1f", "on" },
+ { /* end of list */ },
+ }
+ },
{ /* end of list */ }
}
},
+ {
+ .name = "EPYC-Turin",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 26,
+ .model = 0,
+ .stepping = 0,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
+ CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
+ CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_PCID | CPUID_EXT_CX16 | CPUID_EXT_FMA |
+ CPUID_EXT_SSSE3 | CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ |
+ CPUID_EXT_SSE3,
+ .features[FEAT_1_EDX] =
+ CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
+ CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
+ CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
+ CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
+ CPUID_VME | CPUID_FP87,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
+ CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
+ CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_AVX512F |
+ CPUID_7_0_EBX_AVX512DQ | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_AVX512IFMA |
+ CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB |
+ CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_SHA_NI |
+ CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL,
+ .features[FEAT_7_0_ECX] =
+ CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
+ CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
+ CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
+ CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
+ CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57 |
+ CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_MOVDIRI |
+ CPUID_7_0_ECX_MOVDIR64B,
+ .features[FEAT_7_0_EDX] =
+ CPUID_7_0_EDX_FSRM | CPUID_7_0_EDX_AVX512_VP2INTERSECT,
+ .features[FEAT_7_1_EAX] =
+ CPUID_7_1_EAX_AVX_VNNI | CPUID_7_1_EAX_AVX512_BF16,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
+ CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
+ CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
+ CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
+ CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0007_EBX] =
+ CPUID_8000_0007_EBX_OVERFLOW_RECOV | CPUID_8000_0007_EBX_SUCCOR,
+ .features[FEAT_8000_0008_EBX] =
+ CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR |
+ CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB |
+ CPUID_8000_0008_EBX_IBRS | CPUID_8000_0008_EBX_STIBP |
+ CPUID_8000_0008_EBX_STIBP_ALWAYS_ON |
+ CPUID_8000_0008_EBX_AMD_SSBD | CPUID_8000_0008_EBX_AMD_PSFD,
+ .features[FEAT_8000_0021_EAX] =
+ CPUID_8000_0021_EAX_NO_NESTED_DATA_BP |
+ CPUID_8000_0021_EAX_FS_GS_BASE_NS |
+ CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING |
+ CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE |
+ CPUID_8000_0021_EAX_AUTO_IBRS | CPUID_8000_0021_EAX_PREFETCHI |
+ CPUID_8000_0021_EAX_SBPB | CPUID_8000_0021_EAX_IBPB_BRTYPE |
+ CPUID_8000_0021_EAX_SRSO_USER_KERNEL_NO,
+ .features[FEAT_8000_0022_EAX] =
+ CPUID_8000_0022_EAX_PERFMON_V2,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES,
+ .features[FEAT_SVM] =
+ CPUID_SVM_NPT | CPUID_SVM_LBRV | CPUID_SVM_NRIPSAVE |
+ CPUID_SVM_TSCSCALE | CPUID_SVM_VMCBCLEAN | CPUID_SVM_FLUSHASID |
+ CPUID_SVM_PAUSEFILTER | CPUID_SVM_PFTHRESHOLD |
+ CPUID_SVM_V_VMSAVE_VMLOAD | CPUID_SVM_VGIF |
+ CPUID_SVM_VNMI | CPUID_SVM_SVME_ADDR_CHK,
+ .xlevel = 0x80000022,
+ .model_id = "AMD EPYC-Turin Processor",
+ .cache_info = &epyc_turin_cache_info,
+ },
};
/*
@@ -5701,13 +7231,14 @@ static void max_x86_cpu_realize(DeviceState *dev, Error **errp)
x86_cpu_realizefn(dev, errp);
}
-static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
+static void max_x86_cpu_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
X86CPUClass *xcc = X86_CPU_CLASS(oc);
xcc->ordering = 9;
+ xcc->max_features = true;
xcc->model_description =
"Enables all features supported by the accelerator in the current host";
@@ -5718,22 +7249,21 @@ static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
static void max_x86_cpu_initfn(Object *obj)
{
X86CPU *cpu = X86_CPU(obj);
-
- /* We can't fill the features array here because we don't know yet if
- * "migratable" is true or false.
- */
- cpu->max_features = true;
- object_property_set_bool(OBJECT(cpu), "pmu", true, &error_abort);
+ CPUX86State *env = &cpu->env;
/*
- * these defaults are used for TCG and all other accelerators
- * besides KVM and HVF, which overwrite these values
+ * these defaults are used for TCG, other accelerators have overwritten
+ * these values
*/
- object_property_set_str(OBJECT(cpu), "vendor", CPUID_VENDOR_AMD,
- &error_abort);
- object_property_set_str(OBJECT(cpu), "model-id",
- "QEMU TCG CPU version " QEMU_HW_VERSION,
- &error_abort);
+ if (!env->cpuid_vendor1) {
+ object_property_set_str(OBJECT(cpu), "vendor", CPUID_VENDOR_AMD,
+ &error_abort);
+ }
+ if (!env->cpuid_model[0]) {
+ object_property_set_str(OBJECT(cpu), "model-id",
+ "QEMU TCG CPU version " QEMU_HW_VERSION,
+ &error_abort);
+ }
}
static const TypeInfo max_x86_cpu_type_info = {
@@ -5743,7 +7273,7 @@ static const TypeInfo max_x86_cpu_type_info = {
.class_init = max_x86_cpu_class_init,
};
-static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
+static char *feature_word_description(FeatureWordInfo *f)
{
assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
@@ -5752,11 +7282,15 @@ static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
{
const char *reg = get_register_name_32(f->cpuid.reg);
assert(reg);
- return g_strdup_printf("CPUID.%02XH:%s",
- f->cpuid.eax, reg);
+ if (!f->cpuid.needs_ecx) {
+ return g_strdup_printf("CPUID[eax=%02Xh].%s", f->cpuid.eax, reg);
+ } else {
+ return g_strdup_printf("CPUID[eax=%02Xh,ecx=%02Xh].%s",
+ f->cpuid.eax, f->cpuid.ecx, reg);
+ }
}
case MSR_FEATURE_WORD:
- return g_strdup_printf("MSR(%02XH)",
+ return g_strdup_printf("MSR(%02Xh)",
f->msr.index);
}
@@ -5776,12 +7310,13 @@ static bool x86_cpu_have_filtered_features(X86CPU *cpu)
return false;
}
-static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
- const char *verbose_prefix)
+void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix)
{
CPUX86State *env = &cpu->env;
FeatureWordInfo *f = &feature_word_info[w];
int i;
+ g_autofree char *feat_word_str = feature_word_description(f);
if (!cpu->force_features) {
env->features[w] &= ~mask;
@@ -5794,7 +7329,35 @@ static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
for (i = 0; i < 64; ++i) {
if ((1ULL << i) & mask) {
- g_autofree char *feat_word_str = feature_word_description(f, i);
+ warn_report("%s: %s%s%s [bit %d]",
+ verbose_prefix,
+ feat_word_str,
+ f->feat_names[i] ? "." : "",
+ f->feat_names[i] ? f->feat_names[i] : "", i);
+ }
+ }
+}
+
+void mark_forced_on_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix)
+{
+ CPUX86State *env = &cpu->env;
+ FeatureWordInfo *f = &feature_word_info[w];
+ int i;
+
+ if (!cpu->force_features) {
+ env->features[w] |= mask;
+ }
+
+ cpu->forced_on_features[w] |= mask;
+
+ if (!verbose_prefix) {
+ return;
+ }
+
+ for (i = 0; i < 64; ++i) {
+ if ((1ULL << i) & mask) {
+ g_autofree char *feat_word_str = feature_word_description(f);
warn_report("%s: %s%s%s [bit %d]",
verbose_prefix,
feat_word_str,
@@ -5812,10 +7375,7 @@ static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
CPUX86State *env = &cpu->env;
uint64_t value;
- value = (env->cpuid_version >> 8) & 0xf;
- if (value == 0xf) {
- value += (env->cpuid_version >> 20) & 0xff;
- }
+ value = x86_cpu_family(env->cpuid_version);
visit_type_uint64(v, name, &value, errp);
}
@@ -5853,8 +7413,7 @@ static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
CPUX86State *env = &cpu->env;
uint64_t value;
- value = (env->cpuid_version >> 4) & 0xf;
- value |= ((env->cpuid_version >> 16) & 0xf) << 4;
+ value = x86_cpu_model(env->cpuid_version);
visit_type_uint64(v, name, &value, errp);
}
@@ -5888,7 +7447,7 @@ static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
CPUX86State *env = &cpu->env;
uint64_t value;
- value = env->cpuid_version & 0xf;
+ value = x86_cpu_stepping(env->cpuid_version);
visit_type_uint64(v, name, &value, errp);
}
@@ -5956,11 +7515,11 @@ static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
char *value;
int i;
- value = g_malloc(48 + 1);
- for (i = 0; i < 48; i++) {
+ value = g_malloc(CPUID_MODEL_ID_SZ + 1);
+ for (i = 0; i < CPUID_MODEL_ID_SZ; i++) {
value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
}
- value[48] = '\0';
+ value[CPUID_MODEL_ID_SZ] = '\0';
return value;
}
@@ -5975,7 +7534,7 @@ static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
model_id = "";
}
len = strlen(model_id);
- memset(env->cpuid_model, 0, 48);
+ memset(env->cpuid_model, 0, CPUID_MODEL_ID_SZ);
for (i = 0; i < 48; i++) {
if (i >= len) {
c = '\0';
@@ -6015,6 +7574,65 @@ static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
+static void x86_cpuid_get_avx10_version(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ uint8_t value;
+
+ value = cpu->env.avx10_version;
+ visit_type_uint8(v, name, &value, errp);
+}
+
+static bool x86_cpu_apply_avx10_features(X86CPU *cpu, uint8_t version,
+ Error **errp)
+{
+ const AVX10VersionDefinition *def;
+ CPUX86State *env = &cpu->env;
+
+ if (!version) {
+ env->avx10_version = 0;
+ env->avx10_max_subleaf = 0;
+ return true;
+ }
+
+ for (int i = 0; i < ARRAY_SIZE(builtin_avx10_defs); i++) {
+ FeatureMask *f;
+
+ def = &builtin_avx10_defs[i];
+ for (f = def->features; f && f->mask; f++) {
+ env->features[f->index] |= f->mask;
+ }
+
+ if (def->version == version) {
+ env->avx10_version = version;
+ env->avx10_max_subleaf = def->max_subleaf;
+ break;
+ }
+ }
+
+ if (def->version < version) {
+ error_setg(errp, "avx10-version can be at most %d", def->version);
+ return false;
+ }
+ return true;
+}
+
+static void x86_cpuid_set_avx10_version(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ uint8_t value;
+
+ if (!visit_type_uint8(v, name, &value, errp)) {
+ return;
+ }
+
+ x86_cpu_apply_avx10_features(cpu, value, errp);
+}
+
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
const char *name, void *opaque,
@@ -6065,16 +7683,24 @@ static inline void feat2prop(char *s)
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
const char *name;
- /* XSAVE components are automatically enabled by other features,
+ /*
+ * XSAVE components are automatically enabled by other features,
* so return the original feature name instead
*/
if (w == FEAT_XSAVE_XCR0_LO || w == FEAT_XSAVE_XCR0_HI) {
int comp = (w == FEAT_XSAVE_XCR0_HI) ? bitnr + 32 : bitnr;
- if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
- x86_ext_save_areas[comp].bits) {
- w = x86_ext_save_areas[comp].feature;
- bitnr = ctz32(x86_ext_save_areas[comp].bits);
+ if (comp < ARRAY_SIZE(x86_ext_save_areas)) {
+ /*
+ * Present the first feature as the default.
+ * FIXME: select and present the one which is actually enabled
+ * among multiple dependencies.
+ */
+ const FeatureMask *fm = &x86_ext_save_areas[comp].features[0];
+ if (fm->mask) {
+ w = fm->index;
+ bitnr = ctz32(fm->mask);
+ }
}
}
@@ -6238,7 +7864,7 @@ static void listflags(GList *features)
}
/* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
-static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
+static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b, gpointer d)
{
ObjectClass *class_a = (ObjectClass *)a;
ObjectClass *class_b = (ObjectClass *)b;
@@ -6259,7 +7885,7 @@ static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
static GSList *get_sorted_cpu_model_list(void)
{
GSList *list = object_class_get_list(TYPE_X86_CPU, false);
- list = g_slist_sort(list, x86_cpu_list_compare);
+ list = g_slist_sort_with_data(list, x86_cpu_list_compare, NULL);
return list;
}
@@ -6316,8 +7942,13 @@ static void x86_cpu_list_entry(gpointer data, gpointer user_data)
qemu_printf(" %-20s %s\n", name, desc);
}
+static gint strcmp_wrap(gconstpointer a, gconstpointer b, gpointer d)
+{
+ return strcmp(a, b);
+}
+
/* list available CPU models and flags */
-void x86_cpu_list(void)
+static void x86_cpu_list(void)
{
int i, j;
GSList *list;
@@ -6338,7 +7969,7 @@ void x86_cpu_list(void)
}
}
- names = g_list_sort(names, (GCompareFunc)strcmp);
+ names = g_list_sort_with_data(names, strcmp_wrap, NULL);
qemu_printf("\nRecognized CPUID flags:\n");
listflags(names);
@@ -6424,6 +8055,13 @@ CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
#endif /* !CONFIG_USER_ONLY */
+static uint8_t x86_cpu_get_host_avx10_version(void)
+{
+ uint32_t eax, ebx, ecx, edx;
+ x86_cpu_get_supported_cpuid(0x24, 0, &eax, &ebx, &ecx, &edx);
+ return ebx & 0xff;
+}
+
uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w)
{
FeatureWordInfo *wi = &feature_word_info[w];
@@ -6491,6 +8129,20 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w)
#endif
break;
+ case FEAT_7_0_EDX:
+ /*
+ * Windows does not like ARCH_CAPABILITIES on AMD machines at all.
+ * Do not show the fake ARCH_CAPABILITIES MSR that KVM sets up,
+ * except if needed for migration.
+ *
+ * When arch_cap_always_on is removed, this tweak can move to
+ * kvm_arch_get_supported_cpuid.
+ */
+ if (cpu && IS_AMD_CPU(&cpu->env) && !cpu->arch_cap_always_on) {
+ unavail = CPUID_7_0_EDX_ARCH_CAPABILITIES;
+ }
+ break;
+
default:
break;
}
@@ -6678,9 +8330,15 @@ static void x86_cpu_load_model(X86CPU *cpu, const X86CPUModel *model)
*/
object_property_set_str(OBJECT(cpu), "vendor", def->vendor, &error_abort);
- object_property_set_uint(OBJECT(cpu), "avx10-version", def->avx10_version,
- &error_abort);
+ if (def->avx10_version) {
+ object_property_set_uint(OBJECT(cpu), "avx10-version",
+ def->avx10_version, &error_abort);
+ }
+ if (def->cpuid_0x1f) {
+ object_property_set_bool(OBJECT(cpu), "x-force-cpuid-0x1f",
+ def->cpuid_0x1f, &error_abort);
+ }
x86_cpu_apply_version_props(cpu, model);
/*
@@ -6700,7 +8358,7 @@ static const gchar *x86_gdb_arch_name(CPUState *cs)
#endif
}
-static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
+static void x86_cpu_cpudef_class_init(ObjectClass *oc, const void *data)
{
const X86CPUModel *model = data;
X86CPUClass *xcc = X86_CPU_CLASS(oc);
@@ -6830,14 +8488,39 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
}
*edx = env->features[FEAT_1_EDX];
if (threads_per_pkg > 1) {
- *ebx |= threads_per_pkg << 16;
+ uint32_t num;
+
+ /*
+ * For CPUID.01H.EBX[Bits 23-16], AMD requires logical processor
+ * count, but Intel needs maximum number of addressable IDs for
+ * logical processors per package.
+ */
+ if ((IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env))) {
+ num = 1 << apicid_pkg_offset(topo_info);
+ } else {
+ num = threads_per_pkg;
+ }
+
+ /* Fixup overflow: max value for bits 23-16 is 255. */
+ *ebx |= MIN(num, 255) << 16;
}
- if (!cpu->enable_pmu) {
- *ecx &= ~CPUID_EXT_PDCM;
+ if (cpu->pdcm_on_even_without_pmu) {
+ if (!cpu->enable_pmu) {
+ *ecx &= ~CPUID_EXT_PDCM;
+ }
}
break;
- case 2:
- /* cache info: needed for Pentium Pro compatibility */
+ case 2: { /* cache info: needed for Pentium Pro compatibility */
+ const CPUCaches *caches;
+
+ if (env->enable_legacy_cpuid2_cache) {
+ caches = &legacy_intel_cpuid2_cache_info;
+ } else if (env->enable_legacy_vendor_cache) {
+ caches = &legacy_intel_cache_info;
+ } else {
+ caches = &env->cache_info;
+ }
+
if (cpu->cache_info_passthrough) {
x86_cpu_get_cache_cpuid(index, 0, eax, ebx, ecx, edx);
break;
@@ -6845,18 +8528,18 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*eax = *ebx = *ecx = *edx = 0;
break;
}
- *eax = 1; /* Number of CPUID[EAX=2] calls required */
- *ebx = 0;
- if (!cpu->enable_l3_cache) {
- *ecx = 0;
+ encode_cache_cpuid2(cpu, caches, eax, ebx, ecx, edx);
+ break;
+ }
+ case 4: {
+ const CPUCaches *caches;
+
+ if (env->enable_legacy_vendor_cache) {
+ caches = &legacy_intel_cache_info;
} else {
- *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
+ caches = &env->cache_info;
}
- *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
- (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
- (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
- break;
- case 4:
+
/* cache info: needed for Core compatibility */
if (cpu->cache_info_passthrough) {
x86_cpu_get_cache_cpuid(index, count, eax, ebx, ecx, edx);
@@ -6868,13 +8551,13 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
int host_vcpus_per_cache = 1 + ((*eax & 0x3FFC000) >> 14);
*eax &= ~0xFC000000;
- *eax |= max_core_ids_in_package(topo_info) << 26;
+ *eax |= MIN(max_core_ids_in_package(topo_info), 63) << 26;
if (host_vcpus_per_cache > threads_per_pkg) {
*eax &= ~0x3FFC000;
/* Share the cache at package level. */
- *eax |= max_thread_ids_for_cache(topo_info,
- CPU_TOPOLOGY_LEVEL_SOCKET) << 14;
+ *eax |= MIN(max_thread_ids_for_cache(topo_info,
+ CPU_TOPOLOGY_LEVEL_SOCKET), 4095) << 14;
}
}
} else if (cpu->vendor_cpuid_only && IS_AMD_CPU(env)) {
@@ -6884,30 +8567,26 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
switch (count) {
case 0: /* L1 dcache info */
- encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
- topo_info,
+ encode_cache_cpuid4(caches->l1d_cache, topo_info,
eax, ebx, ecx, edx);
if (!cpu->l1_cache_per_core) {
*eax &= ~MAKE_64BIT_MASK(14, 12);
}
break;
case 1: /* L1 icache info */
- encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
- topo_info,
+ encode_cache_cpuid4(caches->l1i_cache, topo_info,
eax, ebx, ecx, edx);
if (!cpu->l1_cache_per_core) {
*eax &= ~MAKE_64BIT_MASK(14, 12);
}
break;
case 2: /* L2 cache info */
- encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
- topo_info,
+ encode_cache_cpuid4(caches->l2_cache, topo_info,
eax, ebx, ecx, edx);
break;
case 3: /* L3 cache info */
if (cpu->enable_l3_cache) {
- encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
- topo_info,
+ encode_cache_cpuid4(caches->l3_cache, topo_info,
eax, ebx, ecx, edx);
break;
}
@@ -6918,6 +8597,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
}
}
break;
+ }
case 5:
/* MONITOR/MWAIT Leaf */
*eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
@@ -6945,9 +8625,9 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
} else if (count == 1) {
*eax = env->features[FEAT_7_1_EAX];
+ *ecx = env->features[FEAT_7_1_ECX];
*edx = env->features[FEAT_7_1_EDX];
*ebx = 0;
- *ecx = 0;
} else if (count == 2) {
*edx = env->features[FEAT_7_2_EDX];
*eax = 0;
@@ -7008,21 +8688,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
assert(!(*eax & ~0x1f));
*ebx &= 0xffff; /* The count doesn't need to be reliable. */
break;
- case 0x1C:
- if (cpu->enable_pmu && (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
- x86_cpu_get_supported_cpuid(0x1C, 0, eax, ebx, ecx, edx);
- *edx = 0;
- }
- break;
- case 0x1F:
- /* V2 Extended Topology Enumeration Leaf */
- if (!x86_has_extended_topo(env->avail_cpu_topo)) {
- *eax = *ebx = *ecx = *edx = 0;
- break;
- }
-
- encode_topo_cpuid1f(env, count, topo_info, eax, ebx, ecx, edx);
- break;
case 0xD: {
/* Processor Extended State */
*eax = 0;
@@ -7052,29 +8717,12 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*ebx = xsave_area_size(xstate, true);
*ecx = env->features[FEAT_XSAVE_XSS_LO];
*edx = env->features[FEAT_XSAVE_XSS_HI];
- if (kvm_enabled() && cpu->enable_pmu &&
- (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR) &&
- (*eax & CPUID_XSAVE_XSAVES)) {
- *ecx |= XSTATE_ARCH_LBR_MASK;
- } else {
- *ecx &= ~XSTATE_ARCH_LBR_MASK;
- }
- } else if (count == 0xf && cpu->enable_pmu
- && (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
- x86_cpu_get_supported_cpuid(0xD, count, eax, ebx, ecx, edx);
} else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
const ExtSaveArea *esa = &x86_ext_save_areas[count];
- if (x86_cpu_xsave_xcr0_components(cpu) & (1ULL << count)) {
- *eax = esa->size;
- *ebx = esa->offset;
- *ecx = esa->ecx &
- (ESA_FEATURE_ALIGN64_MASK | ESA_FEATURE_XFD_MASK);
- } else if (x86_cpu_xsave_xss_components(cpu) & (1ULL << count)) {
- *eax = esa->size;
- *ebx = 0;
- *ecx = 1;
- }
+ *eax = esa->size;
+ *ebx = esa->offset;
+ *ecx = esa->ecx;
}
break;
}
@@ -7163,8 +8811,19 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
}
break;
}
+ case 0x1C: /* Last Branch Records Information Leaf */
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ if (!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
+ break;
+ }
+ x86_cpu_get_supported_cpuid(0x1C, 0, eax, ebx, ecx, edx);
+ *edx = 0; /* EDX is reserved. */
+ break;
case 0x1D: {
- /* AMX TILE, for now hardcoded for Sapphire Rapids*/
+ /* AMX TILE */
*eax = 0;
*ebx = 0;
*ecx = 0;
@@ -7177,6 +8836,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
/* Highest numbered palette subleaf */
*eax = INTEL_AMX_TILE_MAX_SUBLEAF;
} else if (count == 1) {
+ /* Tile palette 1 */
*eax = INTEL_AMX_TOTAL_TILE_BYTES |
(INTEL_AMX_BYTES_PER_TILE << 16);
*ebx = INTEL_AMX_BYTES_PER_ROW | (INTEL_AMX_TILE_MAX_NAMES << 16);
@@ -7185,7 +8845,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
break;
}
case 0x1E: {
- /* AMX TMUL, for now hardcoded for Sapphire Rapids */
+ /* AMX TMUL */
*eax = 0;
*ebx = 0;
*ecx = 0;
@@ -7195,21 +8855,51 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
}
if (count == 0) {
+ uint32_t unused;
+ x86_cpu_get_supported_cpuid(0x1E, 0, eax, &unused,
+ &unused, &unused);
/* Highest numbered palette subleaf */
*ebx = INTEL_AMX_TMUL_MAX_K | (INTEL_AMX_TMUL_MAX_N << 8);
+ } else if (count == 1) {
+ *eax = env->features[FEAT_1E_1_EAX];
}
break;
}
+ case 0x1F:
+ /* V2 Extended Topology Enumeration Leaf */
+ if (!x86_has_cpuid_0x1f(cpu)) {
+ *eax = *ebx = *ecx = *edx = 0;
+ break;
+ }
+
+ encode_topo_cpuid1f(env, count, topo_info, eax, ebx, ecx, edx);
+ break;
case 0x24: {
*eax = 0;
*ebx = 0;
*ecx = 0;
*edx = 0;
- if ((env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) && count == 0) {
+
+ if (!(env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10)) {
+ break;
+ }
+ if (count == 0) {
+ *eax = env->avx10_max_subleaf;
*ebx = env->features[FEAT_24_0_EBX] | env->avx10_version;
+ } else if (count == 1) {
+ *ecx = env->features[FEAT_24_1_ECX];
}
break;
}
+ case 0x29:
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ if ((env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_APXF) && count == 0) {
+ *ebx = env->features[FEAT_29_0_EBX];
+ }
+ break;
case 0x40000000:
/*
* CPUID code in kvm_arch_init_vcpu() ignores stuff
@@ -7236,9 +8926,15 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
break;
case 0x80000000:
*eax = env->cpuid_xlevel;
- *ebx = env->cpuid_vendor1;
- *edx = env->cpuid_vendor2;
- *ecx = env->cpuid_vendor3;
+
+ if (cpu->vendor_cpuid_only_v2 &&
+ (IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env))) {
+ *ebx = *ecx = *edx = 0;
+ } else {
+ *ebx = env->cpuid_vendor1;
+ *edx = env->cpuid_vendor2;
+ *ecx = env->cpuid_vendor3;
+ }
break;
case 0x80000001:
*eax = env->cpuid_version;
@@ -7246,7 +8942,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*ecx = env->features[FEAT_8000_0001_ECX];
*edx = env->features[FEAT_8000_0001_EDX];
- if (tcg_enabled() && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 &&
+ if (tcg_enabled() && IS_INTEL_CPU(env) &&
!(env->hflags & HF_LMA_MASK)) {
*edx &= ~CPUID_EXT2_SYSCALL;
}
@@ -7259,41 +8955,78 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
*edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
break;
- case 0x80000005:
- /* cache info (L1 cache) */
+ case 0x80000005: {
+ /* cache info (L1 cache/TLB Associativity Field) */
+ const CPUCaches *caches;
+
+ if (env->enable_legacy_vendor_cache) {
+ caches = &legacy_amd_cache_info;
+ } else {
+ caches = &env->cache_info;
+ }
+
if (cpu->cache_info_passthrough) {
x86_cpu_get_cache_cpuid(index, 0, eax, ebx, ecx, edx);
break;
}
+
+ if (cpu->vendor_cpuid_only_v2 && IS_INTEL_CPU(env)) {
+ *eax = *ebx = *ecx = *edx = 0;
+ break;
+ }
+
*eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) |
(L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
*ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) |
(L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
- *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
- *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
+ *ecx = encode_cache_cpuid80000005(caches->l1d_cache);
+ *edx = encode_cache_cpuid80000005(caches->l1i_cache);
break;
- case 0x80000006:
- /* cache info (L2 cache) */
+ }
+ case 0x80000006: { /* cache info (L2 cache/TLB/L3 cache) */
+ const CPUCaches *caches;
+
+ if (env->enable_legacy_vendor_cache) {
+ caches = &legacy_amd_cache_info;
+ } else {
+ caches = &env->cache_info;
+ }
+
if (cpu->cache_info_passthrough) {
x86_cpu_get_cache_cpuid(index, 0, eax, ebx, ecx, edx);
break;
}
- *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) |
+
+ if (cpu->vendor_cpuid_only_v2 &&
+ (IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env))) {
+ *eax = *ebx = 0;
+ encode_cache_cpuid80000006(caches->l2_cache,
+ NULL, ecx, edx);
+ break;
+ }
+
+ *eax = (X86_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) |
(L2_DTLB_2M_ENTRIES << 16) |
- (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) |
+ (X86_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) |
(L2_ITLB_2M_ENTRIES);
- *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) |
+ *ebx = (X86_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) |
(L2_DTLB_4K_ENTRIES << 16) |
- (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) |
+ (X86_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) |
(L2_ITLB_4K_ENTRIES);
- encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
+
+ encode_cache_cpuid80000006(caches->l2_cache,
cpu->enable_l3_cache ?
- env->cache_info_amd.l3_cache : NULL,
+ caches->l3_cache : NULL,
ecx, edx);
break;
+ }
case 0x80000007:
*eax = 0;
- *ebx = env->features[FEAT_8000_0007_EBX];
+ if (cpu->vendor_cpuid_only_v2 && IS_INTEL_CPU(env)) {
+ *ebx = 0;
+ } else {
+ *ebx = env->features[FEAT_8000_0007_EBX];
+ }
*ecx = 0;
*edx = env->features[FEAT_8000_0007_EDX];
break;
@@ -7306,6 +9039,17 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*eax |= (cpu->guest_phys_bits << 16);
}
*ebx = env->features[FEAT_8000_0008_EBX];
+
+ /*
+ * Don't emulate Bits [7:0] & Bits [15:12] for Intel/Zhaoxin, since
+ * they're using 0x1f leaf.
+ */
+ if (cpu->vendor_cpuid_only_v2 &&
+ (IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env))) {
+ *ecx = *edx = 0;
+ break;
+ }
+
if (threads_per_pkg > 1) {
/*
* Bits 15:12 is "The number of bits in the initial
@@ -7341,19 +9085,19 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
}
switch (count) {
case 0: /* L1 dcache info */
- encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache,
+ encode_cache_cpuid8000001d(env->cache_info.l1d_cache,
topo_info, eax, ebx, ecx, edx);
break;
case 1: /* L1 icache info */
- encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache,
+ encode_cache_cpuid8000001d(env->cache_info.l1i_cache,
topo_info, eax, ebx, ecx, edx);
break;
case 2: /* L2 cache info */
- encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache,
+ encode_cache_cpuid8000001d(env->cache_info.l2_cache,
topo_info, eax, ebx, ecx, edx);
break;
case 3: /* L3 cache info */
- encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache,
+ encode_cache_cpuid8000001d(env->cache_info.l3_cache,
topo_info, eax, ebx, ecx, edx);
break;
default: /* end of info */
@@ -7374,6 +9118,22 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*edx = 0;
}
break;
+ case 0x8000001F:
+ *eax = *ebx = *ecx = *edx = 0;
+ if (sev_enabled()) {
+ *eax = 0x2;
+ *eax |= sev_es_enabled() ? 0x8 : 0;
+ *eax |= sev_snp_enabled() ? 0x10 : 0;
+ *ebx = sev_get_cbit_position() & 0x3f; /* EBX[5:0] */
+ *ebx |= (sev_get_reduced_phys_bits() & 0x3f) << 6; /* EBX[11:6] */
+ }
+ break;
+ case 0x80000021:
+ *eax = *ebx = *ecx = *edx = 0;
+ *eax = env->features[FEAT_8000_0021_EAX];
+ *ebx = env->features[FEAT_8000_0021_EBX];
+ *ecx = env->features[FEAT_8000_0021_ECX];
+ break;
case 0x80000022:
*eax = *ebx = *ecx = *edx = 0;
/* AMD Extended Performance Monitoring and Debug */
@@ -7406,21 +9166,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*ecx = 0;
*edx = 0;
break;
- case 0x8000001F:
- *eax = *ebx = *ecx = *edx = 0;
- if (sev_enabled()) {
- *eax = 0x2;
- *eax |= sev_es_enabled() ? 0x8 : 0;
- *eax |= sev_snp_enabled() ? 0x10 : 0;
- *ebx = sev_get_cbit_position() & 0x3f; /* EBX[5:0] */
- *ebx |= (sev_get_reduced_phys_bits() & 0x3f) << 6; /* EBX[11:6] */
- }
- break;
- case 0x80000021:
- *eax = *ebx = *ecx = *edx = 0;
- *eax = env->features[FEAT_8000_0021_EAX];
- *ebx = env->features[FEAT_8000_0021_EBX];
- break;
default:
/* reserved values: zero */
*eax = 0;
@@ -7448,12 +9193,10 @@ static bool cpuid_has_xsave_feature(CPUX86State *env, const ExtSaveArea *esa)
return false;
}
- if (env->features[esa->feature] & esa->bits) {
- return true;
- }
- if (esa->feature == FEAT_7_0_EBX && esa->bits == CPUID_7_0_EBX_AVX512F
- && (env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10)) {
- return true;
+ for (int i = 0; i < ARRAY_SIZE(esa->features); i++) {
+ if (env->features[esa->features[i].index] & esa->features[i].mask) {
+ return true;
+ }
}
return false;
@@ -7494,7 +9237,11 @@ static void x86_cpu_reset_hold(Object *obj, ResetType type)
env->idt.limit = 0xffff;
env->gdt.limit = 0xffff;
+#if defined(CONFIG_USER_ONLY)
+ env->ldt.limit = 0;
+#else
env->ldt.limit = 0xffff;
+#endif
env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
env->tr.limit = 0xffff;
env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
@@ -7611,6 +9358,12 @@ static void x86_cpu_reset_hold(Object *obj, ResetType type)
cs->halted = !cpu_is_bsp(cpu);
+#if defined(CONFIG_IGVM)
+ if (cpu_is_bsp(cpu)) {
+ qigvm_x86_bsp_reset(env);
+ }
+#endif
+
if (kvm_enabled()) {
kvm_arch_reset_vcpu(cpu);
}
@@ -7630,7 +9383,7 @@ void x86_cpu_after_reset(X86CPU *cpu)
}
if (cpu->apic_state) {
- device_cold_reset(cpu->apic_state);
+ device_cold_reset(DEVICE(cpu->apic_state));
}
#endif
}
@@ -7640,7 +9393,7 @@ static void mce_init(X86CPU *cpu)
CPUX86State *cenv = &cpu->env;
unsigned int bank;
- if (((cenv->cpuid_version >> 8) & 0xf) >= 6
+ if (x86_cpu_family(cenv->cpuid_version) >= 6
&& (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
(CPUID_MCE | CPUID_MCA)) {
cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
@@ -7708,6 +9461,12 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu)
mask = 0;
for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+ /* Skip supervisor states if XSAVES is not supported. */
+ if (CPUID_XSTATE_XSS_MASK & (1 << i) &&
+ !(env->features[FEAT_XSAVE] & CPUID_XSAVE_XSAVES)) {
+ continue;
+ }
+
const ExtSaveArea *esa = &x86_ext_save_areas[i];
if (cpuid_has_xsave_feature(env, esa)) {
mask |= (1ULL << i);
@@ -7768,6 +9527,7 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu)
*/
void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
+ X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
CPUX86State *env = &cpu->env;
FeatureWord w;
int i;
@@ -7787,12 +9547,12 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
}
}
- /*TODO: Now cpu->max_features doesn't overwrite features
+ /* TODO: Now xcc->max_features doesn't overwrite features
* set using QOM properties, and we can convert
* plus_features & minus_features to global properties
* inside x86_cpu_parse_featurestr() too.
*/
- if (cpu->max_features) {
+ if (xcc->max_features) {
for (w = 0; w < FEATURE_WORDS; w++) {
/* Override only features that weren't set explicitly
* by the user.
@@ -7804,9 +9564,12 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
}
if ((env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) && !env->avx10_version) {
- uint32_t eax, ebx, ecx, edx;
- x86_cpu_get_supported_cpuid(0x24, 0, &eax, &ebx, &ecx, &edx);
- env->avx10_version = ebx & 0xff;
+ uint8_t version = x86_cpu_get_host_avx10_version();
+
+ if (!object_property_set_uint(OBJECT(cpu), "avx10-version",
+ version, errp)) {
+ return;
+ }
}
}
@@ -7824,6 +9587,15 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
}
}
+ if (!cpu->enable_pmu) {
+ /* PDCM is fixed1 bit for TDX */
+ if (!cpu->pdcm_on_even_without_pmu && !is_tdx_vm()) {
+ env->features[FEAT_1_ECX] &= ~CPUID_EXT_PDCM;
+ }
+
+ env->features[FEAT_7_0_EDX] &= ~CPUID_7_0_EDX_ARCH_LBR;
+ }
+
for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
FeatureDep *d = &feature_dependencies[i];
if (!(env->features[d->from.index] & d->from.mask)) {
@@ -7842,72 +9614,89 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
env->features[FEAT_KVM] = 0;
}
+ /*
+ * Since Intel MPX had been previously deprecated, APX re-purposes the
+ * 128-byte XSAVE area that had been previously allocated by MPX (state
+ * component indices 3 and 4, making up a 128-byte area located at an
+ * offset of 960 bytes into an un-compacted XSAVE buffer), as a single
+ * state component housing 128-bytes of storage for EGPRs (8-bytes * 16
+ * registers).
+ *
+ * Check the conflict between MPX and APX before initializing xsave
+ * components.
+ */
+ if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_MPX) &&
+ (env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_APXF)) {
+ mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_MPX,
+ "this feature is conflict with APX");
+ mark_unavailable_features(cpu, FEAT_7_1_EDX, CPUID_7_1_EDX_APXF,
+ "this feature is conflict with MPX");
+ }
+
x86_cpu_enable_xsave_components(cpu);
/* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
- if (cpu->full_cpuid_auto_level) {
- x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
- x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
- x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
- x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
- x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX);
- x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EDX);
- x86_cpu_adjust_feat_level(cpu, FEAT_7_2_EDX);
- x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
- x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
- x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
- x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
- x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
- x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
- x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
-
- /* Intel Processor Trace requires CPUID[0x14] */
- if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT)) {
- if (cpu->intel_pt_auto_level) {
- x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
- } else if (cpu->env.cpuid_min_level < 0x14) {
- mark_unavailable_features(cpu, FEAT_7_0_EBX,
- CPUID_7_0_EBX_INTEL_PT,
- "Intel PT need CPUID leaf 0x14, please set by \"-cpu ...,intel-pt=on,min-level=0x14\"");
- }
- }
+ x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_7_1_ECX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_7_2_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
+ x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
+
+ /* Intel Processor Trace requires CPUID[0x14] */
+ if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT)) {
+ x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
+ }
- /*
- * Intel CPU topology with multi-dies support requires CPUID[0x1F].
- * For AMD Rome/Milan, cpuid level is 0x10, and guest OS should detect
- * extended toplogy by leaf 0xB. Only adjust it for Intel CPU, unless
- * cpu->vendor_cpuid_only has been unset for compatibility with older
- * machine types.
- */
- if (x86_has_extended_topo(env->avail_cpu_topo) &&
- (IS_INTEL_CPU(env) || !cpu->vendor_cpuid_only)) {
- x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
- }
+ /*
+ * Intel CPU topology with multi-dies support requires CPUID[0x1F].
+ * For AMD Rome/Milan, cpuid level is 0x10, and guest OS should detect
+     * extended topology by leaf 0xB. Only adjust it for Intel CPU, unless
+ * cpu->vendor_cpuid_only has been unset for compatibility with older
+ * machine types.
+ */
+ if (x86_has_cpuid_0x1f(cpu) &&
+ (IS_INTEL_CPU(env) || !cpu->vendor_cpuid_only)) {
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
+ }
- /* Advanced Vector Extensions 10 (AVX10) requires CPUID[0x24] */
- if (env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) {
- x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x24);
- }
+ /* Advanced Vector Extensions 10 (AVX10) requires CPUID[0x24] */
+ if (env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) {
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x24);
+ }
- /* SVM requires CPUID[0x8000000A] */
- if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
- x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
- }
+ /* Advanced Performance Extensions (APX) requires CPUID[0x29] */
+ if (env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_APXF) {
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x29);
+ }
- /* SEV requires CPUID[0x8000001F] */
- if (sev_enabled()) {
- x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
- }
+ /* SVM requires CPUID[0x8000000A] */
+ if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
+ }
- if (env->features[FEAT_8000_0021_EAX]) {
- x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x80000021);
- }
+ /* SEV requires CPUID[0x8000001F] */
+ if (sev_enabled()) {
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
+ }
- /* SGX requires CPUID[0x12] for EPC enumeration */
- if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SGX) {
- x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x12);
- }
+ if (env->features[FEAT_8000_0021_EAX]) {
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x80000021);
+ }
+
+ /* SGX requires CPUID[0x12] for EPC enumeration */
+ if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SGX) {
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x12);
}
/* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
@@ -7992,16 +9781,30 @@ static bool x86_cpu_filter_features(X86CPU *cpu, bool verbose)
have_filtered_features = x86_cpu_have_filtered_features(cpu);
if (env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) {
- x86_cpu_get_supported_cpuid(0x24, 0,
- &eax_0, &ebx_0, &ecx_0, &edx_0);
- uint8_t version = ebx_0 & 0xff;
+ uint8_t version = x86_cpu_get_host_avx10_version();
if (version < env->avx10_version) {
- if (prefix) {
- warn_report("%s: avx10.%d. Adjust to avx10.%d",
- prefix, env->avx10_version, version);
+ /*
+ * With x-force-features=on, CPUID_7_1_EDX_AVX10 will not be masked
+ * off, so there's no need to zero avx10 version.
+ */
+ if (!cpu->force_features) {
+ if (prefix) {
+ warn_report("%s: avx10.%d. Adjust to avx10.%d",
+ prefix, env->avx10_version, version);
+ }
+ /*
+ * Discrete feature bits have been checked and filtered based
+ * on host support. So it's safe to change version without
+ * reverting other feature bits.
+ */
+ env->avx10_version = version;
+ } else {
+ if (prefix) {
+ warn_report("%s: avx10.%d.",
+ prefix, env->avx10_version);
+ }
}
- env->avx10_version = version;
have_filtered_features = true;
}
} else if (env->avx10_version) {
@@ -8052,46 +9855,34 @@ static bool x86_cpu_update_smp_cache_topo(MachineState *ms, X86CPU *cpu,
level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D);
if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) {
- env->cache_info_cpuid4.l1d_cache->share_level = level;
- env->cache_info_amd.l1d_cache->share_level = level;
+ env->cache_info.l1d_cache->share_level = level;
} else {
machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D,
- env->cache_info_cpuid4.l1d_cache->share_level);
- machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D,
- env->cache_info_amd.l1d_cache->share_level);
+ env->cache_info.l1d_cache->share_level);
}
level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I);
if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) {
- env->cache_info_cpuid4.l1i_cache->share_level = level;
- env->cache_info_amd.l1i_cache->share_level = level;
+ env->cache_info.l1i_cache->share_level = level;
} else {
machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I,
- env->cache_info_cpuid4.l1i_cache->share_level);
- machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I,
- env->cache_info_amd.l1i_cache->share_level);
+ env->cache_info.l1i_cache->share_level);
}
level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2);
if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) {
- env->cache_info_cpuid4.l2_cache->share_level = level;
- env->cache_info_amd.l2_cache->share_level = level;
+ env->cache_info.l2_cache->share_level = level;
} else {
machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2,
- env->cache_info_cpuid4.l2_cache->share_level);
- machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2,
- env->cache_info_amd.l2_cache->share_level);
+ env->cache_info.l2_cache->share_level);
}
level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3);
if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) {
- env->cache_info_cpuid4.l3_cache->share_level = level;
- env->cache_info_amd.l3_cache->share_level = level;
+ env->cache_info.l3_cache->share_level = level;
} else {
machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3,
- env->cache_info_cpuid4.l3_cache->share_level);
- machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3,
- env->cache_info_amd.l3_cache->share_level);
+ env->cache_info.l3_cache->share_level);
}
if (!machine_check_smp_cache(ms, errp)) {
@@ -8115,6 +9906,16 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
tcg_cflags_set(cs, CF_PCREL);
#endif
+ /*
+     * x-vendor-cpuid-only and v2 should be internal only. But
+ * QEMU doesn't support "internal" property.
+ */
+ if (!cpu->vendor_cpuid_only && cpu->vendor_cpuid_only_v2) {
+ error_setg(errp, "x-vendor-cpuid-only-v2 property "
+ "depends on x-vendor-cpuid-only");
+ return;
+ }
+
if (cpu->apic_id == UNASSIGNED_APIC_ID) {
error_setg(errp, "apic-id property was not initialized properly");
return;
@@ -8263,12 +10064,9 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
* accel-specific code in cpu_exec_realizefn.
*/
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
- if (cpu->phys_bits &&
- (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
- cpu->phys_bits < 32)) {
- error_setg(errp, "phys-bits should be between 32 and %u "
- " (but is %u)",
- TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
+ if (cpu->phys_bits && cpu->phys_bits < 32) {
+ error_setg(errp, "phys-bits should be at least 32"
+ " (but is %u)", cpu->phys_bits);
return;
}
/*
@@ -8309,33 +10107,32 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
/* Cache information initialization */
if (!cpu->legacy_cache) {
- const CPUCaches *cache_info =
- x86_cpu_get_versioned_cache_info(cpu, xcc->model);
+ const CPUCaches *cache_info = xcc->model
+ ? x86_cpu_get_versioned_cache_info(cpu, xcc->model)
+ : NULL;
- if (!xcc->model || !cache_info) {
+ if (!cache_info) {
g_autofree char *name = x86_cpu_class_get_model_name(xcc);
error_setg(errp,
"CPU model '%s' doesn't support legacy-cache=off", name);
return;
}
- env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
- *cache_info;
+ env->cache_info = *cache_info;
} else {
/* Build legacy cache information */
- env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
- env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
- env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
- env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
+ if (!cpu->consistent_cache) {
+ env->enable_legacy_cpuid2_cache = true;
+ }
- env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
- env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
- env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
- env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
+ if (!cpu->vendor_cpuid_only_v2) {
+ env->enable_legacy_vendor_cache = true;
+ }
- env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
- env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
- env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
- env->cache_info_amd.l3_cache = &legacy_l3_cache;
+ if (IS_AMD_CPU(env)) {
+ env->cache_info = legacy_amd_cache_info;
+ } else {
+ env->cache_info = legacy_intel_cache_info;
+ }
}
#ifndef CONFIG_USER_ONLY
@@ -8494,27 +10291,38 @@ static void x86_cpu_register_feature_bit_props(X86CPUClass *xcc,
static void x86_cpu_post_initfn(Object *obj)
{
+#ifndef CONFIG_USER_ONLY
+ if (current_machine && current_machine->cgs) {
+ x86_confidential_guest_cpu_instance_init(
+ X86_CONFIDENTIAL_GUEST(current_machine->cgs), (CPU(obj)));
+ }
+#endif
+}
+
+static void x86_cpu_init_xsave(void)
+{
static bool first = true;
- uint64_t supported_xcr0;
+ uint64_t supported_xcr0, supported_xss;
int i;
if (first) {
first = false;
supported_xcr0 =
- ((uint64_t) x86_cpu_get_supported_feature_word(NULL, FEAT_XSAVE_XCR0_HI) << 32) |
+ x86_cpu_get_supported_feature_word(NULL, FEAT_XSAVE_XCR0_HI) << 32 |
x86_cpu_get_supported_feature_word(NULL, FEAT_XSAVE_XCR0_LO);
+ supported_xss =
+ x86_cpu_get_supported_feature_word(NULL, FEAT_XSAVE_XSS_HI) << 32 |
+ x86_cpu_get_supported_feature_word(NULL, FEAT_XSAVE_XSS_LO);
for (i = XSTATE_SSE_BIT + 1; i < XSAVE_STATE_AREA_COUNT; i++) {
ExtSaveArea *esa = &x86_ext_save_areas[i];
- if (!(supported_xcr0 & (1 << i))) {
+ if (!((supported_xcr0 | supported_xss) & (1 << i))) {
esa->size = 0;
}
}
}
-
- accel_cpu_instance_init(CPU(obj));
}
static void x86_cpu_init_default_topo(X86CPU *cpu)
@@ -8583,6 +10391,13 @@ static void x86_cpu_initfn(Object *obj)
if (xcc->model) {
x86_cpu_load_model(cpu, xcc->model);
}
+
+ /*
+ * accel's cpu_instance_init may have the xsave check,
+ * so x86_ext_save_areas[] must be initialized before this.
+ */
+ x86_cpu_init_xsave();
+ accel_cpu_instance_init(CPU(obj));
}
static int64_t x86_cpu_get_arch_id(CPUState *cs)
@@ -8663,43 +10478,10 @@ static bool x86_cpu_has_work(CPUState *cs)
}
#endif /* !CONFIG_USER_ONLY */
-int x86_mmu_index_pl(CPUX86State *env, unsigned pl)
-{
- int mmu_index_32 = (env->hflags & HF_CS64_MASK) ? 0 : 1;
- int mmu_index_base =
- pl == 3 ? MMU_USER64_IDX :
- !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
- (env->eflags & AC_MASK) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX;
-
- return mmu_index_base + mmu_index_32;
-}
-
-static int x86_cpu_mmu_index(CPUState *cs, bool ifetch)
-{
- CPUX86State *env = cpu_env(cs);
- return x86_mmu_index_pl(env, env->hflags & HF_CPL_MASK);
-}
-
-static int x86_mmu_index_kernel_pl(CPUX86State *env, unsigned pl)
-{
- int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1;
- int mmu_index_base =
- !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
- (pl < 3 && (env->eflags & AC_MASK)
- ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX);
-
- return mmu_index_base + mmu_index_32;
-}
-
-int cpu_mmu_index_kernel(CPUX86State *env)
-{
- return x86_mmu_index_kernel_pl(env, env->hflags & HF_CPL_MASK);
-}
-
-static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
+static void x86_disas_set_info(const CPUState *cs, disassemble_info *info)
{
- X86CPU *cpu = X86_CPU(cs);
- CPUX86State *env = &cpu->env;
+ const X86CPU *cpu = X86_CPU(cs);
+ const CPUX86State *env = &cpu->env;
info->endian = BFD_ENDIAN_LITTLE;
info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
@@ -8856,12 +10638,11 @@ static const Property x86_cpu_properties[] = {
DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
- DEFINE_PROP_UINT8("avx10-version", X86CPU, env.avx10_version, 0),
DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0),
- DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor),
DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
DEFINE_PROP_BOOL("x-vendor-cpuid-only", X86CPU, vendor_cpuid_only, true),
+ DEFINE_PROP_BOOL("x-vendor-cpuid-only-v2", X86CPU, vendor_cpuid_only_v2, true),
DEFINE_PROP_BOOL("x-amd-topoext-features-only", X86CPU, amd_topoext_features_only, true),
DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
@@ -8871,11 +10652,13 @@ static const Property x86_cpu_properties[] = {
DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
true),
+ DEFINE_PROP_BOOL("x-migrate-error-code", X86CPU, migrate_error_code, true),
/*
     * legacy_cache defaults to true unless the CPU model provides its
* own cache information (see x86_cpu_load_def()).
*/
DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
+ DEFINE_PROP_BOOL("x-consistent-cache", X86CPU, consistent_cache, true),
DEFINE_PROP_BOOL("legacy-multi-node", X86CPU, legacy_multi_node, false),
DEFINE_PROP_BOOL("xen-vapic", X86CPU, xen_vapic, false),
@@ -8892,11 +10675,13 @@ static const Property x86_cpu_properties[] = {
* to the specific Windows version being used."
*/
DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
- DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
- false),
- DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
- true),
DEFINE_PROP_BOOL("x-l1-cache-per-thread", X86CPU, l1_cache_per_core, true),
+ DEFINE_PROP_BOOL("x-force-cpuid-0x1f", X86CPU, force_cpuid_0x1f, false),
+
+ DEFINE_PROP_BOOL("x-arch-cap-always-on", X86CPU,
+ arch_cap_always_on, false),
+ DEFINE_PROP_BOOL("x-pdcm-on-even-without-pmu", X86CPU,
+ pdcm_on_even_without_pmu, false),
};
#ifndef CONFIG_USER_ONLY
@@ -8917,7 +10702,7 @@ static const struct SysemuCPUOps i386_sysemu_ops = {
};
#endif
-static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
+static void x86_cpu_common_class_init(ObjectClass *oc, const void *data)
{
X86CPUClass *xcc = X86_CPU_CLASS(oc);
CPUClass *cc = CPU_CLASS(oc);
@@ -8936,8 +10721,8 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
cc->class_by_name = x86_cpu_class_by_name;
+ cc->list_cpus = x86_cpu_list;
cc->parse_features = x86_cpu_parse_featurestr;
- cc->mmu_index = x86_cpu_mmu_index;
cc->dump_state = x86_cpu_dump_state;
cc->set_pc = x86_cpu_set_pc;
cc->get_pc = x86_cpu_get_pc;
@@ -8946,8 +10731,12 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->get_arch_id = x86_cpu_get_arch_id;
#ifndef CONFIG_USER_ONLY
+ cc->max_as = X86ASIdx_MAX;
cc->sysemu_ops = &i386_sysemu_ops;
#endif /* !CONFIG_USER_ONLY */
+#ifdef CONFIG_TCG
+ cc->tcg_ops = &x86_tcg_ops;
+#endif /* CONFIG_TCG */
cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
@@ -8987,6 +10776,11 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
x86_cpu_get_unavailable_features,
NULL, NULL, NULL);
+ object_class_property_add(oc, "avx10-version", "uint8",
+ x86_cpuid_get_avx10_version,
+ x86_cpuid_set_avx10_version,
+ NULL, NULL);
+
#if !defined(CONFIG_USER_ONLY)
object_class_property_add(oc, "crash-information", "GuestPanicInformation",
x86_cpu_get_crash_info_qom, NULL, NULL, NULL);
@@ -9014,7 +10808,7 @@ static const TypeInfo x86_cpu_type_info = {
};
/* "base" CPU model, used by query-cpu-model-expansion */
-static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
+static void x86_cpu_base_class_init(ObjectClass *oc, const void *data)
{
X86CPUClass *xcc = X86_CPU_CLASS(oc);