Diffstat (limited to 'target/i386/cpu.c')
-rw-r--r-- | target/i386/cpu.c | 1667 |
1 file changed, 1355 insertions(+), 312 deletions(-)
diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 4688d14..0d35e95 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -24,29 +24,35 @@ #include "qemu/hw-version.h" #include "cpu.h" #include "tcg/helper-tcg.h" -#include "sysemu/hvf.h" +#include "exec/translation-block.h" +#include "system/hvf.h" #include "hvf/hvf-i386.h" #include "kvm/kvm_i386.h" #include "sev.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "qapi/qapi-visit-machine.h" -#include "qapi/qmp/qerror.h" #include "standard-headers/asm-x86/kvm_para.h" #include "hw/qdev-properties.h" #include "hw/i386/topology.h" +#include "exec/watchpoint.h" #ifndef CONFIG_USER_ONLY -#include "sysemu/reset.h" -#include "qapi/qapi-commands-machine-target.h" -#include "exec/address-spaces.h" +#include "confidential-guest.h" +#include "system/reset.h" +#include "qapi/qapi-commands-machine.h" +#include "system/address-spaces.h" #include "hw/boards.h" #include "hw/i386/sgx-epc.h" #endif +#include "tcg/tcg-cpu.h" #include "disas/capstone.h" #include "cpu-internal.h" static void x86_cpu_realizefn(DeviceState *dev, Error **errp); +static void x86_cpu_get_supported_cpuid(uint32_t func, uint32_t index, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx); /* Helpers for building CPUID[2] descriptors: */ @@ -236,23 +242,26 @@ static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) 0 /* Invalid value */) static uint32_t max_thread_ids_for_cache(X86CPUTopoInfo *topo_info, - enum CPUTopoLevel share_level) + enum CpuTopologyLevel share_level) { uint32_t num_ids = 0; switch (share_level) { - case CPU_TOPO_LEVEL_CORE: + case CPU_TOPOLOGY_LEVEL_CORE: num_ids = 1 << apicid_core_offset(topo_info); break; - case CPU_TOPO_LEVEL_DIE: + case CPU_TOPOLOGY_LEVEL_MODULE: + num_ids = 1 << apicid_module_offset(topo_info); + break; + case CPU_TOPOLOGY_LEVEL_DIE: num_ids = 1 << apicid_die_offset(topo_info); break; - case CPU_TOPO_LEVEL_PACKAGE: + case CPU_TOPOLOGY_LEVEL_SOCKET: num_ids = 1 << apicid_pkg_offset(topo_info); break; default: /* - * Currently there is no use case for SMT and MODULE, so use + * Currently there is no use case for THREAD, so use * assert directly to facilitate debugging. 
*/ g_assert_not_reached(); @@ -301,21 +310,19 @@ static void encode_cache_cpuid4(CPUCacheInfo *cache, } static uint32_t num_threads_by_topo_level(X86CPUTopoInfo *topo_info, - enum CPUTopoLevel topo_level) + enum CpuTopologyLevel topo_level) { switch (topo_level) { - case CPU_TOPO_LEVEL_SMT: + case CPU_TOPOLOGY_LEVEL_THREAD: return 1; - case CPU_TOPO_LEVEL_CORE: + case CPU_TOPOLOGY_LEVEL_CORE: return topo_info->threads_per_core; - case CPU_TOPO_LEVEL_MODULE: - return topo_info->threads_per_core * topo_info->cores_per_module; - case CPU_TOPO_LEVEL_DIE: - return topo_info->threads_per_core * topo_info->cores_per_module * - topo_info->modules_per_die; - case CPU_TOPO_LEVEL_PACKAGE: - return topo_info->threads_per_core * topo_info->cores_per_module * - topo_info->modules_per_die * topo_info->dies_per_pkg; + case CPU_TOPOLOGY_LEVEL_MODULE: + return x86_threads_per_module(topo_info); + case CPU_TOPOLOGY_LEVEL_DIE: + return x86_threads_per_die(topo_info); + case CPU_TOPOLOGY_LEVEL_SOCKET: + return x86_threads_per_pkg(topo_info); default: g_assert_not_reached(); } @@ -323,18 +330,18 @@ static uint32_t num_threads_by_topo_level(X86CPUTopoInfo *topo_info, } static uint32_t apicid_offset_by_topo_level(X86CPUTopoInfo *topo_info, - enum CPUTopoLevel topo_level) + enum CpuTopologyLevel topo_level) { switch (topo_level) { - case CPU_TOPO_LEVEL_SMT: + case CPU_TOPOLOGY_LEVEL_THREAD: return 0; - case CPU_TOPO_LEVEL_CORE: + case CPU_TOPOLOGY_LEVEL_CORE: return apicid_core_offset(topo_info); - case CPU_TOPO_LEVEL_MODULE: + case CPU_TOPOLOGY_LEVEL_MODULE: return apicid_module_offset(topo_info); - case CPU_TOPO_LEVEL_DIE: + case CPU_TOPOLOGY_LEVEL_DIE: return apicid_die_offset(topo_info); - case CPU_TOPO_LEVEL_PACKAGE: + case CPU_TOPOLOGY_LEVEL_SOCKET: return apicid_pkg_offset(topo_info); default: g_assert_not_reached(); @@ -342,18 +349,18 @@ static uint32_t apicid_offset_by_topo_level(X86CPUTopoInfo *topo_info, return 0; } -static uint32_t cpuid1f_topo_type(enum CPUTopoLevel topo_level) +static uint32_t cpuid1f_topo_type(enum CpuTopologyLevel topo_level) { switch (topo_level) { - case CPU_TOPO_LEVEL_INVALID: + case CPU_TOPOLOGY_LEVEL_INVALID: return CPUID_1F_ECX_TOPO_LEVEL_INVALID; - case CPU_TOPO_LEVEL_SMT: + case CPU_TOPOLOGY_LEVEL_THREAD: return CPUID_1F_ECX_TOPO_LEVEL_SMT; - case CPU_TOPO_LEVEL_CORE: + case CPU_TOPOLOGY_LEVEL_CORE: return CPUID_1F_ECX_TOPO_LEVEL_CORE; - case CPU_TOPO_LEVEL_MODULE: + case CPU_TOPOLOGY_LEVEL_MODULE: return CPUID_1F_ECX_TOPO_LEVEL_MODULE; - case CPU_TOPO_LEVEL_DIE: + case CPU_TOPOLOGY_LEVEL_DIE: return CPUID_1F_ECX_TOPO_LEVEL_DIE; default: /* Other types are not supported in QEMU. */ @@ -368,38 +375,41 @@ static void encode_topo_cpuid1f(CPUX86State *env, uint32_t count, uint32_t *ecx, uint32_t *edx) { X86CPU *cpu = env_archcpu(env); - unsigned long level, next_level; + unsigned long level, base_level, next_level; uint32_t num_threads_next_level, offset_next_level; - assert(count + 1 < CPU_TOPO_LEVEL_MAX); + assert(count <= CPU_TOPOLOGY_LEVEL_SOCKET); /* * Find the No.(count + 1) topology level in avail_cpu_topo bitmap. - * The search starts from bit 1 (CPU_TOPO_LEVEL_INVALID + 1). + * The search starts from bit 0 (CPU_TOPOLOGY_LEVEL_THREAD). 
*/ - level = CPU_TOPO_LEVEL_INVALID; + level = CPU_TOPOLOGY_LEVEL_THREAD; + base_level = level; for (int i = 0; i <= count; i++) { level = find_next_bit(env->avail_cpu_topo, - CPU_TOPO_LEVEL_PACKAGE, - level + 1); + CPU_TOPOLOGY_LEVEL_SOCKET, + base_level); /* * CPUID[0x1f] doesn't explicitly encode the package level, * and it just encodes the invalid level (all fields are 0) * into the last subleaf of 0x1f. */ - if (level == CPU_TOPO_LEVEL_PACKAGE) { - level = CPU_TOPO_LEVEL_INVALID; + if (level == CPU_TOPOLOGY_LEVEL_SOCKET) { + level = CPU_TOPOLOGY_LEVEL_INVALID; break; } + /* Search the next level. */ + base_level = level + 1; } - if (level == CPU_TOPO_LEVEL_INVALID) { + if (level == CPU_TOPOLOGY_LEVEL_INVALID) { num_threads_next_level = 0; offset_next_level = 0; } else { next_level = find_next_bit(env->avail_cpu_topo, - CPU_TOPO_LEVEL_PACKAGE, + CPU_TOPOLOGY_LEVEL_SOCKET, level + 1); num_threads_next_level = num_threads_by_topo_level(topo_info, next_level); @@ -575,7 +585,7 @@ static CPUCacheInfo legacy_l1d_cache = { .sets = 64, .partitions = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }; /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ @@ -590,7 +600,7 @@ static CPUCacheInfo legacy_l1d_cache_amd = { .partitions = 1, .lines_per_tag = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }; /* L1 instruction cache: */ @@ -604,7 +614,7 @@ static CPUCacheInfo legacy_l1i_cache = { .sets = 64, .partitions = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }; /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ @@ -619,7 +629,7 @@ static CPUCacheInfo legacy_l1i_cache_amd = { .partitions = 1, .lines_per_tag = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }; /* Level 2 unified cache: */ @@ -633,7 +643,7 @@ static CPUCacheInfo legacy_l2_cache = { .sets = 4096, .partitions = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }; /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */ @@ -643,7 +653,7 @@ static CPUCacheInfo legacy_l2_cache_cpuid2 = { .size = 2 * MiB, .line_size = 64, .associativity = 8, - .share_level = CPU_TOPO_LEVEL_INVALID, + .share_level = CPU_TOPOLOGY_LEVEL_INVALID, }; @@ -657,7 +667,7 @@ static CPUCacheInfo legacy_l2_cache_amd = { .associativity = 16, .sets = 512, .partitions = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }; /* Level 3 unified cache: */ @@ -673,7 +683,7 @@ static CPUCacheInfo legacy_l3_cache = { .self_init = true, .inclusive = true, .complex_indexing = true, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }; /* TLB definitions: */ @@ -767,11 +777,12 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ - CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE) + CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE | \ + CPUID_HT) /* partly implemented: CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */ /* missing: - CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ + CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_TM, CPUID_PBE */ /* * 
Kernel-only features that can be shown to usermode programs even if @@ -839,7 +850,8 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A | \ - CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_KERNEL_FEATURES) + CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_KERNEL_FEATURES | \ + CPUID_EXT3_CMP_LEG) #define TCG_EXT4_FEATURES 0 @@ -888,6 +900,7 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, #define TCG_7_1_EAX_FEATURES (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | \ CPUID_7_1_EAX_FSRC | CPUID_7_1_EAX_CMPCCXADD) +#define TCG_7_1_ECX_FEATURES 0 #define TCG_7_1_EDX_FEATURES 0 #define TCG_7_2_EDX_FEATURES 0 #define TCG_APM_FEATURES 0 @@ -899,6 +912,7 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, #define TCG_SGX_12_0_EAX_FEATURES 0 #define TCG_SGX_12_0_EBX_FEATURES 0 #define TCG_SGX_12_1_EAX_FEATURES 0 +#define TCG_24_0_EBX_FEATURES 0 #if defined CONFIG_USER_ONLY #define CPUID_8000_0008_EBX_KERNEL_FEATURES (CPUID_8000_0008_EBX_IBPB | \ @@ -912,6 +926,17 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, #define TCG_8000_0008_EBX (CPUID_8000_0008_EBX_XSAVEERPTR | \ CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_KERNEL_FEATURES) +#if defined CONFIG_USER_ONLY +#define CPUID_8000_0021_EAX_KERNEL_FEATURES CPUID_8000_0021_EAX_AUTO_IBRS +#else +#define CPUID_8000_0021_EAX_KERNEL_FEATURES 0 +#endif + +#define TCG_8000_0021_EAX_FEATURES ( \ + CPUID_8000_0021_EAX_NO_NESTED_DATA_BP | \ + CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE | \ + CPUID_8000_0021_EAX_KERNEL_FEATURES) + FeatureWordInfo feature_word_info[FEATURE_WORDS] = { [FEAT_1_EDX] = { .type = CPUID_FEATURE_WORD, @@ -1054,9 +1079,9 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .type = CPUID_FEATURE_WORD, .feat_names = { "fsgsbase", "tsc-adjust", "sgx", "bmi1", - "hle", "avx2", NULL, "smep", + "hle", "avx2", "fdp-excptn-only", "smep", "bmi2", "erms", "invpcid", "rtm", - NULL, NULL, "mpx", NULL, + NULL, "zero-fcs-fds", "mpx", NULL, "avx512f", "avx512dq", "rdseed", "adx", "smap", "avx512ifma", "pcommit", "clflushopt", "clwb", "intel-pt", "avx512pf", "avx512er", @@ -1110,7 +1135,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { [FEAT_7_1_EAX] = { .type = CPUID_FEATURE_WORD, .feat_names = { - NULL, NULL, NULL, NULL, + "sha512", "sm3", "sm4", NULL, "avx-vnni", "avx512-bf16", NULL, "cmpccxadd", NULL, NULL, "fzrm", "fsrs", "fsrc", NULL, NULL, NULL, @@ -1126,6 +1151,25 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { }, .tcg_features = TCG_7_1_EAX_FEATURES, }, + [FEAT_7_1_ECX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL, NULL, NULL, NULL, + NULL, "msr-imm", NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { + .eax = 7, + .needs_ecx = true, .ecx = 1, + .reg = R_ECX, + }, + .tcg_features = TCG_7_1_ECX_FEATURES, + }, [FEAT_7_1_EDX] = { .type = CPUID_FEATURE_WORD, .feat_names = { @@ -1133,7 +1177,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { "avx-vnni-int8", "avx-ne-convert", NULL, NULL, "amx-complex", NULL, "avx-vnni-int16", NULL, NULL, NULL, "prefetchiti", NULL, - NULL, NULL, NULL, NULL, + NULL, NULL, NULL, "avx10", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -1148,8 +1192,8 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { [FEAT_7_2_EDX] = { .type = CPUID_FEATURE_WORD, .feat_names = { - NULL, 
NULL, NULL, NULL, - NULL, "mcdt-no", NULL, NULL, + "intel-psfd", "ipred-ctrl", "rrsba-ctrl", "ddpd-u", + "bhi-ctrl", "mcdt-no", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -1164,6 +1208,20 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { }, .tcg_features = TCG_7_2_EDX_FEATURES, }, + [FEAT_24_0_EBX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + [16] = "avx10-128", + [17] = "avx10-256", + [18] = "avx10-512", + }, + .cpuid = { + .eax = 0x24, + .needs_ecx = true, .ecx = 0, + .reg = R_EBX, + }, + .tcg_features = TCG_24_0_EBX_FEATURES, + }, [FEAT_8000_0007_EDX] = { .type = CPUID_FEATURE_WORD, .feat_names = { @@ -1215,16 +1273,38 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { [FEAT_8000_0021_EAX] = { .type = CPUID_FEATURE_WORD, .feat_names = { - "no-nested-data-bp", NULL, "lfence-always-serializing", NULL, + "no-nested-data-bp", "fs-gs-base-ns", "lfence-always-serializing", NULL, NULL, NULL, "null-sel-clr-base", NULL, "auto-ibrs", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + "prefetchi", NULL, NULL, NULL, + "eraps", NULL, NULL, "sbpb", + "ibpb-brtype", "srso-no", "srso-user-kernel-no", NULL, + }, + .cpuid = { .eax = 0x80000021, .reg = R_EAX, }, + .tcg_features = TCG_8000_0021_EAX_FEATURES, + .unmigratable_flags = 0, + }, + [FEAT_8000_0021_EBX] = { + .type = CPUID_FEATURE_WORD, + .cpuid = { .eax = 0x80000021, .reg = R_EBX, }, + .tcg_features = 0, + .unmigratable_flags = 0, + }, + [FEAT_8000_0022_EAX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + "perfmon-v2", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, - .cpuid = { .eax = 0x80000021, .reg = R_EAX, }, + .cpuid = { .eax = 0x80000022, .reg = R_EAX, }, .tcg_features = 0, .unmigratable_flags = 0, }, @@ -1297,7 +1377,9 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .needs_ecx = true, .ecx = 0, .reg = R_EAX, }, - .tcg_features = ~0U, + .tcg_features = XSTATE_FP_MASK | XSTATE_SSE_MASK | + XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK | + XSTATE_PKRU_MASK, .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK | XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK | @@ -1310,7 +1392,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .needs_ecx = true, .ecx = 0, .reg = R_EDX, }, - .tcg_features = ~0U, + .tcg_features = 0U, }, /*Below are MSR exposed features*/ [FEAT_ARCH_CAPABILITIES] = { @@ -1321,9 +1403,17 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { "taa-no", NULL, NULL, NULL, NULL, "sbdr-ssdp-no", "fbsdp-no", "psdp-no", NULL, "fb-clear", NULL, NULL, - NULL, NULL, NULL, NULL, + "bhi-no", NULL, NULL, NULL, "pbrsb-no", NULL, "gds-no", "rfds-no", "rfds-clear", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, "its-no", NULL, }, .msr = { .index = MSR_IA32_ARCH_CAPABILITIES, @@ -1435,7 +1525,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { "vmx-exit-save-efer", "vmx-exit-load-efer", "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs", NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL, - NULL, "vmx-exit-load-pkrs", NULL, NULL, + NULL, "vmx-exit-load-pkrs", NULL, "vmx-exit-secondary-ctls", }, .msr = { .index = 
MSR_IA32_VMX_TRUE_EXIT_CTLS, @@ -1450,7 +1540,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { NULL, "vmx-entry-ia32e-mode", NULL, NULL, NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer", "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL, - NULL, NULL, "vmx-entry-load-pkrs", NULL, + NULL, NULL, "vmx-entry-load-pkrs", "vmx-entry-load-fred", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, @@ -1608,14 +1698,21 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { }, }; -typedef struct FeatureMask { - FeatureWord index; - uint64_t mask; -} FeatureMask; +bool is_feature_word_cpuid(uint32_t feature, uint32_t index, int reg) +{ + FeatureWordInfo *wi; + FeatureWord w; -typedef struct FeatureDep { - FeatureMask from, to; -} FeatureDep; + for (w = 0; w < FEATURE_WORDS; w++) { + wi = &feature_word_info[w]; + if (wi->type == CPUID_FEATURE_WORD && wi->cpuid.eax == feature && + (!wi->cpuid.needs_ecx || wi->cpuid.ecx == index) && + wi->cpuid.reg == reg) { + return true; + } + } + return false; +} static FeatureDep feature_dependencies[] = { { @@ -1727,8 +1824,36 @@ static FeatureDep feature_dependencies[] = { .to = { FEAT_7_1_EAX, CPUID_7_1_EAX_FRED }, }, { - .from = { FEAT_7_1_EAX, CPUID_7_1_EAX_WRMSRNS }, - .to = { FEAT_7_1_EAX, CPUID_7_1_EAX_FRED }, + .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_SGX }, + .to = { FEAT_7_0_ECX, CPUID_7_0_ECX_SGX_LC }, + }, + { + .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_SGX }, + .to = { FEAT_SGX_12_0_EAX, ~0ull }, + }, + { + .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_SGX }, + .to = { FEAT_SGX_12_0_EBX, ~0ull }, + }, + { + .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_SGX }, + .to = { FEAT_SGX_12_1_EAX, ~0ull }, + }, + { + .from = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_128 }, + .to = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_256 }, + }, + { + .from = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_256 }, + .to = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_512 }, + }, + { + .from = { FEAT_24_0_EBX, CPUID_24_0_EBX_AVX10_VL_MASK }, + .to = { FEAT_7_1_EDX, CPUID_7_1_EDX_AVX10 }, + }, + { + .from = { FEAT_7_1_EDX, CPUID_7_1_EDX_AVX10 }, + .to = { FEAT_24_0_EBX, ~0ull }, }, }; @@ -1753,9 +1878,6 @@ static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = { }; #undef REGISTER -/* CPUID feature bits available in XSS */ -#define CPUID_XSTATE_XSS_MASK (XSTATE_ARCH_LBR_MASK) - ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = { [XSTATE_FP_BIT] = { /* x87 FP state component is always enabled if XSAVE is supported */ @@ -1849,9 +1971,10 @@ static inline uint64_t x86_cpu_xsave_xss_components(X86CPU *cpu) * Returns the set of feature flags that are supported and migratable by * QEMU, for a given FeatureWord. 
*/ -static uint64_t x86_cpu_get_migratable_flags(FeatureWord w) +static uint64_t x86_cpu_get_migratable_flags(X86CPU *cpu, FeatureWord w) { FeatureWordInfo *wi = &feature_word_info[w]; + CPUX86State *env = &cpu->env; uint64_t r = 0; int i; @@ -1865,6 +1988,12 @@ static uint64_t x86_cpu_get_migratable_flags(FeatureWord w) r |= f; } } + + /* when tsc-khz is set explicitly, invtsc is migratable */ + if ((w == FEAT_8000_0007_EDX) && env->user_tsc_khz) { + r |= CPUID_APM_INVTSC; + } + return r; } @@ -1943,6 +2072,7 @@ typedef struct X86CPUDefinition { int family; int model; int stepping; + uint8_t avx10_version; FeatureWordArray features; const char *model_id; const CPUCaches *const cache_info; @@ -2001,7 +2131,7 @@ static const CPUCaches epyc_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2014,7 +2144,7 @@ static const CPUCaches epyc_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2025,7 +2155,7 @@ static const CPUCaches epyc_cache_info = { .partitions = 1, .sets = 1024, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2039,7 +2169,7 @@ static const CPUCaches epyc_cache_info = { .self_init = true, .inclusive = true, .complex_indexing = true, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -2055,7 +2185,7 @@ static CPUCaches epyc_v4_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2068,7 +2198,7 @@ static CPUCaches epyc_v4_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2079,7 +2209,7 @@ static CPUCaches epyc_v4_cache_info = { .partitions = 1, .sets = 1024, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2093,7 +2223,61 @@ static CPUCaches epyc_v4_cache_info = { .self_init = true, .inclusive = true, .complex_indexing = false, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, + }, +}; + +static CPUCaches epyc_v5_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 64 * KiB, + .line_size = 64, + .associativity = 4, + .partitions = 1, + .sets = 256, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 512 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 1024, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l3_cache = &(CPUCacheInfo) { + 
.type = UNIFIED_CACHE, + .level = 3, + .size = 8 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 8192, + .lines_per_tag = 1, + .self_init = true, + .no_invd_sharing = true, + .complex_indexing = false, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -2109,7 +2293,7 @@ static const CPUCaches epyc_rome_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2122,7 +2306,7 @@ static const CPUCaches epyc_rome_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2133,7 +2317,7 @@ static const CPUCaches epyc_rome_cache_info = { .partitions = 1, .sets = 1024, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2147,7 +2331,7 @@ static const CPUCaches epyc_rome_cache_info = { .self_init = true, .inclusive = true, .complex_indexing = true, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -2163,7 +2347,7 @@ static const CPUCaches epyc_rome_v3_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2176,7 +2360,7 @@ static const CPUCaches epyc_rome_v3_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2187,7 +2371,7 @@ static const CPUCaches epyc_rome_v3_cache_info = { .partitions = 1, .sets = 1024, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2201,7 +2385,61 @@ static const CPUCaches epyc_rome_v3_cache_info = { .self_init = true, .inclusive = true, .complex_indexing = false, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, + }, +}; + +static const CPUCaches epyc_rome_v5_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 512 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 1024, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 16 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 16384, + .lines_per_tag = 1, + .self_init = true, + .no_invd_sharing = true, + .complex_indexing = false, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -2217,7 +2455,7 @@ static const CPUCaches epyc_milan_cache_info = 
{ .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2230,7 +2468,7 @@ static const CPUCaches epyc_milan_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2241,7 +2479,7 @@ static const CPUCaches epyc_milan_cache_info = { .partitions = 1, .sets = 1024, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2255,7 +2493,7 @@ static const CPUCaches epyc_milan_cache_info = { .self_init = true, .inclusive = true, .complex_indexing = true, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -2271,7 +2509,7 @@ static const CPUCaches epyc_milan_v2_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2284,7 +2522,7 @@ static const CPUCaches epyc_milan_v2_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2295,7 +2533,7 @@ static const CPUCaches epyc_milan_v2_cache_info = { .partitions = 1, .sets = 1024, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2309,7 +2547,61 @@ static const CPUCaches epyc_milan_v2_cache_info = { .self_init = true, .inclusive = true, .complex_indexing = false, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, + }, +}; + +static const CPUCaches epyc_milan_v3_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 512 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 1024, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 32 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 32768, + .lines_per_tag = 1, + .self_init = true, + .no_invd_sharing = true, + .complex_indexing = false, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -2325,7 +2617,7 @@ static const CPUCaches epyc_genoa_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2338,7 +2630,59 @@ static const CPUCaches epyc_genoa_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - 
.share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 1 * MiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 2048, + .lines_per_tag = 1, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 32 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 32768, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .complex_indexing = false, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, + }, +}; + +static const CPUCaches epyc_genoa_v2_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2349,7 +2693,9 @@ static const CPUCaches epyc_genoa_cache_info = { .partitions = 1, .sets = 2048, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .self_init = true, + .inclusive = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2361,9 +2707,63 @@ static const CPUCaches epyc_genoa_cache_info = { .sets = 32768, .lines_per_tag = 1, .self_init = true, + .no_invd_sharing = true, + .complex_indexing = false, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, + }, +}; + +static const CPUCaches epyc_turin_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 48 * KiB, + .line_size = 64, + .associativity = 12, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 1 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 1024, + .lines_per_tag = 1, + .self_init = true, .inclusive = true, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 32 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 32768, + .lines_per_tag = 1, + .self_init = true, + .no_invd_sharing = true, .complex_indexing = false, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -3607,6 +4007,7 @@ static const X86CPUDefinition builtin_x86_defs[] = { }, { .version = 4, + .note = "IBRS, EPT switching, no TSX", .props = (PropValue[]) { { "vmx-eptp-switching", "on" }, { /* end of list */ } @@ -3741,7 +4142,7 @@ static const X86CPUDefinition builtin_x86_defs[] = { }, }, { .version = 4, - .note = "ARCH_CAPABILITIES, no TSX", + .note = "ARCH_CAPABILITIES, EPT switching, no TSX", .props = (PropValue[]) { { "vmx-eptp-switching", "on" }, { /* end of list */ } @@ -4322,6 +4723,23 @@ static const X86CPUDefinition builtin_x86_defs[] = { .model_id = "Intel 
Xeon Processor (GraniteRapids)", .versions = (X86CPUVersionDefinition[]) { { .version = 1 }, + { + .version = 2, + .props = (PropValue[]) { + { "ss", "on" }, + { "tsc-adjust", "on" }, + { "cldemote", "on" }, + { "movdiri", "on" }, + { "movdir64b", "on" }, + { "avx10", "on" }, + { "avx10-128", "on" }, + { "avx10-256", "on" }, + { "avx10-512", "on" }, + { "avx10-version", "1" }, + { "stepping", "1" }, + { /* end of list */ } + } + }, { /* end of list */ }, }, }, @@ -4448,6 +4866,160 @@ static const X86CPUDefinition builtin_x86_defs[] = { .model_id = "Intel Xeon Processor (SierraForest)", .versions = (X86CPUVersionDefinition[]) { { .version = 1 }, + { + .version = 2, + .props = (PropValue[]) { + { "ss", "on" }, + { "tsc-adjust", "on" }, + { "cldemote", "on" }, + { "movdiri", "on" }, + { "movdir64b", "on" }, + { "gds-no", "on" }, + { "rfds-no", "on" }, + { "lam", "on" }, + { "intel-psfd", "on"}, + { "ipred-ctrl", "on"}, + { "rrsba-ctrl", "on"}, + { "bhi-ctrl", "on"}, + { "stepping", "3" }, + { /* end of list */ } + } + }, + { /* end of list */ }, + }, + }, + { + .name = "ClearwaterForest", + .level = 0x23, + .xlevel = 0x80000008, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 221, + .stepping = 0, + /* + * please keep the ascending order so that we can have a clear view of + * bit position of each feature. + */ + .features[FEAT_1_EDX] = + CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | + CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | + CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | + CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | + CPUID_SSE | CPUID_SSE2 | CPUID_SS, + .features[FEAT_1_ECX] = + CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSSE3 | + CPUID_EXT_FMA | CPUID_EXT_CX16 | CPUID_EXT_PCID | CPUID_EXT_SSE41 | + CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | + CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | + CPUID_EXT_XSAVE | CPUID_EXT_AVX | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | + CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM | CPUID_EXT3_ABM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_8000_0008_EBX] = + CPUID_8000_0008_EBX_WBNOINVD, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_TSC_ADJUST | + CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | + CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB | + CPUID_7_0_EBX_SHA_NI, + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_GFNI | + CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | + CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_BUS_LOCK_DETECT | + CPUID_7_0_ECX_CLDEMOTE | CPUID_7_0_ECX_MOVDIRI | + CPUID_7_0_ECX_MOVDIR64B, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_FSRM | CPUID_7_0_EDX_SERIALIZE | + CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES | + CPUID_7_0_EDX_SPEC_CTRL_SSBD, + .features[FEAT_ARCH_CAPABILITIES] = + MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL | + MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | + MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_SBDR_SSDP_NO | + MSR_ARCH_CAP_FBSDP_NO | MSR_ARCH_CAP_PSDP_NO | + MSR_ARCH_CAP_BHI_NO | MSR_ARCH_CAP_PBRSB_NO | + MSR_ARCH_CAP_GDS_NO | MSR_ARCH_CAP_RFDS_NO, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1 | 
CPUID_XSAVE_XSAVES, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_7_1_EAX] = + CPUID_7_1_EAX_SHA512 | CPUID_7_1_EAX_SM3 | CPUID_7_1_EAX_SM4 | + CPUID_7_1_EAX_AVX_VNNI | CPUID_7_1_EAX_CMPCCXADD | + CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_AVX_IFMA | + CPUID_7_1_EAX_LAM, + .features[FEAT_7_1_EDX] = + CPUID_7_1_EDX_AVX_VNNI_INT8 | CPUID_7_1_EDX_AVX_NE_CONVERT | + CPUID_7_1_EDX_AVX_VNNI_INT16 | CPUID_7_1_EDX_PREFETCHITI, + .features[FEAT_7_2_EDX] = + CPUID_7_2_EDX_PSFD | CPUID_7_2_EDX_IPRED_CTRL | + CPUID_7_2_EDX_RRSBA_CTRL | CPUID_7_2_EDX_DDPD_U | + CPUID_7_2_EDX_BHI_CTRL | CPUID_7_2_EDX_MCDT_NO, + .features[FEAT_VMX_BASIC] = + MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = + MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | + MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | + MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_AD_BITS | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | + MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_IA32_PAT | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_VMWRITE_VMEXIT, + .features[FEAT_VMX_PINBASED_CTLS] = + VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | + VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER | + VMX_PIN_BASED_POSTED_INTR, + .features[FEAT_VMX_PROCBASED_CTLS] = + VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | + VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | + VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | + VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | + VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_WBINVD_EXITING | + VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | + VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + VMX_SECONDARY_EXEC_RDRAND_EXITING | + VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | + VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML | + VMX_SECONDARY_EXEC_XSAVES, + .features[FEAT_VMX_VMFUNC] = + MSR_VMX_VMFUNC_EPT_SWITCHING, + .model_id = "Intel Xeon Processor (ClearwaterForest)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 
}, { /* end of list */ }, }, }, @@ -4952,6 +5524,25 @@ static const X86CPUDefinition builtin_x86_defs[] = { }, .cache_info = &epyc_v4_cache_info }, + { + .version = 5, + .props = (PropValue[]) { + { "overflow-recov", "on" }, + { "succor", "on" }, + { "lbrv", "on" }, + { "tsc-scale", "on" }, + { "vmcb-clean", "on" }, + { "flushbyasid", "on" }, + { "pause-filter", "on" }, + { "pfthreshold", "on" }, + { "v-vmsave-vmload", "on" }, + { "vgif", "on" }, + { "model-id", + "AMD EPYC-v5 Processor" }, + { /* end of list */ } + }, + .cache_info = &epyc_v5_cache_info + }, { /* end of list */ } } }, @@ -5090,6 +5681,25 @@ static const X86CPUDefinition builtin_x86_defs[] = { { /* end of list */ } }, }, + { + .version = 5, + .props = (PropValue[]) { + { "overflow-recov", "on" }, + { "succor", "on" }, + { "lbrv", "on" }, + { "tsc-scale", "on" }, + { "vmcb-clean", "on" }, + { "flushbyasid", "on" }, + { "pause-filter", "on" }, + { "pfthreshold", "on" }, + { "v-vmsave-vmload", "on" }, + { "vgif", "on" }, + { "model-id", + "AMD EPYC-Rome-v5 Processor" }, + { /* end of list */ } + }, + .cache_info = &epyc_rome_v5_cache_info + }, { /* end of list */ } } }, @@ -5165,6 +5775,25 @@ static const X86CPUDefinition builtin_x86_defs[] = { }, .cache_info = &epyc_milan_v2_cache_info }, + { + .version = 3, + .props = (PropValue[]) { + { "overflow-recov", "on" }, + { "succor", "on" }, + { "lbrv", "on" }, + { "tsc-scale", "on" }, + { "vmcb-clean", "on" }, + { "flushbyasid", "on" }, + { "pause-filter", "on" }, + { "pfthreshold", "on" }, + { "v-vmsave-vmload", "on" }, + { "vgif", "on" }, + { "model-id", + "AMD EPYC-Milan-v3 Processor" }, + { /* end of list */ } + }, + .cache_info = &epyc_milan_v3_cache_info + }, { /* end of list */ } } }, @@ -5204,7 +5833,7 @@ static const X86CPUDefinition builtin_x86_defs[] = { CPUID_8000_0008_EBX_STIBP_ALWAYS_ON | CPUID_8000_0008_EBX_AMD_SSBD | CPUID_8000_0008_EBX_AMD_PSFD, .features[FEAT_8000_0021_EAX] = - CPUID_8000_0021_EAX_No_NESTED_DATA_BP | + CPUID_8000_0021_EAX_NO_NESTED_DATA_BP | CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING | CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE | CPUID_8000_0021_EAX_AUTO_IBRS, @@ -5239,6 +5868,250 @@ static const X86CPUDefinition builtin_x86_defs[] = { .xlevel = 0x80000022, .model_id = "AMD EPYC-Genoa Processor", .cache_info = &epyc_genoa_cache_info, + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .props = (PropValue[]) { + { "overflow-recov", "on" }, + { "succor", "on" }, + { "lbrv", "on" }, + { "tsc-scale", "on" }, + { "vmcb-clean", "on" }, + { "flushbyasid", "on" }, + { "pause-filter", "on" }, + { "pfthreshold", "on" }, + { "v-vmsave-vmload", "on" }, + { "vgif", "on" }, + { "fs-gs-base-ns", "on" }, + { "perfmon-v2", "on" }, + { "model-id", + "AMD EPYC-Genoa-v2 Processor" }, + { /* end of list */ } + }, + .cache_info = &epyc_genoa_v2_cache_info + }, + { /* end of list */ } + } + }, + { + .name = "YongFeng", + .level = 0x1F, + .vendor = CPUID_VENDOR_ZHAOXIN1, + .family = 7, + .model = 11, + .stepping = 3, + /* missing: CPUID_HT, CPUID_TM, CPUID_PBE */ + .features[FEAT_1_EDX] = + CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_ACPI | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | + CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | + CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | + CPUID_PSE | CPUID_DE | CPUID_VME | CPUID_FP87, + /* + * missing: CPUID_EXT_OSXSAVE, CPUID_EXT_XTPR, CPUID_EXT_TM2, + * CPUID_EXT_EST, CPUID_EXT_SMX, CPUID_EXT_VMX + */ + .features[FEAT_1_ECX] = + 
CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | + CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_TSC_DEADLINE_TIMER | + CPUID_EXT_POPCNT | CPUID_EXT_MOVBE | CPUID_EXT_X2APIC | + CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | CPUID_EXT_PCID | + CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | + CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_BMI2 | + CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_FSGSBASE, + /* missing: CPUID_7_0_ECX_OSPKE */ + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_UMIP, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | + CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, + .features[FEAT_8000_0007_EDX] = CPUID_APM_INVTSC, + /* + * TODO: When the Linux kernel introduces other existing definitions + * for this leaf, remember to update the definitions here. + */ + .features[FEAT_C000_0001_EDX] = + CPUID_C000_0001_EDX_PMM_EN | CPUID_C000_0001_EDX_PMM | + CPUID_C000_0001_EDX_PHE_EN | CPUID_C000_0001_EDX_PHE | + CPUID_C000_0001_EDX_ACE2 | + CPUID_C000_0001_EDX_XCRYPT_EN | CPUID_C000_0001_EDX_XCRYPT | + CPUID_C000_0001_EDX_XSTORE_EN | CPUID_C000_0001_EDX_XSTORE, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT, + .features[FEAT_ARCH_CAPABILITIES] = + MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | + MSR_ARCH_CAP_MDS_NO | MSR_ARCH_CAP_PSCHANGE_MC_NO | + MSR_ARCH_CAP_SSB_NO, + .features[FEAT_VMX_PROCBASED_CTLS] = + VMX_CPU_BASED_VIRTUAL_INTR_PENDING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_INVLPG_EXITING | + VMX_CPU_BASED_MWAIT_EXITING | VMX_CPU_BASED_RDPMC_EXITING | + VMX_CPU_BASED_RDTSC_EXITING | VMX_CPU_BASED_CR3_LOAD_EXITING | + VMX_CPU_BASED_CR3_STORE_EXITING | VMX_CPU_BASED_CR8_LOAD_EXITING | + VMX_CPU_BASED_CR8_STORE_EXITING | VMX_CPU_BASED_TPR_SHADOW | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_TRAP_FLAG | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + /* + * missing: VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING, + * VMX_SECONDARY_EXEC_TSC_SCALING + */ + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | + VMX_SECONDARY_EXEC_RDTSCP | VMX_SECONDARY_EXEC_ENABLE_VPID | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_WBINVD_EXITING | + VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | + VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + VMX_SECONDARY_EXEC_RDRAND_EXITING | + VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | + VMX_SECONDARY_EXEC_SHADOW_VMCS | + VMX_SECONDARY_EXEC_ENABLE_PML, + .features[FEAT_VMX_PINBASED_CTLS] = + VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | + VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER | + VMX_PIN_BASED_POSTED_INTR, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 
| + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_IA32_PAT | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + /* missing: VMX_VM_ENTRY_SMM, VMX_VM_ENTRY_DEACT_DUAL_MONITOR */ + .features[FEAT_VMX_ENTRY_CTLS] = + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_IA32_EFER, + /* + * missing: MSR_VMX_MISC_ACTIVITY_SHUTDOWN, + * MSR_VMX_MISC_ACTIVITY_WAIT_SIPI + */ + .features[FEAT_VMX_MISC] = + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_VMWRITE_VMEXIT, + /* missing: MSR_VMX_EPT_UC */ + .features[FEAT_VMX_EPT_VPID_CAPS] = + MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | + MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | + MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_AD_BITS | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID | + MSR_VMX_EPT_INVVPID_ALL_CONTEXT | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, + .features[FEAT_VMX_BASIC] = + MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, + .xlevel = 0x80000008, + .model_id = "Zhaoxin YongFeng Processor", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .note = "with the correct model number", + .props = (PropValue[]) { + { "model", "0x5b" }, + { /* end of list */ } + } + }, + { /* end of list */ } + } + }, + { + .name = "EPYC-Turin", + .level = 0xd, + .vendor = CPUID_VENDOR_AMD, + .family = 26, + .model = 0, + .stepping = 0, + .features[FEAT_1_ECX] = + CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | + CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | + CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | + CPUID_EXT_PCID | CPUID_EXT_CX16 | CPUID_EXT_FMA | + CPUID_EXT_SSSE3 | CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | + CPUID_EXT_SSE3, + .features[FEAT_1_EDX] = + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | + CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | + CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | + CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | + CPUID_VME | CPUID_FP87, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | + CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | + CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_AVX512F | + CPUID_7_0_EBX_AVX512DQ | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_AVX512IFMA | + CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB | + CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_SHA_NI | + CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL, + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | + CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | + CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | + CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | + CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57 | + CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_MOVDIRI | + CPUID_7_0_ECX_MOVDIR64B, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_FSRM | CPUID_7_0_EDX_AVX512_VP2INTERSECT, + .features[FEAT_7_1_EAX] = + CPUID_7_1_EAX_AVX_VNNI | CPUID_7_1_EAX_AVX512_BF16, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | + CPUID_EXT3_MISALIGNSSE | 
CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | + CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | + CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | + CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0007_EBX] = + CPUID_8000_0007_EBX_OVERFLOW_RECOV | CPUID_8000_0007_EBX_SUCCOR, + .features[FEAT_8000_0008_EBX] = + CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR | + CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB | + CPUID_8000_0008_EBX_IBRS | CPUID_8000_0008_EBX_STIBP | + CPUID_8000_0008_EBX_STIBP_ALWAYS_ON | + CPUID_8000_0008_EBX_AMD_SSBD | CPUID_8000_0008_EBX_AMD_PSFD, + .features[FEAT_8000_0021_EAX] = + CPUID_8000_0021_EAX_NO_NESTED_DATA_BP | + CPUID_8000_0021_EAX_FS_GS_BASE_NS | + CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING | + CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE | + CPUID_8000_0021_EAX_AUTO_IBRS | CPUID_8000_0021_EAX_PREFETCHI | + CPUID_8000_0021_EAX_SBPB | CPUID_8000_0021_EAX_IBPB_BRTYPE | + CPUID_8000_0021_EAX_SRSO_USER_KERNEL_NO, + .features[FEAT_8000_0022_EAX] = + CPUID_8000_0022_EAX_PERFMON_V2, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, + .features[FEAT_SVM] = + CPUID_SVM_NPT | CPUID_SVM_LBRV | CPUID_SVM_NRIPSAVE | + CPUID_SVM_TSCSCALE | CPUID_SVM_VMCBCLEAN | CPUID_SVM_FLUSHASID | + CPUID_SVM_PAUSEFILTER | CPUID_SVM_PFTHRESHOLD | + CPUID_SVM_V_VMSAVE_VMLOAD | CPUID_SVM_VGIF | + CPUID_SVM_VNMI | CPUID_SVM_SVME_ADDR_CHK, + .xlevel = 0x80000022, + .model_id = "AMD EPYC-Turin Processor", + .cache_info = &epyc_turin_cache_info, }, }; @@ -5283,10 +6156,9 @@ static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model) return v; } -static Property max_x86_cpu_properties[] = { +static const Property max_x86_cpu_properties[] = { DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true), DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false), - DEFINE_PROP_END_OF_LIST() }; static void max_x86_cpu_realize(DeviceState *dev, Error **errp) @@ -5308,7 +6180,7 @@ static void max_x86_cpu_realize(DeviceState *dev, Error **errp) x86_cpu_realizefn(dev, errp); } -static void max_x86_cpu_class_init(ObjectClass *oc, void *data) +static void max_x86_cpu_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); X86CPUClass *xcc = X86_CPU_CLASS(oc); @@ -5350,7 +6222,7 @@ static const TypeInfo max_x86_cpu_type_info = { .class_init = max_x86_cpu_class_init, }; -static char *feature_word_description(FeatureWordInfo *f, uint32_t bit) +static char *feature_word_description(FeatureWordInfo *f) { assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD); @@ -5359,11 +6231,15 @@ static char *feature_word_description(FeatureWordInfo *f, uint32_t bit) { const char *reg = get_register_name_32(f->cpuid.reg); assert(reg); - return g_strdup_printf("CPUID.%02XH:%s", - f->cpuid.eax, reg); + if (!f->cpuid.needs_ecx) { + return g_strdup_printf("CPUID[eax=%02Xh].%s", f->cpuid.eax, reg); + } else { + return g_strdup_printf("CPUID[eax=%02Xh,ecx=%02Xh].%s", + f->cpuid.eax, f->cpuid.ecx, reg); + } } case MSR_FEATURE_WORD: - return g_strdup_printf("MSR(%02XH)", + return g_strdup_printf("MSR(%02Xh)", f->msr.index); } @@ -5383,12 +6259,13 @@ static bool x86_cpu_have_filtered_features(X86CPU *cpu) return false; } -static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, - const char *verbose_prefix) +void 
mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, + const char *verbose_prefix) { CPUX86State *env = &cpu->env; FeatureWordInfo *f = &feature_word_info[w]; int i; + g_autofree char *feat_word_str = feature_word_description(f); if (!cpu->force_features) { env->features[w] &= ~mask; @@ -5401,7 +6278,35 @@ static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, for (i = 0; i < 64; ++i) { if ((1ULL << i) & mask) { - g_autofree char *feat_word_str = feature_word_description(f, i); + warn_report("%s: %s%s%s [bit %d]", + verbose_prefix, + feat_word_str, + f->feat_names[i] ? "." : "", + f->feat_names[i] ? f->feat_names[i] : "", i); + } + } +} + +void mark_forced_on_features(X86CPU *cpu, FeatureWord w, uint64_t mask, + const char *verbose_prefix) +{ + CPUX86State *env = &cpu->env; + FeatureWordInfo *f = &feature_word_info[w]; + int i; + + if (!cpu->force_features) { + env->features[w] |= mask; + } + + cpu->forced_on_features[w] |= mask; + + if (!verbose_prefix) { + return; + } + + for (i = 0; i < 64; ++i) { + if ((1ULL << i) & mask) { + g_autofree char *feat_word_str = feature_word_description(f); warn_report("%s: %s%s%s [bit %d]", verbose_prefix, feat_word_str, @@ -5417,13 +6322,13 @@ static void x86_cpuid_version_get_family(Object *obj, Visitor *v, { X86CPU *cpu = X86_CPU(obj); CPUX86State *env = &cpu->env; - int64_t value; + uint64_t value; value = (env->cpuid_version >> 8) & 0xf; if (value == 0xf) { value += (env->cpuid_version >> 20) & 0xff; } - visit_type_int(v, name, &value, errp); + visit_type_uint64(v, name, &value, errp); } static void x86_cpuid_version_set_family(Object *obj, Visitor *v, @@ -5432,16 +6337,15 @@ static void x86_cpuid_version_set_family(Object *obj, Visitor *v, { X86CPU *cpu = X86_CPU(obj); CPUX86State *env = &cpu->env; - const int64_t min = 0; - const int64_t max = 0xff + 0xf; - int64_t value; + const uint64_t max = 0xff + 0xf; + uint64_t value; - if (!visit_type_int(v, name, &value, errp)) { + if (!visit_type_uint64(v, name, &value, errp)) { return; } - if (value < min || value > max) { - error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", - name ? name : "null", value, min, max); + if (value > max) { + error_setg(errp, "parameter '%s' can be at most %" PRIu64, + name ? name : "null", max); return; } @@ -5459,11 +6363,11 @@ static void x86_cpuid_version_get_model(Object *obj, Visitor *v, { X86CPU *cpu = X86_CPU(obj); CPUX86State *env = &cpu->env; - int64_t value; + uint64_t value; value = (env->cpuid_version >> 4) & 0xf; value |= ((env->cpuid_version >> 16) & 0xf) << 4; - visit_type_int(v, name, &value, errp); + visit_type_uint64(v, name, &value, errp); } static void x86_cpuid_version_set_model(Object *obj, Visitor *v, @@ -5472,16 +6376,15 @@ static void x86_cpuid_version_set_model(Object *obj, Visitor *v, { X86CPU *cpu = X86_CPU(obj); CPUX86State *env = &cpu->env; - const int64_t min = 0; - const int64_t max = 0xff; - int64_t value; + const uint64_t max = 0xff; + uint64_t value; - if (!visit_type_int(v, name, &value, errp)) { + if (!visit_type_uint64(v, name, &value, errp)) { return; } - if (value < min || value > max) { - error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", - name ? name : "null", value, min, max); + if (value > max) { + error_setg(errp, "parameter '%s' can be at most %" PRIu64, + name ? 
name : "null", max); return; } @@ -5495,10 +6398,10 @@ static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v, { X86CPU *cpu = X86_CPU(obj); CPUX86State *env = &cpu->env; - int64_t value; + uint64_t value; value = env->cpuid_version & 0xf; - visit_type_int(v, name, &value, errp); + visit_type_uint64(v, name, &value, errp); } static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, @@ -5507,16 +6410,15 @@ static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, { X86CPU *cpu = X86_CPU(obj); CPUX86State *env = &cpu->env; - const int64_t min = 0; - const int64_t max = 0xf; - int64_t value; + const uint64_t max = 0xf; + uint64_t value; - if (!visit_type_int(v, name, &value, errp)) { + if (!visit_type_uint64(v, name, &value, errp)) { return; } - if (value < min || value > max) { - error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", - name ? name : "null", value, min, max); + if (value > max) { + error_setg(errp, "parameter '%s' can be at most %" PRIu64, + name ? name : "null", max); return; } @@ -5610,16 +6512,15 @@ static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { X86CPU *cpu = X86_CPU(obj); - const int64_t min = 0; const int64_t max = INT64_MAX; int64_t value; if (!visit_type_int(v, name, &value, errp)) { return; } - if (value < min || value > max) { - error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", - name ? name : "null", value, min, max); + if (value < 0 || value > max) { + error_setg(errp, "parameter '%s' can be at most %" PRId64, + name ? name : "null", max); return; } @@ -5798,7 +6699,7 @@ static void x86_cpu_parse_featurestr(const char *typename, char *features, } } -static void x86_cpu_filter_features(X86CPU *cpu, bool verbose); +static bool x86_cpu_filter_features(X86CPU *cpu, bool verbose); /* Build a list with the name of all features on a feature word array */ static void x86_cpu_list_feature_names(FeatureWordArray features, @@ -5849,7 +6750,7 @@ static void listflags(GList *features) } /* Sort alphabetically by type name, respecting X86CPUClass::ordering. 
*/ -static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) +static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b, gpointer d) { ObjectClass *class_a = (ObjectClass *)a; ObjectClass *class_b = (ObjectClass *)b; @@ -5870,7 +6771,7 @@ static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) static GSList *get_sorted_cpu_model_list(void) { GSList *list = object_class_get_list(TYPE_X86_CPU, false); - list = g_slist_sort(list, x86_cpu_list_compare); + list = g_slist_sort_with_data(list, x86_cpu_list_compare, NULL); return list; } @@ -5916,7 +6817,7 @@ static void x86_cpu_list_entry(gpointer data, gpointer user_data) desc = g_strdup_printf("%s [%s]", model_id, cc->model->note); } if (!desc) { - desc = g_strdup_printf("%s", model_id); + desc = g_strdup(model_id); } if (cc->model && cc->model->cpudef->deprecation_note) { @@ -5927,8 +6828,13 @@ static void x86_cpu_list_entry(gpointer data, gpointer user_data) qemu_printf(" %-20s %s\n", name, desc); } +static gint strcmp_wrap(gconstpointer a, gconstpointer b, gpointer d) +{ + return strcmp(a, b); +} + /* list available CPU models and flags */ -void x86_cpu_list(void) +static void x86_cpu_list(void) { int i, j; GSList *list; @@ -5949,7 +6855,7 @@ void x86_cpu_list(void) } } - names = g_list_sort(names, (GCompareFunc)strcmp); + names = g_list_sort_with_data(names, strcmp_wrap, NULL); qemu_printf("\nRecognized CPUID flags:\n"); listflags(names); @@ -6039,7 +6945,7 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w) { FeatureWordInfo *wi = &feature_word_info[w]; uint64_t r = 0; - uint32_t unavail = 0; + uint64_t unavail = 0; if (kvm_enabled()) { switch (wi->type) { @@ -6087,13 +6993,28 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w) } break; + case FEAT_7_0_EBX: +#ifndef CONFIG_USER_ONLY + if (!check_sgx_support()) { + unavail = CPUID_7_0_EBX_SGX; + } +#endif + break; + case FEAT_7_0_ECX: +#ifndef CONFIG_USER_ONLY + if (!check_sgx_support()) { + unavail = CPUID_7_0_ECX_SGX_LC; + } +#endif + break; + default: break; } r &= ~unavail; if (cpu && cpu->migratable) { - r &= x86_cpu_get_migratable_flags(w); + r &= x86_cpu_get_migratable_flags(cpu, w); } return r; } @@ -6171,7 +7092,7 @@ void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) * Only for builtin_x86_defs models initialized with x86_register_cpudef_types. */ -static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model) +static void x86_cpu_apply_version_props(X86CPU *cpu, const X86CPUModel *model) { const X86CPUVersionDefinition *vdef; X86CPUVersion version = x86_cpu_model_resolve_version(model); @@ -6200,7 +7121,7 @@ static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model) } static const CPUCaches *x86_cpu_get_versioned_cache_info(X86CPU *cpu, - X86CPUModel *model) + const X86CPUModel *model) { const X86CPUVersionDefinition *vdef; X86CPUVersion version = x86_cpu_model_resolve_version(model); @@ -6228,7 +7149,7 @@ static const CPUCaches *x86_cpu_get_versioned_cache_info(X86CPU *cpu, * Load data from X86CPUDefinition into a X86CPU object. * Only for builtin_x86_defs models initialized with x86_register_cpudef_types. 
*/ -static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) +static void x86_cpu_load_model(X86CPU *cpu, const X86CPUModel *model) { const X86CPUDefinition *def = model->cpudef; CPUX86State *env = &cpu->env; @@ -6274,6 +7195,9 @@ static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) */ object_property_set_str(OBJECT(cpu), "vendor", def->vendor, &error_abort); + object_property_set_uint(OBJECT(cpu), "avx10-version", def->avx10_version, + &error_abort); + x86_cpu_apply_version_props(cpu, model); /* @@ -6293,9 +7217,9 @@ static const gchar *x86_gdb_arch_name(CPUState *cs) #endif } -static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) +static void x86_cpu_cpudef_class_init(ObjectClass *oc, const void *data) { - X86CPUModel *model = data; + const X86CPUModel *model = data; X86CPUClass *xcc = X86_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); @@ -6314,7 +7238,7 @@ static void x86_register_cpu_model_type(const char *name, X86CPUModel *model) .class_data = model, }; - type_register(&ti); + type_register_static(&ti); } @@ -6382,18 +7306,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, CPUState *cs = env_cpu(env); uint32_t limit; uint32_t signature[3]; - X86CPUTopoInfo topo_info; - uint32_t cores_per_pkg; + X86CPUTopoInfo *topo_info = &env->topo_info; uint32_t threads_per_pkg; - topo_info.dies_per_pkg = env->nr_dies; - topo_info.modules_per_die = env->nr_modules; - topo_info.cores_per_module = cs->nr_cores / env->nr_dies / env->nr_modules; - topo_info.threads_per_core = cs->nr_threads; - - cores_per_pkg = topo_info.cores_per_module * topo_info.modules_per_die * - topo_info.dies_per_pkg; - threads_per_pkg = cores_per_pkg * topo_info.threads_per_core; + threads_per_pkg = x86_threads_per_pkg(topo_info); /* Calculate & apply limits for different index ranges */ if (index >= 0xC0000000) { @@ -6432,10 +7348,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *edx = env->features[FEAT_1_EDX]; if (threads_per_pkg > 1) { *ebx |= threads_per_pkg << 16; - *edx |= CPUID_HT; - } - if (!cpu->enable_pmu) { - *ecx &= ~CPUID_EXT_PDCM; } break; case 2: @@ -6470,13 +7382,13 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, int host_vcpus_per_cache = 1 + ((*eax & 0x3FFC000) >> 14); *eax &= ~0xFC000000; - *eax |= max_core_ids_in_package(&topo_info) << 26; + *eax |= max_core_ids_in_package(topo_info) << 26; if (host_vcpus_per_cache > threads_per_pkg) { *eax &= ~0x3FFC000; /* Share the cache at package level. 
*/ - *eax |= max_thread_ids_for_cache(&topo_info, - CPU_TOPO_LEVEL_PACKAGE) << 14; + *eax |= max_thread_ids_for_cache(topo_info, + CPU_TOPOLOGY_LEVEL_SOCKET) << 14; } } } else if (cpu->vendor_cpuid_only && IS_AMD_CPU(env)) { @@ -6487,7 +7399,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, switch (count) { case 0: /* L1 dcache info */ encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache, - &topo_info, + topo_info, eax, ebx, ecx, edx); if (!cpu->l1_cache_per_core) { *eax &= ~MAKE_64BIT_MASK(14, 12); @@ -6495,7 +7407,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, break; case 1: /* L1 icache info */ encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache, - &topo_info, + topo_info, eax, ebx, ecx, edx); if (!cpu->l1_cache_per_core) { *eax &= ~MAKE_64BIT_MASK(14, 12); @@ -6503,13 +7415,13 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, break; case 2: /* L2 cache info */ encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache, - &topo_info, + topo_info, eax, ebx, ecx, edx); break; case 3: /* L3 cache info */ if (cpu->enable_l3_cache) { encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache, - &topo_info, + topo_info, eax, ebx, ecx, edx); break; } @@ -6537,8 +7449,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, case 7: /* Structured Extended Feature Flags Enumeration Leaf */ if (count == 0) { - uint32_t eax_0_unused, ebx_0, ecx_0, edx_0_unused; - /* Maximum ECX value for sub-leaves */ *eax = env->cpuid_level_func7; *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ @@ -6547,28 +7457,11 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *ecx |= CPUID_7_0_ECX_OSPKE; } *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ - - /* - * SGX cannot be emulated in software. If hardware does not - * support enabling SGX and/or SGX flexible launch control, - * then we need to update the VM's CPUID values accordingly. 
- */ - x86_cpu_get_supported_cpuid(0x7, 0, - &eax_0_unused, &ebx_0, - &ecx_0, &edx_0_unused); - if ((*ebx & CPUID_7_0_EBX_SGX) && !(ebx_0 & CPUID_7_0_EBX_SGX)) { - *ebx &= ~CPUID_7_0_EBX_SGX; - } - - if ((*ecx & CPUID_7_0_ECX_SGX_LC) - && (!(*ebx & CPUID_7_0_EBX_SGX) || !(ecx_0 & CPUID_7_0_ECX_SGX_LC))) { - *ecx &= ~CPUID_7_0_ECX_SGX_LC; - } } else if (count == 1) { *eax = env->features[FEAT_7_1_EAX]; + *ecx = env->features[FEAT_7_1_ECX]; *edx = env->features[FEAT_7_1_EDX]; *ebx = 0; - *ecx = 0; } else if (count == 2) { *edx = env->features[FEAT_7_2_EDX]; *eax = 0; @@ -6611,12 +7504,12 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, switch (count) { case 0: - *eax = apicid_core_offset(&topo_info); - *ebx = topo_info.threads_per_core; + *eax = apicid_core_offset(topo_info); + *ebx = topo_info->threads_per_core; *ecx |= CPUID_B_ECX_TOPO_LEVEL_SMT << 8; break; case 1: - *eax = apicid_pkg_offset(&topo_info); + *eax = apicid_pkg_offset(topo_info); *ebx = threads_per_pkg; *ecx |= CPUID_B_ECX_TOPO_LEVEL_CORE << 8; break; @@ -6637,12 +7530,12 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, break; case 0x1F: /* V2 Extended Topology Enumeration Leaf */ - if (!x86_has_extended_topo(env->avail_cpu_topo)) { + if (!x86_has_cpuid_0x1f(cpu)) { *eax = *ebx = *ecx = *edx = 0; break; } - encode_topo_cpuid1f(env, count, &topo_info, eax, ebx, ecx, edx); + encode_topo_cpuid1f(env, count, topo_info, eax, ebx, ecx, edx); break; case 0xD: { /* Processor Extended State */ @@ -6821,6 +7714,16 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } break; } + case 0x24: { + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + if ((env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) && count == 0) { + *ebx = env->features[FEAT_24_0_EBX] | env->avx10_version; + } + break; + } case 0x40000000: /* * CPUID code in kvm_arch_init_vcpu() ignores stuff @@ -6857,17 +7760,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *ecx = env->features[FEAT_8000_0001_ECX]; *edx = env->features[FEAT_8000_0001_EDX]; - /* The Linux kernel checks for the CMPLegacy bit and - * discards multiple thread information if it is set. - * So don't set it here for Intel to make Linux guests happy. - */ - if (threads_per_pkg > 1) { - if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || - env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || - env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { - *ecx |= 1 << 1; /* CmpLegacy bit */ - } - } if (tcg_enabled() && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && !(env->hflags & HF_LMA_MASK)) { *edx &= ~CPUID_EXT2_SYSCALL; @@ -6935,7 +7827,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, * thread ID within a package". 
* Bits 7:0 is "The number of threads in the package is NC+1" */ - *ecx = (apicid_pkg_offset(&topo_info) << 12) | + *ecx = (apicid_pkg_offset(topo_info) << 12) | (threads_per_pkg - 1); } else { *ecx = 0; @@ -6964,19 +7856,19 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, switch (count) { case 0: /* L1 dcache info */ encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, - &topo_info, eax, ebx, ecx, edx); + topo_info, eax, ebx, ecx, edx); break; case 1: /* L1 icache info */ encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, - &topo_info, eax, ebx, ecx, edx); + topo_info, eax, ebx, ecx, edx); break; case 2: /* L2 cache info */ encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, - &topo_info, eax, ebx, ecx, edx); + topo_info, eax, ebx, ecx, edx); break; case 3: /* L3 cache info */ encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, - &topo_info, eax, ebx, ecx, edx); + topo_info, eax, ebx, ecx, edx); break; default: /* end of info */ *eax = *ebx = *ecx = *edx = 0; @@ -6988,7 +7880,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, break; case 0x8000001E: if (cpu->core_id <= 255) { - encode_topo_cpuid8000001e(cpu, &topo_info, eax, ebx, ecx, edx); + encode_topo_cpuid8000001e(cpu, topo_info, eax, ebx, ecx, edx); } else { *eax = 0; *ebx = 0; @@ -6996,6 +7888,16 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *edx = 0; } break; + case 0x80000022: + *eax = *ebx = *ecx = *edx = 0; + /* AMD Extended Performance Monitoring and Debug */ + if (kvm_enabled() && cpu->enable_pmu && + (env->features[FEAT_8000_0022_EAX] & CPUID_8000_0022_EAX_PERFMON_V2)) { + *eax |= CPUID_8000_0022_EAX_PERFMON_V2; + *ebx |= kvm_arch_get_supported_cpuid(cs->kvm_state, index, count, + R_EBX) & 0xf; + } + break; case 0xC0000000: *eax = env->cpuid_xlevel2; *ebx = 0; @@ -7029,8 +7931,9 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } break; case 0x80000021: + *eax = *ebx = *ecx = *edx = 0; *eax = env->features[FEAT_8000_0021_EAX]; - *ebx = *ecx = *edx = 0; + *ebx = env->features[FEAT_8000_0021_EBX]; break; default: /* reserved values: zero */ @@ -7053,6 +7956,23 @@ static void x86_cpu_set_sgxlepubkeyhash(CPUX86State *env) #endif } +static bool cpuid_has_xsave_feature(CPUX86State *env, const ExtSaveArea *esa) +{ + if (!esa->size) { + return false; + } + + if (env->features[esa->feature] & esa->bits) { + return true; + } + if (esa->feature == FEAT_7_0_EBX && esa->bits == CPUID_7_0_EBX_AVX512F + && (env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10)) { + return true; + } + + return false; +} + static void x86_cpu_reset_hold(Object *obj, ResetType type) { CPUState *cs = CPU(obj); @@ -7069,6 +7989,10 @@ static void x86_cpu_reset_hold(Object *obj, ResetType type) memset(env, 0, offsetof(CPUX86State, end_reset_fields)); + if (tcg_enabled()) { + cpu_init_fp_statuses(env); + } + env->old_exception = -1; /* init to reset state */ @@ -7161,7 +8085,7 @@ static void x86_cpu_reset_hold(Object *obj, ResetType type) if (!((1 << i) & CPUID_XSTATE_XCR0_MASK)) { continue; } - if (env->features[esa->feature] & esa->bits) { + if (cpuid_has_xsave_feature(env, esa)) { xcr0 |= 1ull << i; } } @@ -7299,7 +8223,7 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu) mask = 0; for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { const ExtSaveArea *esa = &x86_ext_save_areas[i]; - if (env->features[esa->feature] & esa->bits) { + if (cpuid_has_xsave_feature(env, esa)) { mask |= (1ULL << i); } } @@ -7392,6 +8316,33 @@ void 
x86_cpu_expand_features(X86CPU *cpu, Error **errp) ~env->user_features[w] & ~feature_word_info[w].no_autoenable_flags; } + + if ((env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) && !env->avx10_version) { + uint32_t eax, ebx, ecx, edx; + x86_cpu_get_supported_cpuid(0x24, 0, &eax, &ebx, &ecx, &edx); + env->avx10_version = ebx & 0xff; + } + } + + if (x86_threads_per_pkg(&env->topo_info) > 1) { + env->features[FEAT_1_EDX] |= CPUID_HT; + + /* + * The Linux kernel checks for the CMPLegacy bit and + * discards multiple thread information if it is set. + * So don't set it here for Intel (and other processors + * following Intel's behavior) to make Linux guests happy. + */ + if (!IS_INTEL_CPU(env) && !IS_ZHAOXIN_CPU(env)) { + env->features[FEAT_8000_0001_ECX] |= CPUID_EXT3_CMP_LEG; + } + } + + if (!cpu->enable_pmu) { + mark_unavailable_features(cpu, FEAT_1_ECX, + env->user_features[FEAT_1_ECX] & CPUID_EXT_PDCM, + "This feature is not available due to PMU being disabled"); + env->features[FEAT_1_ECX] &= ~CPUID_EXT_PDCM; } for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) { @@ -7422,6 +8373,7 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp) x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX); x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX); x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX); + x86_cpu_adjust_feat_level(cpu, FEAT_7_1_ECX); x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EDX); x86_cpu_adjust_feat_level(cpu, FEAT_7_2_EDX); x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); @@ -7450,11 +8402,16 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp) * cpu->vendor_cpuid_only has been unset for compatibility with older * machine types. */ - if (x86_has_extended_topo(env->avail_cpu_topo) && + if (x86_has_cpuid_0x1f(cpu) && (IS_INTEL_CPU(env) || !cpu->vendor_cpuid_only)) { x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F); } + /* Advanced Vector Extensions 10 (AVX10) requires CPUID[0x24] */ + if (env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) { + x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x24); + } + /* SVM requires CPUID[0x8000000A] */ if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A); @@ -7498,13 +8455,17 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp) * Finishes initialization of CPUID data, filters CPU feature * words based on host availability of each feature. * - * Returns: 0 if all flags are supported by the host, non-zero otherwise. + * Returns: true if any flag is not supported by the host, false otherwise. 
*/ -static void x86_cpu_filter_features(X86CPU *cpu, bool verbose) +static bool x86_cpu_filter_features(X86CPU *cpu, bool verbose) { CPUX86State *env = &cpu->env; FeatureWord w; const char *prefix = NULL; + bool have_filtered_features; + + uint32_t eax_0, ebx_0, ecx_0, edx_0; + uint32_t eax_1, ebx_1, ecx_1, edx_1; if (verbose) { prefix = accel_uses_host_cpuid() @@ -7526,13 +8487,10 @@ static void x86_cpu_filter_features(X86CPU *cpu, bool verbose) */ if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && kvm_enabled()) { - uint32_t eax_0, ebx_0, ecx_0, edx_0_unused; - uint32_t eax_1, ebx_1, ecx_1_unused, edx_1_unused; - x86_cpu_get_supported_cpuid(0x14, 0, - &eax_0, &ebx_0, &ecx_0, &edx_0_unused); + &eax_0, &ebx_0, &ecx_0, &edx_0); x86_cpu_get_supported_cpuid(0x14, 1, - &eax_1, &ebx_1, &ecx_1_unused, &edx_1_unused); + &eax_1, &ebx_1, &ecx_1, &edx_1); if (!eax_0 || ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) || @@ -7552,6 +8510,30 @@ static void x86_cpu_filter_features(X86CPU *cpu, bool verbose) mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix); } } + + have_filtered_features = x86_cpu_have_filtered_features(cpu); + + if (env->features[FEAT_7_1_EDX] & CPUID_7_1_EDX_AVX10) { + x86_cpu_get_supported_cpuid(0x24, 0, + &eax_0, &ebx_0, &ecx_0, &edx_0); + uint8_t version = ebx_0 & 0xff; + + if (version < env->avx10_version) { + if (prefix) { + warn_report("%s: avx10.%d. Adjust to avx10.%d", + prefix, env->avx10_version, version); + } + env->avx10_version = version; + have_filtered_features = true; + } + } else if (env->avx10_version) { + if (prefix) { + warn_report("%s: avx10.%d.", prefix, env->avx10_version); + } + have_filtered_features = true; + } + + return have_filtered_features; } static void x86_cpu_hyperv_realize(X86CPU *cpu) @@ -7583,6 +8565,64 @@ static void x86_cpu_hyperv_realize(X86CPU *cpu) cpu->hyperv_limits[2] = 0; } +#ifndef CONFIG_USER_ONLY +static bool x86_cpu_update_smp_cache_topo(MachineState *ms, X86CPU *cpu, + Error **errp) +{ + CPUX86State *env = &cpu->env; + CpuTopologyLevel level; + + level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D); + if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) { + env->cache_info_cpuid4.l1d_cache->share_level = level; + env->cache_info_amd.l1d_cache->share_level = level; + } else { + machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D, + env->cache_info_cpuid4.l1d_cache->share_level); + machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D, + env->cache_info_amd.l1d_cache->share_level); + } + + level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I); + if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) { + env->cache_info_cpuid4.l1i_cache->share_level = level; + env->cache_info_amd.l1i_cache->share_level = level; + } else { + machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I, + env->cache_info_cpuid4.l1i_cache->share_level); + machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I, + env->cache_info_amd.l1i_cache->share_level); + } + + level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2); + if (level != CPU_TOPOLOGY_LEVEL_DEFAULT) { + env->cache_info_cpuid4.l2_cache->share_level = level; + env->cache_info_amd.l2_cache->share_level = level; + } else { + machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2, + env->cache_info_cpuid4.l2_cache->share_level); + machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2, + env->cache_info_amd.l2_cache->share_level); + } + + level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3); + if (level != 
CPU_TOPOLOGY_LEVEL_DEFAULT) { + env->cache_info_cpuid4.l3_cache->share_level = level; + env->cache_info_amd.l3_cache->share_level = level; + } else { + machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3, + env->cache_info_cpuid4.l3_cache->share_level); + machine_set_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3, + env->cache_info_amd.l3_cache->share_level); + } + + if (!machine_check_smp_cache(ms, errp)) { + return false; + } + return true; +} +#endif + static void x86_cpu_realizefn(DeviceState *dev, Error **errp) { CPUState *cs = CPU(dev); @@ -7649,14 +8689,14 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) } } - x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid); - - if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) { - error_setg(&local_err, - accel_uses_host_cpuid() ? + if (x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid)) { + if (cpu->enforce_cpuid) { + error_setg(&local_err, + accel_uses_host_cpuid() ? "Host doesn't support requested features" : "TCG doesn't support requested features"); - goto out; + goto out; + } } /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on @@ -7718,6 +8758,21 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) */ cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; + /* + * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU + * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX + * based on inputs (sockets,cores,threads), it is still better to give + * users a warning. + */ + if (IS_AMD_CPU(env) && + !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) && + env->topo_info.threads_per_core > 1) { + warn_report_once("This family of AMD CPU doesn't support " + "hyperthreading(%d). Please configure -smp " + "options properly or try enabling topoext " + "feature.", env->topo_info.threads_per_core); + } + /* For 64bit systems think about the number of physical bits to present. * ideally this should be the same as the host; anything other than matching * the host can cause incorrect guest behaviour. @@ -7807,6 +8862,14 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) #ifndef CONFIG_USER_ONLY MachineState *ms = MACHINE(qdev_get_machine()); + MachineClass *mc = MACHINE_GET_CLASS(ms); + + if (mc->smp_props.has_caches) { + if (!x86_cpu_update_smp_cache_topo(ms, cpu, errp)) { + return; + } + } + qemu_register_reset(x86_cpu_machine_reset_cb, cpu); if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) { @@ -7819,26 +8882,9 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) mce_init(cpu); + x86_cpu_gdb_init(cs); qemu_init_vcpu(cs); - /* - * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU - * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX - * based on inputs (sockets,cores,threads), it is still better to give - * users a warning. - * - * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise - * cs->nr_threads hasn't be populated yet and the checking is incorrect. - */ - if (IS_AMD_CPU(env) && - !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) && - cs->nr_threads > 1) { - warn_report_once("This family of AMD CPU doesn't support " - "hyperthreading(%d). 
Please configure -smp " - "options properly or try enabling topoext " - "feature.", cs->nr_threads); - } - #ifndef CONFIG_USER_ONLY x86_cpu_apic_realize(cpu, &local_err); if (local_err != NULL) { @@ -7970,20 +9016,46 @@ static void x86_cpu_register_feature_bit_props(X86CPUClass *xcc, static void x86_cpu_post_initfn(Object *obj) { + static bool first = true; + uint64_t supported_xcr0; + int i; + + if (first) { + first = false; + + supported_xcr0 = + ((uint64_t) x86_cpu_get_supported_feature_word(NULL, FEAT_XSAVE_XCR0_HI) << 32) | + x86_cpu_get_supported_feature_word(NULL, FEAT_XSAVE_XCR0_LO); + + for (i = XSTATE_SSE_BIT + 1; i < XSAVE_STATE_AREA_COUNT; i++) { + ExtSaveArea *esa = &x86_ext_save_areas[i]; + + if (!(supported_xcr0 & (1 << i))) { + esa->size = 0; + } + } + } + accel_cpu_instance_init(CPU(obj)); + +#ifndef CONFIG_USER_ONLY + if (current_machine && current_machine->cgs) { + x86_confidential_guest_cpu_instance_init( + X86_CONFIDENTIAL_GUEST(current_machine->cgs), (CPU(obj))); + } +#endif } static void x86_cpu_init_default_topo(X86CPU *cpu) { CPUX86State *env = &cpu->env; - env->nr_modules = 1; - env->nr_dies = 1; + env->topo_info = (X86CPUTopoInfo) {1, 1, 1, 1}; - /* SMT, core and package levels are set by default. */ - set_bit(CPU_TOPO_LEVEL_SMT, env->avail_cpu_topo); - set_bit(CPU_TOPO_LEVEL_CORE, env->avail_cpu_topo); - set_bit(CPU_TOPO_LEVEL_PACKAGE, env->avail_cpu_topo); + /* thread, core and socket levels are set by default. */ + set_bit(CPU_TOPOLOGY_LEVEL_THREAD, env->avail_cpu_topo); + set_bit(CPU_TOPOLOGY_LEVEL_CORE, env->avail_cpu_topo); + set_bit(CPU_TOPOLOGY_LEVEL_SOCKET, env->avail_cpu_topo); } static void x86_cpu_initfn(Object *obj) @@ -8073,16 +9145,15 @@ static vaddr x86_cpu_get_pc(CPUState *cs) return cpu->env.eip + cpu->env.segs[R_CS].base; } +#if !defined(CONFIG_USER_ONLY) int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; -#if !defined(CONFIG_USER_ONLY) if (interrupt_request & CPU_INTERRUPT_POLL) { return CPU_INTERRUPT_POLL; } -#endif if (interrupt_request & CPU_INTERRUPT_SIPI) { return CPU_INTERRUPT_SIPI; } @@ -8103,14 +9174,12 @@ int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request) (env->eflags & IF_MASK && !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { return CPU_INTERRUPT_HARD; -#if !defined(CONFIG_USER_ONLY) } else if (env->hflags2 & HF2_VGIF_MASK) { if((interrupt_request & CPU_INTERRUPT_VIRQ) && (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) { return CPU_INTERRUPT_VIRQ; } -#endif } } @@ -8121,45 +9190,14 @@ static bool x86_cpu_has_work(CPUState *cs) { return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0; } - -int x86_mmu_index_pl(CPUX86State *env, unsigned pl) -{ - int mmu_index_32 = (env->hflags & HF_CS64_MASK) ? 0 : 1; - int mmu_index_base = - pl == 3 ? MMU_USER64_IDX : - !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX : - (env->eflags & AC_MASK) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX; - - return mmu_index_base + mmu_index_32; -} - -static int x86_cpu_mmu_index(CPUState *cs, bool ifetch) -{ - CPUX86State *env = cpu_env(cs); - return x86_mmu_index_pl(env, env->hflags & HF_CPL_MASK); -} - -static int x86_mmu_index_kernel_pl(CPUX86State *env, unsigned pl) -{ - int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1; - int mmu_index_base = - !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX : - (pl < 3 && (env->eflags & AC_MASK) - ? 
MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX); - - return mmu_index_base + mmu_index_32; -} - -int cpu_mmu_index_kernel(CPUX86State *env) -{ - return x86_mmu_index_kernel_pl(env, env->hflags & HF_CPL_MASK); -} +#endif /* !CONFIG_USER_ONLY */ static void x86_disas_set_info(CPUState *cs, disassemble_info *info) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; + info->endian = BFD_ENDIAN_LITTLE; info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386 : bfd_mach_i386_i8086); @@ -8214,7 +9252,7 @@ void x86_update_hflags(CPUX86State *env) env->hflags = hflags; } -static Property x86_cpu_properties[] = { +static const Property x86_cpu_properties[] = { #ifdef CONFIG_USER_ONLY /* apic_id = 0 by default for *-user, see commit 9886e834 */ DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0), @@ -8279,8 +9317,10 @@ static Property x86_cpu_properties[] = { HYPERV_FEAT_TLBFLUSH_DIRECT, 0), DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU, hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF), +#ifdef CONFIG_SYNDBG DEFINE_PROP_BIT64("hv-syndbg", X86CPU, hyperv_features, HYPERV_FEAT_SYNDBG, 0), +#endif DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false), DEFINE_PROP_BOOL("hv-enforce-cpuid", X86CPU, hyperv_enforce_cpuid, false), @@ -8312,6 +9352,7 @@ static Property x86_cpu_properties[] = { DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0), DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0), DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0), + DEFINE_PROP_UINT8("avx10-version", X86CPU, env.avx10_version, 0), DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0), DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true), DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor), @@ -8352,13 +9393,13 @@ static Property x86_cpu_properties[] = { DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level, true), DEFINE_PROP_BOOL("x-l1-cache-per-thread", X86CPU, l1_cache_per_core, true), - DEFINE_PROP_END_OF_LIST() }; #ifndef CONFIG_USER_ONLY #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps i386_sysemu_ops = { + .has_work = x86_cpu_has_work, .get_memory_mapping = x86_cpu_get_memory_mapping, .get_paging_enabled = x86_cpu_get_paging_enabled, .get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug, @@ -8372,7 +9413,7 @@ static const struct SysemuCPUOps i386_sysemu_ops = { }; #endif -static void x86_cpu_common_class_init(ObjectClass *oc, void *data) +static void x86_cpu_common_class_init(ObjectClass *oc, const void *data) { X86CPUClass *xcc = X86_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); @@ -8391,9 +9432,8 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data) cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP; cc->class_by_name = x86_cpu_class_by_name; + cc->list_cpus = x86_cpu_list; cc->parse_features = x86_cpu_parse_featurestr; - cc->has_work = x86_cpu_has_work; - cc->mmu_index = x86_cpu_mmu_index; cc->dump_state = x86_cpu_dump_state; cc->set_pc = x86_cpu_set_pc; cc->get_pc = x86_cpu_get_pc; @@ -8404,6 +9444,9 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data) #ifndef CONFIG_USER_ONLY cc->sysemu_ops = &i386_sysemu_ops; #endif /* !CONFIG_USER_ONLY */ +#ifdef CONFIG_TCG + cc->tcg_ops = &x86_tcg_ops; +#endif /* CONFIG_TCG */ cc->gdb_arch_name = x86_gdb_arch_name; #ifdef TARGET_X86_64 @@ -8470,7 +9513,7 @@ static const TypeInfo x86_cpu_type_info = { }; /* "base" CPU model, used by query-cpu-model-expansion */ 
-static void x86_cpu_base_class_init(ObjectClass *oc, void *data) +static void x86_cpu_base_class_init(ObjectClass *oc, const void *data) { X86CPUClass *xcc = X86_CPU_CLASS(oc);