author     Richard Henderson <richard.henderson@linaro.org>    2022-05-16 14:21:50 -0700
committer  Richard Henderson <richard.henderson@linaro.org>    2022-05-16 14:21:50 -0700
commit     afdb415e67e13e8726edc21238c9883447b2c704 (patch)
tree       691c911809a56d796dcabde30d6cd6d047712ccb /target
parent     54b592c427ca2575187082b35d5d6ae85db59f7b (diff)
parent     8eccdb9eb84615291faef1257d5779ebfef7a0d0 (diff)
Merge tag 'for-upstream' of https://gitlab.com/bonzini/qemu into staging

* fix WHPX debugging
* misc qga-vss fixes
* remove the deprecated CPU model 'Icelake-Client'
* support for x86 architectural LBR
* remove deprecated properties
* replace deprecated -soundhw with -audio

# -----BEGIN PGP SIGNATURE-----
#
# iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmJ/hZ4UHHBib256aW5p
# QHJlZGhhdC5jb20ACgkQv/vSX3jHroN2Igf/bFs+yluOikt0eFNmXYnshrGBWPXr
# oam0iumPox34vTzZnjpSjF6tJGxHWOgi+wbgIvbwOYHA/ONxx8akW580j+1VhEWa
# X29VyUzjZBffgFtmlF4fM74/ELYm7s4c1a1/D9TpVP6Dr0fSWbMujbx4dfeVstvf
# sONN+A8sVxaNdV9QKPE6BvqfMlPLoCiigrOetf6iY1KuUtkQDF8xDB0MdzdutqAQ
# szAtQ0rrzjxDx9EuGN1SECFM1/riDUbtOOoA9g2C7gGKrx3/iUc6pzrkIcAfWLFK
# xXbH7+6Wynia0cbUxnrvRdY4daMIxm4N3wUvN7szXgF9kxYxeQcsdgGsNA==
# =n4lu
# -----END PGP SIGNATURE-----
# gpg: Signature made Sat 14 May 2022 03:34:06 AM PDT
# gpg:                using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg:                issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [undefined]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [undefined]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* tag 'for-upstream' of https://gitlab.com/bonzini/qemu: (23 commits)
  configure: remove duplicate help messages
  configure: remove another dead variable
  build: remove useless dependency
  introduce -audio as a replacement for -soundhw
  soundhw: move help handling to vl.c
  soundhw: unify initialization for ISA and PCI soundhw
  soundhw: extract soundhw help to a separate function
  soundhw: remove ability to create multiple soundcards
  rng: make opened property read-only
  crypto: make loaded property read-only
  target/i386: Support Arch LBR in CPUID enumeration
  target/i386: introduce helper to access supported CPUID
  target/i386: Enable Arch LBR migration states in vmstate
  target/i386: Add MSR access interface for Arch LBR
  target/i386: Add XSAVES support for Arch LBR
  target/i386: Enable support for XSAVES based features
  target/i386: Add kvm_get_one_msr helper
  target/i386: Add lbr-fmt vPMU option to support guest LBR
  qdev-properties: Add a new macro with bitmask check for uint64_t property
  i386/cpu: Remove the deprecated cpu model 'Icelake-Client'
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'target')
-rw-r--r--  target/i386/cpu.c            | 331
-rw-r--r--  target/i386/cpu.h            |  57
-rw-r--r--  target/i386/kvm/kvm.c        | 113
-rw-r--r--  target/i386/machine.c        |  38
-rw-r--r--  target/i386/whpx/whpx-all.c  |  13
5 files changed, 364 insertions(+), 188 deletions(-)
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index c4a17c9..35c3475 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -855,7 +855,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
"fsrm", NULL, NULL, NULL,
"avx512-vp2intersect", NULL, "md-clear", NULL,
NULL, NULL, "serialize", NULL,
- "tsx-ldtrk", NULL, NULL /* pconfig */, NULL,
+ "tsx-ldtrk", NULL, NULL /* pconfig */, "arch-lbr",
NULL, NULL, "amx-bf16", "avx512-fp16",
"amx-tile", "amx-int8", "spec-ctrl", "stibp",
NULL, "arch-capabilities", "core-capability", "ssbd",
@@ -937,6 +937,34 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
},
.tcg_features = TCG_XSAVE_FEATURES,
},
+ [FEAT_XSAVE_XSS_LO] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = {
+ .eax = 0xD,
+ .needs_ecx = true,
+ .ecx = 1,
+ .reg = R_ECX,
+ },
+ },
+ [FEAT_XSAVE_XSS_HI] = {
+ .type = CPUID_FEATURE_WORD,
+ .cpuid = {
+ .eax = 0xD,
+ .needs_ecx = true,
+ .ecx = 1,
+ .reg = R_EDX
+ },
+ },
[FEAT_6_EAX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
@@ -952,7 +980,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.cpuid = { .eax = 6, .reg = R_EAX, },
.tcg_features = TCG_6_EAX_FEATURES,
},
- [FEAT_XSAVE_COMP_LO] = {
+ [FEAT_XSAVE_XCR0_LO] = {
.type = CPUID_FEATURE_WORD,
.cpuid = {
.eax = 0xD,
@@ -965,7 +993,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
XSTATE_PKRU_MASK,
},
- [FEAT_XSAVE_COMP_HI] = {
+ [FEAT_XSAVE_XCR0_HI] = {
.type = CPUID_FEATURE_WORD,
.cpuid = {
.eax = 0xD,
@@ -1382,6 +1410,9 @@ static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
};
#undef REGISTER
+/* CPUID feature bits available in XSS */
+#define CPUID_XSTATE_XSS_MASK (XSTATE_ARCH_LBR_MASK)
+
ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
[XSTATE_FP_BIT] = {
/* x87 FP state component is always enabled if XSAVE is supported */
@@ -1414,6 +1445,10 @@ ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
[XSTATE_PKRU_BIT] =
{ .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
.size = sizeof(XSavePKRU) },
+ [XSTATE_ARCH_LBR_BIT] = {
+ .feature = FEAT_7_0_EDX, .bits = CPUID_7_0_EDX_ARCH_LBR,
+ .offset = 0 /* supervisor mode component, offset = 0 */,
+ .size = sizeof(XSavesArchLBR) },
[XSTATE_XTILE_CFG_BIT] = {
.feature = FEAT_7_0_EDX, .bits = CPUID_7_0_EDX_AMX_TILE,
.size = sizeof(XSaveXTILECFG),
@@ -1424,15 +1459,18 @@ ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
},
};
-static uint32_t xsave_area_size(uint64_t mask)
+static uint32_t xsave_area_size(uint64_t mask, bool compacted)
{
+ uint64_t ret = x86_ext_save_areas[0].size;
+ const ExtSaveArea *esa;
+ uint32_t offset = 0;
int i;
- uint64_t ret = 0;
- for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
- const ExtSaveArea *esa = &x86_ext_save_areas[i];
+ for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+ esa = &x86_ext_save_areas[i];
if ((mask >> i) & 1) {
- ret = MAX(ret, esa->offset + esa->size);
+ offset = compacted ? ret : esa->offset;
+ ret = MAX(ret, offset + esa->size);
}
}
return ret;
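
The rework above distinguishes the two XSAVE layouts: in the standard format every enabled component sits at the fixed offset CPUID[0xD] reports, so the size is the maximum of offset + size; in the compacted format components are packed back to back after the 576-byte legacy+header region, so the running total doubles as the next offset. A minimal standalone sketch, not QEMU code; the offsets/sizes used are the architectural ones for AVX and PKRU:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LEGACY_HDR_SIZE 576u /* 512-byte legacy region + 64-byte XSAVE header */

struct comp { uint32_t offset, size; };

/* Architectural offsets/sizes of two real components, per CPUID[0xD]. */
static const struct comp comps[] = {
    [2] = { .offset = 576,  .size = 256 }, /* AVX  */
    [9] = { .offset = 2688, .size = 8   }, /* PKRU */
};

static uint32_t area_size(uint64_t mask, bool compacted)
{
    uint32_t ret = LEGACY_HDR_SIZE;

    for (unsigned i = 2; i < sizeof(comps) / sizeof(comps[0]); i++) {
        if ((mask >> i) & 1) {
            /* compacted: packed right after the previous component;
             * standard: fixed architectural offset */
            uint32_t off = compacted ? ret : comps[i].offset;
            if (off + comps[i].size > ret) {
                ret = off + comps[i].size;
            }
        }
    }
    return ret;
}

int main(void)
{
    uint64_t mask = (1ULL << 2) | (1ULL << 9);

    printf("standard:  %" PRIu32 "\n", area_size(mask, false)); /* 2696 */
    printf("compacted: %" PRIu32 "\n", area_size(mask, true));  /* 840  */
    return 0;
}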
@@ -1443,10 +1481,10 @@ static inline bool accel_uses_host_cpuid(void)
return kvm_enabled() || hvf_enabled();
}
-static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
+static inline uint64_t x86_cpu_xsave_xcr0_components(X86CPU *cpu)
{
- return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
- cpu->env.features[FEAT_XSAVE_COMP_LO];
+ return ((uint64_t)cpu->env.features[FEAT_XSAVE_XCR0_HI]) << 32 |
+ cpu->env.features[FEAT_XSAVE_XCR0_LO];
}
/* Return name of 32-bit register, from a R_* constant */
@@ -1458,6 +1496,12 @@ static const char *get_register_name_32(unsigned int reg)
return x86_reg_info_32[reg].name;
}
+static inline uint64_t x86_cpu_xsave_xss_components(X86CPU *cpu)
+{
+ return ((uint64_t)cpu->env.features[FEAT_XSAVE_XSS_HI]) << 32 |
+ cpu->env.features[FEAT_XSAVE_XSS_LO];
+}
+
/*
* Returns the set of feature flags that are supported and migratable by
* QEMU, for a given FeatureWord.
@@ -3259,128 +3303,6 @@ static const X86CPUDefinition builtin_x86_defs[] = {
}
},
{
- .name = "Icelake-Client",
- .level = 0xd,
- .vendor = CPUID_VENDOR_INTEL,
- .family = 6,
- .model = 126,
- .stepping = 0,
- .features[FEAT_1_EDX] =
- CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
- CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
- CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
- CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
- CPUID_DE | CPUID_FP87,
- .features[FEAT_1_ECX] =
- CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
- CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
- CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
- CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
- CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
- CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
- .features[FEAT_8000_0001_EDX] =
- CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
- CPUID_EXT2_SYSCALL,
- .features[FEAT_8000_0001_ECX] =
- CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
- .features[FEAT_8000_0008_EBX] =
- CPUID_8000_0008_EBX_WBNOINVD,
- .features[FEAT_7_0_EBX] =
- CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
- CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
- CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
- CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
- CPUID_7_0_EBX_SMAP,
- .features[FEAT_7_0_ECX] =
- CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
- CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
- CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
- CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
- CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
- .features[FEAT_7_0_EDX] =
- CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
- /* XSAVES is added in version 3 */
- .features[FEAT_XSAVE] =
- CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
- CPUID_XSAVE_XGETBV1,
- .features[FEAT_6_EAX] =
- CPUID_6_EAX_ARAT,
- /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
- .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
- MSR_VMX_BASIC_TRUE_CTLS,
- .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
- VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
- VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
- .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
- MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
- MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
- MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
- MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
- MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
- MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
- .features[FEAT_VMX_EXIT_CTLS] =
- VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
- VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
- VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
- VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
- VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
- .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
- MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
- .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
- VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
- VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
- .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
- VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
- VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
- VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
- VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
- VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
- VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
- VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
- VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
- VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
- VMX_CPU_BASED_MONITOR_TRAP_FLAG |
- VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
- .features[FEAT_VMX_SECONDARY_CTLS] =
- VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
- VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
- VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
- VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
- VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
- VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
- VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
- .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
- .xlevel = 0x80000008,
- .model_id = "Intel Core Processor (Icelake)",
- .versions = (X86CPUVersionDefinition[]) {
- {
- .version = 1,
- .note = "deprecated"
- },
- {
- .version = 2,
- .note = "no TSX, deprecated",
- .alias = "Icelake-Client-noTSX",
- .props = (PropValue[]) {
- { "hle", "off" },
- { "rtm", "off" },
- { /* end of list */ }
- },
- },
- {
- .version = 3,
- .note = "no TSX, XSAVES, deprecated",
- .props = (PropValue[]) {
- { "xsaves", "on" },
- { "vmx-xsaves", "on" },
- { /* end of list */ }
- },
- },
- { /* end of list */ }
- },
- .deprecation_note = "use Icelake-Server instead"
- },
- {
.name = "Icelake-Server",
.level = 0xd,
.vendor = CPUID_VENDOR_INTEL,
@@ -4633,8 +4555,8 @@ static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
/* XSAVE components are automatically enabled by other features,
* so return the original feature name instead
*/
- if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
- int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
+ if (w == FEAT_XSAVE_XCR0_LO || w == FEAT_XSAVE_XCR0_HI) {
+ int comp = (w == FEAT_XSAVE_XCR0_HI) ? bitnr + 32 : bitnr;
if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
x86_ext_save_areas[comp].bits) {
@@ -5022,6 +4944,28 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
return r;
}
+static void x86_cpu_get_supported_cpuid(uint32_t func, uint32_t index,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ if (kvm_enabled()) {
+ *eax = kvm_arch_get_supported_cpuid(kvm_state, func, index, R_EAX);
+ *ebx = kvm_arch_get_supported_cpuid(kvm_state, func, index, R_EBX);
+ *ecx = kvm_arch_get_supported_cpuid(kvm_state, func, index, R_ECX);
+ *edx = kvm_arch_get_supported_cpuid(kvm_state, func, index, R_EDX);
+ } else if (hvf_enabled()) {
+ *eax = hvf_get_supported_cpuid(func, index, R_EAX);
+ *ebx = hvf_get_supported_cpuid(func, index, R_EBX);
+ *ecx = hvf_get_supported_cpuid(func, index, R_ECX);
+ *edx = hvf_get_supported_cpuid(func, index, R_EDX);
+ } else {
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ }
+}
+
static void x86_cpu_get_cache_cpuid(uint32_t func, uint32_t index,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx)
@@ -5437,18 +5381,8 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
break;
case 0xA:
/* Architectural Performance Monitoring Leaf */
- if (kvm_enabled() && cpu->enable_pmu) {
- KVMState *s = cs->kvm_state;
-
- *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
- *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
- *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
- *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
- } else if (hvf_enabled() && cpu->enable_pmu) {
- *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
- *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
- *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
- *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
+ if (accel_uses_host_cpuid() && cpu->enable_pmu) {
+ x86_cpu_get_supported_cpuid(0xA, count, eax, ebx, ecx, edx);
} else {
*eax = 0;
*ebx = 0;
@@ -5486,6 +5420,13 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
assert(!(*eax & ~0x1f));
*ebx &= 0xffff; /* The count doesn't need to be reliable. */
break;
+ case 0x1C:
+ if (accel_uses_host_cpuid() && cpu->enable_pmu &&
+ (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
+ x86_cpu_get_supported_cpuid(0x1C, 0, eax, ebx, ecx, edx);
+ *edx = 0;
+ }
+ break;
case 0x1F:
/* V2 Extended Topology Enumeration Leaf */
if (env->nr_dies < 2) {
@@ -5530,25 +5471,47 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
}
if (count == 0) {
- *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
- *eax = env->features[FEAT_XSAVE_COMP_LO];
- *edx = env->features[FEAT_XSAVE_COMP_HI];
+ *ecx = xsave_area_size(x86_cpu_xsave_xcr0_components(cpu), false);
+ *eax = env->features[FEAT_XSAVE_XCR0_LO];
+ *edx = env->features[FEAT_XSAVE_XCR0_HI];
/*
* The initial value of xcr0 and ebx == 0. On a host without kvm
* commit 412a3c41 (e.g., CentOS 6), ebx's value is always 0 even
* after the guest updates xcr0; this will crash some legacy guests
* (e.g., CentOS 6), so set ebx == ecx to work around it.
*/
- *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0);
+ *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0, false);
} else if (count == 1) {
+ uint64_t xstate = x86_cpu_xsave_xcr0_components(cpu) |
+ x86_cpu_xsave_xss_components(cpu);
+
*eax = env->features[FEAT_XSAVE];
+ *ebx = xsave_area_size(xstate, true);
+ *ecx = env->features[FEAT_XSAVE_XSS_LO];
+ *edx = env->features[FEAT_XSAVE_XSS_HI];
+ if (kvm_enabled() && cpu->enable_pmu &&
+ (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR) &&
+ (*eax & CPUID_XSAVE_XSAVES)) {
+ *ecx |= XSTATE_ARCH_LBR_MASK;
+ } else {
+ *ecx &= ~XSTATE_ARCH_LBR_MASK;
+ }
+ } else if (count == 0xf &&
+ accel_uses_host_cpuid() && cpu->enable_pmu &&
+ (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
+ x86_cpu_get_supported_cpuid(0xD, count, eax, ebx, ecx, edx);
} else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
- if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
- const ExtSaveArea *esa = &x86_ext_save_areas[count];
+ const ExtSaveArea *esa = &x86_ext_save_areas[count];
+
+ if (x86_cpu_xsave_xcr0_components(cpu) & (1ULL << count)) {
*eax = esa->size;
*ebx = esa->offset;
*ecx = esa->ecx &
(ESA_FEATURE_ALIGN64_MASK | ESA_FEATURE_XFD_MASK);
+ } else if (x86_cpu_xsave_xss_components(cpu) & (1ULL << count)) {
+ *eax = esa->size;
+ *ebx = 0;
+ *ecx = 1;
}
}
break;
@@ -5588,10 +5551,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
* supports. Features can be further restricted by userspace, but not
* made more permissive.
*/
- *eax = kvm_arch_get_supported_cpuid(cs->kvm_state, 0x12, count, R_EAX);
- *ebx = kvm_arch_get_supported_cpuid(cs->kvm_state, 0x12, count, R_EBX);
- *ecx = kvm_arch_get_supported_cpuid(cs->kvm_state, 0x12, count, R_ECX);
- *edx = kvm_arch_get_supported_cpuid(cs->kvm_state, 0x12, count, R_EDX);
+ x86_cpu_get_supported_cpuid(0x12, index, eax, ebx, ecx, edx);
if (count == 0) {
*eax &= env->features[FEAT_SGX_12_0_EAX];
@@ -5599,8 +5559,8 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
} else {
*eax &= env->features[FEAT_SGX_12_1_EAX];
*ebx &= 0; /* ebx reserve */
- *ecx &= env->features[FEAT_XSAVE_COMP_LO];
- *edx &= env->features[FEAT_XSAVE_COMP_HI];
+ *ecx &= env->features[FEAT_XSAVE_XSS_LO];
+ *edx &= env->features[FEAT_XSAVE_XSS_HI];
/* FP and SSE are always allowed regardless of XSAVE/XCR0. */
*ecx |= XSTATE_FP_MASK | XSTATE_SSE_MASK;
@@ -5996,6 +5956,9 @@ static void x86_cpu_reset(DeviceState *dev)
}
for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
const ExtSaveArea *esa = &x86_ext_save_areas[i];
+ if (!((1 << i) & CPUID_XSTATE_XCR0_MASK)) {
+ continue;
+ }
if (env->features[esa->feature] & esa->bits) {
xcr0 |= 1ull << i;
}
@@ -6110,8 +6073,8 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu)
static bool request_perm;
if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
- env->features[FEAT_XSAVE_COMP_LO] = 0;
- env->features[FEAT_XSAVE_COMP_HI] = 0;
+ env->features[FEAT_XSAVE_XCR0_LO] = 0;
+ env->features[FEAT_XSAVE_XCR0_HI] = 0;
return;
}
@@ -6129,8 +6092,10 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu)
request_perm = true;
}
- env->features[FEAT_XSAVE_COMP_LO] = mask;
- env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
+ env->features[FEAT_XSAVE_XCR0_LO] = mask & CPUID_XSTATE_XCR0_MASK;
+ env->features[FEAT_XSAVE_XCR0_HI] = mask >> 32;
+ env->features[FEAT_XSAVE_XSS_LO] = mask & CPUID_XSTATE_XSS_MASK;
+ env->features[FEAT_XSAVE_XSS_HI] = mask >> 32;
}
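
The split above routes user-state components into the XCR0 feature words and supervisor-state components (currently only arch LBR) into the XSS words. A standalone sketch of the same split, assuming a made-up mask and simplifying CPUID_XSTATE_XCR0_MASK to the complement of the XSS mask:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define XSTATE_ARCH_LBR_MASK   (1ULL << 15)
#define CPUID_XSTATE_XSS_MASK  (XSTATE_ARCH_LBR_MASK)
#define CPUID_XSTATE_XCR0_MASK (~CPUID_XSTATE_XSS_MASK) /* simplified */

int main(void)
{
    /* hypothetical set of enabled components, including arch LBR */
    uint64_t mask = 0x2e7 | XSTATE_ARCH_LBR_MASK;

    printf("XCR0 components: %#" PRIx64 "\n", mask & CPUID_XSTATE_XCR0_MASK); /* 0x2e7  */
    printf("XSS components:  %#" PRIx64 "\n", mask & CPUID_XSTATE_XSS_MASK);  /* 0x8000 */
    return 0;
}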
/***** Steps involved on loading and filtering CPUID data
@@ -6397,6 +6362,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
CPUX86State *env = &cpu->env;
Error *local_err = NULL;
static bool ht_warned;
+ unsigned requested_lbr_fmt;
if (cpu->apic_id == UNASSIGNED_APIC_ID) {
error_setg(errp, "apic-id property was not initialized properly");
@@ -6414,6 +6380,42 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
goto out;
}
+ /*
+ * Override env->features[FEAT_PERF_CAPABILITIES].LBR_FMT
+ * with user-provided setting.
+ */
+ if (cpu->lbr_fmt != ~PERF_CAP_LBR_FMT) {
+ if ((cpu->lbr_fmt & PERF_CAP_LBR_FMT) != cpu->lbr_fmt) {
+ error_setg(errp, "invalid lbr-fmt");
+ return;
+ }
+ env->features[FEAT_PERF_CAPABILITIES] &= ~PERF_CAP_LBR_FMT;
+ env->features[FEAT_PERF_CAPABILITIES] |= cpu->lbr_fmt;
+ }
+
+ /*
+ * vPMU LBR is supported when 1) KVM is enabled, 2) the pmu=on option
+ * is set, and 3) the vPMU LBR format matches the host setting.
+ */
+ requested_lbr_fmt =
+ env->features[FEAT_PERF_CAPABILITIES] & PERF_CAP_LBR_FMT;
+ if (requested_lbr_fmt && kvm_enabled()) {
+ uint64_t host_perf_cap =
+ x86_cpu_get_supported_feature_word(FEAT_PERF_CAPABILITIES, false);
+ unsigned host_lbr_fmt = host_perf_cap & PERF_CAP_LBR_FMT;
+
+ if (!cpu->enable_pmu) {
+ error_setg(errp, "vPMU: LBR is unsupported without pmu=on");
+ return;
+ }
+ if (requested_lbr_fmt != host_lbr_fmt) {
+ error_setg(errp, "vPMU: the lbr-fmt value (0x%x) does not match "
+ "the host value (0x%x).",
+ requested_lbr_fmt, host_lbr_fmt);
+ return;
+ }
+ }
+
x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid);
if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) {
@@ -6766,6 +6768,8 @@ static void x86_cpu_initfn(Object *obj)
object_property_add_alias(obj, "sse4_2", obj, "sse4.2");
object_property_add_alias(obj, "hv-apicv", obj, "hv-avic");
+ cpu->lbr_fmt = ~PERF_CAP_LBR_FMT;
+ object_property_add_alias(obj, "lbr_fmt", obj, "lbr-fmt");
if (xcc->model) {
x86_cpu_load_model(cpu, xcc->model);
@@ -6920,6 +6924,7 @@ static Property x86_cpu_properties[] = {
#endif
DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
+ DEFINE_PROP_UINT64_CHECKMASK("lbr-fmt", X86CPU, lbr_fmt, PERF_CAP_LBR_FMT),
DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
HYPERV_SPINLOCK_NEVER_NOTIFY),
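
DEFINE_PROP_UINT64_CHECKMASK comes from the qdev-properties patch in this series and rejects any value with bits set outside the given mask, which is what lets lbr-fmt accept only PERF_CAP_LBR_FMT bits. A minimal sketch of that check, with a hypothetical helper name:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PERF_CAP_LBR_FMT 0x3f

/* Accept a property value only if all of its set bits fall inside mask. */
static bool check_prop_mask(uint64_t value, uint64_t mask)
{
    return (value & ~mask) == 0;
}

int main(void)
{
    assert(check_prop_mask(0x3f, PERF_CAP_LBR_FMT));  /* ok: within mask   */
    assert(!check_prop_mask(0x40, PERF_CAP_LBR_FMT)); /* rejected: bit 6   */
    return 0;
}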
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 9661f9f..0d528ac 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -386,10 +386,16 @@ typedef enum X86Seg {
#define ARCH_CAP_TSX_CTRL_MSR (1<<7)
#define MSR_IA32_PERF_CAPABILITIES 0x345
+#define PERF_CAP_LBR_FMT 0x3f
#define MSR_IA32_TSX_CTRL 0x122
#define MSR_IA32_TSCDEADLINE 0x6e0
#define MSR_IA32_PKRS 0x6e1
+#define MSR_ARCH_LBR_CTL 0x000014ce
+#define MSR_ARCH_LBR_DEPTH 0x000014cf
+#define MSR_ARCH_LBR_FROM_0 0x00001500
+#define MSR_ARCH_LBR_TO_0 0x00001600
+#define MSR_ARCH_LBR_INFO_0 0x00001200
#define FEATURE_CONTROL_LOCKED (1<<0)
#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1ULL << 1)
@@ -543,6 +549,7 @@ typedef enum X86Seg {
#define XSTATE_ZMM_Hi256_BIT 6
#define XSTATE_Hi16_ZMM_BIT 7
#define XSTATE_PKRU_BIT 9
+#define XSTATE_ARCH_LBR_BIT 15
#define XSTATE_XTILE_CFG_BIT 17
#define XSTATE_XTILE_DATA_BIT 18
@@ -555,6 +562,7 @@ typedef enum X86Seg {
#define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT)
#define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT)
#define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT)
+#define XSTATE_ARCH_LBR_MASK (1ULL << XSTATE_ARCH_LBR_BIT)
#define XSTATE_XTILE_CFG_MASK (1ULL << XSTATE_XTILE_CFG_BIT)
#define XSTATE_XTILE_DATA_MASK (1ULL << XSTATE_XTILE_DATA_BIT)
@@ -567,6 +575,14 @@ typedef enum X86Seg {
#define ESA_FEATURE_XFD_MASK (1U << ESA_FEATURE_XFD_BIT)
+/* CPUID feature bits available in XCR0 */
+#define CPUID_XSTATE_XCR0_MASK (XSTATE_FP_MASK | XSTATE_SSE_MASK | \
+ XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | \
+ XSTATE_BNDCSR_MASK | XSTATE_OPMASK_MASK | \
+ XSTATE_ZMM_Hi256_MASK | \
+ XSTATE_Hi16_ZMM_MASK | XSTATE_PKRU_MASK | \
+ XSTATE_XTILE_CFG_MASK | XSTATE_XTILE_DATA_MASK)
+
/* CPUID feature words */
typedef enum FeatureWord {
FEAT_1_EDX, /* CPUID[1].EDX */
@@ -585,8 +601,8 @@ typedef enum FeatureWord {
FEAT_SVM, /* CPUID[8000_000A].EDX */
FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */
FEAT_6_EAX, /* CPUID[6].EAX */
- FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
- FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
+ FEAT_XSAVE_XCR0_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
+ FEAT_XSAVE_XCR0_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
FEAT_ARCH_CAPABILITIES,
FEAT_CORE_CAPABILITY,
FEAT_PERF_CAPABILITIES,
@@ -603,6 +619,8 @@ typedef enum FeatureWord {
FEAT_SGX_12_0_EAX, /* CPUID[EAX=0x12,ECX=0].EAX (SGX) */
FEAT_SGX_12_0_EBX, /* CPUID[EAX=0x12,ECX=0].EBX (SGX MISCSELECT[31:0]) */
FEAT_SGX_12_1_EAX, /* CPUID[EAX=0x12,ECX=1].EAX (SGX ATTRIBUTES[31:0]) */
+ FEAT_XSAVE_XSS_LO, /* CPUID[EAX=0xd,ECX=1].ECX */
+ FEAT_XSAVE_XSS_HI, /* CPUID[EAX=0xd,ECX=1].EDX */
FEATURE_WORDS,
} FeatureWord;
@@ -859,6 +877,8 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
#define CPUID_7_0_EDX_SERIALIZE (1U << 14)
/* TSX Suspend Load Address Tracking instruction */
#define CPUID_7_0_EDX_TSX_LDTRK (1U << 16)
+/* Architectural LBRs */
+#define CPUID_7_0_EDX_ARCH_LBR (1U << 19)
/* AVX512_FP16 instruction */
#define CPUID_7_0_EDX_AVX512_FP16 (1U << 23)
/* AMX tile (two-dimensional register) */
@@ -1365,6 +1385,24 @@ typedef struct XSaveXTILEDATA {
uint8_t xtiledata[8][1024];
} XSaveXTILEDATA;
+typedef struct {
+ uint64_t from;
+ uint64_t to;
+ uint64_t info;
+} LBREntry;
+
+#define ARCH_LBR_NR_ENTRIES 32
+
+/* Ext. save area 19: Supervisor mode Arch LBR state */
+typedef struct XSavesArchLBR {
+ uint64_t lbr_ctl;
+ uint64_t lbr_depth;
+ uint64_t ler_from;
+ uint64_t ler_to;
+ uint64_t ler_info;
+ LBREntry lbr_records[ARCH_LBR_NR_ENTRIES];
+} XSavesArchLBR;
+
QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40);
@@ -1374,6 +1412,7 @@ QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400);
QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8);
QEMU_BUILD_BUG_ON(sizeof(XSaveXTILECFG) != 0x40);
QEMU_BUILD_BUG_ON(sizeof(XSaveXTILEDATA) != 0x2000);
+QEMU_BUILD_BUG_ON(sizeof(XSavesArchLBR) != 0x328);
typedef struct ExtSaveArea {
uint32_t feature, bits;
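
The 0x328 figure in the new build assert is easy to verify: five uint64_t MSR images (40 bytes) plus 32 records of 24 bytes each (768 bytes) total 808 = 0x328. A standalone restatement of the arithmetic:

#include <assert.h>
#include <stdint.h>

typedef struct {
    uint64_t from, to, info; /* 24 bytes per record */
} LBREntry;

typedef struct {
    uint64_t lbr_ctl, lbr_depth, ler_from, ler_to, ler_info; /* 5 * 8 = 40  */
    LBREntry lbr_records[32];                                /* 32 * 24 = 768 */
} XSavesArchLBR;

static_assert(sizeof(XSavesArchLBR) == 0x328, "40 + 768 = 808 = 0x328");

int main(void) { return 0; }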
@@ -1616,6 +1655,11 @@ typedef struct CPUArchState {
uint64_t msr_xfd;
uint64_t msr_xfd_err;
+ /* Per-VCPU Arch LBR MSRs */
+ uint64_t msr_lbr_ctl;
+ uint64_t msr_lbr_depth;
+ LBREntry lbr_records[ARCH_LBR_NR_ENTRIES];
+
/* exception/interrupt handling */
int error_code;
int exception_is_int;
@@ -1810,6 +1854,15 @@ struct ArchCPU {
*/
bool enable_pmu;
+ /*
+ * Enable LBR_FMT bits of IA32_PERF_CAPABILITIES MSR.
+ * This can't be initialized with a default because it doesn't have
+ * stable ABI support yet. Only the LBR_FMT bits returned by
+ * kvm_arch_get_supported_msr_feature() (which depend on both host CPU
+ * and kernel capabilities) may be passed to the guest.
+ */
+ uint64_t lbr_fmt;
+
/* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is
* disabled by default to avoid breaking migration between QEMU with
* different LMCE configurations.
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index c885763..a9ee8ee 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -141,6 +141,7 @@ static struct kvm_msr_list *kvm_feature_msrs;
#define BUS_LOCK_SLICE_TIME 1000000000ULL /* ns */
static RateLimit bus_lock_ratelimit_ctrl;
+static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value);
int kvm_has_pit_state2(void)
{
@@ -211,28 +212,21 @@ static int kvm_get_tsc(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
- struct {
- struct kvm_msrs info;
- struct kvm_msr_entry entries[1];
- } msr_data = {};
+ uint64_t value;
int ret;
if (env->tsc_valid) {
return 0;
}
- memset(&msr_data, 0, sizeof(msr_data));
- msr_data.info.nmsrs = 1;
- msr_data.entries[0].index = MSR_IA32_TSC;
env->tsc_valid = !runstate_is_running();
- ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
+ ret = kvm_get_one_msr(cpu, MSR_IA32_TSC, &value);
if (ret < 0) {
return ret;
}
- assert(ret == 1);
- env->tsc = msr_data.entries[0].data;
+ env->tsc = value;
return 0;
}
@@ -1566,21 +1560,14 @@ static int hyperv_init_vcpu(X86CPU *cpu)
* the kernel doesn't support setting vp_index; assert that its value
* is in sync
*/
- struct {
- struct kvm_msrs info;
- struct kvm_msr_entry entries[1];
- } msr_data = {
- .info.nmsrs = 1,
- .entries[0].index = HV_X64_MSR_VP_INDEX,
- };
+ uint64_t value;
- ret = kvm_vcpu_ioctl(cs, KVM_GET_MSRS, &msr_data);
+ ret = kvm_get_one_msr(cpu, HV_X64_MSR_VP_INDEX, &value);
if (ret < 0) {
return ret;
}
- assert(ret == 1);
- if (msr_data.entries[0].data != hyperv_vp_index(CPU(cpu))) {
+ if (value != hyperv_vp_index(CPU(cpu))) {
error_report("kernel's vp_index != QEMU's vp_index");
return -ENXIO;
}
@@ -2839,6 +2826,25 @@ static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
}
+static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value)
+{
+ int ret;
+ struct {
+ struct kvm_msrs info;
+ struct kvm_msr_entry entries[1];
+ } msr_data = {
+ .info.nmsrs = 1,
+ .entries[0].index = index,
+ };
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
+ if (ret < 0) {
+ return ret;
+ }
+ assert(ret == 1);
+ *value = msr_data.entries[0].data;
+ return ret;
+}
void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
{
int ret;
@@ -3361,6 +3367,38 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
env->msr_xfd_err);
}
+ if (kvm_enabled() && cpu->enable_pmu &&
+ (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
+ uint64_t depth;
+ int i, ret;
+
+ /*
+ * Only migrate Arch LBR state when 1) Arch LBR is enabled for
+ * the migrated vcpu and 2) the host Arch LBR depth equals the
+ * source guest's; this avoids a guest/host mismatch in the MSR
+ * configuration and the unexpected misbehavior it can cause.
+ */
+ ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
+
+ if (ret == 1 && (env->msr_lbr_ctl & 0x1) && !!depth &&
+ depth == env->msr_lbr_depth) {
+ kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, env->msr_lbr_ctl);
+ kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, env->msr_lbr_depth);
+
+ for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
+ if (!env->lbr_records[i].from) {
+ continue;
+ }
+ kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i,
+ env->lbr_records[i].from);
+ kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i,
+ env->lbr_records[i].to);
+ kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i,
+ env->lbr_records[i].info);
+ }
+ }
+ }
+
/* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
* kvm_put_msr_feature_control. */
}
@@ -3761,6 +3799,26 @@ static int kvm_get_msrs(X86CPU *cpu)
kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0);
}
+ if (kvm_enabled() && cpu->enable_pmu &&
+ (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
+ uint64_t ctl, depth;
+ int i, ret2;
+
+ ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_CTL, &ctl);
+ ret2 = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
+ if (ret == 1 && ret2 == 1 && (ctl & 0x1) &&
+ depth == ARCH_LBR_NR_ENTRIES) {
+ kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, 0);
+ kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, 0);
+
+ for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
+ kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i, 0);
+ kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i, 0);
+ kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i, 0);
+ }
+ }
+ }
+
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
if (ret < 0) {
return ret;
@@ -4066,6 +4124,21 @@ static int kvm_get_msrs(X86CPU *cpu)
case MSR_IA32_XFD_ERR:
env->msr_xfd_err = msrs[i].data;
break;
+ case MSR_ARCH_LBR_CTL:
+ env->msr_lbr_ctl = msrs[i].data;
+ break;
+ case MSR_ARCH_LBR_DEPTH:
+ env->msr_lbr_depth = msrs[i].data;
+ break;
+ case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
+ env->lbr_records[index - MSR_ARCH_LBR_FROM_0].from = msrs[i].data;
+ break;
+ case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
+ env->lbr_records[index - MSR_ARCH_LBR_TO_0].to = msrs[i].data;
+ break;
+ case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
+ env->lbr_records[index - MSR_ARCH_LBR_INFO_0].info = msrs[i].data;
+ break;
}
}
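
The `case A ... B:` labels above use the GCC/Clang case-range extension; each MSR index maps back to its record slot by subtracting the range base. A trivial standalone sketch of that mapping:

#include <assert.h>
#include <stdint.h>

#define MSR_ARCH_LBR_FROM_0 0x00001500
#define ARCH_LBR_NR_ENTRIES 32

int main(void)
{
    uint32_t index = MSR_ARCH_LBR_FROM_0 + 7; /* hypothetical MSR readback */
    uint32_t slot = index - MSR_ARCH_LBR_FROM_0;

    assert(slot == 7 && slot < ARCH_LBR_NR_ENTRIES);
    return 0;
}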
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 7c54bad..cecd476 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -136,6 +136,22 @@ static const VMStateDescription vmstate_mtrr_var = {
#define VMSTATE_MTRR_VARS(_field, _state, _n, _v) \
VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)
+static const VMStateDescription vmstate_lbr_records_var = {
+ .name = "lbr_records_var",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(from, LBREntry),
+ VMSTATE_UINT64(to, LBREntry),
+ VMSTATE_UINT64(info, LBREntry),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_LBR_VARS(_field, _state, _n, _v) \
+ VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_lbr_records_var, \
+ LBREntry)
+
typedef struct x86_FPReg_tmp {
FPReg *parent;
uint64_t tmp_mant;
@@ -1525,6 +1541,27 @@ static const VMStateDescription vmstate_amx_xtile = {
};
#endif
+static bool arch_lbr_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return !!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR);
+}
+
+static const VMStateDescription vmstate_arch_lbr = {
+ .name = "cpu/arch_lbr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = arch_lbr_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_lbr_ctl, X86CPU),
+ VMSTATE_UINT64(env.msr_lbr_depth, X86CPU),
+ VMSTATE_LBR_VARS(env.lbr_records, X86CPU, ARCH_LBR_NR_ENTRIES, 1),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
const VMStateDescription vmstate_x86_cpu = {
.name = "cpu",
.version_id = 12,
@@ -1668,6 +1705,7 @@ const VMStateDescription vmstate_x86_cpu = {
#ifdef TARGET_X86_64
&vmstate_amx_xtile,
#endif
+ &vmstate_arch_lbr,
NULL
}
};
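
Because vmstate_arch_lbr is a subsection gated by arch_lbr_needed(), it is only put on the wire when the guest actually exposes arch LBR, so migration streams for other guests are unchanged. A minimal sketch of the gating pattern, with simplified names:

#include <stdbool.h>
#include <stdio.h>

typedef struct { bool has_arch_lbr; } VCpu;

static bool arch_lbr_needed(const VCpu *cpu)
{
    return cpu->has_arch_lbr;
}

int main(void)
{
    VCpu cpu = { .has_arch_lbr = false };

    /* Only serialize the subsection when the feature is in use. */
    if (arch_lbr_needed(&cpu)) {
        puts("send cpu/arch_lbr subsection");
    }
    return 0;
}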
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index 23ae639..b22a331 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -373,6 +373,8 @@ static int whpx_set_tsc(CPUState *cpu)
*
* This mechanism is described in section 10.8.6.1 of Volume 3 of Intel 64
* and IA-32 Architectures Software Developer's Manual.
+ *
+ * The functions below translate the value of CR8 to TPR and vice versa.
*/
static uint64_t whpx_apic_tpr_to_cr8(uint64_t tpr)
@@ -380,6 +382,11 @@ static uint64_t whpx_apic_tpr_to_cr8(uint64_t tpr)
return tpr >> 4;
}
+static uint64_t whpx_cr8_to_apic_tpr(uint64_t cr8)
+{
+ return cr8 << 4;
+}
+
static void whpx_set_registers(CPUState *cpu, int level)
{
struct whpx_state *whpx = &whpx_global;
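
whpx_cr8_to_apic_tpr() undoes whpx_apic_tpr_to_cr8(): CR8 holds only the priority class, TPR[7:4], so the sub-class in TPR[3:0] is not representable in CR8 and round-trips to zero. A standalone sketch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t tpr_to_cr8(uint64_t tpr) { return tpr >> 4; }
static uint64_t cr8_to_tpr(uint64_t cr8) { return cr8 << 4; }

int main(void)
{
    /* TPR 0xB5 -> CR8 0xB -> TPR 0xB0: the low nibble is lost. */
    printf("%#" PRIx64 "\n", cr8_to_tpr(tpr_to_cr8(0xB5)));
    return 0;
}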
@@ -687,7 +694,7 @@ static void whpx_get_registers(CPUState *cpu)
tpr = vcxt.values[idx++].Reg64;
if (tpr != vcpu->tpr) {
vcpu->tpr = tpr;
- cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
+ cpu_set_apic_tpr(x86_cpu->apic_state, whpx_cr8_to_apic_tpr(tpr));
}
/* 8 Debug Registers - Skipped */
@@ -1547,7 +1554,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
}
/* Sync the TPR to the CR8 if it was modified during the intercept */
- tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
+ tpr = whpx_apic_tpr_to_cr8(cpu_get_apic_tpr(x86_cpu->apic_state));
if (tpr != vcpu->tpr) {
vcpu->tpr = tpr;
reg_values[reg_count].Reg64 = tpr;
@@ -1596,7 +1603,7 @@ static void whpx_vcpu_post_run(CPUState *cpu)
if (vcpu->tpr != tpr) {
vcpu->tpr = tpr;
qemu_mutex_lock_iothread();
- cpu_set_apic_tpr(x86_cpu->apic_state, vcpu->tpr);
+ cpu_set_apic_tpr(x86_cpu->apic_state, whpx_cr8_to_apic_tpr(vcpu->tpr));
qemu_mutex_unlock_iothread();
}