author     Peter Maydell <peter.maydell@linaro.org>    2024-10-04 19:28:37 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2024-10-04 19:28:37 +0100
commit     b5ab62b3c0050612c7f9b0b4baeb44ebab42775a (patch)
tree       601a427bebe91063aa44710509155af27dd20d03 /target
parent     a3fb4e93a3a7cf2be355c41cd550bef856f5ffe4 (diff)
parent     7cca79fa52128054b02ecbea249aa51e1916ba72 (diff)
Merge tag 'for-upstream' of https://gitlab.com/bonzini/qemu into staging
* pc: Add a description for the i8042 property
* kvm: support for nested FRED
* tests/unit: fix warning when compiling test-nested-aio-poll with LTO
* kvm: refactoring of VM creation
* target/i386: expose IBPB-BRTYPE and SBPB CPUID bits to the guest
* hw/char: clean up serial
* remove virtfs-proxy-helper
* target/i386/kvm: Report which action failed in kvm_arch_put/get_registers
* qom: improvements to object_resolve_path*()
# -----BEGIN PGP SIGNATURE-----
#
# iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmb++MsUHHBib256aW5p
# QHJlZGhhdC5jb20ACgkQv/vSX3jHroPVnwf/cdvfxvDm22tEdlh8vHlV17HtVdcC
# Hw334M/3PDvbTmGzPBg26lzo4nFS6SLrZ8ETCeqvuJrtKzqVk9bI8ssZW5KA4ijM
# nkxguRPHO8E6U33ZSucc+Hn56+bAx4I2X80dLKXJ87OsbMffIeJ6aHGSEI1+fKVh
# pK7q53+Y3lQWuRBGhDIyKNuzqU4g+irpQwXOhux63bV3ADadmsqzExP6Gmtl8OKM
# DylPu1oK7EPZumlSiJa7Gy1xBqL4Rc4wGPNYx2RVRjp+i7W2/Y1uehm3wSBw+SXC
# a6b7SvLoYfWYS14/qCF4cBL3sJH/0f/4g8ZAhDDxi2i5kBr0/5oioDyE/A==
# =/zo4
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 03 Oct 2024 21:04:27 BST
# gpg: using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg: issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg: aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
# Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83
* tag 'for-upstream' of https://gitlab.com/bonzini/qemu: (23 commits)
qom: update object_resolve_path*() documentation
qom: set *ambiguous on all paths
qom: rename object_resolve_path_type() "ambiguousp"
target/i386/kvm: Report which action failed in kvm_arch_put/get_registers
kvm: Allow kvm_arch_get/put_registers to accept Error**
accel/kvm: refactor dirty ring setup
minikconf: print error entirely on stderr
9p: remove 'proxy' filesystem backend driver
hw/char: Extract serial-mm
hw/char/serial.h: Extract serial-isa.h
hw: Remove unused inclusion of hw/char/serial.h
target/i386: Expose IBPB-BRTYPE and SBPB CPUID bits to the guest
kvm: refactor core virtual machine creation into its own function
kvm/i386: replace identity_base variable with a constant
kvm/i386: refactor kvm_arch_init and split it into smaller functions
kvm: replace fprintf with error_report()/printf() in kvm_init()
kvm/i386: fix return values of is_host_cpu_intel()
kvm/i386: make kvm_filter_msr() and related definitions private to kvm module
hw/i386/pc: Add a description for the i8042 property
tests/unit: remove block layer code from test-nested-aio-poll
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
# Conflicts:
# hw/arm/Kconfig
# hw/arm/pxa2xx.c
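
Note on the kvm_arch_put_registers()/kvm_arch_get_registers() change in this pull: both hooks now take an Error **errp so each architecture backend can say which step failed (e.g. "Failed to set MSRs") instead of returning only a bare errno. The sketch below shows the intended usage pattern; the caller is hypothetical, since the generic accel/kvm side of the series is outside the 'target' diffstat shown here.

    /* Illustrative sketch only -- not code from this series. */
    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "sysemu/kvm.h"      /* kvm_arch_put_registers() prototype */
    #include "hw/core/cpu.h"

    static void example_sync_registers(CPUState *cpu, int level)
    {
        Error *local_err = NULL;

        if (kvm_arch_put_registers(cpu, level, &local_err) < 0) {
            /* Prints e.g. "Failed to set MSRs: Invalid argument". */
            error_report_err(local_err);
        }
    }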
Diffstat (limited to 'target')
-rw-r--r--   target/arm/kvm.c               |   4
-rw-r--r--   target/i386/cpu.c              |   8
-rw-r--r--   target/i386/cpu.h              |   7
-rw-r--r--   target/i386/kvm/kvm.c          | 383
-rw-r--r--   target/i386/kvm/kvm_i386.h     |  11
-rw-r--r--   target/i386/kvm/vmsr_energy.c  |   2
-rw-r--r--   target/loongarch/kvm/kvm.c     |   4
-rw-r--r--   target/mips/kvm.c              |   4
-rw-r--r--   target/ppc/kvm.c               |   4
-rw-r--r--   target/riscv/kvm/kvm-cpu.c     |   4
-rw-r--r--   target/s390x/kvm/kvm.c         |   4
11 files changed, 267 insertions, 168 deletions
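
The first target/i386/kvm/kvm.c hunk below turns the hard-coded identity-map address into a KVM_IDENTITY_BASE constant, keeping the comment about the EPT identity map, the TSS one page above it, and the "up to 16M BIOSes" limit. The arithmetic behind that comment can be checked with a small standalone program (illustrative only, not part of the patch):

    #include <assert.h>
    #include <stdio.h>

    #define KVM_IDENTITY_BASE 0xfeffc000u   /* EPT identity map page   */
    #define RESERVED_SIZE     0x4000u       /* 4 pages reserved (e820) */

    int main(void)
    {
        unsigned long long end = KVM_IDENTITY_BASE + RESERVED_SIZE;
        unsigned long long four_gib = 1ULL << 32;

        /* The TSS sits one page after the identity map; the reserved
         * block ends exactly at 0xff000000. */
        assert(end == 0xff000000ULL);
        printf("identity map 0x%x, TSS 0x%x, reserved through 0x%llx\n",
               KVM_IDENTITY_BASE, KVM_IDENTITY_BASE + 0x1000, end);

        /* Everything from 0xff000000 up to 4 GiB is left for the BIOS
         * image: 16 MiB. */
        printf("room below 4 GiB for the BIOS: %llu MiB\n",
               (four_gib - end) >> 20);
        return 0;
    }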
diff --git a/target/arm/kvm.c b/target/arm/kvm.c index 849e2e2..f1f1b5b 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -2042,7 +2042,7 @@ static int kvm_arch_put_sve(CPUState *cs) return 0; } -int kvm_arch_put_registers(CPUState *cs, int level) +int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) { uint64_t val; uint32_t fpr; @@ -2226,7 +2226,7 @@ static int kvm_arch_get_sve(CPUState *cs) return 0; } -int kvm_arch_get_registers(CPUState *cs) +int kvm_arch_get_registers(CPUState *cs, Error **errp) { uint64_t val; unsigned int el; diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 85ef745..ff227a8 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -1221,8 +1221,8 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, + NULL, NULL, NULL, "sbpb", + "ibpb-brtype", NULL, NULL, NULL, }, .cpuid = { .eax = 0x80000021, .reg = R_EAX, }, .tcg_features = 0, @@ -1435,7 +1435,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { "vmx-exit-save-efer", "vmx-exit-load-efer", "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs", NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL, - NULL, "vmx-exit-load-pkrs", NULL, NULL, + NULL, "vmx-exit-load-pkrs", NULL, "vmx-exit-secondary-ctls", }, .msr = { .index = MSR_IA32_VMX_TRUE_EXIT_CTLS, @@ -1450,7 +1450,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { NULL, "vmx-entry-ia32e-mode", NULL, NULL, NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer", "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL, - NULL, NULL, "vmx-entry-load-pkrs", NULL, + NULL, NULL, "vmx-entry-load-pkrs", "vmx-entry-load-fred", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 14edd57..9c39384 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -267,12 +267,6 @@ typedef enum X86Seg { #define CR4_FRED_MASK 0 #endif -#ifdef TARGET_X86_64 -#define CR4_FRED_MASK (1ULL << 32) -#else -#define CR4_FRED_MASK 0 -#endif - #define CR4_RESERVED_MASK \ (~(target_ulong)(CR4_VME_MASK | CR4_PVI_MASK | CR4_TSD_MASK \ | CR4_DE_MASK | CR4_PSE_MASK | CR4_PAE_MASK \ @@ -1192,6 +1186,7 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); #define VMX_VM_EXIT_PT_CONCEAL_PIP 0x01000000 #define VMX_VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000 #define VMX_VM_EXIT_LOAD_IA32_PKRS 0x20000000 +#define VMX_VM_EXIT_ACTIVATE_SECONDARY_CONTROLS 0x80000000 #define VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004 #define VMX_VM_ENTRY_IA32E_MODE 0x00000200 diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index c8056ef..e6f9490 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -81,6 +81,16 @@ do { } while (0) #endif +/* + * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly. + * In order to use vm86 mode, an EPT identity map and a TSS are needed. + * Since these must be part of guest physical memory, we need to allocate + * them, both by setting their start addresses in the kernel and by + * creating a corresponding e820 entry. We need 4 pages before the BIOS, + * so this value allows up to 16M BIOSes. 
+ */ +#define KVM_IDENTITY_BASE 0xfeffc000 + /* From arch/x86/kvm/lapic.h */ #define KVM_APIC_BUS_CYCLE_NS 1 #define KVM_APIC_BUS_FREQUENCY (1000000000ULL / KVM_APIC_BUS_CYCLE_NS) @@ -92,7 +102,17 @@ * 255 kvm_msr_entry structs */ #define MSR_BUF_SIZE 4096 +typedef bool QEMURDMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t *val); +typedef bool QEMUWRMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t val); +typedef struct { + uint32_t msr; + QEMURDMSRHandler *rdmsr; + QEMUWRMSRHandler *wrmsr; +} KVMMSRHandlers; + static void kvm_init_msrs(X86CPU *cpu); +static bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr, + QEMUWRMSRHandler *wrmsr); const KVMCapabilityInfo kvm_arch_required_capabilities[] = { KVM_CAP_INFO(SET_TSS_ADDR), @@ -2896,9 +2916,9 @@ static int kvm_msr_energy_thread_init(KVMState *s, MachineState *ms) * 1. Host cpu must be Intel cpu * 2. RAPL must be enabled on the Host */ - if (is_host_cpu_intel()) { - error_report("The RAPL feature can only be enabled on hosts\ - with Intel CPU models"); + if (!is_host_cpu_intel()) { + error_report("The RAPL feature can only be enabled on hosts " + "with Intel CPU models"); ret = 1; goto out; } @@ -2995,10 +3015,174 @@ int kvm_arch_get_default_type(MachineState *ms) return 0; } -int kvm_arch_init(MachineState *ms, KVMState *s) +static int kvm_vm_enable_exception_payload(KVMState *s) +{ + int ret = 0; + has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD); + if (has_exception_payload) { + ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true); + if (ret < 0) { + error_report("kvm: Failed to enable exception payload cap: %s", + strerror(-ret)); + } + } + + return ret; +} + +static int kvm_vm_enable_triple_fault_event(KVMState *s) +{ + int ret = 0; + has_triple_fault_event = \ + kvm_check_extension(s, + KVM_CAP_X86_TRIPLE_FAULT_EVENT); + if (has_triple_fault_event) { + ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true); + if (ret < 0) { + error_report("kvm: Failed to enable triple fault event cap: %s", + strerror(-ret)); + } + } + return ret; +} + +static int kvm_vm_set_identity_map_addr(KVMState *s, uint64_t identity_base) +{ + return kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base); +} + +static int kvm_vm_set_nr_mmu_pages(KVMState *s) { - uint64_t identity_base = 0xfffbc000; uint64_t shadow_mem; + int ret = 0; + shadow_mem = object_property_get_int(OBJECT(s), + "kvm-shadow-mem", + &error_abort); + if (shadow_mem != -1) { + shadow_mem /= 4096; + ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem); + } + return ret; +} + +static int kvm_vm_set_tss_addr(KVMState *s, uint64_t tss_base) +{ + return kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, tss_base); +} + +static int kvm_vm_enable_disable_exits(KVMState *s) +{ + int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS); +/* Work around for kernel header with a typo. TODO: fix header and drop. 
*/ +#if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT) +#define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL +#endif + if (disable_exits) { + disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT | + KVM_X86_DISABLE_EXITS_HLT | + KVM_X86_DISABLE_EXITS_PAUSE | + KVM_X86_DISABLE_EXITS_CSTATE); + } + + return kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0, + disable_exits); +} + +static int kvm_vm_enable_bus_lock_exit(KVMState *s) +{ + int ret = 0; + ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT); + if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) { + error_report("kvm: bus lock detection unsupported"); + return -ENOTSUP; + } + ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0, + KVM_BUS_LOCK_DETECTION_EXIT); + if (ret < 0) { + error_report("kvm: Failed to enable bus lock detection cap: %s", + strerror(-ret)); + } + + return ret; +} + +static int kvm_vm_enable_notify_vmexit(KVMState *s) +{ + int ret = 0; + if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE) { + uint64_t notify_window_flags = + ((uint64_t)s->notify_window << 32) | + KVM_X86_NOTIFY_VMEXIT_ENABLED | + KVM_X86_NOTIFY_VMEXIT_USER; + ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0, + notify_window_flags); + if (ret < 0) { + error_report("kvm: Failed to enable notify vmexit cap: %s", + strerror(-ret)); + } + } + return ret; +} + +static int kvm_vm_enable_userspace_msr(KVMState *s) +{ + int ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0, + KVM_MSR_EXIT_REASON_FILTER); + if (ret < 0) { + error_report("Could not enable user space MSRs: %s", + strerror(-ret)); + exit(1); + } + + if (!kvm_filter_msr(s, MSR_CORE_THREAD_COUNT, + kvm_rdmsr_core_thread_count, NULL)) { + error_report("Could not install MSR_CORE_THREAD_COUNT handler!"); + exit(1); + } + + return 0; +} + +static void kvm_vm_enable_energy_msrs(KVMState *s) +{ + bool r; + if (s->msr_energy.enable == true) { + r = kvm_filter_msr(s, MSR_RAPL_POWER_UNIT, + kvm_rdmsr_rapl_power_unit, NULL); + if (!r) { + error_report("Could not install MSR_RAPL_POWER_UNIT \ + handler"); + exit(1); + } + + r = kvm_filter_msr(s, MSR_PKG_POWER_LIMIT, + kvm_rdmsr_pkg_power_limit, NULL); + if (!r) { + error_report("Could not install MSR_PKG_POWER_LIMIT \ + handler"); + exit(1); + } + + r = kvm_filter_msr(s, MSR_PKG_POWER_INFO, + kvm_rdmsr_pkg_power_info, NULL); + if (!r) { + error_report("Could not install MSR_PKG_POWER_INFO \ + handler"); + exit(1); + } + r = kvm_filter_msr(s, MSR_PKG_ENERGY_STATUS, + kvm_rdmsr_pkg_energy_status, NULL); + if (!r) { + error_report("Could not install MSR_PKG_ENERGY_STATUS \ + handler"); + exit(1); + } + } + return; +} + +int kvm_arch_init(MachineState *ms, KVMState *s) +{ int ret; struct utsname utsname; Error *local_err = NULL; @@ -3028,24 +3212,14 @@ int kvm_arch_init(MachineState *ms, KVMState *s) hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX); - has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD); - if (has_exception_payload) { - ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true); - if (ret < 0) { - error_report("kvm: Failed to enable exception payload cap: %s", - strerror(-ret)); - return ret; - } + ret = kvm_vm_enable_exception_payload(s); + if (ret < 0) { + return ret; } - has_triple_fault_event = kvm_check_extension(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT); - if (has_triple_fault_event) { - ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true); - if (ret < 0) { - error_report("kvm: Failed to enable triple fault event cap: %s", - strerror(-ret)); 
- return ret; - } + ret = kvm_vm_enable_triple_fault_event(s); + if (ret < 0) { + return ret; } if (s->xen_version) { @@ -3076,36 +3250,23 @@ int kvm_arch_init(MachineState *ms, KVMState *s) uname(&utsname); lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0; - /* - * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly. - * In order to use vm86 mode, an EPT identity map and a TSS are needed. - * Since these must be part of guest physical memory, we need to allocate - * them, both by setting their start addresses in the kernel and by - * creating a corresponding e820 entry. We need 4 pages before the BIOS, - * so this value allows up to 16M BIOSes. - */ - identity_base = 0xfeffc000; - ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base); + ret = kvm_vm_set_identity_map_addr(s, KVM_IDENTITY_BASE); if (ret < 0) { return ret; } /* Set TSS base one page after EPT identity map. */ - ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000); + ret = kvm_vm_set_tss_addr(s, KVM_IDENTITY_BASE + 0x1000); if (ret < 0) { return ret; } /* Tell fw_cfg to notify the BIOS to reserve the range. */ - e820_add_entry(identity_base, 0x4000, E820_RESERVED); + e820_add_entry(KVM_IDENTITY_BASE, 0x4000, E820_RESERVED); - shadow_mem = object_property_get_int(OBJECT(s), "kvm-shadow-mem", &error_abort); - if (shadow_mem != -1) { - shadow_mem /= 4096; - ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem); - if (ret < 0) { - return ret; - } + ret = kvm_vm_set_nr_mmu_pages(s); + if (ret < 0) { + return ret; } if (kvm_check_extension(s, KVM_CAP_X86_SMM) && @@ -3116,20 +3277,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } if (enable_cpu_pm) { - int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS); -/* Work around for kernel header with a typo. TODO: fix header and drop. 
*/ -#if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT) -#define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL -#endif - if (disable_exits) { - disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT | - KVM_X86_DISABLE_EXITS_HLT | - KVM_X86_DISABLE_EXITS_PAUSE | - KVM_X86_DISABLE_EXITS_CSTATE); - } - - ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0, - disable_exits); + ret = kvm_vm_enable_disable_exits(s); if (ret < 0) { error_report("kvm: guest stopping CPU not supported: %s", strerror(-ret)); @@ -3140,16 +3288,8 @@ int kvm_arch_init(MachineState *ms, KVMState *s) X86MachineState *x86ms = X86_MACHINE(ms); if (x86ms->bus_lock_ratelimit > 0) { - ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT); - if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) { - error_report("kvm: bus lock detection unsupported"); - return -ENOTSUP; - } - ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0, - KVM_BUS_LOCK_DETECTION_EXIT); + ret = kvm_vm_enable_bus_lock_exit(s); if (ret < 0) { - error_report("kvm: Failed to enable bus lock detection cap: %s", - strerror(-ret)); return ret; } ratelimit_init(&bus_lock_ratelimit_ctrl); @@ -3158,80 +3298,25 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } } - if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE && - kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) { - uint64_t notify_window_flags = - ((uint64_t)s->notify_window << 32) | - KVM_X86_NOTIFY_VMEXIT_ENABLED | - KVM_X86_NOTIFY_VMEXIT_USER; - ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0, - notify_window_flags); - if (ret < 0) { - error_report("kvm: Failed to enable notify vmexit cap: %s", - strerror(-ret)); - return ret; - } - } - if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) { - bool r; - - ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0, - KVM_MSR_EXIT_REASON_FILTER); - if (ret) { - error_report("Could not enable user space MSRs: %s", - strerror(-ret)); - exit(1); + if (kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) { + ret = kvm_vm_enable_notify_vmexit(s); + if (ret < 0) { + return ret; } + } - r = kvm_filter_msr(s, MSR_CORE_THREAD_COUNT, - kvm_rdmsr_core_thread_count, NULL); - if (!r) { - error_report("Could not install MSR_CORE_THREAD_COUNT handler: %s", - strerror(-ret)); - exit(1); + if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) { + ret = kvm_vm_enable_userspace_msr(s); + if (ret < 0) { + return ret; } if (s->msr_energy.enable == true) { - r = kvm_filter_msr(s, MSR_RAPL_POWER_UNIT, - kvm_rdmsr_rapl_power_unit, NULL); - if (!r) { - error_report("Could not install MSR_RAPL_POWER_UNIT \ - handler: %s", - strerror(-ret)); - exit(1); - } - - r = kvm_filter_msr(s, MSR_PKG_POWER_LIMIT, - kvm_rdmsr_pkg_power_limit, NULL); - if (!r) { - error_report("Could not install MSR_PKG_POWER_LIMIT \ - handler: %s", - strerror(-ret)); - exit(1); - } - - r = kvm_filter_msr(s, MSR_PKG_POWER_INFO, - kvm_rdmsr_pkg_power_info, NULL); - if (!r) { - error_report("Could not install MSR_PKG_POWER_INFO \ - handler: %s", - strerror(-ret)); + kvm_vm_enable_energy_msrs(s); + if (kvm_msr_energy_thread_init(s, ms)) { + error_report("kvm : error RAPL feature requirement not met"); exit(1); } - r = kvm_filter_msr(s, MSR_PKG_ENERGY_STATUS, - kvm_rdmsr_pkg_energy_status, NULL); - if (!r) { - error_report("Could not install MSR_PKG_ENERGY_STATUS \ - handler: %s", - strerror(-ret)); - exit(1); - } - r = kvm_msr_energy_thread_init(s, ms); - if (r) { - error_report("kvm : error RAPL feature requirement not meet"); - exit(1); - } - } } @@ -3694,7 +3779,14 @@ 
static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f) kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0, CR4_VMXE_MASK); - if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) { + if (f[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED) { + /* FRED injected-event data (0x2052). */ + kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x52); + } else if (f[FEAT_VMX_EXIT_CTLS] & + VMX_VM_EXIT_ACTIVATE_SECONDARY_CONTROLS) { + /* Secondary VM-exit controls (0x2044). */ + kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x44); + } else if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) { /* TSC multiplier (0x2032). */ kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x32); } else { @@ -5118,7 +5210,7 @@ static int kvm_get_nested_state(X86CPU *cpu) return ret; } -int kvm_arch_put_registers(CPUState *cpu, int level) +int kvm_arch_put_registers(CPUState *cpu, int level, Error **errp) { X86CPU *x86_cpu = X86_CPU(cpu); int ret; @@ -5133,6 +5225,7 @@ int kvm_arch_put_registers(CPUState *cpu, int level) if (level >= KVM_PUT_RESET_STATE) { ret = kvm_put_msr_feature_control(x86_cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set feature control MSR"); return ret; } } @@ -5140,12 +5233,14 @@ int kvm_arch_put_registers(CPUState *cpu, int level) /* must be before kvm_put_nested_state so that EFER.SVME is set */ ret = has_sregs2 ? kvm_put_sregs2(x86_cpu) : kvm_put_sregs(x86_cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set special registers"); return ret; } if (level >= KVM_PUT_RESET_STATE) { ret = kvm_put_nested_state(x86_cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set nested state"); return ret; } } @@ -5163,6 +5258,7 @@ int kvm_arch_put_registers(CPUState *cpu, int level) if (xen_mode == XEN_EMULATE && level == KVM_PUT_FULL_STATE) { ret = kvm_put_xen_state(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set Xen state"); return ret; } } @@ -5170,43 +5266,51 @@ int kvm_arch_put_registers(CPUState *cpu, int level) ret = kvm_getput_regs(x86_cpu, 1); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set general purpose registers"); return ret; } ret = kvm_put_xsave(x86_cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set XSAVE"); return ret; } ret = kvm_put_xcrs(x86_cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set XCRs"); return ret; } ret = kvm_put_msrs(x86_cpu, level); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set MSRs"); return ret; } ret = kvm_put_vcpu_events(x86_cpu, level); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set vCPU events"); return ret; } if (level >= KVM_PUT_RESET_STATE) { ret = kvm_put_mp_state(x86_cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set MP state"); return ret; } } ret = kvm_put_tscdeadline_msr(x86_cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set TSC deadline MSR"); return ret; } ret = kvm_put_debugregs(x86_cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to set debug registers"); return ret; } return 0; } -int kvm_arch_get_registers(CPUState *cs) +int kvm_arch_get_registers(CPUState *cs, Error **errp) { X86CPU *cpu = X86_CPU(cs); int ret; @@ -5215,6 +5319,7 @@ int kvm_arch_get_registers(CPUState *cs) ret = kvm_get_vcpu_events(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get vCPU events"); goto out; } /* @@ -5223,44 +5328,54 @@ int kvm_arch_get_registers(CPUState *cs) */ ret = kvm_get_mp_state(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get MP state"); goto 
out; } ret = kvm_getput_regs(cpu, 0); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get general purpose registers"); goto out; } ret = kvm_get_xsave(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get XSAVE"); goto out; } ret = kvm_get_xcrs(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get XCRs"); goto out; } ret = has_sregs2 ? kvm_get_sregs2(cpu) : kvm_get_sregs(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get special registers"); goto out; } ret = kvm_get_msrs(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get MSRs"); goto out; } ret = kvm_get_apic(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get APIC"); goto out; } ret = kvm_get_debugregs(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get debug registers"); goto out; } ret = kvm_get_nested_state(cpu); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get nested state"); goto out; } #ifdef CONFIG_XEN_EMU if (xen_mode == XEN_EMULATE) { ret = kvm_get_xen_state(cs); if (ret < 0) { + error_setg_errno(errp, -ret, "Failed to get Xen state"); goto out; } } @@ -5729,7 +5844,7 @@ static bool kvm_install_msr_filters(KVMState *s) return true; } -bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr, +static bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr, QEMUWRMSRHandler *wrmsr) { int i; diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h index 34fc607..9de9c0d 100644 --- a/target/i386/kvm/kvm_i386.h +++ b/target/i386/kvm/kvm_i386.h @@ -66,17 +66,6 @@ uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address); void kvm_update_msi_routes_all(void *private, bool global, uint32_t index, uint32_t mask); -typedef bool QEMURDMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t *val); -typedef bool QEMUWRMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t val); -typedef struct kvm_msr_handlers { - uint32_t msr; - QEMURDMSRHandler *rdmsr; - QEMUWRMSRHandler *wrmsr; -} KVMMSRHandlers; - -bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr, - QEMUWRMSRHandler *wrmsr); - #endif /* CONFIG_KVM */ void kvm_pc_setup_irq_routing(bool pci_enabled); diff --git a/target/i386/kvm/vmsr_energy.c b/target/i386/kvm/vmsr_energy.c index 7e064c5..31508d4 100644 --- a/target/i386/kvm/vmsr_energy.c +++ b/target/i386/kvm/vmsr_energy.c @@ -34,7 +34,7 @@ bool is_host_cpu_intel(void) host_cpu_vendor_fms(vendor, &family, &model, &stepping); - return strcmp(vendor, CPUID_VENDOR_INTEL); + return g_str_equal(vendor, CPUID_VENDOR_INTEL); } int is_rapl_enabled(void) diff --git a/target/loongarch/kvm/kvm.c b/target/loongarch/kvm/kvm.c index 4786cd5..30ec160 100644 --- a/target/loongarch/kvm/kvm.c +++ b/target/loongarch/kvm/kvm.c @@ -588,7 +588,7 @@ static int kvm_loongarch_put_cpucfg(CPUState *cs) return ret; } -int kvm_arch_get_registers(CPUState *cs) +int kvm_arch_get_registers(CPUState *cs, Error **errp) { int ret; @@ -616,7 +616,7 @@ int kvm_arch_get_registers(CPUState *cs) return ret; } -int kvm_arch_put_registers(CPUState *cs, int level) +int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) { int ret; diff --git a/target/mips/kvm.c b/target/mips/kvm.c index a631ab5..a98798c 100644 --- a/target/mips/kvm.c +++ b/target/mips/kvm.c @@ -1172,7 +1172,7 @@ static int kvm_mips_get_cp0_registers(CPUState *cs) return ret; } -int kvm_arch_put_registers(CPUState *cs, int level) +int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) { CPUMIPSState *env = cpu_env(cs); struct kvm_regs regs; @@ 
-1207,7 +1207,7 @@ int kvm_arch_put_registers(CPUState *cs, int level) return ret; } -int kvm_arch_get_registers(CPUState *cs) +int kvm_arch_get_registers(CPUState *cs, Error **errp) { CPUMIPSState *env = cpu_env(cs); int ret = 0; diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c index 907dba6..3efc28f 100644 --- a/target/ppc/kvm.c +++ b/target/ppc/kvm.c @@ -900,7 +900,7 @@ int kvmppc_put_books_sregs(PowerPCCPU *cpu) return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs); } -int kvm_arch_put_registers(CPUState *cs, int level) +int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; @@ -1205,7 +1205,7 @@ static int kvmppc_get_books_sregs(PowerPCCPU *cpu) return 0; } -int kvm_arch_get_registers(CPUState *cs) +int kvm_arch_get_registers(CPUState *cs, Error **errp) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c index 341af90..8233a32 100644 --- a/target/riscv/kvm/kvm-cpu.c +++ b/target/riscv/kvm/kvm-cpu.c @@ -1192,7 +1192,7 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = { KVM_CAP_LAST_INFO }; -int kvm_arch_get_registers(CPUState *cs) +int kvm_arch_get_registers(CPUState *cs, Error **errp) { int ret = 0; @@ -1237,7 +1237,7 @@ int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state) return 0; } -int kvm_arch_put_registers(CPUState *cs, int level) +int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) { int ret = 0; diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c index 94181d9..8ffe015 100644 --- a/target/s390x/kvm/kvm.c +++ b/target/s390x/kvm/kvm.c @@ -472,7 +472,7 @@ static int can_sync_regs(CPUState *cs, int regs) #define KVM_SYNC_REQUIRED_REGS (KVM_SYNC_GPRS | KVM_SYNC_ACRS | \ KVM_SYNC_CRS | KVM_SYNC_PREFIX) -int kvm_arch_put_registers(CPUState *cs, int level) +int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) { CPUS390XState *env = cpu_env(cs); struct kvm_fpu fpu = {}; @@ -598,7 +598,7 @@ int kvm_arch_put_registers(CPUState *cs, int level) return 0; } -int kvm_arch_get_registers(CPUState *cs) +int kvm_arch_get_registers(CPUState *cs, Error **errp) { CPUS390XState *env = cpu_env(cs); struct kvm_fpu fpu; |
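
A further note on the target/i386/kvm/vmsr_energy.c hunk above: is_host_cpu_intel() used to return strcmp(vendor, CPUID_VENDOR_INTEL) directly, and strcmp() returns 0 on a match, so the result read as "false" on Intel hosts; g_str_equal() returns TRUE on a match, which is what the function name implies, and the caller in kvm.c is flipped to !is_host_cpu_intel() in the same diff. A standalone illustration (not QEMU code; the vendor string stands in for host_cpu_vendor_fms() output):

    #include <glib.h>
    #include <stdio.h>
    #include <string.h>

    #define CPUID_VENDOR_INTEL "GenuineIntel"

    int main(void)
    {
        const char *vendor = "GenuineIntel";   /* pretend host vendor string */

        /* strcmp() == 0 on equality, so used as a boolean it inverts the test. */
        printf("strcmp-as-bool says Intel?  %s\n",
               strcmp(vendor, CPUID_VENDOR_INTEL) ? "yes" : "no");      /* no  */

        /* g_str_equal() is TRUE on equality, matching the intended meaning. */
        printf("g_str_equal says Intel?     %s\n",
               g_str_equal(vendor, CPUID_VENDOR_INTEL) ? "yes" : "no"); /* yes */
        return 0;
    }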