 target/i386/cpu.h | 348 ++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 269 insertions(+), 79 deletions(-)
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 1e121ac..51e1013 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -20,23 +20,21 @@
#ifndef I386_CPU_H
#define I386_CPU_H
-#include "sysemu/tcg.h"
+#include "system/tcg.h"
#include "cpu-qom.h"
#include "kvm/hyperv-proto.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
+#include "exec/memop.h"
#include "hw/i386/topology.h"
#include "qapi/qapi-types-common.h"
#include "qemu/cpu-float.h"
#include "qemu/timer.h"
+#include "standard-headers/asm-x86/kvm_para.h"
#define XEN_NR_VIRQS 24
-#define KVM_HAVE_MCE_INJECTION 1
-
-/* support for self modifying code even if the modified instruction is
- close to the modifying instruction */
-#define TARGET_HAS_PRECISE_SMC
-
#ifdef TARGET_X86_64
#define I386_ELF_MACHINE EM_X86_64
#define ELF_MACHINE_UNAME "x86_64"
@@ -267,12 +265,6 @@ typedef enum X86Seg {
#define CR4_FRED_MASK 0
#endif
-#ifdef TARGET_X86_64
-#define CR4_FRED_MASK (1ULL << 32)
-#else
-#define CR4_FRED_MASK 0
-#endif
-
#define CR4_RESERVED_MASK \
(~(target_ulong)(CR4_VME_MASK | CR4_PVI_MASK | CR4_TSD_MASK \
| CR4_DE_MASK | CR4_PSE_MASK | CR4_PAE_MASK \
@@ -351,6 +343,7 @@ typedef enum X86Seg {
#define PG_MODE_PKE (1 << 17)
#define PG_MODE_PKS (1 << 18)
#define PG_MODE_SMEP (1 << 19)
+#define PG_MODE_PG (1 << 20)
#define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */
#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
@@ -414,6 +407,10 @@ typedef enum X86Seg {
#define MSR_IA32_TSX_CTRL 0x122
#define MSR_IA32_TSCDEADLINE 0x6e0
#define MSR_IA32_PKRS 0x6e1
+#define MSR_RAPL_POWER_UNIT 0x00000606
+#define MSR_PKG_POWER_LIMIT 0x00000610
+#define MSR_PKG_ENERGY_STATUS 0x00000611
+#define MSR_PKG_POWER_INFO 0x00000614
#define MSR_ARCH_LBR_CTL 0x000014ce
#define MSR_ARCH_LBR_DEPTH 0x000014cf
#define MSR_ARCH_LBR_FROM_0 0x00001500
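
The RAPL MSRs added above let QEMU expose package energy accounting to the guest. A minimal sketch of how such readings are conventionally decoded (field layout per Intel SDM; the helper below is illustrative, not part of this patch):

    static double pkg_energy_joules(uint64_t rapl_power_unit,
                                    uint64_t pkg_energy_status)
    {
        /* MSR_RAPL_POWER_UNIT bits 12:8 hold the Energy Status Units
         * exponent: one energy count equals 1 / 2^ESU joules. */
        unsigned esu = (rapl_power_unit >> 8) & 0x1f;

        /* MSR_PKG_ENERGY_STATUS bits 31:0 are a free-running counter. */
        return (double)(uint32_t)pkg_energy_status / (double)(1u << esu);
    }
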
@@ -535,6 +532,8 @@ typedef enum X86Seg {
#define MSR_AMD64_TSC_RATIO_DEFAULT 0x100000000ULL
+#define MSR_K7_HWCR 0xc0010015
+
#define MSR_VM_HSAVE_PA 0xc0010117
#define MSR_IA32_XFD 0x000001c4
@@ -585,6 +584,7 @@ typedef enum X86Seg {
#define XSTATE_OPMASK_BIT 5
#define XSTATE_ZMM_Hi256_BIT 6
#define XSTATE_Hi16_ZMM_BIT 7
+#define XSTATE_PT_BIT 8
#define XSTATE_PKRU_BIT 9
#define XSTATE_ARCH_LBR_BIT 15
#define XSTATE_XTILE_CFG_BIT 17
@@ -598,6 +598,7 @@ typedef enum X86Seg {
#define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT)
#define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT)
#define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT)
+#define XSTATE_PT_MASK (1ULL << XSTATE_PT_BIT)
#define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT)
#define XSTATE_ARCH_LBR_MASK (1ULL << XSTATE_ARCH_LBR_BIT)
#define XSTATE_XTILE_CFG_MASK (1ULL << XSTATE_XTILE_CFG_BIT)
@@ -620,6 +621,11 @@ typedef enum X86Seg {
XSTATE_Hi16_ZMM_MASK | XSTATE_PKRU_MASK | \
XSTATE_XTILE_CFG_MASK | XSTATE_XTILE_DATA_MASK)
+/* CPUID feature bits available in XSS */
+#define CPUID_XSTATE_XSS_MASK (XSTATE_ARCH_LBR_MASK)
+
+#define CPUID_XSTATE_MASK (CPUID_XSTATE_XCR0_MASK | CPUID_XSTATE_XSS_MASK)
+
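
Together these masks partition the advertised xstate components into user state (enabled via XCR0) and supervisor state (enabled via IA32_XSS). A minimal sketch of that split, assuming a hypothetical helper name:

    static inline void xstate_split(uint64_t components,
                                    uint64_t *xcr0_part, uint64_t *xss_part)
    {
        /* XCR0 governs user components, IA32_XSS the supervisor ones. */
        *xcr0_part = components & CPUID_XSTATE_XCR0_MASK;
        *xss_part  = components & CPUID_XSTATE_XSS_MASK;
    }
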
/* CPUID feature words */
typedef enum FeatureWord {
FEAT_1_EDX, /* CPUID[1].EDX */
@@ -634,6 +640,8 @@ typedef enum FeatureWord {
FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */
FEAT_8000_0021_EAX, /* CPUID[8000_0021].EAX */
+ FEAT_8000_0021_EBX, /* CPUID[8000_0021].EBX */
+ FEAT_8000_0022_EAX, /* CPUID[8000_0022].EAX */
FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */
@@ -660,11 +668,22 @@ typedef enum FeatureWord {
FEAT_SGX_12_1_EAX, /* CPUID[EAX=0x12,ECX=1].EAX (SGX ATTRIBUTES[31:0]) */
FEAT_XSAVE_XSS_LO, /* CPUID[EAX=0xd,ECX=1].ECX */
FEAT_XSAVE_XSS_HI, /* CPUID[EAX=0xd,ECX=1].EDX */
+ FEAT_7_1_ECX, /* CPUID[EAX=7,ECX=1].ECX */
FEAT_7_1_EDX, /* CPUID[EAX=7,ECX=1].EDX */
FEAT_7_2_EDX, /* CPUID[EAX=7,ECX=2].EDX */
+ FEAT_24_0_EBX, /* CPUID[EAX=0x24,ECX=0].EBX */
FEATURE_WORDS,
} FeatureWord;
+typedef struct FeatureMask {
+ FeatureWord index;
+ uint64_t mask;
+} FeatureMask;
+
+typedef struct FeatureDep {
+ FeatureMask from, to;
+} FeatureDep;
+
typedef uint64_t FeatureWordArray[FEATURE_WORDS];
uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
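
The new FeatureMask/FeatureDep pair lets dependency tables say "these bits of word X require these bits of word Y". A hypothetical entry (the real tables live in target/i386/cpu.c; this particular pairing is chosen only for illustration):

    /* If XSAVE (CPUID[1].ECX) is absent, XSAVES must be filtered out. */
    static const FeatureDep example_dep = {
        .from = { FEAT_1_ECX,  CPUID_EXT_XSAVE },
        .to   = { FEAT_XSAVE,  CPUID_XSAVE_XSAVES },
    };
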
@@ -822,6 +841,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_0_EBX_HLE (1U << 4)
/* Intel Advanced Vector Extensions 2 */
#define CPUID_7_0_EBX_AVX2 (1U << 5)
+/* FPU data pointer updated only on x87 exceptions */
+#define CPUID_7_0_EBX_FDP_EXCPTN_ONLY (1U << 6)
/* Supervisor-mode Execution Prevention */
#define CPUID_7_0_EBX_SMEP (1U << 7)
/* 2nd Group of Advanced Bit Manipulation Extensions */
@@ -832,6 +853,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_0_EBX_INVPCID (1U << 10)
/* Restricted Transactional Memory */
#define CPUID_7_0_EBX_RTM (1U << 11)
+/* Zero out FPU CS and FPU DS */
+#define CPUID_7_0_EBX_ZERO_FCS_FDS (1U << 13)
/* Memory Protection Extension */
#define CPUID_7_0_EBX_MPX (1U << 14)
/* AVX-512 Foundation */
@@ -893,6 +916,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_0_ECX_LA57 (1U << 16)
/* Read Processor ID */
#define CPUID_7_0_ECX_RDPID (1U << 22)
+/* KeyLocker */
+#define CPUID_7_0_ECX_KeyLocker (1U << 23)
/* Bus Lock Debug Exception */
#define CPUID_7_0_ECX_BUS_LOCK_DETECT (1U << 24)
/* Cache Line Demote Instruction */
@@ -914,6 +939,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_0_EDX_FSRM (1U << 4)
/* AVX512 Vector Pair Intersection to a Pair of Mask Registers */
#define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8)
+ /* "md_clear" VERW clears CPU buffers */
+#define CPUID_7_0_EDX_MD_CLEAR (1U << 10)
/* SERIALIZE instruction */
#define CPUID_7_0_EDX_SERIALIZE (1U << 14)
/* TSX Suspend Load Address Tracking instruction */
@@ -941,10 +968,18 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
/* Speculative Store Bypass Disable */
#define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31)
+/* SHA512 Instruction */
+#define CPUID_7_1_EAX_SHA512 (1U << 0)
+/* SM3 Instruction */
+#define CPUID_7_1_EAX_SM3 (1U << 1)
+/* SM4 Instruction */
+#define CPUID_7_1_EAX_SM4 (1U << 2)
/* AVX VNNI Instruction */
#define CPUID_7_1_EAX_AVX_VNNI (1U << 4)
/* AVX512 BFloat16 Instruction */
#define CPUID_7_1_EAX_AVX512_BF16 (1U << 5)
+/* Linear address space separation */
+#define CPUID_7_1_EAX_LASS (1U << 6)
/* CMPCCXADD Instructions */
#define CPUID_7_1_EAX_CMPCCXADD (1U << 7)
/* Fast Zero REP MOVS */
@@ -953,6 +988,12 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_1_EAX_FSRS (1U << 11)
/* Fast Short REP CMPS/SCAS */
#define CPUID_7_1_EAX_FSRC (1U << 12)
+/* Flexible return and event delivery (FRED) */
+#define CPUID_7_1_EAX_FRED (1U << 17)
+/* Load into IA32_KERNEL_GS_BASE (LKGS) */
+#define CPUID_7_1_EAX_LKGS (1U << 18)
+/* Non-Serializing Write to Model Specific Register (WRMSRNS) */
+#define CPUID_7_1_EAX_WRMSRNS (1U << 19)
/* Support Tile Computational Operations on FP16 Numbers */
#define CPUID_7_1_EAX_AMX_FP16 (1U << 21)
/* Support for VPMADD52[H,L]UQ */
@@ -960,20 +1001,32 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
/* Linear Address Masking */
#define CPUID_7_1_EAX_LAM (1U << 26)
+/* The immediate form of MSR access instructions */
+#define CPUID_7_1_ECX_MSR_IMM (1U << 5)
+
/* Support for VPDPB[SU,UU,SS]D[,S] */
#define CPUID_7_1_EDX_AVX_VNNI_INT8 (1U << 4)
/* AVX NE CONVERT Instructions */
#define CPUID_7_1_EDX_AVX_NE_CONVERT (1U << 5)
/* AMX COMPLEX Instructions */
#define CPUID_7_1_EDX_AMX_COMPLEX (1U << 8)
+/* AVX-VNNI-INT16 Instructions */
+#define CPUID_7_1_EDX_AVX_VNNI_INT16 (1U << 10)
/* PREFETCHIT0/1 Instructions */
#define CPUID_7_1_EDX_PREFETCHITI (1U << 14)
-/* Flexible return and event delivery (FRED) */
-#define CPUID_7_1_EAX_FRED (1U << 17)
-/* Load into IA32_KERNEL_GS_BASE (LKGS) */
-#define CPUID_7_1_EAX_LKGS (1U << 18)
-/* Non-Serializing Write to Model Specific Register (WRMSRNS) */
-#define CPUID_7_1_EAX_WRMSRNS (1U << 19)
+/* Support for Advanced Vector Extensions 10 */
+#define CPUID_7_1_EDX_AVX10 (1U << 19)
+
+/* Indicate bit 7 of the IA32_SPEC_CTRL MSR is supported */
+#define CPUID_7_2_EDX_PSFD (1U << 0)
+/* Indicate bits 3 and 4 of the IA32_SPEC_CTRL MSR are supported */
+#define CPUID_7_2_EDX_IPRED_CTRL (1U << 1)
+/* Indicate bits 5 and 6 of the IA32_SPEC_CTRL MSR are supported */
+#define CPUID_7_2_EDX_RRSBA_CTRL (1U << 2)
+/* Indicate bit 8 of the IA32_SPEC_CTRL MSR is supported */
+#define CPUID_7_2_EDX_DDPD_U (1U << 3)
+/* Indicate bit 10 of the IA32_SPEC_CTRL MSR is supported */
+#define CPUID_7_2_EDX_BHI_CTRL (1U << 4)
/* Do not exhibit MXCSR Configuration Dependent Timing (MCDT) behavior */
#define CPUID_7_2_EDX_MCDT_NO (1U << 5)
@@ -984,10 +1037,43 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
/* Packets which contain IP payload have LIP values */
#define CPUID_14_0_ECX_LIP (1U << 31)
+/* AVX10 128-bit vector support is present */
+#define CPUID_24_0_EBX_AVX10_128 (1U << 16)
+/* AVX10 256-bit vector support is present */
+#define CPUID_24_0_EBX_AVX10_256 (1U << 17)
+/* AVX10 512-bit vector support is present */
+#define CPUID_24_0_EBX_AVX10_512 (1U << 18)
+/* AVX10 vector length support mask */
+#define CPUID_24_0_EBX_AVX10_VL_MASK (CPUID_24_0_EBX_AVX10_128 | \
+ CPUID_24_0_EBX_AVX10_256 | \
+ CPUID_24_0_EBX_AVX10_512)
+
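In AVX10 the vector-length bits enumerate which widths an implementation supports, so the widest advertised length can be derived from CPUID[0x24].EBX along these lines (illustrative helper, not QEMU API):

    static int avx10_max_vector_bits(uint32_t cpuid_24_0_ebx)
    {
        if (cpuid_24_0_ebx & CPUID_24_0_EBX_AVX10_512) {
            return 512;
        }
        if (cpuid_24_0_ebx & CPUID_24_0_EBX_AVX10_256) {
            return 256;
        }
        if (cpuid_24_0_ebx & CPUID_24_0_EBX_AVX10_128) {
            return 128;
        }
        return 0;   /* no AVX10 vector length advertised */
    }
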
/* RAS Features */
#define CPUID_8000_0007_EBX_OVERFLOW_RECOV (1U << 0)
#define CPUID_8000_0007_EBX_SUCCOR (1U << 1)
+/* (Old) KVM paravirtualized clocksource */
+#define CPUID_KVM_CLOCK (1U << KVM_FEATURE_CLOCKSOURCE)
+/* (New) KVM-specific paravirtualized clocksource */
+#define CPUID_KVM_CLOCK2 (1U << KVM_FEATURE_CLOCKSOURCE2)
+/* KVM asynchronous page fault */
+#define CPUID_KVM_ASYNCPF (1U << KVM_FEATURE_ASYNC_PF)
+/* KVM steal time accounting (time when the guest vCPU is not running) */
+#define CPUID_KVM_STEAL_TIME (1U << KVM_FEATURE_STEAL_TIME)
+/* KVM paravirtualized end-of-interrupt signaling */
+#define CPUID_KVM_PV_EOI (1U << KVM_FEATURE_PV_EOI)
+/* KVM paravirtualized spinlocks support */
+#define CPUID_KVM_PV_UNHALT (1U << KVM_FEATURE_PV_UNHALT)
+/* KVM host-side polling on HLT control from the guest */
+#define CPUID_KVM_POLL_CONTROL (1U << KVM_FEATURE_POLL_CONTROL)
+/* KVM interrupt-based asynchronous page fault */
+#define CPUID_KVM_ASYNCPF_INT (1U << KVM_FEATURE_ASYNC_PF_INT)
+/* KVM 'Extended Destination ID' support for external interrupts */
+#define CPUID_KVM_MSI_EXT_DEST_ID (1U << KVM_FEATURE_MSI_EXT_DEST_ID)
+
+/* Hint to the guest that its vCPUs are never preempted for an unlimited time */
+#define CPUID_KVM_HINTS_REALTIME (1U << KVM_HINTS_REALTIME)
+
/* CLZERO instruction */
#define CPUID_8000_0008_EBX_CLZERO (1U << 0)
/* Always save/restore FP error pointers */
@@ -1010,24 +1096,69 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_8000_0008_EBX_AMD_PSFD (1U << 28)
/* Processor ignores nested data breakpoints */
-#define CPUID_8000_0021_EAX_No_NESTED_DATA_BP (1U << 0)
+#define CPUID_8000_0021_EAX_NO_NESTED_DATA_BP (1U << 0)
+/* WRMSR to FS_BASE, GS_BASE, or KERNEL_GS_BASE is non-serializing */
+#define CPUID_8000_0021_EAX_FS_GS_BASE_NS (1U << 1)
/* LFENCE is always serializing */
#define CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING (1U << 2)
/* Null Selector Clears Base */
-#define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6)
+#define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6)
/* Automatic IBRS */
-#define CPUID_8000_0021_EAX_AUTO_IBRS (1U << 8)
+#define CPUID_8000_0021_EAX_AUTO_IBRS (1U << 8)
+/* Indicates support for IC prefetch */
+#define CPUID_8000_0021_EAX_PREFETCHI (1U << 20)
+/* Enhanced Return Address Predictor Security */
+#define CPUID_8000_0021_EAX_ERAPS (1U << 24)
+/* Selective Branch Predictor Barrier */
+#define CPUID_8000_0021_EAX_SBPB (1U << 27)
+/* IBPB includes branch type prediction flushing */
+#define CPUID_8000_0021_EAX_IBPB_BRTYPE (1U << 28)
+/* Not vulnerable to Speculative Return Stack Overflow */
+#define CPUID_8000_0021_EAX_SRSO_NO (1U << 29)
+/* Not vulnerable to SRSO at the user-kernel boundary */
+#define CPUID_8000_0021_EAX_SRSO_USER_KERNEL_NO (1U << 30)
+
+/*
+ * Return Address Predictor size. RapSize x 8 is the minimum number of
+ * CALL instructions software needs to execute to flush the RAP.
+ */
+#define CPUID_8000_0021_EBX_RAPSIZE (8U << 16)
+
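RapSize lives in EBX bits 23:16, hence the (8U << 16) encoding above, which advertises a size of 8. A sketch of how a guest could size its flush sequence from the raw leaf (hypothetical helper):

    static unsigned raps_flush_call_count(uint32_t cpuid_8000_0021_ebx)
    {
        /* RapSize is EBX[23:16]; RapSize * 8 CALLs flush the RAP. */
        unsigned rap_size = (cpuid_8000_0021_ebx >> 16) & 0xff;
        return rap_size * 8;
    }
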
+/* Performance Monitoring Version 2 */
+#define CPUID_8000_0022_EAX_PERFMON_V2 (1U << 0)
#define CPUID_XSAVE_XSAVEOPT (1U << 0)
#define CPUID_XSAVE_XSAVEC (1U << 1)
#define CPUID_XSAVE_XGETBV1 (1U << 2)
#define CPUID_XSAVE_XSAVES (1U << 3)
+#define CPUID_XSAVE_XFD (1U << 4)
#define CPUID_6_EAX_ARAT (1U << 2)
/* CPUID[0x80000007].EDX flags: */
#define CPUID_APM_INVTSC (1U << 8)
+/* "rng" RNG present (xstore) */
+#define CPUID_C000_0001_EDX_XSTORE (1U << 2)
+/* "rng_en" RNG enabled */
+#define CPUID_C000_0001_EDX_XSTORE_EN (1U << 3)
+/* "ace" on-CPU crypto (xcrypt) */
+#define CPUID_C000_0001_EDX_XCRYPT (1U << 6)
+/* "ace_en" on-CPU crypto enabled */
+#define CPUID_C000_0001_EDX_XCRYPT_EN (1U << 7)
+/* Advanced Cryptography Engine v2 */
+#define CPUID_C000_0001_EDX_ACE2 (1U << 8)
+/* ACE v2 enabled */
+#define CPUID_C000_0001_EDX_ACE2_EN (1U << 9)
+/* PadLock Hash Engine */
+#define CPUID_C000_0001_EDX_PHE (1U << 10)
+/* PHE enabled */
+#define CPUID_C000_0001_EDX_PHE_EN (1U << 11)
+/* PadLock Montgomery Multiplier */
+#define CPUID_C000_0001_EDX_PMM (1U << 12)
+/* PMM enabled */
+#define CPUID_C000_0001_EDX_PMM_EN (1U << 13)
+
#define CPUID_VENDOR_SZ 12
#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
@@ -1040,7 +1171,16 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */
#define CPUID_VENDOR_AMD "AuthenticAMD"
-#define CPUID_VENDOR_VIA "CentaurHauls"
+#define CPUID_VENDOR_ZHAOXIN1_1 0x746E6543 /* "Cent" */
+#define CPUID_VENDOR_ZHAOXIN1_2 0x48727561 /* "aurH" */
+#define CPUID_VENDOR_ZHAOXIN1_3 0x736C7561 /* "auls" */
+
+#define CPUID_VENDOR_ZHAOXIN2_1 0x68532020 /* " Sh" */
+#define CPUID_VENDOR_ZHAOXIN2_2 0x68676E61 /* "angh" */
+#define CPUID_VENDOR_ZHAOXIN2_3 0x20206961 /* "ai " */
+
+#define CPUID_VENDOR_ZHAOXIN1 "CentaurHauls"
+#define CPUID_VENDOR_ZHAOXIN2 " Shanghai "
#define CPUID_VENDOR_HYGON "HygonGenuine"
@@ -1050,6 +1190,15 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
(env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
(env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
+#define IS_ZHAOXIN1_CPU(env) \
+ ((env)->cpuid_vendor1 == CPUID_VENDOR_ZHAOXIN1_1 && \
+ (env)->cpuid_vendor2 == CPUID_VENDOR_ZHAOXIN1_2 && \
+ (env)->cpuid_vendor3 == CPUID_VENDOR_ZHAOXIN1_3)
+#define IS_ZHAOXIN2_CPU(env) \
+ ((env)->cpuid_vendor1 == CPUID_VENDOR_ZHAOXIN2_1 && \
+ (env)->cpuid_vendor2 == CPUID_VENDOR_ZHAOXIN2_2 && \
+ (env)->cpuid_vendor3 == CPUID_VENDOR_ZHAOXIN2_3)
+#define IS_ZHAOXIN_CPU(env) (IS_ZHAOXIN1_CPU(env) || IS_ZHAOXIN2_CPU(env))
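
Each vendor dword is simply four characters of the 12-byte vendor string packed with the first character in the low byte. A standalone sketch of the packing (assumes a little-endian host; helper name illustrative):

    #include <stdint.h>
    #include <string.h>

    static uint32_t vendor_dword(const char *four_chars)
    {
        uint32_t v;
        memcpy(&v, four_chars, 4);   /* little-endian host assumed */
        return v;
    }

    /* vendor_dword("Cent") == 0x746E6543 == CPUID_VENDOR_ZHAOXIN1_1 */
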
#define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */
@@ -1080,7 +1229,10 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define MSR_ARCH_CAP_FBSDP_NO (1U << 14)
#define MSR_ARCH_CAP_PSDP_NO (1U << 15)
#define MSR_ARCH_CAP_FB_CLEAR (1U << 17)
+#define MSR_ARCH_CAP_BHI_NO (1U << 20)
#define MSR_ARCH_CAP_PBRSB_NO (1U << 24)
+#define MSR_ARCH_CAP_GDS_NO (1U << 26)
+#define MSR_ARCH_CAP_RFDS_NO (1U << 27)
#define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5)
@@ -1188,6 +1340,7 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define VMX_VM_EXIT_PT_CONCEAL_PIP 0x01000000
#define VMX_VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000
#define VMX_VM_EXIT_LOAD_IA32_PKRS 0x20000000
+#define VMX_VM_EXIT_ACTIVATE_SECONDARY_CONTROLS 0x80000000
#define VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004
#define VMX_VM_ENTRY_IA32E_MODE 0x00000200
@@ -1273,14 +1426,14 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
* are only needed for conditional branches.
*/
typedef enum {
- CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
- CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */
- CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */
- CC_OP_ADOX, /* CC_SRC2 = O, CC_SRC = rest. */
- CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */
- CC_OP_CLR, /* Z and P set, all other flags clear. */
-
- CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
+ CC_OP_EFLAGS = 0, /* all cc are explicitly computed, CC_SRC = flags */
+ CC_OP_ADCX = 1, /* CC_DST = C, CC_SRC = rest. */
+ CC_OP_ADOX = 2, /* CC_SRC2 = O, CC_SRC = rest. */
+ CC_OP_ADCOX = 3, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */
+
+ /* Low 2 bits = MemOp constant for the size */
+#define CC_OP_FIRST_BWLQ CC_OP_MULB
+ CC_OP_MULB = 4, /* modify all flags, C, O = (CC_SRC != 0) */
CC_OP_MULW,
CC_OP_MULL,
CC_OP_MULQ,
@@ -1335,6 +1488,11 @@ typedef enum {
CC_OP_BMILGL,
CC_OP_BMILGQ,
+ CC_OP_BLSIB, /* Z,S via CC_DST, C = SRC!=0; O=0; P,A undefined */
+ CC_OP_BLSIW,
+ CC_OP_BLSIL,
+ CC_OP_BLSIQ,
+
/*
* Note that only CC_OP_POPCNT (i.e. the one with MO_TL size)
* is used or implemented, because the translation needs
@@ -1345,10 +1503,24 @@ typedef enum {
CC_OP_POPCNTL__,
CC_OP_POPCNTQ__,
CC_OP_POPCNT = sizeof(target_ulong) == 8 ? CC_OP_POPCNTQ__ : CC_OP_POPCNTL__,
+#define CC_OP_LAST_BWLQ CC_OP_POPCNTQ__
- CC_OP_NB,
+ CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
} CCOp;
-QEMU_BUILD_BUG_ON(CC_OP_NB >= 128);
+
+/* See X86DecodedInsn.cc_op, using int8_t. */
+QEMU_BUILD_BUG_ON(CC_OP_DYNAMIC > INT8_MAX);
+
+static inline MemOp cc_op_size(CCOp op)
+{
+ MemOp size = op & 3;
+
+ QEMU_BUILD_BUG_ON(CC_OP_FIRST_BWLQ & 3);
+ assert(op >= CC_OP_FIRST_BWLQ && op <= CC_OP_LAST_BWLQ);
+ assert(size <= MO_TL);
+
+ return size;
+}
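
Because each sized group starts at a multiple of 4 and is laid out in B/W/L/Q order, the low two bits of the enum value are exactly the MemOp size. A usage sketch for the multiply group (the enum constants are real; the wrapper is illustrative):

    static MemOp example_mul_memop(CCOp op)
    {
        /* CC_OP_MULB..CC_OP_MULQ decode to MO_8..MO_64 in order. */
        assert(op >= CC_OP_MULB && op <= CC_OP_MULQ);
        return cc_op_size(op);
    }
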
typedef struct SegmentCache {
uint32_t selector;
@@ -1466,8 +1638,6 @@ typedef struct {
#define MAX_FIXED_COUNTERS 3
#define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
-#define TARGET_INSN_START_EXTRA_WORDS 1
-
#define NB_OPMASK_REGS 8
/* CPU can't have 0xFFFFFFFF APIC ID, use that value to distinguish
@@ -1603,12 +1773,6 @@ typedef enum TPRAccess {
/* Cache information data structures: */
-enum CacheType {
- DATA_CACHE,
- INSTRUCTION_CACHE,
- UNIFIED_CACHE
-};
-
typedef struct CPUCacheInfo {
enum CacheType type;
uint8_t level;
@@ -1656,7 +1820,7 @@ typedef struct CPUCacheInfo {
* Used to encode CPUID[4].EAX[bits 25:14] or
* CPUID[0x8000001D].EAX[bits 25:14].
*/
- enum CPUTopoLevel share_level;
+ CpuTopologyLevel share_level;
} CPUCacheInfo;
@@ -1667,11 +1831,6 @@ typedef struct CPUCaches {
CPUCacheInfo *l3_cache;
} CPUCaches;
-typedef struct HVFX86LazyFlags {
- target_ulong result;
- target_ulong auxbits;
-} HVFX86LazyFlags;
-
typedef struct CPUArchState {
/* standard registers */
target_ulong regs[CPU_NB_REGS];
@@ -1850,6 +2009,9 @@ typedef struct CPUArchState {
uint64_t msr_lbr_depth;
LBREntry lbr_records[ARCH_LBR_NR_ENTRIES];
+ /* AMD MSRC001_0015 Hardware Configuration */
+ uint64_t msr_hwcr;
+
/* exception/interrupt handling */
int error_code;
int exception_is_int;
@@ -1880,6 +2042,10 @@ typedef struct CPUArchState {
uintptr_t retaddr;
+ /* RAPL MSR */
+ uint64_t msr_rapl_power_unit;
+ uint64_t msr_pkg_energy_status;
+
/* Fields up to this point are cleared by a CPU reset */
struct {} end_reset_fields;
@@ -1901,6 +2067,8 @@ typedef struct CPUArchState {
uint32_t cpuid_vendor3;
uint32_t cpuid_version;
FeatureWordArray features;
+ /* AVX10 version */
+ uint8_t avx10_version;
/* Features that were explicitly enabled/disabled */
FeatureWordArray user_features;
uint32_t cpuid_model[12];
@@ -1955,8 +2123,7 @@ typedef struct CPUArchState {
QemuMutex xen_timers_lock;
#endif
#if defined(CONFIG_HVF)
- HVFX86LazyFlags hvf_lflags;
- void *hvf_mmio_buf;
+ void *emu_mmio_buf;
#endif
uint64_t mcg_cap;
@@ -1975,14 +2142,10 @@ typedef struct CPUArchState {
TPRAccess tpr_access_type;
- /* Number of dies within this CPU package. */
- unsigned nr_dies;
-
- /* Number of modules within one die. */
- unsigned nr_modules;
+ X86CPUTopoInfo topo_info;
/* Bitmap of available CPU topology levels for this CPU. */
- DECLARE_BITMAP(avail_cpu_topo, CPU_TOPO_LEVEL_MAX);
+ DECLARE_BITMAP(avail_cpu_topo, CPU_TOPOLOGY_LEVEL__MAX);
} CPUX86State;
struct kvm_msrs;
@@ -2055,6 +2218,9 @@ struct ArchCPU {
/* Features that were filtered out because of missing host capabilities */
FeatureWordArray filtered_features;
+ /* Features that are forcibly enabled by the underlying hypervisor, e.g., TDX */
+ FeatureWordArray forced_on_features;
+
/* Enable PMU CPUID bits. This can't be enabled by default yet because
* it doesn't have ABI stability guarantees, as it passes all PMU CPUID
* bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
@@ -2102,6 +2268,9 @@ struct ArchCPU {
/* Compatibility bits for old machine types: */
bool enable_cpuid_0xb;
+ /* Force exposure of CPUID leaf 0x1f */
+ bool force_cpuid_0x1f;
+
/* Enable auto level-increase for all CPUID leaves */
bool full_cpuid_auto_level;
@@ -2178,7 +2347,7 @@ struct X86CPUClass {
* CPU definition, automatically loaded by instance_init if not NULL.
* Should be eventually replaced by subclass-specific property defaults.
*/
- X86CPUModel *model;
+ const X86CPUModel *model;
bool host_cpuid_required;
int ordering;
@@ -2200,8 +2369,6 @@ struct X86CPUClass {
extern const VMStateDescription vmstate_x86_cpu;
#endif
-int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);
-
int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
int cpuid, DumpState *s);
int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
@@ -2218,11 +2385,13 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags);
int x86_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void x86_cpu_gdb_init(CPUState *cs);
-void x86_cpu_list(void);
int cpu_x86_support_mca_broadcast(CPUX86State *env);
#ifndef CONFIG_USER_ONLY
+int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);
+
hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
MemTxAttrs *attrs);
int cpu_get_pic_interrupt(CPUX86State *s);
@@ -2319,6 +2488,8 @@ static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
cs->halted = 0;
}
+uint64_t cpu_x86_get_msr_core_thread_count(X86CPU *cpu);
+
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
target_ulong *base, unsigned int *limit,
unsigned int *flags);
@@ -2360,6 +2531,17 @@ void cpu_set_apic_feature(CPUX86State *env);
void host_cpuid(uint32_t function, uint32_t count,
uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
bool cpu_has_x2apic_feature(CPUX86State *env);
+bool is_feature_word_cpuid(uint32_t feature, uint32_t index, int reg);
+void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix);
+void mark_forced_on_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix);
+
+static inline bool x86_has_cpuid_0x1f(X86CPU *cpu)
+{
+ return cpu->force_cpuid_0x1f ||
+ x86_has_extended_topo(cpu->env.avail_cpu_topo);
+}
/* helper.c */
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
@@ -2409,8 +2591,6 @@ uint64_t cpu_get_tsc(CPUX86State *env);
#define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32")
#endif
-#define cpu_list x86_cpu_list
-
/* MMU modes definitions */
#define MMU_KSMAP64_IDX 0
#define MMU_KSMAP32_IDX 1
@@ -2445,35 +2625,17 @@ static inline bool is_mmu_index_32(int mmu_index)
return mmu_index & 1;
}
-int x86_mmu_index_pl(CPUX86State *env, unsigned pl);
-int cpu_mmu_index_kernel(CPUX86State *env);
-
#define CC_DST (env->cc_dst)
#define CC_SRC (env->cc_src)
#define CC_SRC2 (env->cc_src2)
#define CC_OP (env->cc_op)
-#include "exec/cpu-all.h"
#include "svm.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
-static inline void cpu_get_tb_cpu_state(CPUX86State *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *flags = env->hflags |
- (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
- if (env->hflags & HF_CS64_MASK) {
- *cs_base = 0;
- *pc = env->eip;
- } else {
- *cs_base = env->segs[R_CS].base;
- *pc = (uint32_t)(*cs_base + env->eip);
- }
-}
-
void do_cpu_init(X86CPU *cpu);
#define MCE_INJECT_BROADCAST 1
@@ -2544,6 +2706,9 @@ static inline bool cpu_vmx_maybe_enabled(CPUX86State *env)
int get_pg_mode(CPUX86State *env);
/* fpu_helper.c */
+
+/* Set all non-runtime-variable float_status fields to x86 handling */
+void cpu_init_fp_statuses(CPUX86State *env);
void update_fp_status(CPUX86State *env);
void update_mxcsr_status(CPUX86State *env);
void update_mxcsr_from_sse_status(CPUX86State *env);
@@ -2688,4 +2853,29 @@ static inline bool ctl_has_irq(CPUX86State *env)
# define TARGET_VSYSCALL_PAGE (UINT64_C(-10) << 20)
#endif
+/* majority(NOT a, b, c) = (a ^ b) ? b : c */
+#define MAJ_INV1(a, b, c) ((((a) ^ (b)) & ((b) ^ (c))) ^ (c))
+
+/*
+ * ADD_COUT_VEC(x, y) = majority((x + y) ^ x ^ y, x, y)
+ *
+ * If two corresponding bits in x and y are the same, that's the carry
+ * independent of the value (x+y)^x^y. Hence x^y can be replaced with
+ * 1 in (x+y)^x^y, resulting in majority(NOT (x+y), x, y)
+ */
+#define ADD_COUT_VEC(op1, op2, result) \
+ MAJ_INV1(result, op1, op2)
+
+/*
+ * SUB_COUT_VEC(x, y) = NOT majority(x, NOT y, (x - y) ^ x ^ NOT y)
+ * = majority(NOT x, y, (x - y) ^ x ^ y)
+ *
+ * Note that the carry out is actually a borrow, i.e. it is inverted.
+ * If two corresponding bits in x and y are different, the value of the
+ * bit in (x-y)^x^y likewise does not matter. Hence, x^y can be replaced
+ * with 0 in (x-y)^x^y, resulting in majority(NOT x, y, x-y)
+ */
+#define SUB_COUT_VEC(op1, op2, result) \
+ MAJ_INV1(op1, op2, result)
+
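
Both identities are cheap to spot-check exhaustively for narrow operands. With the macros above in scope, an 8-bit test harness might look like this (illustrative, not part of QEMU):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        for (unsigned x = 0; x < 256; x++) {
            for (unsigned y = 0; y < 256; y++) {
                uint8_t add = x + y, sub = x - y;

                /* Bit 7 of the carry-out vector is the addition's CF. */
                assert(((ADD_COUT_VEC(x, y, add) >> 7) & 1) == (x + y) >> 8);

                /* The subtraction vector holds borrows, so CF = x < y. */
                assert(((SUB_COUT_VEC(x, y, sub) >> 7) & 1) == (x < y));
            }
        }
        return 0;
    }
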
#endif /* I386_CPU_H */