Diffstat (limited to 'target/i386/cpu.h')
 target/i386/cpu.h | 188 ++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 140 insertions(+), 48 deletions(-)
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 76f2444..f977fc4 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -23,7 +23,9 @@
#include "system/tcg.h"
#include "cpu-qom.h"
#include "kvm/hyperv-proto.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
#include "exec/memop.h"
#include "hw/i386/topology.h"
#include "qapi/qapi-types-common.h"
@@ -33,12 +35,6 @@
#define XEN_NR_VIRQS 24
-#define KVM_HAVE_MCE_INJECTION 1
-
-/* support for self modifying code even if the modified instruction is
- close to the modifying instruction */
-#define TARGET_HAS_PRECISE_SMC
-
#ifdef TARGET_X86_64
#define I386_ELF_MACHINE EM_X86_64
#define ELF_MACHINE_UNAME "x86_64"
@@ -588,6 +584,7 @@ typedef enum X86Seg {
#define XSTATE_OPMASK_BIT 5
#define XSTATE_ZMM_Hi256_BIT 6
#define XSTATE_Hi16_ZMM_BIT 7
+#define XSTATE_PT_BIT 8
#define XSTATE_PKRU_BIT 9
#define XSTATE_ARCH_LBR_BIT 15
#define XSTATE_XTILE_CFG_BIT 17
@@ -601,6 +598,7 @@ typedef enum X86Seg {
#define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT)
#define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT)
#define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT)
+#define XSTATE_PT_MASK (1ULL << XSTATE_PT_BIT)
#define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT)
#define XSTATE_ARCH_LBR_MASK (1ULL << XSTATE_ARCH_LBR_BIT)
#define XSTATE_XTILE_CFG_MASK (1ULL << XSTATE_XTILE_CFG_BIT)
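
A quick sketch of how these bit/mask pairs are meant to be combined; the check below is illustrative (AVX requires both the SSE and YMM components enabled in XCR0) and uses masks defined alongside those above, it is not code from this header:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: test components in an XCR0-style bitmap using the XSTATE masks. */
    static inline bool avx_state_enabled(uint64_t xcr0)
    {
        uint64_t need = XSTATE_SSE_MASK | XSTATE_YMM_MASK;
        return (xcr0 & need) == need;
    }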
@@ -623,6 +621,11 @@ typedef enum X86Seg {
XSTATE_Hi16_ZMM_MASK | XSTATE_PKRU_MASK | \
XSTATE_XTILE_CFG_MASK | XSTATE_XTILE_DATA_MASK)
+/* CPUID feature bits available in XSS */
+#define CPUID_XSTATE_XSS_MASK (XSTATE_ARCH_LBR_MASK)
+
+#define CPUID_XSTATE_MASK (CPUID_XSTATE_XCR0_MASK | CPUID_XSTATE_XSS_MASK)
+
/* CPUID feature words */
typedef enum FeatureWord {
FEAT_1_EDX, /* CPUID[1].EDX */
@@ -665,12 +668,22 @@ typedef enum FeatureWord {
FEAT_SGX_12_1_EAX, /* CPUID[EAX=0x12,ECX=1].EAX (SGX ATTRIBUTES[31:0]) */
FEAT_XSAVE_XSS_LO, /* CPUID[EAX=0xd,ECX=1].ECX */
FEAT_XSAVE_XSS_HI, /* CPUID[EAX=0xd,ECX=1].EDX */
+ FEAT_7_1_ECX, /* CPUID[EAX=7,ECX=1].ECX */
FEAT_7_1_EDX, /* CPUID[EAX=7,ECX=1].EDX */
FEAT_7_2_EDX, /* CPUID[EAX=7,ECX=2].EDX */
FEAT_24_0_EBX, /* CPUID[EAX=0x24,ECX=0].EBX */
FEATURE_WORDS,
} FeatureWord;
+typedef struct FeatureMask {
+ FeatureWord index;
+ uint64_t mask;
+} FeatureMask;
+
+typedef struct FeatureDep {
+ FeatureMask from, to;
+} FeatureDep;
+
typedef uint64_t FeatureWordArray[FEATURE_WORDS];
uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
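
The new FeatureMask/FeatureDep types back a dependency table (kept in target/i386/cpu.c) that clears dependent feature bits when their prerequisite is missing. A minimal sketch of that pattern; the table entry and helper name are illustrative, not the real table:

    /* Sketch: if the 'from' bits are absent, drop the 'to' bits. */
    static const FeatureDep deps[] = {
        { .from = { FEAT_1_ECX,  CPUID_EXT_XSAVE },  /* XSAVE CPUID bit...  */
          .to   = { FEAT_XSAVE,  ~0ull } },          /* ...gates leaf 0xD   */
    };

    static void enforce_feature_deps(CPUX86State *env)
    {
        for (size_t i = 0; i < ARRAY_SIZE(deps); i++) {
            if (!(env->features[deps[i].from.index] & deps[i].from.mask)) {
                env->features[deps[i].to.index] &= ~deps[i].to.mask;
            }
        }
    }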
@@ -903,6 +916,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_0_ECX_LA57 (1U << 16)
/* Read Processor ID */
#define CPUID_7_0_ECX_RDPID (1U << 22)
+/* KeyLocker */
+#define CPUID_7_0_ECX_KeyLocker (1U << 23)
/* Bus Lock Debug Exception */
#define CPUID_7_0_ECX_BUS_LOCK_DETECT (1U << 24)
/* Cache Line Demote Instruction */
@@ -924,6 +939,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_0_EDX_FSRM (1U << 4)
/* AVX512 Vector Pair Intersection to a Pair of Mask Registers */
#define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8)
+ /* "md_clear" VERW clears CPU buffers */
+#define CPUID_7_0_EDX_MD_CLEAR (1U << 10)
/* SERIALIZE instruction */
#define CPUID_7_0_EDX_SERIALIZE (1U << 14)
/* TSX Suspend Load Address Tracking instruction */
@@ -961,6 +978,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_1_EAX_AVX_VNNI (1U << 4)
/* AVX512 BFloat16 Instruction */
#define CPUID_7_1_EAX_AVX512_BF16 (1U << 5)
+/* Linear address space separation */
+#define CPUID_7_1_EAX_LASS (1U << 6)
/* CMPCCXADD Instructions */
#define CPUID_7_1_EAX_CMPCCXADD (1U << 7)
/* Fast Zero REP MOVS */
@@ -982,6 +1001,9 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
/* Linear Address Masking */
#define CPUID_7_1_EAX_LAM (1U << 26)
+/* The immediate form of MSR access instructions */
+#define CPUID_7_1_ECX_MSR_IMM (1U << 5)
+
/* Support for VPDPB[SU,UU,SS]D[,S] */
#define CPUID_7_1_EDX_AVX_VNNI_INT8 (1U << 4)
/* AVX NE CONVERT Instructions */
@@ -1005,6 +1027,7 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_2_EDX_DDPD_U (1U << 3)
/* Indicates bit 10 of the IA32_SPEC_CTRL MSR is supported */
#define CPUID_7_2_EDX_BHI_CTRL (1U << 4)
+
/* Do not exhibit MXCSR Configuration Dependent Timing (MCDT) behavior */
#define CPUID_7_2_EDX_MCDT_NO (1U << 5)
@@ -1074,12 +1097,16 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
/* Processor ignores nested data breakpoints */
#define CPUID_8000_0021_EAX_NO_NESTED_DATA_BP (1U << 0)
+/* WRMSR to FS_BASE, GS_BASE, or KERNEL_GS_BASE is non-serializing */
+#define CPUID_8000_0021_EAX_FS_GS_BASE_NS (1U << 1)
/* LFENCE is always serializing */
#define CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING (1U << 2)
/* Null Selector Clears Base */
#define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6)
/* Automatic IBRS */
#define CPUID_8000_0021_EAX_AUTO_IBRS (1U << 8)
+/* Indicates support for IC prefetch */
+#define CPUID_8000_0021_EAX_PREFETCHI (1U << 20)
/* Enhanced Return Address Predictor Security */
#define CPUID_8000_0021_EAX_ERAPS (1U << 24)
/* Selective Branch Predictor Barrier */
@@ -1104,6 +1131,7 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_XSAVE_XSAVEC (1U << 1)
#define CPUID_XSAVE_XGETBV1 (1U << 2)
#define CPUID_XSAVE_XSAVES (1U << 3)
+#define CPUID_XSAVE_XFD (1U << 4)
#define CPUID_6_EAX_ARAT (1U << 2)
@@ -1131,7 +1159,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
/* PMM enabled */
#define CPUID_C000_0001_EDX_PMM_EN (1U << 13)
-#define CPUID_VENDOR_SZ 12
+#define CPUID_VENDOR_SZ 12
+#define CPUID_MODEL_ID_SZ 48
#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
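
The vendor constants are the 12-byte CPUID leaf 0 vendor string packed little-endian into EBX/EDX/ECX. A standalone decoding sketch; the ECX value 0x6c65746e ("ntel") is the matching third register, and the memcpy assumes a little-endian host:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint32_t ebx = 0x756e6547, edx = 0x49656e69, ecx = 0x6c65746e;
        char vendor[13];

        memcpy(vendor + 0, &ebx, 4);   /* "Genu" */
        memcpy(vendor + 4, &edx, 4);   /* "ineI" */
        memcpy(vendor + 8, &ecx, 4);   /* "ntel" */
        vendor[12] = '\0';
        printf("%s\n", vendor);        /* GenuineIntel */
        return 0;
    }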
@@ -1610,8 +1639,6 @@ typedef struct {
#define MAX_FIXED_COUNTERS 3
#define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
-#define TARGET_INSN_START_EXTRA_WORDS 1
-
#define NB_OPMASK_REGS 8
/* CPU can't have 0xFFFFFFFF APIC ID, use that value to distinguish
@@ -1747,12 +1774,6 @@ typedef enum TPRAccess {
/* Cache information data structures: */
-enum CacheType {
- DATA_CACHE,
- INSTRUCTION_CACHE,
- UNIFIED_CACHE
-};
-
typedef struct CPUCacheInfo {
enum CacheType type;
uint8_t level;
@@ -1811,11 +1832,6 @@ typedef struct CPUCaches {
CPUCacheInfo *l3_cache;
} CPUCaches;
-typedef struct HVFX86LazyFlags {
- target_ulong result;
- target_ulong auxbits;
-} HVFX86LazyFlags;
-
typedef struct CPUArchState {
/* standard registers */
target_ulong regs[CPU_NB_REGS];
@@ -2057,11 +2073,14 @@ typedef struct CPUArchState {
/* Features that were explicitly enabled/disabled */
FeatureWordArray user_features;
uint32_t cpuid_model[12];
- /* Cache information for CPUID. When legacy-cache=on, the cache data
+ /*
+ * Cache information for CPUID. When legacy-cache=on, the cache data
* on each CPUID leaf will be different, because we keep compatibility
* with old QEMU versions.
*/
- CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd;
+ CPUCaches cache_info;
+ bool enable_legacy_cpuid2_cache;
+ bool enable_legacy_vendor_cache;
/* MTRRs */
uint64_t mtrr_fixed[11];
@@ -2108,8 +2127,7 @@ typedef struct CPUArchState {
QemuMutex xen_timers_lock;
#endif
#if defined(CONFIG_HVF)
- HVFX86LazyFlags hvf_lflags;
- void *hvf_mmio_buf;
+ void *emu_mmio_buf;
#endif
uint64_t mcg_cap;
@@ -2182,7 +2200,6 @@ struct ArchCPU {
bool expose_tcg;
bool migratable;
bool migrate_smi_count;
- bool max_features; /* Enable all supported features automatically */
uint32_t apic_id;
/* Enables publishing of TSC increment and Local APIC bus frequencies to
@@ -2204,6 +2221,9 @@ struct ArchCPU {
/* Features that were filtered out because of missing host capabilities */
FeatureWordArray filtered_features;
+ /* Features force-enabled by the underlying hypervisor (e.g., TDX) */
+ FeatureWordArray forced_on_features;
+
/* Enable PMU CPUID bits. This can't be enabled by default yet because
* it doesn't have ABI stability guarantees, as it passes all PMU CPUID
* bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
@@ -2242,6 +2262,13 @@ struct ArchCPU {
*/
bool legacy_cache;
+ /*
+ * Compatibility bits for old machine types.
+ * If true, use the same cache model in CPUID leaf 0x2
+ * and 0x4.
+ */
+ bool consistent_cache;
+
/* Compatibility bits for old machine types.
* If true, decode the CPUID Function 0x8000001E_ECX to support multiple
* nodes per processor
@@ -2251,12 +2278,24 @@ struct ArchCPU {
/* Compatibility bits for old machine types: */
bool enable_cpuid_0xb;
+ /* Force exposing CPUID leaf 0x1f */
+ bool force_cpuid_0x1f;
+
/* Enable auto level-increase for all CPUID leaves */
bool full_cpuid_auto_level;
- /* Only advertise CPUID leaves defined by the vendor */
+ /*
+ * Compatibility bits for old machine types (PC machine v6.0 and older).
+ * Only advertise CPUID leaves defined by the vendor.
+ */
bool vendor_cpuid_only;
+ /*
+ * Compatibility bits for old machine types (PC machine v10.0 and older).
+ * Only advertise CPUID leaves defined by the vendor.
+ */
+ bool vendor_cpuid_only_v2;
+
/* Only advertise TOPOEXT features that AMD defines */
bool amd_topoext_features_only;
@@ -2329,6 +2368,7 @@ struct X86CPUClass {
*/
const X86CPUModel *model;
+ bool max_features; /* Enable all supported features automatically */
bool host_cpuid_required;
int ordering;
bool migration_safe;
@@ -2367,7 +2407,6 @@ int x86_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void x86_cpu_gdb_init(CPUState *cs);
-void x86_cpu_list(void);
int cpu_x86_support_mca_broadcast(CPUX86State *env);
#ifndef CONFIG_USER_ONLY
@@ -2398,7 +2437,14 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env,
SegmentCache *sc;
unsigned int new_hflags;
- sc = &env->segs[seg_reg];
+ if (seg_reg == R_LDTR) {
+ sc = &env->ldt;
+ } else if (seg_reg == R_TR) {
+ sc = &env->tr;
+ } else {
+ sc = &env->segs[seg_reg];
+ }
+
sc->selector = selector;
sc->base = base;
sc->limit = limit;
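
With the added dispatch, callers can route the system segments through the same helper by passing R_LDTR or R_TR. A usage sketch; the selector, base, limit, and flags values are illustrative:

    /* Load the task register via the unified helper. */
    cpu_x86_load_seg_cache(env, R_TR, 0x28, tss_base, tss_limit,
                           DESC_P_MASK | (9 << DESC_TYPE_SHIFT));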
@@ -2512,6 +2558,17 @@ void cpu_set_apic_feature(CPUX86State *env);
void host_cpuid(uint32_t function, uint32_t count,
uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
bool cpu_has_x2apic_feature(CPUX86State *env);
+bool is_feature_word_cpuid(uint32_t feature, uint32_t index, int reg);
+void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix);
+void mark_forced_on_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix);
+
+static inline bool x86_has_cpuid_0x1f(X86CPU *cpu)
+{
+ return cpu->force_cpuid_0x1f ||
+ x86_has_extended_topo(cpu->env.avail_cpu_topo);
+}
/* helper.c */
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
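
A sketch of the filtering loop these helpers are designed for: compare the requested feature words against what the host can supply, clear the difference, and report it. The helper name and prefix string are illustrative:

    static void filter_features(X86CPU *cpu)
    {
        for (FeatureWord w = 0; w < FEATURE_WORDS; w++) {
            uint64_t requested = cpu->env.features[w];
            uint64_t host = x86_cpu_get_supported_feature_word(cpu, w);

            mark_unavailable_features(cpu, w, requested & ~host,
                                      "requested feature not available on host");
        }
    }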
@@ -2561,8 +2618,6 @@ uint64_t cpu_get_tsc(CPUX86State *env);
#define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32")
#endif
-#define cpu_list x86_cpu_list
-
/* MMU modes definitions */
#define MMU_KSMAP64_IDX 0
#define MMU_KSMAP32_IDX 1
@@ -2597,35 +2652,17 @@ static inline bool is_mmu_index_32(int mmu_index)
return mmu_index & 1;
}
-int x86_mmu_index_pl(CPUX86State *env, unsigned pl);
-int cpu_mmu_index_kernel(CPUX86State *env);
-
#define CC_DST (env->cc_dst)
#define CC_SRC (env->cc_src)
#define CC_SRC2 (env->cc_src2)
#define CC_OP (env->cc_op)
-#include "exec/cpu-all.h"
#include "svm.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
-static inline void cpu_get_tb_cpu_state(CPUX86State *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *flags = env->hflags |
- (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
- if (env->hflags & HF_CS64_MASK) {
- *cs_base = 0;
- *pc = env->eip;
- } else {
- *cs_base = env->segs[R_CS].base;
- *pc = (uint32_t)(*cs_base + env->eip);
- }
-}
-
void do_cpu_init(X86CPU *cpu);
#define MCE_INJECT_BROADCAST 1
@@ -2660,6 +2697,36 @@ static inline int32_t x86_get_a20_mask(CPUX86State *env)
}
}
+static inline uint32_t x86_cpu_family(uint32_t eax)
+{
+ uint32_t family = (eax >> 8) & 0xf;
+
+ if (family == 0xf) {
+ family += (eax >> 20) & 0xff;
+ }
+
+ return family;
+}
+
+static inline uint32_t x86_cpu_model(uint32_t eax)
+{
+ uint32_t family, model;
+
+ family = x86_cpu_family(eax);
+ model = (eax >> 4) & 0xf;
+
+ if (family >= 0x6) {
+ model += ((eax >> 16) & 0xf) << 4;
+ }
+
+ return model;
+}
+
+static inline uint32_t x86_cpu_stepping(uint32_t eax)
+{
+ return eax & 0xf;
+}
+
static inline bool cpu_has_vmx(CPUX86State *env)
{
return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
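
Worked example for the decoding helpers above, using a sample CPUID leaf 1 EAX value (0x000806EC: family 0x6, extended model 0x8, model 0xE, stepping 0xC, so the effective model is 0x8E):

    #include <assert.h>
    #include <stdint.h>

    /* Assumes the three inline helpers above are in scope. */
    int main(void)
    {
        uint32_t eax = 0x000806EC;

        assert(x86_cpu_family(eax)   == 0x6);
        assert(x86_cpu_model(eax)    == 0x8E);  /* (0x8 << 4) | 0xE */
        assert(x86_cpu_stepping(eax) == 0xC);
        return 0;
    }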
@@ -2843,4 +2910,29 @@ static inline bool ctl_has_irq(CPUX86State *env)
# define TARGET_VSYSCALL_PAGE (UINT64_C(-10) << 20)
#endif
+/* majority(NOT a, b, c) = (a ^ b) ? b : c */
+#define MAJ_INV1(a, b, c) ((((a) ^ (b)) & ((b) ^ (c))) ^ (c))
+
+/*
+ * ADD_COUT_VEC(x, y) = majority((x + y) ^ x ^ y, x, y)
+ *
+ * If two corresponding bits in x and y are the same, that's the carry
+ * independent of the value (x+y)^x^y. Hence x^y can be replaced with
+ * 1 in (x+y)^x^y, resulting in majority(NOT (x+y), x, y)
+ */
+#define ADD_COUT_VEC(op1, op2, result) \
+ MAJ_INV1(result, op1, op2)
+
+/*
+ * SUB_COUT_VEC(x, y) = NOT majority(x, NOT y, (x - y) ^ x ^ NOT y)
+ * = majority(NOT x, y, (x - y) ^ x ^ y)
+ *
+ * Note that the carry out is actually a borrow, i.e. it is inverted.
+ * If two corresponding bits in x and y are different, the value of the
+ * bit in (x-y)^x^y likewise does not matter. Hence, x^y can be replaced
+ * with 0 in (x-y)^x^y, resulting in majority(NOT x, y, x-y)
+ */
+#define SUB_COUT_VEC(op1, op2, result) \
+ MAJ_INV1(op1, op2, result)
+
#endif /* I386_CPU_H */
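
Worked example for the carry-out vectors: adding 0xFF + 0x01 in 8 bits carries out of every position, and 0x00 - 0x01 borrows through every position, so both vectors are 0xFF; CF for an 8-bit operation is bit 7 of the vector. A standalone check (macros duplicated here so it compiles without the header):

    #include <assert.h>
    #include <stdint.h>

    #define MAJ_INV1(a, b, c) ((((a) ^ (b)) & ((b) ^ (c))) ^ (c))
    #define ADD_COUT_VEC(op1, op2, result) MAJ_INV1(result, op1, op2)
    #define SUB_COUT_VEC(op1, op2, result) MAJ_INV1(op1, op2, result)

    int main(void)
    {
        uint8_t x = 0xFF, y = 0x01;
        uint8_t sum = (uint8_t)(x + y);              /* 0x00, carry out */
        assert((uint8_t)ADD_COUT_VEC(x, y, sum) == 0xFF);

        uint8_t a = 0x00, b = 0x01;
        uint8_t diff = (uint8_t)(a - b);             /* 0xFF, borrow out */
        assert((uint8_t)SUB_COUT_VEC(a, b, diff) == 0xFF);
        return 0;
    }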