Diffstat (limited to 'target')
-rw-r--r--  target/alpha/cpu.c  1
-rw-r--r--  target/arm/arm-qmp-cmds.c  5
-rw-r--r--  target/arm/cpregs.h  1
-rw-r--r--  target/arm/cpu-features.h  1
-rw-r--r--  target/arm/cpu.c  24
-rw-r--r--  target/arm/hvf-stub.c  20
-rw-r--r--  target/arm/hvf_arm.h  18
-rw-r--r--  target/arm/kvm.c  5
-rw-r--r--  target/arm/kvm_arm.h  1
-rw-r--r--  target/arm/meson.build  6
-rw-r--r--  target/arm/tcg/cpu-v7m.c  1
-rw-r--r--  target/arm/tcg/meson.build  2
-rw-r--r--  target/avr/cpu.c  6
-rw-r--r--  target/hppa/cpu.c  1
-rw-r--r--  target/i386/confidential-guest.h  44
-rw-r--r--  target/i386/cpu-system.c  2
-rw-r--r--  target/i386/cpu.c  524
-rw-r--r--  target/i386/cpu.h  44
-rw-r--r--  target/i386/emulate/x86_flags.c  16
-rw-r--r--  target/i386/host-cpu.c  2
-rw-r--r--  target/i386/host-cpu.h  1
-rw-r--r--  target/i386/kvm/kvm.c  110
-rw-r--r--  target/i386/kvm/kvm_i386.h  15
-rw-r--r--  target/i386/kvm/meson.build  2
-rw-r--r--  target/i386/kvm/tdx-stub.c  20
-rw-r--r--  target/i386/kvm/tdx.c  1289
-rw-r--r--  target/i386/kvm/tdx.h  65
-rw-r--r--  target/i386/machine.c  5
-rw-r--r--  target/i386/monitor.c  1
-rw-r--r--  target/i386/sev-system-stub.c  32
-rw-r--r--  target/i386/sev.c  14
-rw-r--r--  target/i386/tcg/helper-tcg.h  4
-rw-r--r--  target/i386/tcg/tcg-cpu.c  7
-rw-r--r--  target/loongarch/cpu.c  7
-rw-r--r--  target/loongarch/kvm/kvm.c  4
-rw-r--r--  target/loongarch/loongarch-qmp-cmds.c  2
-rw-r--r--  target/m68k/cpu.c  1
-rw-r--r--  target/microblaze/cpu.c  1
-rw-r--r--  target/microblaze/cpu.h  2
-rw-r--r--  target/microblaze/helper.c  71
-rw-r--r--  target/microblaze/helper.h  22
-rw-r--r--  target/microblaze/mmu.c  3
-rw-r--r--  target/microblaze/op_helper.c  104
-rw-r--r--  target/microblaze/translate.c  128
-rw-r--r--  target/mips/cpu.c  9
-rw-r--r--  target/mips/kvm.c  5
-rw-r--r--  target/mips/system/mips-qmp-cmds.c  12
-rw-r--r--  target/openrisc/cpu.c  1
-rw-r--r--  target/ppc/cpu_init.c  7
-rw-r--r--  target/ppc/kvm.c  5
-rw-r--r--  target/ppc/ppc-qmp-cmds.c  12
-rw-r--r--  target/riscv/kvm/kvm-cpu.c  5
-rw-r--r--  target/riscv/riscv-qmp-cmds.c  2
-rw-r--r--  target/riscv/tcg/tcg-cpu.c  26
-rw-r--r--  target/rx/cpu.c  1
-rw-r--r--  target/s390x/cpu.c  9
-rw-r--r--  target/s390x/cpu_models_system.c  2
-rw-r--r--  target/s390x/kvm/kvm.c  5
-rw-r--r--  target/sh4/cpu.c  1
-rw-r--r--  target/sh4/translate.c  2
-rw-r--r--  target/sparc/cpu.c  13
-rw-r--r--  target/sparc/fop_helper.c  1
-rw-r--r--  target/tricore/cpu.c  1
-rw-r--r--  target/xtensa/cpu.c  1
64 files changed, 2472 insertions, 282 deletions
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index 890b84c..2082db4 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -261,6 +261,7 @@ static const TCGCPUOps alpha_tcg_ops = {
.record_sigbus = alpha_cpu_record_sigbus,
#else
.tlb_fill = alpha_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_notreached,
.cpu_exec_interrupt = alpha_cpu_exec_interrupt,
.cpu_exec_halt = alpha_cpu_has_work,
.cpu_exec_reset = cpu_reset,
diff --git a/target/arm/arm-qmp-cmds.c b/target/arm/arm-qmp-cmds.c
index a1a944a..cefd235 100644
--- a/target/arm/arm-qmp-cmds.c
+++ b/target/arm/arm-qmp-cmds.c
@@ -26,10 +26,11 @@
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qapi/qobject-input-visitor.h"
-#include "qapi/qapi-commands-machine-target.h"
-#include "qapi/qapi-commands-misc-target.h"
+#include "qapi/qapi-commands-machine.h"
+#include "qapi/qapi-commands-misc-arm.h"
#include "qobject/qdict.h"
#include "qom/qom-qobject.h"
+#include "cpu.h"
static GICCapability *gic_cap_new(int version)
{
diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
index 2183de8..c1a7ae3 100644
--- a/target/arm/cpregs.h
+++ b/target/arm/cpregs.h
@@ -23,6 +23,7 @@
#include "hw/registerfields.h"
#include "target/arm/kvm-consts.h"
+#include "cpu.h"
/*
* ARMCPRegInfo type field bits:
diff --git a/target/arm/cpu-features.h b/target/arm/cpu-features.h
index 525e4ce..4452e7c 100644
--- a/target/arm/cpu-features.h
+++ b/target/arm/cpu-features.h
@@ -22,6 +22,7 @@
#include "hw/registerfields.h"
#include "qemu/host-utils.h"
+#include "cpu.h"
/*
* Naming convention for isar_feature functions:
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index ca5ed78..e025e24 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -2703,6 +2703,29 @@ static const struct SysemuCPUOps arm_sysemu_ops = {
#endif
#ifdef CONFIG_TCG
+#ifndef CONFIG_USER_ONLY
+static vaddr aprofile_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ /*
+ * The Stage2 and Phys indexes are only used for ptw on arm32,
+ * and all pte's are aligned, so we never produce a wrap for these.
+ * Double check that we're not truncating a 40-bit physical address.
+ */
+ assert((unsigned)mmu_idx < (ARMMMUIdx_Stage2_S & ARM_MMU_IDX_COREIDX_MASK));
+
+ if (!is_a64(cpu_env(cs))) {
+ return (uint32_t)result;
+ }
+
+ /*
+ * TODO: For FEAT_CPA2, decide how we want to resolve
+ * Unpredictable_CPACHECK in AddressIncrement.
+ */
+ return result;
+}
+#endif /* !CONFIG_USER_ONLY */
+
static const TCGCPUOps arm_tcg_ops = {
.mttcg_supported = true,
/* ARM processors have a weak memory model */
@@ -2722,6 +2745,7 @@ static const TCGCPUOps arm_tcg_ops = {
.untagged_addr = aarch64_untagged_addr,
#else
.tlb_fill_align = arm_cpu_tlb_fill_align,
+ .pointer_wrap = aprofile_pointer_wrap,
.cpu_exec_interrupt = arm_cpu_exec_interrupt,
.cpu_exec_halt = arm_cpu_exec_halt,
.cpu_exec_reset = cpu_reset,
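The pointer_wrap hook gives each target control over how a computed guest virtual address (base plus displacement) wraps before it is used: alpha and hppa install cpu_pointer_wrap_notreached, M-profile and AVR use cpu_pointer_wrap_uint32, and A-profile supplies the custom aprofile_pointer_wrap above. As a rough sketch of the 32-bit helper's likely shape (the real implementations live in QEMU's core and are not part of this diff):

/* Illustrative sketch only -- not the upstream definition. */
static vaddr sketch_pointer_wrap_uint32(CPUState *cs, int mmu_idx,
                                        vaddr result, vaddr base)
{
    /* A 32-bit CPU wraps the computed address modulo 2^32. */
    return (uint32_t)result;
}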
diff --git a/target/arm/hvf-stub.c b/target/arm/hvf-stub.c
new file mode 100644
index 0000000..ff13726
--- /dev/null
+++ b/target/arm/hvf-stub.c
@@ -0,0 +1,20 @@
+/*
+ * QEMU Hypervisor.framework (HVF) stubs for ARM
+ *
+ * Copyright (c) Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "hvf_arm.h"
+
+uint32_t hvf_arm_get_default_ipa_bit_size(void)
+{
+ g_assert_not_reached();
+}
+
+uint32_t hvf_arm_get_max_ipa_bit_size(void)
+{
+ g_assert_not_reached();
+}
diff --git a/target/arm/hvf_arm.h b/target/arm/hvf_arm.h
index 26c717b..ea82f26 100644
--- a/target/arm/hvf_arm.h
+++ b/target/arm/hvf_arm.h
@@ -11,7 +11,7 @@
#ifndef QEMU_HVF_ARM_H
#define QEMU_HVF_ARM_H
-#include "cpu.h"
+#include "target/arm/cpu-qom.h"
/**
* hvf_arm_init_debug() - initialize guest debug capabilities
@@ -22,23 +22,7 @@ void hvf_arm_init_debug(void);
void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu);
-#ifdef CONFIG_HVF
-
uint32_t hvf_arm_get_default_ipa_bit_size(void);
uint32_t hvf_arm_get_max_ipa_bit_size(void);
-#else
-
-static inline uint32_t hvf_arm_get_default_ipa_bit_size(void)
-{
- return 0;
-}
-
-static inline uint32_t hvf_arm_get_max_ipa_bit_size(void)
-{
- return 0;
-}
-
-#endif
-
#endif
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index a2791aa..74fda8b 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -1846,6 +1846,11 @@ static int kvm_arm_sve_set_vls(ARMCPU *cpu)
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
int ret;
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index c4178d1..7dc83ca 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -12,6 +12,7 @@
#define QEMU_KVM_ARM_H
#include "system/kvm.h"
+#include "target/arm/cpu-qom.h"
#define KVM_ARM_VGIC_V2 (1 << 0)
#define KVM_ARM_VGIC_V3 (1 << 1)
diff --git a/target/arm/meson.build b/target/arm/meson.build
index b404fa5..7aa81e3 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -3,7 +3,6 @@ arm_common_ss = ss.source_set()
arm_ss.add(files(
'gdbstub.c',
))
-arm_ss.add(zlib)
arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
'cpu64.c',
@@ -28,10 +27,11 @@ arm_user_ss.add(files(
'vfp_fpscr.c',
))
-arm_common_system_ss.add(files('cpu.c'), capstone)
+arm_common_system_ss.add(files('cpu.c'))
arm_common_system_ss.add(when: 'TARGET_AARCH64', if_false: files(
'cpu32-stubs.c'))
arm_common_system_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
+arm_common_system_ss.add(when: 'CONFIG_HVF', if_false: files('hvf-stub.c'))
arm_common_system_ss.add(files(
'arch_dump.c',
'arm-powerctl.c',
@@ -48,7 +48,7 @@ subdir('hvf')
if 'CONFIG_TCG' in config_all_accel
subdir('tcg')
else
- arm_ss.add(files('tcg-stubs.c'))
+ arm_common_system_ss.add(files('tcg-stubs.c'))
endif
target_arch += {'arm': arm_ss}
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
index 95b23d9..8e1a083 100644
--- a/target/arm/tcg/cpu-v7m.c
+++ b/target/arm/tcg/cpu-v7m.c
@@ -249,6 +249,7 @@ static const TCGCPUOps arm_v7m_tcg_ops = {
.record_sigbus = arm_cpu_record_sigbus,
#else
.tlb_fill_align = arm_cpu_tlb_fill_align,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
.cpu_exec_halt = arm_cpu_exec_halt,
.cpu_exec_reset = cpu_reset,
diff --git a/target/arm/tcg/meson.build b/target/arm/tcg/meson.build
index 2d1502b..c59f0f0 100644
--- a/target/arm/tcg/meson.build
+++ b/target/arm/tcg/meson.build
@@ -56,6 +56,8 @@ arm_system_ss.add(files(
arm_system_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('cpu-v7m.c'))
arm_user_ss.add(when: 'TARGET_AARCH64', if_false: files('cpu-v7m.c'))
+arm_common_ss.add(zlib)
+
arm_common_ss.add(files(
'arith_helper.c',
'crypto_helper.c',
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index 2502415..6995de6 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -250,6 +250,12 @@ static const TCGCPUOps avr_tcg_ops = {
.cpu_exec_reset = cpu_reset,
.tlb_fill = avr_cpu_tlb_fill,
.do_interrupt = avr_cpu_do_interrupt,
+ /*
+ * TODO: code and data wrapping are different, but for the most part
+ * AVR only references bytes or aligned code fetches. However, we use
+ * non-aligned MO_16 accesses for stack push/pop.
+ */
+ .pointer_wrap = cpu_pointer_wrap_uint32,
};
static void avr_cpu_class_init(ObjectClass *oc, const void *data)
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index 6465181..2477772 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -269,6 +269,7 @@ static const TCGCPUOps hppa_tcg_ops = {
#ifndef CONFIG_USER_ONLY
.tlb_fill_align = hppa_cpu_tlb_fill_align,
+ .pointer_wrap = cpu_pointer_wrap_notreached,
.cpu_exec_interrupt = hppa_cpu_exec_interrupt,
.cpu_exec_halt = hppa_cpu_has_work,
.cpu_exec_reset = cpu_reset,
diff --git a/target/i386/confidential-guest.h b/target/i386/confidential-guest.h
index 164be76..48b88db 100644
--- a/target/i386/confidential-guest.h
+++ b/target/i386/confidential-guest.h
@@ -39,8 +39,10 @@ struct X86ConfidentialGuestClass {
/* <public> */
int (*kvm_type)(X86ConfidentialGuest *cg);
- uint32_t (*mask_cpuid_features)(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index,
- int reg, uint32_t value);
+ void (*cpu_instance_init)(X86ConfidentialGuest *cg, CPUState *cpu);
+ uint32_t (*adjust_cpuid_features)(X86ConfidentialGuest *cg, uint32_t feature,
+ uint32_t index, int reg, uint32_t value);
+ int (*check_features)(X86ConfidentialGuest *cg, CPUState *cs);
};
/**
@@ -59,25 +61,47 @@ static inline int x86_confidential_guest_kvm_type(X86ConfidentialGuest *cg)
}
}
+static inline void x86_confidential_guest_cpu_instance_init(X86ConfidentialGuest *cg,
+ CPUState *cpu)
+{
+ X86ConfidentialGuestClass *klass = X86_CONFIDENTIAL_GUEST_GET_CLASS(cg);
+
+ if (klass->cpu_instance_init) {
+ klass->cpu_instance_init(cg, cpu);
+ }
+}
+
/**
- * x86_confidential_guest_mask_cpuid_features:
+ * x86_confidential_guest_adjust_cpuid_features:
*
- * Removes unsupported features from a confidential guest's CPUID values, returns
- * the value with the bits removed. The bits removed should be those that KVM
- * provides independent of host-supported CPUID features, but are not supported by
- * the confidential computing firmware.
+ * Adjust the supported features of a confidential guest's CPUID values and
+ * return the adjusted value. Bits may be removed when the confidential
+ * computing firmware does not support them, or added when the firmware
+ * forcibly exposes them to the guest.
*/
-static inline int x86_confidential_guest_mask_cpuid_features(X86ConfidentialGuest *cg,
+static inline int x86_confidential_guest_adjust_cpuid_features(X86ConfidentialGuest *cg,
uint32_t feature, uint32_t index,
int reg, uint32_t value)
{
X86ConfidentialGuestClass *klass = X86_CONFIDENTIAL_GUEST_GET_CLASS(cg);
- if (klass->mask_cpuid_features) {
- return klass->mask_cpuid_features(cg, feature, index, reg, value);
+ if (klass->adjust_cpuid_features) {
+ return klass->adjust_cpuid_features(cg, feature, index, reg, value);
} else {
return value;
}
}
+static inline int x86_confidential_guest_check_features(X86ConfidentialGuest *cg,
+ CPUState *cs)
+{
+ X86ConfidentialGuestClass *klass = X86_CONFIDENTIAL_GUEST_GET_CLASS(cg);
+
+ if (klass->check_features) {
+ return klass->check_features(cg, cs);
+ }
+
+ return 0;
+}
+
#endif
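A backend opts into these class hooks from its class_init. The sketch below is hypothetical (the backend name and the filtered bit are invented for illustration); it only shows the intended wiring, with adjust_cpuid_features rewriting one CPUID register value at a time:

/* Hypothetical backend wiring, for illustration only. */
static uint32_t demo_cg_adjust_cpuid_features(X86ConfidentialGuest *cg,
                                              uint32_t feature, uint32_t index,
                                              int reg, uint32_t value)
{
    if (feature == 1 && reg == R_EDX) {
        value &= ~CPUID_MTRR;   /* drop a bit the firmware would filter */
    }
    return value;
}

static void demo_cg_class_init(ObjectClass *oc, const void *data)
{
    X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);

    x86_klass->adjust_cpuid_features = demo_cg_adjust_cpuid_features;
}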
diff --git a/target/i386/cpu-system.c b/target/i386/cpu-system.c
index 55f192e..b1494aa 100644
--- a/target/i386/cpu-system.c
+++ b/target/i386/cpu-system.c
@@ -24,7 +24,7 @@
#include "qobject/qdict.h"
#include "qapi/qobject-input-visitor.h"
#include "qom/qom-qobject.h"
-#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/qapi-commands-machine.h"
#include "cpu-internal.h"
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 9689f63..c9bd344 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -37,8 +37,9 @@
#include "hw/i386/topology.h"
#include "exec/watchpoint.h"
#ifndef CONFIG_USER_ONLY
+#include "confidential-guest.h"
#include "system/reset.h"
-#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/qapi-commands-machine.h"
#include "system/address-spaces.h"
#include "hw/boards.h"
#include "hw/i386/sgx-epc.h"
@@ -1252,12 +1253,12 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
[FEAT_8000_0021_EAX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
- "no-nested-data-bp", NULL, "lfence-always-serializing", NULL,
+ "no-nested-data-bp", "fs-gs-base-ns", "lfence-always-serializing", NULL,
NULL, NULL, "null-sel-clr-base", NULL,
"auto-ibrs", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
+ "prefetchi", NULL, NULL, NULL,
"eraps", NULL, NULL, "sbpb",
"ibpb-brtype", "srso-no", "srso-user-kernel-no", NULL,
},
@@ -1677,14 +1678,21 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
},
};
-typedef struct FeatureMask {
- FeatureWord index;
- uint64_t mask;
-} FeatureMask;
+bool is_feature_word_cpuid(uint32_t feature, uint32_t index, int reg)
+{
+ FeatureWordInfo *wi;
+ FeatureWord w;
-typedef struct FeatureDep {
- FeatureMask from, to;
-} FeatureDep;
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ wi = &feature_word_info[w];
+ if (wi->type == CPUID_FEATURE_WORD && wi->cpuid.eax == feature &&
+ (!wi->cpuid.needs_ecx || wi->cpuid.ecx == index) &&
+ wi->cpuid.reg == reg) {
+ return true;
+ }
+ }
+ return false;
+}
static FeatureDep feature_dependencies[] = {
{
@@ -1854,9 +1862,6 @@ static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
};
#undef REGISTER
-/* CPUID feature bits available in XSS */
-#define CPUID_XSTATE_XSS_MASK (XSTATE_ARCH_LBR_MASK)
-
ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
[XSTATE_FP_BIT] = {
/* x87 FP state component is always enabled if XSAVE is supported */
@@ -2206,6 +2211,60 @@ static CPUCaches epyc_v4_cache_info = {
},
};
+static CPUCaches epyc_v5_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 64 * KiB,
+ .line_size = 64,
+ .associativity = 4,
+ .partitions = 1,
+ .sets = 256,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 512 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 8 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 8192,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
static const CPUCaches epyc_rome_cache_info = {
.l1d_cache = &(CPUCacheInfo) {
.type = DATA_CACHE,
@@ -2314,6 +2373,60 @@ static const CPUCaches epyc_rome_v3_cache_info = {
},
};
+static const CPUCaches epyc_rome_v5_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 512 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 16 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 16384,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
static const CPUCaches epyc_milan_cache_info = {
.l1d_cache = &(CPUCacheInfo) {
.type = DATA_CACHE,
@@ -2422,6 +2535,60 @@ static const CPUCaches epyc_milan_v2_cache_info = {
},
};
+static const CPUCaches epyc_milan_v3_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 512 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 32 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 32768,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
static const CPUCaches epyc_genoa_cache_info = {
.l1d_cache = &(CPUCacheInfo) {
.type = DATA_CACHE,
@@ -2476,6 +2643,114 @@ static const CPUCaches epyc_genoa_cache_info = {
},
};
+static const CPUCaches epyc_genoa_v2_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 1 * MiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 2048,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 32 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 32768,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
+static const CPUCaches epyc_turin_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 48 * KiB,
+ .line_size = 64,
+ .associativity = 12,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 1 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 32 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 32768,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
/* The following VMX features are not supported by KVM and are left out in the
* CPU definitions:
*
@@ -5233,6 +5508,25 @@ static const X86CPUDefinition builtin_x86_defs[] = {
},
.cache_info = &epyc_v4_cache_info
},
+ {
+ .version = 5,
+ .props = (PropValue[]) {
+ { "overflow-recov", "on" },
+ { "succor", "on" },
+ { "lbrv", "on" },
+ { "tsc-scale", "on" },
+ { "vmcb-clean", "on" },
+ { "flushbyasid", "on" },
+ { "pause-filter", "on" },
+ { "pfthreshold", "on" },
+ { "v-vmsave-vmload", "on" },
+ { "vgif", "on" },
+ { "model-id",
+ "AMD EPYC-v5 Processor" },
+ { /* end of list */ }
+ },
+ .cache_info = &epyc_v5_cache_info
+ },
{ /* end of list */ }
}
},
@@ -5371,6 +5665,25 @@ static const X86CPUDefinition builtin_x86_defs[] = {
{ /* end of list */ }
},
},
+ {
+ .version = 5,
+ .props = (PropValue[]) {
+ { "overflow-recov", "on" },
+ { "succor", "on" },
+ { "lbrv", "on" },
+ { "tsc-scale", "on" },
+ { "vmcb-clean", "on" },
+ { "flushbyasid", "on" },
+ { "pause-filter", "on" },
+ { "pfthreshold", "on" },
+ { "v-vmsave-vmload", "on" },
+ { "vgif", "on" },
+ { "model-id",
+ "AMD EPYC-Rome-v5 Processor" },
+ { /* end of list */ }
+ },
+ .cache_info = &epyc_rome_v5_cache_info
+ },
{ /* end of list */ }
}
},
@@ -5446,6 +5759,25 @@ static const X86CPUDefinition builtin_x86_defs[] = {
},
.cache_info = &epyc_milan_v2_cache_info
},
+ {
+ .version = 3,
+ .props = (PropValue[]) {
+ { "overflow-recov", "on" },
+ { "succor", "on" },
+ { "lbrv", "on" },
+ { "tsc-scale", "on" },
+ { "vmcb-clean", "on" },
+ { "flushbyasid", "on" },
+ { "pause-filter", "on" },
+ { "pfthreshold", "on" },
+ { "v-vmsave-vmload", "on" },
+ { "vgif", "on" },
+ { "model-id",
+ "AMD EPYC-Milan-v3 Processor" },
+ { /* end of list */ }
+ },
+ .cache_info = &epyc_milan_v3_cache_info
+ },
{ /* end of list */ }
}
},
@@ -5520,6 +5852,31 @@ static const X86CPUDefinition builtin_x86_defs[] = {
.xlevel = 0x80000022,
.model_id = "AMD EPYC-Genoa Processor",
.cache_info = &epyc_genoa_cache_info,
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ {
+ .version = 2,
+ .props = (PropValue[]) {
+ { "overflow-recov", "on" },
+ { "succor", "on" },
+ { "lbrv", "on" },
+ { "tsc-scale", "on" },
+ { "vmcb-clean", "on" },
+ { "flushbyasid", "on" },
+ { "pause-filter", "on" },
+ { "pfthreshold", "on" },
+ { "v-vmsave-vmload", "on" },
+ { "vgif", "on" },
+ { "fs-gs-base-ns", "on" },
+ { "perfmon-v2", "on" },
+ { "model-id",
+ "AMD EPYC-Genoa-v2 Processor" },
+ { /* end of list */ }
+ },
+ .cache_info = &epyc_genoa_v2_cache_info
+ },
+ { /* end of list */ }
+ }
},
{
.name = "YongFeng",
@@ -5657,6 +6014,89 @@ static const X86CPUDefinition builtin_x86_defs[] = {
{ /* end of list */ }
}
},
+ {
+ .name = "EPYC-Turin",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 26,
+ .model = 0,
+ .stepping = 0,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
+ CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
+ CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_PCID | CPUID_EXT_CX16 | CPUID_EXT_FMA |
+ CPUID_EXT_SSSE3 | CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ |
+ CPUID_EXT_SSE3,
+ .features[FEAT_1_EDX] =
+ CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
+ CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
+ CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
+ CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
+ CPUID_VME | CPUID_FP87,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
+ CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
+ CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_AVX512F |
+ CPUID_7_0_EBX_AVX512DQ | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_AVX512IFMA |
+ CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB |
+ CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_SHA_NI |
+ CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL,
+ .features[FEAT_7_0_ECX] =
+ CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
+ CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
+ CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
+ CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
+ CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57 |
+ CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_MOVDIRI |
+ CPUID_7_0_ECX_MOVDIR64B,
+ .features[FEAT_7_0_EDX] =
+ CPUID_7_0_EDX_FSRM | CPUID_7_0_EDX_AVX512_VP2INTERSECT,
+ .features[FEAT_7_1_EAX] =
+ CPUID_7_1_EAX_AVX_VNNI | CPUID_7_1_EAX_AVX512_BF16,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
+ CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
+ CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
+ CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
+ CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0007_EBX] =
+ CPUID_8000_0007_EBX_OVERFLOW_RECOV | CPUID_8000_0007_EBX_SUCCOR,
+ .features[FEAT_8000_0008_EBX] =
+ CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR |
+ CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB |
+ CPUID_8000_0008_EBX_IBRS | CPUID_8000_0008_EBX_STIBP |
+ CPUID_8000_0008_EBX_STIBP_ALWAYS_ON |
+ CPUID_8000_0008_EBX_AMD_SSBD | CPUID_8000_0008_EBX_AMD_PSFD,
+ .features[FEAT_8000_0021_EAX] =
+ CPUID_8000_0021_EAX_NO_NESTED_DATA_BP |
+ CPUID_8000_0021_EAX_FS_GS_BASE_NS |
+ CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING |
+ CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE |
+ CPUID_8000_0021_EAX_AUTO_IBRS | CPUID_8000_0021_EAX_PREFETCHI |
+ CPUID_8000_0021_EAX_SBPB | CPUID_8000_0021_EAX_IBPB_BRTYPE |
+ CPUID_8000_0021_EAX_SRSO_USER_KERNEL_NO,
+ .features[FEAT_8000_0022_EAX] =
+ CPUID_8000_0022_EAX_PERFMON_V2,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES,
+ .features[FEAT_SVM] =
+ CPUID_SVM_NPT | CPUID_SVM_LBRV | CPUID_SVM_NRIPSAVE |
+ CPUID_SVM_TSCSCALE | CPUID_SVM_VMCBCLEAN | CPUID_SVM_FLUSHASID |
+ CPUID_SVM_PAUSEFILTER | CPUID_SVM_PFTHRESHOLD |
+ CPUID_SVM_V_VMSAVE_VMLOAD | CPUID_SVM_VGIF |
+ CPUID_SVM_VNMI | CPUID_SVM_SVME_ADDR_CHK,
+ .xlevel = 0x80000022,
+ .model_id = "AMD EPYC-Turin Processor",
+ .cache_info = &epyc_turin_cache_info,
+ },
};
/*
@@ -5766,7 +6206,7 @@ static const TypeInfo max_x86_cpu_type_info = {
.class_init = max_x86_cpu_class_init,
};
-static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
+static char *feature_word_description(FeatureWordInfo *f)
{
assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
@@ -5775,11 +6215,15 @@ static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
{
const char *reg = get_register_name_32(f->cpuid.reg);
assert(reg);
- return g_strdup_printf("CPUID.%02XH:%s",
- f->cpuid.eax, reg);
+ if (!f->cpuid.needs_ecx) {
+ return g_strdup_printf("CPUID[eax=%02Xh].%s", f->cpuid.eax, reg);
+ } else {
+ return g_strdup_printf("CPUID[eax=%02Xh,ecx=%02Xh].%s",
+ f->cpuid.eax, f->cpuid.ecx, reg);
+ }
}
case MSR_FEATURE_WORD:
- return g_strdup_printf("MSR(%02XH)",
+ return g_strdup_printf("MSR(%02Xh)",
f->msr.index);
}
@@ -5799,12 +6243,13 @@ static bool x86_cpu_have_filtered_features(X86CPU *cpu)
return false;
}
-static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
- const char *verbose_prefix)
+void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix)
{
CPUX86State *env = &cpu->env;
FeatureWordInfo *f = &feature_word_info[w];
int i;
+ g_autofree char *feat_word_str = feature_word_description(f);
if (!cpu->force_features) {
env->features[w] &= ~mask;
@@ -5817,7 +6262,35 @@ static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
for (i = 0; i < 64; ++i) {
if ((1ULL << i) & mask) {
- g_autofree char *feat_word_str = feature_word_description(f, i);
+ warn_report("%s: %s%s%s [bit %d]",
+ verbose_prefix,
+ feat_word_str,
+ f->feat_names[i] ? "." : "",
+ f->feat_names[i] ? f->feat_names[i] : "", i);
+ }
+ }
+}
+
+void mark_forced_on_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix)
+{
+ CPUX86State *env = &cpu->env;
+ FeatureWordInfo *f = &feature_word_info[w];
+ int i;
+
+ if (!cpu->force_features) {
+ env->features[w] |= mask;
+ }
+
+ cpu->forced_on_features[w] |= mask;
+
+ if (!verbose_prefix) {
+ return;
+ }
+
+ for (i = 0; i < 64; ++i) {
+ if ((1ULL << i) & mask) {
+ g_autofree char *feat_word_str = feature_word_description(f);
warn_report("%s: %s%s%s [bit %d]",
verbose_prefix,
feat_word_str,
@@ -7044,7 +7517,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
break;
case 0x1F:
/* V2 Extended Topology Enumeration Leaf */
- if (!x86_has_extended_topo(env->avail_cpu_topo)) {
+ if (!x86_has_cpuid_0x1f(cpu)) {
*eax = *ebx = *ecx = *edx = 0;
break;
}
@@ -7908,7 +8381,7 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
* cpu->vendor_cpuid_only has been unset for compatibility with older
* machine types.
*/
- if (x86_has_extended_topo(env->avail_cpu_topo) &&
+ if (x86_has_cpuid_0x1f(cpu) &&
(IS_INTEL_CPU(env) || !cpu->vendor_cpuid_only)) {
x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
}
@@ -8543,6 +9016,13 @@ static void x86_cpu_post_initfn(Object *obj)
}
accel_cpu_instance_init(CPU(obj));
+
+#ifndef CONFIG_USER_ONLY
+ if (current_machine && current_machine->cgs) {
+ x86_confidential_guest_cpu_instance_init(
+ X86_CONFIDENTIAL_GUEST(current_machine->cgs), (CPU(obj)));
+ }
+#endif
}
static void x86_cpu_init_default_topo(X86CPU *cpu)
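mark_unavailable_features() and the new mark_forced_on_features() are now mirror images: the former clears bits and records them in filtered_features, the latter sets bits and records them in forced_on_features, and both emit one warning per bit when given a verbose prefix. A hedged usage sketch (the caller is hypothetical):

/* Hypothetical caller, e.g. a confidential-guest backend during expansion. */
static void demo_apply_platform_policy(X86CPU *cpu)
{
    /* Bits the platform always exposes to the guest... */
    mark_forced_on_features(cpu, FEAT_1_EDX, CPUID_MCE | CPUID_MCA,
                            "forced on by platform");
    /* ...and a bit it never supports. */
    mark_unavailable_features(cpu, FEAT_1_ECX, CPUID_EXT_MONITOR,
                              "removed by platform");
}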
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index c51e0a4..1146465 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -584,6 +584,7 @@ typedef enum X86Seg {
#define XSTATE_OPMASK_BIT 5
#define XSTATE_ZMM_Hi256_BIT 6
#define XSTATE_Hi16_ZMM_BIT 7
+#define XSTATE_PT_BIT 8
#define XSTATE_PKRU_BIT 9
#define XSTATE_ARCH_LBR_BIT 15
#define XSTATE_XTILE_CFG_BIT 17
@@ -597,6 +598,7 @@ typedef enum X86Seg {
#define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT)
#define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT)
#define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT)
+#define XSTATE_PT_MASK (1ULL << XSTATE_PT_BIT)
#define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT)
#define XSTATE_ARCH_LBR_MASK (1ULL << XSTATE_ARCH_LBR_BIT)
#define XSTATE_XTILE_CFG_MASK (1ULL << XSTATE_XTILE_CFG_BIT)
@@ -619,6 +621,11 @@ typedef enum X86Seg {
XSTATE_Hi16_ZMM_MASK | XSTATE_PKRU_MASK | \
XSTATE_XTILE_CFG_MASK | XSTATE_XTILE_DATA_MASK)
+/* CPUID feature bits available in XSS */
+#define CPUID_XSTATE_XSS_MASK (XSTATE_ARCH_LBR_MASK)
+
+#define CPUID_XSTATE_MASK (CPUID_XSTATE_XCR0_MASK | CPUID_XSTATE_XSS_MASK)
+
/* CPUID feature words */
typedef enum FeatureWord {
FEAT_1_EDX, /* CPUID[1].EDX */
@@ -667,6 +674,15 @@ typedef enum FeatureWord {
FEATURE_WORDS,
} FeatureWord;
+typedef struct FeatureMask {
+ FeatureWord index;
+ uint64_t mask;
+} FeatureMask;
+
+typedef struct FeatureDep {
+ FeatureMask from, to;
+} FeatureDep;
+
typedef uint64_t FeatureWordArray[FEATURE_WORDS];
uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
@@ -899,6 +915,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_0_ECX_LA57 (1U << 16)
/* Read Processor ID */
#define CPUID_7_0_ECX_RDPID (1U << 22)
+/* KeyLocker */
+#define CPUID_7_0_ECX_KeyLocker (1U << 23)
/* Bus Lock Debug Exception */
#define CPUID_7_0_ECX_BUS_LOCK_DETECT (1U << 24)
/* Cache Line Demote Instruction */
@@ -920,6 +938,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_0_EDX_FSRM (1U << 4)
/* AVX512 Vector Pair Intersection to a Pair of Mask Registers */
#define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8)
+ /* "md_clear" VERW clears CPU buffers */
+#define CPUID_7_0_EDX_MD_CLEAR (1U << 10)
/* SERIALIZE instruction */
#define CPUID_7_0_EDX_SERIALIZE (1U << 14)
/* TSX Suspend Load Address Tracking instruction */
@@ -957,6 +977,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_1_EAX_AVX_VNNI (1U << 4)
/* AVX512 BFloat16 Instruction */
#define CPUID_7_1_EAX_AVX512_BF16 (1U << 5)
+/* Linear address space separation */
+#define CPUID_7_1_EAX_LASS (1U << 6)
/* CMPCCXADD Instructions */
#define CPUID_7_1_EAX_CMPCCXADD (1U << 7)
/* Fast Zero REP MOVS */
@@ -1070,12 +1092,16 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
/* Processor ignores nested data breakpoints */
#define CPUID_8000_0021_EAX_NO_NESTED_DATA_BP (1U << 0)
+/* WRMSR to FS_BASE, GS_BASE, or KERNEL_GS_BASE is non-serializing */
+#define CPUID_8000_0021_EAX_FS_GS_BASE_NS (1U << 1)
/* LFENCE is always serializing */
#define CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING (1U << 2)
/* Null Selector Clears Base */
#define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6)
/* Automatic IBRS */
#define CPUID_8000_0021_EAX_AUTO_IBRS (1U << 8)
+/* Indicates support for IC prefetch */
+#define CPUID_8000_0021_EAX_PREFETCHI (1U << 20)
/* Enhanced Return Address Predictor Security */
#define CPUID_8000_0021_EAX_ERAPS (1U << 24)
/* Selective Branch Predictor Barrier */
@@ -1100,6 +1126,7 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_XSAVE_XSAVEC (1U << 1)
#define CPUID_XSAVE_XGETBV1 (1U << 2)
#define CPUID_XSAVE_XSAVES (1U << 3)
+#define CPUID_XSAVE_XFD (1U << 4)
#define CPUID_6_EAX_ARAT (1U << 2)
@@ -2192,6 +2219,9 @@ struct ArchCPU {
/* Features that were filtered out because of missing host capabilities */
FeatureWordArray filtered_features;
+ /* Features that are forced enabled by underlying hypervisor, e.g., TDX */
+ FeatureWordArray forced_on_features;
+
/* Enable PMU CPUID bits. This can't be enabled by default yet because
* it doesn't have ABI stability guarantees, as it passes all PMU CPUID
* bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
@@ -2239,6 +2269,9 @@ struct ArchCPU {
/* Compatibility bits for old machine types: */
bool enable_cpuid_0xb;
+ /* Force to enable cpuid 0x1f */
+ bool enable_cpuid_0x1f;
+
/* Enable auto level-increase for all CPUID leaves */
bool full_cpuid_auto_level;
@@ -2499,6 +2532,17 @@ void cpu_set_apic_feature(CPUX86State *env);
void host_cpuid(uint32_t function, uint32_t count,
uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
bool cpu_has_x2apic_feature(CPUX86State *env);
+bool is_feature_word_cpuid(uint32_t feature, uint32_t index, int reg);
+void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix);
+void mark_forced_on_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix);
+
+static inline bool x86_has_cpuid_0x1f(X86CPU *cpu)
+{
+ return cpu->enable_cpuid_0x1f ||
+ x86_has_extended_topo(cpu->env.avail_cpu_topo);
+}
/* helper.c */
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
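Moving FeatureMask and FeatureDep into cpu.h lets code outside cpu.c express "these bits imply those bits" relationships in the same form as the existing feature_dependencies table: when none of the from bits survive, the to bits are cleared as dependents. An illustrative entry (this specific pairing is an example, not necessarily an upstream table entry):

/* Illustration: all XSAVE sub-leaf features depend on CPUID.1:ECX.XSAVE. */
static const FeatureDep demo_dep = {
    .from = { FEAT_1_ECX, CPUID_EXT_XSAVE },
    .to   = { FEAT_XSAVE, ~0ull },
};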
diff --git a/target/i386/emulate/x86_flags.c b/target/i386/emulate/x86_flags.c
index 47bc197..cc138c7 100644
--- a/target/i386/emulate/x86_flags.c
+++ b/target/i386/emulate/x86_flags.c
@@ -255,19 +255,19 @@ void lflags_to_rflags(CPUX86State *env)
void rflags_to_lflags(CPUX86State *env)
{
- target_ulong cf_xor_of;
+ target_ulong cf_af, cf_xor_of;
+ /* Leave the low byte zero so that parity is always even... */
+ env->cc_dst = !(env->eflags & CC_Z) << 8;
+
+ /* ... and therefore cc_src always uses opposite polarity. */
env->cc_src = CC_P;
env->cc_src ^= env->eflags & (CC_S | CC_P);
/* rotate right by one to move CF and AF into the carry-out positions */
- env->cc_src |= (
- (env->eflags >> 1) |
- (env->eflags << (TARGET_LONG_BITS - 1))) & (CC_C | CC_A);
+ cf_af = env->eflags & (CC_C | CC_A);
+ env->cc_src |= ((cf_af >> 1) | (cf_af << (TARGET_LONG_BITS - 1)));
- cf_xor_of = (env->eflags & (CC_C | CC_O)) + (CC_O - CC_C);
+ cf_xor_of = ((env->eflags & (CC_C | CC_O)) + (CC_O - CC_C)) & CC_O;
env->cc_src |= -cf_xor_of & LF_MASK_PO;
-
- /* Leave the low byte zero so that parity is not affected. */
- env->cc_dst = !(env->eflags & CC_Z) << 8;
}
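The rewrite isolates CF and AF before rotating, so CF (EFLAGS bit 0) lands in the top bit and AF (bit 4) lands in bit 3, the two carry-out positions of the lazy-flags encoding; the new "& CC_O" keeps stray low bits of the CF^OF sum from leaking into cc_src. A standalone toy (not QEMU code; assumes a 64-bit target_ulong and the standard EFLAGS bit layout) that shows both tricks:

#include <inttypes.h>
#include <stdio.h>

#define CC_C 0x0001u            /* CF, bit 0  */
#define CC_A 0x0010u            /* AF, bit 4  */
#define CC_O 0x0800u            /* OF, bit 11 */

int main(void)
{
    uint64_t eflags = CC_C | CC_A;                 /* CF=1, AF=1, OF=0 */

    /* Rotate right by one: CF -> bit 63, AF -> bit 3. */
    uint64_t cf_af = eflags & (CC_C | CC_A);
    uint64_t rot = (cf_af >> 1) | (cf_af << 63);
    printf("bit63=%d bit3=%d\n", (int)(rot >> 63 & 1), (int)(rot >> 3 & 1));

    /* The sum sets bit 11 iff CF != OF; "& CC_O" discards the residue. */
    uint64_t cf_xor_of = ((eflags & (CC_C | CC_O)) + (CC_O - CC_C)) & CC_O;
    printf("cf_xor_of=%#" PRIx64 "\n", cf_xor_of);
    return 0;
}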
diff --git a/target/i386/host-cpu.c b/target/i386/host-cpu.c
index a2d3830..7512567 100644
--- a/target/i386/host-cpu.c
+++ b/target/i386/host-cpu.c
@@ -15,7 +15,7 @@
#include "system/system.h"
/* Note: Only safe for use on x86(-64) hosts */
-static uint32_t host_cpu_phys_bits(void)
+uint32_t host_cpu_phys_bits(void)
{
uint32_t eax;
uint32_t host_phys_bits;
diff --git a/target/i386/host-cpu.h b/target/i386/host-cpu.h
index 6a9bc91..b97ec01 100644
--- a/target/i386/host-cpu.h
+++ b/target/i386/host-cpu.h
@@ -10,6 +10,7 @@
#ifndef HOST_CPU_H
#define HOST_CPU_H
+uint32_t host_cpu_phys_bits(void);
void host_cpu_instance_init(X86CPU *cpu);
void host_cpu_max_instance_init(X86CPU *cpu);
bool host_cpu_realizefn(CPUState *cs, Error **errp);
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index c9a3c02..a6bc089 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -38,6 +38,7 @@
#include "kvm_i386.h"
#include "../confidential-guest.h"
#include "sev.h"
+#include "tdx.h"
#include "xen-emu.h"
#include "hyperv.h"
#include "hyperv-proto.h"
@@ -192,6 +193,7 @@ static const char *vm_type_name[] = {
[KVM_X86_SEV_VM] = "SEV",
[KVM_X86_SEV_ES_VM] = "SEV-ES",
[KVM_X86_SNP_VM] = "SEV-SNP",
+ [KVM_X86_TDX_VM] = "TDX",
};
bool kvm_is_vm_type_supported(int type)
@@ -326,7 +328,7 @@ void kvm_synchronize_all_tsc(void)
{
CPUState *cpu;
- if (kvm_enabled()) {
+ if (kvm_enabled() && !is_tdx_vm()) {
CPU_FOREACH(cpu) {
run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
}
@@ -392,7 +394,7 @@ static bool host_tsx_broken(void)
/* Returns the value for a specific register on the cpuid entry
*/
-static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
+uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
uint32_t ret = 0;
switch (reg) {
@@ -414,9 +416,9 @@ static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
/* Find matching entry for function/index on kvm_cpuid2 struct
*/
-static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
- uint32_t function,
- uint32_t index)
+struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
+ uint32_t function,
+ uint32_t index)
{
int i;
for (i = 0; i < cpuid->nent; ++i) {
@@ -572,7 +574,7 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
}
if (current_machine->cgs) {
- ret = x86_confidential_guest_mask_cpuid_features(
+ ret = x86_confidential_guest_adjust_cpuid_features(
X86_CONFIDENTIAL_GUEST(current_machine->cgs),
function, index, reg, ret);
}
@@ -868,6 +870,15 @@ static int kvm_arch_set_tsc_khz(CPUState *cs)
int r, cur_freq;
bool set_ioctl = false;
+ /*
+ * The TSC of a TD vcpu is immutable: it cannot be set or changed via the
+ * vcpu-scope VM_SET_TSC_KHZ; it can only be initialized via the VM-scope
+ * VM_SET_TSC_KHZ before the KVM_TDX_INIT_VM ioctl, in tdx_pre_create_vcpu()
+ */
+ if (is_tdx_vm()) {
+ return 0;
+ }
+
if (!env->tsc_khz) {
return 0;
}
@@ -1779,8 +1790,6 @@ static int hyperv_init_vcpu(X86CPU *cpu)
static Error *invtsc_mig_blocker;
-#define KVM_MAX_CPUID_ENTRIES 100
-
static void kvm_init_xsave(CPUX86State *env)
{
if (has_xsave2) {
@@ -1823,9 +1832,8 @@ static void kvm_init_nested_state(CPUX86State *env)
}
}
-static uint32_t kvm_x86_build_cpuid(CPUX86State *env,
- struct kvm_cpuid_entry2 *entries,
- uint32_t cpuid_i)
+uint32_t kvm_x86_build_cpuid(CPUX86State *env, struct kvm_cpuid_entry2 *entries,
+ uint32_t cpuid_i)
{
uint32_t limit, i, j;
uint32_t unused;
@@ -1864,7 +1872,7 @@ static uint32_t kvm_x86_build_cpuid(CPUX86State *env,
break;
}
case 0x1f:
- if (!x86_has_extended_topo(env->avail_cpu_topo)) {
+ if (!x86_has_cpuid_0x1f(env_archcpu(env))) {
cpuid_i--;
break;
}
@@ -2052,6 +2060,15 @@ full:
abort();
}
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ if (is_tdx_vm()) {
+ return tdx_pre_create_vcpu(cpu, errp);
+ }
+
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
struct {
@@ -2076,6 +2093,14 @@ int kvm_arch_init_vcpu(CPUState *cs)
int r;
Error *local_err = NULL;
+ if (current_machine->cgs) {
+ r = x86_confidential_guest_check_features(
+ X86_CONFIDENTIAL_GUEST(current_machine->cgs), cs);
+ if (r < 0) {
+ return r;
+ }
+ }
+
memset(&cpuid_data, 0, sizeof(cpuid_data));
cpuid_i = 0;
@@ -3206,16 +3231,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
Error *local_err = NULL;
/*
- * Initialize SEV context, if required
- *
- * If no memory encryption is requested (ms->cgs == NULL) this is
- * a no-op.
- *
- * It's also a no-op if a non-SEV confidential guest support
- * mechanism is selected. SEV is the only mechanism available to
- * select on x86 at present, so this doesn't arise, but if new
- * mechanisms are supported in future (e.g. TDX), they'll need
- * their own initialization either here or elsewhere.
+ * Initialize confidential guest (SEV/TDX) context, if required
*/
if (ms->cgs) {
ret = confidential_guest_kvm_init(ms->cgs, &local_err);
@@ -3856,32 +3872,34 @@ static void kvm_init_msrs(X86CPU *cpu)
CPUX86State *env = &cpu->env;
kvm_msr_buf_reset(cpu);
- if (has_msr_arch_capabs) {
- kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
- env->features[FEAT_ARCH_CAPABILITIES]);
- }
- if (has_msr_core_capabs) {
- kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
- env->features[FEAT_CORE_CAPABILITY]);
- }
+ if (!is_tdx_vm()) {
+ if (has_msr_arch_capabs) {
+ kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
+ env->features[FEAT_ARCH_CAPABILITIES]);
+ }
+
+ if (has_msr_core_capabs) {
+ kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
+ env->features[FEAT_CORE_CAPABILITY]);
+ }
+
+ if (has_msr_perf_capabs && cpu->enable_pmu) {
+ kvm_msr_entry_add_perf(cpu, env->features);
+ }
- if (has_msr_perf_capabs && cpu->enable_pmu) {
- kvm_msr_entry_add_perf(cpu, env->features);
+ /*
+ * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
+ * all kernels with MSR features should have them.
+ */
+ if (kvm_feature_msrs && cpu_has_vmx(env)) {
+ kvm_msr_entry_add_vmx(cpu, env->features);
+ }
}
if (has_msr_ucode_rev) {
kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev);
}
-
- /*
- * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
- * all kernels with MSR features should have them.
- */
- if (kvm_feature_msrs && cpu_has_vmx(env)) {
- kvm_msr_entry_add_vmx(cpu, env->features);
- }
-
assert(kvm_buf_set_msrs(cpu) == 0);
}
@@ -6121,6 +6139,16 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
case KVM_EXIT_HYPERCALL:
ret = kvm_handle_hypercall(run);
break;
+ case KVM_EXIT_SYSTEM_EVENT:
+ switch (run->system_event.type) {
+ case KVM_SYSTEM_EVENT_TDX_FATAL:
+ ret = tdx_handle_report_fatal_error(cpu, run);
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ break;
default:
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
ret = -1;
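kvm_arch_pre_create_vcpu() is the new arch hook that runs before the vcpu is actually created, which is what lets tdx_pre_create_vcpu() program vcpu-affecting VM state (such as the VM-scope TSC frequency) early enough. A hedged sketch of the expected call order in the accelerator core, which is not part of this diff (the wrapper name is invented; only the hook itself is real):

/* Sketch of the assumed caller in accel/kvm (illustrative only). */
static int demo_create_vcpu(CPUState *cpu, Error **errp)
{
    int r = kvm_arch_pre_create_vcpu(cpu, errp);   /* TDX work happens here */
    if (r < 0) {
        return r;
    }
    return kvm_vm_ioctl(kvm_state, KVM_CREATE_VCPU, kvm_arch_vcpu_id(cpu));
}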
diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h
index 88565e8..5f83e88 100644
--- a/target/i386/kvm/kvm_i386.h
+++ b/target/i386/kvm/kvm_i386.h
@@ -13,6 +13,8 @@
#include "system/kvm.h"
+#define KVM_MAX_CPUID_ENTRIES 100
+
/* always false if !CONFIG_KVM */
#define kvm_pit_in_kernel() \
(kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
@@ -42,6 +44,13 @@ void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask);
#ifdef CONFIG_KVM
+#include <linux/kvm.h>
+
+typedef struct KvmCpuidInfo {
+ struct kvm_cpuid2 cpuid;
+ struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
+} KvmCpuidInfo;
+
bool kvm_is_vm_type_supported(int type);
bool kvm_has_adjust_clock_stable(void);
bool kvm_has_exception_payload(void);
@@ -57,6 +66,12 @@ uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address);
void kvm_update_msi_routes_all(void *private, bool global,
uint32_t index, uint32_t mask);
+struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
+ uint32_t function,
+ uint32_t index);
+uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg);
+uint32_t kvm_x86_build_cpuid(CPUX86State *env, struct kvm_cpuid_entry2 *entries,
+ uint32_t cpuid_i);
#endif /* CONFIG_KVM */
void kvm_pc_setup_irq_routing(bool pci_enabled);
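KvmCpuidInfo pairs the flexible-array struct kvm_cpuid2 with a fixed KVM_MAX_CPUID_ENTRIES backing store, so a full CPUID table can be built without a separate allocation, and the newly exported helpers can then query it. A hedged usage sketch:

/* Sketch: build the CPUID table for a vcpu and read leaf 1's EAX. */
static uint32_t demo_leaf1_eax(CPUX86State *env)
{
    KvmCpuidInfo info = {};
    struct kvm_cpuid_entry2 *e;

    info.cpuid.nent = kvm_x86_build_cpuid(env, info.entries, 0);
    e = cpuid_find_entry(&info.cpuid, 0x1, 0);
    return e ? cpuid_entry_get_reg(e, R_EAX) : 0;
}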
diff --git a/target/i386/kvm/meson.build b/target/i386/kvm/meson.build
index 3996caf..3f44cde 100644
--- a/target/i386/kvm/meson.build
+++ b/target/i386/kvm/meson.build
@@ -8,6 +8,8 @@ i386_kvm_ss.add(files(
i386_kvm_ss.add(when: 'CONFIG_XEN_EMU', if_true: files('xen-emu.c'))
+i386_kvm_ss.add(when: 'CONFIG_TDX', if_true: files('tdx.c'), if_false: files('tdx-stub.c'))
+
i386_system_ss.add(when: 'CONFIG_HYPERV', if_true: files('hyperv.c'), if_false: files('hyperv-stub.c'))
i386_system_ss.add_all(when: 'CONFIG_KVM', if_true: i386_kvm_ss)
diff --git a/target/i386/kvm/tdx-stub.c b/target/i386/kvm/tdx-stub.c
new file mode 100644
index 0000000..720a4ff
--- /dev/null
+++ b/target/i386/kvm/tdx-stub.c
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "qemu/osdep.h"
+
+#include "tdx.h"
+
+int tdx_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return -EINVAL;
+}
+
+int tdx_parse_tdvf(void *flash_ptr, int size)
+{
+ return -EINVAL;
+}
+
+int tdx_handle_report_fatal_error(X86CPU *cpu, struct kvm_run *run)
+{
+ return -EINVAL;
+}
diff --git a/target/i386/kvm/tdx.c b/target/i386/kvm/tdx.c
new file mode 100644
index 0000000..0a21ae5
--- /dev/null
+++ b/target/i386/kvm/tdx.c
@@ -0,0 +1,1289 @@
+/*
+ * QEMU TDX support
+ *
+ * Copyright (c) 2025 Intel Corporation
+ *
+ * Author:
+ * Xiaoyao Li <xiaoyao.li@intel.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qemu/base64.h"
+#include "qemu/mmap-alloc.h"
+#include "qapi/error.h"
+#include "qom/object_interfaces.h"
+#include "crypto/hash.h"
+#include "system/kvm_int.h"
+#include "system/runstate.h"
+#include "system/system.h"
+#include "system/ramblock.h"
+
+#include <linux/kvm_para.h>
+
+#include "cpu.h"
+#include "cpu-internal.h"
+#include "host-cpu.h"
+#include "hw/i386/e820_memory_layout.h"
+#include "hw/i386/tdvf.h"
+#include "hw/i386/x86.h"
+#include "hw/i386/tdvf-hob.h"
+#include "kvm_i386.h"
+#include "tdx.h"
+
+#include "standard-headers/asm-x86/kvm_para.h"
+
+#define TDX_MIN_TSC_FREQUENCY_KHZ (100 * 1000)
+#define TDX_MAX_TSC_FREQUENCY_KHZ (10 * 1000 * 1000)
+
+#define TDX_TD_ATTRIBUTES_DEBUG BIT_ULL(0)
+#define TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE BIT_ULL(28)
+#define TDX_TD_ATTRIBUTES_PKS BIT_ULL(30)
+#define TDX_TD_ATTRIBUTES_PERFMON BIT_ULL(63)
+
+#define TDX_SUPPORTED_TD_ATTRS (TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE |\
+ TDX_TD_ATTRIBUTES_PKS | \
+ TDX_TD_ATTRIBUTES_PERFMON)
+
+#define TDX_SUPPORTED_KVM_FEATURES ((1U << KVM_FEATURE_NOP_IO_DELAY) | \
+ (1U << KVM_FEATURE_PV_UNHALT) | \
+ (1U << KVM_FEATURE_PV_TLB_FLUSH) | \
+ (1U << KVM_FEATURE_PV_SEND_IPI) | \
+ (1U << KVM_FEATURE_POLL_CONTROL) | \
+ (1U << KVM_FEATURE_PV_SCHED_YIELD) | \
+ (1U << KVM_FEATURE_MSI_EXT_DEST_ID))
+
+static TdxGuest *tdx_guest;
+
+static struct kvm_tdx_capabilities *tdx_caps;
+static struct kvm_cpuid2 *tdx_supported_cpuid;
+
+/* Valid after kvm_arch_init()->confidential_guest_kvm_init()->tdx_kvm_init() */
+bool is_tdx_vm(void)
+{
+ return !!tdx_guest;
+}
+
+enum tdx_ioctl_level {
+ TDX_VM_IOCTL,
+ TDX_VCPU_IOCTL,
+};
+
+static int tdx_ioctl_internal(enum tdx_ioctl_level level, void *state,
+ int cmd_id, __u32 flags, void *data,
+ Error **errp)
+{
+ struct kvm_tdx_cmd tdx_cmd = {};
+ int r;
+
+ const char *tdx_ioctl_name[] = {
+ [KVM_TDX_CAPABILITIES] = "KVM_TDX_CAPABILITIES",
+ [KVM_TDX_INIT_VM] = "KVM_TDX_INIT_VM",
+ [KVM_TDX_INIT_VCPU] = "KVM_TDX_INIT_VCPU",
+ [KVM_TDX_INIT_MEM_REGION] = "KVM_TDX_INIT_MEM_REGION",
+ [KVM_TDX_FINALIZE_VM] = "KVM_TDX_FINALIZE_VM",
+ [KVM_TDX_GET_CPUID] = "KVM_TDX_GET_CPUID",
+ };
+
+ tdx_cmd.id = cmd_id;
+ tdx_cmd.flags = flags;
+ tdx_cmd.data = (__u64)(unsigned long)data;
+
+ switch (level) {
+ case TDX_VM_IOCTL:
+ r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd);
+ break;
+ case TDX_VCPU_IOCTL:
+ r = kvm_vcpu_ioctl(state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd);
+ break;
+ default:
+ error_setg(errp, "Invalid tdx_ioctl_level %d", level);
+ return -EINVAL;
+ }
+
+ if (r < 0) {
+ error_setg_errno(errp, -r, "TDX ioctl %s failed, hw_errors: 0x%llx",
+ tdx_ioctl_name[cmd_id], tdx_cmd.hw_error);
+ }
+ return r;
+}
+
+static inline int tdx_vm_ioctl(int cmd_id, __u32 flags, void *data,
+ Error **errp)
+{
+ return tdx_ioctl_internal(TDX_VM_IOCTL, NULL, cmd_id, flags, data, errp);
+}
+
+static inline int tdx_vcpu_ioctl(CPUState *cpu, int cmd_id, __u32 flags,
+ void *data, Error **errp)
+{
+ return tdx_ioctl_internal(TDX_VCPU_IOCTL, cpu, cmd_id, flags, data, errp);
+}
+
+static int get_tdx_capabilities(Error **errp)
+{
+ struct kvm_tdx_capabilities *caps;
+ /* 1st generation of TDX reports 6 cpuid configs */
+ int nr_cpuid_configs = 6;
+ size_t size;
+ int r;
+
+ do {
+ Error *local_err = NULL;
+ size = sizeof(struct kvm_tdx_capabilities) +
+ nr_cpuid_configs * sizeof(struct kvm_cpuid_entry2);
+ caps = g_malloc0(size);
+ caps->cpuid.nent = nr_cpuid_configs;
+
+ r = tdx_vm_ioctl(KVM_TDX_CAPABILITIES, 0, caps, &local_err);
+ if (r == -E2BIG) {
+ g_free(caps);
+ nr_cpuid_configs *= 2;
+ if (nr_cpuid_configs > KVM_MAX_CPUID_ENTRIES) {
+ error_report("KVM TDX seems broken that number of CPUID entries"
+ " in kvm_tdx_capabilities exceeds limit: %d",
+ KVM_MAX_CPUID_ENTRIES);
+ error_propagate(errp, local_err);
+ return r;
+ }
+ error_free(local_err);
+ } else if (r < 0) {
+ g_free(caps);
+ error_propagate(errp, local_err);
+ return r;
+ }
+ } while (r == -E2BIG);
+
+ tdx_caps = caps;
+
+ return 0;
+}
+
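The loop above is a grow-and-retry probe: start from the 6 CPUID configs the first TDX generation reports, double the buffer on -E2BIG, and bail out once the count would exceed the KVM_MAX_CPUID_ENTRIES sanity cap. Reduced to a runnable toy (the probe function stands in for the KVM_TDX_CAPABILITIES ioctl):

#include <errno.h>
#include <stdio.h>

/* Stand-in for the kernel side: pretend 13 entries are required. */
static int probe(int nr_entries)
{
    return nr_entries >= 13 ? 0 : -E2BIG;
}

int main(void)
{
    int nr = 6, r;                       /* first TDX generation: 6 configs */

    do {
        r = probe(nr);
        if (r == -E2BIG) {
            nr *= 2;                     /* buffer too small: double it */
        }
    } while (r == -E2BIG && nr <= 100);  /* 100 mirrors KVM_MAX_CPUID_ENTRIES */

    printf("settled on %d entries (r=%d)\n", nr, r);
    return 0;
}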
+void tdx_set_tdvf_region(MemoryRegion *tdvf_mr)
+{
+ assert(!tdx_guest->tdvf_mr);
+ tdx_guest->tdvf_mr = tdvf_mr;
+}
+
+static TdxFirmwareEntry *tdx_get_hob_entry(TdxGuest *tdx)
+{
+ TdxFirmwareEntry *entry;
+
+ for_each_tdx_fw_entry(&tdx->tdvf, entry) {
+ if (entry->type == TDVF_SECTION_TYPE_TD_HOB) {
+ return entry;
+ }
+ }
+ error_report("TDVF metadata doesn't specify TD_HOB location.");
+ exit(1);
+}
+
+static void tdx_add_ram_entry(uint64_t address, uint64_t length,
+ enum TdxRamType type)
+{
+ uint32_t nr_entries = tdx_guest->nr_ram_entries;
+ tdx_guest->ram_entries = g_renew(TdxRamEntry, tdx_guest->ram_entries,
+ nr_entries + 1);
+
+ tdx_guest->ram_entries[nr_entries].address = address;
+ tdx_guest->ram_entries[nr_entries].length = length;
+ tdx_guest->ram_entries[nr_entries].type = type;
+ tdx_guest->nr_ram_entries++;
+}
+
+static int tdx_accept_ram_range(uint64_t address, uint64_t length)
+{
+ uint64_t head_start, tail_start, head_length, tail_length;
+ uint64_t tmp_address, tmp_length;
+ TdxRamEntry *e;
+ int i = 0;
+
+ do {
+ if (i == tdx_guest->nr_ram_entries) {
+ return -1;
+ }
+
+ e = &tdx_guest->ram_entries[i++];
+ } while (address + length <= e->address || address >= e->address + e->length);
+
+ /*
+ * The to-be-accepted ram range must be fully contained by one
+ * RAM entry.
+ */
+ if (e->address > address ||
+ e->address + e->length < address + length) {
+ return -1;
+ }
+
+ if (e->type == TDX_RAM_ADDED) {
+ return 0;
+ }
+
+ tmp_address = e->address;
+ tmp_length = e->length;
+
+ e->address = address;
+ e->length = length;
+ e->type = TDX_RAM_ADDED;
+
+ head_length = address - tmp_address;
+ if (head_length > 0) {
+ head_start = tmp_address;
+ tdx_add_ram_entry(head_start, head_length, TDX_RAM_UNACCEPTED);
+ }
+
+ tail_start = address + length;
+ if (tail_start < tmp_address + tmp_length) {
+ tail_length = tmp_address + tmp_length - tail_start;
+ tdx_add_ram_entry(tail_start, tail_length, TDX_RAM_UNACCEPTED);
+ }
+
+ return 0;
+}
+
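tdx_accept_ram_range() carves the accepted range out of the single RAM entry that fully contains it: the containing entry is reused for the accepted middle, and any head/tail remainders become new unaccepted entries. A worked example with invented addresses:

/*
 * Illustration only: accepting [0x1000, 0x3000) out of one unaccepted
 * entry [0x0, 0x8000) leaves three entries (sorted back into address
 * order by the qsort in tdx_finalize_vm() below):
 *
 *   [0x0000, 0x1000)  TDX_RAM_UNACCEPTED   head remainder
 *   [0x1000, 0x3000)  TDX_RAM_ADDED        accepted range
 *   [0x3000, 0x8000)  TDX_RAM_UNACCEPTED   tail remainder
 */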
+static int tdx_ram_entry_compare(const void *lhs_, const void* rhs_)
+{
+ const TdxRamEntry *lhs = lhs_;
+ const TdxRamEntry *rhs = rhs_;
+
+ if (lhs->address == rhs->address) {
+ return 0;
+ }
+ if (le64_to_cpu(lhs->address) > le64_to_cpu(rhs->address)) {
+ return 1;
+ }
+ return -1;
+}
+
+static void tdx_init_ram_entries(void)
+{
+ unsigned i, j, nr_e820_entries;
+
+ nr_e820_entries = e820_get_table(NULL);
+ tdx_guest->ram_entries = g_new(TdxRamEntry, nr_e820_entries);
+
+ for (i = 0, j = 0; i < nr_e820_entries; i++) {
+ uint64_t addr, len;
+
+ if (e820_get_entry(i, E820_RAM, &addr, &len)) {
+ tdx_guest->ram_entries[j].address = addr;
+ tdx_guest->ram_entries[j].length = len;
+ tdx_guest->ram_entries[j].type = TDX_RAM_UNACCEPTED;
+ j++;
+ }
+ }
+ tdx_guest->nr_ram_entries = j;
+}
+
+static void tdx_post_init_vcpus(void)
+{
+ TdxFirmwareEntry *hob;
+ CPUState *cpu;
+
+ hob = tdx_get_hob_entry(tdx_guest);
+ CPU_FOREACH(cpu) {
+ tdx_vcpu_ioctl(cpu, KVM_TDX_INIT_VCPU, 0, (void *)hob->address,
+ &error_fatal);
+ }
+}
+
+static void tdx_finalize_vm(Notifier *notifier, void *unused)
+{
+ TdxFirmware *tdvf = &tdx_guest->tdvf;
+ TdxFirmwareEntry *entry;
+ RAMBlock *ram_block;
+ Error *local_err = NULL;
+ int r;
+
+ tdx_init_ram_entries();
+
+ for_each_tdx_fw_entry(tdvf, entry) {
+ switch (entry->type) {
+ case TDVF_SECTION_TYPE_BFV:
+ case TDVF_SECTION_TYPE_CFV:
+ entry->mem_ptr = tdvf->mem_ptr + entry->data_offset;
+ break;
+ case TDVF_SECTION_TYPE_TD_HOB:
+ case TDVF_SECTION_TYPE_TEMP_MEM:
+ entry->mem_ptr = qemu_ram_mmap(-1, entry->size,
+ qemu_real_host_page_size(), 0, 0);
+ if (entry->mem_ptr == MAP_FAILED) {
+ error_report("Failed to mmap memory for TDVF section %d",
+ entry->type);
+ exit(1);
+ }
+ if (tdx_accept_ram_range(entry->address, entry->size)) {
+ error_report("Failed to accept memory for TDVF section %d",
+ entry->type);
+ qemu_ram_munmap(-1, entry->mem_ptr, entry->size);
+ exit(1);
+ }
+ break;
+ default:
+ error_report("Unsupported TDVF section %d", entry->type);
+ exit(1);
+ }
+ }
+
+ qsort(tdx_guest->ram_entries, tdx_guest->nr_ram_entries,
+ sizeof(TdxRamEntry), &tdx_ram_entry_compare);
+
+ tdvf_hob_create(tdx_guest, tdx_get_hob_entry(tdx_guest));
+
+ tdx_post_init_vcpus();
+
+ for_each_tdx_fw_entry(tdvf, entry) {
+ struct kvm_tdx_init_mem_region region;
+ uint32_t flags;
+
+ region = (struct kvm_tdx_init_mem_region) {
+ .source_addr = (uint64_t)entry->mem_ptr,
+ .gpa = entry->address,
+ .nr_pages = entry->size >> 12,
+ };
+
+ flags = entry->attributes & TDVF_SECTION_ATTRIBUTES_MR_EXTEND ?
+ KVM_TDX_MEASURE_MEMORY_REGION : 0;
+
+ do {
+ error_free(local_err);
+ local_err = NULL;
+ r = tdx_vcpu_ioctl(first_cpu, KVM_TDX_INIT_MEM_REGION, flags,
+ &region, &local_err);
+ } while (r == -EAGAIN || r == -EINTR);
+ if (r < 0) {
+ error_report_err(local_err);
+ exit(1);
+ }
+
+ if (entry->type == TDVF_SECTION_TYPE_TD_HOB ||
+ entry->type == TDVF_SECTION_TYPE_TEMP_MEM) {
+ qemu_ram_munmap(-1, entry->mem_ptr, entry->size);
+ entry->mem_ptr = NULL;
+ }
+ }
+
+ /*
+ * The TDVF image has been copied into the private region above via
+ * KVM_MEMORY_MAPPING, so the original copy is no longer needed.
+ */
+ ram_block = tdx_guest->tdvf_mr->ram_block;
+ ram_block_discard_range(ram_block, 0, ram_block->max_length);
+
+ tdx_vm_ioctl(KVM_TDX_FINALIZE_VM, 0, NULL, &error_fatal);
+ CONFIDENTIAL_GUEST_SUPPORT(tdx_guest)->ready = true;
+}
+
+static Notifier tdx_machine_done_notify = {
+ .notify = tdx_finalize_vm,
+};
+
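The notifier fires once machine init completes; its registration is presumably done in tdx_kvm_init(), which is outside this excerpt, via the standard hook:

/* Assumed registration site (not shown in this excerpt): */
qemu_add_machine_init_done_notifier(&tdx_machine_done_notify);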
+/*
+ * Some CPUID bits change from fixed1 to configurable when the TDX module
+ * supports TDX_FEATURES0.VE_REDUCTION, e.g., MCA/MCE/MTRR/CORE_CAPABILITY.
+ *
+ * To make QEMU work with all versions of the TDX module, keep a bit in
+ * tdx_fixed1_bits if it is fixed1 in any module version, even when it is
+ * not fixed1 in the latest one. Otherwise, with an older TDX module, QEMU
+ * may treat the fixed1 bit as unsupported.
+ *
+ * For a newer TDX module it does no harm to keep such bits in
+ * tdx_fixed1_bits even though they have become configurable, because
+ * tdx_fixed1_bits is only used to set up the supported bits.
+ */
+KvmCpuidInfo tdx_fixed1_bits = {
+ .cpuid.nent = 8,
+ .entries[0] = {
+ .function = 0x1,
+ .index = 0,
+ .ecx = CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_DTES64 |
+ CPUID_EXT_DSCPL | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 |
+ CPUID_EXT_PDCM | CPUID_EXT_PCID | CPUID_EXT_SSE41 |
+ CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
+ CPUID_EXT_POPCNT | CPUID_EXT_AES | CPUID_EXT_XSAVE |
+ CPUID_EXT_RDRAND | CPUID_EXT_HYPERVISOR,
+ .edx = CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
+ CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
+ CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
+ CPUID_PAT | CPUID_CLFLUSH | CPUID_DTS | CPUID_MMX | CPUID_FXSR |
+ CPUID_SSE | CPUID_SSE2,
+ },
+ .entries[1] = {
+ .function = 0x6,
+ .index = 0,
+ .eax = CPUID_6_EAX_ARAT,
+ },
+ .entries[2] = {
+ .function = 0x7,
+ .index = 0,
+ .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX,
+ .ebx = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_FDP_EXCPTN_ONLY |
+ CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_ZERO_FCS_FDS | CPUID_7_0_EBX_RDSEED |
+ CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
+ CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_SHA_NI,
+ .ecx = CPUID_7_0_ECX_BUS_LOCK_DETECT | CPUID_7_0_ECX_MOVDIRI |
+ CPUID_7_0_ECX_MOVDIR64B,
+ .edx = CPUID_7_0_EDX_MD_CLEAR | CPUID_7_0_EDX_SPEC_CTRL |
+ CPUID_7_0_EDX_STIBP | CPUID_7_0_EDX_FLUSH_L1D |
+ CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_CORE_CAPABILITY |
+ CPUID_7_0_EDX_SPEC_CTRL_SSBD,
+ },
+ .entries[3] = {
+ .function = 0x7,
+ .index = 2,
+ .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX,
+ .edx = CPUID_7_2_EDX_PSFD | CPUID_7_2_EDX_IPRED_CTRL |
+ CPUID_7_2_EDX_RRSBA_CTRL | CPUID_7_2_EDX_BHI_CTRL,
+ },
+ .entries[4] = {
+ .function = 0xD,
+ .index = 0,
+ .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX,
+ .eax = XSTATE_FP_MASK | XSTATE_SSE_MASK,
+ },
+ .entries[5] = {
+ .function = 0xD,
+ .index = 1,
+ .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX,
+        .eax = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES,
+ },
+ .entries[6] = {
+ .function = 0x80000001,
+ .index = 0,
+ .ecx = CPUID_EXT3_LAHF_LM | CPUID_EXT3_ABM | CPUID_EXT3_3DNOWPREFETCH,
+ /*
+         * Strictly speaking, SYSCALL is not a fixed1 bit since it depends on
+         * the CPU being in 64-bit mode. But here fixed1 is used to serve the
+         * purpose of reporting supported bits for TDX. In this sense, SYSCALL
+         * is always supported.
+ */
+ .edx = CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB |
+ CPUID_EXT2_RDTSCP | CPUID_EXT2_LM,
+ },
+ .entries[7] = {
+ .function = 0x80000007,
+ .index = 0,
+ .edx = CPUID_APM_INVTSC,
+ },
+};
+
+typedef struct TdxAttrsMap {
+ uint32_t attr_index;
+ uint32_t cpuid_leaf;
+ uint32_t cpuid_subleaf;
+ int cpuid_reg;
+ uint32_t feat_mask;
+} TdxAttrsMap;
+
+static TdxAttrsMap tdx_attrs_maps[] = {
+ {.attr_index = 27,
+ .cpuid_leaf = 7,
+ .cpuid_subleaf = 1,
+ .cpuid_reg = R_EAX,
+ .feat_mask = CPUID_7_1_EAX_LASS,},
+
+ {.attr_index = 30,
+ .cpuid_leaf = 7,
+ .cpuid_subleaf = 0,
+ .cpuid_reg = R_ECX,
+ .feat_mask = CPUID_7_0_ECX_PKS,},
+
+ {.attr_index = 31,
+ .cpuid_leaf = 7,
+ .cpuid_subleaf = 0,
+ .cpuid_reg = R_ECX,
+ .feat_mask = CPUID_7_0_ECX_KeyLocker,},
+};
+
+typedef struct TdxXFAMDep {
+ int xfam_bit;
+ FeatureMask feat_mask;
+} TdxXFAMDep;
+
+/*
+ * Note, only the CPUID bits whose virtualization type is "XFAM & Native" are
+ * defined here.
+ *
+ * Bits whose virtualization type is "XFAM & Configured & Native" are reported
+ * as configurable bits, and they are not supported if they are absent from
+ * the configurable bits list from KVM, even if the corresponding XFAM bit is
+ * supported.
+ */
+TdxXFAMDep tdx_xfam_deps[] = {
+ { XSTATE_YMM_BIT, { FEAT_1_ECX, CPUID_EXT_FMA }},
+ { XSTATE_YMM_BIT, { FEAT_7_0_EBX, CPUID_7_0_EBX_AVX2 }},
+ { XSTATE_OPMASK_BIT, { FEAT_7_0_ECX, CPUID_7_0_ECX_AVX512_VBMI}},
+ { XSTATE_OPMASK_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AVX512_FP16}},
+ { XSTATE_PT_BIT, { FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT}},
+ { XSTATE_PKRU_BIT, { FEAT_7_0_ECX, CPUID_7_0_ECX_PKU}},
+ { XSTATE_XTILE_CFG_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AMX_BF16 }},
+ { XSTATE_XTILE_CFG_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AMX_TILE }},
+ { XSTATE_XTILE_CFG_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AMX_INT8 }},
+};
+
+static struct kvm_cpuid_entry2 *find_in_supported_entry(uint32_t function,
+ uint32_t index)
+{
+ struct kvm_cpuid_entry2 *e;
+
+ e = cpuid_find_entry(tdx_supported_cpuid, function, index);
+ if (!e) {
+ if (tdx_supported_cpuid->nent >= KVM_MAX_CPUID_ENTRIES) {
+ error_report("tdx_supported_cpuid requries more space than %d entries",
+ KVM_MAX_CPUID_ENTRIES);
+ exit(1);
+ }
+ e = &tdx_supported_cpuid->entries[tdx_supported_cpuid->nent++];
+ e->function = function;
+ e->index = index;
+ }
+
+ return e;
+}
+
+static void tdx_add_supported_cpuid_by_fixed1_bits(void)
+{
+ struct kvm_cpuid_entry2 *e, *e1;
+ int i;
+
+ for (i = 0; i < tdx_fixed1_bits.cpuid.nent; i++) {
+ e = &tdx_fixed1_bits.entries[i];
+
+ e1 = find_in_supported_entry(e->function, e->index);
+ e1->eax |= e->eax;
+ e1->ebx |= e->ebx;
+ e1->ecx |= e->ecx;
+ e1->edx |= e->edx;
+ }
+}
+
+static void tdx_add_supported_cpuid_by_attrs(void)
+{
+ struct kvm_cpuid_entry2 *e;
+ TdxAttrsMap *map;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tdx_attrs_maps); i++) {
+ map = &tdx_attrs_maps[i];
+ if (!((1ULL << map->attr_index) & tdx_caps->supported_attrs)) {
+ continue;
+ }
+
+ e = find_in_supported_entry(map->cpuid_leaf, map->cpuid_subleaf);
+
+        switch (map->cpuid_reg) {
+ case R_EAX:
+ e->eax |= map->feat_mask;
+ break;
+ case R_EBX:
+ e->ebx |= map->feat_mask;
+ break;
+ case R_ECX:
+ e->ecx |= map->feat_mask;
+ break;
+ case R_EDX:
+ e->edx |= map->feat_mask;
+ break;
+ }
+ }
+}
+
+static void tdx_add_supported_cpuid_by_xfam(void)
+{
+ struct kvm_cpuid_entry2 *e;
+ int i;
+
+ const TdxXFAMDep *xfam_dep;
+ const FeatureWordInfo *f;
+ for (i = 0; i < ARRAY_SIZE(tdx_xfam_deps); i++) {
+ xfam_dep = &tdx_xfam_deps[i];
+ if (!((1ULL << xfam_dep->xfam_bit) & tdx_caps->supported_xfam)) {
+ continue;
+ }
+
+ f = &feature_word_info[xfam_dep->feat_mask.index];
+ if (f->type != CPUID_FEATURE_WORD) {
+ continue;
+ }
+
+ e = find_in_supported_entry(f->cpuid.eax, f->cpuid.ecx);
+        switch (f->cpuid.reg) {
+ case R_EAX:
+ e->eax |= xfam_dep->feat_mask.mask;
+ break;
+ case R_EBX:
+ e->ebx |= xfam_dep->feat_mask.mask;
+ break;
+ case R_ECX:
+ e->ecx |= xfam_dep->feat_mask.mask;
+ break;
+ case R_EDX:
+ e->edx |= xfam_dep->feat_mask.mask;
+ break;
+ }
+ }
+
+ e = find_in_supported_entry(0xd, 0);
+ e->eax |= (tdx_caps->supported_xfam & CPUID_XSTATE_XCR0_MASK);
+ e->edx |= (tdx_caps->supported_xfam & CPUID_XSTATE_XCR0_MASK) >> 32;
+
+ e = find_in_supported_entry(0xd, 1);
+ /*
+     * Mark XFD as always supported for TDX; it is cleared in
+     * tdx_adjust_cpuid_features() if XFD is unavailable on the hardware,
+     * because in that case the original data has it as 0.
+ */
+ e->eax |= CPUID_XSAVE_XFD;
+ e->ecx |= (tdx_caps->supported_xfam & CPUID_XSTATE_XSS_MASK);
+ e->edx |= (tdx_caps->supported_xfam & CPUID_XSTATE_XSS_MASK) >> 32;
+}
+
+static void tdx_add_supported_kvm_features(void)
+{
+ struct kvm_cpuid_entry2 *e;
+
+ e = find_in_supported_entry(0x40000001, 0);
+ e->eax = TDX_SUPPORTED_KVM_FEATURES;
+}
+
+static void tdx_setup_supported_cpuid(void)
+{
+ if (tdx_supported_cpuid) {
+ return;
+ }
+
+ tdx_supported_cpuid = g_malloc0(sizeof(*tdx_supported_cpuid) +
+ KVM_MAX_CPUID_ENTRIES * sizeof(struct kvm_cpuid_entry2));
+
+ memcpy(tdx_supported_cpuid->entries, tdx_caps->cpuid.entries,
+ tdx_caps->cpuid.nent * sizeof(struct kvm_cpuid_entry2));
+ tdx_supported_cpuid->nent = tdx_caps->cpuid.nent;
+
+ tdx_add_supported_cpuid_by_fixed1_bits();
+ tdx_add_supported_cpuid_by_attrs();
+ tdx_add_supported_cpuid_by_xfam();
+
+ tdx_add_supported_kvm_features();
+}
+
+static int tdx_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ X86MachineState *x86ms = X86_MACHINE(ms);
+ TdxGuest *tdx = TDX_GUEST(cgs);
+ int r = 0;
+
+ kvm_mark_guest_state_protected();
+
+ if (x86ms->smm == ON_OFF_AUTO_AUTO) {
+ x86ms->smm = ON_OFF_AUTO_OFF;
+ } else if (x86ms->smm == ON_OFF_AUTO_ON) {
+ error_setg(errp, "TDX VM doesn't support SMM");
+ return -EINVAL;
+ }
+
+ if (x86ms->pic == ON_OFF_AUTO_AUTO) {
+ x86ms->pic = ON_OFF_AUTO_OFF;
+ } else if (x86ms->pic == ON_OFF_AUTO_ON) {
+ error_setg(errp, "TDX VM doesn't support PIC");
+ return -EINVAL;
+ }
+
+ if (kvm_state->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
+ kvm_state->kernel_irqchip_split = ON_OFF_AUTO_ON;
+ } else if (kvm_state->kernel_irqchip_split != ON_OFF_AUTO_ON) {
+ error_setg(errp, "TDX VM requires kernel_irqchip to be split");
+ return -EINVAL;
+ }
+
+ if (!tdx_caps) {
+ r = get_tdx_capabilities(errp);
+ if (r) {
+ return r;
+ }
+ }
+
+ tdx_setup_supported_cpuid();
+
+ /* TDX relies on KVM_HC_MAP_GPA_RANGE to handle TDG.VP.VMCALL<MapGPA> */
+ if (!kvm_enable_hypercall(BIT_ULL(KVM_HC_MAP_GPA_RANGE))) {
+ return -EOPNOTSUPP;
+ }
+
+ /*
+     * Set kvm_readonly_mem_allowed to false, because TDX only supports
+     * readonly memory for shared memory but not for private memory. Besides,
+     * whether a memslot is private or shared is not determined by QEMU.
+     *
+     * Thus, just mark readonly memory as unsupported for simplicity.
+ */
+ kvm_readonly_mem_allowed = false;
+
+ qemu_add_machine_init_done_notifier(&tdx_machine_done_notify);
+
+ tdx_guest = tdx;
+ return 0;
+}
+
+static int tdx_kvm_type(X86ConfidentialGuest *cg)
+{
+ /* Do the object check */
+ TDX_GUEST(cg);
+
+ return KVM_X86_TDX_VM;
+}
+
+static void tdx_cpu_instance_init(X86ConfidentialGuest *cg, CPUState *cpu)
+{
+ X86CPU *x86cpu = X86_CPU(cpu);
+
+ object_property_set_bool(OBJECT(cpu), "pmu", false, &error_abort);
+
+ /* invtsc is fixed1 for TD guest */
+ object_property_set_bool(OBJECT(cpu), "invtsc", true, &error_abort);
+
+ x86cpu->enable_cpuid_0x1f = true;
+}
+
+static uint32_t tdx_adjust_cpuid_features(X86ConfidentialGuest *cg,
+ uint32_t feature, uint32_t index,
+ int reg, uint32_t value)
+{
+ struct kvm_cpuid_entry2 *e;
+
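+    /* Force the fixed1 bits on, then clamp feature words to the TDX-supported set. */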
+ e = cpuid_find_entry(&tdx_fixed1_bits.cpuid, feature, index);
+ if (e) {
+ value |= cpuid_entry_get_reg(e, reg);
+ }
+
+ if (is_feature_word_cpuid(feature, index, reg)) {
+ e = cpuid_find_entry(tdx_supported_cpuid, feature, index);
+ if (e) {
+ value &= cpuid_entry_get_reg(e, reg);
+ }
+ }
+
+ return value;
+}
+
+static struct kvm_cpuid2 *tdx_fetch_cpuid(CPUState *cpu, int *ret)
+{
+ struct kvm_cpuid2 *fetch_cpuid;
+ int size = KVM_MAX_CPUID_ENTRIES;
+ Error *local_err = NULL;
+ int r;
+
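+    /* On -E2BIG, KVM updates nent to the required size; retry with a larger buffer. */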
+ do {
+ error_free(local_err);
+ local_err = NULL;
+
+ fetch_cpuid = g_malloc0(sizeof(*fetch_cpuid) +
+ sizeof(struct kvm_cpuid_entry2) * size);
+ fetch_cpuid->nent = size;
+ r = tdx_vcpu_ioctl(cpu, KVM_TDX_GET_CPUID, 0, fetch_cpuid, &local_err);
+        if (r == -E2BIG) {
+            size = fetch_cpuid->nent;
+            g_free(fetch_cpuid);
+        }
+ } while (r == -E2BIG);
+
+ if (r < 0) {
+ error_report_err(local_err);
+ *ret = r;
+ return NULL;
+ }
+
+ return fetch_cpuid;
+}
+
+static int tdx_check_features(X86ConfidentialGuest *cg, CPUState *cs)
+{
+ uint64_t actual, requested, unavailable, forced_on;
+ g_autofree struct kvm_cpuid2 *fetch_cpuid;
+ const char *forced_on_prefix = NULL;
+ const char *unav_prefix = NULL;
+ struct kvm_cpuid_entry2 *entry;
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ FeatureWordInfo *wi;
+ FeatureWord w;
+ bool mismatch = false;
+ int r;
+
+ fetch_cpuid = tdx_fetch_cpuid(cs, &r);
+ if (!fetch_cpuid) {
+ return r;
+ }
+
+ if (cpu->check_cpuid || cpu->enforce_cpuid) {
+ unav_prefix = "TDX doesn't support requested feature";
+ forced_on_prefix = "TDX forcibly sets the feature";
+ }
+
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ wi = &feature_word_info[w];
+ actual = 0;
+
+ switch (wi->type) {
+ case CPUID_FEATURE_WORD:
+ entry = cpuid_find_entry(fetch_cpuid, wi->cpuid.eax, wi->cpuid.ecx);
+ if (!entry) {
+                /*
+                 * If KVM doesn't report the leaf, it means the feature word
+                 * is fully configurable by QEMU.
+                 */
+ continue;
+ }
+
+ actual = cpuid_entry_get_reg(entry, wi->cpuid.reg);
+ break;
+ case MSR_FEATURE_WORD:
+ /*
+ * TODO:
+             * validate MSR features once KVM has an interface to report them.
+ */
+ continue;
+ }
+
+ /* Fixup for special cases */
+ switch (w) {
+ case FEAT_8000_0001_EDX:
+ /*
+             * Intel enumerates the SYSCALL bit as 1 only when the processor
+             * is in 64-bit mode, and before the vcpu runs it is not in
+             * 64-bit mode.
+ */
+ actual |= CPUID_EXT2_SYSCALL;
+ break;
+ default:
+ break;
+ }
+
+ requested = env->features[w];
+ unavailable = requested & ~actual;
+ mark_unavailable_features(cpu, w, unavailable, unav_prefix);
+ if (unavailable) {
+ mismatch = true;
+ }
+
+ forced_on = actual & ~requested;
+ mark_forced_on_features(cpu, w, forced_on, forced_on_prefix);
+ if (forced_on) {
+ mismatch = true;
+ }
+ }
+
+ if (cpu->enforce_cpuid && mismatch) {
+ return -EINVAL;
+ }
+
+ if (cpu->phys_bits != host_cpu_phys_bits()) {
+ error_report("TDX requires guest CPU physical bits (%u) "
+ "to match host CPU physical bits (%u)",
+ cpu->phys_bits, host_cpu_phys_bits());
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tdx_validate_attributes(TdxGuest *tdx, Error **errp)
+{
+ if ((tdx->attributes & ~tdx_caps->supported_attrs)) {
+ error_setg(errp, "Invalid attributes 0x%lx for TDX VM "
+ "(KVM supported: 0x%llx)", tdx->attributes,
+ tdx_caps->supported_attrs);
+ return -1;
+ }
+
+ if (tdx->attributes & ~TDX_SUPPORTED_TD_ATTRS) {
+ error_setg(errp, "Some QEMU unsupported TD attribute bits being "
+ "requested: 0x%lx (QEMU supported: 0x%llx)",
+ tdx->attributes, TDX_SUPPORTED_TD_ATTRS);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int setup_td_guest_attributes(X86CPU *x86cpu, Error **errp)
+{
+ CPUX86State *env = &x86cpu->env;
+
+ tdx_guest->attributes |= (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS) ?
+ TDX_TD_ATTRIBUTES_PKS : 0;
+ tdx_guest->attributes |= x86cpu->enable_pmu ? TDX_TD_ATTRIBUTES_PERFMON : 0;
+
+ return tdx_validate_attributes(tdx_guest, errp);
+}
+
+static int setup_td_xfam(X86CPU *x86cpu, Error **errp)
+{
+ CPUX86State *env = &x86cpu->env;
+ uint64_t xfam;
+
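+    /* XFAM is the union of the guest's XCR0 and XSS feature words. */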
+ xfam = env->features[FEAT_XSAVE_XCR0_LO] |
+ env->features[FEAT_XSAVE_XCR0_HI] |
+ env->features[FEAT_XSAVE_XSS_LO] |
+ env->features[FEAT_XSAVE_XSS_HI];
+
+ if (xfam & ~tdx_caps->supported_xfam) {
+ error_setg(errp, "Invalid XFAM 0x%lx for TDX VM (supported: 0x%llx))",
+ xfam, tdx_caps->supported_xfam);
+ return -1;
+ }
+
+ tdx_guest->xfam = xfam;
+ return 0;
+}
+
+static void tdx_filter_cpuid(struct kvm_cpuid2 *cpuids)
+{
+ int i, dest_cnt = 0;
+ struct kvm_cpuid_entry2 *src, *dest, *conf;
+
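+    /* Drop leaves TDX doesn't allow configuring; mask the rest by the configurable bits. */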
+ for (i = 0; i < cpuids->nent; i++) {
+ src = cpuids->entries + i;
+ conf = cpuid_find_entry(&tdx_caps->cpuid, src->function, src->index);
+ if (!conf) {
+ continue;
+ }
+ dest = cpuids->entries + dest_cnt;
+
+ dest->function = src->function;
+ dest->index = src->index;
+ dest->flags = src->flags;
+ dest->eax = src->eax & conf->eax;
+ dest->ebx = src->ebx & conf->ebx;
+ dest->ecx = src->ecx & conf->ecx;
+ dest->edx = src->edx & conf->edx;
+
+ dest_cnt++;
+ }
+    cpuids->nent = dest_cnt;
+}
+
+int tdx_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ X86CPU *x86cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86cpu->env;
+ g_autofree struct kvm_tdx_init_vm *init_vm = NULL;
+ Error *local_err = NULL;
+ size_t data_len;
+ int retry = 10000;
+ int r = 0;
+
+ QEMU_LOCK_GUARD(&tdx_guest->lock);
+ if (tdx_guest->initialized) {
+ return r;
+ }
+
+ init_vm = g_malloc0(sizeof(struct kvm_tdx_init_vm) +
+ sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);
+
+ if (!kvm_check_extension(kvm_state, KVM_CAP_X86_APIC_BUS_CYCLES_NS)) {
+ error_setg(errp, "KVM doesn't support KVM_CAP_X86_APIC_BUS_CYCLES_NS");
+ return -EOPNOTSUPP;
+ }
+
+ r = kvm_vm_enable_cap(kvm_state, KVM_CAP_X86_APIC_BUS_CYCLES_NS,
+ 0, TDX_APIC_BUS_CYCLES_NS);
+ if (r < 0) {
+ error_setg_errno(errp, -r,
+ "Unable to set core crystal clock frequency to 25MHz");
+ return r;
+ }
+
+ if (env->tsc_khz && (env->tsc_khz < TDX_MIN_TSC_FREQUENCY_KHZ ||
+ env->tsc_khz > TDX_MAX_TSC_FREQUENCY_KHZ)) {
+ error_setg(errp, "Invalid TSC %ld KHz, must specify cpu_frequency "
+ "between [%d, %d] kHz", env->tsc_khz,
+ TDX_MIN_TSC_FREQUENCY_KHZ, TDX_MAX_TSC_FREQUENCY_KHZ);
+ return -EINVAL;
+ }
+
+ if (env->tsc_khz % (25 * 1000)) {
+ error_setg(errp, "Invalid TSC %ld KHz, it must be multiple of 25MHz",
+ env->tsc_khz);
+ return -EINVAL;
+ }
+
+    /* It's safe even if env->tsc_khz is 0; KVM uses the host's tsc_khz then. */
+ r = kvm_vm_ioctl(kvm_state, KVM_SET_TSC_KHZ, env->tsc_khz);
+ if (r < 0) {
+ error_setg_errno(errp, -r, "Unable to set TSC frequency to %ld kHz",
+ env->tsc_khz);
+ return r;
+ }
+
+ if (tdx_guest->mrconfigid) {
+ g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrconfigid,
+ strlen(tdx_guest->mrconfigid), &data_len, errp);
+ if (!data) {
+ return -1;
+ }
+ if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
+ error_setg(errp, "TDX: failed to decode mrconfigid");
+ return -1;
+ }
+ memcpy(init_vm->mrconfigid, data, data_len);
+ }
+
+ if (tdx_guest->mrowner) {
+ g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrowner,
+ strlen(tdx_guest->mrowner), &data_len, errp);
+ if (!data) {
+ return -1;
+ }
+ if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
+ error_setg(errp, "TDX: failed to decode mrowner");
+ return -1;
+ }
+ memcpy(init_vm->mrowner, data, data_len);
+ }
+
+ if (tdx_guest->mrownerconfig) {
+ g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrownerconfig,
+ strlen(tdx_guest->mrownerconfig), &data_len, errp);
+ if (!data) {
+ return -1;
+ }
+ if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
+ error_setg(errp, "TDX: failed to decode mrownerconfig");
+ return -1;
+ }
+ memcpy(init_vm->mrownerconfig, data, data_len);
+ }
+
+ r = setup_td_guest_attributes(x86cpu, errp);
+ if (r) {
+ return r;
+ }
+
+ r = setup_td_xfam(x86cpu, errp);
+ if (r) {
+ return r;
+ }
+
+ init_vm->cpuid.nent = kvm_x86_build_cpuid(env, init_vm->cpuid.entries, 0);
+ tdx_filter_cpuid(&init_vm->cpuid);
+
+ init_vm->attributes = tdx_guest->attributes;
+ init_vm->xfam = tdx_guest->xfam;
+
+ /*
+     * KVM_TDX_INIT_VM returns -EAGAIN when the KVM-side SEAMCALL
+     * (TDH_MNG_CREATE) gets TDX_RND_NO_ENTROPY because random number
+     * generation (e.g., RDRAND or RDSEED) is busy.
+     *
+     * Retry in that case.
+ */
+ do {
+ error_free(local_err);
+ local_err = NULL;
+ r = tdx_vm_ioctl(KVM_TDX_INIT_VM, 0, init_vm, &local_err);
+ } while (r == -EAGAIN && --retry);
+
+ if (r < 0) {
+ if (!retry) {
+ error_append_hint(&local_err, "Hardware RNG (Random Number "
+ "Generator) is busy occupied by someone (via RDRAND/RDSEED) "
+ "maliciously, which leads to KVM_TDX_INIT_VM keeping failure "
+ "due to lack of entropy.\n");
+ }
+ error_propagate(errp, local_err);
+ return r;
+ }
+
+ tdx_guest->initialized = true;
+
+ return 0;
+}
+
+int tdx_parse_tdvf(void *flash_ptr, int size)
+{
+ return tdvf_parse_metadata(&tdx_guest->tdvf, flash_ptr, size);
+}
+
+static void tdx_panicked_on_fatal_error(X86CPU *cpu, uint64_t error_code,
+ char *message, uint64_t gpa)
+{
+ GuestPanicInformation *panic_info;
+
+ panic_info = g_new0(GuestPanicInformation, 1);
+ panic_info->type = GUEST_PANIC_INFORMATION_TYPE_TDX;
+ panic_info->u.tdx.error_code = (uint32_t) error_code;
+ panic_info->u.tdx.message = message;
+ panic_info->u.tdx.gpa = gpa;
+
+ qemu_system_guest_panicked(panic_info);
+}
+
+/*
+ * Only 8 registers can contain a valid ASCII byte stream to form the fatal
+ * message, and their sequence is: R14, R15, RBX, RDI, RSI, R8, R9, RDX
+ */
+#define TDX_FATAL_MESSAGE_MAX 64
+
+#define TDX_REPORT_FATAL_ERROR_GPA_VALID BIT_ULL(63)
+
+int tdx_handle_report_fatal_error(X86CPU *cpu, struct kvm_run *run)
+{
+ uint64_t error_code = run->system_event.data[R_R12];
+ uint64_t reg_mask = run->system_event.data[R_ECX];
+ char *message = NULL;
+ uint64_t *tmp;
+ uint64_t gpa = -1ull;
+
+ if (error_code & 0xffff) {
+ error_report("TDX: REPORT_FATAL_ERROR: invalid error code: 0x%lx",
+ error_code);
+ return -1;
+ }
+
+ if (reg_mask) {
+ message = g_malloc0(TDX_FATAL_MESSAGE_MAX + 1);
+ tmp = (uint64_t *)message;
+
+#define COPY_REG(REG) \
+ do { \
+ if (reg_mask & BIT_ULL(REG)) { \
+ *(tmp++) = run->system_event.data[REG]; \
+ } \
+ } while (0)
+
+ COPY_REG(R_R14);
+ COPY_REG(R_R15);
+ COPY_REG(R_EBX);
+ COPY_REG(R_EDI);
+ COPY_REG(R_ESI);
+ COPY_REG(R_R8);
+ COPY_REG(R_R9);
+ COPY_REG(R_EDX);
+ *((char *)tmp) = '\0';
+ }
+#undef COPY_REG
+
+ if (error_code & TDX_REPORT_FATAL_ERROR_GPA_VALID) {
+ gpa = run->system_event.data[R_R13];
+ }
+
+ tdx_panicked_on_fatal_error(cpu, error_code, message, gpa);
+
+ return -1;
+}
+
+static bool tdx_guest_get_sept_ve_disable(Object *obj, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ return !!(tdx->attributes & TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE);
+}
+
+static void tdx_guest_set_sept_ve_disable(Object *obj, bool value, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ if (value) {
+ tdx->attributes |= TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;
+ } else {
+ tdx->attributes &= ~TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;
+ }
+}
+
+static char *tdx_guest_get_mrconfigid(Object *obj, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ return g_strdup(tdx->mrconfigid);
+}
+
+static void tdx_guest_set_mrconfigid(Object *obj, const char *value, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ g_free(tdx->mrconfigid);
+ tdx->mrconfigid = g_strdup(value);
+}
+
+static char *tdx_guest_get_mrowner(Object *obj, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ return g_strdup(tdx->mrowner);
+}
+
+static void tdx_guest_set_mrowner(Object *obj, const char *value, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ g_free(tdx->mrowner);
+ tdx->mrowner = g_strdup(value);
+}
+
+static char *tdx_guest_get_mrownerconfig(Object *obj, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ return g_strdup(tdx->mrownerconfig);
+}
+
+static void tdx_guest_set_mrownerconfig(Object *obj, const char *value, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ g_free(tdx->mrownerconfig);
+ tdx->mrownerconfig = g_strdup(value);
+}
+
+/* tdx guest */
+OBJECT_DEFINE_TYPE_WITH_INTERFACES(TdxGuest,
+ tdx_guest,
+ TDX_GUEST,
+ X86_CONFIDENTIAL_GUEST,
+ { TYPE_USER_CREATABLE },
+ { NULL })
+
+static void tdx_guest_init(Object *obj)
+{
+ ConfidentialGuestSupport *cgs = CONFIDENTIAL_GUEST_SUPPORT(obj);
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ qemu_mutex_init(&tdx->lock);
+
+ cgs->require_guest_memfd = true;
+ tdx->attributes = TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;
+
+ object_property_add_uint64_ptr(obj, "attributes", &tdx->attributes,
+ OBJ_PROP_FLAG_READWRITE);
+ object_property_add_bool(obj, "sept-ve-disable",
+ tdx_guest_get_sept_ve_disable,
+ tdx_guest_set_sept_ve_disable);
+ object_property_add_str(obj, "mrconfigid",
+ tdx_guest_get_mrconfigid,
+ tdx_guest_set_mrconfigid);
+ object_property_add_str(obj, "mrowner",
+ tdx_guest_get_mrowner, tdx_guest_set_mrowner);
+ object_property_add_str(obj, "mrownerconfig",
+ tdx_guest_get_mrownerconfig,
+ tdx_guest_set_mrownerconfig);
+}
+
+static void tdx_guest_finalize(Object *obj)
+{
+}
+
+static void tdx_guest_class_init(ObjectClass *oc, const void *data)
+{
+ ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc);
+ X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);
+
+ klass->kvm_init = tdx_kvm_init;
+ x86_klass->kvm_type = tdx_kvm_type;
+ x86_klass->cpu_instance_init = tdx_cpu_instance_init;
+ x86_klass->adjust_cpuid_features = tdx_adjust_cpuid_features;
+ x86_klass->check_features = tdx_check_features;
+}
diff --git a/target/i386/kvm/tdx.h b/target/i386/kvm/tdx.h
new file mode 100644
index 0000000..04b5afe
--- /dev/null
+++ b/target/i386/kvm/tdx.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef QEMU_I386_TDX_H
+#define QEMU_I386_TDX_H
+
+#ifndef CONFIG_USER_ONLY
+#include CONFIG_DEVICES /* CONFIG_TDX */
+#endif
+
+#include "confidential-guest.h"
+#include "cpu.h"
+#include "hw/i386/tdvf.h"
+
+#define TYPE_TDX_GUEST "tdx-guest"
+#define TDX_GUEST(obj) OBJECT_CHECK(TdxGuest, (obj), TYPE_TDX_GUEST)
+
+typedef struct TdxGuestClass {
+ X86ConfidentialGuestClass parent_class;
+} TdxGuestClass;
+
+/* TDX requires the APIC bus frequency to be 25MHz, i.e. a 40ns cycle */
+#define TDX_APIC_BUS_CYCLES_NS 40
+
+enum TdxRamType {
+ TDX_RAM_UNACCEPTED,
+ TDX_RAM_ADDED,
+};
+
+typedef struct TdxRamEntry {
+ uint64_t address;
+ uint64_t length;
+ enum TdxRamType type;
+} TdxRamEntry;
+
+typedef struct TdxGuest {
+ X86ConfidentialGuest parent_obj;
+
+ QemuMutex lock;
+
+ bool initialized;
+ uint64_t attributes; /* TD attributes */
+ uint64_t xfam;
+    char *mrconfigid; /* base64 encoded sha384 digest */
+    char *mrowner; /* base64 encoded sha384 digest */
+    char *mrownerconfig; /* base64 encoded sha384 digest */
+
+ MemoryRegion *tdvf_mr;
+ TdxFirmware tdvf;
+
+ uint32_t nr_ram_entries;
+ TdxRamEntry *ram_entries;
+} TdxGuest;
+
+#ifdef CONFIG_TDX
+bool is_tdx_vm(void);
+#else
+#define is_tdx_vm() 0
+#endif /* CONFIG_TDX */
+
+int tdx_pre_create_vcpu(CPUState *cpu, Error **errp);
+void tdx_set_tdvf_region(MemoryRegion *tdvf_mr);
+int tdx_parse_tdvf(void *flash_ptr, int size);
+int tdx_handle_report_fatal_error(X86CPU *cpu, struct kvm_run *run);
+
+#endif /* QEMU_I386_TDX_H */
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 6cb561c..dd2dac1 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -1060,9 +1060,8 @@ static bool tsc_khz_needed(void *opaque)
{
X86CPU *cpu = opaque;
CPUX86State *env = &cpu->env;
- MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
- X86MachineClass *x86mc = X86_MACHINE_CLASS(mc);
- return env->tsc_khz && x86mc->save_tsc_khz;
+
+ return env->tsc_khz;
}
static const VMStateDescription vmstate_tsc_khz = {
diff --git a/target/i386/monitor.c b/target/i386/monitor.c
index 3ea92b0..3c9b6ca 100644
--- a/target/i386/monitor.c
+++ b/target/i386/monitor.c
@@ -29,7 +29,6 @@
#include "monitor/hmp.h"
#include "qobject/qdict.h"
#include "qapi/error.h"
-#include "qapi/qapi-commands-misc-target.h"
#include "qapi/qapi-commands-misc.h"
/* Perform linear address sign extension */
diff --git a/target/i386/sev-system-stub.c b/target/i386/sev-system-stub.c
index d5bf886..7c5c02a 100644
--- a/target/i386/sev-system-stub.c
+++ b/target/i386/sev-system-stub.c
@@ -14,34 +14,9 @@
#include "qemu/osdep.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
-#include "qapi/qapi-commands-misc-target.h"
#include "qapi/error.h"
#include "sev.h"
-SevInfo *qmp_query_sev(Error **errp)
-{
- error_setg(errp, "SEV is not available in this QEMU");
- return NULL;
-}
-
-SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)
-{
- error_setg(errp, "SEV is not available in this QEMU");
- return NULL;
-}
-
-SevCapability *qmp_query_sev_capabilities(Error **errp)
-{
- error_setg(errp, "SEV is not available in this QEMU");
- return NULL;
-}
-
-void qmp_sev_inject_launch_secret(const char *packet_header, const char *secret,
- bool has_gpa, uint64_t gpa, Error **errp)
-{
- error_setg(errp, "SEV is not available in this QEMU");
-}
-
int sev_encrypt_flash(hwaddr gpa, uint8_t *ptr, uint64_t len, Error **errp)
{
g_assert_not_reached();
@@ -56,13 +31,6 @@ int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size)
g_assert_not_reached();
}
-SevAttestationReport *qmp_query_sev_attestation_report(const char *mnonce,
- Error **errp)
-{
- error_setg(errp, "SEV is not available in this QEMU");
- return NULL;
-}
-
void hmp_info_sev(Monitor *mon, const QDict *qdict)
{
monitor_printf(mon, "SEV is not available in this QEMU\n");
diff --git a/target/i386/sev.c b/target/i386/sev.c
index 7ee700d..1a12f06 100644
--- a/target/i386/sev.c
+++ b/target/i386/sev.c
@@ -37,7 +37,7 @@
#include "qom/object.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
-#include "qapi/qapi-commands-misc-target.h"
+#include "qapi/qapi-commands-misc-i386.h"
#include "confidential-guest.h"
#include "hw/i386/pc.h"
#include "system/address-spaces.h"
@@ -212,14 +212,6 @@ static const char *const sev_fw_errlist[] = {
#define SEV_FW_MAX_ERROR ARRAY_SIZE(sev_fw_errlist)
-/* <linux/kvm.h> doesn't expose this, so re-use the max from kvm.c */
-#define KVM_MAX_CPUID_ENTRIES 100
-
-typedef struct KvmCpuidInfo {
- struct kvm_cpuid2 cpuid;
- struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
-} KvmCpuidInfo;
-
#define SNP_CPUID_FUNCTION_MAXCOUNT 64
#define SNP_CPUID_FUNCTION_UNKNOWN 0xFFFFFFFF
@@ -947,7 +939,7 @@ out:
}
static uint32_t
-sev_snp_mask_cpuid_features(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index,
+sev_snp_adjust_cpuid_features(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index,
int reg, uint32_t value)
{
switch (feature) {
@@ -2405,7 +2397,7 @@ sev_snp_guest_class_init(ObjectClass *oc, const void *data)
klass->launch_finish = sev_snp_launch_finish;
klass->launch_update_data = sev_snp_launch_update_data;
klass->kvm_init = sev_snp_kvm_init;
- x86_klass->mask_cpuid_features = sev_snp_mask_cpuid_features;
+ x86_klass->adjust_cpuid_features = sev_snp_adjust_cpuid_features;
x86_klass->kvm_type = sev_snp_kvm_type;
object_class_property_add(oc, "policy", "uint64",
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
index 6b3f198..be011b0 100644
--- a/target/i386/tcg/helper-tcg.h
+++ b/target/i386/tcg/helper-tcg.h
@@ -97,7 +97,7 @@ static inline unsigned int compute_pf(uint8_t x)
/* misc_helper.c */
void cpu_load_eflags(CPUX86State *env, int eflags, int update_mask);
-/* sysemu/svm_helper.c */
+/* system/svm_helper.c */
#ifndef CONFIG_USER_ONLY
G_NORETURN void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code,
uint64_t exit_info_1, uintptr_t retaddr);
@@ -115,7 +115,7 @@ int exception_has_error_code(int intno);
/* smm_helper.c */
void do_smm_enter(X86CPU *cpu);
-/* sysemu/bpt_helper.c */
+/* system/bpt_helper.c */
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update);
/*
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index 179dfdf..6f5dc06 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -149,6 +149,12 @@ static void x86_cpu_exec_reset(CPUState *cs)
do_cpu_init(env_archcpu(env));
cs->exception_index = EXCP_HALTED;
}
+
+static vaddr x86_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
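+    /* Linear addresses don't wrap in 64-bit code segments; otherwise truncate to 32 bits. */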
+ return cpu_env(cs)->hflags & HF_CS64_MASK ? result : (uint32_t)result;
+}
#endif
const TCGCPUOps x86_tcg_ops = {
@@ -172,6 +178,7 @@ const TCGCPUOps x86_tcg_ops = {
.record_sigbus = x86_cpu_record_sigbus,
#else
.tlb_fill = x86_cpu_tlb_fill,
+ .pointer_wrap = x86_pointer_wrap,
.do_interrupt = x86_cpu_do_interrupt,
.cpu_exec_halt = x86_cpu_exec_halt,
.cpu_exec_interrupt = x86_cpu_exec_interrupt,
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
index f7535d1..abad84c 100644
--- a/target/loongarch/cpu.c
+++ b/target/loongarch/cpu.c
@@ -334,6 +334,12 @@ static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
return false;
}
+
+static vaddr loongarch_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ return is_va32(cpu_env(cs)) ? (uint32_t)result : result;
+}
#endif
static TCGTBCPUState loongarch_get_tb_cpu_state(CPUState *cs)
@@ -889,6 +895,7 @@ static const TCGCPUOps loongarch_tcg_ops = {
#ifndef CONFIG_USER_ONLY
.tlb_fill = loongarch_cpu_tlb_fill,
+ .pointer_wrap = loongarch_pointer_wrap,
.cpu_exec_interrupt = loongarch_cpu_exec_interrupt,
.cpu_exec_halt = loongarch_cpu_has_work,
.cpu_exec_reset = cpu_reset,
diff --git a/target/loongarch/kvm/kvm.c b/target/loongarch/kvm/kvm.c
index 1bda570..c66bdd5 100644
--- a/target/loongarch/kvm/kvm.c
+++ b/target/loongarch/kvm/kvm.c
@@ -1071,7 +1071,11 @@ static int kvm_cpu_check_pv_features(CPUState *cs, Error **errp)
env->pv_features |= BIT(KVM_FEATURE_VIRT_EXTIOI);
}
}
+ return 0;
+}
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
return 0;
}
diff --git a/target/loongarch/loongarch-qmp-cmds.c b/target/loongarch/loongarch-qmp-cmds.c
index 6f732d8..f5f1cd0 100644
--- a/target/loongarch/loongarch-qmp-cmds.c
+++ b/target/loongarch/loongarch-qmp-cmds.c
@@ -8,7 +8,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/qapi-commands-machine.h"
#include "cpu.h"
#include "qobject/qdict.h"
#include "qapi/qobject-input-visitor.h"
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index c5196a6..6a09db3 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -619,6 +619,7 @@ static const TCGCPUOps m68k_tcg_ops = {
#ifndef CONFIG_USER_ONLY
.tlb_fill = m68k_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = m68k_cpu_exec_interrupt,
.cpu_exec_halt = m68k_cpu_has_work,
.cpu_exec_reset = cpu_reset,
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index 615a959..ee0a869 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -447,6 +447,7 @@ static const TCGCPUOps mb_tcg_ops = {
#ifndef CONFIG_USER_ONLY
.tlb_fill = mb_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = mb_cpu_exec_interrupt,
.cpu_exec_halt = mb_cpu_has_work,
.cpu_exec_reset = cpu_reset,
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index 6ad8643..3ce28b3 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -248,7 +248,7 @@ struct CPUArchState {
uint32_t pc;
uint32_t msr; /* All bits of MSR except MSR[C] and MSR[CC] */
uint32_t msr_c; /* MSR[C], in low bit; other bits must be 0 */
- target_ulong ear;
+ uint64_t ear;
uint32_t esr;
uint32_t fsr;
uint32_t btr;
diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
index 9203192..ef0e2f9 100644
--- a/target/microblaze/helper.c
+++ b/target/microblaze/helper.c
@@ -26,8 +26,51 @@
#include "exec/target_page.h"
#include "qemu/host-utils.h"
#include "exec/log.h"
+#include "exec/helper-proto.h"
+
+
+G_NORETURN
+static void mb_unaligned_access_internal(CPUState *cs, uint64_t addr,
+ uintptr_t retaddr)
+{
+ CPUMBState *env = cpu_env(cs);
+ uint32_t esr, iflags;
+
+ /* Recover the pc and iflags from the corresponding insn_start. */
+ cpu_restore_state(cs, retaddr);
+ iflags = env->iflags;
+
+ qemu_log_mask(CPU_LOG_INT,
+ "Unaligned access addr=0x%" PRIx64 " pc=%x iflags=%x\n",
+ addr, env->pc, iflags);
+
+ esr = ESR_EC_UNALIGNED_DATA;
+ if (likely(iflags & ESR_ESS_FLAG)) {
+ esr |= iflags & ESR_ESS_MASK;
+ } else {
+ qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
+ }
+
+ env->ear = addr;
+ env->esr = esr;
+ cs->exception_index = EXCP_HW_EXCP;
+ cpu_loop_exit(cs);
+}
+
+void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ mb_unaligned_access_internal(cs, addr, retaddr);
+}
#ifndef CONFIG_USER_ONLY
+
+void HELPER(unaligned_access)(CPUMBState *env, uint64_t addr)
+{
+ mb_unaligned_access_internal(env_cpu(env), addr, GETPC());
+}
+
static bool mb_cpu_access_is_secure(MicroBlazeCPU *cpu,
MMUAccessType access_type)
{
@@ -269,31 +312,3 @@ bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
#endif /* !CONFIG_USER_ONLY */
-
-void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
- MMUAccessType access_type,
- int mmu_idx, uintptr_t retaddr)
-{
- MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
- uint32_t esr, iflags;
-
- /* Recover the pc and iflags from the corresponding insn_start. */
- cpu_restore_state(cs, retaddr);
- iflags = cpu->env.iflags;
-
- qemu_log_mask(CPU_LOG_INT,
- "Unaligned access addr=" TARGET_FMT_lx " pc=%x iflags=%x\n",
- (target_ulong)addr, cpu->env.pc, iflags);
-
- esr = ESR_EC_UNALIGNED_DATA;
- if (likely(iflags & ESR_ESS_FLAG)) {
- esr |= iflags & ESR_ESS_MASK;
- } else {
- qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
- }
-
- cpu->env.ear = addr;
- cpu->env.esr = esr;
- cs->exception_index = EXCP_HW_EXCP;
- cpu_loop_exit(cs);
-}
diff --git a/target/microblaze/helper.h b/target/microblaze/helper.h
index f740835..ef4fad9 100644
--- a/target/microblaze/helper.h
+++ b/target/microblaze/helper.h
@@ -20,12 +20,22 @@ DEF_HELPER_FLAGS_3(fcmp_ne, TCG_CALL_NO_WG, i32, env, i32, i32)
DEF_HELPER_FLAGS_3(fcmp_ge, TCG_CALL_NO_WG, i32, env, i32, i32)
DEF_HELPER_FLAGS_2(pcmpbf, TCG_CALL_NO_RWG_SE, i32, i32, i32)
-#if !defined(CONFIG_USER_ONLY)
-DEF_HELPER_FLAGS_3(mmu_read, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_4(mmu_write, TCG_CALL_NO_RWG, void, env, i32, i32, i32)
-#endif
-
DEF_HELPER_FLAGS_2(stackprot, TCG_CALL_NO_WG, void, env, tl)
-
DEF_HELPER_FLAGS_2(get, TCG_CALL_NO_RWG, i32, i32, i32)
DEF_HELPER_FLAGS_3(put, TCG_CALL_NO_RWG, void, i32, i32, i32)
+
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_FLAGS_3(mmu_read, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_4(mmu_write, TCG_CALL_NO_RWG, void, env, i32, i32, i32)
+DEF_HELPER_FLAGS_2(unaligned_access, TCG_CALL_NO_WG, noreturn, env, i64)
+DEF_HELPER_FLAGS_2(lbuea, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_2(lhuea_be, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_2(lhuea_le, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_2(lwea_be, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_2(lwea_le, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_3(sbea, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_3(shea_be, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_3(shea_le, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_3(swea_be, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_3(swea_le, TCG_CALL_NO_WG, void, env, i32, i64)
+#endif
diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c
index 95a12e1..8703ff5 100644
--- a/target/microblaze/mmu.c
+++ b/target/microblaze/mmu.c
@@ -172,7 +172,8 @@ unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu,
}
done:
qemu_log_mask(CPU_LOG_MMU,
- "MMU vaddr=%" PRIx64 " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
+ "MMU vaddr=0x" TARGET_FMT_lx
+ " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
vaddr, rw, tlb_wr, tlb_ex, hit);
return hit;
}
diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c
index 9e838df..b8365b3 100644
--- a/target/microblaze/op_helper.c
+++ b/target/microblaze/op_helper.c
@@ -382,6 +382,8 @@ void helper_stackprot(CPUMBState *env, target_ulong addr)
}
#if !defined(CONFIG_USER_ONLY)
+#include "system/memory.h"
+
/* Writes/reads to the MMU's special regs end up here. */
uint32_t helper_mmu_read(CPUMBState *env, uint32_t ext, uint32_t rn)
{
@@ -393,38 +395,90 @@ void helper_mmu_write(CPUMBState *env, uint32_t ext, uint32_t rn, uint32_t v)
mmu_write(env, ext, rn, v);
}
+static void mb_transaction_failed_internal(CPUState *cs, hwaddr physaddr,
+ uint64_t addr, unsigned size,
+ MMUAccessType access_type,
+ uintptr_t retaddr)
+{
+ CPUMBState *env = cpu_env(cs);
+ MicroBlazeCPU *cpu = env_archcpu(env);
+ const char *access_name = "INVALID";
+ bool take = env->msr & MSR_EE;
+ uint32_t esr = ESR_EC_DATA_BUS;
+
+ switch (access_type) {
+ case MMU_INST_FETCH:
+ access_name = "INST_FETCH";
+ esr = ESR_EC_INSN_BUS;
+ take &= cpu->cfg.iopb_bus_exception;
+ break;
+ case MMU_DATA_LOAD:
+ access_name = "DATA_LOAD";
+ take &= cpu->cfg.dopb_bus_exception;
+ break;
+ case MMU_DATA_STORE:
+ access_name = "DATA_STORE";
+ take &= cpu->cfg.dopb_bus_exception;
+ break;
+ }
+
+ qemu_log_mask(CPU_LOG_INT, "Transaction failed: addr 0x%" PRIx64
+ "physaddr 0x" HWADDR_FMT_plx " size %d access-type %s (%s)\n",
+ addr, physaddr, size, access_name,
+ take ? "TAKEN" : "DROPPED");
+
+ if (take) {
+ env->esr = esr;
+ env->ear = addr;
+ cs->exception_index = EXCP_HW_EXCP;
+ cpu_loop_exit_restore(cs, retaddr);
+ }
+}
+
void mb_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
unsigned size, MMUAccessType access_type,
int mmu_idx, MemTxAttrs attrs,
MemTxResult response, uintptr_t retaddr)
{
- MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
- CPUMBState *env = &cpu->env;
+ mb_transaction_failed_internal(cs, physaddr, addr, size,
+ access_type, retaddr);
+}
- qemu_log_mask(CPU_LOG_INT, "Transaction failed: vaddr 0x%" VADDR_PRIx
- " physaddr 0x" HWADDR_FMT_plx " size %d access type %s\n",
- addr, physaddr, size,
- access_type == MMU_INST_FETCH ? "INST_FETCH" :
- (access_type == MMU_DATA_LOAD ? "DATA_LOAD" : "DATA_STORE"));
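+/*
+ * The extended-address helpers below access the address space directly,
+ * bypassing the softmmu TLB, and report bus errors through
+ * mb_transaction_failed_internal().
+ */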
+#define LD_EA(NAME, TYPE, FUNC) \
+uint32_t HELPER(NAME)(CPUMBState *env, uint64_t ea) \
+{ \
+ CPUState *cs = env_cpu(env); \
+ MemTxResult txres; \
+ TYPE ret = FUNC(cs->as, ea, MEMTXATTRS_UNSPECIFIED, &txres); \
+ if (unlikely(txres != MEMTX_OK)) { \
+ mb_transaction_failed_internal(cs, ea, ea, sizeof(TYPE), \
+ MMU_DATA_LOAD, GETPC()); \
+ } \
+ return ret; \
+}
- if (!(env->msr & MSR_EE)) {
- return;
- }
+LD_EA(lbuea, uint8_t, address_space_ldub)
+LD_EA(lhuea_be, uint16_t, address_space_lduw_be)
+LD_EA(lhuea_le, uint16_t, address_space_lduw_le)
+LD_EA(lwea_be, uint32_t, address_space_ldl_be)
+LD_EA(lwea_le, uint32_t, address_space_ldl_le)
+
+#define ST_EA(NAME, TYPE, FUNC) \
+void HELPER(NAME)(CPUMBState *env, uint32_t data, uint64_t ea) \
+{ \
+ CPUState *cs = env_cpu(env); \
+ MemTxResult txres; \
+ FUNC(cs->as, ea, data, MEMTXATTRS_UNSPECIFIED, &txres); \
+ if (unlikely(txres != MEMTX_OK)) { \
+ mb_transaction_failed_internal(cs, ea, ea, sizeof(TYPE), \
+ MMU_DATA_STORE, GETPC()); \
+ } \
+}
- if (access_type == MMU_INST_FETCH) {
- if (!cpu->cfg.iopb_bus_exception) {
- return;
- }
- env->esr = ESR_EC_INSN_BUS;
- } else {
- if (!cpu->cfg.dopb_bus_exception) {
- return;
- }
- env->esr = ESR_EC_DATA_BUS;
- }
+ST_EA(sbea, uint8_t, address_space_stb)
+ST_EA(shea_be, uint16_t, address_space_stw_be)
+ST_EA(shea_le, uint16_t, address_space_stw_le)
+ST_EA(swea_be, uint32_t, address_space_stl_be)
+ST_EA(swea_le, uint32_t, address_space_stl_le)
- env->ear = addr;
- cs->exception_index = EXCP_HW_EXCP;
- cpu_loop_exit_restore(cs, retaddr);
-}
#endif
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 671b1ae..5098a1d 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -63,9 +63,6 @@ typedef struct DisasContext {
DisasContextBase base;
const MicroBlazeCPUConfig *cfg;
- TCGv_i32 r0;
- bool r0_set;
-
/* Decoder. */
uint32_t ext_imm;
unsigned int tb_flags;
@@ -179,14 +176,7 @@ static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
if (likely(reg != 0)) {
return cpu_R[reg];
}
- if (!dc->r0_set) {
- if (dc->r0 == NULL) {
- dc->r0 = tcg_temp_new_i32();
- }
- tcg_gen_movi_i32(dc->r0, 0);
- dc->r0_set = true;
- }
- return dc->r0;
+ return tcg_constant_i32(0);
}
static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
@@ -194,10 +184,7 @@ static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
if (likely(reg != 0)) {
return cpu_R[reg];
}
- if (dc->r0 == NULL) {
- dc->r0 = tcg_temp_new_i32();
- }
- return dc->r0;
+ return tcg_temp_new_i32();
}
static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
@@ -619,19 +606,18 @@ DO_TYPEBI(xori, false, tcg_gen_xori_i32)
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
- TCGv ret = tcg_temp_new();
+ TCGv ret;
/* If any of the regs is r0, set t to the value of the other reg. */
if (ra && rb) {
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
- tcg_gen_extu_i32_tl(ret, tmp);
+ ret = tcg_temp_new_i32();
+ tcg_gen_add_i32(ret, cpu_R[ra], cpu_R[rb]);
} else if (ra) {
- tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
+ ret = cpu_R[ra];
} else if (rb) {
- tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
+ ret = cpu_R[rb];
} else {
- tcg_gen_movi_tl(ret, 0);
+ ret = tcg_constant_i32(0);
}
if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
@@ -642,15 +628,16 @@ static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
- TCGv ret = tcg_temp_new();
+ TCGv ret;
/* If any of the regs is r0, set t to the value of the other reg. */
- if (ra) {
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
- tcg_gen_extu_i32_tl(ret, tmp);
+ if (ra && imm) {
+ ret = tcg_temp_new_i32();
+ tcg_gen_addi_i32(ret, cpu_R[ra], imm);
+ } else if (ra) {
+ ret = cpu_R[ra];
} else {
- tcg_gen_movi_tl(ret, (uint32_t)imm);
+ ret = tcg_constant_i32(imm);
}
if (ra == 1 && dc->cfg->stackprot) {
@@ -660,23 +647,23 @@ static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
}
#ifndef CONFIG_USER_ONLY
-static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
+static TCGv_i64 compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
int addr_size = dc->cfg->addr_size;
- TCGv ret = tcg_temp_new();
+ TCGv_i64 ret = tcg_temp_new_i64();
if (addr_size == 32 || ra == 0) {
if (rb) {
- tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
+ tcg_gen_extu_i32_i64(ret, cpu_R[rb]);
} else {
- tcg_gen_movi_tl(ret, 0);
+ return tcg_constant_i64(0);
}
} else {
if (rb) {
tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
} else {
- tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
- tcg_gen_shli_tl(ret, ret, 32);
+ tcg_gen_extu_i32_i64(ret, cpu_R[ra]);
+ tcg_gen_shli_i64(ret, ret, 32);
}
if (addr_size < 64) {
/* Mask off out of range bits. */
@@ -700,6 +687,20 @@ static void record_unaligned_ess(DisasContext *dc, int rd,
tcg_set_insn_start_param(dc->base.insn_start, 1, iflags);
}
+
+static void gen_alignment_check_ea(DisasContext *dc, TCGv_i64 ea, int rb,
+ int rd, MemOp size, bool store)
+{
+ if (rb && (dc->tb_flags & MSR_EE) && dc->cfg->unaligned_exceptions) {
+ TCGLabel *over = gen_new_label();
+
+ record_unaligned_ess(dc, rd, size, store);
+
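+        /* TSTEQ branches over the helper when (ea & (size - 1)) == 0, i.e. aligned. */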
+ tcg_gen_brcondi_i64(TCG_COND_TSTEQ, ea, (1 << size) - 1, over);
+ gen_helper_unaligned_access(tcg_env, ea);
+ gen_set_label(over);
+ }
+}
#endif
static inline MemOp mo_endian(DisasContext *dc)
@@ -765,10 +766,11 @@ static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
return true;
}
#ifdef CONFIG_USER_ONLY
- return true;
+ g_assert_not_reached();
#else
- TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
- return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
+ TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ gen_helper_lbuea(reg_for_write(dc, arg->rd), tcg_env, addr);
+ return true;
#endif
}
@@ -796,10 +798,13 @@ static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
return true;
}
#ifdef CONFIG_USER_ONLY
- return true;
+ g_assert_not_reached();
#else
- TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
- return do_load(dc, arg->rd, addr, MO_UW, MMU_NOMMU_IDX, false);
+ TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_16, false);
+ (mo_endian(dc) == MO_BE ? gen_helper_lhuea_be : gen_helper_lhuea_le)
+ (reg_for_write(dc, arg->rd), tcg_env, addr);
+ return true;
#endif
}
@@ -827,10 +832,13 @@ static bool trans_lwea(DisasContext *dc, arg_typea *arg)
return true;
}
#ifdef CONFIG_USER_ONLY
- return true;
+ g_assert_not_reached();
#else
- TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
- return do_load(dc, arg->rd, addr, MO_UL, MMU_NOMMU_IDX, false);
+ TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_32, false);
+ (mo_endian(dc) == MO_BE ? gen_helper_lwea_be : gen_helper_lwea_le)
+ (reg_for_write(dc, arg->rd), tcg_env, addr);
+ return true;
#endif
}
@@ -918,10 +926,11 @@ static bool trans_sbea(DisasContext *dc, arg_typea *arg)
return true;
}
#ifdef CONFIG_USER_ONLY
- return true;
+ g_assert_not_reached();
#else
- TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
- return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
+ TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ gen_helper_sbea(tcg_env, reg_for_read(dc, arg->rd), addr);
+ return true;
#endif
}
@@ -949,10 +958,13 @@ static bool trans_shea(DisasContext *dc, arg_typea *arg)
return true;
}
#ifdef CONFIG_USER_ONLY
- return true;
+ g_assert_not_reached();
#else
- TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
- return do_store(dc, arg->rd, addr, MO_UW, MMU_NOMMU_IDX, false);
+ TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_16, true);
+ (mo_endian(dc) == MO_BE ? gen_helper_shea_be : gen_helper_shea_le)
+ (tcg_env, reg_for_read(dc, arg->rd), addr);
+ return true;
#endif
}
@@ -980,10 +992,13 @@ static bool trans_swea(DisasContext *dc, arg_typea *arg)
return true;
}
#ifdef CONFIG_USER_ONLY
- return true;
+ g_assert_not_reached();
#else
- TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
- return do_store(dc, arg->rd, addr, MO_UL, MMU_NOMMU_IDX, false);
+ TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_32, true);
+ (mo_endian(dc) == MO_BE ? gen_helper_swea_be : gen_helper_swea_le)
+ (tcg_env, reg_for_read(dc, arg->rd), addr);
+ return true;
#endif
}
@@ -1607,8 +1622,6 @@ static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
dc->cfg = &cpu->cfg;
dc->tb_flags = dc->base.tb->flags;
dc->ext_imm = dc->base.tb->cs_base;
- dc->r0 = NULL;
- dc->r0_set = false;
dc->mem_index = cpu_mmu_index(cs, false);
dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
dc->jmp_dest = -1;
@@ -1647,11 +1660,6 @@ static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
trap_illegal(dc, true);
}
- if (dc->r0) {
- dc->r0 = NULL;
- dc->r0_set = false;
- }
-
/* Discard the imm global when its contents cannot be used. */
if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
tcg_gen_discard_i32(cpu_imm);
@@ -1829,7 +1837,7 @@ void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
- "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
+ "ear=0x%" PRIx64 " slr=0x%x shr=0x%x\n",
env->esr, env->fsr, env->btr, env->edr,
env->ear, env->slr, env->shr);
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index 4cbfb94..1f6c41f 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -560,6 +560,14 @@ static TCGTBCPUState mips_get_tb_cpu_state(CPUState *cs)
};
}
+#ifndef CONFIG_USER_ONLY
+static vaddr mips_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ return cpu_env(cs)->hflags & MIPS_HFLAG_AWRAP ? (int32_t)result : result;
+}
+#endif
+
static const TCGCPUOps mips_tcg_ops = {
.mttcg_supported = TARGET_LONG_BITS == 32,
.guest_default_memory_order = 0,
@@ -573,6 +581,7 @@ static const TCGCPUOps mips_tcg_ops = {
#if !defined(CONFIG_USER_ONLY)
.tlb_fill = mips_cpu_tlb_fill,
+ .pointer_wrap = mips_pointer_wrap,
.cpu_exec_interrupt = mips_cpu_exec_interrupt,
.cpu_exec_halt = mips_cpu_has_work,
.cpu_exec_reset = cpu_reset,
diff --git a/target/mips/kvm.c b/target/mips/kvm.c
index d67b7c1..ec53acb 100644
--- a/target/mips/kvm.c
+++ b/target/mips/kvm.c
@@ -61,6 +61,11 @@ int kvm_arch_irqchip_create(KVMState *s)
return 0;
}
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
CPUMIPSState *env = cpu_env(cs);
diff --git a/target/mips/system/mips-qmp-cmds.c b/target/mips/system/mips-qmp-cmds.c
index 7340ac7..d98d662 100644
--- a/target/mips/system/mips-qmp-cmds.c
+++ b/target/mips/system/mips-qmp-cmds.c
@@ -7,9 +7,19 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-machine.h"
#include "cpu.h"
+CpuModelExpansionInfo *
+qmp_query_cpu_model_expansion(CpuModelExpansionType type,
+ CpuModelInfo *model,
+ Error **errp)
+{
+ error_setg(errp, "CPU model expansion is not supported on this target");
+ return NULL;
+}
+
static void mips_cpu_add_definition(gpointer data, gpointer user_data)
{
ObjectClass *oc = data;
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index 054ad33..dfbb2df 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -265,6 +265,7 @@ static const TCGCPUOps openrisc_tcg_ops = {
#ifndef CONFIG_USER_ONLY
.tlb_fill = openrisc_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = openrisc_cpu_exec_interrupt,
.cpu_exec_halt = openrisc_cpu_has_work,
.cpu_exec_reset = cpu_reset,
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index 9642812..a0e77f2 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -7386,6 +7386,12 @@ static void ppc_cpu_exec_exit(CPUState *cs)
cpu->vhyp_class->cpu_exec_exit(cpu->vhyp, cpu);
}
}
+
+static vaddr ppc_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ return (cpu_env(cs)->hflags >> HFLAGS_64) & 1 ? result : (uint32_t)result;
+}
#endif /* CONFIG_TCG */
#endif /* !CONFIG_USER_ONLY */
@@ -7490,6 +7496,7 @@ static const TCGCPUOps ppc_tcg_ops = {
.record_sigsegv = ppc_cpu_record_sigsegv,
#else
.tlb_fill = ppc_cpu_tlb_fill,
+ .pointer_wrap = ppc_pointer_wrap,
.cpu_exec_interrupt = ppc_cpu_exec_interrupt,
.cpu_exec_halt = ppc_cpu_has_work,
.cpu_exec_reset = cpu_reset,
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 8a957c3..0156580 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -479,6 +479,11 @@ static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
}
}
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
diff --git a/target/ppc/ppc-qmp-cmds.c b/target/ppc/ppc-qmp-cmds.c
index a25d86a..7022564 100644
--- a/target/ppc/ppc-qmp-cmds.c
+++ b/target/ppc/ppc-qmp-cmds.c
@@ -28,7 +28,8 @@
#include "qemu/ctype.h"
#include "monitor/hmp-target.h"
#include "monitor/hmp.h"
-#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-machine.h"
#include "cpu-models.h"
#include "cpu-qom.h"
@@ -175,6 +176,15 @@ int target_get_monitor_def(CPUState *cs, const char *name, uint64_t *pval)
return -EINVAL;
}
+CpuModelExpansionInfo *
+qmp_query_cpu_model_expansion(CpuModelExpansionType type,
+ CpuModelInfo *model,
+ Error **errp)
+{
+ error_setg(errp, "CPU model expansion is not supported on this target");
+ return NULL;
+}
+
static void ppc_cpu_defs_entry(gpointer data, gpointer user_data)
{
ObjectClass *oc = data;
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
index efb41fa..e1a04be 100644
--- a/target/riscv/kvm/kvm-cpu.c
+++ b/target/riscv/kvm/kvm-cpu.c
@@ -1472,6 +1472,11 @@ static int kvm_vcpu_enable_sbi_dbcn(RISCVCPU *cpu, CPUState *cs)
return kvm_set_one_reg(cs, kvm_sbi_dbcn.kvm_reg_id, &reg);
}
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
int ret = 0;
diff --git a/target/riscv/riscv-qmp-cmds.c b/target/riscv/riscv-qmp-cmds.c
index d0a3243..8ba8aa0 100644
--- a/target/riscv/riscv-qmp-cmds.c
+++ b/target/riscv/riscv-qmp-cmds.c
@@ -25,7 +25,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/qapi-commands-machine.h"
#include "qobject/qbool.h"
#include "qobject/qdict.h"
#include "qapi/qobject-input-visitor.h"
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index 305912b..55fd9e5 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -237,6 +237,31 @@ static void riscv_restore_state_to_opc(CPUState *cs,
env->excp_uw2 = data[2];
}
+#ifndef CONFIG_USER_ONLY
+static vaddr riscv_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ CPURISCVState *env = cpu_env(cs);
+ uint32_t pm_len;
+ bool pm_signext;
+
+ if (cpu_address_xl(env) == MXL_RV32) {
+ return (uint32_t)result;
+ }
+
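+    /*
+     * Pointer masking ignores the top pm_len bits: sign-extend them when
+     * virtual memory addressing is in effect, zero-extend otherwise.
+     */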
+ pm_len = riscv_pm_get_pmlen(riscv_pm_get_pmm(env));
+ if (pm_len == 0) {
+ return result;
+ }
+
+ pm_signext = riscv_cpu_virt_mem_enabled(env);
+ if (pm_signext) {
+ return sextract64(result, 0, 64 - pm_len);
+ }
+ return extract64(result, 0, 64 - pm_len);
+}
+#endif
+
const TCGCPUOps riscv_tcg_ops = {
.mttcg_supported = true,
.guest_default_memory_order = 0,
@@ -250,6 +275,7 @@ const TCGCPUOps riscv_tcg_ops = {
#ifndef CONFIG_USER_ONLY
.tlb_fill = riscv_cpu_tlb_fill,
+ .pointer_wrap = riscv_pointer_wrap,
.cpu_exec_interrupt = riscv_cpu_exec_interrupt,
.cpu_exec_halt = riscv_cpu_has_work,
.cpu_exec_reset = cpu_reset,
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index 36eba75..c6dd5d6 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -225,6 +225,7 @@ static const TCGCPUOps rx_tcg_ops = {
.restore_state_to_opc = rx_restore_state_to_opc,
.mmu_index = rx_cpu_mmu_index,
.tlb_fill = rx_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = rx_cpu_exec_interrupt,
.cpu_exec_halt = rx_cpu_has_work,
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index 9c1158e..f05ce31 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -347,6 +347,14 @@ static TCGTBCPUState s390x_get_tb_cpu_state(CPUState *cs)
};
}
+#ifndef CONFIG_USER_ONLY
+static vaddr s390_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ return wrap_address(cpu_env(cs), result);
+}
+#endif
+
static const TCGCPUOps s390_tcg_ops = {
.mttcg_supported = true,
.precise_smc = true,
@@ -367,6 +375,7 @@ static const TCGCPUOps s390_tcg_ops = {
.record_sigbus = s390_cpu_record_sigbus,
#else
.tlb_fill = s390_cpu_tlb_fill,
+ .pointer_wrap = s390_pointer_wrap,
.cpu_exec_interrupt = s390_cpu_exec_interrupt,
.cpu_exec_halt = s390_cpu_has_work,
.cpu_exec_reset = cpu_reset,
diff --git a/target/s390x/cpu_models_system.c b/target/s390x/cpu_models_system.c
index 4351182..9d84faa 100644
--- a/target/s390x/cpu_models_system.c
+++ b/target/s390x/cpu_models_system.c
@@ -19,7 +19,7 @@
#include "qapi/visitor.h"
#include "qapi/qobject-input-visitor.h"
#include "qobject/qdict.h"
-#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/qapi-commands-machine.h"
static void list_add_feat(const char *name, void *opaque);
diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c
index 6cd2ebc..67d9a19 100644
--- a/target/s390x/kvm/kvm.c
+++ b/target/s390x/kvm/kvm.c
@@ -398,6 +398,11 @@ unsigned long kvm_arch_vcpu_id(CPUState *cpu)
return cpu->cpu_index;
}
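+/* s390x needs no per-vCPU configuration before the vCPU is created. */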
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index b35f18e..4f561e8 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -296,6 +296,7 @@ static const TCGCPUOps superh_tcg_ops = {
#ifndef CONFIG_USER_ONLY
.tlb_fill = superh_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_notreached,
.cpu_exec_interrupt = superh_cpu_exec_interrupt,
.cpu_exec_halt = superh_cpu_has_work,
.cpu_exec_reset = cpu_reset,
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index bf8828f..70fd13a 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -54,7 +54,7 @@ typedef struct DisasContext {
#define UNALIGN(C) (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN)
#else
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
-#define UNALIGN(C) 0
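+/* In system mode, SH-4 memory accesses are always alignment-checked. */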
+#define UNALIGN(C) MO_ALIGN
#endif
/* Target-specific values for ctx->base.is_jmp. */
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index 2a3e408..ed7701b 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -1002,6 +1002,18 @@ static const struct SysemuCPUOps sparc_sysemu_ops = {
#ifdef CONFIG_TCG
#include "accel/tcg/cpu-ops.h"
+#ifndef CONFIG_USER_ONLY
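+/*
+ * sparc64 masks addresses to 32 bits when PSTATE.AM is set;
+ * 32-bit SPARC always wraps at 32 bits.
+ */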
+static vaddr sparc_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+#ifdef TARGET_SPARC64
+ return cpu_env(cs)->pstate & PS_AM ? (uint32_t)result : result;
+#else
+ return (uint32_t)result;
+#endif
+}
+#endif
+
static const TCGCPUOps sparc_tcg_ops = {
/*
* From Oracle SPARC Architecture 2015:
@@ -1036,6 +1048,7 @@ static const TCGCPUOps sparc_tcg_ops = {
#ifndef CONFIG_USER_ONLY
.tlb_fill = sparc_cpu_tlb_fill,
+ .pointer_wrap = sparc_pointer_wrap,
.cpu_exec_interrupt = sparc_cpu_exec_interrupt,
.cpu_exec_halt = sparc_cpu_has_work,
.cpu_exec_reset = cpu_reset,
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
index a493341..29fd166 100644
--- a/target/sparc/fop_helper.c
+++ b/target/sparc/fop_helper.c
@@ -445,7 +445,6 @@ static uint32_t finish_fcmp(CPUSPARCState *env, FloatRelation r, uintptr_t ra)
case float_relation_greater:
return 2;
case float_relation_unordered:
- env->fsr |= FSR_NVA;
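+ /* Any invalid-operand exception was already accrued via check_ieee_exceptions(). */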
return 3;
}
g_assert_not_reached();
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index e56f90f..4f035b6 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -190,6 +190,7 @@ static const TCGCPUOps tricore_tcg_ops = {
.restore_state_to_opc = tricore_restore_state_to_opc,
.mmu_index = tricore_cpu_mmu_index,
.tlb_fill = tricore_cpu_tlb_fill,
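+ /* TriCore uses a flat 32-bit address space. */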
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = tricore_cpu_exec_interrupt,
.cpu_exec_halt = tricore_cpu_has_work,
.cpu_exec_reset = cpu_reset,
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index 91b71b6c..ea9b6df 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -318,6 +318,7 @@ static const TCGCPUOps xtensa_tcg_ops = {
#ifndef CONFIG_USER_ONLY
.tlb_fill = xtensa_cpu_tlb_fill,
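+ /* Xtensa virtual addresses are 32 bits wide. */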
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = xtensa_cpu_exec_interrupt,
.cpu_exec_halt = xtensa_cpu_has_work,
.cpu_exec_reset = cpu_reset,