Diffstat (limited to 'target')
-rw-r--r--  target/alpha/cpu.c | 19
-rw-r--r--  target/alpha/cpu.h | 11
-rw-r--r--  target/alpha/fpu_helper.c | 1
-rw-r--r--  target/alpha/int_helper.c | 1
-rw-r--r--  target/alpha/machine.c | 2
-rw-r--r--  target/alpha/mem_helper.c | 1
-rw-r--r--  target/alpha/translate.c | 1
-rw-r--r--  target/alpha/vax_helper.c | 1
-rw-r--r--  target/arm/arch_dump.c | 6
-rw-r--r--  target/arm/arm-qmp-cmds.c | 7
-rw-r--r--  target/arm/cpregs.h | 1
-rw-r--r--  target/arm/cpu-features.h | 318
-rw-r--r--  target/arm/cpu-param.h | 7
-rw-r--r--  target/arm/cpu-qom.h | 5
-rw-r--r--  target/arm/cpu-sysregs.h | 42
-rw-r--r--  target/arm/cpu-sysregs.h.inc | 36
-rw-r--r--  target/arm/cpu.c | 261
-rw-r--r--  target/arm/cpu.h | 125
-rw-r--r--  target/arm/cpu32-stubs.c | 26
-rw-r--r--  target/arm/cpu64.c | 210
-rw-r--r--  target/arm/debug_helper.c | 7
-rw-r--r--  target/arm/helper.c | 221
-rw-r--r--  target/arm/helper.h | 1152
-rw-r--r--  target/arm/hvf-stub.c | 20
-rw-r--r--  target/arm/hvf/hvf.c | 99
-rw-r--r--  target/arm/hvf/trace-events | 5
-rw-r--r--  target/arm/hvf_arm.h | 18
-rw-r--r--  target/arm/hyp_gdbstub.c | 6
-rw-r--r--  target/arm/internals.h | 25
-rw-r--r--  target/arm/kvm-stub.c | 97
-rw-r--r--  target/arm/kvm.c | 244
-rw-r--r--  target/arm/kvm_arm.h | 95
-rw-r--r--  target/arm/machine.c | 15
-rw-r--r--  target/arm/meson.build | 45
-rw-r--r--  target/arm/ptw.c | 79
-rw-r--r--  target/arm/tcg-stubs.c | 4
-rw-r--r--  target/arm/tcg/arith_helper.c | 5
-rw-r--r--  target/arm/tcg/cpu-v7m.c | 177
-rw-r--r--  target/arm/tcg/cpu32.c | 320
-rw-r--r--  target/arm/tcg/cpu64.c | 461
-rw-r--r--  target/arm/tcg/crypto_helper.c | 6
-rw-r--r--  target/arm/tcg/helper-a64.c | 3
-rw-r--r--  target/arm/tcg/helper.h | 1153
-rw-r--r--  target/arm/tcg/hflags.c | 121
-rw-r--r--  target/arm/tcg/iwmmxt_helper.c | 4
-rw-r--r--  target/arm/tcg/m_helper.c | 1
-rw-r--r--  target/arm/tcg/meson.build | 31
-rw-r--r--  target/arm/tcg/mte_helper.c | 6
-rw-r--r--  target/arm/tcg/mve_helper.c | 1
-rw-r--r--  target/arm/tcg/neon_helper.c | 4
-rw-r--r--  target/arm/tcg/op_helper.c | 4
-rw-r--r--  target/arm/tcg/pauth_helper.c | 1
-rw-r--r--  target/arm/tcg/sme_helper.c | 2
-rw-r--r--  target/arm/tcg/sve_helper.c | 3
-rw-r--r--  target/arm/tcg/tlb-insns.c | 7
-rw-r--r--  target/arm/tcg/tlb_helper.c | 6
-rw-r--r--  target/arm/tcg/translate-a64.c | 23
-rw-r--r--  target/arm/tcg/translate-sve.c | 2
-rw-r--r--  target/arm/tcg/translate.c | 24
-rw-r--r--  target/arm/tcg/translate.h | 2
-rw-r--r--  target/arm/tcg/vec_internal.h | 2
-rw-r--r--  target/arm/tcg/vfp_helper.c | 4
-rw-r--r--  target/avr/cpu.c | 27
-rw-r--r--  target/avr/cpu.h | 18
-rw-r--r--  target/avr/helper.c | 1
-rw-r--r--  target/avr/translate.c | 1
-rw-r--r--  target/hexagon/cpu.c | 21
-rw-r--r--  target/hexagon/cpu.h | 15
-rw-r--r--  target/hexagon/mmvec/macros.h | 1
-rw-r--r--  target/hexagon/op_helper.c | 2
-rw-r--r--  target/hppa/cpu.c | 17
-rw-r--r--  target/hppa/cpu.h | 3
-rw-r--r--  target/hppa/fpu_helper.c | 21
-rw-r--r--  target/hppa/helper.c | 1
-rw-r--r--  target/hppa/int_helper.c | 4
-rw-r--r--  target/hppa/machine.c | 2
-rw-r--r--  target/hppa/mem_helper.c | 2
-rw-r--r--  target/hppa/op_helper.c | 2
-rw-r--r--  target/hppa/sys_helper.c | 1
-rw-r--r--  target/hppa/translate.c | 18
-rw-r--r--  target/i386/confidential-guest.h | 44
-rw-r--r--  target/i386/cpu-system.c | 2
-rw-r--r--  target/i386/cpu.c | 601
-rw-r--r--  target/i386/cpu.h | 79
-rw-r--r--  target/i386/emulate/x86_decode.c | 93
-rw-r--r--  target/i386/emulate/x86_decode.h | 9
-rw-r--r--  target/i386/emulate/x86_emu.c | 129
-rw-r--r--  target/i386/emulate/x86_emu.h | 8
-rw-r--r--  target/i386/emulate/x86_flags.c | 200
-rw-r--r--  target/i386/emulate/x86_flags.h | 16
-rw-r--r--  target/i386/helper.c | 2
-rw-r--r--  target/i386/host-cpu.c | 2
-rw-r--r--  target/i386/host-cpu.h | 1
-rw-r--r--  target/i386/hvf/hvf.c | 1
-rw-r--r--  target/i386/hvf/x86_cpuid.c | 2
-rw-r--r--  target/i386/kvm/kvm.c | 156
-rw-r--r--  target/i386/kvm/kvm_i386.h | 15
-rw-r--r--  target/i386/kvm/meson.build | 2
-rw-r--r--  target/i386/kvm/tdx-quote-generator.c | 300
-rw-r--r--  target/i386/kvm/tdx-quote-generator.h | 82
-rw-r--r--  target/i386/kvm/tdx-stub.c | 28
-rw-r--r--  target/i386/kvm/tdx.c | 1487
-rw-r--r--  target/i386/kvm/tdx.h | 84
-rw-r--r--  target/i386/machine.c | 5
-rw-r--r--  target/i386/monitor.c | 1
-rw-r--r--  target/i386/ops_sse.h | 16
-rw-r--r--  target/i386/sev-system-stub.c | 32
-rw-r--r--  target/i386/sev.c | 14
-rw-r--r--  target/i386/tcg/access.c | 2
-rw-r--r--  target/i386/tcg/decode-new.c.inc | 36
-rw-r--r--  target/i386/tcg/emit.c.inc | 16
-rw-r--r--  target/i386/tcg/excp_helper.c | 1
-rw-r--r--  target/i386/tcg/fpu_helper.c | 101
-rw-r--r--  target/i386/tcg/helper-tcg.h | 5
-rw-r--r--  target/i386/tcg/int_helper.c | 1
-rw-r--r--  target/i386/tcg/mem_helper.c | 1
-rw-r--r--  target/i386/tcg/mpx_helper.c | 1
-rw-r--r--  target/i386/tcg/seg_helper.c | 83
-rw-r--r--  target/i386/tcg/system/bpt_helper.c | 1
-rw-r--r--  target/i386/tcg/system/excp_helper.c | 1
-rw-r--r--  target/i386/tcg/tcg-cpu.c | 41
-rw-r--r--  target/i386/tcg/translate.c | 37
-rw-r--r--  target/i386/tcg/user/excp_helper.c | 1
-rw-r--r--  target/i386/tcg/user/seg_helper.c | 1
-rw-r--r--  target/loongarch/README | 2
-rw-r--r--  target/loongarch/cpu.c | 27
-rw-r--r--  target/loongarch/cpu.h | 13
-rw-r--r--  target/loongarch/kvm/kvm.c | 27
-rw-r--r--  target/loongarch/loongarch-qmp-cmds.c | 2
-rw-r--r--  target/loongarch/tcg/fpu_helper.c | 1
-rw-r--r--  target/loongarch/tcg/insn_trans/trans_fcmp.c.inc | 25
-rw-r--r--  target/loongarch/tcg/insn_trans/trans_vec.c.inc | 29
-rw-r--r--  target/loongarch/tcg/iocsr_helper.c | 1
-rw-r--r--  target/loongarch/tcg/op_helper.c | 1
-rw-r--r--  target/loongarch/tcg/tlb_helper.c | 1
-rw-r--r--  target/loongarch/tcg/vec_helper.c | 1
-rw-r--r--  target/m68k/cpu.c | 25
-rw-r--r--  target/m68k/cpu.h | 16
-rw-r--r--  target/m68k/fpu_helper.c | 1
-rw-r--r--  target/m68k/helper.c | 1
-rw-r--r--  target/m68k/op_helper.c | 1
-rw-r--r--  target/m68k/translate.c | 1
-rw-r--r--  target/microblaze/cpu.c | 40
-rw-r--r--  target/microblaze/cpu.h | 10
-rw-r--r--  target/microblaze/helper.c | 71
-rw-r--r--  target/microblaze/helper.h | 22
-rw-r--r--  target/microblaze/machine.c | 2
-rw-r--r--  target/microblaze/mmu.c | 3
-rw-r--r--  target/microblaze/op_helper.c | 105
-rw-r--r--  target/microblaze/translate.c | 139
-rw-r--r--  target/mips/cpu.c | 23
-rw-r--r--  target/mips/cpu.h | 9
-rw-r--r--  target/mips/kvm.c | 5
-rw-r--r--  target/mips/system/mips-qmp-cmds.c | 12
-rw-r--r--  target/mips/system/physaddr.c | 1
-rw-r--r--  target/mips/tcg/exception.c | 1
-rw-r--r--  target/mips/tcg/fpu_helper.c | 1
-rw-r--r--  target/mips/tcg/ldst_helper.c | 1
-rw-r--r--  target/mips/tcg/msa_helper.c | 2
-rw-r--r--  target/mips/tcg/op_helper.c | 1
-rw-r--r--  target/mips/tcg/system/special_helper.c | 1
-rw-r--r--  target/mips/tcg/system/tlb_helper.c | 1
-rw-r--r--  target/openrisc/cpu.c | 19
-rw-r--r--  target/openrisc/cpu.h | 10
-rw-r--r--  target/openrisc/exception.c | 1
-rw-r--r--  target/openrisc/exception_helper.c | 1
-rw-r--r--  target/openrisc/fpu_helper.c | 1
-rw-r--r--  target/openrisc/interrupt.c | 1
-rw-r--r--  target/openrisc/interrupt_helper.c | 1
-rw-r--r--  target/openrisc/machine.c | 2
-rw-r--r--  target/openrisc/sys_helper.c | 3
-rw-r--r--  target/openrisc/translate.c | 4
-rw-r--r--  target/ppc/cpu.h | 13
-rw-r--r--  target/ppc/cpu_init.c | 14
-rw-r--r--  target/ppc/excp_helper.c | 1
-rw-r--r--  target/ppc/fpu_helper.c | 1
-rw-r--r--  target/ppc/helper_regs.c | 19
-rw-r--r--  target/ppc/internal.h | 3
-rw-r--r--  target/ppc/kvm.c | 5
-rw-r--r--  target/ppc/machine.c | 1
-rw-r--r--  target/ppc/mem_helper.c | 3
-rw-r--r--  target/ppc/misc_helper.c | 1
-rw-r--r--  target/ppc/mmu-hash32.c | 1
-rw-r--r--  target/ppc/mmu-hash64.c | 1
-rw-r--r--  target/ppc/mmu-radix64.c | 1
-rw-r--r--  target/ppc/mmu_common.c | 1
-rw-r--r--  target/ppc/mmu_helper.c | 1
-rw-r--r--  target/ppc/power8-pmu.c | 1
-rw-r--r--  target/ppc/ppc-qmp-cmds.c | 12
-rw-r--r--  target/ppc/tcg-excp_helper.c | 1
-rw-r--r--  target/ppc/timebase_helper.c | 1
-rw-r--r--  target/ppc/translate.c | 12
-rw-r--r--  target/ppc/user_only_helper.c | 1
-rw-r--r--  target/riscv/cpu-qom.h | 2
-rw-r--r--  target/riscv/cpu.c | 1011
-rw-r--r--  target/riscv/cpu.h | 61
-rw-r--r--  target/riscv/cpu_cfg.h | 178
-rw-r--r--  target/riscv/cpu_cfg_fields.h.inc | 170
-rw-r--r--  target/riscv/cpu_helper.c | 104
-rw-r--r--  target/riscv/crypto_helper.c | 1
-rw-r--r--  target/riscv/csr.c | 291
-rw-r--r--  target/riscv/debug.c | 1
-rw-r--r--  target/riscv/fpu_helper.c | 1
-rw-r--r--  target/riscv/gdbstub.c | 6
-rw-r--r--  target/riscv/insn32.decode | 18
-rw-r--r--  target/riscv/insn_trans/trans_rvbf16.c.inc | 9
-rw-r--r--  target/riscv/insn_trans/trans_rvv.c.inc | 644
-rw-r--r--  target/riscv/internals.h | 5
-rw-r--r--  target/riscv/kvm/kvm-cpu.c | 365
-rw-r--r--  target/riscv/m128_helper.c | 1
-rw-r--r--  target/riscv/machine.c | 2
-rw-r--r--  target/riscv/op_helper.c | 15
-rw-r--r--  target/riscv/pmp.c | 147
-rw-r--r--  target/riscv/riscv-qmp-cmds.c | 2
-rw-r--r--  target/riscv/tcg/tcg-cpu.c | 140
-rw-r--r--  target/riscv/th_csr.c | 30
-rw-r--r--  target/riscv/translate.c | 8
-rw-r--r--  target/riscv/vcrypto_helper.c | 1
-rw-r--r--  target/riscv/vector_helper.c | 65
-rw-r--r--  target/riscv/zce_helper.c | 1
-rw-r--r--  target/rx/cpu.c | 17
-rw-r--r--  target/rx/cpu.h | 9
-rw-r--r--  target/rx/op_helper.c | 1
-rw-r--r--  target/rx/translate.c | 1
-rw-r--r--  target/s390x/cpu.c | 27
-rw-r--r--  target/s390x/cpu.h | 11
-rw-r--r--  target/s390x/cpu_features_def.h.inc | 2
-rw-r--r--  target/s390x/cpu_models.c | 4
-rw-r--r--  target/s390x/cpu_models_system.c | 5
-rw-r--r--  target/s390x/gen-features.c | 17
-rw-r--r--  target/s390x/interrupt.c | 1
-rw-r--r--  target/s390x/ioinst.c | 11
-rw-r--r--  target/s390x/kvm/kvm.c | 11
-rw-r--r--  target/s390x/kvm/pv.c | 66
-rw-r--r--  target/s390x/kvm/pv.h | 26
-rw-r--r--  target/s390x/mmu_helper.c | 1
-rw-r--r--  target/s390x/sigp.c | 1
-rw-r--r--  target/s390x/tcg/cc_helper.c | 1
-rw-r--r--  target/s390x/tcg/crypto_helper.c | 1
-rw-r--r--  target/s390x/tcg/excp_helper.c | 1
-rw-r--r--  target/s390x/tcg/fpu_helper.c | 1
-rw-r--r--  target/s390x/tcg/int_helper.c | 1
-rw-r--r--  target/s390x/tcg/mem_helper.c | 3
-rw-r--r--  target/s390x/tcg/misc_helper.c | 1
-rw-r--r--  target/s390x/tcg/translate.c | 7
-rw-r--r--  target/s390x/tcg/vec_fpu_helper.c | 1
-rw-r--r--  target/s390x/tcg/vec_helper.c | 1
-rw-r--r--  target/sh4/cpu.c | 30
-rw-r--r--  target/sh4/cpu.h | 15
-rw-r--r--  target/sh4/helper.c | 1
-rw-r--r--  target/sh4/op_helper.c | 1
-rw-r--r--  target/sh4/translate.c | 39
-rw-r--r--  target/sparc/cpu.c | 33
-rw-r--r--  target/sparc/cpu.h | 3
-rw-r--r--  target/sparc/fop_helper.c | 2
-rw-r--r--  target/sparc/helper.c | 1
-rw-r--r--  target/sparc/ldst_helper.c | 1
-rw-r--r--  target/sparc/machine.c | 1
-rw-r--r--  target/sparc/translate.c | 4
-rw-r--r--  target/sparc/win_helper.c | 1
-rw-r--r--  target/tricore/cpu.c | 17
-rw-r--r--  target/tricore/cpu.h | 12
-rw-r--r--  target/tricore/op_helper.c | 1
-rw-r--r--  target/tricore/translate.c | 13
-rw-r--r--  target/xtensa/core-dc232b/gdb-config.c.inc | 5
-rw-r--r--  target/xtensa/core-dc232b/xtensa-modules.c.inc | 5
-rw-r--r--  target/xtensa/core-fsf/xtensa-modules.c.inc | 5
-rw-r--r--  target/xtensa/cpu.c | 80
-rw-r--r--  target/xtensa/cpu.h | 68
-rw-r--r--  target/xtensa/dbg_helper.c | 1
-rw-r--r--  target/xtensa/exc_helper.c | 1
-rw-r--r--  target/xtensa/fpu_helper.c | 1
-rw-r--r--  target/xtensa/mmu_helper.c | 2
-rw-r--r--  target/xtensa/op_helper.c | 1
-rw-r--r--  target/xtensa/translate.c | 1
-rw-r--r--  target/xtensa/win_helper.c | 1
276 files changed, 8747 insertions, 5352 deletions
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index 27e2008..2082db4 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -23,9 +23,9 @@
#include "qapi/error.h"
#include "qemu/qemu-print.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "exec/target_page.h"
+#include "accel/tcg/cpu-ops.h"
#include "fpu/softfloat.h"
@@ -41,6 +41,18 @@ static vaddr alpha_cpu_get_pc(CPUState *cs)
return env->pc;
}
+static TCGTBCPUState alpha_get_tb_cpu_state(CPUState *cs)
+{
+ CPUAlphaState *env = cpu_env(cs);
+ uint32_t flags = env->flags & ENV_FLAG_TB_MASK;
+
+#ifdef CONFIG_USER_ONLY
+ flags |= TB_FLAG_UNALIGN * !cs->prctl_unalign_sigbus;
+#endif
+
+ return (TCGTBCPUState){ .pc = env->pc, .flags = flags };
+}
+
static void alpha_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -232,8 +244,6 @@ static const struct SysemuCPUOps alpha_sysemu_ops = {
};
#endif
-#include "accel/tcg/cpu-ops.h"
-
static const TCGCPUOps alpha_tcg_ops = {
/* Alpha processors have a weak memory model */
.guest_default_memory_order = 0,
@@ -241,6 +251,7 @@ static const TCGCPUOps alpha_tcg_ops = {
.initialize = alpha_translate_init,
.translate_code = alpha_translate_code,
+ .get_tb_cpu_state = alpha_get_tb_cpu_state,
.synchronize_from_tb = alpha_cpu_synchronize_from_tb,
.restore_state_to_opc = alpha_restore_state_to_opc,
.mmu_index = alpha_cpu_mmu_index,
@@ -250,8 +261,10 @@ static const TCGCPUOps alpha_tcg_ops = {
.record_sigbus = alpha_cpu_record_sigbus,
#else
.tlb_fill = alpha_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_notreached,
.cpu_exec_interrupt = alpha_cpu_exec_interrupt,
.cpu_exec_halt = alpha_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = alpha_cpu_do_interrupt,
.do_transaction_failed = alpha_cpu_do_transaction_failed,
.do_unaligned_access = alpha_cpu_do_unaligned_access,
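The hunks above replace the old inline cpu_get_tb_cpu_state() (removed from cpu.h below) with a TCGCPUOps::get_tb_cpu_state hook, which is why "accel/tcg/cpu-ops.h" moves up to the top of the file. Note the branchless idiom for the user-only flag: multiplying a mask by a 0/1 boolean ORs it in without a conditional. A minimal standalone sketch of that idiom, using an illustrative mask value rather than Alpha's real one:

    #include <stdbool.h>
    #include <stdint.h>

    #define TB_FLAG_UNALIGN 0x1000u  /* illustrative value only */

    static uint32_t tb_flags(uint32_t base, bool unalign_raises_sigbus)
    {
        /* mask * !cond adds the mask only when cond is false, branch-free */
        return base | (TB_FLAG_UNALIGN * !unalign_raises_sigbus);
    }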
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
index 849f673..45944e4 100644
--- a/target/alpha/cpu.h
+++ b/target/alpha/cpu.h
@@ -464,17 +464,6 @@ void alpha_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
MemTxResult response, uintptr_t retaddr);
#endif
-static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags)
-{
- *pc = env->pc;
- *cs_base = 0;
- *pflags = env->flags & ENV_FLAG_TB_MASK;
-#ifdef CONFIG_USER_ONLY
- *pflags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
-#endif
-}
-
#ifdef CONFIG_USER_ONLY
/* Copied from linux ieee_swcr_to_fpcr. */
static inline uint64_t alpha_ieee_swcr_to_fpcr(uint64_t swcr)
diff --git a/target/alpha/fpu_helper.c b/target/alpha/fpu_helper.c
index 6aefb9b..30f3c7f 100644
--- a/target/alpha/fpu_helper.c
+++ b/target/alpha/fpu_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
diff --git a/target/alpha/int_helper.c b/target/alpha/int_helper.c
index 5672696..6bfe635 100644
--- a/target/alpha/int_helper.c
+++ b/target/alpha/int_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
diff --git a/target/alpha/machine.c b/target/alpha/machine.c
index f09834f..5f302b1 100644
--- a/target/alpha/machine.c
+++ b/target/alpha/machine.c
@@ -74,7 +74,7 @@ static const VMStateDescription vmstate_env = {
};
static const VMStateField vmstate_cpu_fields[] = {
- VMSTATE_CPU(),
+ VMSTATE_STRUCT(parent_obj, AlphaCPU, 0, vmstate_cpu_common, CPUState),
VMSTATE_STRUCT(env, AlphaCPU, 1, vmstate_env, CPUAlphaState),
VMSTATE_END_OF_LIST()
};
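VMSTATE_CPU() has historically been a convenience wrapper expanding to essentially this VMSTATE_STRUCT over the embedded parent CPUState, so spelling it out should leave the migration stream unchanged; that equivalence is inferred from the wrapper's usual definition, not stated in this patch.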
diff --git a/target/alpha/mem_helper.c b/target/alpha/mem_helper.c
index a4d5adb..2113fe3 100644
--- a/target/alpha/mem_helper.c
+++ b/target/alpha/mem_helper.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
static void do_unaligned_access(CPUAlphaState *env, vaddr addr, uintptr_t retaddr)
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index 7f3195a..cebab03 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -21,7 +21,6 @@
#include "cpu.h"
#include "system/cpus.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
diff --git a/target/alpha/vax_helper.c b/target/alpha/vax_helper.c
index f94fb51..c1d201e 100644
--- a/target/alpha/vax_helper.c
+++ b/target/alpha/vax_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
diff --git a/target/arm/arch_dump.c b/target/arm/arch_dump.c
index c40df4e..1dd7984 100644
--- a/target/arm/arch_dump.c
+++ b/target/arm/arch_dump.c
@@ -143,7 +143,6 @@ static int aarch64_write_elf64_prfpreg(WriteCoreDumpFunction f,
return 0;
}
-#ifdef TARGET_AARCH64
static off_t sve_zreg_offset(uint32_t vq, int n)
{
off_t off = sizeof(struct aarch64_user_sve_header);
@@ -231,7 +230,6 @@ static int aarch64_write_elf64_sve(WriteCoreDumpFunction f,
return 0;
}
-#endif
int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, DumpState *s)
@@ -273,11 +271,9 @@ int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
return ret;
}
-#ifdef TARGET_AARCH64
if (cpu_isar_feature(aa64_sve, cpu)) {
ret = aarch64_write_elf64_sve(f, env, cpuid, s);
}
-#endif
return ret;
}
@@ -451,11 +447,9 @@ ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
if (class == ELFCLASS64) {
note_size = AARCH64_PRSTATUS_NOTE_SIZE;
note_size += AARCH64_PRFPREG_NOTE_SIZE;
-#ifdef TARGET_AARCH64
if (cpu_isar_feature(aa64_sve, cpu)) {
note_size += AARCH64_SVE_NOTE_SIZE(&cpu->env);
}
-#endif
} else {
note_size = ARM_PRSTATUS_NOTE_SIZE;
if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
diff --git a/target/arm/arm-qmp-cmds.c b/target/arm/arm-qmp-cmds.c
index 883c0a0..cefd235 100644
--- a/target/arm/arm-qmp-cmds.c
+++ b/target/arm/arm-qmp-cmds.c
@@ -26,10 +26,11 @@
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qapi/qobject-input-visitor.h"
-#include "qapi/qapi-commands-machine-target.h"
-#include "qapi/qapi-commands-misc-target.h"
+#include "qapi/qapi-commands-machine.h"
+#include "qapi/qapi-commands-misc-arm.h"
#include "qobject/qdict.h"
#include "qom/qom-qobject.h"
+#include "cpu.h"
static GICCapability *gic_cap_new(int version)
{
@@ -46,7 +47,7 @@ static inline void gic_cap_kvm_probe(GICCapability *v2, GICCapability *v3)
#ifdef CONFIG_KVM
int fdarray[3];
- if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, NULL)) {
+ if (!kvm_arm_create_scratch_host_vcpu(fdarray, NULL)) {
return;
}
diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
index 2183de8..c1a7ae3 100644
--- a/target/arm/cpregs.h
+++ b/target/arm/cpregs.h
@@ -23,6 +23,7 @@
#include "hw/registerfields.h"
#include "target/arm/kvm-consts.h"
+#include "cpu.h"
/*
* ARMCPRegInfo type field bits:
diff --git a/target/arm/cpu-features.h b/target/arm/cpu-features.h
index 525e4ce..5d8adfb 100644
--- a/target/arm/cpu-features.h
+++ b/target/arm/cpu-features.h
@@ -22,6 +22,8 @@
#include "hw/registerfields.h"
#include "qemu/host-utils.h"
+#include "cpu.h"
+#include "cpu-sysregs.h"
/*
* Naming convention for isar_feature functions:
@@ -44,103 +46,103 @@
*/
static inline bool isar_feature_aa32_thumb_div(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR0, DIVIDE) != 0;
}
static inline bool isar_feature_aa32_arm_div(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1;
+ return FIELD_EX32_IDREG(id, ID_ISAR0, DIVIDE) > 1;
}
static inline bool isar_feature_aa32_lob(const ARMISARegisters *id)
{
/* (M-profile) low-overhead loops and branch future */
- return FIELD_EX32(id->id_isar0, ID_ISAR0, CMPBRANCH) >= 3;
+ return FIELD_EX32_IDREG(id, ID_ISAR0, CMPBRANCH) >= 3;
}
static inline bool isar_feature_aa32_jazelle(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR1, JAZELLE) != 0;
}
static inline bool isar_feature_aa32_aes(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR5, AES) != 0;
}
static inline bool isar_feature_aa32_pmull(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) > 1;
+ return FIELD_EX32_IDREG(id, ID_ISAR5, AES) > 1;
}
static inline bool isar_feature_aa32_sha1(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA1) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR5, SHA1) != 0;
}
static inline bool isar_feature_aa32_sha2(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA2) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR5, SHA2) != 0;
}
static inline bool isar_feature_aa32_crc32(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar5, ID_ISAR5, CRC32) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR5, CRC32) != 0;
}
static inline bool isar_feature_aa32_rdm(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar5, ID_ISAR5, RDM) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR5, RDM) != 0;
}
static inline bool isar_feature_aa32_vcma(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar5, ID_ISAR5, VCMA) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR5, VCMA) != 0;
}
static inline bool isar_feature_aa32_jscvt(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar6, ID_ISAR6, JSCVT) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR6, JSCVT) != 0;
}
static inline bool isar_feature_aa32_dp(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR6, DP) != 0;
}
static inline bool isar_feature_aa32_fhm(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar6, ID_ISAR6, FHM) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR6, FHM) != 0;
}
static inline bool isar_feature_aa32_sb(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar6, ID_ISAR6, SB) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR6, SB) != 0;
}
static inline bool isar_feature_aa32_predinv(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar6, ID_ISAR6, SPECRES) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR6, SPECRES) != 0;
}
static inline bool isar_feature_aa32_bf16(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar6, ID_ISAR6, BF16) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR6, BF16) != 0;
}
static inline bool isar_feature_aa32_i8mm(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar6, ID_ISAR6, I8MM) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR6, I8MM) != 0;
}
static inline bool isar_feature_aa32_ras(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_pfr0, ID_PFR0, RAS) != 0;
+ return FIELD_EX32_IDREG(id, ID_PFR0, RAS) != 0;
}
static inline bool isar_feature_aa32_mprofile(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_pfr1, ID_PFR1, MPROGMOD) != 0;
+ return FIELD_EX32_IDREG(id, ID_PFR1, MPROGMOD) != 0;
}
static inline bool isar_feature_aa32_m_sec_state(const ARMISARegisters *id)
@@ -149,7 +151,7 @@ static inline bool isar_feature_aa32_m_sec_state(const ARMISARegisters *id)
* Return true if M-profile state handling insns
* (VSCCLRM, CLRM, FPCTX access insns) are implemented
*/
- return FIELD_EX32(id->id_pfr1, ID_PFR1, SECURITY) >= 3;
+ return FIELD_EX32_IDREG(id, ID_PFR1, SECURITY) >= 3;
}
static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
@@ -282,88 +284,88 @@ static inline bool isar_feature_aa32_vminmaxnm(const ARMISARegisters *id)
static inline bool isar_feature_aa32_pxn(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_mmfr0, ID_MMFR0, VMSA) >= 4;
+ return FIELD_EX32_IDREG(id, ID_MMFR0, VMSA) >= 4;
}
static inline bool isar_feature_aa32_pan(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) != 0;
+ return FIELD_EX32_IDREG(id, ID_MMFR3, PAN) != 0;
}
static inline bool isar_feature_aa32_ats1e1(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) >= 2;
+ return FIELD_EX32_IDREG(id, ID_MMFR3, PAN) >= 2;
}
static inline bool isar_feature_aa32_pmuv3p1(const ARMISARegisters *id)
{
/* 0xf means "non-standard IMPDEF PMU" */
- return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
- FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
+ return FIELD_EX32_IDREG(id, ID_DFR0, PERFMON) >= 4 &&
+ FIELD_EX32_IDREG(id, ID_DFR0, PERFMON) != 0xf;
}
static inline bool isar_feature_aa32_pmuv3p4(const ARMISARegisters *id)
{
/* 0xf means "non-standard IMPDEF PMU" */
- return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 5 &&
- FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
+ return FIELD_EX32_IDREG(id, ID_DFR0, PERFMON) >= 5 &&
+ FIELD_EX32_IDREG(id, ID_DFR0, PERFMON) != 0xf;
}
static inline bool isar_feature_aa32_pmuv3p5(const ARMISARegisters *id)
{
/* 0xf means "non-standard IMPDEF PMU" */
- return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 6 &&
- FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
+ return FIELD_EX32_IDREG(id, ID_DFR0, PERFMON) >= 6 &&
+ FIELD_EX32_IDREG(id, ID_DFR0, PERFMON) != 0xf;
}
static inline bool isar_feature_aa32_hpd(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_mmfr4, ID_MMFR4, HPDS) != 0;
+ return FIELD_EX32_IDREG(id, ID_MMFR4, HPDS) != 0;
}
static inline bool isar_feature_aa32_ac2(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_mmfr4, ID_MMFR4, AC2) != 0;
+ return FIELD_EX32_IDREG(id, ID_MMFR4, AC2) != 0;
}
static inline bool isar_feature_aa32_ccidx(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_mmfr4, ID_MMFR4, CCIDX) != 0;
+ return FIELD_EX32_IDREG(id, ID_MMFR4, CCIDX) != 0;
}
static inline bool isar_feature_aa32_tts2uxn(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_mmfr4, ID_MMFR4, XNX) != 0;
+ return FIELD_EX32_IDREG(id, ID_MMFR4, XNX) != 0;
}
static inline bool isar_feature_aa32_half_evt(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_mmfr4, ID_MMFR4, EVT) >= 1;
+ return FIELD_EX32_IDREG(id, ID_MMFR4, EVT) >= 1;
}
static inline bool isar_feature_aa32_evt(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_mmfr4, ID_MMFR4, EVT) >= 2;
+ return FIELD_EX32_IDREG(id, ID_MMFR4, EVT) >= 2;
}
static inline bool isar_feature_aa32_dit(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_pfr0, ID_PFR0, DIT) != 0;
+ return FIELD_EX32_IDREG(id, ID_PFR0, DIT) != 0;
}
static inline bool isar_feature_aa32_ssbs(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_pfr2, ID_PFR2, SSBS) != 0;
+ return FIELD_EX32_IDREG(id, ID_PFR2, SSBS) != 0;
}
static inline bool isar_feature_aa32_debugv7p1(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_dfr0, ID_DFR0, COPDBG) >= 5;
+ return FIELD_EX32_IDREG(id, ID_DFR0, COPDBG) >= 5;
}
static inline bool isar_feature_aa32_debugv8p2(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_dfr0, ID_DFR0, COPDBG) >= 8;
+ return FIELD_EX32_IDREG(id, ID_DFR0, COPDBG) >= 8;
}
static inline bool isar_feature_aa32_doublelock(const ARMISARegisters *id)
@@ -376,107 +378,107 @@ static inline bool isar_feature_aa32_doublelock(const ARMISARegisters *id)
*/
static inline bool isar_feature_aa64_aes(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, AES) != 0;
}
static inline bool isar_feature_aa64_pmull(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) > 1;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, AES) > 1;
}
static inline bool isar_feature_aa64_sha1(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, SHA1) != 0;
}
static inline bool isar_feature_aa64_sha256(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, SHA2) != 0;
}
static inline bool isar_feature_aa64_sha512(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, SHA2) > 1;
}
static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, CRC32) != 0;
}
static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, ATOMIC) != 0;
}
static inline bool isar_feature_aa64_rdm(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RDM) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, RDM) != 0;
}
static inline bool isar_feature_aa64_sha3(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, SHA3) != 0;
}
static inline bool isar_feature_aa64_sm3(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM3) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, SM3) != 0;
}
static inline bool isar_feature_aa64_sm4(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM4) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, SM4) != 0;
}
static inline bool isar_feature_aa64_dp(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, DP) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, DP) != 0;
}
static inline bool isar_feature_aa64_fhm(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, FHM) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, FHM) != 0;
}
static inline bool isar_feature_aa64_condm_4(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, TS) != 0;
}
static inline bool isar_feature_aa64_condm_5(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, TS) >= 2;
}
static inline bool isar_feature_aa64_rndr(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RNDR) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, RNDR) != 0;
}
static inline bool isar_feature_aa64_tlbirange(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) == 2;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, TLB) == 2;
}
static inline bool isar_feature_aa64_tlbios(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, TLB) != 0;
}
static inline bool isar_feature_aa64_jscvt(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, JSCVT) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, JSCVT) != 0;
}
static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, FCMA) != 0;
}
static inline bool isar_feature_aa64_xs(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, XS) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, XS) != 0;
}
/*
@@ -500,9 +502,9 @@ isar_feature_pauth_feature(const ARMISARegisters *id)
* Architecturally, only one of {APA,API,APA3} may be active (non-zero)
* and the other two must be zero. Thus we may avoid conditionals.
*/
- return (FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) |
- FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, API) |
- FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, APA3));
+ return (FIELD_EX64_IDREG(id, ID_AA64ISAR1, APA) |
+ FIELD_EX64_IDREG(id, ID_AA64ISAR1, API) |
+ FIELD_EX64_IDREG(id, ID_AA64ISAR2, APA3));
}
static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id)
@@ -520,7 +522,7 @@ static inline bool isar_feature_aa64_pauth_qarma5(const ARMISARegisters *id)
* Return true if pauth is enabled with the architected QARMA5 algorithm.
* QEMU will always enable or disable both APA and GPA.
*/
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, APA) != 0;
}
static inline bool isar_feature_aa64_pauth_qarma3(const ARMISARegisters *id)
@@ -529,144 +531,144 @@ static inline bool isar_feature_aa64_pauth_qarma3(const ARMISARegisters *id)
* Return true if pauth is enabled with the architected QARMA3 algorithm.
* QEMU will always enable or disable both APA3 and GPA3.
*/
- return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, APA3) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR2, APA3) != 0;
}
static inline bool isar_feature_aa64_sb(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, SB) != 0;
}
static inline bool isar_feature_aa64_predinv(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SPECRES) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, SPECRES) != 0;
}
static inline bool isar_feature_aa64_frint(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FRINTTS) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, FRINTTS) != 0;
}
static inline bool isar_feature_aa64_dcpop(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, DPB) != 0;
}
static inline bool isar_feature_aa64_dcpodp(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, DPB) >= 2;
}
static inline bool isar_feature_aa64_bf16(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, BF16) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, BF16) != 0;
}
static inline bool isar_feature_aa64_ebf16(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, BF16) > 1;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, BF16) > 1;
}
static inline bool isar_feature_aa64_rcpc_8_3(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, LRCPC) != 0;
}
static inline bool isar_feature_aa64_rcpc_8_4(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, LRCPC) >= 2;
}
static inline bool isar_feature_aa64_i8mm(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, I8MM) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, I8MM) != 0;
}
static inline bool isar_feature_aa64_wfxt(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, WFXT) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR2, WFXT) >= 2;
}
static inline bool isar_feature_aa64_hbc(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, BC) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR2, BC) != 0;
}
static inline bool isar_feature_aa64_mops(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, MOPS);
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR2, MOPS);
}
static inline bool isar_feature_aa64_rpres(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, RPRES);
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR2, RPRES);
}
static inline bool isar_feature_aa64_fp_simd(const ARMISARegisters *id)
{
/* We always set the AdvSIMD and FP fields identically. */
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) != 0xf;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, FP) != 0xf;
}
static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id)
{
/* We always set the AdvSIMD and FP fields identically wrt FP16. */
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, FP) == 1;
}
static inline bool isar_feature_aa64_aa32(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL0) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, EL0) >= 2;
}
static inline bool isar_feature_aa64_aa32_el1(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL1) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, EL1) >= 2;
}
static inline bool isar_feature_aa64_aa32_el2(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL2) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, EL2) >= 2;
}
static inline bool isar_feature_aa64_ras(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RAS) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, RAS) != 0;
}
static inline bool isar_feature_aa64_doublefault(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RAS) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, RAS) >= 2;
}
static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, SVE) != 0;
}
static inline bool isar_feature_aa64_sel2(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SEL2) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, SEL2) != 0;
}
static inline bool isar_feature_aa64_rme(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RME) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, RME) != 0;
}
static inline bool isar_feature_aa64_dit(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, DIT) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, DIT) != 0;
}
static inline bool isar_feature_aa64_scxtnum(const ARMISARegisters *id)
{
- int key = FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, CSV2);
+ int key = FIELD_EX64_IDREG(id, ID_AA64PFR0, CSV2);
if (key >= 2) {
return true; /* FEAT_CSV2_2 */
}
if (key == 1) {
- key = FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, CSV2_FRAC);
+ key = FIELD_EX64_IDREG(id, ID_AA64PFR1, CSV2_FRAC);
return key >= 2; /* FEAT_CSV2_1p2 */
}
return false;
@@ -674,320 +676,320 @@ static inline bool isar_feature_aa64_scxtnum(const ARMISARegisters *id)
static inline bool isar_feature_aa64_ssbs(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SSBS) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR1, SSBS) != 0;
}
static inline bool isar_feature_aa64_bti(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR1, BT) != 0;
}
static inline bool isar_feature_aa64_mte_insn_reg(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR1, MTE) != 0;
}
static inline bool isar_feature_aa64_mte(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR1, MTE) >= 2;
}
static inline bool isar_feature_aa64_mte3(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) >= 3;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR1, MTE) >= 3;
}
static inline bool isar_feature_aa64_sme(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SME) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR1, SME) != 0;
}
static inline bool isar_feature_aa64_nmi(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, NMI) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR1, NMI) != 0;
}
static inline bool isar_feature_aa64_tgran4_lpa2(const ARMISARegisters *id)
{
- return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 1;
+ return FIELD_SEX64_IDREG(id, ID_AA64MMFR0, TGRAN4) >= 1;
}
static inline bool isar_feature_aa64_tgran4_2_lpa2(const ARMISARegisters *id)
{
- unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4_2);
+ unsigned t = FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN4_2);
return t >= 3 || (t == 0 && isar_feature_aa64_tgran4_lpa2(id));
}
static inline bool isar_feature_aa64_tgran16_lpa2(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN16) >= 2;
}
static inline bool isar_feature_aa64_tgran16_2_lpa2(const ARMISARegisters *id)
{
- unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16_2);
+ unsigned t = FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN16_2);
return t >= 3 || (t == 0 && isar_feature_aa64_tgran16_lpa2(id));
}
static inline bool isar_feature_aa64_tgran4(const ARMISARegisters *id)
{
- return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 0;
+ return FIELD_SEX64_IDREG(id, ID_AA64MMFR0, TGRAN4) >= 0;
}
static inline bool isar_feature_aa64_tgran16(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16) >= 1;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN16) >= 1;
}
static inline bool isar_feature_aa64_tgran64(const ARMISARegisters *id)
{
- return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64) >= 0;
+ return FIELD_SEX64_IDREG(id, ID_AA64MMFR0, TGRAN64) >= 0;
}
static inline bool isar_feature_aa64_tgran4_2(const ARMISARegisters *id)
{
- unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4_2);
+ unsigned t = FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN4_2);
return t >= 2 || (t == 0 && isar_feature_aa64_tgran4(id));
}
static inline bool isar_feature_aa64_tgran16_2(const ARMISARegisters *id)
{
- unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16_2);
+ unsigned t = FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN16_2);
return t >= 2 || (t == 0 && isar_feature_aa64_tgran16(id));
}
static inline bool isar_feature_aa64_tgran64_2(const ARMISARegisters *id)
{
- unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64_2);
+ unsigned t = FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN64_2);
return t >= 2 || (t == 0 && isar_feature_aa64_tgran64(id));
}
static inline bool isar_feature_aa64_fgt(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, FGT) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR0, FGT) != 0;
}
static inline bool isar_feature_aa64_ecv_traps(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, ECV) > 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR0, ECV) > 0;
}
static inline bool isar_feature_aa64_ecv(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, ECV) > 1;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR0, ECV) > 1;
}
static inline bool isar_feature_aa64_vh(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, VH) != 0;
}
static inline bool isar_feature_aa64_lor(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, LO) != 0;
}
static inline bool isar_feature_aa64_pan(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, PAN) != 0;
}
static inline bool isar_feature_aa64_ats1e1(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, PAN) >= 2;
}
static inline bool isar_feature_aa64_pan3(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 3;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, PAN) >= 3;
}
static inline bool isar_feature_aa64_hcx(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HCX) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, HCX) != 0;
}
static inline bool isar_feature_aa64_afp(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, AFP) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, AFP) != 0;
}
static inline bool isar_feature_aa64_tidcp1(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, TIDCP1) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, TIDCP1) != 0;
}
static inline bool isar_feature_aa64_cmow(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, CMOW) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, CMOW) != 0;
}
static inline bool isar_feature_aa64_hafs(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HAFDBS) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, HAFDBS) != 0;
}
static inline bool isar_feature_aa64_hdbs(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HAFDBS) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, HAFDBS) >= 2;
}
static inline bool isar_feature_aa64_tts2uxn(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, XNX) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, XNX) != 0;
}
static inline bool isar_feature_aa64_uao(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, UAO) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, UAO) != 0;
}
static inline bool isar_feature_aa64_st(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, ST) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, ST) != 0;
}
static inline bool isar_feature_aa64_lse2(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, AT) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, AT) != 0;
}
static inline bool isar_feature_aa64_fwb(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, FWB) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, FWB) != 0;
}
static inline bool isar_feature_aa64_ids(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, IDS) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, IDS) != 0;
}
static inline bool isar_feature_aa64_half_evt(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, EVT) >= 1;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, EVT) >= 1;
}
static inline bool isar_feature_aa64_evt(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, EVT) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, EVT) >= 2;
}
static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, CCIDX) != 0;
}
static inline bool isar_feature_aa64_lva(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, VARANGE) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, VARANGE) != 0;
}
static inline bool isar_feature_aa64_e0pd(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, E0PD) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, E0PD) != 0;
}
static inline bool isar_feature_aa64_nv(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, NV) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, NV) != 0;
}
static inline bool isar_feature_aa64_nv2(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, NV) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, NV) >= 2;
}
static inline bool isar_feature_aa64_pmuv3p1(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 4 &&
- FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
+ return FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) >= 4 &&
+ FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) != 0xf;
}
static inline bool isar_feature_aa64_pmuv3p4(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 5 &&
- FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
+ return FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) >= 5 &&
+ FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) != 0xf;
}
static inline bool isar_feature_aa64_pmuv3p5(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 6 &&
- FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
+ return FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) >= 6 &&
+ FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) != 0xf;
}
static inline bool isar_feature_aa64_debugv8p2(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, DEBUGVER) >= 8;
+ return FIELD_EX64_IDREG(id, ID_AA64DFR0, DEBUGVER) >= 8;
}
static inline bool isar_feature_aa64_doublelock(const ARMISARegisters *id)
{
- return FIELD_SEX64(id->id_aa64dfr0, ID_AA64DFR0, DOUBLELOCK) >= 0;
+ return FIELD_SEX64_IDREG(id, ID_AA64DFR0, DOUBLELOCK) >= 0;
}
static inline bool isar_feature_aa64_sve2(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SVEVER) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, SVEVER) != 0;
}
static inline bool isar_feature_aa64_sve2_aes(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, AES) != 0;
}
static inline bool isar_feature_aa64_sve2_pmull128(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, AES) >= 2;
}
static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, BITPERM) != 0;
}
static inline bool isar_feature_aa64_sve_bf16(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BFLOAT16) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, BFLOAT16) != 0;
}
static inline bool isar_feature_aa64_sve2_sha3(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SHA3) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, SHA3) != 0;
}
static inline bool isar_feature_aa64_sve2_sm4(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SM4) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, SM4) != 0;
}
static inline bool isar_feature_aa64_sve_i8mm(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, I8MM) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, I8MM) != 0;
}
static inline bool isar_feature_aa64_sve_f32mm(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F32MM) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, F32MM) != 0;
}
static inline bool isar_feature_aa64_sve_f64mm(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F64MM) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, F64MM) != 0;
}
static inline bool isar_feature_aa64_sme_f64f64(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, F64F64);
+ return FIELD_EX64_IDREG(id, ID_AA64SMFR0, F64F64);
}
static inline bool isar_feature_aa64_sme_i16i64(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, I16I64) == 0xf;
+ return FIELD_EX64_IDREG(id, ID_AA64SMFR0, I16I64) == 0xf;
}
static inline bool isar_feature_aa64_sme_fa64(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, FA64);
+ return FIELD_EX64_IDREG(id, ID_AA64SMFR0, FA64);
}
/*
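Every accessor above moves from naming a struct member directly (e.g. id->id_isar0, id->id_aa64isar1) to the FIELD_EX32_IDREG()/FIELD_EX64_IDREG() form, which pairs with the new index enum introduced in cpu-sysregs.h below. The macro definitions are not part of this hunk; a plausible sketch of their shape, assuming the ID registers now live in one array indexed by <REG>_EL1_IDX and wrapping the existing FIELD_EX*() helpers:

    /* Hypothetical definitions; the real ones may differ in detail. */
    #define FIELD_EX32_IDREG(ISAR, REG, FIELD) \
        FIELD_EX32((ISAR)->idregs[REG ## _EL1_IDX], REG, FIELD)

    #define FIELD_EX64_IDREG(ISAR, REG, FIELD) \
        FIELD_EX64((ISAR)->idregs[REG ## _EL1_IDX], REG, FIELD)

    #define FIELD_SEX64_IDREG(ISAR, REG, FIELD) \
        FIELD_SEX64((ISAR)->idregs[REG ## _EL1_IDX], REG, FIELD)

The point of the indirection is presumably to let every feature test stay source-compatible while the ID registers themselves are stored and populated through one uniform array.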
diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
index 5c5bc8a..8b46c7c 100644
--- a/target/arm/cpu-param.h
+++ b/target/arm/cpu-param.h
@@ -17,14 +17,9 @@
#endif
#ifdef CONFIG_USER_ONLY
-# ifdef TARGET_AARCH64
-# define TARGET_TAGGED_ADDRESSES
-# ifdef __FreeBSD__
-# define TARGET_PAGE_BITS 12
-# else
+# if defined(TARGET_AARCH64) && defined(CONFIG_LINUX)
/* Allow user-only to vary page size from 4k */
# define TARGET_PAGE_BITS_VARY
-# endif
# else
# define TARGET_PAGE_BITS 12
# endif
diff --git a/target/arm/cpu-qom.h b/target/arm/cpu-qom.h
index b497667..2fcb0e1 100644
--- a/target/arm/cpu-qom.h
+++ b/target/arm/cpu-qom.h
@@ -28,11 +28,6 @@ OBJECT_DECLARE_CPU_TYPE(ARMCPU, ARMCPUClass, ARM_CPU)
#define TYPE_ARM_MAX_CPU "max-" TYPE_ARM_CPU
-#define TYPE_AARCH64_CPU "aarch64-cpu"
-typedef struct AArch64CPUClass AArch64CPUClass;
-DECLARE_CLASS_CHECKERS(AArch64CPUClass, AARCH64_CPU,
- TYPE_AARCH64_CPU)
-
#define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU
#define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX)
diff --git a/target/arm/cpu-sysregs.h b/target/arm/cpu-sysregs.h
new file mode 100644
index 0000000..7877a3b
--- /dev/null
+++ b/target/arm/cpu-sysregs.h
@@ -0,0 +1,42 @@
+/*
+ * Definitions for Arm ID system registers
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef ARM_CPU_SYSREGS_H
+#define ARM_CPU_SYSREGS_H
+
+/*
+ * Following is similar to the coprocessor regs encodings, but with an argument
+ * ordering that matches the ARM ARM. We also reuse the various CP_REG_ defines
+ * that actually are the same as the equivalent KVM_REG_ values.
+ */
+#define ENCODE_ID_REG(op0, op1, crn, crm, op2) \
+ (((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) | \
+ ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) | \
+ ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) | \
+ ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) | \
+ ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT))
+
+#define DEF(NAME, OP0, OP1, CRN, CRM, OP2) NAME##_IDX,
+
+typedef enum ARMIDRegisterIdx {
+#include "cpu-sysregs.h.inc"
+ NUM_ID_IDX,
+} ARMIDRegisterIdx;
+
+#undef DEF
+#define DEF(NAME, OP0, OP1, CRN, CRM, OP2) \
+ SYS_##NAME = ENCODE_ID_REG(OP0, OP1, CRN, CRM, OP2),
+
+typedef enum ARMSysRegs {
+#include "cpu-sysregs.h.inc"
+} ARMSysRegs;
+
+#undef DEF
+
+extern const uint32_t id_register_sysreg[NUM_ID_IDX];
+
+int get_sysreg_idx(ARMSysRegs sysreg);
+
+#endif /* ARM_CPU_SYSREGS_H */
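cpu-sysregs.h uses the classic X-macro pattern: cpu-sysregs.h.inc (added next) is included twice with different definitions of DEF(), first to generate the <NAME>_IDX index enum and then the SYS_<NAME> encoding enum. The id_register_sysreg[] table declared above can be generated the same way; a sketch of how the corresponding .c side might do it (assumed here, not shown in this patch):

    /* Sketch: a third expansion of the same .inc, mapping index -> encoding. */
    #define DEF(NAME, OP0, OP1, CRN, CRM, OP2) \
        [NAME ## _IDX] = ENCODE_ID_REG(OP0, OP1, CRN, CRM, OP2),

    const uint32_t id_register_sysreg[NUM_ID_IDX] = {
    #include "cpu-sysregs.h.inc"
    };

    #undef DEF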
diff --git a/target/arm/cpu-sysregs.h.inc b/target/arm/cpu-sysregs.h.inc
new file mode 100644
index 0000000..cb99286
--- /dev/null
+++ b/target/arm/cpu-sysregs.h.inc
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+DEF(ID_AA64PFR0_EL1, 3, 0, 0, 4, 0)
+DEF(ID_AA64PFR1_EL1, 3, 0, 0, 4, 1)
+DEF(ID_AA64SMFR0_EL1, 3, 0, 0, 4, 5)
+DEF(ID_AA64DFR0_EL1, 3, 0, 0, 5, 0)
+DEF(ID_AA64DFR1_EL1, 3, 0, 0, 5, 1)
+DEF(ID_AA64ISAR0_EL1, 3, 0, 0, 6, 0)
+DEF(ID_AA64ISAR1_EL1, 3, 0, 0, 6, 1)
+DEF(ID_AA64ISAR2_EL1, 3, 0, 0, 6, 2)
+DEF(ID_AA64MMFR0_EL1, 3, 0, 0, 7, 0)
+DEF(ID_AA64MMFR1_EL1, 3, 0, 0, 7, 1)
+DEF(ID_AA64MMFR2_EL1, 3, 0, 0, 7, 2)
+DEF(ID_AA64MMFR3_EL1, 3, 0, 0, 7, 3)
+DEF(ID_PFR0_EL1, 3, 0, 0, 1, 0)
+DEF(ID_PFR1_EL1, 3, 0, 0, 1, 1)
+DEF(ID_DFR0_EL1, 3, 0, 0, 1, 2)
+DEF(ID_MMFR0_EL1, 3, 0, 0, 1, 4)
+DEF(ID_MMFR1_EL1, 3, 0, 0, 1, 5)
+DEF(ID_MMFR2_EL1, 3, 0, 0, 1, 6)
+DEF(ID_MMFR3_EL1, 3, 0, 0, 1, 7)
+DEF(ID_ISAR0_EL1, 3, 0, 0, 2, 0)
+DEF(ID_ISAR1_EL1, 3, 0, 0, 2, 1)
+DEF(ID_ISAR2_EL1, 3, 0, 0, 2, 2)
+DEF(ID_ISAR3_EL1, 3, 0, 0, 2, 3)
+DEF(ID_ISAR4_EL1, 3, 0, 0, 2, 4)
+DEF(ID_ISAR5_EL1, 3, 0, 0, 2, 5)
+DEF(ID_MMFR4_EL1, 3, 0, 0, 2, 6)
+DEF(ID_ISAR6_EL1, 3, 0, 0, 2, 7)
+DEF(MVFR0_EL1, 3, 0, 0, 3, 0)
+DEF(MVFR1_EL1, 3, 0, 0, 3, 1)
+DEF(MVFR2_EL1, 3, 0, 0, 3, 2)
+DEF(ID_PFR2_EL1, 3, 0, 0, 3, 4)
+DEF(ID_DFR1_EL1, 3, 0, 0, 3, 5)
+DEF(ID_MMFR5_EL1, 3, 0, 0, 3, 6)
+DEF(ID_AA64ZFR0_EL1, 3, 0, 0, 4, 4)
+DEF(CTR_EL0, 3, 3, 0, 0, 1)
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 5e95167..ebac86f 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -23,6 +23,7 @@
#include "qemu/timer.h"
#include "qemu/log.h"
#include "exec/page-vary.h"
+#include "exec/tswap.h"
#include "target/arm/idau.h"
#include "qemu/module.h"
#include "qapi/error.h"
@@ -33,7 +34,6 @@
#endif /* CONFIG_TCG */
#include "internals.h"
#include "cpu-features.h"
-#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
@@ -1099,37 +1099,6 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
}
}
-static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
-{
-#ifdef CONFIG_KVM
- ARMCPU *cpu = opaque;
- CPUARMState *env = &cpu->env;
- CPUState *cs = CPU(cpu);
- uint32_t linestate_bit;
- int irq_id;
-
- switch (irq) {
- case ARM_CPU_IRQ:
- irq_id = KVM_ARM_IRQ_CPU_IRQ;
- linestate_bit = CPU_INTERRUPT_HARD;
- break;
- case ARM_CPU_FIQ:
- irq_id = KVM_ARM_IRQ_CPU_FIQ;
- linestate_bit = CPU_INTERRUPT_FIQ;
- break;
- default:
- g_assert_not_reached();
- }
-
- if (level) {
- env->irq_line_state |= linestate_bit;
- } else {
- env->irq_line_state &= ~linestate_bit;
- }
- kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
-#endif
-}
-
static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -1203,7 +1172,7 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
info->endian = BFD_ENDIAN_LITTLE;
if (bswap_code(sctlr_b)) {
- info->endian = TARGET_BIG_ENDIAN ? BFD_ENDIAN_LITTLE : BFD_ENDIAN_BIG;
+ info->endian = target_big_endian() ? BFD_ENDIAN_LITTLE : BFD_ENDIAN_BIG;
}
info->flags &= ~INSN_ARM_BE32;
#ifndef CONFIG_USER_ONLY
@@ -1213,8 +1182,6 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
#endif
}
-#ifdef TARGET_AARCH64
-
static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -1372,15 +1339,6 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
}
-#else
-
-static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
- g_assert_not_reached();
-}
-
-#endif
-
static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -1542,6 +1500,7 @@ static void arm_cpu_initfn(Object *obj)
* 0 means "unset, use the default value". That default might vary depending
* on the CPU type, and is set in the realize fn.
*/
+#ifndef CONFIG_USER_ONLY
static const Property arm_cpu_gt_cntfrq_property =
DEFINE_PROP_UINT64("cntfrq", ARMCPU, gt_cntfrq_hz, 0);
@@ -1551,7 +1510,6 @@ static const Property arm_cpu_reset_cbar_property =
static const Property arm_cpu_reset_hivecs_property =
DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);
-#ifndef CONFIG_USER_ONLY
static const Property arm_cpu_has_el2_property =
DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);
@@ -1574,6 +1532,7 @@ static const Property arm_cpu_has_neon_property =
static const Property arm_cpu_has_dsp_property =
DEFINE_PROP_BOOL("dsp", ARMCPU, has_dsp, true);
+#ifndef CONFIG_USER_ONLY
static const Property arm_cpu_has_mpu_property =
DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);
@@ -1586,6 +1545,7 @@ static const Property arm_cpu_pmsav7_dregion_property =
DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
pmsav7_dregion,
qdev_prop_uint32, uint32_t);
+#endif
static bool arm_get_pmu(Object *obj, Error **errp)
{
@@ -1610,6 +1570,35 @@ static void arm_set_pmu(Object *obj, bool value, Error **errp)
cpu->has_pmu = value;
}
+static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
+}
+
+static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ /*
+ * At this time, this property is only allowed if KVM is enabled. This
+ * restriction allows us to avoid fixing up functionality that assumes a
+ * uniform execution state like do_interrupt.
+ */
+ if (value == false) {
+ if (!kvm_enabled() || !kvm_arm_aarch32_supported()) {
+ error_setg(errp, "'aarch64' feature cannot be disabled "
+ "unless KVM is enabled and 32-bit EL1 "
+ "is supported");
+ return;
+ }
+ unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ } else {
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ }
+}
+
unsigned int gt_cntfrq_period_ns(ARMCPU *cpu)
{
/*
@@ -1726,7 +1715,7 @@ static void arm_cpu_propagate_feature_implications(ARMCPU *cpu)
}
}
-void arm_cpu_post_init(Object *obj)
+static void arm_cpu_post_init(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
@@ -1737,6 +1726,14 @@ void arm_cpu_post_init(Object *obj)
*/
arm_cpu_propagate_feature_implications(cpu);
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ object_property_add_bool(obj, "aarch64", aarch64_cpu_get_aarch64,
+ aarch64_cpu_set_aarch64);
+ object_property_set_description(obj, "aarch64",
+ "Set on/off to enable/disable aarch64 "
+ "execution state ");
+ }
+#ifndef CONFIG_USER_ONLY
if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property);
@@ -1752,7 +1749,6 @@ void arm_cpu_post_init(Object *obj)
OBJ_PROP_FLAG_READWRITE);
}
-#ifndef CONFIG_USER_ONLY
if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
/* Add the has_el3 state CPU property only if EL3 is allowed. This will
* prevent "has_el3" from existing on CPUs which cannot support EL3.
@@ -1824,6 +1820,7 @@ void arm_cpu_post_init(Object *obj)
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_dsp_property);
}
+#ifndef CONFIG_USER_ONLY
if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property);
if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
@@ -1860,8 +1857,6 @@ void arm_cpu_post_init(Object *obj)
&cpu->psci_conduit,
OBJ_PROP_FLAG_READWRITE);
- qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);
-
if (arm_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER)) {
qdev_property_add_static(DEVICE(cpu), &arm_cpu_gt_cntfrq_property);
}
@@ -1870,7 +1865,6 @@ void arm_cpu_post_init(Object *obj)
kvm_arm_add_vcpu_properties(cpu);
}
-#ifndef CONFIG_USER_ONLY
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) &&
cpu_isar_feature(aa64_mte, cpu)) {
object_property_add_link(obj, "tag-memory",
@@ -1888,6 +1882,7 @@ void arm_cpu_post_init(Object *obj)
}
}
#endif
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);
}
static void arm_cpu_finalizefn(Object *obj)
@@ -1919,7 +1914,6 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
{
Error *local_err = NULL;
-#ifdef TARGET_AARCH64
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
arm_cpu_sve_finalize(cpu, &local_err);
if (local_err != NULL) {
@@ -1955,7 +1949,6 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
return;
}
}
-#endif
if (kvm_enabled()) {
kvm_arm_steal_time_finalize(cpu, &local_err);
@@ -1970,6 +1963,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
ARMCPU *cpu = ARM_CPU(dev);
+ ARMISARegisters *isar = &cpu->isar;
ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
CPUARMState *env = &cpu->env;
Error *local_err = NULL;
@@ -2127,21 +2121,16 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
}
if (!cpu->has_vfp) {
- uint64_t t;
uint32_t u;
- t = cpu->isar.id_aa64isar1;
- t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0);
- cpu->isar.id_aa64isar1 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64ISAR1, JSCVT, 0);
- t = cpu->isar.id_aa64pfr0;
- t = FIELD_DP64(t, ID_AA64PFR0, FP, 0xf);
- cpu->isar.id_aa64pfr0 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, FP, 0xf);
- u = cpu->isar.id_isar6;
+ u = GET_IDREG(isar, ID_ISAR6);
u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0);
u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
- cpu->isar.id_isar6 = u;
+ SET_IDREG(isar, ID_ISAR6, u);
u = cpu->isar.mvfr0;
u = FIELD_DP32(u, MVFR0, FPSP, 0);
@@ -2175,7 +2164,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
unset_feature(env, ARM_FEATURE_NEON);
- t = cpu->isar.id_aa64isar0;
+ t = GET_IDREG(isar, ID_AA64ISAR0);
t = FIELD_DP64(t, ID_AA64ISAR0, AES, 0);
t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 0);
t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 0);
@@ -2183,32 +2172,30 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 0);
t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 0);
t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0);
- cpu->isar.id_aa64isar0 = t;
+ SET_IDREG(isar, ID_AA64ISAR0, t);
- t = cpu->isar.id_aa64isar1;
+ t = GET_IDREG(isar, ID_AA64ISAR1);
t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 0);
t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 0);
- cpu->isar.id_aa64isar1 = t;
+ SET_IDREG(isar, ID_AA64ISAR1, t);
- t = cpu->isar.id_aa64pfr0;
- t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf);
- cpu->isar.id_aa64pfr0 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, ADVSIMD, 0xf);
- u = cpu->isar.id_isar5;
+ u = GET_IDREG(isar, ID_ISAR5);
u = FIELD_DP32(u, ID_ISAR5, AES, 0);
u = FIELD_DP32(u, ID_ISAR5, SHA1, 0);
u = FIELD_DP32(u, ID_ISAR5, SHA2, 0);
u = FIELD_DP32(u, ID_ISAR5, RDM, 0);
u = FIELD_DP32(u, ID_ISAR5, VCMA, 0);
- cpu->isar.id_isar5 = u;
+ SET_IDREG(isar, ID_ISAR5, u);
- u = cpu->isar.id_isar6;
+ u = GET_IDREG(isar, ID_ISAR6);
u = FIELD_DP32(u, ID_ISAR6, DP, 0);
u = FIELD_DP32(u, ID_ISAR6, FHM, 0);
u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
u = FIELD_DP32(u, ID_ISAR6, I8MM, 0);
- cpu->isar.id_isar6 = u;
+ SET_IDREG(isar, ID_ISAR6, u);
if (!arm_feature(env, ARM_FEATURE_M)) {
u = cpu->isar.mvfr1;
@@ -2225,16 +2212,11 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
}
if (!cpu->has_neon && !cpu->has_vfp) {
- uint64_t t;
uint32_t u;
- t = cpu->isar.id_aa64isar0;
- t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 0);
- cpu->isar.id_aa64isar0 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64ISAR0, FHM, 0);
- t = cpu->isar.id_aa64isar1;
- t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0);
- cpu->isar.id_aa64isar1 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64ISAR1, FRINTTS, 0);
u = cpu->isar.mvfr0;
u = FIELD_DP32(u, MVFR0, SIMDREG, 0);
@@ -2251,19 +2233,17 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
unset_feature(env, ARM_FEATURE_THUMB_DSP);
- u = cpu->isar.id_isar1;
- u = FIELD_DP32(u, ID_ISAR1, EXTEND, 1);
- cpu->isar.id_isar1 = u;
+ FIELD_DP32_IDREG(isar, ID_ISAR1, EXTEND, 1);
- u = cpu->isar.id_isar2;
+ u = GET_IDREG(isar, ID_ISAR2);
u = FIELD_DP32(u, ID_ISAR2, MULTU, 1);
u = FIELD_DP32(u, ID_ISAR2, MULTS, 1);
- cpu->isar.id_isar2 = u;
+ SET_IDREG(isar, ID_ISAR2, u);
- u = cpu->isar.id_isar3;
+ u = GET_IDREG(isar, ID_ISAR3);
u = FIELD_DP32(u, ID_ISAR3, SIMD, 1);
u = FIELD_DP32(u, ID_ISAR3, SATURATE, 0);
- cpu->isar.id_isar3 = u;
+ SET_IDREG(isar, ID_ISAR3, u);
}
@@ -2338,14 +2318,12 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* Disable the security extension feature bits in the processor
* feature registers as well.
*/
- cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1, ID_PFR1, SECURITY, 0);
- cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPSDBG, 0);
- cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
- ID_AA64PFR0, EL3, 0);
+ FIELD_DP32_IDREG(isar, ID_PFR1, SECURITY, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, COPSDBG, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, EL3, 0);
/* Disable the realm management extension, which requires EL3. */
- cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
- ID_AA64PFR0, RME, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, RME, 0);
}
if (!cpu->has_el2) {
@@ -2368,9 +2346,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
cpu);
#endif
} else {
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMUVER, 0);
- cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, PMUVER, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, PERFMON, 0);
cpu->pmceid0 = 0;
cpu->pmceid1 = 0;
}
@@ -2380,10 +2357,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* Disable the hypervisor feature bits in the processor feature
* registers if we don't have EL2.
*/
- cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
- ID_AA64PFR0, EL2, 0);
- cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1,
- ID_PFR1, VIRTUALIZATION, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, EL2, 0);
+ FIELD_DP32_IDREG(isar, ID_PFR1, VIRTUALIZATION, 0);
}
if (cpu_isar_feature(aa64_mte, cpu)) {
@@ -2402,8 +2377,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* This matches Cortex-A710 BROADCASTMTE input being LOW.
*/
if (tcg_enabled() && cpu->tag_memory == NULL) {
- cpu->isar.id_aa64pfr1 =
- FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 1);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR1, MTE, 1);
}
/*
@@ -2411,7 +2385,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* enabled on the guest (i.e mte=off), clear guest's MTE bits."
*/
if (kvm_enabled() && !cpu->kvm_mte) {
- FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR1, MTE, 0);
}
#endif
}
@@ -2431,32 +2405,22 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* try to access the non-existent system registers for them.
*/
/* FEAT_SPE (Statistical Profiling Extension) */
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMSVER, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, PMSVER, 0);
/* FEAT_TRBE (Trace Buffer Extension) */
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEBUFFER, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, TRACEBUFFER, 0);
/* FEAT_TRF (Self-hosted Trace Extension) */
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEFILT, 0);
- cpu->isar.id_dfr0 =
- FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, TRACEFILT, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, TRACEFILT, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, TRACEFILT, 0);
/* Trace Macrocell system register access */
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEVER, 0);
- cpu->isar.id_dfr0 =
- FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPTRC, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, TRACEVER, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, COPTRC, 0);
/* Memory mapped trace */
- cpu->isar.id_dfr0 =
- FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, MMAPTRC, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, MMAPTRC, 0);
/* FEAT_AMU (Activity Monitors Extension) */
- cpu->isar.id_aa64pfr0 =
- FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, AMU, 0);
- cpu->isar.id_pfr0 =
- FIELD_DP32(cpu->isar.id_pfr0, ID_PFR0, AMU, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, AMU, 0);
+ FIELD_DP32_IDREG(isar, ID_PFR0, AMU, 0);
/* FEAT_MPAM (Memory Partitioning and Monitoring Extension) */
- cpu->isar.id_aa64pfr0 =
- FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, MPAM, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, MPAM, 0);
}
/* MPU can be configured out of a PMSA CPU either by setting has-mpu
@@ -2672,7 +2636,31 @@ static const char *arm_gdb_get_core_xml_file(CPUState *cs)
return "arm-core.xml";
}
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+/**
+ * aarch64_untagged_addr:
+ *
+ * Remove any address tag from @x. This is explicitly related to the
+ * linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
+ *
+ * There should be a better place to put this, but we need this in
+ * include/accel/tcg/cpu-ldst.h, and not some place linux-user specific.
+ *
+ * Note that arm-*-user will never set tagged_addr_enable.
+ */
+static vaddr aarch64_untagged_addr(CPUState *cs, vaddr x)
+{
+ CPUARMState *env = cpu_env(cs);
+ if (env->tagged_addr_enable) {
+ /*
+ * TBI is enabled for userspace but not kernelspace addresses.
+ * Only clear the tag if bit 55 is clear.
+ */
+ x &= sextract64(x, 0, 56);
+ }
+ return x;
+}
+#else
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps arm_sysemu_ops = {
@@ -2687,6 +2675,29 @@ static const struct SysemuCPUOps arm_sysemu_ops = {
#endif
#ifdef CONFIG_TCG
+#ifndef CONFIG_USER_ONLY
+static vaddr aprofile_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ /*
+ * The Stage2 and Phys indexes are only used for ptw on arm32,
+ * and all PTEs are aligned, so we never produce a wrap for these.
+ * Double check that we're not truncating a 40-bit physical address.
+ */
+ assert((unsigned)mmu_idx < (ARMMMUIdx_Stage2_S & ARM_MMU_IDX_COREIDX_MASK));
+
+ if (!is_a64(cpu_env(cs))) {
+ return (uint32_t)result;
+ }
+
+ /*
+ * TODO: For FEAT_CPA2, decide how we want to resolve
+ * Unpredictable_CPACHECK in AddressIncrement.
+ */
+ return result;
+}
+#endif /* !CONFIG_USER_ONLY */
+
static const TCGCPUOps arm_tcg_ops = {
.mttcg_supported = true,
/* ARM processors have a weak memory model */
@@ -2694,6 +2705,7 @@ static const TCGCPUOps arm_tcg_ops = {
.initialize = arm_translate_init,
.translate_code = arm_translate_code,
+ .get_tb_cpu_state = arm_get_tb_cpu_state,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.debug_excp_handler = arm_debug_excp_handler,
.restore_state_to_opc = arm_restore_state_to_opc,
@@ -2702,10 +2714,13 @@ static const TCGCPUOps arm_tcg_ops = {
#ifdef CONFIG_USER_ONLY
.record_sigsegv = arm_cpu_record_sigsegv,
.record_sigbus = arm_cpu_record_sigbus,
+ .untagged_addr = aarch64_untagged_addr,
#else
.tlb_fill_align = arm_cpu_tlb_fill_align,
+ .pointer_wrap = aprofile_pointer_wrap,
.cpu_exec_interrupt = arm_cpu_exec_interrupt,
.cpu_exec_halt = arm_cpu_exec_halt,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = arm_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
.do_unaligned_access = arm_cpu_do_unaligned_access,
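
The user-only hook aarch64_untagged_addr() added above strips the top-byte
tag from an address only when bit 55 is clear. A minimal standalone sketch
of that rule, with sextract64() re-implemented here for illustration (the
real one lives in include/qemu/bitops.h):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative re-implementation of QEMU's sextract64(). */
    static int64_t sextract64(uint64_t value, int start, int length)
    {
        /* Shift the field to the top, then arithmetic-shift back down. */
        return ((int64_t)(value << (64 - length - start))) >> (64 - length);
    }

    /*
     * The rule from aarch64_untagged_addr: mask bits [63:56] with a copy
     * of bit 55, so userspace addresses lose their tag while kernel-style
     * addresses (bit 55 set) pass through unchanged.
     */
    static uint64_t untag(uint64_t x)
    {
        return x & sextract64(x, 0, 56);
    }

    int main(void)
    {
        /* Bit 55 clear: the 0x5a tag in bits [63:56] is removed. */
        assert(untag(0x5a00123456789abcull) == 0x0000123456789abcull);
        /* Bit 55 set: address and tag are preserved. */
        assert(untag(0xff80123456789abcull) == 0xff80123456789abcull);
        puts("ok");
        return 0;
    }
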
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index fdcf8cd..0338153 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -32,6 +32,7 @@
#include "qapi/qapi-types-common.h"
#include "target/arm/multiprocessing.h"
#include "target/arm/gtimer.h"
+#include "target/arm/cpu-sysregs.h"
#define EXCP_UDEF 1 /* undefined instruction */
#define EXCP_SWI 2 /* software interrupt */
@@ -783,12 +784,9 @@ typedef struct CPUArchState {
#else /* CONFIG_USER_ONLY */
/* For usermode syscall translation. */
bool eabi;
-#endif /* CONFIG_USER_ONLY */
-
-#ifdef TARGET_TAGGED_ADDRESSES
/* Linux syscall tagged address support */
bool tagged_addr_enable;
-#endif
+#endif /* CONFIG_USER_ONLY */
} CPUARMState;
static inline void set_feature(CPUARMState *env, int feature)
@@ -837,6 +835,53 @@ typedef struct {
uint32_t map, init, supported;
} ARMVQMap;
+/* REG is ID_XXX */
+#define FIELD_DP64_IDREG(ISAR, REG, FIELD, VALUE) \
+ ({ \
+ ARMISARegisters *i_ = (ISAR); \
+ uint64_t regval = i_->idregs[REG ## _EL1_IDX]; \
+ regval = FIELD_DP64(regval, REG, FIELD, VALUE); \
+ i_->idregs[REG ## _EL1_IDX] = regval; \
+ })
+
+#define FIELD_DP32_IDREG(ISAR, REG, FIELD, VALUE) \
+ ({ \
+ ARMISARegisters *i_ = (ISAR); \
+ uint64_t regval = i_->idregs[REG ## _EL1_IDX]; \
+ regval = FIELD_DP32(regval, REG, FIELD, VALUE); \
+ i_->idregs[REG ## _EL1_IDX] = regval; \
+ })
+
+#define FIELD_EX64_IDREG(ISAR, REG, FIELD) \
+ ({ \
+ const ARMISARegisters *i_ = (ISAR); \
+ FIELD_EX64(i_->idregs[REG ## _EL1_IDX], REG, FIELD); \
+ })
+
+#define FIELD_EX32_IDREG(ISAR, REG, FIELD) \
+ ({ \
+ const ARMISARegisters *i_ = (ISAR); \
+ FIELD_EX32(i_->idregs[REG ## _EL1_IDX], REG, FIELD); \
+ })
+
+#define FIELD_SEX64_IDREG(ISAR, REG, FIELD) \
+ ({ \
+ const ARMISARegisters *i_ = (ISAR); \
+ FIELD_SEX64(i_->idregs[REG ## _EL1_IDX], REG, FIELD); \
+ })
+
+#define SET_IDREG(ISAR, REG, VALUE) \
+ ({ \
+ ARMISARegisters *i_ = (ISAR); \
+ i_->idregs[REG ## _EL1_IDX] = VALUE; \
+ })
+
+#define GET_IDREG(ISAR, REG) \
+ ({ \
+ const ARMISARegisters *i_ = (ISAR); \
+ i_->idregs[REG ## _EL1_IDX]; \
+ })
+
/**
* ARMCPU:
* @env: #CPUARMState
@@ -1005,44 +1050,14 @@ struct ArchCPU {
* field by reading the value from the KVM vCPU.
*/
struct ARMISARegisters {
- uint32_t id_isar0;
- uint32_t id_isar1;
- uint32_t id_isar2;
- uint32_t id_isar3;
- uint32_t id_isar4;
- uint32_t id_isar5;
- uint32_t id_isar6;
- uint32_t id_mmfr0;
- uint32_t id_mmfr1;
- uint32_t id_mmfr2;
- uint32_t id_mmfr3;
- uint32_t id_mmfr4;
- uint32_t id_mmfr5;
- uint32_t id_pfr0;
- uint32_t id_pfr1;
- uint32_t id_pfr2;
uint32_t mvfr0;
uint32_t mvfr1;
uint32_t mvfr2;
- uint32_t id_dfr0;
- uint32_t id_dfr1;
uint32_t dbgdidr;
uint32_t dbgdevid;
uint32_t dbgdevid1;
- uint64_t id_aa64isar0;
- uint64_t id_aa64isar1;
- uint64_t id_aa64isar2;
- uint64_t id_aa64pfr0;
- uint64_t id_aa64pfr1;
- uint64_t id_aa64mmfr0;
- uint64_t id_aa64mmfr1;
- uint64_t id_aa64mmfr2;
- uint64_t id_aa64mmfr3;
- uint64_t id_aa64dfr0;
- uint64_t id_aa64dfr1;
- uint64_t id_aa64zfr0;
- uint64_t id_aa64smfr0;
uint64_t reset_pmcr_el0;
+ uint64_t idregs[NUM_ID_IDX];
} isar;
uint64_t midr;
uint32_t revidr;
@@ -1141,10 +1156,6 @@ struct ARMCPUClass {
ResettablePhases parent_phases;
};
-struct AArch64CPUClass {
- ARMCPUClass parent_class;
-};
-
/* Callback functions for the generic timer's timers. */
void arm_gt_ptimer_cb(void *opaque);
void arm_gt_vtimer_cb(void *opaque);
@@ -1157,8 +1168,6 @@ void arm_gt_sel2vtimer_cb(void *opaque);
unsigned int gt_cntfrq_period_ns(ARMCPU *cpu);
void gt_rme_post_el_change(ARMCPU *cpu, void *opaque);
-void arm_cpu_post_init(Object *obj);
-
#define ARM_AFF0_SHIFT 0
#define ARM_AFF0_MASK (0xFFULL << ARM_AFF0_SHIFT)
#define ARM_AFF1_SHIFT 8
@@ -3119,9 +3128,6 @@ static inline bool bswap_code(bool sctlr_b)
#endif
}
-void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags);
-
enum {
QEMU_PSCI_CONDUIT_DISABLED = 0,
QEMU_PSCI_CONDUIT_SMC = 1,
@@ -3219,35 +3225,4 @@ extern const uint64_t pred_esz_masks[5];
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE (1 << LOG2_TAG_GRANULE)
-#ifdef CONFIG_USER_ONLY
-
-#define TARGET_PAGE_DATA_SIZE (TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1))
-
-#ifdef TARGET_TAGGED_ADDRESSES
-/**
- * cpu_untagged_addr:
- * @cs: CPU context
- * @x: tagged address
- *
- * Remove any address tag from @x. This is explicitly related to the
- * linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
- *
- * There should be a better place to put this, but we need this in
- * include/exec/cpu_ldst.h, and not some place linux-user specific.
- */
-static inline target_ulong cpu_untagged_addr(CPUState *cs, target_ulong x)
-{
- CPUARMState *env = cpu_env(cs);
- if (env->tagged_addr_enable) {
- /*
- * TBI is enabled for userspace but not kernelspace addresses.
- * Only clear the tag if bit 55 is clear.
- */
- x &= sextract64(x, 0, 56);
- }
- return x;
-}
-#endif /* TARGET_TAGGED_ADDRESSES */
-#endif /* CONFIG_USER_ONLY */
-
#endif
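
The FIELD_*_IDREG/GET_IDREG/SET_IDREG macros introduced above route every
ID-register access through the new idregs[] array instead of the per-register
struct fields they replace. A self-contained sketch of the read-modify-write
that FIELD_DP64_IDREG performs, using simplified stand-ins for the real
registerfields.h macros and index enum (all names here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    enum { ID_AA64PFR0_EL1_IDX, NUM_DEMO_IDX };

    typedef struct {
        uint64_t idregs[NUM_DEMO_IDX];
    } DemoISARegisters;

    /*
     * What FIELD_DP64_IDREG(isar, REG, FIELD, VALUE) amounts to: fetch the
     * idregs[] slot, deposit VALUE into the named bitfield, store it back.
     */
    static void demo_field_dp64_idreg(DemoISARegisters *isar, int idx,
                                      int shift, int len, uint64_t value)
    {
        uint64_t mask = ((1ull << len) - 1) << shift;
        uint64_t reg = isar->idregs[idx];
        isar->idregs[idx] = (reg & ~mask) | ((value << shift) & mask);
    }

    int main(void)
    {
        DemoISARegisters isar = { .idregs = { 0 } };
        /* Equivalent of FIELD_DP64_IDREG(&isar, ID_AA64PFR0, SVE, 1);
         * ID_AA64PFR0.SVE occupies bits [35:32]. */
        demo_field_dp64_idreg(&isar, ID_AA64PFR0_EL1_IDX, 32, 4, 1);
        printf("0x%016llx\n",        /* prints 0x0000000100000000 */
               (unsigned long long)isar.idregs[ID_AA64PFR0_EL1_IDX]);
        return 0;
    }
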
diff --git a/target/arm/cpu32-stubs.c b/target/arm/cpu32-stubs.c
new file mode 100644
index 0000000..81be44d
--- /dev/null
+++ b/target/arm/cpu32-stubs.c
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "qemu/osdep.h"
+#include "target/arm/cpu.h"
+#include "target/arm/internals.h"
+#include <glib.h>
+
+void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp)
+{
+ g_assert_not_reached();
+}
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 00629a5..1f34067 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -36,6 +36,28 @@
#include "cpu-features.h"
#include "cpregs.h"
+/* convert between <register>_IDX and SYS_<register> */
+#define DEF(NAME, OP0, OP1, CRN, CRM, OP2) \
+ [NAME##_IDX] = SYS_##NAME,
+
+const uint32_t id_register_sysreg[NUM_ID_IDX] = {
+#include "cpu-sysregs.h.inc"
+};
+
+#undef DEF
+#define DEF(NAME, OP0, OP1, CRN, CRM, OP2) \
+ case SYS_##NAME: return NAME##_IDX;
+
+int get_sysreg_idx(ARMSysRegs sysreg)
+{
+ switch (sysreg) {
+#include "cpu-sysregs.h.inc"
+ }
+ g_assert_not_reached();
+}
+
+#undef DEF
+
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
{
/*
@@ -114,7 +136,7 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
* SVE is disabled and so are all vector lengths. Good.
* Disable all SVE extensions as well.
*/
- cpu->isar.id_aa64zfr0 = 0;
+ SET_IDREG(&cpu->isar, ID_AA64ZFR0, 0);
return;
}
@@ -288,16 +310,13 @@ static bool cpu_arm_get_sve(Object *obj, Error **errp)
static void cpu_arm_set_sve(Object *obj, bool value, Error **errp)
{
ARMCPU *cpu = ARM_CPU(obj);
- uint64_t t;
if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
error_setg(errp, "'sve' feature not supported by KVM on this host");
return;
}
- t = cpu->isar.id_aa64pfr0;
- t = FIELD_DP64(t, ID_AA64PFR0, SVE, value);
- cpu->isar.id_aa64pfr0 = t;
+ FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR0, SVE, value);
}
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp)
@@ -309,7 +328,7 @@ void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp)
if (vq_map == 0) {
if (!cpu_isar_feature(aa64_sme, cpu)) {
- cpu->isar.id_aa64smfr0 = 0;
+ SET_IDREG(&cpu->isar, ID_AA64SMFR0, 0);
return;
}
@@ -348,11 +367,8 @@ static bool cpu_arm_get_sme(Object *obj, Error **errp)
static void cpu_arm_set_sme(Object *obj, bool value, Error **errp)
{
ARMCPU *cpu = ARM_CPU(obj);
- uint64_t t;
- t = cpu->isar.id_aa64pfr1;
- t = FIELD_DP64(t, ID_AA64PFR1, SME, value);
- cpu->isar.id_aa64pfr1 = t;
+ FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR1, SME, value);
}
static bool cpu_arm_get_sme_fa64(Object *obj, Error **errp)
@@ -365,11 +381,8 @@ static bool cpu_arm_get_sme_fa64(Object *obj, Error **errp)
static void cpu_arm_set_sme_fa64(Object *obj, bool value, Error **errp)
{
ARMCPU *cpu = ARM_CPU(obj);
- uint64_t t;
- t = cpu->isar.id_aa64smfr0;
- t = FIELD_DP64(t, ID_AA64SMFR0, FA64, value);
- cpu->isar.id_aa64smfr0 = t;
+ FIELD_DP64_IDREG(&cpu->isar, ID_AA64SMFR0, FA64, value);
}
#ifdef CONFIG_USER_ONLY
@@ -480,6 +493,7 @@ void aarch64_add_sme_properties(Object *obj)
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
{
ARMPauthFeature features = cpu_isar_feature(pauth_feature, cpu);
+ ARMISARegisters *isar = &cpu->isar;
uint64_t isar1, isar2;
/*
@@ -490,13 +504,13 @@ void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
*
* Begin by disabling all fields.
*/
- isar1 = cpu->isar.id_aa64isar1;
+ isar1 = GET_IDREG(isar, ID_AA64ISAR1);
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, 0);
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 0);
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, 0);
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 0);
- isar2 = cpu->isar.id_aa64isar2;
+ isar2 = GET_IDREG(isar, ID_AA64ISAR2);
isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, APA3, 0);
isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, GPA3, 0);
@@ -558,8 +572,8 @@ void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
}
}
- cpu->isar.id_aa64isar1 = isar1;
- cpu->isar.id_aa64isar2 = isar2;
+ SET_IDREG(isar, ID_AA64ISAR1, isar1);
+ SET_IDREG(isar, ID_AA64ISAR2, isar2);
}
static const Property arm_cpu_pauth_property =
@@ -606,17 +620,18 @@ void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp)
return;
}
- t = cpu->isar.id_aa64mmfr0;
+ t = GET_IDREG(&cpu->isar, ID_AA64MMFR0);
t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 2); /* 16k pages w/ LPA2 */
t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4, 1); /* 4k pages w/ LPA2 */
t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 3); /* 16k stage2 w/ LPA2 */
t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 3); /* 4k stage2 w/ LPA2 */
- cpu->isar.id_aa64mmfr0 = t;
+ SET_IDREG(&cpu->isar, ID_AA64MMFR0, t);
}
static void aarch64_a57_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a57";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -637,25 +652,25 @@ static void aarch64_a57_initfn(Object *obj)
cpu->isar.mvfr2 = 0x00000043;
cpu->ctr = 0x8444c004;
cpu->reset_sctlr = 0x00c50838;
- cpu->isar.id_pfr0 = 0x00000131;
- cpu->isar.id_pfr1 = 0x00011011;
- cpu->isar.id_dfr0 = 0x03010066;
+ SET_IDREG(isar, ID_PFR0, 0x00000131);
+ SET_IDREG(isar, ID_PFR1, 0x00011011);
+ SET_IDREG(isar, ID_DFR0, 0x03010066);
cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x10101105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02102211;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00011142;
- cpu->isar.id_isar5 = 0x00011121;
- cpu->isar.id_isar6 = 0;
- cpu->isar.id_aa64pfr0 = 0x00002222;
- cpu->isar.id_aa64dfr0 = 0x10305106;
- cpu->isar.id_aa64isar0 = 0x00011120;
- cpu->isar.id_aa64mmfr0 = 0x00001124;
+ SET_IDREG(isar, ID_MMFR0, 0x10101105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02102211);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00011142);
+ SET_IDREG(isar, ID_ISAR5, 0x00011121);
+ SET_IDREG(isar, ID_ISAR6, 0);
+ SET_IDREG(isar, ID_AA64PFR0, 0x00002222);
+ SET_IDREG(isar, ID_AA64DFR0, 0x10305106);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x00011120);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x00001124);
cpu->isar.dbgdidr = 0x3516d000;
cpu->isar.dbgdevid = 0x01110f13;
cpu->isar.dbgdevid1 = 0x2;
@@ -678,6 +693,7 @@ static void aarch64_a57_initfn(Object *obj)
static void aarch64_a53_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a53";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -698,25 +714,25 @@ static void aarch64_a53_initfn(Object *obj)
cpu->isar.mvfr2 = 0x00000043;
cpu->ctr = 0x84448004; /* L1Ip = VIPT */
cpu->reset_sctlr = 0x00c50838;
- cpu->isar.id_pfr0 = 0x00000131;
- cpu->isar.id_pfr1 = 0x00011011;
- cpu->isar.id_dfr0 = 0x03010066;
+ SET_IDREG(isar, ID_PFR0, 0x00000131);
+ SET_IDREG(isar, ID_PFR1, 0x00011011);
+ SET_IDREG(isar, ID_DFR0, 0x03010066);
cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x10101105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02102211;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00011142;
- cpu->isar.id_isar5 = 0x00011121;
- cpu->isar.id_isar6 = 0;
- cpu->isar.id_aa64pfr0 = 0x00002222;
- cpu->isar.id_aa64dfr0 = 0x10305106;
- cpu->isar.id_aa64isar0 = 0x00011120;
- cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
+ SET_IDREG(isar, ID_MMFR0, 0x10101105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02102211);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00011142);
+ SET_IDREG(isar, ID_ISAR5, 0x00011121);
+ SET_IDREG(isar, ID_ISAR6, 0);
+ SET_IDREG(isar, ID_AA64PFR0, 0x00002222);
+ SET_IDREG(isar, ID_AA64DFR0, 0x10305106);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x00011120);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x00001122); /* 40 bit physical addr */
cpu->isar.dbgdidr = 0x3516d000;
cpu->isar.dbgdevid = 0x00110f13;
cpu->isar.dbgdevid1 = 0x1;
@@ -781,92 +797,12 @@ static const ARMCPUInfo aarch64_cpus[] = {
#endif
};
-static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
-{
- ARMCPU *cpu = ARM_CPU(obj);
-
- return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
-}
-
-static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
-{
- ARMCPU *cpu = ARM_CPU(obj);
-
- /* At this time, this property is only allowed if KVM is enabled. This
- * restriction allows us to avoid fixing up functionality that assumes a
- * uniform execution state like do_interrupt.
- */
- if (value == false) {
- if (!kvm_enabled() || !kvm_arm_aarch32_supported()) {
- error_setg(errp, "'aarch64' feature cannot be disabled "
- "unless KVM is enabled and 32-bit EL1 "
- "is supported");
- return;
- }
- unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
- } else {
- set_feature(&cpu->env, ARM_FEATURE_AARCH64);
- }
-}
-
-static void aarch64_cpu_finalizefn(Object *obj)
-{
-}
-
-static void aarch64_cpu_class_init(ObjectClass *oc, const void *data)
-{
- object_class_property_add_bool(oc, "aarch64", aarch64_cpu_get_aarch64,
- aarch64_cpu_set_aarch64);
- object_class_property_set_description(oc, "aarch64",
- "Set on/off to enable/disable aarch64 "
- "execution state ");
-}
-
-static void aarch64_cpu_instance_init(Object *obj)
-{
- ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);
-
- acc->info->initfn(obj);
- arm_cpu_post_init(obj);
-}
-
-static void cpu_register_class_init(ObjectClass *oc, const void *data)
-{
- ARMCPUClass *acc = ARM_CPU_CLASS(oc);
-
- acc->info = data;
-}
-
-void aarch64_cpu_register(const ARMCPUInfo *info)
-{
- TypeInfo type_info = {
- .parent = TYPE_AARCH64_CPU,
- .instance_init = aarch64_cpu_instance_init,
- .class_init = info->class_init ?: cpu_register_class_init,
- .class_data = info,
- };
-
- type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
- type_register_static(&type_info);
- g_free((void *)type_info.name);
-}
-
-static const TypeInfo aarch64_cpu_type_info = {
- .name = TYPE_AARCH64_CPU,
- .parent = TYPE_ARM_CPU,
- .instance_finalize = aarch64_cpu_finalizefn,
- .abstract = true,
- .class_init = aarch64_cpu_class_init,
-};
-
static void aarch64_cpu_register_types(void)
{
size_t i;
- type_register_static(&aarch64_cpu_type_info);
-
for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) {
- aarch64_cpu_register(&aarch64_cpus[i]);
+ arm_cpu_register(&aarch64_cpus[i]);
}
}
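
The id_register_sysreg[] table and get_sysreg_idx() added at the top of
cpu64.c are two expansions of the single DEF() list in cpu-sysregs.h.inc:
the same entries generate both the index-to-encoding array and the reverse
switch. A self-contained sketch of that X-macro pattern, with hypothetical
entries standing in for the real register list:

    #include <assert.h>
    #include <stdio.h>

    #define DEMO_REGS \
        DEF(REG_A, 0x100) \
        DEF(REG_B, 0x200)

    enum {
    #define DEF(NAME, ENC) NAME##_IDX,
        DEMO_REGS
    #undef DEF
        NUM_DEMO_IDX
    };

    /* Expansion 1: index -> encoding lookup table. */
    static const unsigned demo_idx_to_enc[NUM_DEMO_IDX] = {
    #define DEF(NAME, ENC) [NAME##_IDX] = ENC,
        DEMO_REGS
    #undef DEF
    };

    /* Expansion 2: encoding -> index reverse mapping. */
    static int demo_enc_to_idx(unsigned enc)
    {
        switch (enc) {
    #define DEF(NAME, ENC) case ENC: return NAME##_IDX;
        DEMO_REGS
    #undef DEF
        }
        return -1; /* the real code g_assert_not_reached()s here */
    }

    int main(void)
    {
        assert(demo_idx_to_enc[REG_B_IDX] == 0x200);
        assert(demo_enc_to_idx(0x100) == REG_A_IDX);
        puts("ok");
        return 0;
    }
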
diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c
index 473ee2a..69fb1d0 100644
--- a/target/arm/debug_helper.c
+++ b/target/arm/debug_helper.c
@@ -11,11 +11,12 @@
#include "internals.h"
#include "cpu-features.h"
#include "cpregs.h"
-#include "exec/exec-all.h"
-#include "exec/helper-proto.h"
#include "exec/watchpoint.h"
#include "system/tcg.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
#ifdef CONFIG_TCG
/* Return the Exception Level targeted by debug exceptions. */
static int arm_debug_target_el(CPUARMState *env)
@@ -379,7 +380,7 @@ bool arm_debug_check_breakpoint(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
- target_ulong pc;
+ vaddr pc;
int n;
/*
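
The debug_helper.c hunk above replaces the monolithic "exec/helper-proto.h"
with the pattern of defining HELPER_H to name the per-target list before
including the generic expander. A single-file sketch of that expansion trick
(in the tree the list is a separate header named by HELPER_H; here it is a
macro so the sketch stays self-contained, and all names are hypothetical):

    #include <stdio.h>

    #define DEMO_HELPER_LIST \
        DEF_HELPER_2(add, int, int, int) \
        DEF_HELPER_2(mul, int, int, int)

    /* Expansion 1: prototypes, as exec/helper-proto.h.inc would emit. */
    #define DEF_HELPER_2(name, ret, a1, a2) ret helper_##name(a1, a2);
    DEMO_HELPER_LIST
    #undef DEF_HELPER_2

    /* Expansion 2: trivial bodies for this demo (both get the same body). */
    #define DEF_HELPER_2(name, ret, a1, a2) \
        ret helper_##name(a1 x, a2 y) { return x + y; }
    DEMO_HELPER_LIST
    #undef DEF_HELPER_2

    int main(void)
    {
        printf("%d %d\n", helper_add(1, 2), helper_mul(3, 4));
        return 0;
    }
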
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 7fb6e88..c311d2d 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -12,7 +12,6 @@
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
-#include "exec/helper-proto.h"
#include "exec/page-protection.h"
#include "exec/mmap-lock.h"
#include "qemu/main-loop.h"
@@ -20,7 +19,6 @@
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"
#include "exec/cputlb.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "hw/irq.h"
#include "system/cpu-timers.h"
@@ -30,11 +28,16 @@
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
+#include "accel/tcg/probe.h"
+#include "accel/tcg/getpc.h"
#include "semihosting/common-semi.h"
#endif
#include "cpregs.h"
#include "target/arm/gtimer.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
static void switch_mode(CPUARMState *env, int mode);
@@ -222,7 +225,7 @@ static void count_cpreg(gpointer key, gpointer opaque)
}
}
-static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
+static gint cpreg_key_compare(gconstpointer a, gconstpointer b, gpointer d)
{
uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);
@@ -246,7 +249,7 @@ void init_cpreg_list(ARMCPU *cpu)
int arraylen;
keys = g_hash_table_get_keys(cpu->cp_regs);
- keys = g_list_sort(keys, cpreg_key_compare);
+ keys = g_list_sort_with_data(keys, cpreg_key_compare, NULL);
cpu->cpreg_array_len = 0;
@@ -1901,7 +1904,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
.accessfn = pmreg_access,
.fgt = FGT_PMCNTEN,
- .writefn = pmcntenclr_write,
+ .writefn = pmcntenclr_write, .raw_writefn = raw_write,
.type = ARM_CP_ALIAS | ARM_CP_IO },
{ .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
@@ -1909,7 +1912,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.fgt = FGT_PMCNTEN,
.type = ARM_CP_ALIAS | ARM_CP_IO,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
- .writefn = pmcntenclr_write },
+ .writefn = pmcntenclr_write, .raw_writefn = raw_write },
{ .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
.access = PL0_RW, .type = ARM_CP_IO,
.fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
@@ -2026,16 +2029,16 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
{ .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
.access = PL1_RW, .accessfn = access_tpm,
.fgt = FGT_PMINTEN,
- .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
- .writefn = pmintenclr_write, },
+ .writefn = pmintenclr_write, .raw_writefn = raw_write },
{ .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
.access = PL1_RW, .accessfn = access_tpm,
.fgt = FGT_PMINTEN,
- .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
- .writefn = pmintenclr_write },
+ .writefn = pmintenclr_write, .raw_writefn = raw_write },
{ .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
.access = PL1_R,
@@ -4987,7 +4990,7 @@ static void ic_ivau_write(CPUARMState *env, const ARMCPRegInfo *ri,
mmap_lock();
- tb_invalidate_phys_range(start_address, end_address);
+ tb_invalidate_phys_range(env_cpu(env), start_address, end_address);
mmap_unlock();
}
@@ -6563,9 +6566,7 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
*/
new_len = sve_vqm1_for_el(env, cur_el);
if (new_len < old_len) {
-#ifdef TARGET_AARCH64
aarch64_sve_narrow_vq(env, new_len + 1);
-#endif
}
}
@@ -6588,7 +6589,6 @@ static const ARMCPRegInfo zcr_reginfo[] = {
.writefn = zcr_write, .raw_writefn = raw_write },
};
-#ifdef TARGET_AARCH64
static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
@@ -6822,7 +6822,6 @@ static const ARMCPRegInfo nmi_reginfo[] = {
.writefn = aa64_allint_write, .readfn = aa64_allint_read,
.resetfn = arm_cp_reset_ignore },
};
-#endif /* TARGET_AARCH64 */
static void define_pmu_regs(ARMCPU *cpu)
{
@@ -6933,7 +6932,7 @@ static void define_pmu_regs(ARMCPU *cpu)
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = env_archcpu(env);
- uint64_t pfr1 = cpu->isar.id_pfr1;
+ uint64_t pfr1 = GET_IDREG(&cpu->isar, ID_PFR1);
if (env->gicv3state) {
pfr1 |= 1 << 28;
@@ -6944,7 +6943,7 @@ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = env_archcpu(env);
- uint64_t pfr0 = cpu->isar.id_aa64pfr0;
+ uint64_t pfr0 = GET_IDREG(&cpu->isar, ID_AA64PFR0);
if (env->gicv3state) {
pfr0 |= 1 << 24;
@@ -7014,7 +7013,6 @@ static const ARMCPRegInfo lor_reginfo[] = {
.type = ARM_CP_CONST, .resetvalue = 0 },
};
-#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
@@ -7507,8 +7505,6 @@ static const ARMCPRegInfo nv2_reginfo[] = {
.fieldoffset = offsetof(CPUARMState, cp15.vncr_el2) },
};
-#endif /* TARGET_AARCH64 */
-
static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
@@ -7754,6 +7750,8 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{
/* Register all the coprocessor registers based on feature bits */
CPUARMState *env = &cpu->env;
+ ARMISARegisters *isar = &cpu->isar;
+
if (arm_feature(env, ARM_FEATURE_M)) {
/* M profile has no coprocessor registers */
return;
@@ -7768,7 +7766,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, not_v8_cp_reginfo);
}
+#ifndef CONFIG_USER_ONLY
define_tlb_insn_regs(cpu);
+#endif
if (arm_feature(env, ARM_FEATURE_V6)) {
/* The ID registers all have impdef reset values */
@@ -7777,7 +7777,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_pfr0 },
+ .resetvalue = GET_IDREG(isar, ID_PFR0)},
/*
* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
* the value of the GIC field until after we define these regs.
@@ -7788,7 +7788,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.accessfn = access_aa32_tid3,
#ifdef CONFIG_USER_ONLY
.type = ARM_CP_CONST,
- .resetvalue = cpu->isar.id_pfr1,
+ .resetvalue = GET_IDREG(isar, ID_PFR1),
#else
.type = ARM_CP_NO_RAW,
.accessfn = access_aa32_tid3,
@@ -7800,7 +7800,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_dfr0 },
+ .resetvalue = GET_IDREG(isar, ID_DFR0)},
{ .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -7810,62 +7810,62 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_mmfr0 },
+ .resetvalue = GET_IDREG(isar, ID_MMFR0)},
{ .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_mmfr1 },
+ .resetvalue = GET_IDREG(isar, ID_MMFR1)},
{ .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_mmfr2 },
+ .resetvalue = GET_IDREG(isar, ID_MMFR2)},
{ .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_mmfr3 },
+ .resetvalue = GET_IDREG(isar, ID_MMFR3)},
{ .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar0 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR0)},
{ .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar1 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR1)},
{ .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar2 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR2)},
{ .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar3 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR3) },
{ .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar4 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR4) },
{ .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar5 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR5) },
{ .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_mmfr4 },
+ .resetvalue = GET_IDREG(isar, ID_MMFR4)},
{ .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar6 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR6) },
};
define_arm_cp_regs(cpu, v6_idregs);
define_arm_cp_regs(cpu, v6_cp_reginfo);
@@ -7916,7 +7916,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.access = PL1_R,
#ifdef CONFIG_USER_ONLY
.type = ARM_CP_CONST,
- .resetvalue = cpu->isar.id_aa64pfr0
+ .resetvalue = GET_IDREG(isar, ID_AA64PFR0)
#else
.type = ARM_CP_NO_RAW,
.accessfn = access_aa64_tid3,
@@ -7928,7 +7928,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64pfr1},
+ .resetvalue = GET_IDREG(isar, ID_AA64PFR1)},
{ .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -7943,12 +7943,12 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64zfr0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64ZFR0)},
{ .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64smfr0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64SMFR0)},
{ .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -7963,12 +7963,12 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64dfr0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64DFR0) },
{ .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64dfr1 },
+ .resetvalue = GET_IDREG(isar, ID_AA64DFR1) },
{ .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -8003,17 +8003,17 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64isar0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64ISAR0)},
{ .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64isar1 },
+ .resetvalue = GET_IDREG(isar, ID_AA64ISAR1)},
{ .name = "ID_AA64ISAR2_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64isar2 },
+ .resetvalue = GET_IDREG(isar, ID_AA64ISAR2)},
{ .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -8043,22 +8043,22 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64mmfr0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64MMFR0)},
{ .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64mmfr1 },
+ .resetvalue = GET_IDREG(isar, ID_AA64MMFR1) },
{ .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64mmfr2 },
+ .resetvalue = GET_IDREG(isar, ID_AA64MMFR2) },
{ .name = "ID_AA64MMFR3_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64mmfr3 },
+ .resetvalue = GET_IDREG(isar, ID_AA64MMFR3) },
{ .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -8130,17 +8130,17 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_pfr2 },
+ .resetvalue = GET_IDREG(isar, ID_PFR2)},
{ .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_dfr1 },
+ .resetvalue = GET_IDREG(isar, ID_DFR1)},
{ .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_mmfr5 },
+ .resetvalue = GET_IDREG(isar, ID_MMFR5)},
{ .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -8949,7 +8949,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
}
-#ifdef TARGET_AARCH64
if (cpu_isar_feature(aa64_sme, cpu)) {
define_arm_cp_regs(cpu, sme_reginfo);
}
@@ -9010,7 +9009,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (cpu_isar_feature(aa64_nmi, cpu)) {
define_arm_cp_regs(cpu, nmi_reginfo);
}
-#endif
if (cpu_isar_feature(any_predinv, cpu)) {
define_arm_cp_regs(cpu, predinv_reginfo);
@@ -10619,7 +10617,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
unsigned int new_el = env->exception.target_el;
- target_ulong addr = env->cp15.vbar_el[new_el];
+ vaddr addr = env->cp15.vbar_el[new_el];
unsigned int new_mode = aarch64_pstate_mode(new_el, true);
unsigned int old_mode;
unsigned int cur_el = arm_current_el(env);
@@ -10630,9 +10628,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
* Note that new_el can never be 0. If cur_el is 0, then
* el0_a64 is is_a64(), else el0_a64 is ignored.
*/
-#ifdef TARGET_AARCH64
aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
-#endif
}
if (cur_el < new_el) {
@@ -11423,116 +11419,6 @@ ARMMMUIdx arm_mmu_idx(CPUARMState *env)
return arm_mmu_idx_el(env, arm_current_el(env));
}
-static bool mve_no_pred(CPUARMState *env)
-{
- /*
- * Return true if there is definitely no predication of MVE
- * instructions by VPR or LTPSIZE. (Returning false even if there
- * isn't any predication is OK; generated code will just be
- * a little worse.)
- * If the CPU does not implement MVE then this TB flag is always 0.
- *
- * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
- * logic in gen_update_fp_context() needs to be updated to match.
- *
- * We do not include the effect of the ECI bits here -- they are
- * tracked in other TB flags. This simplifies the logic for
- * "when did we emit code that changes the MVE_NO_PRED TB flag
- * and thus need to end the TB?".
- */
- if (cpu_isar_feature(aa32_mve, env_archcpu(env))) {
- return false;
- }
- if (env->v7m.vpr) {
- return false;
- }
- if (env->v7m.ltpsize < 4) {
- return false;
- }
- return true;
-}
-
-void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags)
-{
- CPUARMTBFlags flags;
-
- assert_hflags_rebuild_correctly(env);
- flags = env->hflags;
-
- if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
- *pc = env->pc;
- if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
- DP_TBFLAG_A64(flags, BTYPE, env->btype);
- }
- } else {
- *pc = env->regs[15];
-
- if (arm_feature(env, ARM_FEATURE_M)) {
- if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
- FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
- != env->v7m.secure) {
- DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
- }
-
- if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
- (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
- (env->v7m.secure &&
- !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
- /*
- * ASPEN is set, but FPCA/SFPA indicate that there is no
- * active FP context; we must create a new FP context before
- * executing any FP insn.
- */
- DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
- }
-
- bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
- if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
- DP_TBFLAG_M32(flags, LSPACT, 1);
- }
-
- if (mve_no_pred(env)) {
- DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
- }
- } else {
- /*
- * Note that XSCALE_CPAR shares bits with VECSTRIDE.
- * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
- */
- if (arm_feature(env, ARM_FEATURE_XSCALE)) {
- DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
- } else {
- DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
- DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
- }
- if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
- DP_TBFLAG_A32(flags, VFPEN, 1);
- }
- }
-
- DP_TBFLAG_AM32(flags, THUMB, env->thumb);
- DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
- }
-
- /*
- * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
- * states defined in the ARM ARM for software singlestep:
- * SS_ACTIVE PSTATE.SS State
- * 0 x Inactive (the TB flag for SS is always 0)
- * 1 0 Active-pending
- * 1 1 Active-not-pending
- * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
- */
- if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
- DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
- }
-
- *pflags = flags.flags;
- *cs_base = flags.flags2;
-}
-
-#ifdef TARGET_AARCH64
/*
* The manual says that when SVE is enabled and VQ is widened the
* implementation is allowed to zero the previously inaccessible
@@ -11644,12 +11530,9 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
/* When changing vector length, clear inaccessible state. */
if (new_len < old_len) {
-#ifdef TARGET_AARCH64
aarch64_sve_narrow_vq(env, new_len + 1);
-#endif
}
}
-#endif
#ifndef CONFIG_USER_ONLY
ARMSecuritySpace arm_security_space(CPUARMState *env)
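
The cpreg_key_compare() hunk above widens the comparator to the
three-argument GCompareDataFunc signature so the key list can be sorted with
g_list_sort_with_data(). A minimal GLib usage sketch of that API (demo
values only):

    /* Build with: cc demo.c $(pkg-config --cflags --libs glib-2.0) */
    #include <glib.h>
    #include <stdio.h>

    /* GCompareDataFunc: a GCompareFunc plus a user_data pointer, which is
     * the signature g_list_sort_with_data() requires (unused here, as in
     * the patch above). */
    static gint cmp_uint(gconstpointer a, gconstpointer b, gpointer user_data)
    {
        guint ua = GPOINTER_TO_UINT(a);
        guint ub = GPOINTER_TO_UINT(b);
        return ua < ub ? -1 : (ua > ub ? 1 : 0);
    }

    int main(void)
    {
        GList *l = NULL;
        l = g_list_prepend(l, GUINT_TO_POINTER(3));
        l = g_list_prepend(l, GUINT_TO_POINTER(1));
        l = g_list_prepend(l, GUINT_TO_POINTER(2));

        l = g_list_sort_with_data(l, cmp_uint, NULL); /* now 1, 2, 3 */

        for (GList *it = l; it != NULL; it = it->next) {
            printf("%u\n", GPOINTER_TO_UINT(it->data));
        }
        g_list_free(l);
        return 0;
    }
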
diff --git a/target/arm/helper.h b/target/arm/helper.h
index 0907505..f340a49 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -1,1154 +1,6 @@
-DEF_HELPER_FLAGS_1(sxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
-DEF_HELPER_FLAGS_1(uxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
+/* SPDX-License-Identifier: GPL-2.0-or-later */
-DEF_HELPER_3(add_setq, i32, env, i32, i32)
-DEF_HELPER_3(add_saturate, i32, env, i32, i32)
-DEF_HELPER_3(sub_saturate, i32, env, i32, i32)
-DEF_HELPER_3(add_usaturate, i32, env, i32, i32)
-DEF_HELPER_3(sub_usaturate, i32, env, i32, i32)
-DEF_HELPER_FLAGS_3(sdiv, TCG_CALL_NO_RWG, s32, env, s32, s32)
-DEF_HELPER_FLAGS_3(udiv, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32)
-
-#define PAS_OP(pfx) \
- DEF_HELPER_3(pfx ## add8, i32, i32, i32, ptr) \
- DEF_HELPER_3(pfx ## sub8, i32, i32, i32, ptr) \
- DEF_HELPER_3(pfx ## sub16, i32, i32, i32, ptr) \
- DEF_HELPER_3(pfx ## add16, i32, i32, i32, ptr) \
- DEF_HELPER_3(pfx ## addsubx, i32, i32, i32, ptr) \
- DEF_HELPER_3(pfx ## subaddx, i32, i32, i32, ptr)
-
-PAS_OP(s)
-PAS_OP(u)
-#undef PAS_OP
-
-#define PAS_OP(pfx) \
- DEF_HELPER_2(pfx ## add8, i32, i32, i32) \
- DEF_HELPER_2(pfx ## sub8, i32, i32, i32) \
- DEF_HELPER_2(pfx ## sub16, i32, i32, i32) \
- DEF_HELPER_2(pfx ## add16, i32, i32, i32) \
- DEF_HELPER_2(pfx ## addsubx, i32, i32, i32) \
- DEF_HELPER_2(pfx ## subaddx, i32, i32, i32)
-PAS_OP(q)
-PAS_OP(sh)
-PAS_OP(uq)
-PAS_OP(uh)
-#undef PAS_OP
-
-DEF_HELPER_3(ssat, i32, env, i32, i32)
-DEF_HELPER_3(usat, i32, env, i32, i32)
-DEF_HELPER_3(ssat16, i32, env, i32, i32)
-DEF_HELPER_3(usat16, i32, env, i32, i32)
-
-DEF_HELPER_FLAGS_2(usad8, TCG_CALL_NO_RWG_SE, i32, i32, i32)
-
-DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE,
- i32, i32, i32, i32)
-DEF_HELPER_2(exception_internal, noreturn, env, i32)
-DEF_HELPER_3(exception_with_syndrome, noreturn, env, i32, i32)
-DEF_HELPER_4(exception_with_syndrome_el, noreturn, env, i32, i32, i32)
-DEF_HELPER_2(exception_bkpt_insn, noreturn, env, i32)
-DEF_HELPER_2(exception_swstep, noreturn, env, i32)
-DEF_HELPER_2(exception_pc_alignment, noreturn, env, tl)
-DEF_HELPER_1(setend, void, env)
-DEF_HELPER_2(wfi, void, env, i32)
-DEF_HELPER_1(wfe, void, env)
-DEF_HELPER_2(wfit, void, env, i64)
-DEF_HELPER_1(yield, void, env)
-DEF_HELPER_1(pre_hvc, void, env)
-DEF_HELPER_2(pre_smc, void, env, i32)
-DEF_HELPER_1(vesb, void, env)
-
-DEF_HELPER_3(cpsr_write, void, env, i32, i32)
-DEF_HELPER_2(cpsr_write_eret, void, env, i32)
-DEF_HELPER_1(cpsr_read, i32, env)
-
-DEF_HELPER_3(v7m_msr, void, env, i32, i32)
-DEF_HELPER_2(v7m_mrs, i32, env, i32)
-
-DEF_HELPER_2(v7m_bxns, void, env, i32)
-DEF_HELPER_2(v7m_blxns, void, env, i32)
-
-DEF_HELPER_3(v7m_tt, i32, env, i32, i32)
-
-DEF_HELPER_1(v7m_preserve_fp_state, void, env)
-
-DEF_HELPER_2(v7m_vlstm, void, env, i32)
-DEF_HELPER_2(v7m_vlldm, void, env, i32)
-
-DEF_HELPER_2(v8m_stackcheck, void, env, i32)
-
-DEF_HELPER_FLAGS_2(check_bxj_trap, TCG_CALL_NO_WG, void, env, i32)
-
-DEF_HELPER_4(access_check_cp_reg, cptr, env, i32, i32, i32)
-DEF_HELPER_FLAGS_2(lookup_cp_reg, TCG_CALL_NO_RWG_SE, cptr, env, i32)
-DEF_HELPER_FLAGS_2(tidcp_el0, TCG_CALL_NO_WG, void, env, i32)
-DEF_HELPER_FLAGS_2(tidcp_el1, TCG_CALL_NO_WG, void, env, i32)
-DEF_HELPER_3(set_cp_reg, void, env, cptr, i32)
-DEF_HELPER_2(get_cp_reg, i32, env, cptr)
-DEF_HELPER_3(set_cp_reg64, void, env, cptr, i64)
-DEF_HELPER_2(get_cp_reg64, i64, env, cptr)
-
-DEF_HELPER_2(get_r13_banked, i32, env, i32)
-DEF_HELPER_3(set_r13_banked, void, env, i32, i32)
-
-DEF_HELPER_3(mrs_banked, i32, env, i32, i32)
-DEF_HELPER_4(msr_banked, void, env, i32, i32, i32)
-
-DEF_HELPER_2(get_user_reg, i32, env, i32)
-DEF_HELPER_3(set_user_reg, void, env, i32, i32)
-
-DEF_HELPER_FLAGS_1(rebuild_hflags_m32_newel, TCG_CALL_NO_RWG, void, env)
-DEF_HELPER_FLAGS_2(rebuild_hflags_m32, TCG_CALL_NO_RWG, void, env, int)
-DEF_HELPER_FLAGS_1(rebuild_hflags_a32_newel, TCG_CALL_NO_RWG, void, env)
-DEF_HELPER_FLAGS_2(rebuild_hflags_a32, TCG_CALL_NO_RWG, void, env, int)
-DEF_HELPER_FLAGS_2(rebuild_hflags_a64, TCG_CALL_NO_RWG, void, env, int)
-
-DEF_HELPER_FLAGS_5(probe_access, TCG_CALL_NO_WG, void, env, tl, i32, i32, i32)
-
-DEF_HELPER_1(vfp_get_fpscr, i32, env)
-DEF_HELPER_2(vfp_set_fpscr, void, env, i32)
-
-DEF_HELPER_3(vfp_addh, f16, f16, f16, fpst)
-DEF_HELPER_3(vfp_adds, f32, f32, f32, fpst)
-DEF_HELPER_3(vfp_addd, f64, f64, f64, fpst)
-DEF_HELPER_3(vfp_subh, f16, f16, f16, fpst)
-DEF_HELPER_3(vfp_subs, f32, f32, f32, fpst)
-DEF_HELPER_3(vfp_subd, f64, f64, f64, fpst)
-DEF_HELPER_3(vfp_mulh, f16, f16, f16, fpst)
-DEF_HELPER_3(vfp_muls, f32, f32, f32, fpst)
-DEF_HELPER_3(vfp_muld, f64, f64, f64, fpst)
-DEF_HELPER_3(vfp_divh, f16, f16, f16, fpst)
-DEF_HELPER_3(vfp_divs, f32, f32, f32, fpst)
-DEF_HELPER_3(vfp_divd, f64, f64, f64, fpst)
-DEF_HELPER_3(vfp_maxh, f16, f16, f16, fpst)
-DEF_HELPER_3(vfp_maxs, f32, f32, f32, fpst)
-DEF_HELPER_3(vfp_maxd, f64, f64, f64, fpst)
-DEF_HELPER_3(vfp_minh, f16, f16, f16, fpst)
-DEF_HELPER_3(vfp_mins, f32, f32, f32, fpst)
-DEF_HELPER_3(vfp_mind, f64, f64, f64, fpst)
-DEF_HELPER_3(vfp_maxnumh, f16, f16, f16, fpst)
-DEF_HELPER_3(vfp_maxnums, f32, f32, f32, fpst)
-DEF_HELPER_3(vfp_maxnumd, f64, f64, f64, fpst)
-DEF_HELPER_3(vfp_minnumh, f16, f16, f16, fpst)
-DEF_HELPER_3(vfp_minnums, f32, f32, f32, fpst)
-DEF_HELPER_3(vfp_minnumd, f64, f64, f64, fpst)
-DEF_HELPER_2(vfp_sqrth, f16, f16, fpst)
-DEF_HELPER_2(vfp_sqrts, f32, f32, fpst)
-DEF_HELPER_2(vfp_sqrtd, f64, f64, fpst)
-DEF_HELPER_3(vfp_cmph, void, f16, f16, env)
-DEF_HELPER_3(vfp_cmps, void, f32, f32, env)
-DEF_HELPER_3(vfp_cmpd, void, f64, f64, env)
-DEF_HELPER_3(vfp_cmpeh, void, f16, f16, env)
-DEF_HELPER_3(vfp_cmpes, void, f32, f32, env)
-DEF_HELPER_3(vfp_cmped, void, f64, f64, env)
-
-DEF_HELPER_2(vfp_fcvtds, f64, f32, fpst)
-DEF_HELPER_2(vfp_fcvtsd, f32, f64, fpst)
-DEF_HELPER_FLAGS_2(bfcvt, TCG_CALL_NO_RWG, i32, f32, fpst)
-DEF_HELPER_FLAGS_2(bfcvt_pair, TCG_CALL_NO_RWG, i32, i64, fpst)
-
-DEF_HELPER_2(vfp_uitoh, f16, i32, fpst)
-DEF_HELPER_2(vfp_uitos, f32, i32, fpst)
-DEF_HELPER_2(vfp_uitod, f64, i32, fpst)
-DEF_HELPER_2(vfp_sitoh, f16, i32, fpst)
-DEF_HELPER_2(vfp_sitos, f32, i32, fpst)
-DEF_HELPER_2(vfp_sitod, f64, i32, fpst)
-
-DEF_HELPER_2(vfp_touih, i32, f16, fpst)
-DEF_HELPER_2(vfp_touis, i32, f32, fpst)
-DEF_HELPER_2(vfp_touid, i32, f64, fpst)
-DEF_HELPER_2(vfp_touizh, i32, f16, fpst)
-DEF_HELPER_2(vfp_touizs, i32, f32, fpst)
-DEF_HELPER_2(vfp_touizd, i32, f64, fpst)
-DEF_HELPER_2(vfp_tosih, s32, f16, fpst)
-DEF_HELPER_2(vfp_tosis, s32, f32, fpst)
-DEF_HELPER_2(vfp_tosid, s32, f64, fpst)
-DEF_HELPER_2(vfp_tosizh, s32, f16, fpst)
-DEF_HELPER_2(vfp_tosizs, s32, f32, fpst)
-DEF_HELPER_2(vfp_tosizd, s32, f64, fpst)
-
-DEF_HELPER_3(vfp_toshh_round_to_zero, i32, f16, i32, fpst)
-DEF_HELPER_3(vfp_toslh_round_to_zero, i32, f16, i32, fpst)
-DEF_HELPER_3(vfp_touhh_round_to_zero, i32, f16, i32, fpst)
-DEF_HELPER_3(vfp_toulh_round_to_zero, i32, f16, i32, fpst)
-DEF_HELPER_3(vfp_toshs_round_to_zero, i32, f32, i32, fpst)
-DEF_HELPER_3(vfp_tosls_round_to_zero, i32, f32, i32, fpst)
-DEF_HELPER_3(vfp_touhs_round_to_zero, i32, f32, i32, fpst)
-DEF_HELPER_3(vfp_touls_round_to_zero, i32, f32, i32, fpst)
-DEF_HELPER_3(vfp_toshd_round_to_zero, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_tosld_round_to_zero, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_tosqd_round_to_zero, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_touhd_round_to_zero, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_tould_round_to_zero, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_touqd_round_to_zero, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_touhh, i32, f16, i32, fpst)
-DEF_HELPER_3(vfp_toshh, i32, f16, i32, fpst)
-DEF_HELPER_3(vfp_toulh, i32, f16, i32, fpst)
-DEF_HELPER_3(vfp_toslh, i32, f16, i32, fpst)
-DEF_HELPER_3(vfp_touqh, i64, f16, i32, fpst)
-DEF_HELPER_3(vfp_tosqh, i64, f16, i32, fpst)
-DEF_HELPER_3(vfp_toshs, i32, f32, i32, fpst)
-DEF_HELPER_3(vfp_tosls, i32, f32, i32, fpst)
-DEF_HELPER_3(vfp_tosqs, i64, f32, i32, fpst)
-DEF_HELPER_3(vfp_touhs, i32, f32, i32, fpst)
-DEF_HELPER_3(vfp_touls, i32, f32, i32, fpst)
-DEF_HELPER_3(vfp_touqs, i64, f32, i32, fpst)
-DEF_HELPER_3(vfp_toshd, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_tosld, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_tosqd, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_touhd, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_tould, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_touqd, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_shtos, f32, i32, i32, fpst)
-DEF_HELPER_3(vfp_sltos, f32, i32, i32, fpst)
-DEF_HELPER_3(vfp_sqtos, f32, i64, i32, fpst)
-DEF_HELPER_3(vfp_uhtos, f32, i32, i32, fpst)
-DEF_HELPER_3(vfp_ultos, f32, i32, i32, fpst)
-DEF_HELPER_3(vfp_uqtos, f32, i64, i32, fpst)
-DEF_HELPER_3(vfp_shtod, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_sltod, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_sqtod, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_uhtod, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_ultod, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_uqtod, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_shtoh, f16, i32, i32, fpst)
-DEF_HELPER_3(vfp_uhtoh, f16, i32, i32, fpst)
-DEF_HELPER_3(vfp_sltoh, f16, i32, i32, fpst)
-DEF_HELPER_3(vfp_ultoh, f16, i32, i32, fpst)
-DEF_HELPER_3(vfp_sqtoh, f16, i64, i32, fpst)
-DEF_HELPER_3(vfp_uqtoh, f16, i64, i32, fpst)
-
-DEF_HELPER_3(vfp_shtos_round_to_nearest, f32, i32, i32, fpst)
-DEF_HELPER_3(vfp_sltos_round_to_nearest, f32, i32, i32, fpst)
-DEF_HELPER_3(vfp_uhtos_round_to_nearest, f32, i32, i32, fpst)
-DEF_HELPER_3(vfp_ultos_round_to_nearest, f32, i32, i32, fpst)
-DEF_HELPER_3(vfp_shtod_round_to_nearest, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_sltod_round_to_nearest, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_uhtod_round_to_nearest, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_ultod_round_to_nearest, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_shtoh_round_to_nearest, f16, i32, i32, fpst)
-DEF_HELPER_3(vfp_uhtoh_round_to_nearest, f16, i32, i32, fpst)
-DEF_HELPER_3(vfp_sltoh_round_to_nearest, f16, i32, i32, fpst)
-DEF_HELPER_3(vfp_ultoh_round_to_nearest, f16, i32, i32, fpst)
-
-DEF_HELPER_FLAGS_2(set_rmode, TCG_CALL_NO_RWG, i32, i32, fpst)
-
-DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f32, TCG_CALL_NO_RWG, f32, f16, fpst, i32)
-DEF_HELPER_FLAGS_3(vfp_fcvt_f32_to_f16, TCG_CALL_NO_RWG, f16, f32, fpst, i32)
-DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f64, TCG_CALL_NO_RWG, f64, f16, fpst, i32)
-DEF_HELPER_FLAGS_3(vfp_fcvt_f64_to_f16, TCG_CALL_NO_RWG, f16, f64, fpst, i32)
-
-DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, fpst)
-DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, fpst)
-DEF_HELPER_4(vfp_muladdh, f16, f16, f16, f16, fpst)
-
-DEF_HELPER_FLAGS_2(recpe_f16, TCG_CALL_NO_RWG, f16, f16, fpst)
-DEF_HELPER_FLAGS_2(recpe_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
-DEF_HELPER_FLAGS_2(recpe_rpres_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
-DEF_HELPER_FLAGS_2(recpe_f64, TCG_CALL_NO_RWG, f64, f64, fpst)
-DEF_HELPER_FLAGS_2(rsqrte_f16, TCG_CALL_NO_RWG, f16, f16, fpst)
-DEF_HELPER_FLAGS_2(rsqrte_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
-DEF_HELPER_FLAGS_2(rsqrte_rpres_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
-DEF_HELPER_FLAGS_2(rsqrte_f64, TCG_CALL_NO_RWG, f64, f64, fpst)
-DEF_HELPER_FLAGS_1(recpe_u32, TCG_CALL_NO_RWG, i32, i32)
-DEF_HELPER_FLAGS_1(rsqrte_u32, TCG_CALL_NO_RWG, i32, i32)
-DEF_HELPER_FLAGS_4(neon_tbl, TCG_CALL_NO_RWG, i64, env, i32, i64, i64)
-
-DEF_HELPER_3(shl_cc, i32, env, i32, i32)
-DEF_HELPER_3(shr_cc, i32, env, i32, i32)
-DEF_HELPER_3(sar_cc, i32, env, i32, i32)
-DEF_HELPER_3(ror_cc, i32, env, i32, i32)
-
-DEF_HELPER_FLAGS_2(rinth_exact, TCG_CALL_NO_RWG, f16, f16, fpst)
-DEF_HELPER_FLAGS_2(rints_exact, TCG_CALL_NO_RWG, f32, f32, fpst)
-DEF_HELPER_FLAGS_2(rintd_exact, TCG_CALL_NO_RWG, f64, f64, fpst)
-DEF_HELPER_FLAGS_2(rinth, TCG_CALL_NO_RWG, f16, f16, fpst)
-DEF_HELPER_FLAGS_2(rints, TCG_CALL_NO_RWG, f32, f32, fpst)
-DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, fpst)
-
-DEF_HELPER_FLAGS_2(vjcvt, TCG_CALL_NO_RWG, i32, f64, env)
-DEF_HELPER_FLAGS_2(fjcvtzs, TCG_CALL_NO_RWG, i64, f64, fpst)
-
-DEF_HELPER_FLAGS_3(check_hcr_el2_trap, TCG_CALL_NO_WG, void, env, i32, i32)
-
-/* neon_helper.c */
-DEF_HELPER_2(neon_pmin_u8, i32, i32, i32)
-DEF_HELPER_2(neon_pmin_s8, i32, i32, i32)
-DEF_HELPER_2(neon_pmin_u16, i32, i32, i32)
-DEF_HELPER_2(neon_pmin_s16, i32, i32, i32)
-DEF_HELPER_2(neon_pmax_u8, i32, i32, i32)
-DEF_HELPER_2(neon_pmax_s8, i32, i32, i32)
-DEF_HELPER_2(neon_pmax_u16, i32, i32, i32)
-DEF_HELPER_2(neon_pmax_s16, i32, i32, i32)
-
-DEF_HELPER_2(neon_shl_u16, i32, i32, i32)
-DEF_HELPER_2(neon_shl_s16, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_u8, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_s8, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_u16, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_s16, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_u32, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_s32, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_u64, i64, i64, i64)
-DEF_HELPER_2(neon_rshl_s64, i64, i64, i64)
-DEF_HELPER_3(neon_qshl_u8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshl_s8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshl_u16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshl_s16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshl_u32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshl_s32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshl_u64, i64, env, i64, i64)
-DEF_HELPER_3(neon_qshl_s64, i64, env, i64, i64)
-DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64)
-DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrshl_s16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrshl_u32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrshl_s32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrshl_u64, i64, env, i64, i64)
-DEF_HELPER_3(neon_qrshl_s64, i64, env, i64, i64)
-DEF_HELPER_FLAGS_5(neon_sqshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_sqshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_sqshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_sqshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_uqshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_uqshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_uqshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_uqshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_sqrshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_sqrshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_sqrshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_sqrshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_uqrshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_uqrshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_uqrshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_uqrshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_sqshli_b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_sqshli_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_sqshli_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_sqshli_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_uqshli_b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_uqshli_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_uqshli_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_uqshli_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_sqshlui_b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_sqshlui_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_sqshlui_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_sqshlui_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_4(gvec_srshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_srshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_srshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_srshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_urshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_urshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_urshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_urshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_2(neon_add_u8, i32, i32, i32)
-DEF_HELPER_2(neon_add_u16, i32, i32, i32)
-DEF_HELPER_2(neon_sub_u8, i32, i32, i32)
-DEF_HELPER_2(neon_sub_u16, i32, i32, i32)
-DEF_HELPER_2(neon_mul_u8, i32, i32, i32)
-DEF_HELPER_2(neon_mul_u16, i32, i32, i32)
-
-DEF_HELPER_2(neon_tst_u8, i32, i32, i32)
-DEF_HELPER_2(neon_tst_u16, i32, i32, i32)
-DEF_HELPER_2(neon_tst_u32, i32, i32, i32)
-
-DEF_HELPER_1(neon_clz_u8, i32, i32)
-DEF_HELPER_1(neon_clz_u16, i32, i32)
-DEF_HELPER_1(neon_cls_s8, i32, i32)
-DEF_HELPER_1(neon_cls_s16, i32, i32)
-DEF_HELPER_1(neon_cls_s32, i32, i32)
-DEF_HELPER_FLAGS_3(gvec_cnt_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_3(neon_qdmulh_s16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrdmulh_s16, i32, env, i32, i32)
-DEF_HELPER_4(neon_qrdmlah_s16, i32, env, i32, i32, i32)
-DEF_HELPER_4(neon_qrdmlsh_s16, i32, env, i32, i32, i32)
-DEF_HELPER_3(neon_qdmulh_s32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32)
-DEF_HELPER_4(neon_qrdmlah_s32, i32, env, s32, s32, s32)
-DEF_HELPER_4(neon_qrdmlsh_s32, i32, env, s32, s32, s32)
-
-DEF_HELPER_1(neon_narrow_u8, i64, i64)
-DEF_HELPER_1(neon_narrow_u16, i64, i64)
-DEF_HELPER_2(neon_unarrow_sat8, i64, env, i64)
-DEF_HELPER_2(neon_narrow_sat_u8, i64, env, i64)
-DEF_HELPER_2(neon_narrow_sat_s8, i64, env, i64)
-DEF_HELPER_2(neon_unarrow_sat16, i64, env, i64)
-DEF_HELPER_2(neon_narrow_sat_u16, i64, env, i64)
-DEF_HELPER_2(neon_narrow_sat_s16, i64, env, i64)
-DEF_HELPER_2(neon_unarrow_sat32, i64, env, i64)
-DEF_HELPER_2(neon_narrow_sat_u32, i64, env, i64)
-DEF_HELPER_2(neon_narrow_sat_s32, i64, env, i64)
-DEF_HELPER_1(neon_narrow_high_u8, i32, i64)
-DEF_HELPER_1(neon_narrow_high_u16, i32, i64)
-DEF_HELPER_1(neon_narrow_round_high_u8, i32, i64)
-DEF_HELPER_1(neon_narrow_round_high_u16, i32, i64)
-DEF_HELPER_1(neon_widen_u8, i64, i32)
-DEF_HELPER_1(neon_widen_s8, i64, i32)
-DEF_HELPER_1(neon_widen_u16, i64, i32)
-DEF_HELPER_1(neon_widen_s16, i64, i32)
-
-DEF_HELPER_FLAGS_1(neon_addlp_s8, TCG_CALL_NO_RWG_SE, i64, i64)
-DEF_HELPER_FLAGS_1(neon_addlp_s16, TCG_CALL_NO_RWG_SE, i64, i64)
-DEF_HELPER_3(neon_addl_saturate_s32, i64, env, i64, i64)
-DEF_HELPER_3(neon_addl_saturate_s64, i64, env, i64, i64)
-DEF_HELPER_2(neon_abdl_u16, i64, i32, i32)
-DEF_HELPER_2(neon_abdl_s16, i64, i32, i32)
-DEF_HELPER_2(neon_abdl_u32, i64, i32, i32)
-DEF_HELPER_2(neon_abdl_s32, i64, i32, i32)
-DEF_HELPER_2(neon_abdl_u64, i64, i32, i32)
-DEF_HELPER_2(neon_abdl_s64, i64, i32, i32)
-DEF_HELPER_2(neon_mull_u8, i64, i32, i32)
-DEF_HELPER_2(neon_mull_s8, i64, i32, i32)
-DEF_HELPER_2(neon_mull_u16, i64, i32, i32)
-DEF_HELPER_2(neon_mull_s16, i64, i32, i32)
-
-DEF_HELPER_1(neon_negl_u16, i64, i64)
-DEF_HELPER_1(neon_negl_u32, i64, i64)
-
-DEF_HELPER_FLAGS_2(neon_qabs_s8, TCG_CALL_NO_RWG, i32, env, i32)
-DEF_HELPER_FLAGS_2(neon_qabs_s16, TCG_CALL_NO_RWG, i32, env, i32)
-DEF_HELPER_FLAGS_2(neon_qabs_s32, TCG_CALL_NO_RWG, i32, env, i32)
-DEF_HELPER_FLAGS_2(neon_qabs_s64, TCG_CALL_NO_RWG, i64, env, i64)
-DEF_HELPER_FLAGS_2(neon_qneg_s8, TCG_CALL_NO_RWG, i32, env, i32)
-DEF_HELPER_FLAGS_2(neon_qneg_s16, TCG_CALL_NO_RWG, i32, env, i32)
-DEF_HELPER_FLAGS_2(neon_qneg_s32, TCG_CALL_NO_RWG, i32, env, i32)
-DEF_HELPER_FLAGS_2(neon_qneg_s64, TCG_CALL_NO_RWG, i64, env, i64)
-
-DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, fpst)
-DEF_HELPER_3(neon_cge_f32, i32, i32, i32, fpst)
-DEF_HELPER_3(neon_cgt_f32, i32, i32, i32, fpst)
-DEF_HELPER_3(neon_acge_f32, i32, i32, i32, fpst)
-DEF_HELPER_3(neon_acgt_f32, i32, i32, i32, fpst)
-DEF_HELPER_3(neon_acge_f64, i64, i64, i64, fpst)
-DEF_HELPER_3(neon_acgt_f64, i64, i64, i64, fpst)
-
-/* iwmmxt_helper.c */
-DEF_HELPER_2(iwmmxt_maddsq, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_madduq, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_sadb, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_sadw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_mulslw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_mulshw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_mululw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_muluhw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_macsw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_macuw, i64, i64, i64)
-DEF_HELPER_1(iwmmxt_setpsr_nz, i32, i64)
-
-#define DEF_IWMMXT_HELPER_SIZE_ENV(name) \
-DEF_HELPER_3(iwmmxt_##name##b, i64, env, i64, i64) \
-DEF_HELPER_3(iwmmxt_##name##w, i64, env, i64, i64) \
-DEF_HELPER_3(iwmmxt_##name##l, i64, env, i64, i64) \
-
-DEF_IWMMXT_HELPER_SIZE_ENV(unpackl)
-DEF_IWMMXT_HELPER_SIZE_ENV(unpackh)
-
-DEF_HELPER_2(iwmmxt_unpacklub, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackluw, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpacklul, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackhub, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackhuw, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackhul, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpacklsb, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpacklsw, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpacklsl, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackhsb, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackhsw, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackhsl, i64, env, i64)
-
-DEF_IWMMXT_HELPER_SIZE_ENV(cmpeq)
-DEF_IWMMXT_HELPER_SIZE_ENV(cmpgtu)
-DEF_IWMMXT_HELPER_SIZE_ENV(cmpgts)
-
-DEF_IWMMXT_HELPER_SIZE_ENV(mins)
-DEF_IWMMXT_HELPER_SIZE_ENV(minu)
-DEF_IWMMXT_HELPER_SIZE_ENV(maxs)
-DEF_IWMMXT_HELPER_SIZE_ENV(maxu)
-
-DEF_IWMMXT_HELPER_SIZE_ENV(subn)
-DEF_IWMMXT_HELPER_SIZE_ENV(addn)
-DEF_IWMMXT_HELPER_SIZE_ENV(subu)
-DEF_IWMMXT_HELPER_SIZE_ENV(addu)
-DEF_IWMMXT_HELPER_SIZE_ENV(subs)
-DEF_IWMMXT_HELPER_SIZE_ENV(adds)
-
-DEF_HELPER_3(iwmmxt_avgb0, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_avgb1, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_avgw0, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_avgw1, i64, env, i64, i64)
-
-DEF_HELPER_3(iwmmxt_align, i64, i64, i64, i32)
-DEF_HELPER_4(iwmmxt_insr, i64, i64, i32, i32, i32)
-
-DEF_HELPER_1(iwmmxt_bcstb, i64, i32)
-DEF_HELPER_1(iwmmxt_bcstw, i64, i32)
-DEF_HELPER_1(iwmmxt_bcstl, i64, i32)
-
-DEF_HELPER_1(iwmmxt_addcb, i64, i64)
-DEF_HELPER_1(iwmmxt_addcw, i64, i64)
-DEF_HELPER_1(iwmmxt_addcl, i64, i64)
-
-DEF_HELPER_1(iwmmxt_msbb, i32, i64)
-DEF_HELPER_1(iwmmxt_msbw, i32, i64)
-DEF_HELPER_1(iwmmxt_msbl, i32, i64)
-
-DEF_HELPER_3(iwmmxt_srlw, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_srll, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_srlq, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_sllw, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_slll, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_sllq, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_sraw, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_sral, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_sraq, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_rorw, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_rorl, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_rorq, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_shufh, i64, env, i64, i32)
-
-DEF_HELPER_3(iwmmxt_packuw, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_packul, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_packuq, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_packsw, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_packsl, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_packsq, i64, env, i64, i64)
-
-DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32)
-DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32)
-DEF_HELPER_3(iwmmxt_muladdswl, i64, i64, i32, i32)
-
-DEF_HELPER_FLAGS_2(neon_unzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_unzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_qunzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_qunzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_qunzip32, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_zip8, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_zip16, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_qzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_qzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_qzip32, TCG_CALL_NO_RWG, void, ptr, ptr)
-
-DEF_HELPER_FLAGS_4(crypto_aese, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_aesd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_aesmc, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_aesimc, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(crypto_sha1su0, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha1c, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha1p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha1m, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_sha1h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_sha1su1, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(crypto_sha256h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha256h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_sha256su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha256su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(crypto_sha512h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha512h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_sha512su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha512su1, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(crypto_sm3tt1a, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sm3tt1b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sm3tt2a, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sm3tt2b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sm3partw1, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sm3partw2, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(crypto_sm4e, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sm4ekey, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(crypto_rax1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
-DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
-
-DEF_HELPER_FLAGS_5(gvec_qrdmlah_s16, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s16, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_qrdmlah_s32, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s32, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(sve2_sqrdmlah_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlah_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlah_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlah_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_sdot_idx_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_udot_idx_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sdot_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_udot_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sudot_idx_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usdot_idx_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fcaddd, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_6(gvec_fcmlah, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_fcmlah_idx, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_fcmlas, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_fcmlas_idx, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_fcmlad, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_sstoh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_sitos, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_ustoh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_uitos, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_tosszh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_tosizs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_touszh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_touizs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_vcvt_sf, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_uf, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rz_fs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rz_fu, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_vcvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_uh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rz_hs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rz_hu, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_vcvt_sd, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_ud, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rz_ds, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rz_du, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_vcvt_rm_sd, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rm_ud, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rm_ss, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rm_us, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rm_sh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rm_uh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_vrint_rm_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vrint_rm_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_vrintx_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vrintx_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_frecpe_rpres_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_frsqrte_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_frsqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_frsqrte_rpres_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_frsqrte_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_fcgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fcgt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fcgt0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_fcge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fcge0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fcge0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_fceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fceq0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fceq0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_fcle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fcle0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fcle0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_fclt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fclt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fclt0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_ah_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_ah_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_ah_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fceq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fceq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fceq_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fcge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fcge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fcge_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fcgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fcgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fcgt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_facge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_facge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_facge_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_facgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_facgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_facgt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmax_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmax_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmax_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmin_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmin_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmin_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmaxnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmaxnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmaxnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fminnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fminnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fminnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_recps_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_recps_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmla_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmls_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmls_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_vfma_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_vfma_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_vfma_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_vfms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_ah_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_ah_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_ah_vfms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_ftsmul_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmul_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmul_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmul_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_6(gvec_fmla_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_fmla_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_fmla_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_6(gvec_fmls_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_fmls_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_fmls_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_uqadd_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqadd_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqadd_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqadd_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqadd_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqadd_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqadd_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqadd_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqsub_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqsub_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqsub_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqsub_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqsub_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqsub_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqsub_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqsub_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usqadd_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usqadd_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usqadd_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usqadd_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_suqadd_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_suqadd_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_suqadd_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_suqadd_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmlal_a32, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(gvec_fmlal_a64, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a32, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a64, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_2(frint32_s, TCG_CALL_NO_RWG, f32, f32, fpst)
-DEF_HELPER_FLAGS_2(frint64_s, TCG_CALL_NO_RWG, f32, f32, fpst)
-DEF_HELPER_FLAGS_2(frint32_d, TCG_CALL_NO_RWG, f64, f64, fpst)
-DEF_HELPER_FLAGS_2(frint64_d, TCG_CALL_NO_RWG, f64, f64, fpst)
-
-DEF_HELPER_FLAGS_3(gvec_ceq0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_clt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_clt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_cle0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_cle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_cgt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_cgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_cge0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_cge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_smulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_smulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_smulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_smulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_umulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_umulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_umulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_umulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_sshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_ushl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_pmul_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_pmull_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(neon_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_ssra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ssra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ssra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ssra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_usra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_usra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_usra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_usra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_srshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_srshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_srshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_srshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_urshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_urshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_urshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_urshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_srsra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_srsra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_srsra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_srsra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_ursra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ursra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ursra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ursra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_sri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_sri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_sri_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_sri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_sli_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_sli_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_sli_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_sli_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_sabd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_uabd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_saba_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_saba_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_saba_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_saba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_uaba_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uaba_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uaba_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uaba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_mul_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_mul_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_mul_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_mla_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_mla_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_mla_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_mls_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_mls_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_mls_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(neon_sqdmulh_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqdmulh_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(neon_sqrdmulh_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqrdmulh_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(neon_sqdmulh_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqdmulh_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(neon_sqrdmulh_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(neon_sqrdmlah_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqrdmlah_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(neon_sqrdmlsh_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqrdmlsh_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_6(sve2_fmlal_zzzw_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_6(sve2_fmlal_zzxw_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_smmla_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_ummla_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usmmla_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_6(gvec_bfdot, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_6(gvec_bfdot_idx, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_6(gvec_bfmmla, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_sclamp_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sclamp_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sclamp_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sclamp_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_uclamp_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uclamp_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uclamp_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uclamp_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_faddp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_faddp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_faddp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmaxp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fminp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmaxnump_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmaxnump_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmaxnump_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fminnump_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fminnump_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fminnump_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_addp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_addp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_addp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_addp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_smaxp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_smaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_smaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_sminp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_umaxp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_umaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_umaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_uminp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_urecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ursqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+#include "tcg/helper.h"
#ifdef TARGET_AARCH64
#include "tcg/helper-a64.h"
diff --git a/target/arm/hvf-stub.c b/target/arm/hvf-stub.c
new file mode 100644
index 0000000..ff13726
--- /dev/null
+++ b/target/arm/hvf-stub.c
@@ -0,0 +1,20 @@
+/*
+ * QEMU Hypervisor.framework (HVF) stubs for ARM
+ *
+ * Copyright (c) Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "hvf_arm.h"
+
+uint32_t hvf_arm_get_default_ipa_bit_size(void)
+{
+ g_assert_not_reached();
+}
+
+uint32_t hvf_arm_get_max_ipa_bit_size(void)
+{
+ g_assert_not_reached();
+}
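
The stub above is the link-time companion for builds without Hypervisor.framework: common ARM code can reference the two IPA-size queries unconditionally, and the g_assert_not_reached() bodies encode the invariant that they are only reached when HVF is the active accelerator. A minimal sketch of the call-site discipline this assumes (the fallback value is illustrative):

    /* hvf_enabled() is QEMU's accelerator predicate */
    uint32_t ipa_bits = hvf_enabled() ? hvf_arm_get_max_ipa_bit_size()
                                      : 40; /* value decided elsewhere */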
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index 34ca36f..7b6d291 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -19,6 +19,7 @@
#include "system/hw_accel.h"
#include "hvf_arm.h"
#include "cpregs.h"
+#include "cpu-sysregs.h"
#include <mach/mach_time.h>
@@ -845,14 +846,17 @@ static uint64_t hvf_get_reg(CPUState *cpu, int rt)
return val;
}
-static void clamp_id_aa64mmfr0_parange_to_ipa_size(uint64_t *id_aa64mmfr0)
+static void clamp_id_aa64mmfr0_parange_to_ipa_size(ARMISARegisters *isar)
{
uint32_t ipa_size = chosen_ipa_bit_size ?
chosen_ipa_bit_size : hvf_arm_get_max_ipa_bit_size();
+ uint64_t id_aa64mmfr0;
/* Clamp down the PARange to the IPA size the kernel supports. */
uint8_t index = round_down_to_parange_index(ipa_size);
- *id_aa64mmfr0 = (*id_aa64mmfr0 & ~R_ID_AA64MMFR0_PARANGE_MASK) | index;
+ id_aa64mmfr0 = GET_IDREG(isar, ID_AA64MMFR0);
+ id_aa64mmfr0 = (id_aa64mmfr0 & ~R_ID_AA64MMFR0_PARANGE_MASK) | index;
+ SET_IDREG(isar, ID_AA64MMFR0, id_aa64mmfr0);
}
static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
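
The hunk above shows the shape of the whole refactoring: named id_aa64* fields become slots in an idregs[] array, addressed through GET_IDREG()/SET_IDREG() and per-register *_IDX constants from cpu-sysregs.h. A sketch of the accessor pattern, assuming the obvious token-pasting definition (illustrative, not the verbatim macros):

    #define GET_IDREG(isar, name) \
        ((isar)->idregs[name ## _EL1_IDX])
    #define SET_IDREG(isar, name, value) \
        ((isar)->idregs[name ## _EL1_IDX] = (value))

Passing the whole ARMISARegisters into clamp_id_aa64mmfr0_parange_to_ipa_size() instead of a bare uint64_t * keeps the read-modify-write of ID_AA64MMFR0 behind the same accessors.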
@@ -862,16 +866,16 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
int reg;
uint64_t *val;
} regs[] = {
- { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 },
- { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 },
- { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 },
- { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 },
- { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 },
- { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 },
+ { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.idregs[ID_AA64PFR0_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.idregs[ID_AA64PFR1_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.idregs[ID_AA64DFR0_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.idregs[ID_AA64DFR1_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.idregs[ID_AA64ISAR0_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.idregs[ID_AA64ISAR1_EL1_IDX] },
/* Add ID_AA64ISAR2_EL1 here when HVF supports it */
- { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 },
- { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 },
- { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 },
+ { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.idregs[ID_AA64MMFR0_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.idregs[ID_AA64MMFR1_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.idregs[ID_AA64MMFR2_EL1_IDX] },
/* Add ID_AA64MMFR3_EL1 here when HVF supports it */
};
hv_vcpu_t fd;
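
With the table rewritten in terms of idregs[] slots, the probe loop that consumes it stays generic. A condensed sketch of that loop, assuming Hypervisor.framework's hv_vcpu_create()/hv_vcpu_get_sys_reg() signatures:

    hv_return_t r = hv_vcpu_create(&fd, &exit, NULL);
    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        /* each read lands directly in host_isar.idregs[...] */
        r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val);
    }
    /* failures are OR-ed into r and checked once, after the loop */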
@@ -879,7 +883,7 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
hv_vcpu_exit_t *exit;
int i;
- ahcf->dtb_compatible = "arm,arm-v8";
+ ahcf->dtb_compatible = "arm,armv8";
ahcf->features = (1ULL << ARM_FEATURE_V8) |
(1ULL << ARM_FEATURE_NEON) |
(1ULL << ARM_FEATURE_AARCH64) |
@@ -898,7 +902,7 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
r |= hv_vcpu_destroy(fd);
- clamp_id_aa64mmfr0_parange_to_ipa_size(&host_isar.id_aa64mmfr0);
+ clamp_id_aa64mmfr0_parange_to_ipa_size(&host_isar);
/*
* Disable SME, which is not properly handled by QEMU hvf yet.
@@ -910,7 +914,8 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
* - fix any assumptions we made that SME implies SVE (since
* on the M4 there is SME but not SVE)
*/
- host_isar.id_aa64pfr1 &= ~R_ID_AA64PFR1_SME_MASK;
+ SET_IDREG(&host_isar, ID_AA64PFR1,
+ GET_IDREG(&host_isar, ID_AA64PFR1) & ~R_ID_AA64PFR1_SME_MASK);
ahcf->isar = host_isar;
@@ -927,7 +932,7 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
ahcf->reset_sctlr |= 0x00800000;
/* Make sure we don't advertise AArch32 support for EL0/EL1 */
- if ((host_isar.id_aa64pfr0 & 0xff) != 0x11) {
+ if ((GET_IDREG(&host_isar, ID_AA64PFR0) & 0xff) != 0x11) {
return false;
}
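
The 0xff/0x11 test reads the two low nibbles of ID_AA64PFR0_EL1: EL0 at bits [3:0] and EL1 at bits [7:4], where the value 1 means the exception level supports AArch64 only. Anything else implies AArch32 support that this backend does not want to advertise. The same check written with QEMU's register-field helpers, as a sketch (assumes the standard FIELD definitions for ID_AA64PFR0):

    uint64_t pfr0 = GET_IDREG(&host_isar, ID_AA64PFR0);
    if (FIELD_EX64(pfr0, ID_AA64PFR0, EL0) != 1 ||
        FIELD_EX64(pfr0, ID_AA64PFR0, EL1) != 1) {
        return false; /* host advertises AArch32 at EL0/EL1 */
    }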
@@ -1065,12 +1070,12 @@ int hvf_arch_init_vcpu(CPUState *cpu)
/* We're limited to underlying hardware caps, override internal versions */
ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
- &arm_cpu->isar.id_aa64mmfr0);
+ &arm_cpu->isar.idregs[ID_AA64MMFR0_EL1_IDX]);
assert_hvf_ok(ret);
- clamp_id_aa64mmfr0_parange_to_ipa_size(&arm_cpu->isar.id_aa64mmfr0);
+ clamp_id_aa64mmfr0_parange_to_ipa_size(&arm_cpu->isar);
ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
- arm_cpu->isar.id_aa64mmfr0);
+ arm_cpu->isar.idregs[ID_AA64MMFR0_EL1_IDX]);
assert_hvf_ok(ret);
return 0;
@@ -1083,13 +1088,13 @@ void hvf_kick_vcpu_thread(CPUState *cpu)
}
static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
- uint32_t syndrome)
+ uint32_t syndrome, int target_el)
{
ARMCPU *arm_cpu = ARM_CPU(cpu);
CPUARMState *env = &arm_cpu->env;
cpu->exception_index = excp;
- env->exception.target_el = 1;
+ env->exception.target_el = target_el;
env->exception.syndrome = syndrome;
arm_cpu_do_interrupt(cpu);
@@ -1449,7 +1454,7 @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint64_t *val)
SYSREG_CRN(reg),
SYSREG_CRM(reg),
SYSREG_OP2(reg));
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
return 1;
}
@@ -1759,7 +1764,7 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
SYSREG_CRN(reg),
SYSREG_CRM(reg),
SYSREG_OP2(reg));
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
return 1;
}
@@ -1910,7 +1915,17 @@ int hvf_vcpu_exec(CPUState *cpu)
flush_cpu_state(cpu);
bql_unlock();
- assert_hvf_ok(hv_vcpu_run(cpu->accel->fd));
+ r = hv_vcpu_run(cpu->accel->fd);
+ bql_lock();
+ switch (r) {
+ case HV_SUCCESS:
+ break;
+ case HV_ILLEGAL_GUEST_STATE:
+ trace_hvf_illegal_guest_state();
+ /* fall through */
+ default:
+ g_assert_not_reached();
+ }
/* handle VMEXIT */
uint64_t exit_reason = hvf_exit->reason;
@@ -1918,7 +1933,6 @@ int hvf_vcpu_exec(CPUState *cpu)
uint32_t ec = syn_get_ec(syndrome);
ret = 0;
- bql_lock();
switch (exit_reason) {
case HV_EXIT_REASON_EXCEPTION:
/* This is the main one, handle below. */
@@ -1953,7 +1967,7 @@ int hvf_vcpu_exec(CPUState *cpu)
if (!hvf_find_sw_breakpoint(cpu, env->pc)) {
/* Re-inject into the guest */
ret = 0;
- hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0));
+ hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0), 1);
}
break;
}
@@ -2058,13 +2072,13 @@ int hvf_vcpu_exec(CPUState *cpu)
cpu_synchronize_state(cpu);
if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) {
if (!hvf_handle_psci_call(cpu)) {
- trace_hvf_unknown_hvc(env->xregs[0]);
+ trace_hvf_unknown_hvc(env->pc, env->xregs[0]);
/* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
env->xregs[0] = -1;
}
} else {
- trace_hvf_unknown_hvc(env->xregs[0]);
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ trace_hvf_unknown_hvc(env->pc, env->xregs[0]);
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
}
break;
case EC_AA64_SMC:
@@ -2079,7 +2093,7 @@ int hvf_vcpu_exec(CPUState *cpu)
}
} else {
trace_hvf_unknown_smc(env->xregs[0]);
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
}
break;
default:
@@ -2278,28 +2292,23 @@ static inline bool hvf_arm_hw_debug_active(CPUState *cpu)
return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}
-static void hvf_arch_set_traps(void)
+static void hvf_arch_set_traps(CPUState *cpu)
{
- CPUState *cpu;
bool should_enable_traps = false;
hv_return_t r = HV_SUCCESS;
/* Check whether guest debugging is enabled for at least one vCPU; if it
* is, enable exiting the guest on all vCPUs */
- CPU_FOREACH(cpu) {
- should_enable_traps |= cpu->accel->guest_debug_enabled;
- }
- CPU_FOREACH(cpu) {
- /* Set whether debug exceptions exit the guest */
- r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd,
- should_enable_traps);
- assert_hvf_ok(r);
+ should_enable_traps |= cpu->accel->guest_debug_enabled;
+ /* Set whether debug exceptions exit the guest */
+ r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd,
+ should_enable_traps);
+ assert_hvf_ok(r);
- /* Set whether accesses to debug registers exit the guest */
- r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd,
- should_enable_traps);
- assert_hvf_ok(r);
- }
+ /* Set whether accesses to debug registers exit the guest */
+ r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd,
+ should_enable_traps);
+ assert_hvf_ok(r);
}
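/*
 * [Editor's note] hvf_arch_set_traps() now configures only the vCPU it is
 * handed; since its sole caller, hvf_arch_update_guest_debug(), already
 * runs per-vCPU, the old CPU_FOREACH double loop collapses away. The
 * surviving comment above still says "on all vCPUs", describing the old
 * behaviour.
 */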
void hvf_arch_update_guest_debug(CPUState *cpu)
@@ -2340,7 +2349,7 @@ void hvf_arch_update_guest_debug(CPUState *cpu)
deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 0);
}
- hvf_arch_set_traps();
+ hvf_arch_set_traps(cpu);
}
bool hvf_arch_supports_guest_debug(void)
diff --git a/target/arm/hvf/trace-events b/target/arm/hvf/trace-events
index 4fbbe4b..b49746f 100644
--- a/target/arm/hvf/trace-events
+++ b/target/arm/hvf/trace-events
@@ -5,9 +5,10 @@ hvf_inject_irq(void) "injecting IRQ"
hvf_data_abort(uint64_t pc, uint64_t va, uint64_t pa, bool isv, bool iswrite, bool s1ptw, uint32_t len, uint32_t srt) "data abort: [pc=0x%"PRIx64" va=0x%016"PRIx64" pa=0x%016"PRIx64" isv=%d iswrite=%d s1ptw=%d len=%d srt=%d]"
hvf_sysreg_read(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg read 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d) = 0x%016"PRIx64
hvf_sysreg_write(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg write 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d, val=0x%016"PRIx64")"
-hvf_unknown_hvc(uint64_t x0) "unknown HVC! 0x%016"PRIx64
+hvf_unknown_hvc(uint64_t pc, uint64_t x0) "pc=0x%"PRIx64" unknown HVC! 0x%016"PRIx64
hvf_unknown_smc(uint64_t x0) "unknown SMC! 0x%016"PRIx64
hvf_exit(uint64_t syndrome, uint32_t ec, uint64_t pc) "exit: 0x%"PRIx64" [ec=0x%x pc=0x%"PRIx64"]"
-hvf_psci_call(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3, uint32_t cpuid) "PSCI Call x0=0x%016"PRIx64" x1=0x%016"PRIx64" x2=0x%016"PRIx64" x3=0x%016"PRIx64" cpu=0x%x"
+hvf_psci_call(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3, uint32_t cpuid) "PSCI Call x0=0x%016"PRIx64" x1=0x%016"PRIx64" x2=0x%016"PRIx64" x3=0x%016"PRIx64" cpuid=0x%x"
hvf_vgic_write(const char *name, uint64_t val) "vgic write to %s [val=0x%016"PRIx64"]"
hvf_vgic_read(const char *name, uint64_t val) "vgic read from %s [val=0x%016"PRIx64"]"
+hvf_illegal_guest_state(void) "HV_ILLEGAL_GUEST_STATE"
diff --git a/target/arm/hvf_arm.h b/target/arm/hvf_arm.h
index 26c717b..ea82f26 100644
--- a/target/arm/hvf_arm.h
+++ b/target/arm/hvf_arm.h
@@ -11,7 +11,7 @@
#ifndef QEMU_HVF_ARM_H
#define QEMU_HVF_ARM_H
-#include "cpu.h"
+#include "target/arm/cpu-qom.h"
/**
* hvf_arm_init_debug() - initialize guest debug capabilities
@@ -22,23 +22,7 @@ void hvf_arm_init_debug(void);
void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu);
-#ifdef CONFIG_HVF
-
uint32_t hvf_arm_get_default_ipa_bit_size(void);
uint32_t hvf_arm_get_max_ipa_bit_size(void);
-#else
-
-static inline uint32_t hvf_arm_get_default_ipa_bit_size(void)
-{
- return 0;
-}
-
-static inline uint32_t hvf_arm_get_max_ipa_bit_size(void)
-{
- return 0;
-}
-
-#endif
-
#endif
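/*
 * [Editor's note] The deleted !CONFIG_HVF inline fallbacks move into the
 * new hvf-stub.c, compiled via the CONFIG_HVF if_false branch added to
 * meson.build further down this page. A plausible stub body -- assuming
 * the file follows the "must never be called" pattern of the KVM stubs
 * below, rather than returning 0 as the old inlines did:
 *
 *   uint32_t hvf_arm_get_default_ipa_bit_size(void)
 *   {
 *       g_assert_not_reached();
 *   }
 */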
diff --git a/target/arm/hyp_gdbstub.c b/target/arm/hyp_gdbstub.c
index 0512d67..bb59697 100644
--- a/target/arm/hyp_gdbstub.c
+++ b/target/arm/hyp_gdbstub.c
@@ -54,7 +54,7 @@ GArray *hw_breakpoints, *hw_watchpoints;
* here so future PC comparisons will work properly.
*/
-int insert_hw_breakpoint(target_ulong addr)
+int insert_hw_breakpoint(vaddr addr)
{
HWBreakpoint brk = {
.bcr = 0x1, /* BCR E=1, enable */
@@ -80,7 +80,7 @@ int insert_hw_breakpoint(target_ulong addr)
* Delete a breakpoint and shuffle any above down
*/
-int delete_hw_breakpoint(target_ulong pc)
+int delete_hw_breakpoint(vaddr pc)
{
int i;
for (i = 0; i < hw_breakpoints->len; i++) {
@@ -226,7 +226,7 @@ int delete_hw_watchpoint(vaddr addr, vaddr len, int type)
return -ENOENT;
}
-bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
+bool find_hw_breakpoint(CPUState *cpu, vaddr pc)
{
int i;
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 4d3d84f..21a8d67 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -28,6 +28,7 @@
#include "exec/hwaddr.h"
#include "exec/vaddr.h"
#include "exec/breakpoint.h"
+#include "accel/tcg/tb-cpu-state.h"
#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "system/memory.h"
@@ -353,7 +354,6 @@ static inline int r14_bank_number(int mode)
}
void arm_cpu_register(const ARMCPUInfo *info);
-void aarch64_cpu_register(const ARMCPUInfo *info);
void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);
@@ -372,6 +372,7 @@ void arm_restore_state_to_opc(CPUState *cs,
const uint64_t *data);
#ifdef CONFIG_TCG
+TCGTBCPUState arm_get_tb_cpu_state(CPUState *cs);
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
/* Our implementation of TCGCPUOps::cpu_exec_halt */
@@ -649,16 +650,12 @@ static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
return false;
}
-static inline void arm_handle_psci_call(ARMCPU *cpu)
-{
- g_assert_not_reached();
-}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
+#endif
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
-#endif
/**
* arm_clear_exclusive: clear the exclusive monitor
@@ -1174,7 +1171,7 @@ static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
static inline int arm_num_brps(ARMCPU *cpu)
{
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
- return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
+ return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, BRPS) + 1;
} else {
return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
}
@@ -1188,7 +1185,7 @@ static inline int arm_num_brps(ARMCPU *cpu)
static inline int arm_num_wrps(ARMCPU *cpu)
{
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
- return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
+ return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, WRPS) + 1;
} else {
return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
}
@@ -1202,7 +1199,7 @@ static inline int arm_num_wrps(ARMCPU *cpu)
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
- return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
+ return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, CTX_CMPS) + 1;
} else {
return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
}
@@ -1831,7 +1828,7 @@ void aarch64_add_sme_properties(Object *obj);
/* Return true if the gdbstub is presenting an AArch64 CPU */
static inline bool arm_gdbstub_is_aarch64(ARMCPU *cpu)
{
- return object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU);
+ return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
}
/* Read the CONTROL register as the MRS instruction would. */
@@ -1906,8 +1903,6 @@ static inline bool arm_fgt_active(CPUARMState *env, int el)
(!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}
-void assert_hflags_rebuild_correctly(CPUARMState *env);
-
/*
* Although the ARM implementation of hardware assisted debugging
* allows for different breakpoints per-core, the current GDB
@@ -1949,9 +1944,9 @@ extern GArray *hw_breakpoints, *hw_watchpoints;
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))
-bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
-int insert_hw_breakpoint(target_ulong pc);
-int delete_hw_breakpoint(target_ulong pc);
+bool find_hw_breakpoint(CPUState *cpu, vaddr pc);
+int insert_hw_breakpoint(vaddr pc);
+int delete_hw_breakpoint(vaddr pc);
bool check_watchpoint_in_range(int i, vaddr addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, vaddr addr);
diff --git a/target/arm/kvm-stub.c b/target/arm/kvm-stub.c
index 965a486..34e57fa 100644
--- a/target/arm/kvm-stub.c
+++ b/target/arm/kvm-stub.c
@@ -22,3 +22,100 @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level)
{
g_assert_not_reached();
}
+
+/*
+ * It's safe to call these functions without KVM support.
+ * They should either do nothing or return "not supported".
+ */
+bool kvm_arm_aarch32_supported(void)
+{
+ return false;
+}
+
+bool kvm_arm_pmu_supported(void)
+{
+ return false;
+}
+
+bool kvm_arm_sve_supported(void)
+{
+ return false;
+}
+
+bool kvm_arm_mte_supported(void)
+{
+ return false;
+}
+
+/*
+ * These functions should never actually be called without KVM support.
+ */
+void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_add_vcpu_properties(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa)
+{
+ g_assert_not_reached();
+}
+
+int kvm_arm_vgic_probe(void)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_pmu_init(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_reset_vcpu(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+void arm_cpu_kvm_set_irq(void *arm_cpu, int irq, int level)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_cpu_pre_save(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+bool kvm_arm_cpu_post_load(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
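/*
 * [Editor's note] These out-of-line stubs replace the static inline
 * !CONFIG_KVM fallbacks deleted from kvm_arm.h later on this page; the
 * meson.build hunk compiles this file via arm_common_system_ss's
 * CONFIG_KVM if_false branch, so common system code links against the
 * same symbols whether or not KVM support is built in.
 */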
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index 97de8c7..426f8b1 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -26,6 +26,7 @@
#include "system/kvm_int.h"
#include "kvm_arm.h"
#include "cpu.h"
+#include "cpu-sysregs.h"
#include "trace.h"
#include "internals.h"
#include "hw/pci/pci.h"
@@ -100,8 +101,7 @@ static int kvm_arm_vcpu_finalize(ARMCPU *cpu, int feature)
return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_FINALIZE, &feature);
}
-bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
- int *fdarray,
+bool kvm_arm_create_scratch_host_vcpu(int *fdarray,
struct kvm_vcpu_init *init)
{
int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1;
@@ -150,40 +150,13 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
struct kvm_vcpu_init preferred;
ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, &preferred);
- if (!ret) {
- init->target = preferred.target;
- }
- }
- if (ret >= 0) {
- ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
if (ret < 0) {
goto err;
}
- } else if (cpus_to_try) {
- /* Old kernel which doesn't know about the
- * PREFERRED_TARGET ioctl: we know it will only support
- * creating one kind of guest CPU which is its preferred
- * CPU type.
- */
- struct kvm_vcpu_init try;
-
- while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
- try.target = *cpus_to_try++;
- memcpy(try.features, init->features, sizeof(init->features));
- ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, &try);
- if (ret >= 0) {
- break;
- }
- }
- if (ret < 0) {
- goto err;
- }
- init->target = try.target;
- } else {
- /* Treat a NULL cpus_to_try argument the same as an empty
- * list, which means we will fail the call since this must
- * be an old kernel which doesn't support PREFERRED_TARGET.
- */
+ init->target = preferred.target;
+ }
+ ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
+ if (ret < 0) {
goto err;
}
@@ -246,6 +219,28 @@ static bool kvm_arm_pauth_supported(void)
kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC));
}
+
+static uint64_t idregs_sysreg_to_kvm_reg(ARMSysRegs sysreg)
+{
+ return ARM64_SYS_REG((sysreg & CP_REG_ARM64_SYSREG_OP0_MASK) >> CP_REG_ARM64_SYSREG_OP0_SHIFT,
+ (sysreg & CP_REG_ARM64_SYSREG_OP1_MASK) >> CP_REG_ARM64_SYSREG_OP1_SHIFT,
+ (sysreg & CP_REG_ARM64_SYSREG_CRN_MASK) >> CP_REG_ARM64_SYSREG_CRN_SHIFT,
+ (sysreg & CP_REG_ARM64_SYSREG_CRM_MASK) >> CP_REG_ARM64_SYSREG_CRM_SHIFT,
+ (sysreg & CP_REG_ARM64_SYSREG_OP2_MASK) >> CP_REG_ARM64_SYSREG_OP2_SHIFT);
+}
+
+/* Read a sysreg value and store it in the idregs array. */
+static int get_host_cpu_reg(int fd, ARMHostCPUFeatures *ahcf, ARMIDRegisterIdx index)
+{
+ uint64_t *reg;
+ int ret;
+
+ reg = &ahcf->isar.idregs[index];
+ ret = read_sys_reg64(fd, reg,
+ idregs_sysreg_to_kvm_reg(id_register_sysreg[index]));
+ return ret;
+}
+
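/*
 * [Editor's note] A worked example of the conversion above, checked
 * against the open-coded constant this patch deletes just below:
 * ID_AA64PFR0_EL1 is encoded as op0=3 op1=0 crn=0 crm=4 op2=0, so for that
 * index idregs_sysreg_to_kvm_reg() must rebuild exactly the value the old
 * read_sys_reg64() call spelled out by hand:
 *
 *   assert(idregs_sysreg_to_kvm_reg(id_register_sysreg[ID_AA64PFR0_EL1_IDX])
 *          == ARM64_SYS_REG(3, 0, 0, 4, 0));
 */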
static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
/* Identify the feature bits corresponding to the host CPU, and
@@ -259,17 +254,6 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
uint64_t features = 0;
int err;
- /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
- * we know these will only support creating one kind of guest CPU,
- * which is its preferred CPU type. Fortunately these old kernels
- * support only a very limited number of CPUs.
- */
- static const uint32_t cpus_to_try[] = {
- KVM_ARM_TARGET_AEM_V8,
- KVM_ARM_TARGET_FOUNDATION_V8,
- KVM_ARM_TARGET_CORTEX_A57,
- QEMU_KVM_ARM_TARGET_NONE
- };
/*
* target = -1 informs kvm_arm_create_scratch_host_vcpu()
* to use the preferred target
@@ -300,15 +284,15 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
features |= 1ULL << ARM_FEATURE_PMU;
}
- if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
+ if (!kvm_arm_create_scratch_host_vcpu(fdarray, &init)) {
return false;
}
ahcf->target = init.target;
- ahcf->dtb_compatible = "arm,arm-v8";
+ ahcf->dtb_compatible = "arm,armv8";
+ int fd = fdarray[2];
- err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
- ARM64_SYS_REG(3, 0, 0, 4, 0));
+ err = get_host_cpu_reg(fd, ahcf, ID_AA64PFR0_EL1_IDX);
if (unlikely(err < 0)) {
/*
* Before v4.15, the kernel only exposed a limited number of system
@@ -326,31 +310,20 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
* ??? Either of these sounds like too much effort just
* to work around running a very old kernel.
*/
- ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
+ SET_IDREG(&ahcf->isar, ID_AA64PFR0, 0x00000011); /* EL1&0, AArch64 only */
err = 0;
} else {
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
- ARM64_SYS_REG(3, 0, 0, 4, 1));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0,
- ARM64_SYS_REG(3, 0, 0, 4, 5));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
- ARM64_SYS_REG(3, 0, 0, 5, 0));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
- ARM64_SYS_REG(3, 0, 0, 5, 1));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
- ARM64_SYS_REG(3, 0, 0, 6, 0));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
- ARM64_SYS_REG(3, 0, 0, 6, 1));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2,
- ARM64_SYS_REG(3, 0, 0, 6, 2));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
- ARM64_SYS_REG(3, 0, 0, 7, 0));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
- ARM64_SYS_REG(3, 0, 0, 7, 1));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
- ARM64_SYS_REG(3, 0, 0, 7, 2));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr3,
- ARM64_SYS_REG(3, 0, 0, 7, 3));
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64PFR1_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64SMFR0_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64DFR0_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64DFR1_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64ISAR0_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64ISAR1_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64ISAR2_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR0_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR1_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR2_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR3_EL1_IDX);
/*
* Note that if AArch32 support is not present in the host,
@@ -359,49 +332,31 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
* than skipping the reads and leaving 0, as we must avoid
* considering the values in every case.
*/
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
- ARM64_SYS_REG(3, 0, 0, 1, 0));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
- ARM64_SYS_REG(3, 0, 0, 1, 1));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
- ARM64_SYS_REG(3, 0, 0, 1, 2));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
- ARM64_SYS_REG(3, 0, 0, 1, 4));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
- ARM64_SYS_REG(3, 0, 0, 1, 5));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
- ARM64_SYS_REG(3, 0, 0, 1, 6));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
- ARM64_SYS_REG(3, 0, 0, 1, 7));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
- ARM64_SYS_REG(3, 0, 0, 2, 0));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
- ARM64_SYS_REG(3, 0, 0, 2, 1));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
- ARM64_SYS_REG(3, 0, 0, 2, 2));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
- ARM64_SYS_REG(3, 0, 0, 2, 3));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
- ARM64_SYS_REG(3, 0, 0, 2, 4));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
- ARM64_SYS_REG(3, 0, 0, 2, 5));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
- ARM64_SYS_REG(3, 0, 0, 2, 6));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
- ARM64_SYS_REG(3, 0, 0, 2, 7));
-
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
+ err |= get_host_cpu_reg(fd, ahcf, ID_PFR0_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_PFR1_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_DFR0_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_MMFR0_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_MMFR1_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_MMFR2_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_MMFR3_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_ISAR0_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_ISAR1_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_ISAR2_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_ISAR3_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_ISAR4_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_ISAR5_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_ISAR6_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_MMFR4_EL1_IDX);
+
+ err |= read_sys_reg32(fd, &ahcf->isar.mvfr0,
ARM64_SYS_REG(3, 0, 0, 3, 0));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
+ err |= read_sys_reg32(fd, &ahcf->isar.mvfr1,
ARM64_SYS_REG(3, 0, 0, 3, 1));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
+ err |= read_sys_reg32(fd, &ahcf->isar.mvfr2,
ARM64_SYS_REG(3, 0, 0, 3, 2));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
- ARM64_SYS_REG(3, 0, 0, 3, 4));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1,
- ARM64_SYS_REG(3, 0, 0, 3, 5));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5,
- ARM64_SYS_REG(3, 0, 0, 3, 6));
+ err |= get_host_cpu_reg(fd, ahcf, ID_PFR2_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_DFR1_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_MMFR5_EL1_IDX);
/*
* DBGDIDR is a bit complicated because the kernel doesn't
@@ -413,14 +368,14 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
* arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
* We only do this if the CPU supports AArch32 at EL1.
*/
- if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
- int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
- int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
+ if (FIELD_EX32_IDREG(&ahcf->isar, ID_AA64PFR0, EL1) >= 2) {
+ int wrps = FIELD_EX64_IDREG(&ahcf->isar, ID_AA64DFR0, WRPS);
+ int brps = FIELD_EX64_IDREG(&ahcf->isar, ID_AA64DFR0, BRPS);
int ctx_cmps =
- FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
+ FIELD_EX64_IDREG(&ahcf->isar, ID_AA64DFR0, CTX_CMPS);
int version = 6; /* ARMv8 debug architecture */
bool has_el3 =
- !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
+ !!FIELD_EX32_IDREG(&ahcf->isar, ID_AA64PFR0, EL3);
uint32_t dbgdidr = 0;
dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
@@ -435,7 +390,7 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
if (pmu_supported) {
/* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
+ err |= read_sys_reg64(fd, &ahcf->isar.reset_pmcr_el0,
ARM64_SYS_REG(3, 3, 9, 12, 0));
}
@@ -447,8 +402,7 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
* enabled SVE support, which resulted in an error rather than RAZ.
* So only read the register if we set KVM_ARM_VCPU_SVE above.
*/
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
- ARM64_SYS_REG(3, 0, 0, 4, 4));
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64ZFR0_EL1_IDX);
}
}
@@ -977,13 +931,24 @@ void kvm_arm_cpu_pre_save(ARMCPU *cpu)
}
}
-void kvm_arm_cpu_post_load(ARMCPU *cpu)
+bool kvm_arm_cpu_post_load(ARMCPU *cpu)
{
+ if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
+ return false;
+ }
+ /* Note that it's OK for the TCG side not to know about
+ * every register in the list; KVM is authoritative if
+ * we're using it.
+ */
+ write_list_to_cpustate(cpu);
+
/* KVM virtual time adjustment */
if (cpu->kvm_adjvtime) {
cpu->kvm_vtime = *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT);
cpu->kvm_vtime_dirty = true;
}
+
+ return true;
}
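/*
 * [Editor's note] The write_list_to_kvmstate()/write_list_to_cpustate()
 * sequence moved here verbatim from machine.c's cpu_post_load() (see that
 * hunk below); common migration code now makes a single call and learns of
 * failure through the new bool return value.
 */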
void kvm_arm_reset_vcpu(ARMCPU *cpu)
@@ -1835,7 +1800,7 @@ uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
probed = true;
- if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
+ if (!kvm_arm_create_scratch_host_vcpu(fdarray, &init)) {
error_report("failed to create scratch VCPU with SVE enabled");
abort();
}
@@ -1874,6 +1839,11 @@ static int kvm_arm_sve_set_vls(ARMCPU *cpu)
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
int ret;
@@ -1882,8 +1852,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
CPUARMState *env = &cpu->env;
uint64_t psciver;
- if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
- !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
+ if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
error_report("KVM is not supported for this guest CPU type");
return -EINVAL;
}
@@ -2468,3 +2437,32 @@ void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
cpu->kvm_mte = true;
}
}
+
+void arm_cpu_kvm_set_irq(void *arm_cpu, int irq, int level)
+{
+ ARMCPU *cpu = arm_cpu;
+ CPUARMState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ uint32_t linestate_bit;
+ int irq_id;
+
+ switch (irq) {
+ case ARM_CPU_IRQ:
+ irq_id = KVM_ARM_IRQ_CPU_IRQ;
+ linestate_bit = CPU_INTERRUPT_HARD;
+ break;
+ case ARM_CPU_FIQ:
+ irq_id = KVM_ARM_IRQ_CPU_FIQ;
+ linestate_bit = CPU_INTERRUPT_FIQ;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (level) {
+ env->irq_line_state |= linestate_bit;
+ } else {
+ env->irq_line_state &= ~linestate_bit;
+ }
+ kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
+}
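/*
 * [Editor's note] The (void *opaque, int irq, int level) signature matches
 * qemu_irq_handler, so the cpu.c side -- not shown in this diff -- can
 * plug this function straight into the CPU's inbound IRQ/FIQ lines when
 * running under KVM; that wiring is an assumption here, inferred from the
 * opaque first argument and the declaration added to kvm_arm.h below.
 */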
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index 05c3de8..7dc83ca 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -12,6 +12,7 @@
#define QEMU_KVM_ARM_H
#include "system/kvm.h"
+#include "target/arm/cpu-qom.h"
#define KVM_ARM_VGIC_V2 (1 << 0)
#define KVM_ARM_VGIC_V3 (1 << 1)
@@ -83,8 +84,10 @@ void kvm_arm_cpu_pre_save(ARMCPU *cpu);
* @cpu: ARMCPU
*
* Called from cpu_post_load() to update KVM CPU state from the cpreg list.
+ *
+ * Returns: true on success, or false if write_list_to_kvmstate failed.
*/
-void kvm_arm_cpu_post_load(ARMCPU *cpu);
+bool kvm_arm_cpu_post_load(ARMCPU *cpu);
/**
* kvm_arm_reset_vcpu:
@@ -94,13 +97,9 @@ void kvm_arm_cpu_post_load(ARMCPU *cpu);
*/
void kvm_arm_reset_vcpu(ARMCPU *cpu);
-#ifdef CONFIG_KVM
+struct kvm_vcpu_init;
/**
* kvm_arm_create_scratch_host_vcpu:
- * @cpus_to_try: array of QEMU_KVM_ARM_TARGET_* values (terminated with
- * QEMU_KVM_ARM_TARGET_NONE) to try as fallback if the kernel does not
- * know the PREFERRED_TARGET ioctl. Passing NULL is the same as passing
- * an empty array.
* @fdarray: filled in with kvmfd, vmfd, cpufd file descriptors in that order
* @init: filled in with the necessary values for creating a host
* vcpu. If NULL is provided, will not init the vCPU (though the cpufd
@@ -113,8 +112,7 @@ void kvm_arm_reset_vcpu(ARMCPU *cpu);
* Returns: true on success (and fdarray and init are filled in),
* false on failure (and fdarray and init are not valid).
*/
-bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
- int *fdarray,
+bool kvm_arm_create_scratch_host_vcpu(int *fdarray,
struct kvm_vcpu_init *init);
/**
@@ -221,85 +219,6 @@ int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level);
void kvm_arm_enable_mte(Object *cpuobj, Error **errp);
-#else
-
-/*
- * It's safe to call these functions without KVM support.
- * They should either do nothing or return "not supported".
- */
-static inline bool kvm_arm_aarch32_supported(void)
-{
- return false;
-}
-
-static inline bool kvm_arm_pmu_supported(void)
-{
- return false;
-}
-
-static inline bool kvm_arm_sve_supported(void)
-{
- return false;
-}
-
-static inline bool kvm_arm_mte_supported(void)
-{
- return false;
-}
-
-/*
- * These functions should never actually be called without KVM support.
- */
-static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
-{
- g_assert_not_reached();
-}
-
-static inline void kvm_arm_add_vcpu_properties(ARMCPU *cpu)
-{
- g_assert_not_reached();
-}
-
-static inline int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa)
-{
- g_assert_not_reached();
-}
-
-static inline int kvm_arm_vgic_probe(void)
-{
- g_assert_not_reached();
-}
-
-static inline void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq)
-{
- g_assert_not_reached();
-}
-
-static inline void kvm_arm_pmu_init(ARMCPU *cpu)
-{
- g_assert_not_reached();
-}
-
-static inline void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa)
-{
- g_assert_not_reached();
-}
-
-static inline void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
-{
- g_assert_not_reached();
-}
-
-static inline uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
-{
- g_assert_not_reached();
-}
-
-static inline void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
-{
- g_assert_not_reached();
-}
-
-#endif
+void arm_cpu_kvm_set_irq(void *arm_cpu, int irq, int level);
#endif
diff --git a/target/arm/machine.c b/target/arm/machine.c
index 978249f..e442d48 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -6,7 +6,8 @@
#include "kvm_arm.h"
#include "internals.h"
#include "cpu-features.h"
-#include "migration/cpu.h"
+#include "migration/qemu-file-types.h"
+#include "migration/vmstate.h"
#include "target/arm/gtimer.h"
static bool vfp_needed(void *opaque)
@@ -240,7 +241,6 @@ static const VMStateDescription vmstate_iwmmxt = {
}
};
-#ifdef TARGET_AARCH64
/* The expression ARM_MAX_VQ - 2 is 0 for pure AArch32 build,
* and ARMPredicateReg is actively empty. This triggers errors
* in the expansion of the VMSTATE macros.
@@ -320,7 +320,6 @@ static const VMStateDescription vmstate_za = {
VMSTATE_END_OF_LIST()
}
};
-#endif /* AARCH64 */
static bool serror_needed(void *opaque)
{
@@ -977,15 +976,9 @@ static int cpu_post_load(void *opaque, int version_id)
}
if (kvm_enabled()) {
- if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
+ if (!kvm_arm_cpu_post_load(cpu)) {
return -1;
}
- /* Note that it's OK for the TCG side not to know about
- * every register in the list; KVM is authoritative if
- * we're using it.
- */
- write_list_to_cpustate(cpu);
- kvm_arm_cpu_post_load(cpu);
} else {
if (!write_list_to_cpustate(cpu)) {
return -1;
@@ -1101,10 +1094,8 @@ const VMStateDescription vmstate_arm_cpu = {
&vmstate_pmsav7,
&vmstate_pmsav8,
&vmstate_m_security,
-#ifdef TARGET_AARCH64
&vmstate_sve,
&vmstate_za,
-#endif
&vmstate_serror,
&vmstate_irq_line_state,
&vmstate_wfxt_timer,
diff --git a/target/arm/meson.build b/target/arm/meson.build
index 3065081..7aa81e3 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -1,41 +1,58 @@
arm_ss = ss.source_set()
+arm_common_ss = ss.source_set()
arm_ss.add(files(
- 'cpu.c',
- 'debug_helper.c',
'gdbstub.c',
- 'helper.c',
- 'vfp_fpscr.c',
))
-arm_ss.add(zlib)
-
-arm_ss.add(when: 'CONFIG_KVM', if_true: files('hyp_gdbstub.c', 'kvm.c'), if_false: files('kvm-stub.c'))
-arm_ss.add(when: 'CONFIG_HVF', if_true: files('hyp_gdbstub.c'))
arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
'cpu64.c',
- 'gdbstub64.c',
-))
+ 'gdbstub64.c'))
arm_system_ss = ss.source_set()
+arm_common_system_ss = ss.source_set()
arm_system_ss.add(files(
+ 'arm-qmp-cmds.c',
+))
+arm_system_ss.add(when: 'CONFIG_KVM', if_true: files('hyp_gdbstub.c', 'kvm.c'))
+arm_system_ss.add(when: 'CONFIG_HVF', if_true: files('hyp_gdbstub.c'))
+
+arm_user_ss = ss.source_set()
+arm_user_ss.add(files('cpu.c'))
+arm_user_ss.add(when: 'TARGET_AARCH64', if_false: files(
+ 'cpu32-stubs.c',
+))
+arm_user_ss.add(files(
+ 'debug_helper.c',
+ 'helper.c',
+ 'vfp_fpscr.c',
+))
+
+arm_common_system_ss.add(files('cpu.c'))
+arm_common_system_ss.add(when: 'TARGET_AARCH64', if_false: files(
+ 'cpu32-stubs.c'))
+arm_common_system_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
+arm_common_system_ss.add(when: 'CONFIG_HVF', if_false: files('hvf-stub.c'))
+arm_common_system_ss.add(files(
'arch_dump.c',
'arm-powerctl.c',
- 'arm-qmp-cmds.c',
'cortex-regs.c',
+ 'debug_helper.c',
+ 'helper.c',
'machine.c',
'ptw.c',
+ 'vfp_fpscr.c',
))
-arm_user_ss = ss.source_set()
-
subdir('hvf')
if 'CONFIG_TCG' in config_all_accel
subdir('tcg')
else
- arm_ss.add(files('tcg-stubs.c'))
+ arm_common_system_ss.add(files('tcg-stubs.c'))
endif
target_arch += {'arm': arm_ss}
target_system_arch += {'arm': arm_system_ss}
target_user_arch += {'arm': arm_user_ss}
+target_common_arch += {'arm': arm_common_ss}
+target_common_system_arch += {'arm': arm_common_system_ss}
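# [Editor's note] Net effect of this hunk: the old arm_ss monolith splits
# into per-target sets (arm_ss, arm_system_ss, arm_user_ss) plus new
# build-once "common" sets (arm_common_ss, arm_common_system_ss); cpu.c,
# debug_helper.c, helper.c and vfp_fpscr.c now compile once for all
# system-mode targets, with the KVM/HVF stubs supplied by the if_false
# branches above.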
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index e0e82ae..561bf26 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -10,10 +10,10 @@
#include "qemu/log.h"
#include "qemu/range.h"
#include "qemu/main-loop.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "exec/tlb-flags.h"
+#include "accel/tcg/probe.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
@@ -122,7 +122,7 @@ unsigned int arm_pamax(ARMCPU *cpu)
{
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
unsigned int parange =
- FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
+ FIELD_EX64_IDREG(&cpu->isar, ID_AA64MMFR0, PARANGE);
/*
* id_aa64mmfr0 is a read-only register so values outside of the
@@ -332,7 +332,7 @@ static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
* physical address size is invalid.
*/
pps = FIELD_EX64(gpccr, GPCCR, PPS);
- if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) {
+ if (pps > FIELD_EX64_IDREG(&cpu->isar, ID_AA64MMFR0, PARANGE)) {
goto fault_walk;
}
pps = pamax_map[pps];
@@ -737,7 +737,7 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
uint64_t new_val, S1Translate *ptw,
ARMMMUFaultInfo *fi)
{
-#if defined(TARGET_AARCH64) && defined(CONFIG_TCG)
+#if defined(CONFIG_ATOMIC64) && defined(CONFIG_TCG)
uint64_t cur_val;
void *host = ptw->out_host;
@@ -1660,7 +1660,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
uint64_t ttbr;
hwaddr descaddr, indexmask, indexmask_grainsize;
uint32_t tableattrs;
- target_ulong page_size;
+ uint64_t page_size;
uint64_t attrs;
int32_t stride;
int addrsize, inputsize, outputsize;
@@ -1703,7 +1703,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
* ID_AA64MMFR0 is a read-only register so values outside of the
* supported mappings can be considered an implementation error.
*/
- ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
+ ps = FIELD_EX64_IDREG(&cpu->isar, ID_AA64MMFR0, PARANGE);
ps = MIN(ps, param.ps);
assert(ps < ARRAY_SIZE(pamax_map));
outputsize = pamax_map[ps];
@@ -1733,7 +1733,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
* validation to do here.
*/
if (inputsize < addrsize) {
- target_ulong top_bits = sextract64(address, inputsize,
+ uint64_t top_bits = sextract64(address, inputsize,
addrsize - inputsize);
if (-top_bits != param.select) {
/* The gap between the two regions is a Translation fault */
@@ -3551,13 +3551,9 @@ bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
memop, result, fi);
}
-bool get_phys_addr(CPUARMState *env, vaddr address,
- MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
- GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
+static ARMSecuritySpace
+arm_mmu_idx_to_security_space(CPUARMState *env, ARMMMUIdx mmu_idx)
{
- S1Translate ptw = {
- .in_mmu_idx = mmu_idx,
- };
ARMSecuritySpace ss;
switch (mmu_idx) {
@@ -3618,28 +3614,33 @@ bool get_phys_addr(CPUARMState *env, vaddr address,
g_assert_not_reached();
}
- ptw.in_space = ss;
+ return ss;
+}
+
+bool get_phys_addr(CPUARMState *env, vaddr address,
+ MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
+ GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
+{
+ S1Translate ptw = {
+ .in_mmu_idx = mmu_idx,
+ .in_space = arm_mmu_idx_to_security_space(env, mmu_idx),
+ };
+
return get_phys_addr_gpc(env, &ptw, address, access_type,
memop, result, fi);
}
-hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
- MemTxAttrs *attrs)
+static hwaddr arm_cpu_get_phys_page(CPUARMState *env, vaddr addr,
+ MemTxAttrs *attrs, ARMMMUIdx mmu_idx)
{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- ARMMMUIdx mmu_idx = arm_mmu_idx(env);
- ARMSecuritySpace ss = arm_security_space(env);
S1Translate ptw = {
.in_mmu_idx = mmu_idx,
- .in_space = ss,
+ .in_space = arm_mmu_idx_to_security_space(env, mmu_idx),
.in_debug = true,
};
GetPhysAddrResult res = {};
ARMMMUFaultInfo fi = {};
- bool ret;
-
- ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, 0, &res, &fi);
+ bool ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, 0, &res, &fi);
*attrs = res.f.attrs;
if (ret) {
@@ -3647,3 +3648,33 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
}
return res.f.phys_addr;
}
+
+hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
+ MemTxAttrs *attrs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+
+ hwaddr res = arm_cpu_get_phys_page(env, addr, attrs, mmu_idx);
+
+ if (res != -1) {
+ return res;
+ }
+
+ /*
+ * Memory may be accessible for an "unprivileged load/store" variant.
+ * In this case, get_a64_user_mem_index() generates an op using the
+ * unprivileged mmu idx, so we need to retry the lookup with it.
+ */
+ switch (mmu_idx) {
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ return arm_cpu_get_phys_page(env, addr, attrs, ARMMMUIdx_E10_0);
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ return arm_cpu_get_phys_page(env, addr, attrs, ARMMMUIdx_E20_0);
+ default:
+ return -1;
+ }
+}
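/*
 * [Editor's note] The retry above matters for, e.g., a debugger resolving
 * the target of an LDTR/STTR executed at EL1: those "unprivileged
 * load/store" instructions translate with the EL0 regime, so a lookup that
 * faults with ARMMMUIdx_E10_1 can still succeed via ARMMMUIdx_E10_0.
 */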
diff --git a/target/arm/tcg-stubs.c b/target/arm/tcg-stubs.c
index 93a15ca..5e5166c 100644
--- a/target/arm/tcg-stubs.c
+++ b/target/arm/tcg-stubs.c
@@ -21,10 +21,6 @@ void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
{
g_assert_not_reached();
}
-/* Temporarily while cpu_get_tb_cpu_state() is still in common code */
-void assert_hflags_rebuild_correctly(CPUARMState *env)
-{
-}
/* TLBI insns are only used by TCG, so we don't need to do anything for KVM */
void define_tlb_insn_regs(ARMCPU *cpu)
diff --git a/target/arm/tcg/arith_helper.c b/target/arm/tcg/arith_helper.c
index 9a555c7..6701398 100644
--- a/target/arm/tcg/arith_helper.c
+++ b/target/arm/tcg/arith_helper.c
@@ -6,11 +6,12 @@
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
-#include "cpu.h"
-#include "exec/helper-proto.h"
#include "qemu/crc32c.h"
#include <zlib.h> /* for crc32 */
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
/*
* Note that signed overflow is undefined in C. The following routines are
* careful to use unsigned types where modulo arithmetic is required.
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
index b34b657..eddd711 100644
--- a/target/arm/tcg/cpu-v7m.c
+++ b/target/arm/tcg/cpu-v7m.c
@@ -45,6 +45,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
static void cortex_m0_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
set_feature(&cpu->env, ARM_FEATURE_V6);
set_feature(&cpu->env, ARM_FEATURE_M);
@@ -58,51 +59,53 @@ static void cortex_m0_initfn(Object *obj)
* by looking at ID register fields. We use the same values as
* for the M3.
*/
- cpu->isar.id_pfr0 = 0x00000030;
- cpu->isar.id_pfr1 = 0x00000200;
- cpu->isar.id_dfr0 = 0x00100000;
+ SET_IDREG(isar, ID_PFR0, 0x00000030);
+ SET_IDREG(isar, ID_PFR1, 0x00000200);
+ SET_IDREG(isar, ID_DFR0, 0x00100000);
cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x00000030;
- cpu->isar.id_mmfr1 = 0x00000000;
- cpu->isar.id_mmfr2 = 0x00000000;
- cpu->isar.id_mmfr3 = 0x00000000;
- cpu->isar.id_isar0 = 0x01141110;
- cpu->isar.id_isar1 = 0x02111000;
- cpu->isar.id_isar2 = 0x21112231;
- cpu->isar.id_isar3 = 0x01111110;
- cpu->isar.id_isar4 = 0x01310102;
- cpu->isar.id_isar5 = 0x00000000;
- cpu->isar.id_isar6 = 0x00000000;
+ SET_IDREG(isar, ID_MMFR0, 0x00000030);
+ SET_IDREG(isar, ID_MMFR1, 0x00000000);
+ SET_IDREG(isar, ID_MMFR2, 0x00000000);
+ SET_IDREG(isar, ID_MMFR3, 0x00000000);
+ SET_IDREG(isar, ID_ISAR0, 0x01141110);
+ SET_IDREG(isar, ID_ISAR1, 0x02111000);
+ SET_IDREG(isar, ID_ISAR2, 0x21112231);
+ SET_IDREG(isar, ID_ISAR3, 0x01111110);
+ SET_IDREG(isar, ID_ISAR4, 0x01310102);
+ SET_IDREG(isar, ID_ISAR5, 0x00000000);
+ SET_IDREG(isar, ID_ISAR6, 0x00000000);
}
static void cortex_m3_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
set_feature(&cpu->env, ARM_FEATURE_V7);
set_feature(&cpu->env, ARM_FEATURE_M);
set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
cpu->midr = 0x410fc231;
cpu->pmsav7_dregion = 8;
- cpu->isar.id_pfr0 = 0x00000030;
- cpu->isar.id_pfr1 = 0x00000200;
- cpu->isar.id_dfr0 = 0x00100000;
+ SET_IDREG(isar, ID_PFR0, 0x00000030);
+ SET_IDREG(isar, ID_PFR1, 0x00000200);
+ SET_IDREG(isar, ID_DFR0, 0x00100000);
cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x00000030;
- cpu->isar.id_mmfr1 = 0x00000000;
- cpu->isar.id_mmfr2 = 0x00000000;
- cpu->isar.id_mmfr3 = 0x00000000;
- cpu->isar.id_isar0 = 0x01141110;
- cpu->isar.id_isar1 = 0x02111000;
- cpu->isar.id_isar2 = 0x21112231;
- cpu->isar.id_isar3 = 0x01111110;
- cpu->isar.id_isar4 = 0x01310102;
- cpu->isar.id_isar5 = 0x00000000;
- cpu->isar.id_isar6 = 0x00000000;
+ SET_IDREG(isar, ID_MMFR0, 0x00000030);
+ SET_IDREG(isar, ID_MMFR1, 0x00000000);
+ SET_IDREG(isar, ID_MMFR2, 0x00000000);
+ SET_IDREG(isar, ID_MMFR3, 0x00000000);
+ SET_IDREG(isar, ID_ISAR0, 0x01141110);
+ SET_IDREG(isar, ID_ISAR1, 0x02111000);
+ SET_IDREG(isar, ID_ISAR2, 0x21112231);
+ SET_IDREG(isar, ID_ISAR3, 0x01111110);
+ SET_IDREG(isar, ID_ISAR4, 0x01310102);
+ SET_IDREG(isar, ID_ISAR5, 0x00000000);
+ SET_IDREG(isar, ID_ISAR6, 0x00000000);
}
static void cortex_m4_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
set_feature(&cpu->env, ARM_FEATURE_V7);
set_feature(&cpu->env, ARM_FEATURE_M);
@@ -113,26 +116,27 @@ static void cortex_m4_initfn(Object *obj)
cpu->isar.mvfr0 = 0x10110021;
cpu->isar.mvfr1 = 0x11000011;
cpu->isar.mvfr2 = 0x00000000;
- cpu->isar.id_pfr0 = 0x00000030;
- cpu->isar.id_pfr1 = 0x00000200;
- cpu->isar.id_dfr0 = 0x00100000;
+ SET_IDREG(isar, ID_PFR0, 0x00000030);
+ SET_IDREG(isar, ID_PFR1, 0x00000200);
+ SET_IDREG(isar, ID_DFR0, 0x00100000);
cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x00000030;
- cpu->isar.id_mmfr1 = 0x00000000;
- cpu->isar.id_mmfr2 = 0x00000000;
- cpu->isar.id_mmfr3 = 0x00000000;
- cpu->isar.id_isar0 = 0x01141110;
- cpu->isar.id_isar1 = 0x02111000;
- cpu->isar.id_isar2 = 0x21112231;
- cpu->isar.id_isar3 = 0x01111110;
- cpu->isar.id_isar4 = 0x01310102;
- cpu->isar.id_isar5 = 0x00000000;
- cpu->isar.id_isar6 = 0x00000000;
+ SET_IDREG(isar, ID_MMFR0, 0x00000030);
+ SET_IDREG(isar, ID_MMFR1, 0x00000000);
+ SET_IDREG(isar, ID_MMFR2, 0x00000000);
+ SET_IDREG(isar, ID_MMFR3, 0x00000000);
+ SET_IDREG(isar, ID_ISAR0, 0x01141110);
+ SET_IDREG(isar, ID_ISAR1, 0x02111000);
+ SET_IDREG(isar, ID_ISAR2, 0x21112231);
+ SET_IDREG(isar, ID_ISAR3, 0x01111110);
+ SET_IDREG(isar, ID_ISAR4, 0x01310102);
+ SET_IDREG(isar, ID_ISAR5, 0x00000000);
+ SET_IDREG(isar, ID_ISAR6, 0x00000000);
}
static void cortex_m7_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
set_feature(&cpu->env, ARM_FEATURE_V7);
set_feature(&cpu->env, ARM_FEATURE_M);
@@ -143,26 +147,27 @@ static void cortex_m7_initfn(Object *obj)
cpu->isar.mvfr0 = 0x10110221;
cpu->isar.mvfr1 = 0x12000011;
cpu->isar.mvfr2 = 0x00000040;
- cpu->isar.id_pfr0 = 0x00000030;
- cpu->isar.id_pfr1 = 0x00000200;
- cpu->isar.id_dfr0 = 0x00100000;
+ SET_IDREG(isar, ID_PFR0, 0x00000030);
+ SET_IDREG(isar, ID_PFR1, 0x00000200);
+ SET_IDREG(isar, ID_DFR0, 0x00100000);
cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x00100030;
- cpu->isar.id_mmfr1 = 0x00000000;
- cpu->isar.id_mmfr2 = 0x01000000;
- cpu->isar.id_mmfr3 = 0x00000000;
- cpu->isar.id_isar0 = 0x01101110;
- cpu->isar.id_isar1 = 0x02112000;
- cpu->isar.id_isar2 = 0x20232231;
- cpu->isar.id_isar3 = 0x01111131;
- cpu->isar.id_isar4 = 0x01310132;
- cpu->isar.id_isar5 = 0x00000000;
- cpu->isar.id_isar6 = 0x00000000;
+ SET_IDREG(isar, ID_MMFR0, 0x00100030);
+ SET_IDREG(isar, ID_MMFR1, 0x00000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01000000);
+ SET_IDREG(isar, ID_MMFR3, 0x00000000);
+ SET_IDREG(isar, ID_ISAR0, 0x01101110);
+ SET_IDREG(isar, ID_ISAR1, 0x02112000);
+ SET_IDREG(isar, ID_ISAR2, 0x20232231);
+ SET_IDREG(isar, ID_ISAR3, 0x01111131);
+ SET_IDREG(isar, ID_ISAR4, 0x01310132);
+ SET_IDREG(isar, ID_ISAR5, 0x00000000);
+ SET_IDREG(isar, ID_ISAR6, 0x00000000);
}
static void cortex_m33_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
set_feature(&cpu->env, ARM_FEATURE_V8);
set_feature(&cpu->env, ARM_FEATURE_M);
@@ -175,21 +180,21 @@ static void cortex_m33_initfn(Object *obj)
cpu->isar.mvfr0 = 0x10110021;
cpu->isar.mvfr1 = 0x11000011;
cpu->isar.mvfr2 = 0x00000040;
- cpu->isar.id_pfr0 = 0x00000030;
- cpu->isar.id_pfr1 = 0x00000210;
- cpu->isar.id_dfr0 = 0x00200000;
+ SET_IDREG(isar, ID_PFR0, 0x00000030);
+ SET_IDREG(isar, ID_PFR1, 0x00000210);
+ SET_IDREG(isar, ID_DFR0, 0x00200000);
cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x00101F40;
- cpu->isar.id_mmfr1 = 0x00000000;
- cpu->isar.id_mmfr2 = 0x01000000;
- cpu->isar.id_mmfr3 = 0x00000000;
- cpu->isar.id_isar0 = 0x01101110;
- cpu->isar.id_isar1 = 0x02212000;
- cpu->isar.id_isar2 = 0x20232232;
- cpu->isar.id_isar3 = 0x01111131;
- cpu->isar.id_isar4 = 0x01310132;
- cpu->isar.id_isar5 = 0x00000000;
- cpu->isar.id_isar6 = 0x00000000;
+ SET_IDREG(isar, ID_MMFR0, 0x00101F40);
+ SET_IDREG(isar, ID_MMFR1, 0x00000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01000000);
+ SET_IDREG(isar, ID_MMFR3, 0x00000000);
+ SET_IDREG(isar, ID_ISAR0, 0x01101110);
+ SET_IDREG(isar, ID_ISAR1, 0x02212000);
+ SET_IDREG(isar, ID_ISAR2, 0x20232232);
+ SET_IDREG(isar, ID_ISAR3, 0x01111131);
+ SET_IDREG(isar, ID_ISAR4, 0x01310132);
+ SET_IDREG(isar, ID_ISAR5, 0x00000000);
+ SET_IDREG(isar, ID_ISAR6, 0x00000000);
cpu->clidr = 0x00000000;
cpu->ctr = 0x8000c000;
}
@@ -197,6 +202,7 @@ static void cortex_m33_initfn(Object *obj)
static void cortex_m55_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
set_feature(&cpu->env, ARM_FEATURE_V8);
set_feature(&cpu->env, ARM_FEATURE_V8_1M);
@@ -212,21 +218,21 @@ static void cortex_m55_initfn(Object *obj)
cpu->isar.mvfr0 = 0x10110221;
cpu->isar.mvfr1 = 0x12100211;
cpu->isar.mvfr2 = 0x00000040;
- cpu->isar.id_pfr0 = 0x20000030;
- cpu->isar.id_pfr1 = 0x00000230;
- cpu->isar.id_dfr0 = 0x10200000;
+ SET_IDREG(isar, ID_PFR0, 0x20000030);
+ SET_IDREG(isar, ID_PFR1, 0x00000230);
+ SET_IDREG(isar, ID_DFR0, 0x10200000);
cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x00111040;
- cpu->isar.id_mmfr1 = 0x00000000;
- cpu->isar.id_mmfr2 = 0x01000000;
- cpu->isar.id_mmfr3 = 0x00000011;
- cpu->isar.id_isar0 = 0x01103110;
- cpu->isar.id_isar1 = 0x02212000;
- cpu->isar.id_isar2 = 0x20232232;
- cpu->isar.id_isar3 = 0x01111131;
- cpu->isar.id_isar4 = 0x01310132;
- cpu->isar.id_isar5 = 0x00000000;
- cpu->isar.id_isar6 = 0x00000000;
+ SET_IDREG(isar, ID_MMFR0, 0x00111040);
+ SET_IDREG(isar, ID_MMFR1, 0x00000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01000000);
+ SET_IDREG(isar, ID_MMFR3, 0x00000011);
+ SET_IDREG(isar, ID_ISAR0, 0x01103110);
+ SET_IDREG(isar, ID_ISAR1, 0x02212000);
+ SET_IDREG(isar, ID_ISAR2, 0x20232232);
+ SET_IDREG(isar, ID_ISAR3, 0x01111131);
+ SET_IDREG(isar, ID_ISAR4, 0x01310132);
+ SET_IDREG(isar, ID_ISAR5, 0x00000000);
+ SET_IDREG(isar, ID_ISAR6, 0x00000000);
cpu->clidr = 0x00000000; /* caches not implemented */
cpu->ctr = 0x8303c003;
}
@@ -238,6 +244,7 @@ static const TCGCPUOps arm_v7m_tcg_ops = {
.initialize = arm_translate_init,
.translate_code = arm_translate_code,
+ .get_tb_cpu_state = arm_get_tb_cpu_state,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.debug_excp_handler = arm_debug_excp_handler,
.restore_state_to_opc = arm_restore_state_to_opc,
@@ -248,8 +255,10 @@ static const TCGCPUOps arm_v7m_tcg_ops = {
.record_sigbus = arm_cpu_record_sigbus,
#else
.tlb_fill_align = arm_cpu_tlb_fill_align,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
.cpu_exec_halt = arm_cpu_exec_halt,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = arm_v7m_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
.do_unaligned_access = arm_cpu_do_unaligned_access,
diff --git a/target/arm/tcg/cpu32.c b/target/arm/tcg/cpu32.c
index 2c45b7e..942b636 100644
--- a/target/arm/tcg/cpu32.c
+++ b/target/arm/tcg/cpu32.c
@@ -23,18 +23,19 @@
void aa32_max_features(ARMCPU *cpu)
{
uint32_t t;
+ ARMISARegisters *isar = &cpu->isar;
/* Add additional features supported by QEMU */
- t = cpu->isar.id_isar5;
+ t = GET_IDREG(isar, ID_ISAR5);
t = FIELD_DP32(t, ID_ISAR5, AES, 2); /* FEAT_PMULL */
t = FIELD_DP32(t, ID_ISAR5, SHA1, 1); /* FEAT_SHA1 */
t = FIELD_DP32(t, ID_ISAR5, SHA2, 1); /* FEAT_SHA256 */
t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
t = FIELD_DP32(t, ID_ISAR5, RDM, 1); /* FEAT_RDM */
t = FIELD_DP32(t, ID_ISAR5, VCMA, 1); /* FEAT_FCMA */
- cpu->isar.id_isar5 = t;
+ SET_IDREG(isar, ID_ISAR5, t);
- t = cpu->isar.id_isar6;
+ t = GET_IDREG(isar, ID_ISAR6);
t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1); /* FEAT_JSCVT */
t = FIELD_DP32(t, ID_ISAR6, DP, 1); /* Feat_DotProd */
t = FIELD_DP32(t, ID_ISAR6, FHM, 1); /* FEAT_FHM */
@@ -42,7 +43,7 @@ void aa32_max_features(ARMCPU *cpu)
t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1); /* FEAT_SPECRES */
t = FIELD_DP32(t, ID_ISAR6, BF16, 1); /* FEAT_AA32BF16 */
t = FIELD_DP32(t, ID_ISAR6, I8MM, 1); /* FEAT_AA32I8MM */
- cpu->isar.id_isar6 = t;
+ SET_IDREG(isar, ID_ISAR6, t);
t = cpu->isar.mvfr1;
t = FIELD_DP32(t, MVFR1, FPHP, 3); /* FEAT_FP16 */
@@ -54,38 +55,34 @@ void aa32_max_features(ARMCPU *cpu)
t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
cpu->isar.mvfr2 = t;
- t = cpu->isar.id_mmfr3;
- t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* FEAT_PAN2 */
- cpu->isar.id_mmfr3 = t;
+ FIELD_DP32_IDREG(isar, ID_MMFR3, PAN, 2); /* FEAT_PAN2 */
- t = cpu->isar.id_mmfr4;
+ t = GET_IDREG(isar, ID_MMFR4);
t = FIELD_DP32(t, ID_MMFR4, HPDS, 2); /* FEAT_HPDS2 */
t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* FEAT_TTCNP */
t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* FEAT_XNX */
t = FIELD_DP32(t, ID_MMFR4, EVT, 2); /* FEAT_EVT */
- cpu->isar.id_mmfr4 = t;
+ SET_IDREG(isar, ID_MMFR4, t);
- t = cpu->isar.id_mmfr5;
- t = FIELD_DP32(t, ID_MMFR5, ETS, 2); /* FEAT_ETS2 */
- cpu->isar.id_mmfr5 = t;
+ FIELD_DP32_IDREG(isar, ID_MMFR5, ETS, 2); /* FEAT_ETS2 */
- t = cpu->isar.id_pfr0;
+ t = GET_IDREG(isar, ID_PFR0);
t = FIELD_DP32(t, ID_PFR0, CSV2, 2); /* FEAT_CSV2 */
t = FIELD_DP32(t, ID_PFR0, DIT, 1); /* FEAT_DIT */
t = FIELD_DP32(t, ID_PFR0, RAS, 1); /* FEAT_RAS */
- cpu->isar.id_pfr0 = t;
+ SET_IDREG(isar, ID_PFR0, t);
- t = cpu->isar.id_pfr2;
+ t = GET_IDREG(isar, ID_PFR2);
t = FIELD_DP32(t, ID_PFR2, CSV3, 1); /* FEAT_CSV3 */
t = FIELD_DP32(t, ID_PFR2, SSBS, 1); /* FEAT_SSBS */
- cpu->isar.id_pfr2 = t;
+ SET_IDREG(isar, ID_PFR2, t);
- t = cpu->isar.id_dfr0;
+ t = GET_IDREG(isar, ID_DFR0);
t = FIELD_DP32(t, ID_DFR0, COPDBG, 10); /* FEAT_Debugv8p8 */
t = FIELD_DP32(t, ID_DFR0, COPSDBG, 10); /* FEAT_Debugv8p8 */
t = FIELD_DP32(t, ID_DFR0, PERFMON, 6); /* FEAT_PMUv3p5 */
- cpu->isar.id_dfr0 = t;
+ SET_IDREG(isar, ID_DFR0, t);
/* Debug ID registers. */
@@ -115,9 +112,7 @@ void aa32_max_features(ARMCPU *cpu)
t = FIELD_DP32(t, DBGDEVID1, PCSROFFSET, 2);
cpu->isar.dbgdevid1 = t;
- t = cpu->isar.id_dfr1;
- t = FIELD_DP32(t, ID_DFR1, HPMN0, 1); /* FEAT_HPMN0 */
- cpu->isar.id_dfr1 = t;
+ FIELD_DP32_IDREG(isar, ID_DFR1, HPMN0, 1); /* FEAT_HPMN0 */
}
/* CPU models. These are not needed for the AArch64 linux-user build. */
@@ -140,7 +135,7 @@ static void arm926_initfn(Object *obj)
* ARMv5 does not have the ID_ISAR registers, but we can still
* set the field to indicate Jazelle support within QEMU.
*/
- cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
+ FIELD_DP32_IDREG(&cpu->isar, ID_ISAR1, JAZELLE, 1);
/*
* Similarly, we need to set MVFR0 fields to enable vfp and short vector
* support even though ARMv5 doesn't have this register.
@@ -182,7 +177,7 @@ static void arm1026_initfn(Object *obj)
* ARMv5 does not have the ID_ISAR registers, but we can still
* set the field to indicate Jazelle support within QEMU.
*/
- cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
+ FIELD_DP32_IDREG(&cpu->isar, ID_ISAR1, JAZELLE, 1);
/*
* Similarly, we need to set MVFR0 fields to enable vfp and short vector
* support even though ARMv5 doesn't have this register.
@@ -206,6 +201,7 @@ static void arm1026_initfn(Object *obj)
static void arm1136_r2_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
/*
* What qemu calls "arm1136_r2" is actually the 1136 r0p2, ie an
* older core than plain "arm1136". In particular this does not
@@ -226,24 +222,25 @@ static void arm1136_r2_initfn(Object *obj)
cpu->isar.mvfr1 = 0x00000000;
cpu->ctr = 0x1dd20d2;
cpu->reset_sctlr = 0x00050078;
- cpu->isar.id_pfr0 = 0x111;
- cpu->isar.id_pfr1 = 0x1;
- cpu->isar.id_dfr0 = 0x2;
+ SET_IDREG(isar, ID_PFR0, 0x111);
+ SET_IDREG(isar, ID_PFR1, 0x1);
+ SET_IDREG(isar, ID_DFR0, 0x2);
cpu->id_afr0 = 0x3;
- cpu->isar.id_mmfr0 = 0x01130003;
- cpu->isar.id_mmfr1 = 0x10030302;
- cpu->isar.id_mmfr2 = 0x01222110;
- cpu->isar.id_isar0 = 0x00140011;
- cpu->isar.id_isar1 = 0x12002111;
- cpu->isar.id_isar2 = 0x11231111;
- cpu->isar.id_isar3 = 0x01102131;
- cpu->isar.id_isar4 = 0x141;
+ SET_IDREG(isar, ID_MMFR0, 0x01130003);
+ SET_IDREG(isar, ID_MMFR1, 0x10030302);
+ SET_IDREG(isar, ID_MMFR2, 0x01222110);
+ SET_IDREG(isar, ID_ISAR0, 0x00140011);
+ SET_IDREG(isar, ID_ISAR1, 0x12002111);
+ SET_IDREG(isar, ID_ISAR2, 0x11231111);
+ SET_IDREG(isar, ID_ISAR3, 0x01102131);
+ SET_IDREG(isar, ID_ISAR4, 0x141);
cpu->reset_auxcr = 7;
}
static void arm1136_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,arm1136";
set_feature(&cpu->env, ARM_FEATURE_V6K);
@@ -257,24 +254,25 @@ static void arm1136_initfn(Object *obj)
cpu->isar.mvfr1 = 0x00000000;
cpu->ctr = 0x1dd20d2;
cpu->reset_sctlr = 0x00050078;
- cpu->isar.id_pfr0 = 0x111;
- cpu->isar.id_pfr1 = 0x1;
- cpu->isar.id_dfr0 = 0x2;
+ SET_IDREG(isar, ID_PFR0, 0x111);
+ SET_IDREG(isar, ID_PFR1, 0x1);
+ SET_IDREG(isar, ID_DFR0, 0x2);
cpu->id_afr0 = 0x3;
- cpu->isar.id_mmfr0 = 0x01130003;
- cpu->isar.id_mmfr1 = 0x10030302;
- cpu->isar.id_mmfr2 = 0x01222110;
- cpu->isar.id_isar0 = 0x00140011;
- cpu->isar.id_isar1 = 0x12002111;
- cpu->isar.id_isar2 = 0x11231111;
- cpu->isar.id_isar3 = 0x01102131;
- cpu->isar.id_isar4 = 0x141;
+ SET_IDREG(isar, ID_MMFR0, 0x01130003);
+ SET_IDREG(isar, ID_MMFR1, 0x10030302);
+ SET_IDREG(isar, ID_MMFR2, 0x01222110);
+ SET_IDREG(isar, ID_ISAR0, 0x00140011);
+ SET_IDREG(isar, ID_ISAR1, 0x12002111);
+ SET_IDREG(isar, ID_ISAR2, 0x11231111);
+ SET_IDREG(isar, ID_ISAR3, 0x01102131);
+ SET_IDREG(isar, ID_ISAR4, 0x141);
cpu->reset_auxcr = 7;
}
static void arm1176_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,arm1176";
set_feature(&cpu->env, ARM_FEATURE_V6K);
@@ -289,24 +287,25 @@ static void arm1176_initfn(Object *obj)
cpu->isar.mvfr1 = 0x00000000;
cpu->ctr = 0x1dd20d2;
cpu->reset_sctlr = 0x00050078;
- cpu->isar.id_pfr0 = 0x111;
- cpu->isar.id_pfr1 = 0x11;
- cpu->isar.id_dfr0 = 0x33;
+ SET_IDREG(isar, ID_PFR0, 0x111);
+ SET_IDREG(isar, ID_PFR1, 0x11);
+ SET_IDREG(isar, ID_DFR0, 0x33);
cpu->id_afr0 = 0;
- cpu->isar.id_mmfr0 = 0x01130003;
- cpu->isar.id_mmfr1 = 0x10030302;
- cpu->isar.id_mmfr2 = 0x01222100;
- cpu->isar.id_isar0 = 0x0140011;
- cpu->isar.id_isar1 = 0x12002111;
- cpu->isar.id_isar2 = 0x11231121;
- cpu->isar.id_isar3 = 0x01102131;
- cpu->isar.id_isar4 = 0x01141;
+ SET_IDREG(isar, ID_MMFR0, 0x01130003);
+ SET_IDREG(isar, ID_MMFR1, 0x10030302);
+ SET_IDREG(isar, ID_MMFR2, 0x01222100);
+ SET_IDREG(isar, ID_ISAR0, 0x0140011);
+ SET_IDREG(isar, ID_ISAR1, 0x12002111);
+ SET_IDREG(isar, ID_ISAR2, 0x11231121);
+ SET_IDREG(isar, ID_ISAR3, 0x01102131);
+ SET_IDREG(isar, ID_ISAR4, 0x01141);
cpu->reset_auxcr = 7;
}
static void arm11mpcore_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,arm11mpcore";
set_feature(&cpu->env, ARM_FEATURE_V6K);
@@ -318,18 +317,18 @@ static void arm11mpcore_initfn(Object *obj)
cpu->isar.mvfr0 = 0x11111111;
cpu->isar.mvfr1 = 0x00000000;
cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */
- cpu->isar.id_pfr0 = 0x111;
- cpu->isar.id_pfr1 = 0x1;
- cpu->isar.id_dfr0 = 0;
+ SET_IDREG(isar, ID_PFR0, 0x111);
+ SET_IDREG(isar, ID_PFR1, 0x1);
+ SET_IDREG(isar, ID_DFR0, 0);
cpu->id_afr0 = 0x2;
- cpu->isar.id_mmfr0 = 0x01100103;
- cpu->isar.id_mmfr1 = 0x10020302;
- cpu->isar.id_mmfr2 = 0x01222000;
- cpu->isar.id_isar0 = 0x00100011;
- cpu->isar.id_isar1 = 0x12002111;
- cpu->isar.id_isar2 = 0x11221011;
- cpu->isar.id_isar3 = 0x01102131;
- cpu->isar.id_isar4 = 0x141;
+ SET_IDREG(isar, ID_MMFR0, 0x01100103);
+ SET_IDREG(isar, ID_MMFR1, 0x10020302);
+ SET_IDREG(isar, ID_MMFR2, 0x01222000);
+ SET_IDREG(isar, ID_ISAR0, 0x00100011);
+ SET_IDREG(isar, ID_ISAR1, 0x12002111);
+ SET_IDREG(isar, ID_ISAR2, 0x11221011);
+ SET_IDREG(isar, ID_ISAR3, 0x01102131);
+ SET_IDREG(isar, ID_ISAR4, 0x141);
cpu->reset_auxcr = 1;
}
@@ -343,6 +342,7 @@ static const ARMCPRegInfo cortexa8_cp_reginfo[] = {
static void cortex_a8_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a8";
set_feature(&cpu->env, ARM_FEATURE_V7);
@@ -357,19 +357,19 @@ static void cortex_a8_initfn(Object *obj)
cpu->isar.mvfr1 = 0x00011111;
cpu->ctr = 0x82048004;
cpu->reset_sctlr = 0x00c50078;
- cpu->isar.id_pfr0 = 0x1031;
- cpu->isar.id_pfr1 = 0x11;
- cpu->isar.id_dfr0 = 0x400;
+ SET_IDREG(isar, ID_PFR0, 0x1031);
+ SET_IDREG(isar, ID_PFR1, 0x11);
+ SET_IDREG(isar, ID_DFR0, 0x400);
cpu->id_afr0 = 0;
- cpu->isar.id_mmfr0 = 0x31100003;
- cpu->isar.id_mmfr1 = 0x20000000;
- cpu->isar.id_mmfr2 = 0x01202000;
- cpu->isar.id_mmfr3 = 0x11;
- cpu->isar.id_isar0 = 0x00101111;
- cpu->isar.id_isar1 = 0x12112111;
- cpu->isar.id_isar2 = 0x21232031;
- cpu->isar.id_isar3 = 0x11112131;
- cpu->isar.id_isar4 = 0x00111142;
+ SET_IDREG(isar, ID_MMFR0, 0x31100003);
+ SET_IDREG(isar, ID_MMFR1, 0x20000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01202000);
+ SET_IDREG(isar, ID_MMFR3, 0x11);
+ SET_IDREG(isar, ID_ISAR0, 0x00101111);
+ SET_IDREG(isar, ID_ISAR1, 0x12112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232031);
+ SET_IDREG(isar, ID_ISAR3, 0x11112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00111142);
cpu->isar.dbgdidr = 0x15141000;
cpu->clidr = (1 << 27) | (2 << 24) | 3;
cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
@@ -412,6 +412,7 @@ static const ARMCPRegInfo cortexa9_cp_reginfo[] = {
static void cortex_a9_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a9";
set_feature(&cpu->env, ARM_FEATURE_V7);
@@ -432,19 +433,19 @@ static void cortex_a9_initfn(Object *obj)
cpu->isar.mvfr1 = 0x01111111;
cpu->ctr = 0x80038003;
cpu->reset_sctlr = 0x00c50078;
- cpu->isar.id_pfr0 = 0x1031;
- cpu->isar.id_pfr1 = 0x11;
- cpu->isar.id_dfr0 = 0x000;
+ SET_IDREG(isar, ID_PFR0, 0x1031);
+ SET_IDREG(isar, ID_PFR1, 0x11);
+ SET_IDREG(isar, ID_DFR0, 0x000);
cpu->id_afr0 = 0;
- cpu->isar.id_mmfr0 = 0x00100103;
- cpu->isar.id_mmfr1 = 0x20000000;
- cpu->isar.id_mmfr2 = 0x01230000;
- cpu->isar.id_mmfr3 = 0x00002111;
- cpu->isar.id_isar0 = 0x00101111;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232041;
- cpu->isar.id_isar3 = 0x11112131;
- cpu->isar.id_isar4 = 0x00111142;
+ SET_IDREG(isar, ID_MMFR0, 0x00100103);
+ SET_IDREG(isar, ID_MMFR1, 0x20000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01230000);
+ SET_IDREG(isar, ID_MMFR3, 0x00002111);
+ SET_IDREG(isar, ID_ISAR0, 0x00101111);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232041);
+ SET_IDREG(isar, ID_ISAR3, 0x11112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00111142);
cpu->isar.dbgdidr = 0x35141000;
cpu->clidr = (1 << 27) | (1 << 24) | 3;
cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
@@ -479,6 +480,7 @@ static const ARMCPRegInfo cortexa15_cp_reginfo[] = {
static void cortex_a7_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a7";
set_feature(&cpu->env, ARM_FEATURE_V7VE);
@@ -497,23 +499,23 @@ static void cortex_a7_initfn(Object *obj)
cpu->isar.mvfr1 = 0x11111111;
cpu->ctr = 0x84448003;
cpu->reset_sctlr = 0x00c50078;
- cpu->isar.id_pfr0 = 0x00001131;
- cpu->isar.id_pfr1 = 0x00011011;
- cpu->isar.id_dfr0 = 0x02010555;
+ SET_IDREG(isar, ID_PFR0, 0x00001131);
+ SET_IDREG(isar, ID_PFR1, 0x00011011);
+ SET_IDREG(isar, ID_DFR0, 0x02010555);
cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x10101105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01240000;
- cpu->isar.id_mmfr3 = 0x02102211;
+ SET_IDREG(isar, ID_MMFR0, 0x10101105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01240000);
+ SET_IDREG(isar, ID_MMFR3, 0x02102211);
/*
* a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but
* table 4-41 gives 0x02101110, which includes the arm div insns.
*/
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232041;
- cpu->isar.id_isar3 = 0x11112131;
- cpu->isar.id_isar4 = 0x10011142;
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232041);
+ SET_IDREG(isar, ID_ISAR3, 0x11112131);
+ SET_IDREG(isar, ID_ISAR4, 0x10011142);
cpu->isar.dbgdidr = 0x3515f005;
cpu->isar.dbgdevid = 0x01110f13;
cpu->isar.dbgdevid1 = 0x1;
@@ -528,6 +530,7 @@ static void cortex_a7_initfn(Object *obj)
static void cortex_a15_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a15";
set_feature(&cpu->env, ARM_FEATURE_V7VE);
@@ -548,19 +551,19 @@ static void cortex_a15_initfn(Object *obj)
cpu->isar.mvfr1 = 0x11111111;
cpu->ctr = 0x8444c004;
cpu->reset_sctlr = 0x00c50078;
- cpu->isar.id_pfr0 = 0x00001131;
- cpu->isar.id_pfr1 = 0x00011011;
- cpu->isar.id_dfr0 = 0x02010555;
+ SET_IDREG(isar, ID_PFR0, 0x00001131);
+ SET_IDREG(isar, ID_PFR1, 0x00011011);
+ SET_IDREG(isar, ID_DFR0, 0x02010555);
cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x20000000;
- cpu->isar.id_mmfr2 = 0x01240000;
- cpu->isar.id_mmfr3 = 0x02102211;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232041;
- cpu->isar.id_isar3 = 0x11112131;
- cpu->isar.id_isar4 = 0x10011142;
+ SET_IDREG(isar, ID_MMFR0, 0x10201105);
+ SET_IDREG(isar, ID_MMFR1, 0x20000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01240000);
+ SET_IDREG(isar, ID_MMFR3, 0x02102211);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232041);
+ SET_IDREG(isar, ID_ISAR3, 0x11112131);
+ SET_IDREG(isar, ID_ISAR4, 0x10011142);
cpu->isar.dbgdidr = 0x3515f021;
cpu->isar.dbgdevid = 0x01110f13;
cpu->isar.dbgdevid1 = 0x0;
@@ -585,27 +588,28 @@ static const ARMCPRegInfo cortexr5_cp_reginfo[] = {
static void cortex_r5_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
set_feature(&cpu->env, ARM_FEATURE_V7);
set_feature(&cpu->env, ARM_FEATURE_V7MP);
set_feature(&cpu->env, ARM_FEATURE_PMSA);
set_feature(&cpu->env, ARM_FEATURE_PMU);
cpu->midr = 0x411fc153; /* r1p3 */
- cpu->isar.id_pfr0 = 0x0131;
- cpu->isar.id_pfr1 = 0x001;
- cpu->isar.id_dfr0 = 0x010400;
+ SET_IDREG(isar, ID_PFR0, 0x0131);
+ SET_IDREG(isar, ID_PFR1, 0x001);
+ SET_IDREG(isar, ID_DFR0, 0x010400);
cpu->id_afr0 = 0x0;
- cpu->isar.id_mmfr0 = 0x0210030;
- cpu->isar.id_mmfr1 = 0x00000000;
- cpu->isar.id_mmfr2 = 0x01200000;
- cpu->isar.id_mmfr3 = 0x0211;
- cpu->isar.id_isar0 = 0x02101111;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232141;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x0010142;
- cpu->isar.id_isar5 = 0x0;
- cpu->isar.id_isar6 = 0x0;
+ SET_IDREG(isar, ID_MMFR0, 0x0210030);
+ SET_IDREG(isar, ID_MMFR1, 0x00000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01200000);
+ SET_IDREG(isar, ID_MMFR3, 0x0211);
+ SET_IDREG(isar, ID_ISAR0, 0x02101111);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232141);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x0010142);
+ SET_IDREG(isar, ID_ISAR5, 0x0);
+ SET_IDREG(isar, ID_ISAR6, 0x0);
cpu->mp_is_up = true;
cpu->pmsav7_dregion = 16;
cpu->isar.reset_pmcr_el0 = 0x41151800;
@@ -720,6 +724,7 @@ static const ARMCPRegInfo cortex_r52_cp_reginfo[] = {
static void cortex_r52_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
set_feature(&cpu->env, ARM_FEATURE_V8);
set_feature(&cpu->env, ARM_FEATURE_EL2);
@@ -737,21 +742,21 @@ static void cortex_r52_initfn(Object *obj)
cpu->isar.mvfr2 = 0x00000043;
cpu->ctr = 0x8144c004;
cpu->reset_sctlr = 0x30c50838;
- cpu->isar.id_pfr0 = 0x00000131;
- cpu->isar.id_pfr1 = 0x10111001;
- cpu->isar.id_dfr0 = 0x03010006;
+ SET_IDREG(isar, ID_PFR0, 0x00000131);
+ SET_IDREG(isar, ID_PFR1, 0x10111001);
+ SET_IDREG(isar, ID_DFR0, 0x03010006);
cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x00211040;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01200000;
- cpu->isar.id_mmfr3 = 0xf0102211;
- cpu->isar.id_mmfr4 = 0x00000010;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232142;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00010142;
- cpu->isar.id_isar5 = 0x00010001;
+ SET_IDREG(isar, ID_MMFR0, 0x00211040);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01200000);
+ SET_IDREG(isar, ID_MMFR3, 0xf0102211);
+ SET_IDREG(isar, ID_MMFR4, 0x00000010);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232142);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00010142);
+ SET_IDREG(isar, ID_ISAR5, 0x00010001);
cpu->isar.dbgdidr = 0x77168000;
cpu->clidr = (1 << 27) | (1 << 24) | 0x3;
cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
@@ -949,6 +954,7 @@ static void pxa270c5_initfn(Object *obj)
static void arm_max_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
/* aarch64_a57_initfn, advertising none of the aarch64 features */
cpu->dtb_compatible = "arm,cortex-a57";
@@ -968,21 +974,21 @@ static void arm_max_initfn(Object *obj)
cpu->isar.mvfr2 = 0x00000043;
cpu->ctr = 0x8444c004;
cpu->reset_sctlr = 0x00c50838;
- cpu->isar.id_pfr0 = 0x00000131;
- cpu->isar.id_pfr1 = 0x00011011;
- cpu->isar.id_dfr0 = 0x03010066;
+ SET_IDREG(isar, ID_PFR0, 0x00000131);
+ SET_IDREG(isar, ID_PFR1, 0x00011011);
+ SET_IDREG(isar, ID_DFR0, 0x03010066);
cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x10101105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02102211;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00011142;
- cpu->isar.id_isar5 = 0x00011121;
- cpu->isar.id_isar6 = 0;
+ SET_IDREG(isar, ID_MMFR0, 0x10101105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02102211);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00011142);
+ SET_IDREG(isar, ID_ISAR5, 0x00011121);
+ SET_IDREG(isar, ID_ISAR6, 0);
cpu->isar.reset_pmcr_el0 = 0x41013000;
cpu->clidr = 0x0a200023;
cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
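
The hunks above and below mechanically replace direct stores to cpu->isar.id_* fields with SET_IDREG() calls. The macro definitions are not part of this excerpt; as a rough sketch (the idregs[] array name and the _EL1_IDX suffix are assumptions, not confirmed by this diff), the accessors plausibly map each ID register onto a slot of a per-CPU array:

    /* Sketch only -- the real definitions live elsewhere in this series. */
    #define GET_IDREG(isar, name)       ((isar)->idregs[name ## _EL1_IDX])
    #define SET_IDREG(isar, name, val)  ((isar)->idregs[name ## _EL1_IDX] = (val))
    #define FIELD_DP64_IDREG(isar, name, field, val) \
        SET_IDREG(isar, name, \
                  FIELD_DP64(GET_IDREG(isar, name), name, field, val))

Keeping the values behind a single indexed array is what lets later code iterate over all ID registers generically instead of touching each named field.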
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
index 29ab0ac..937f29e2 100644
--- a/target/arm/tcg/cpu64.c
+++ b/target/arm/tcg/cpu64.c
@@ -32,6 +32,7 @@
static void aarch64_a35_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a35";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -48,28 +49,28 @@ static void aarch64_a35_initfn(Object *obj)
cpu->midr = 0x411fd040;
cpu->revidr = 0;
cpu->ctr = 0x84448004;
- cpu->isar.id_pfr0 = 0x00000131;
- cpu->isar.id_pfr1 = 0x00011011;
- cpu->isar.id_dfr0 = 0x03010066;
+ SET_IDREG(isar, ID_PFR0, 0x00000131);
+ SET_IDREG(isar, ID_PFR1, 0x00011011);
+ SET_IDREG(isar, ID_DFR0, 0x03010066);
cpu->id_afr0 = 0;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02102211;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00011142;
- cpu->isar.id_isar5 = 0x00011121;
- cpu->isar.id_aa64pfr0 = 0x00002222;
- cpu->isar.id_aa64pfr1 = 0;
- cpu->isar.id_aa64dfr0 = 0x10305106;
- cpu->isar.id_aa64dfr1 = 0;
- cpu->isar.id_aa64isar0 = 0x00011120;
- cpu->isar.id_aa64isar1 = 0;
- cpu->isar.id_aa64mmfr0 = 0x00101122;
- cpu->isar.id_aa64mmfr1 = 0;
+ SET_IDREG(isar, ID_MMFR0, 0x10201105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02102211);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00011142);
+ SET_IDREG(isar, ID_ISAR5, 0x00011121);
+ SET_IDREG(isar, ID_AA64PFR0, 0x00002222);
+ SET_IDREG(isar, ID_AA64PFR1, 0);
+ SET_IDREG(isar, ID_AA64DFR0, 0x10305106);
+ SET_IDREG(isar, ID_AA64DFR1, 0);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x00011120);
+ SET_IDREG(isar, ID_AA64ISAR1, 0);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x00101122);
+ SET_IDREG(isar, ID_AA64MMFR1, 0);
cpu->clidr = 0x0a200023;
cpu->dcz_blocksize = 4;
@@ -157,11 +158,8 @@ static bool cpu_arm_get_rme(Object *obj, Error **errp)
static void cpu_arm_set_rme(Object *obj, bool value, Error **errp)
{
ARMCPU *cpu = ARM_CPU(obj);
- uint64_t t;
- t = cpu->isar.id_aa64pfr0;
- t = FIELD_DP64(t, ID_AA64PFR0, RME, value);
- cpu->isar.id_aa64pfr0 = t;
+ FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR0, RME, value);
}
static void cpu_max_set_l0gptsz(Object *obj, Visitor *v, const char *name,
@@ -204,6 +202,7 @@ static const Property arm_cpu_lpa2_property =
static void aarch64_a55_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a55";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -220,31 +219,31 @@ static void aarch64_a55_initfn(Object *obj)
cpu->clidr = 0x82000023;
cpu->ctr = 0x84448004; /* L1Ip = VIPT */
cpu->dcz_blocksize = 4; /* 64 bytes */
- cpu->isar.id_aa64dfr0 = 0x0000000010305408ull;
- cpu->isar.id_aa64isar0 = 0x0000100010211120ull;
- cpu->isar.id_aa64isar1 = 0x0000000000100001ull;
- cpu->isar.id_aa64mmfr0 = 0x0000000000101122ull;
- cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
- cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull;
- cpu->isar.id_aa64pfr0 = 0x0000000010112222ull;
- cpu->isar.id_aa64pfr1 = 0x0000000000000010ull;
+ SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408ull);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x0000100010211120ull);
+ SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000100001ull);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101122ull);
+ SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
+ SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011ull);
+ SET_IDREG(isar, ID_AA64PFR0, 0x0000000010112222ull);
+ SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000010ull);
cpu->id_afr0 = 0x00000000;
- cpu->isar.id_dfr0 = 0x04010088;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00011142;
- cpu->isar.id_isar5 = 0x01011121;
- cpu->isar.id_isar6 = 0x00000010;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02122211;
- cpu->isar.id_mmfr4 = 0x00021110;
- cpu->isar.id_pfr0 = 0x10010131;
- cpu->isar.id_pfr1 = 0x00011011;
- cpu->isar.id_pfr2 = 0x00000011;
+ SET_IDREG(isar, ID_DFR0, 0x04010088);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00011142);
+ SET_IDREG(isar, ID_ISAR5, 0x01011121);
+ SET_IDREG(isar, ID_ISAR6, 0x00000010);
+ SET_IDREG(isar, ID_MMFR0, 0x10201105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02122211);
+ SET_IDREG(isar, ID_MMFR4, 0x00021110);
+ SET_IDREG(isar, ID_PFR0, 0x10010131);
+ SET_IDREG(isar, ID_PFR1, 0x00011011);
+ SET_IDREG(isar, ID_PFR2, 0x00000011);
cpu->midr = 0x412FD050; /* r2p0 */
cpu->revidr = 0;
@@ -276,6 +275,7 @@ static void aarch64_a55_initfn(Object *obj)
static void aarch64_a72_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a72";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -295,24 +295,24 @@ static void aarch64_a72_initfn(Object *obj)
cpu->isar.mvfr2 = 0x00000043;
cpu->ctr = 0x8444c004;
cpu->reset_sctlr = 0x00c50838;
- cpu->isar.id_pfr0 = 0x00000131;
- cpu->isar.id_pfr1 = 0x00011011;
- cpu->isar.id_dfr0 = 0x03010066;
+ SET_IDREG(isar, ID_PFR0, 0x00000131);
+ SET_IDREG(isar, ID_PFR1, 0x00011011);
+ SET_IDREG(isar, ID_DFR0, 0x03010066);
cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02102211;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00011142;
- cpu->isar.id_isar5 = 0x00011121;
- cpu->isar.id_aa64pfr0 = 0x00002222;
- cpu->isar.id_aa64dfr0 = 0x10305106;
- cpu->isar.id_aa64isar0 = 0x00011120;
- cpu->isar.id_aa64mmfr0 = 0x00001124;
+ SET_IDREG(isar, ID_MMFR0, 0x10201105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02102211);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00011142);
+ SET_IDREG(isar, ID_ISAR5, 0x00011121);
+ SET_IDREG(isar, ID_AA64PFR0, 0x00002222);
+ SET_IDREG(isar, ID_AA64DFR0, 0x10305106);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x00011120);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x00001124);
cpu->isar.dbgdidr = 0x3516d000;
cpu->isar.dbgdevid = 0x01110f13;
cpu->isar.dbgdevid1 = 0x2;
@@ -335,6 +335,7 @@ static void aarch64_a72_initfn(Object *obj)
static void aarch64_a76_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a76";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -351,31 +352,31 @@ static void aarch64_a76_initfn(Object *obj)
cpu->clidr = 0x82000023;
cpu->ctr = 0x8444C004;
cpu->dcz_blocksize = 4;
- cpu->isar.id_aa64dfr0 = 0x0000000010305408ull;
- cpu->isar.id_aa64isar0 = 0x0000100010211120ull;
- cpu->isar.id_aa64isar1 = 0x0000000000100001ull;
- cpu->isar.id_aa64mmfr0 = 0x0000000000101122ull;
- cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
- cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull;
- cpu->isar.id_aa64pfr0 = 0x1100000010111112ull; /* GIC filled in later */
- cpu->isar.id_aa64pfr1 = 0x0000000000000010ull;
+ SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408ull);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x0000100010211120ull);
+ SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000100001ull);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101122ull);
+ SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
+ SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011ull);
+ SET_IDREG(isar, ID_AA64PFR0, 0x1100000010111112ull); /* GIC filled in later */
+ SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000010ull);
cpu->id_afr0 = 0x00000000;
- cpu->isar.id_dfr0 = 0x04010088;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00010142;
- cpu->isar.id_isar5 = 0x01011121;
- cpu->isar.id_isar6 = 0x00000010;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02122211;
- cpu->isar.id_mmfr4 = 0x00021110;
- cpu->isar.id_pfr0 = 0x10010131;
- cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */
- cpu->isar.id_pfr2 = 0x00000011;
+ SET_IDREG(isar, ID_DFR0, 0x04010088);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00010142);
+ SET_IDREG(isar, ID_ISAR5, 0x01011121);
+ SET_IDREG(isar, ID_ISAR6, 0x00000010);
+ SET_IDREG(isar, ID_MMFR0, 0x10201105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02122211);
+ SET_IDREG(isar, ID_MMFR4, 0x00021110);
+ SET_IDREG(isar, ID_PFR0, 0x10010131);
+ SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
+ SET_IDREG(isar, ID_PFR2, 0x00000011);
cpu->midr = 0x414fd0b1; /* r4p1 */
cpu->revidr = 0;
@@ -408,6 +409,7 @@ static void aarch64_a76_initfn(Object *obj)
static void aarch64_a64fx_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,a64fx";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -422,18 +424,18 @@ static void aarch64_a64fx_initfn(Object *obj)
cpu->revidr = 0x00000000;
cpu->ctr = 0x86668006;
cpu->reset_sctlr = 0x30000180;
- cpu->isar.id_aa64pfr0 = 0x0000000101111111; /* No RAS Extensions */
- cpu->isar.id_aa64pfr1 = 0x0000000000000000;
- cpu->isar.id_aa64dfr0 = 0x0000000010305408;
- cpu->isar.id_aa64dfr1 = 0x0000000000000000;
+ SET_IDREG(isar, ID_AA64PFR0, 0x0000000101111111); /* No RAS Extensions */
+ SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000000);
+ SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408);
+ SET_IDREG(isar, ID_AA64DFR1, 0x0000000000000000);
cpu->id_aa64afr0 = 0x0000000000000000;
cpu->id_aa64afr1 = 0x0000000000000000;
- cpu->isar.id_aa64mmfr0 = 0x0000000000001122;
- cpu->isar.id_aa64mmfr1 = 0x0000000011212100;
- cpu->isar.id_aa64mmfr2 = 0x0000000000001011;
- cpu->isar.id_aa64isar0 = 0x0000000010211120;
- cpu->isar.id_aa64isar1 = 0x0000000000010001;
- cpu->isar.id_aa64zfr0 = 0x0000000000000000;
+ SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000001122);
+ SET_IDREG(isar, ID_AA64MMFR1, 0x0000000011212100);
+ SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x0000000010211120);
+ SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000010001);
+ SET_IDREG(isar, ID_AA64ZFR0, 0x0000000000000000);
cpu->clidr = 0x0000000080000023;
/* 64KB L1 dcache */
cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 256, 64 * KiB, 7);
@@ -581,6 +583,7 @@ static void define_neoverse_v1_cp_reginfo(ARMCPU *cpu)
static void aarch64_neoverse_n1_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,neoverse-n1";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -597,31 +600,31 @@ static void aarch64_neoverse_n1_initfn(Object *obj)
cpu->clidr = 0x82000023;
cpu->ctr = 0x8444c004;
cpu->dcz_blocksize = 4;
- cpu->isar.id_aa64dfr0 = 0x0000000110305408ull;
- cpu->isar.id_aa64isar0 = 0x0000100010211120ull;
- cpu->isar.id_aa64isar1 = 0x0000000000100001ull;
- cpu->isar.id_aa64mmfr0 = 0x0000000000101125ull;
- cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
- cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull;
- cpu->isar.id_aa64pfr0 = 0x1100000010111112ull; /* GIC filled in later */
- cpu->isar.id_aa64pfr1 = 0x0000000000000020ull;
+ SET_IDREG(isar, ID_AA64DFR0, 0x0000000110305408ull);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x0000100010211120ull);
+ SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000100001ull);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101125ull);
+ SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
+ SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011ull);
+ SET_IDREG(isar, ID_AA64PFR0, 0x1100000010111112ull); /* GIC filled in later */
+ SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000020ull);
cpu->id_afr0 = 0x00000000;
- cpu->isar.id_dfr0 = 0x04010088;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00010142;
- cpu->isar.id_isar5 = 0x01011121;
- cpu->isar.id_isar6 = 0x00000010;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02122211;
- cpu->isar.id_mmfr4 = 0x00021110;
- cpu->isar.id_pfr0 = 0x10010131;
- cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */
- cpu->isar.id_pfr2 = 0x00000011;
+ SET_IDREG(isar, ID_DFR0, 0x04010088);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00010142);
+ SET_IDREG(isar, ID_ISAR5, 0x01011121);
+ SET_IDREG(isar, ID_ISAR6, 0x00000010);
+ SET_IDREG(isar, ID_MMFR0, 0x10201105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02122211);
+ SET_IDREG(isar, ID_MMFR4, 0x00021110);
+ SET_IDREG(isar, ID_PFR0, 0x10010131);
+ SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
+ SET_IDREG(isar, ID_PFR2, 0x00000011);
cpu->midr = 0x414fd0c1; /* r4p1 */
cpu->revidr = 0;
@@ -656,6 +659,7 @@ static void aarch64_neoverse_n1_initfn(Object *obj)
static void aarch64_neoverse_v1_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,neoverse-v1";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -674,32 +678,32 @@ static void aarch64_neoverse_v1_initfn(Object *obj)
cpu->dcz_blocksize = 4;
cpu->id_aa64afr0 = 0x00000000;
cpu->id_aa64afr1 = 0x00000000;
- cpu->isar.id_aa64dfr0 = 0x000001f210305519ull;
- cpu->isar.id_aa64dfr1 = 0x00000000;
- cpu->isar.id_aa64isar0 = 0x1011111110212120ull; /* with FEAT_RNG */
- cpu->isar.id_aa64isar1 = 0x0011100001211032ull;
- cpu->isar.id_aa64mmfr0 = 0x0000000000101125ull;
- cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
- cpu->isar.id_aa64mmfr2 = 0x0220011102101011ull;
- cpu->isar.id_aa64pfr0 = 0x1101110120111112ull; /* GIC filled in later */
- cpu->isar.id_aa64pfr1 = 0x0000000000000020ull;
+ SET_IDREG(isar, ID_AA64DFR0, 0x000001f210305519ull);
+ SET_IDREG(isar, ID_AA64DFR1, 0x00000000);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x1011111110212120ull); /* with FEAT_RNG */
+ SET_IDREG(isar, ID_AA64ISAR1, 0x0011100001211032ull);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101125ull);
+ SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
+ SET_IDREG(isar, ID_AA64MMFR2, 0x0220011102101011ull);
+ SET_IDREG(isar, ID_AA64PFR0, 0x1101110120111112ull); /* GIC filled in later */
+ SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000020ull);
cpu->id_afr0 = 0x00000000;
- cpu->isar.id_dfr0 = 0x15011099;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00010142;
- cpu->isar.id_isar5 = 0x11011121;
- cpu->isar.id_isar6 = 0x01100111;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02122211;
- cpu->isar.id_mmfr4 = 0x01021110;
- cpu->isar.id_pfr0 = 0x21110131;
- cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */
- cpu->isar.id_pfr2 = 0x00000011;
+ SET_IDREG(isar, ID_DFR0, 0x15011099);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00010142);
+ SET_IDREG(isar, ID_ISAR5, 0x11011121);
+ SET_IDREG(isar, ID_ISAR6, 0x01100111);
+ SET_IDREG(isar, ID_MMFR0, 0x10201105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02122211);
+ SET_IDREG(isar, ID_MMFR4, 0x01021110);
+ SET_IDREG(isar, ID_PFR0, 0x21110131);
+ SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
+ SET_IDREG(isar, ID_PFR2, 0x00000011);
cpu->midr = 0x411FD402; /* r1p2 */
cpu->revidr = 0;
@@ -735,7 +739,7 @@ static void aarch64_neoverse_v1_initfn(Object *obj)
cpu->isar.mvfr2 = 0x00000043;
/* From 3.7.5 ID_AA64ZFR0_EL1 */
- cpu->isar.id_aa64zfr0 = 0x0000100000100000;
+ SET_IDREG(isar, ID_AA64ZFR0, 0x0000100000100000);
cpu->sve_vq.supported = (1 << 0) /* 128bit */
| (1 << 1); /* 256bit */
@@ -882,6 +886,7 @@ static const ARMCPRegInfo cortex_a710_cp_reginfo[] = {
static void aarch64_a710_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a710";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -897,38 +902,38 @@ static void aarch64_a710_initfn(Object *obj)
/* Ordered by Section B.4: AArch64 registers */
cpu->midr = 0x412FD471; /* r2p1 */
cpu->revidr = 0;
- cpu->isar.id_pfr0 = 0x21110131;
- cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */
- cpu->isar.id_dfr0 = 0x16011099;
+ SET_IDREG(isar, ID_PFR0, 0x21110131);
+ SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
+ SET_IDREG(isar, ID_DFR0, 0x16011099);
cpu->id_afr0 = 0;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02122211;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00010142;
- cpu->isar.id_isar5 = 0x11011121; /* with Crypto */
- cpu->isar.id_mmfr4 = 0x21021110;
- cpu->isar.id_isar6 = 0x01111111;
+ SET_IDREG(isar, ID_MMFR0, 0x10201105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02122211);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00010142);
+ SET_IDREG(isar, ID_ISAR5, 0x11011121); /* with Crypto */
+ SET_IDREG(isar, ID_MMFR4, 0x21021110);
+ SET_IDREG(isar, ID_ISAR6, 0x01111111);
cpu->isar.mvfr0 = 0x10110222;
cpu->isar.mvfr1 = 0x13211111;
cpu->isar.mvfr2 = 0x00000043;
- cpu->isar.id_pfr2 = 0x00000011;
- cpu->isar.id_aa64pfr0 = 0x1201111120111112ull; /* GIC filled in later */
- cpu->isar.id_aa64pfr1 = 0x0000000000000221ull;
- cpu->isar.id_aa64zfr0 = 0x0000110100110021ull; /* with Crypto */
- cpu->isar.id_aa64dfr0 = 0x000011f010305619ull;
- cpu->isar.id_aa64dfr1 = 0;
+ SET_IDREG(isar, ID_PFR2, 0x00000011);
+ SET_IDREG(isar, ID_AA64PFR0, 0x1201111120111112ull); /* GIC filled in later */
+ SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000221ull);
+ SET_IDREG(isar, ID_AA64ZFR0, 0x0000110100110021ull); /* with Crypto */
+ SET_IDREG(isar, ID_AA64DFR0, 0x000011f010305619ull);
+ SET_IDREG(isar, ID_AA64DFR1, 0);
cpu->id_aa64afr0 = 0;
cpu->id_aa64afr1 = 0;
- cpu->isar.id_aa64isar0 = 0x0221111110212120ull; /* with Crypto */
- cpu->isar.id_aa64isar1 = 0x0010111101211052ull;
- cpu->isar.id_aa64mmfr0 = 0x0000022200101122ull;
- cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
- cpu->isar.id_aa64mmfr2 = 0x1221011110101011ull;
+ SET_IDREG(isar, ID_AA64ISAR0, 0x0221111110212120ull); /* with Crypto */
+ SET_IDREG(isar, ID_AA64ISAR1, 0x0010111101211052ull);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x0000022200101122ull);
+ SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
+ SET_IDREG(isar, ID_AA64MMFR2, 0x1221011110101011ull);
cpu->clidr = 0x0000001482000023ull;
cpu->gm_blocksize = 4;
cpu->ctr = 0x000000049444c004ull;
@@ -983,6 +988,7 @@ static const ARMCPRegInfo neoverse_n2_cp_reginfo[] = {
static void aarch64_neoverse_n2_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,neoverse-n2";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -998,38 +1004,38 @@ static void aarch64_neoverse_n2_initfn(Object *obj)
/* Ordered by Section B.5: AArch64 ID registers */
cpu->midr = 0x410FD493; /* r0p3 */
cpu->revidr = 0;
- cpu->isar.id_pfr0 = 0x21110131;
- cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */
- cpu->isar.id_dfr0 = 0x16011099;
+ SET_IDREG(isar, ID_PFR0, 0x21110131);
+ SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
+ SET_IDREG(isar, ID_DFR0, 0x16011099);
cpu->id_afr0 = 0;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02122211;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00010142;
- cpu->isar.id_isar5 = 0x11011121; /* with Crypto */
- cpu->isar.id_mmfr4 = 0x01021110;
- cpu->isar.id_isar6 = 0x01111111;
+ SET_IDREG(isar, ID_MMFR0, 0x10201105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02122211);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00010142);
+ SET_IDREG(isar, ID_ISAR5, 0x11011121); /* with Crypto */
+ SET_IDREG(isar, ID_MMFR4, 0x01021110);
+ SET_IDREG(isar, ID_ISAR6, 0x01111111);
cpu->isar.mvfr0 = 0x10110222;
cpu->isar.mvfr1 = 0x13211111;
cpu->isar.mvfr2 = 0x00000043;
- cpu->isar.id_pfr2 = 0x00000011;
- cpu->isar.id_aa64pfr0 = 0x1201111120111112ull; /* GIC filled in later */
- cpu->isar.id_aa64pfr1 = 0x0000000000000221ull;
- cpu->isar.id_aa64zfr0 = 0x0000110100110021ull; /* with Crypto */
- cpu->isar.id_aa64dfr0 = 0x000011f210305619ull;
- cpu->isar.id_aa64dfr1 = 0;
+ SET_IDREG(isar, ID_PFR2, 0x00000011);
+ SET_IDREG(isar, ID_AA64PFR0, 0x1201111120111112ull); /* GIC filled in later */
+ SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000221ull);
+ SET_IDREG(isar, ID_AA64ZFR0, 0x0000110100110021ull); /* with Crypto */
+ SET_IDREG(isar, ID_AA64DFR0, 0x000011f210305619ull);
+ SET_IDREG(isar, ID_AA64DFR1, 0);
cpu->id_aa64afr0 = 0;
cpu->id_aa64afr1 = 0;
- cpu->isar.id_aa64isar0 = 0x1221111110212120ull; /* with Crypto and FEAT_RNG */
- cpu->isar.id_aa64isar1 = 0x0011111101211052ull;
- cpu->isar.id_aa64mmfr0 = 0x0000022200101125ull;
- cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
- cpu->isar.id_aa64mmfr2 = 0x1221011112101011ull;
+ SET_IDREG(isar, ID_AA64ISAR0, 0x1221111110212120ull); /* with Crypto and FEAT_RNG */
+ SET_IDREG(isar, ID_AA64ISAR1, 0x0011111101211052ull);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x0000022200101125ull);
+ SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
+ SET_IDREG(isar, ID_AA64MMFR2, 0x1221011112101011ull);
cpu->clidr = 0x0000001482000023ull;
cpu->gm_blocksize = 4;
cpu->ctr = 0x00000004b444c004ull;
@@ -1083,6 +1089,7 @@ static void aarch64_neoverse_n2_initfn(Object *obj)
void aarch64_max_tcg_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
uint64_t t;
uint32_t u;
@@ -1133,7 +1140,7 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, CTR_EL0, DIC, 1);
cpu->ctr = t;
- t = cpu->isar.id_aa64isar0;
+ t = GET_IDREG(isar, ID_AA64ISAR0);
t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* FEAT_PMULL */
t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); /* FEAT_SHA1 */
t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* FEAT_SHA512 */
@@ -1148,9 +1155,9 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* FEAT_FlagM2 */
t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */
t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1); /* FEAT_RNG */
- cpu->isar.id_aa64isar0 = t;
+ SET_IDREG(isar, ID_AA64ISAR0, t);
- t = cpu->isar.id_aa64isar1;
+ t = GET_IDREG(isar, ID_AA64ISAR1);
t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2); /* FEAT_DPB2 */
t = FIELD_DP64(t, ID_AA64ISAR1, APA, PauthFeat_FPACCOMBINED);
t = FIELD_DP64(t, ID_AA64ISAR1, API, 1);
@@ -1164,16 +1171,16 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64ISAR1, DGH, 1); /* FEAT_DGH */
t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1); /* FEAT_I8MM */
t = FIELD_DP64(t, ID_AA64ISAR1, XS, 1); /* FEAT_XS */
- cpu->isar.id_aa64isar1 = t;
+ SET_IDREG(isar, ID_AA64ISAR1, t);
- t = cpu->isar.id_aa64isar2;
+ t = GET_IDREG(isar, ID_AA64ISAR2);
t = FIELD_DP64(t, ID_AA64ISAR2, RPRES, 1); /* FEAT_RPRES */
t = FIELD_DP64(t, ID_AA64ISAR2, MOPS, 1); /* FEAT_MOPS */
t = FIELD_DP64(t, ID_AA64ISAR2, BC, 1); /* FEAT_HBC */
t = FIELD_DP64(t, ID_AA64ISAR2, WFXT, 2); /* FEAT_WFxT */
- cpu->isar.id_aa64isar2 = t;
+ SET_IDREG(isar, ID_AA64ISAR2, t);
- t = cpu->isar.id_aa64pfr0;
+ t = GET_IDREG(isar, ID_AA64PFR0);
t = FIELD_DP64(t, ID_AA64PFR0, FP, 1); /* FEAT_FP16 */
t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1); /* FEAT_FP16 */
t = FIELD_DP64(t, ID_AA64PFR0, RAS, 2); /* FEAT_RASv1p1 + FEAT_DoubleFault */
@@ -1182,9 +1189,9 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1); /* FEAT_DIT */
t = FIELD_DP64(t, ID_AA64PFR0, CSV2, 3); /* FEAT_CSV2_3 */
t = FIELD_DP64(t, ID_AA64PFR0, CSV3, 1); /* FEAT_CSV3 */
- cpu->isar.id_aa64pfr0 = t;
+ SET_IDREG(isar, ID_AA64PFR0, t);
- t = cpu->isar.id_aa64pfr1;
+ t = GET_IDREG(isar, ID_AA64PFR1);
t = FIELD_DP64(t, ID_AA64PFR1, BT, 1); /* FEAT_BTI */
t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2); /* FEAT_SSBS2 */
/*
@@ -1197,9 +1204,9 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64PFR1, SME, 1); /* FEAT_SME */
t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_3 */
t = FIELD_DP64(t, ID_AA64PFR1, NMI, 1); /* FEAT_NMI */
- cpu->isar.id_aa64pfr1 = t;
+ SET_IDREG(isar, ID_AA64PFR1, t);
- t = cpu->isar.id_aa64mmfr0;
+ t = GET_IDREG(isar, ID_AA64MMFR0);
t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 6); /* FEAT_LPA: 52 bits */
t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 1); /* 16k pages supported */
t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 2); /* 16k stage2 supported */
@@ -1207,9 +1214,9 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 2); /* 4k stage2 supported */
t = FIELD_DP64(t, ID_AA64MMFR0, FGT, 1); /* FEAT_FGT */
t = FIELD_DP64(t, ID_AA64MMFR0, ECV, 2); /* FEAT_ECV */
- cpu->isar.id_aa64mmfr0 = t;
+ SET_IDREG(isar, ID_AA64MMFR0, t);
- t = cpu->isar.id_aa64mmfr1;
+ t = GET_IDREG(isar, ID_AA64MMFR1);
t = FIELD_DP64(t, ID_AA64MMFR1, HAFDBS, 2); /* FEAT_HAFDBS */
t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* FEAT_VMID16 */
t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1); /* FEAT_VHE */
@@ -1222,9 +1229,9 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64MMFR1, AFP, 1); /* FEAT_AFP */
t = FIELD_DP64(t, ID_AA64MMFR1, TIDCP1, 1); /* FEAT_TIDCP1 */
t = FIELD_DP64(t, ID_AA64MMFR1, CMOW, 1); /* FEAT_CMOW */
- cpu->isar.id_aa64mmfr1 = t;
+ SET_IDREG(isar, ID_AA64MMFR1, t);
- t = cpu->isar.id_aa64mmfr2;
+ t = GET_IDREG(isar, ID_AA64MMFR2);
t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* FEAT_TTCNP */
t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1); /* FEAT_UAO */
t = FIELD_DP64(t, ID_AA64MMFR2, IESB, 1); /* FEAT_IESB */
@@ -1238,13 +1245,11 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64MMFR2, BBM, 2); /* FEAT_BBM at level 2 */
t = FIELD_DP64(t, ID_AA64MMFR2, EVT, 2); /* FEAT_EVT */
t = FIELD_DP64(t, ID_AA64MMFR2, E0PD, 1); /* FEAT_E0PD */
- cpu->isar.id_aa64mmfr2 = t;
+ SET_IDREG(isar, ID_AA64MMFR2, t);
- t = cpu->isar.id_aa64mmfr3;
- t = FIELD_DP64(t, ID_AA64MMFR3, SPEC_FPACC, 1); /* FEAT_FPACC_SPEC */
- cpu->isar.id_aa64mmfr3 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64MMFR3, SPEC_FPACC, 1); /* FEAT_FPACC_SPEC */
- t = cpu->isar.id_aa64zfr0;
+ t = GET_IDREG(isar, ID_AA64ZFR0);
t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1);
t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2); /* FEAT_SVE_PMULL128 */
t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1); /* FEAT_SVE_BitPerm */
@@ -1254,15 +1259,15 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1); /* FEAT_I8MM */
t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1); /* FEAT_F32MM */
t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1); /* FEAT_F64MM */
- cpu->isar.id_aa64zfr0 = t;
+ SET_IDREG(isar, ID_AA64ZFR0, t);
- t = cpu->isar.id_aa64dfr0;
+ t = GET_IDREG(isar, ID_AA64DFR0);
t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 10); /* FEAT_Debugv8p8 */
t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 6); /* FEAT_PMUv3p5 */
t = FIELD_DP64(t, ID_AA64DFR0, HPMN0, 1); /* FEAT_HPMN0 */
- cpu->isar.id_aa64dfr0 = t;
+ SET_IDREG(isar, ID_AA64DFR0, t);
- t = cpu->isar.id_aa64smfr0;
+ t = GET_IDREG(isar, ID_AA64SMFR0);
t = FIELD_DP64(t, ID_AA64SMFR0, F32F32, 1); /* FEAT_SME */
t = FIELD_DP64(t, ID_AA64SMFR0, B16F32, 1); /* FEAT_SME */
t = FIELD_DP64(t, ID_AA64SMFR0, F16F32, 1); /* FEAT_SME */
@@ -1270,7 +1275,7 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64SMFR0, F64F64, 1); /* FEAT_SME_F64F64 */
t = FIELD_DP64(t, ID_AA64SMFR0, I16I64, 0xf); /* FEAT_SME_I16I64 */
t = FIELD_DP64(t, ID_AA64SMFR0, FA64, 1); /* FEAT_SME_FA64 */
- cpu->isar.id_aa64smfr0 = t;
+ SET_IDREG(isar, ID_AA64SMFR0, t);
/* Replicate the same data to the 32-bit id registers. */
aa32_max_features(cpu);
@@ -1316,7 +1321,7 @@ static void aarch64_cpu_register_types(void)
size_t i;
for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) {
- aarch64_cpu_register(&aarch64_cpus[i]);
+ arm_cpu_register(&aarch64_cpus[i]);
}
}
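
In aarch64_max_tcg_initfn the conversion keeps the existing read-modify-write shape and only swaps the accessors; under the macro sketch earlier, the two forms below are equivalent (the register and field names are simply the ones already used in the hunk):

    uint64_t t = GET_IDREG(isar, ID_AA64ISAR0);    /* read current value */
    t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1);      /* deposit FEAT_RNG field */
    SET_IDREG(isar, ID_AA64ISAR0, t);              /* write it back */
    /* single-field updates collapse to one line: */
    FIELD_DP64_IDREG(isar, ID_AA64ISAR0, RNDR, 1);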
diff --git a/target/arm/tcg/crypto_helper.c b/target/arm/tcg/crypto_helper.c
index 7cadd61..3428bd1 100644
--- a/target/arm/tcg/crypto_helper.c
+++ b/target/arm/tcg/crypto_helper.c
@@ -10,14 +10,16 @@
*/
#include "qemu/osdep.h"
+#include "qemu/bitops.h"
-#include "cpu.h"
-#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "crypto/aes-round.h"
#include "crypto/sm4.h"
#include "vec_internal.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
union CRYPTO_STATE {
uint8_t bytes[16];
uint32_t words[4];
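
crypto_helper.c now names its helper list through HELPER_H before including exec/helper-proto.h.inc, rather than pulling prototypes in via cpu.h. Roughly (a sketch of the pattern, not the exact contents of helper-proto.h.inc), the .inc file defines the DEF_HELPER_* macros so that each entry emits a prototype, includes whatever file HELPER_H names, and undefines the macros again:

    /* Approximate shape of exec/helper-proto.h.inc (assumption): */
    #define DEF_HELPER_2(name, ret, t1, t2) \
        dh_ctype(ret) helper_##name(dh_ctype(t1), dh_ctype(t2));
    /* ... DEF_HELPER_1 through DEF_HELPER_FLAGS_n defined likewise ... */
    #include HELPER_H       /* here: "tcg/helper.h" */
    #undef DEF_HELPER_2
    /* ... */

This lets each target (or each sub-unit, as with tcg/helper.h below) own its helper declarations while sharing one prototype generator.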
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
index 842d9e6..4f618ae 100644
--- a/target/arm/tcg/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
@@ -29,8 +29,9 @@
#include "internals.h"
#include "qemu/crc32c.h"
#include "exec/cpu-common.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/helper-retaddr.h"
+#include "accel/tcg/probe.h"
#include "exec/target_page.h"
#include "exec/tlb-flags.h"
#include "qemu/int128.h"
diff --git a/target/arm/tcg/helper.h b/target/arm/tcg/helper.h
new file mode 100644
index 0000000..80db7c2
--- /dev/null
+++ b/target/arm/tcg/helper.h
@@ -0,0 +1,1153 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+DEF_HELPER_FLAGS_1(sxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(uxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
+
+DEF_HELPER_3(add_setq, i32, env, i32, i32)
+DEF_HELPER_3(add_saturate, i32, env, i32, i32)
+DEF_HELPER_3(sub_saturate, i32, env, i32, i32)
+DEF_HELPER_3(add_usaturate, i32, env, i32, i32)
+DEF_HELPER_3(sub_usaturate, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(sdiv, TCG_CALL_NO_RWG, s32, env, s32, s32)
+DEF_HELPER_FLAGS_3(udiv, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32)
+
+#define PAS_OP(pfx) \
+ DEF_HELPER_3(pfx ## add8, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## sub8, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## sub16, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## add16, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## addsubx, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## subaddx, i32, i32, i32, ptr)
+
+PAS_OP(s)
+PAS_OP(u)
+#undef PAS_OP
+
+#define PAS_OP(pfx) \
+ DEF_HELPER_2(pfx ## add8, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## sub8, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## sub16, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## add16, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## addsubx, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## subaddx, i32, i32, i32)
+PAS_OP(q)
+PAS_OP(sh)
+PAS_OP(uq)
+PAS_OP(uh)
+#undef PAS_OP
+
+DEF_HELPER_3(ssat, i32, env, i32, i32)
+DEF_HELPER_3(usat, i32, env, i32, i32)
+DEF_HELPER_3(ssat16, i32, env, i32, i32)
+DEF_HELPER_3(usat16, i32, env, i32, i32)
+
+DEF_HELPER_FLAGS_2(usad8, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+
+DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE,
+ i32, i32, i32, i32)
+DEF_HELPER_2(exception_internal, noreturn, env, i32)
+DEF_HELPER_3(exception_with_syndrome, noreturn, env, i32, i32)
+DEF_HELPER_4(exception_with_syndrome_el, noreturn, env, i32, i32, i32)
+DEF_HELPER_2(exception_bkpt_insn, noreturn, env, i32)
+DEF_HELPER_2(exception_swstep, noreturn, env, i32)
+DEF_HELPER_2(exception_pc_alignment, noreturn, env, vaddr)
+DEF_HELPER_1(setend, void, env)
+DEF_HELPER_2(wfi, void, env, i32)
+DEF_HELPER_1(wfe, void, env)
+DEF_HELPER_2(wfit, void, env, i64)
+DEF_HELPER_1(yield, void, env)
+DEF_HELPER_1(pre_hvc, void, env)
+DEF_HELPER_2(pre_smc, void, env, i32)
+DEF_HELPER_1(vesb, void, env)
+
+DEF_HELPER_3(cpsr_write, void, env, i32, i32)
+DEF_HELPER_2(cpsr_write_eret, void, env, i32)
+DEF_HELPER_1(cpsr_read, i32, env)
+
+DEF_HELPER_3(v7m_msr, void, env, i32, i32)
+DEF_HELPER_2(v7m_mrs, i32, env, i32)
+
+DEF_HELPER_2(v7m_bxns, void, env, i32)
+DEF_HELPER_2(v7m_blxns, void, env, i32)
+
+DEF_HELPER_3(v7m_tt, i32, env, i32, i32)
+
+DEF_HELPER_1(v7m_preserve_fp_state, void, env)
+
+DEF_HELPER_2(v7m_vlstm, void, env, i32)
+DEF_HELPER_2(v7m_vlldm, void, env, i32)
+
+DEF_HELPER_2(v8m_stackcheck, void, env, i32)
+
+DEF_HELPER_FLAGS_2(check_bxj_trap, TCG_CALL_NO_WG, void, env, i32)
+
+DEF_HELPER_4(access_check_cp_reg, cptr, env, i32, i32, i32)
+DEF_HELPER_FLAGS_2(lookup_cp_reg, TCG_CALL_NO_RWG_SE, cptr, env, i32)
+DEF_HELPER_FLAGS_2(tidcp_el0, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_FLAGS_2(tidcp_el1, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_3(set_cp_reg, void, env, cptr, i32)
+DEF_HELPER_2(get_cp_reg, i32, env, cptr)
+DEF_HELPER_3(set_cp_reg64, void, env, cptr, i64)
+DEF_HELPER_2(get_cp_reg64, i64, env, cptr)
+
+DEF_HELPER_2(get_r13_banked, i32, env, i32)
+DEF_HELPER_3(set_r13_banked, void, env, i32, i32)
+
+DEF_HELPER_3(mrs_banked, i32, env, i32, i32)
+DEF_HELPER_4(msr_banked, void, env, i32, i32, i32)
+
+DEF_HELPER_2(get_user_reg, i32, env, i32)
+DEF_HELPER_3(set_user_reg, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_1(rebuild_hflags_m32_newel, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_2(rebuild_hflags_m32, TCG_CALL_NO_RWG, void, env, int)
+DEF_HELPER_FLAGS_1(rebuild_hflags_a32_newel, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_2(rebuild_hflags_a32, TCG_CALL_NO_RWG, void, env, int)
+DEF_HELPER_FLAGS_2(rebuild_hflags_a64, TCG_CALL_NO_RWG, void, env, int)
+
+DEF_HELPER_FLAGS_5(probe_access, TCG_CALL_NO_WG, void, env, vaddr, i32, i32, i32)
+
+DEF_HELPER_1(vfp_get_fpscr, i32, env)
+DEF_HELPER_2(vfp_set_fpscr, void, env, i32)
+
+DEF_HELPER_3(vfp_addh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_adds, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_addd, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_subh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_subs, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_subd, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_mulh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_muls, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_muld, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_divh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_divs, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_divd, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_maxh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_maxs, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_maxd, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_minh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_mins, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_mind, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_maxnumh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_maxnums, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_maxnumd, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_minnumh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_minnums, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_minnumd, f64, f64, f64, fpst)
+DEF_HELPER_2(vfp_sqrth, f16, f16, fpst)
+DEF_HELPER_2(vfp_sqrts, f32, f32, fpst)
+DEF_HELPER_2(vfp_sqrtd, f64, f64, fpst)
+DEF_HELPER_3(vfp_cmph, void, f16, f16, env)
+DEF_HELPER_3(vfp_cmps, void, f32, f32, env)
+DEF_HELPER_3(vfp_cmpd, void, f64, f64, env)
+DEF_HELPER_3(vfp_cmpeh, void, f16, f16, env)
+DEF_HELPER_3(vfp_cmpes, void, f32, f32, env)
+DEF_HELPER_3(vfp_cmped, void, f64, f64, env)
+
+DEF_HELPER_2(vfp_fcvtds, f64, f32, fpst)
+DEF_HELPER_2(vfp_fcvtsd, f32, f64, fpst)
+DEF_HELPER_FLAGS_2(bfcvt, TCG_CALL_NO_RWG, i32, f32, fpst)
+DEF_HELPER_FLAGS_2(bfcvt_pair, TCG_CALL_NO_RWG, i32, i64, fpst)
+
+DEF_HELPER_2(vfp_uitoh, f16, i32, fpst)
+DEF_HELPER_2(vfp_uitos, f32, i32, fpst)
+DEF_HELPER_2(vfp_uitod, f64, i32, fpst)
+DEF_HELPER_2(vfp_sitoh, f16, i32, fpst)
+DEF_HELPER_2(vfp_sitos, f32, i32, fpst)
+DEF_HELPER_2(vfp_sitod, f64, i32, fpst)
+
+DEF_HELPER_2(vfp_touih, i32, f16, fpst)
+DEF_HELPER_2(vfp_touis, i32, f32, fpst)
+DEF_HELPER_2(vfp_touid, i32, f64, fpst)
+DEF_HELPER_2(vfp_touizh, i32, f16, fpst)
+DEF_HELPER_2(vfp_touizs, i32, f32, fpst)
+DEF_HELPER_2(vfp_touizd, i32, f64, fpst)
+DEF_HELPER_2(vfp_tosih, s32, f16, fpst)
+DEF_HELPER_2(vfp_tosis, s32, f32, fpst)
+DEF_HELPER_2(vfp_tosid, s32, f64, fpst)
+DEF_HELPER_2(vfp_tosizh, s32, f16, fpst)
+DEF_HELPER_2(vfp_tosizs, s32, f32, fpst)
+DEF_HELPER_2(vfp_tosizd, s32, f64, fpst)
+
+DEF_HELPER_3(vfp_toshh_round_to_zero, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_toslh_round_to_zero, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_touhh_round_to_zero, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_toulh_round_to_zero, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_toshs_round_to_zero, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_tosls_round_to_zero, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_touhs_round_to_zero, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_touls_round_to_zero, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_toshd_round_to_zero, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_tosld_round_to_zero, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_tosqd_round_to_zero, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_touhd_round_to_zero, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_tould_round_to_zero, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_touqd_round_to_zero, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_touhh, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_toshh, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_toulh, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_toslh, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_touqh, i64, f16, i32, fpst)
+DEF_HELPER_3(vfp_tosqh, i64, f16, i32, fpst)
+DEF_HELPER_3(vfp_toshs, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_tosls, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_tosqs, i64, f32, i32, fpst)
+DEF_HELPER_3(vfp_touhs, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_touls, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_touqs, i64, f32, i32, fpst)
+DEF_HELPER_3(vfp_toshd, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_tosld, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_tosqd, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_touhd, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_tould, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_touqd, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_shtos, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_sltos, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_sqtos, f32, i64, i32, fpst)
+DEF_HELPER_3(vfp_uhtos, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_ultos, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_uqtos, f32, i64, i32, fpst)
+DEF_HELPER_3(vfp_shtod, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_sltod, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_sqtod, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_uhtod, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_ultod, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_uqtod, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_shtoh, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_uhtoh, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_sltoh, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_ultoh, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_sqtoh, f16, i64, i32, fpst)
+DEF_HELPER_3(vfp_uqtoh, f16, i64, i32, fpst)
+
+DEF_HELPER_3(vfp_shtos_round_to_nearest, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_sltos_round_to_nearest, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_uhtos_round_to_nearest, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_ultos_round_to_nearest, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_shtod_round_to_nearest, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_sltod_round_to_nearest, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_uhtod_round_to_nearest, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_ultod_round_to_nearest, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_shtoh_round_to_nearest, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_uhtoh_round_to_nearest, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_sltoh_round_to_nearest, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_ultoh_round_to_nearest, f16, i32, i32, fpst)
+
+DEF_HELPER_FLAGS_2(set_rmode, TCG_CALL_NO_RWG, i32, i32, fpst)
+
+DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f32, TCG_CALL_NO_RWG, f32, f16, fpst, i32)
+DEF_HELPER_FLAGS_3(vfp_fcvt_f32_to_f16, TCG_CALL_NO_RWG, f16, f32, fpst, i32)
+DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f64, TCG_CALL_NO_RWG, f64, f16, fpst, i32)
+DEF_HELPER_FLAGS_3(vfp_fcvt_f64_to_f16, TCG_CALL_NO_RWG, f16, f64, fpst, i32)
+
+DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, fpst)
+DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, fpst)
+DEF_HELPER_4(vfp_muladdh, f16, f16, f16, f16, fpst)
+
+DEF_HELPER_FLAGS_2(recpe_f16, TCG_CALL_NO_RWG, f16, f16, fpst)
+DEF_HELPER_FLAGS_2(recpe_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(recpe_rpres_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(recpe_f64, TCG_CALL_NO_RWG, f64, f64, fpst)
+DEF_HELPER_FLAGS_2(rsqrte_f16, TCG_CALL_NO_RWG, f16, f16, fpst)
+DEF_HELPER_FLAGS_2(rsqrte_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(rsqrte_rpres_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(rsqrte_f64, TCG_CALL_NO_RWG, f64, f64, fpst)
+DEF_HELPER_FLAGS_1(recpe_u32, TCG_CALL_NO_RWG, i32, i32)
+DEF_HELPER_FLAGS_1(rsqrte_u32, TCG_CALL_NO_RWG, i32, i32)
+DEF_HELPER_FLAGS_4(neon_tbl, TCG_CALL_NO_RWG, i64, env, i32, i64, i64)
+
+DEF_HELPER_3(shl_cc, i32, env, i32, i32)
+DEF_HELPER_3(shr_cc, i32, env, i32, i32)
+DEF_HELPER_3(sar_cc, i32, env, i32, i32)
+DEF_HELPER_3(ror_cc, i32, env, i32, i32)
+
+DEF_HELPER_FLAGS_2(rinth_exact, TCG_CALL_NO_RWG, f16, f16, fpst)
+DEF_HELPER_FLAGS_2(rints_exact, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(rintd_exact, TCG_CALL_NO_RWG, f64, f64, fpst)
+DEF_HELPER_FLAGS_2(rinth, TCG_CALL_NO_RWG, f16, f16, fpst)
+DEF_HELPER_FLAGS_2(rints, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, fpst)
+
+DEF_HELPER_FLAGS_2(vjcvt, TCG_CALL_NO_RWG, i32, f64, env)
+DEF_HELPER_FLAGS_2(fjcvtzs, TCG_CALL_NO_RWG, i64, f64, fpst)
+
+DEF_HELPER_FLAGS_3(check_hcr_el2_trap, TCG_CALL_NO_WG, void, env, i32, i32)
+
+/* neon_helper.c */
+DEF_HELPER_2(neon_pmin_u8, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_s8, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_u16, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_s16, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_u8, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_s8, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_u16, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_s16, i32, i32, i32)
+
+DEF_HELPER_2(neon_shl_u16, i32, i32, i32)
+DEF_HELPER_2(neon_shl_s16, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u8, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_s8, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u16, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_s16, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u32, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_s32, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u64, i64, i64, i64)
+DEF_HELPER_2(neon_rshl_s64, i64, i64, i64)
+DEF_HELPER_3(neon_qshl_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qshl_s64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qrshl_s64, i64, env, i64, i64)
+DEF_HELPER_FLAGS_5(neon_sqshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqrshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqrshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqrshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqrshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqrshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqrshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqrshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqrshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshli_b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshli_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshli_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshli_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_uqshli_b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_uqshli_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_uqshli_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_uqshli_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshlui_b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshlui_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshlui_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshlui_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(gvec_srshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_srshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_srshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_srshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_urshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_urshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_urshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_urshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_2(neon_add_u8, i32, i32, i32)
+DEF_HELPER_2(neon_add_u16, i32, i32, i32)
+DEF_HELPER_2(neon_sub_u8, i32, i32, i32)
+DEF_HELPER_2(neon_sub_u16, i32, i32, i32)
+DEF_HELPER_2(neon_mul_u8, i32, i32, i32)
+DEF_HELPER_2(neon_mul_u16, i32, i32, i32)
+
+DEF_HELPER_2(neon_tst_u8, i32, i32, i32)
+DEF_HELPER_2(neon_tst_u16, i32, i32, i32)
+DEF_HELPER_2(neon_tst_u32, i32, i32, i32)
+
+DEF_HELPER_1(neon_clz_u8, i32, i32)
+DEF_HELPER_1(neon_clz_u16, i32, i32)
+DEF_HELPER_1(neon_cls_s8, i32, i32)
+DEF_HELPER_1(neon_cls_s16, i32, i32)
+DEF_HELPER_1(neon_cls_s32, i32, i32)
+DEF_HELPER_FLAGS_3(gvec_cnt_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_3(neon_qdmulh_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrdmulh_s16, i32, env, i32, i32)
+DEF_HELPER_4(neon_qrdmlah_s16, i32, env, i32, i32, i32)
+DEF_HELPER_4(neon_qrdmlsh_s16, i32, env, i32, i32, i32)
+DEF_HELPER_3(neon_qdmulh_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32)
+DEF_HELPER_4(neon_qrdmlah_s32, i32, env, s32, s32, s32)
+DEF_HELPER_4(neon_qrdmlsh_s32, i32, env, s32, s32, s32)
+
+DEF_HELPER_1(neon_narrow_u8, i64, i64)
+DEF_HELPER_1(neon_narrow_u16, i64, i64)
+DEF_HELPER_2(neon_unarrow_sat8, i64, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u8, i64, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s8, i64, env, i64)
+DEF_HELPER_2(neon_unarrow_sat16, i64, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u16, i64, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s16, i64, env, i64)
+DEF_HELPER_2(neon_unarrow_sat32, i64, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u32, i64, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s32, i64, env, i64)
+DEF_HELPER_1(neon_narrow_high_u8, i32, i64)
+DEF_HELPER_1(neon_narrow_high_u16, i32, i64)
+DEF_HELPER_1(neon_narrow_round_high_u8, i32, i64)
+DEF_HELPER_1(neon_narrow_round_high_u16, i32, i64)
+DEF_HELPER_1(neon_widen_u8, i64, i32)
+DEF_HELPER_1(neon_widen_s8, i64, i32)
+DEF_HELPER_1(neon_widen_u16, i64, i32)
+DEF_HELPER_1(neon_widen_s16, i64, i32)
+
+DEF_HELPER_FLAGS_1(neon_addlp_s8, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(neon_addlp_s16, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_3(neon_addl_saturate_s32, i64, env, i64, i64)
+DEF_HELPER_3(neon_addl_saturate_s64, i64, env, i64, i64)
+DEF_HELPER_2(neon_abdl_u16, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_s16, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_u32, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_s32, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_u64, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_s64, i64, i32, i32)
+DEF_HELPER_2(neon_mull_u8, i64, i32, i32)
+DEF_HELPER_2(neon_mull_s8, i64, i32, i32)
+DEF_HELPER_2(neon_mull_u16, i64, i32, i32)
+DEF_HELPER_2(neon_mull_s16, i64, i32, i32)
+
+DEF_HELPER_1(neon_negl_u16, i64, i64)
+DEF_HELPER_1(neon_negl_u32, i64, i64)
+
+DEF_HELPER_FLAGS_2(neon_qabs_s8, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s16, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s32, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s64, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(neon_qneg_s8, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s16, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s32, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s64, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, fpst)
+DEF_HELPER_3(neon_cge_f32, i32, i32, i32, fpst)
+DEF_HELPER_3(neon_cgt_f32, i32, i32, i32, fpst)
+DEF_HELPER_3(neon_acge_f32, i32, i32, i32, fpst)
+DEF_HELPER_3(neon_acgt_f32, i32, i32, i32, fpst)
+DEF_HELPER_3(neon_acge_f64, i64, i64, i64, fpst)
+DEF_HELPER_3(neon_acgt_f64, i64, i64, i64, fpst)
+
+/* iwmmxt_helper.c */
+DEF_HELPER_2(iwmmxt_maddsq, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_madduq, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_sadb, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_sadw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_mulslw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_mulshw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_mululw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_muluhw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_macsw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_macuw, i64, i64, i64)
+DEF_HELPER_1(iwmmxt_setpsr_nz, i32, i64)
+
+#define DEF_IWMMXT_HELPER_SIZE_ENV(name) \
+DEF_HELPER_3(iwmmxt_##name##b, i64, env, i64, i64) \
+DEF_HELPER_3(iwmmxt_##name##w, i64, env, i64, i64) \
+DEF_HELPER_3(iwmmxt_##name##l, i64, env, i64, i64) \
+
+DEF_IWMMXT_HELPER_SIZE_ENV(unpackl)
+DEF_IWMMXT_HELPER_SIZE_ENV(unpackh)
+
+DEF_HELPER_2(iwmmxt_unpacklub, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackluw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklul, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhub, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhuw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhul, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsb, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsl, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsb, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsl, i64, env, i64)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpeq)
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpgtu)
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpgts)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(mins)
+DEF_IWMMXT_HELPER_SIZE_ENV(minu)
+DEF_IWMMXT_HELPER_SIZE_ENV(maxs)
+DEF_IWMMXT_HELPER_SIZE_ENV(maxu)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(subn)
+DEF_IWMMXT_HELPER_SIZE_ENV(addn)
+DEF_IWMMXT_HELPER_SIZE_ENV(subu)
+DEF_IWMMXT_HELPER_SIZE_ENV(addu)
+DEF_IWMMXT_HELPER_SIZE_ENV(subs)
+DEF_IWMMXT_HELPER_SIZE_ENV(adds)
+
+DEF_HELPER_3(iwmmxt_avgb0, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgb1, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgw0, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgw1, i64, env, i64, i64)
+
+DEF_HELPER_3(iwmmxt_align, i64, i64, i64, i32)
+DEF_HELPER_4(iwmmxt_insr, i64, i64, i32, i32, i32)
+
+DEF_HELPER_1(iwmmxt_bcstb, i64, i32)
+DEF_HELPER_1(iwmmxt_bcstw, i64, i32)
+DEF_HELPER_1(iwmmxt_bcstl, i64, i32)
+
+DEF_HELPER_1(iwmmxt_addcb, i64, i64)
+DEF_HELPER_1(iwmmxt_addcw, i64, i64)
+DEF_HELPER_1(iwmmxt_addcl, i64, i64)
+
+DEF_HELPER_1(iwmmxt_msbb, i32, i64)
+DEF_HELPER_1(iwmmxt_msbw, i32, i64)
+DEF_HELPER_1(iwmmxt_msbl, i32, i64)
+
+DEF_HELPER_3(iwmmxt_srlw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_srll, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_srlq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sllw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_slll, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sllq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sraw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sral, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sraq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorl, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_shufh, i64, env, i64, i32)
+
+DEF_HELPER_3(iwmmxt_packuw, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packul, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packuq, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsw, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsl, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsq, i64, env, i64, i64)
+
+DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32)
+DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32)
+DEF_HELPER_3(iwmmxt_muladdswl, i64, i64, i32, i32)
+
+DEF_HELPER_FLAGS_2(neon_unzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_unzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qunzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qunzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qunzip32, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_zip8, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_zip16, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qzip32, TCG_CALL_NO_RWG, void, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(crypto_aese, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_aesd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_aesmc, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_aesimc, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sha1su0, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha1c, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha1p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha1m, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha1h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha1su1, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sha256h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha256h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha256su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha256su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sha512h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha512h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha512su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha512su1, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sm3tt1a, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3tt1b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3tt2a, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3tt2b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3partw1, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3partw2, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sm4e, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm4ekey, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_rax1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
+DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
+
+DEF_HELPER_FLAGS_5(gvec_qrdmlah_s16, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s16, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_qrdmlah_s32, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s32, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_sdot_idx_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_idx_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sdot_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sudot_idx_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usdot_idx_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fcaddd, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_6(gvec_fcmlah, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlah_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlas, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlas_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlad, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_sstoh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_sitos, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_ustoh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_uitos, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_tosszh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_tosizs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_touszh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_touizs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vcvt_sf, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_uf, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_fs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_fu, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vcvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_uh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_hs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_hu, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vcvt_sd, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_ud, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_ds, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_du, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_sd, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_ud, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_ss, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_us, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_sh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_uh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vrint_rm_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vrint_rm_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vrintx_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vrintx_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_frecpe_rpres_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_frsqrte_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_frsqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_frsqrte_rpres_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_frsqrte_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fcgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fcgt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fcgt0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fcge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fcge0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fcge0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fceq0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fceq0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fcle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fcle0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fcle0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fclt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fclt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fclt0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_ah_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fceq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fceq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fceq_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fcge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fcge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fcge_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fcgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fcgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fcgt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_facge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_facge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_facge_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_facgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_facgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_facgt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmax_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmax_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmax_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmin_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmin_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmin_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmaxnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fminnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_recps_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_recps_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmla_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmls_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmls_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_vfma_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_vfma_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_vfma_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_vfms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_ah_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_vfms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ftsmul_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmul_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmul_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmul_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_6(gvec_fmla_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fmla_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fmla_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_6(gvec_fmls_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fmls_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fmls_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_uqadd_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqadd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqadd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqadd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqadd_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqadd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqadd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqadd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqsub_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqsub_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqsub_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqsub_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqsub_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqsub_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqsub_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqsub_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmlal_a32, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_fmlal_a64, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a32, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a64, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_2(frint32_s, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(frint64_s, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(frint32_d, TCG_CALL_NO_RWG, f64, f64, fpst)
+DEF_HELPER_FLAGS_2(frint64_d, TCG_CALL_NO_RWG, f64, f64, fpst)
+
+DEF_HELPER_FLAGS_3(gvec_ceq0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_clt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_clt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cle0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cgt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cge0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_smulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_umulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_sshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_ushl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_pmul_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_pmull_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(neon_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_ssra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ssra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ssra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ssra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_usra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_usra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_usra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_usra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_srshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_urshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_urshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_urshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_urshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_srsra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srsra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srsra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srsra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_ursra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_sri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sri_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_sli_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sli_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sli_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sli_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_sabd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_uabd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_saba_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_saba_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_saba_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_saba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_uaba_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uaba_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uaba_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uaba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_mul_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_mul_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_mul_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_mla_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mla_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mla_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_mls_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mls_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mls_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqdmulh_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqdmulh_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqrdmulh_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqrdmulh_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqrdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqrdmlah_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqrdmlah_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqrdmlsh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqrdmlsh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fmlal_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_6(sve2_fmlal_zzxw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_smmla_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_ummla_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usmmla_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(gvec_bfdot, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_bfdot_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_6(gvec_bfmmla, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_sclamp_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_uclamp_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_faddp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_faddp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_faddp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmaxnump_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxnump_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxnump_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fminnump_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminnump_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminnump_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_addp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_addp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_addp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_addp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_smaxp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_sminp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_umaxp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_uminp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_urecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
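
Each DEF_HELPER_FLAGS_n line above is a helper description: name, TCG call
flags, return type, then the n argument types. As a rough sketch (abridged;
the real macros in exec/helper-proto.h.inc also drive helper-gen and the
TCG call machinery), one of the lines above expands to an ordinary C
prototype:

    /* DEF_HELPER_FLAGS_4(gvec_urshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) */
    void helper_gvec_urshl_b(void *vd, void *vn, void *vm, uint32_t desc);

TCG_CALL_NO_RWG records that the helper neither reads nor writes TCG
globals, so the optimizer may keep register-cached values live across the
call; the _SE variant additionally marks a helper as side-effect free.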
diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c
index e51d9f7..1ccec63 100644
--- a/target/arm/tcg/hflags.c
+++ b/target/arm/tcg/hflags.c
@@ -9,9 +9,13 @@
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
-#include "exec/helper-proto.h"
+#include "exec/translation-block.h"
+#include "accel/tcg/cpu-ops.h"
#include "cpregs.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
static inline bool fgt_svc(CPUARMState *env, int el)
{
/*
@@ -498,7 +502,7 @@ void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}
-void assert_hflags_rebuild_correctly(CPUARMState *env)
+static void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
CPUARMTBFlags c = env->hflags;
@@ -513,3 +517,116 @@ void assert_hflags_rebuild_correctly(CPUARMState *env)
}
#endif
}
+
+static bool mve_no_pred(CPUARMState *env)
+{
+ /*
+ * Return true if there is definitely no predication of MVE
+ * instructions by VPR or LTPSIZE. (Returning false even if there
+ * isn't any predication is OK; generated code will just be
+ * a little worse.)
+ * If the CPU does not implement MVE then this TB flag is always 0.
+ *
+ * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
+ * logic in gen_update_fp_context() needs to be updated to match.
+ *
+ * We do not include the effect of the ECI bits here -- they are
+ * tracked in other TB flags. This simplifies the logic for
+ * "when did we emit code that changes the MVE_NO_PRED TB flag
+ * and thus need to end the TB?".
+ */
+ if (!cpu_isar_feature(aa32_mve, env_archcpu(env))) {
+ return false;
+ }
+ if (env->v7m.vpr) {
+ return false;
+ }
+ if (env->v7m.ltpsize < 4) {
+ return false;
+ }
+ return true;
+}
+
+TCGTBCPUState arm_get_tb_cpu_state(CPUState *cs)
+{
+ CPUARMState *env = cpu_env(cs);
+ CPUARMTBFlags flags;
+ vaddr pc;
+
+ assert_hflags_rebuild_correctly(env);
+ flags = env->hflags;
+
+ if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
+ pc = env->pc;
+ if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
+ DP_TBFLAG_A64(flags, BTYPE, env->btype);
+ }
+ } else {
+ pc = env->regs[15];
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+ FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
+ != env->v7m.secure) {
+ DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
+ }
+
+ if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
+ (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
+ (env->v7m.secure &&
+ !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
+ /*
+ * ASPEN is set, but FPCA/SFPA indicate that there is no
+ * active FP context; we must create a new FP context before
+ * executing any FP insn.
+ */
+ DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
+ }
+
+ bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+ if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
+ DP_TBFLAG_M32(flags, LSPACT, 1);
+ }
+
+ if (mve_no_pred(env)) {
+ DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
+ }
+ } else {
+ /*
+ * Note that XSCALE_CPAR shares bits with VECSTRIDE.
+ * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
+ */
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
+ } else {
+ DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
+ DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
+ }
+ if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
+ DP_TBFLAG_A32(flags, VFPEN, 1);
+ }
+ }
+
+ DP_TBFLAG_AM32(flags, THUMB, env->thumb);
+ DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
+ }
+
+ /*
+ * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
+ * states defined in the ARM ARM for software singlestep:
+ *   SS_ACTIVE   PSTATE.SS   State
+ *   0           x           Inactive (the TB flag for SS is always 0)
+ *   1           0           Active-pending
+ *   1           1           Active-not-pending
+ * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
+ */
+ if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
+ DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
+ }
+
+ return (TCGTBCPUState){
+ .pc = pc,
+ .flags = flags.flags,
+ .cs_base = flags.flags2,
+ };
+}
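
arm_get_tb_cpu_state() becomes the Arm implementation of the new TCGCPUOps
hook that replaces cpu_get_tb_cpu_state(); the generic code consumes the
returned triple roughly as follows (a sketch, the exact call site lives in
accel/tcg, not in this patch):

    TCGTBCPUState s = cpu->cc->tcg_ops->get_tb_cpu_state(cpu);
    tb = tb_gen_code(cpu, s.pc, s.cs_base, s.flags, cflags);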
diff --git a/target/arm/tcg/iwmmxt_helper.c b/target/arm/tcg/iwmmxt_helper.c
index 610b1b2..ba054b6 100644
--- a/target/arm/tcg/iwmmxt_helper.c
+++ b/target/arm/tcg/iwmmxt_helper.c
@@ -22,7 +22,9 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/helper-proto.h"
+
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
/* iwMMXt macros extracted from GNU gdb. */
diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c
index 37dc98d..6614719 100644
--- a/target/arm/tcg/m_helper.c
+++ b/target/arm/tcg/m_helper.c
@@ -15,7 +15,6 @@
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#ifdef CONFIG_TCG
#include "accel/tcg/cpu-ldst.h"
diff --git a/target/arm/tcg/meson.build b/target/arm/tcg/meson.build
index dd12cce..c59f0f0 100644
--- a/target/arm/tcg/meson.build
+++ b/target/arm/tcg/meson.build
@@ -30,18 +30,10 @@ arm_ss.add(files(
'translate-mve.c',
'translate-neon.c',
'translate-vfp.c',
- 'crypto_helper.c',
- 'hflags.c',
- 'iwmmxt_helper.c',
'm_helper.c',
'mve_helper.c',
- 'neon_helper.c',
'op_helper.c',
- 'tlb_helper.c',
'vec_helper.c',
- 'tlb-insns.c',
- 'arith_helper.c',
- 'vfp_helper.c',
))
arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
@@ -63,3 +55,26 @@ arm_system_ss.add(files(
arm_system_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('cpu-v7m.c'))
arm_user_ss.add(when: 'TARGET_AARCH64', if_false: files('cpu-v7m.c'))
+
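+# arm_common_ss files are target-independent and compiled once for all Arm
+# configurations; zlib is added here presumably for the crc32/crc32c
+# helpers built from these sources.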
+arm_common_ss.add(zlib)
+
+arm_common_ss.add(files(
+ 'arith_helper.c',
+ 'crypto_helper.c',
+))
+
+arm_common_system_ss.add(files(
+ 'hflags.c',
+ 'iwmmxt_helper.c',
+ 'neon_helper.c',
+ 'tlb_helper.c',
+ 'tlb-insns.c',
+ 'vfp_helper.c',
+))
+arm_user_ss.add(files(
+ 'hflags.c',
+ 'iwmmxt_helper.c',
+ 'neon_helper.c',
+ 'tlb_helper.c',
+ 'vfp_helper.c',
+))
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index 7dc5fb7..0efc18a 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -21,7 +21,6 @@
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#ifdef CONFIG_USER_ONLY
#include "user/cpu_loop.h"
@@ -30,6 +29,7 @@
#include "system/ram_addr.h"
#endif
#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/helper-proto.h"
#include "exec/tlb-flags.h"
#include "accel/tcg/cpu-ops.h"
@@ -37,7 +37,6 @@
#include "qemu/guest-random.h"
#include "mte_helper.h"
-
static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
if (exclude == 0xffff) {
@@ -63,6 +62,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
bool probe, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
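+    /*
+     * MTE allocation tags are 4 bits per 16-byte granule, packed two to
+     * a byte, hence TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1) bytes of
+     * tag storage per page.
+     */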
+ const size_t page_data_size = TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1);
uint64_t clean_ptr = useronly_clean_ptr(ptr);
int flags = page_get_flags(clean_ptr);
uint8_t *tags;
@@ -83,7 +83,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
return NULL;
}
- tags = page_get_target_data(clean_ptr);
+ tags = page_get_target_data(clean_ptr, page_data_size);
index = extract32(ptr, LOG2_TAG_GRANULE + 1,
TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
diff --git a/target/arm/tcg/mve_helper.c b/target/arm/tcg/mve_helper.c
index f9f67d1..506d1c3 100644
--- a/target/arm/tcg/mve_helper.c
+++ b/target/arm/tcg/mve_helper.c
@@ -23,7 +23,6 @@
#include "vec_internal.h"
#include "exec/helper-proto.h"
#include "accel/tcg/cpu-ldst.h"
-#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "fpu/softfloat.h"
#include "crypto/clmul.h"
diff --git a/target/arm/tcg/neon_helper.c b/target/arm/tcg/neon_helper.c
index e2cc7cf..2cc8241 100644
--- a/target/arm/tcg/neon_helper.c
+++ b/target/arm/tcg/neon_helper.c
@@ -9,11 +9,13 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "fpu/softfloat.h"
#include "vec_internal.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c
index 38d49cb..575e566 100644
--- a/target/arm/tcg/op_helper.c
+++ b/target/arm/tcg/op_helper.c
@@ -23,8 +23,8 @@
#include "exec/target_page.h"
#include "internals.h"
#include "cpu-features.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "cpregs.h"
#define SIGNBIT (uint32_t)0x80000000
@@ -1222,7 +1222,7 @@ uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
}
}
-void HELPER(probe_access)(CPUARMState *env, target_ulong ptr,
+void HELPER(probe_access)(CPUARMState *env, vaddr ptr,
uint32_t access_type, uint32_t mmu_idx,
uint32_t size)
{
diff --git a/target/arm/tcg/pauth_helper.c b/target/arm/tcg/pauth_helper.c
index 59bf275..c591c30 100644
--- a/target/arm/tcg/pauth_helper.c
+++ b/target/arm/tcg/pauth_helper.c
@@ -21,7 +21,6 @@
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
diff --git a/target/arm/tcg/sme_helper.c b/target/arm/tcg/sme_helper.c
index 96b84c3..de0c6e5 100644
--- a/target/arm/tcg/sme_helper.c
+++ b/target/arm/tcg/sme_helper.c
@@ -23,7 +23,7 @@
#include "tcg/tcg-gvec-desc.h"
#include "exec/helper-proto.h"
#include "accel/tcg/cpu-ldst.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/helper-retaddr.h"
#include "qemu/int128.h"
#include "fpu/softfloat.h"
#include "vec_internal.h"
diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c
index 87b6b4b..a2c363a 100644
--- a/target/arm/tcg/sve_helper.c
+++ b/target/arm/tcg/sve_helper.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/helper-proto.h"
#include "exec/target_page.h"
@@ -31,7 +30,9 @@
#include "vec_internal.h"
#include "sve_ldst_internal.h"
#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/helper-retaddr.h"
#include "accel/tcg/cpu-ops.h"
+#include "accel/tcg/probe.h"
#ifdef CONFIG_USER_ONLY
#include "user/page-protection.h"
#endif
diff --git a/target/arm/tcg/tlb-insns.c b/target/arm/tcg/tlb-insns.c
index 0407ad5..95c26c6 100644
--- a/target/arm/tcg/tlb-insns.c
+++ b/target/arm/tcg/tlb-insns.c
@@ -35,7 +35,6 @@ static CPAccessResult access_ttlbis(CPUARMState *env, const ARMCPRegInfo *ri,
return CP_ACCESS_OK;
}
-#ifdef TARGET_AARCH64
/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBOS. */
static CPAccessResult access_ttlbos(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
@@ -46,7 +45,6 @@ static CPAccessResult access_ttlbos(CPUARMState *env, const ARMCPRegInfo *ri,
}
return CP_ACCESS_OK;
}
-#endif
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -802,7 +800,6 @@ static const ARMCPRegInfo tlbi_el3_cp_reginfo[] = {
.writefn = tlbi_aa64_vae3_write },
};
-#ifdef TARGET_AARCH64
typedef struct {
uint64_t base;
uint64_t length;
@@ -1270,8 +1267,6 @@ static const ARMCPRegInfo tlbi_rme_reginfo[] = {
.writefn = tlbi_aa64_paallos_write },
};
-#endif
-
void define_tlb_insn_regs(ARMCPU *cpu)
{
CPUARMState *env = &cpu->env;
@@ -1299,7 +1294,6 @@ void define_tlb_insn_regs(ARMCPU *cpu)
if (arm_feature(env, ARM_FEATURE_EL3)) {
define_arm_cp_regs(cpu, tlbi_el3_cp_reginfo);
}
-#ifdef TARGET_AARCH64
if (cpu_isar_feature(aa64_tlbirange, cpu)) {
define_arm_cp_regs(cpu, tlbirange_reginfo);
}
@@ -1309,5 +1303,4 @@ void define_tlb_insn_regs(ARMCPU *cpu)
if (cpu_isar_feature(aa64_rme, cpu)) {
define_arm_cp_regs(cpu, tlbi_rme_reginfo);
}
-#endif
}
diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c
index 8841f03..23c72a9 100644
--- a/target/arm/tcg/tlb_helper.c
+++ b/target/arm/tcg/tlb_helper.c
@@ -9,9 +9,9 @@
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
-#include "exec/exec-all.h"
-#include "exec/helper-proto.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
/*
* Returns true if the stage 1 translation regime is using LPAE format page
@@ -277,7 +277,7 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}
-void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)
+void helper_exception_pc_alignment(CPUARMState *env, vaddr pc)
{
ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
int target_el = exception_target_el(env);
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 43408c7..815225b 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -17,7 +17,6 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
-#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "translate.h"
#include "translate-a64.h"
@@ -434,12 +433,6 @@ static void gen_rebuild_hflags(DisasContext *s)
gen_helper_rebuild_hflags_a64(tcg_env, tcg_constant_i32(s->current_el));
}
-static void gen_exception_internal(int excp)
-{
- assert(excp_is_internal(excp));
- gen_helper_exception_internal(tcg_env, tcg_constant_i32(excp));
-}
-
static void gen_exception_internal_insn(DisasContext *s, int excp)
{
gen_a64_update_pc(s, 0);
@@ -1076,11 +1069,9 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
TCGv_i64 cf_64 = tcg_temp_new_i64();
TCGv_i64 vf_64 = tcg_temp_new_i64();
TCGv_i64 tmp = tcg_temp_new_i64();
- TCGv_i64 zero = tcg_constant_i64(0);
tcg_gen_extu_i32_i64(cf_64, cpu_CF);
- tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero);
- tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero);
+ tcg_gen_addcio_i64(result, cf_64, t0, t1, cf_64);
tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
gen_set_NZ64(result);
@@ -1094,12 +1085,10 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
TCGv_i32 t0_32 = tcg_temp_new_i32();
TCGv_i32 t1_32 = tcg_temp_new_i32();
TCGv_i32 tmp = tcg_temp_new_i32();
- TCGv_i32 zero = tcg_constant_i32(0);
tcg_gen_extrl_i64_i32(t0_32, t0);
tcg_gen_extrl_i64_i32(t1_32, t1);
- tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero);
- tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero);
+ tcg_gen_addcio_i32(cpu_NF, cpu_CF, t0_32, t1_32, cpu_CF);
tcg_gen_mov_i32(cpu_ZF, cpu_NF);
tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
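
Both gen_adc_CC variants replace the paired add2 ops with the single addcio
op. tcg_gen_addcio_i32(ret, co, a, b, ci) performs an add with carry-in and
carry-out; for the 32-bit case the semantics are roughly (a sketch, with ci
holding 0 or 1 as the Arm carry flag does):

    uint64_t t = (uint64_t)a + (uint64_t)b + (uint64_t)ci;
    ret = (uint32_t)t;           /* here: cpu_NF */
    co  = (uint32_t)(t >> 32);   /* here: cpu_CF */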
@@ -1821,6 +1810,10 @@ static bool trans_RETA(DisasContext *s, arg_reta *a)
{
TCGv_i64 dst;
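+    /* Returning false here makes RETAA/RETAB UNDEF when FEAT_PAuth is absent. */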
+ if (!dc_isar_feature(aa64_pauth, s)) {
+ return false;
+ }
+
dst = auth_branch_target(s, cpu_reg(s, 30), cpu_X[31], !a->m);
gen_a64_set_pc(s, dst);
s->base.is_jmp = DISAS_JUMP;
@@ -8600,7 +8593,7 @@ static bool trans_CCMP(DisasContext *s, arg_CCMP *a)
tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
nzcv = a->nzcv;
- has_andc = tcg_op_supported(INDEX_op_andc_i32, TCG_TYPE_I32, 0);
+ has_andc = tcg_op_supported(INDEX_op_andc, TCG_TYPE_I32, 0);
if (nzcv & 8) { /* N */
tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
} else {
@@ -10247,7 +10240,7 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
* start of the TB.
*/
assert(s->base.num_insns == 1);
- gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc));
+ gen_helper_exception_pc_alignment(tcg_env, tcg_constant_vaddr(pc));
s->base.is_jmp = DISAS_NORETURN;
s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
return;
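
The call sites switch from tcg_constant_tl() to tcg_constant_vaddr() to
match the helper's new prototype from tlb_helper.c above:

    void helper_exception_pc_alignment(CPUARMState *env, vaddr pc);

so the translator no longer depends on the width of target_ulong.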
diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c
index d23be47..f3cf028 100644
--- a/target/arm/tcg/translate-sve.c
+++ b/target/arm/tcg/translate-sve.c
@@ -629,7 +629,7 @@ static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
* = | ~(m | k)
*/
tcg_gen_and_i64(n, n, k);
- if (tcg_op_supported(INDEX_op_orc_i64, TCG_TYPE_I64, 0)) {
+ if (tcg_op_supported(INDEX_op_orc, TCG_TYPE_I64, 0)) {
tcg_gen_or_i64(m, m, k);
tcg_gen_orc_i64(d, n, m);
} else {
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index 273b860..f7d6d8c 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -372,7 +372,7 @@ static void gen_rebuild_hflags(DisasContext *s, bool new_el)
}
}
-static void gen_exception_internal(int excp)
+void gen_exception_internal(int excp)
{
assert(excp_is_internal(excp));
gen_helper_exception_internal(tcg_env, tcg_constant_i32(excp));
@@ -494,20 +494,9 @@ static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
TCGv_i32 tmp = tcg_temp_new_i32();
- if (tcg_op_supported(INDEX_op_add2_i32, TCG_TYPE_I32, 0)) {
- tcg_gen_movi_i32(tmp, 0);
- tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
- tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
- } else {
- TCGv_i64 q0 = tcg_temp_new_i64();
- TCGv_i64 q1 = tcg_temp_new_i64();
- tcg_gen_extu_i32_i64(q0, t0);
- tcg_gen_extu_i32_i64(q1, t1);
- tcg_gen_add_i64(q0, q0, q1);
- tcg_gen_extu_i32_i64(q1, cpu_CF);
- tcg_gen_add_i64(q0, q0, q1);
- tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
- }
+
+ tcg_gen_addcio_i32(cpu_NF, cpu_CF, t0, t1, cpu_CF);
+
tcg_gen_mov_i32(cpu_ZF, cpu_NF);
tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
tcg_gen_xor_i32(tmp, t0, t1);
@@ -7771,7 +7760,8 @@ static bool arm_check_ss_active(DisasContext *dc)
static void arm_post_translate_insn(DisasContext *dc)
{
- if (dc->condjmp && dc->base.is_jmp == DISAS_NEXT) {
+ if (dc->condjmp &&
+ (dc->base.is_jmp == DISAS_NEXT || dc->base.is_jmp == DISAS_TOO_MANY)) {
if (dc->pc_save != dc->condlabel.pc_save) {
gen_update_pc(dc, dc->condlabel.pc_save - dc->pc_save);
}
@@ -7801,7 +7791,7 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
* be possible after an indirect branch, at the start of the TB.
*/
assert(dc->base.num_insns == 1);
- gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc));
+ gen_helper_exception_pc_alignment(tcg_env, tcg_constant_vaddr(pc));
dc->base.is_jmp = DISAS_NORETURN;
dc->base.pc_next = QEMU_ALIGN_UP(pc, 4);
return;
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
index 53e485d..0004a97 100644
--- a/target/arm/tcg/translate.h
+++ b/target/arm/tcg/translate.h
@@ -4,7 +4,6 @@
#include "cpu.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
-#include "exec/exec-all.h"
#include "exec/translator.h"
#include "exec/translation-block.h"
#include "exec/helper-gen.h"
@@ -348,6 +347,7 @@ void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
void arm_gen_test_cc(int cc, TCGLabel *label);
MemOp pow2_align(unsigned i);
void unallocated_encoding(DisasContext *s);
+void gen_exception_internal(int excp);
void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp,
uint32_t syn, uint32_t target_el);
void gen_exception_insn(DisasContext *s, target_long pc_diff,
diff --git a/target/arm/tcg/vec_internal.h b/target/arm/tcg/vec_internal.h
index 6b93b5a..c02f9c3 100644
--- a/target/arm/tcg/vec_internal.h
+++ b/target/arm/tcg/vec_internal.h
@@ -22,6 +22,8 @@
#include "fpu/softfloat.h"
+typedef struct CPUArchState CPUARMState;
+
/*
* Note that vector data is stored in host-endian 64-bit chunks,
* so addressing units smaller than that needs a host-endian fixup.
diff --git a/target/arm/tcg/vfp_helper.c b/target/arm/tcg/vfp_helper.c
index b32e2f4..b1324c5 100644
--- a/target/arm/tcg/vfp_helper.c
+++ b/target/arm/tcg/vfp_helper.c
@@ -19,12 +19,14 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/helper-proto.h"
#include "internals.h"
#include "cpu-features.h"
#include "fpu/softfloat.h"
#include "qemu/log.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
/*
* Set the float_status behaviour to match the Arm defaults:
* * tininess-before-rounding
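
The include change above is the per-target helper-prototype pattern: instead
of pulling in the global "exec/helper-proto.h", the file defines HELPER_H to
point at the target's DEF_HELPER_* list and then includes the generator,
which expands prototypes for just those helpers:

    #define HELPER_H "tcg/helper.h"     /* target-local DEF_HELPER_* list */
    #include "exec/helper-proto.h.inc"  /* expands to the prototypes */
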
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index 3f261c6..6995de6 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -21,13 +21,13 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/qemu-print.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "system/address-spaces.h"
#include "cpu.h"
#include "disas/dis-asm.h"
#include "tcg/debug-assert.h"
#include "hw/qdev-properties.h"
+#include "accel/tcg/cpu-ops.h"
static void avr_cpu_set_pc(CPUState *cs, vaddr value)
{
@@ -54,6 +54,21 @@ static int avr_cpu_mmu_index(CPUState *cs, bool ifetch)
return ifetch ? MMU_CODE_IDX : MMU_DATA_IDX;
}
+static TCGTBCPUState avr_get_tb_cpu_state(CPUState *cs)
+{
+ CPUAVRState *env = cpu_env(cs);
+ uint32_t flags = 0;
+
+ if (env->fullacc) {
+ flags |= TB_FLAGS_FULL_ACCESS;
+ }
+ if (env->skip) {
+ flags |= TB_FLAGS_SKIP;
+ }
+
+ return (TCGTBCPUState){ .pc = env->pc_w * 2, .flags = flags };
+}
+
static void avr_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -221,20 +236,26 @@ static const struct SysemuCPUOps avr_sysemu_ops = {
.get_phys_page_debug = avr_cpu_get_phys_page_debug,
};
-#include "accel/tcg/cpu-ops.h"
-
static const TCGCPUOps avr_tcg_ops = {
.guest_default_memory_order = 0,
.mttcg_supported = false,
.initialize = avr_cpu_tcg_init,
.translate_code = avr_cpu_translate_code,
+ .get_tb_cpu_state = avr_get_tb_cpu_state,
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
.restore_state_to_opc = avr_restore_state_to_opc,
.mmu_index = avr_cpu_mmu_index,
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
.cpu_exec_halt = avr_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.tlb_fill = avr_cpu_tlb_fill,
.do_interrupt = avr_cpu_do_interrupt,
+ /*
+ * TODO: code and data wrapping are different, but for the most part
+ * AVR only references bytes or aligned code fetches.  However, we use
+ * non-aligned MO_16 accesses for stack push/pop.
+ */
+ .pointer_wrap = cpu_pointer_wrap_uint32,
};
static void avr_cpu_class_init(ObjectClass *oc, const void *data)
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
index d666617..518e243 100644
--- a/target/avr/cpu.h
+++ b/target/avr/cpu.h
@@ -205,24 +205,6 @@ enum {
TB_FLAGS_SKIP = 2,
};
-static inline void cpu_get_tb_cpu_state(CPUAVRState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags)
-{
- uint32_t flags = 0;
-
- *pc = env->pc_w * 2;
- *cs_base = 0;
-
- if (env->fullacc) {
- flags |= TB_FLAGS_FULL_ACCESS;
- }
- if (env->skip) {
- flags |= TB_FLAGS_SKIP;
- }
-
- *pflags = flags;
-}
-
static inline int cpu_interrupts_enabled(CPUAVRState *env)
{
return env->sregI != 0;
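
AVR is one of several targets in this series (Hexagon and HPPA follow below)
that drop the per-target cpu_get_tb_cpu_state() in favor of the new
TCGCPUOps::get_tb_cpu_state hook, which returns pc, flags, and cs_base in a
TCGTBCPUState by value. The general shape of such a hook, for a hypothetical
target (the Foo names are illustrative only):

    static TCGTBCPUState foo_get_tb_cpu_state(CPUState *cs)
    {
        CPUFooState *env = cpu_env(cs);

        return (TCGTBCPUState){
            .pc = env->pc,
            .flags = foo_compute_tb_flags(env),
            /* .cs_base stays 0 unless the target needs it */
        };
    }

    static const TCGCPUOps foo_tcg_ops = {
        .get_tb_cpu_state = foo_get_tb_cpu_state,
        /* ... */
    };
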
diff --git a/target/avr/helper.c b/target/avr/helper.c
index afa5914..b9cd6d5 100644
--- a/target/avr/helper.c
+++ b/target/avr/helper.c
@@ -23,7 +23,6 @@
#include "qemu/error-report.h"
#include "cpu.h"
#include "accel/tcg/cpu-ops.h"
-#include "accel/tcg/getpc.h"
#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
diff --git a/target/avr/translate.c b/target/avr/translate.c
index b9c592c..804b0b2 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -22,7 +22,6 @@
#include "qemu/qemu-print.h"
#include "tcg/tcg.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
index a5d31c3..a5a0417 100644
--- a/target/hexagon/cpu.c
+++ b/target/hexagon/cpu.c
@@ -19,13 +19,13 @@
#include "qemu/qemu-print.h"
#include "cpu.h"
#include "internal.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "qapi/error.h"
#include "hw/qdev-properties.h"
#include "fpu/softfloat-helpers.h"
#include "tcg/tcg.h"
#include "exec/gdbstub.h"
+#include "accel/tcg/cpu-ops.h"
static void hexagon_v66_cpu_init(Object *obj) { }
static void hexagon_v67_cpu_init(Object *obj) { }
@@ -255,6 +255,22 @@ static vaddr hexagon_cpu_get_pc(CPUState *cs)
return cpu_env(cs)->gpr[HEX_REG_PC];
}
+static TCGTBCPUState hexagon_get_tb_cpu_state(CPUState *cs)
+{
+ CPUHexagonState *env = cpu_env(cs);
+ vaddr pc = env->gpr[HEX_REG_PC];
+ uint32_t hex_flags = 0;
+
+ if (pc == env->gpr[HEX_REG_SA0]) {
+ hex_flags = FIELD_DP32(hex_flags, TB_FLAGS, IS_TIGHT_LOOP, 1);
+ }
+ if (pc & PCALIGN_MASK) {
+ hexagon_raise_exception_err(env, HEX_CAUSE_PC_NOT_ALIGNED, 0);
+ }
+
+ return (TCGTBCPUState){ .pc = pc, .flags = hex_flags };
+}
+
static void hexagon_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -322,14 +338,13 @@ static void hexagon_cpu_init(Object *obj)
{
}
-#include "accel/tcg/cpu-ops.h"
-
static const TCGCPUOps hexagon_tcg_ops = {
/* MTTCG not yet supported: require strict ordering */
.guest_default_memory_order = TCG_MO_ALL,
.mttcg_supported = false,
.initialize = hexagon_translate_init,
.translate_code = hexagon_translate_code,
+ .get_tb_cpu_state = hexagon_get_tb_cpu_state,
.synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
.restore_state_to_opc = hexagon_restore_state_to_opc,
.mmu_index = hexagon_cpu_mmu_index,
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
index c065fa8..43a854f 100644
--- a/target/hexagon/cpu.h
+++ b/target/hexagon/cpu.h
@@ -137,21 +137,6 @@ G_NORETURN void hexagon_raise_exception_err(CPUHexagonState *env,
uint32_t exception,
uintptr_t pc);
-static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- uint32_t hex_flags = 0;
- *pc = env->gpr[HEX_REG_PC];
- *cs_base = 0;
- if (*pc == env->gpr[HEX_REG_SA0]) {
- hex_flags = FIELD_DP32(hex_flags, TB_FLAGS, IS_TIGHT_LOOP, 1);
- }
- *flags = hex_flags;
- if (*pc & PCALIGN_MASK) {
- hexagon_raise_exception_err(env, HEX_CAUSE_PC_NOT_ALIGNED, 0);
- }
-}
-
typedef HexagonCPU ArchCPU;
void hexagon_translate_init(void);
diff --git a/target/hexagon/mmvec/macros.h b/target/hexagon/mmvec/macros.h
index c1a8839..c7840fb 100644
--- a/target/hexagon/mmvec/macros.h
+++ b/target/hexagon/mmvec/macros.h
@@ -22,6 +22,7 @@
#include "arch.h"
#include "mmvec/system_ext_mmvec.h"
#include "accel/tcg/getpc.h"
+#include "accel/tcg/probe.h"
#ifndef QEMU_GENERATE
#define VdV (*(MMVector *restrict)(VdV_void))
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
index 3f3d86d..444799d 100644
--- a/target/hexagon/op_helper.c
+++ b/target/hexagon/op_helper.c
@@ -17,8 +17,8 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
#include "cpu.h"
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index b792cb2..2477772 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -24,12 +24,12 @@
#include "qemu/timer.h"
#include "cpu.h"
#include "qemu/module.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "exec/target_page.h"
#include "fpu/softfloat.h"
#include "tcg/tcg.h"
#include "hw/hppa/hppa_hardware.h"
+#include "accel/tcg/cpu-ops.h"
static void hppa_cpu_set_pc(CPUState *cs, vaddr value)
{
@@ -51,11 +51,12 @@ static vaddr hppa_cpu_get_pc(CPUState *cs)
env->iaoq_f & -4);
}
-void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
- uint64_t *pcsbase, uint32_t *pflags)
+static TCGTBCPUState hppa_get_tb_cpu_state(CPUState *cs)
{
+ CPUHPPAState *env = cpu_env(cs);
uint32_t flags = 0;
uint64_t cs_base = 0;
+ vaddr pc;
/*
* TB lookup assumes that PC contains the complete virtual address.
@@ -63,7 +64,7 @@ void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
* incomplete virtual address. This also means that we must separate
* out current cpu privilege from the low bits of IAOQ_F.
*/
- *pc = hppa_cpu_get_pc(env_cpu(env));
+ pc = hppa_cpu_get_pc(env_cpu(env));
flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;
/*
@@ -99,8 +100,7 @@ void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
}
#endif
- *pcsbase = cs_base;
- *pflags = flags;
+ return (TCGTBCPUState){ .pc = pc, .flags = flags, .cs_base = cs_base };
}
static void hppa_cpu_synchronize_from_tb(CPUState *cs,
@@ -250,8 +250,6 @@ static const struct SysemuCPUOps hppa_sysemu_ops = {
};
#endif
-#include "accel/tcg/cpu-ops.h"
-
static const TCGCPUOps hppa_tcg_ops = {
/* PA-RISC 1.x processors have a strong memory model. */
/*
@@ -264,14 +262,17 @@ static const TCGCPUOps hppa_tcg_ops = {
.initialize = hppa_translate_init,
.translate_code = hppa_translate_code,
+ .get_tb_cpu_state = hppa_get_tb_cpu_state,
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
.restore_state_to_opc = hppa_restore_state_to_opc,
.mmu_index = hppa_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
.tlb_fill_align = hppa_cpu_tlb_fill_align,
+ .pointer_wrap = cpu_pointer_wrap_notreached,
.cpu_exec_interrupt = hppa_cpu_exec_interrupt,
.cpu_exec_halt = hppa_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = hppa_cpu_do_interrupt,
.do_unaligned_access = hppa_cpu_do_unaligned_access,
.do_transaction_failed = hppa_cpu_do_transaction_failed,
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index acc9937..11d59d1 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -351,9 +351,6 @@ hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr);
#define CS_BASE_DIFFPAGE (1 << 12)
#define CS_BASE_DIFFSPACE (1 << 13)
-void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags);
-
target_ulong cpu_hppa_get_psw(CPUHPPAState *env);
void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong);
void update_gva_offset_mask(CPUHPPAState *env);
diff --git a/target/hppa/fpu_helper.c b/target/hppa/fpu_helper.c
index a62d9d3..4535320 100644
--- a/target/hppa/fpu_helper.c
+++ b/target/hppa/fpu_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
@@ -95,7 +94,8 @@ static void update_fr0_op(CPUHPPAState *env, uintptr_t ra)
{
uint32_t soft_exp = get_float_exception_flags(&env->fp_status);
uint32_t hard_exp = 0;
- uint32_t shadow = env->fr0_shadow;
+ uint32_t shadow = env->fr0_shadow & 0x3ffffff;
+ uint32_t fr1 = 0;
if (likely(soft_exp == 0)) {
env->fr[0] = (uint64_t)shadow << 32;
@@ -108,9 +108,22 @@ static void update_fr0_op(CPUHPPAState *env, uintptr_t ra)
hard_exp |= CONVERT_BIT(soft_exp, float_flag_overflow, R_FPSR_ENA_O_MASK);
hard_exp |= CONVERT_BIT(soft_exp, float_flag_divbyzero, R_FPSR_ENA_Z_MASK);
hard_exp |= CONVERT_BIT(soft_exp, float_flag_invalid, R_FPSR_ENA_V_MASK);
- shadow |= hard_exp << (R_FPSR_FLAGS_SHIFT - R_FPSR_ENABLES_SHIFT);
+ if (hard_exp & shadow) {
+ shadow = FIELD_DP32(shadow, FPSR, T, 1);
+ /* fill exception register #1, which is the lower 32 bits of fr[0] */
+#if !defined(CONFIG_USER_ONLY)
+ if (hard_exp & (R_FPSR_ENA_O_MASK | R_FPSR_ENA_U_MASK)) {
+ /* over- and underflow both set only the overflow flag */
+ fr1 = FIELD_DP32(fr1, FPSR, C, 1);
+ fr1 = FIELD_DP32(fr1, FPSR, FLG_O, 1);
+ } else
+#endif
+ {
+ fr1 |= hard_exp << (R_FPSR_FLAGS_SHIFT - R_FPSR_ENABLES_SHIFT);
+ }
+ }
env->fr0_shadow = shadow;
- env->fr[0] = (uint64_t)shadow << 32;
+ env->fr[0] = (uint64_t)shadow << 32 | fr1;
if (hard_exp & shadow) {
hppa_dynamic_excp(env, EXCP_ASSIST, ra);
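
A plain-C restatement of the enabled-trap path above, illustration only: it
omits the system-mode over/underflow special case, and R_FPSR_T_MASK stands
in for the FIELD_DP32(shadow, FPSR, T, 1) update. hard_exp holds the raised
exceptions at the ENABLES bit positions, so when one of them is also enabled
in the shadow status, the T (trap) bit is set and the exception is recorded
in exception register #1, the low half of fr[0], at the FLAGS positions:

    if (hard_exp & shadow) {
        /* An enabled exception was raised: set T and record the
         * exception in register #1, shifted from the ENABLES bit
         * positions up to the FLAGS bit positions. */
        shadow |= R_FPSR_T_MASK;
        fr1 = hard_exp << (R_FPSR_FLAGS_SHIFT - R_FPSR_ENABLES_SHIFT);
    }
    env->fr[0] = (uint64_t)shadow << 32 | fr1;
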
diff --git a/target/hppa/helper.c b/target/hppa/helper.c
index ac7f58f..d7f8495 100644
--- a/target/hppa/helper.c
+++ b/target/hppa/helper.c
@@ -21,7 +21,6 @@
#include "qemu/log.h"
#include "cpu.h"
#include "fpu/softfloat.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/qemu-print.h"
#include "hw/hppa/hppa_hardware.h"
diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c
index 7d48643..191ae19 100644
--- a/target/hppa/int_helper.c
+++ b/target/hppa/int_helper.c
@@ -177,6 +177,10 @@ void hppa_cpu_do_interrupt(CPUState *cs)
}
}
env->cr[CR_IIR] = ldl_phys(cs->as, paddr);
+ if (i == EXCP_ASSIST) {
+ /* stuff insn code into bits of FP exception register #1 */
+ env->fr[0] |= (env->cr[CR_IIR] & 0x03ffffff);
+ }
}
break;
diff --git a/target/hppa/machine.c b/target/hppa/machine.c
index bb47a2e..13e5551 100644
--- a/target/hppa/machine.c
+++ b/target/hppa/machine.c
@@ -216,7 +216,7 @@ static const VMStateDescription vmstate_env = {
};
static const VMStateField vmstate_cpu_fields[] = {
- VMSTATE_CPU(),
+ VMSTATE_STRUCT(parent_obj, HPPACPU, 0, vmstate_cpu_common, CPUState),
VMSTATE_STRUCT(env, HPPACPU, 1, vmstate_env, CPUHPPAState),
VMSTATE_END_OF_LIST()
};
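
VMSTATE_CPU() is a convenience macro that expands to roughly the open-coded
replacement above: a VMS_STRUCT field named "parent_obj" serialized with
vmstate_cpu_common. A sketch of the equivalence (the macro body is
paraphrased, not quoted, and assumes parent_obj is the first member of
HPPACPU):

    /* These two field descriptions produce the same on-the-wire data: */
    VMSTATE_CPU(),
    VMSTATE_STRUCT(parent_obj, HPPACPU, 0, vmstate_cpu_common, CPUState),
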
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index 554d7bf..9bdd0a6 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -20,9 +20,9 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/cputlb.h"
#include "accel/tcg/cpu-mmu-index.h"
+#include "accel/tcg/probe.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "exec/helper-proto.h"
diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c
index 2398ce2..0458378 100644
--- a/target/hppa/op_helper.c
+++ b/target/hppa/op_helper.c
@@ -20,9 +20,9 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "qemu/timer.h"
#include "trace.h"
#ifdef CONFIG_USER_ONLY
diff --git a/target/hppa/sys_helper.c b/target/hppa/sys_helper.c
index 052a6a8..6e65fad 100644
--- a/target/hppa/sys_helper.c
+++ b/target/hppa/sys_helper.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "system/runstate.h"
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 14f3833..7a81cfc 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
@@ -1209,10 +1208,10 @@ static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1,
cb_msb = tcg_temp_new_i64();
cb = tcg_temp_new_i64();
- tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
if (is_c) {
- tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
- get_psw_carry(ctx, d), ctx->zero);
+ tcg_gen_addcio_i64(dest, cb_msb, in1, in2, get_psw_carry(ctx, d));
+ } else {
+ tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
}
tcg_gen_xor_i64(cb, in1, in2);
tcg_gen_xor_i64(cb, cb, dest);
@@ -1308,9 +1307,7 @@ static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
if (is_b) {
/* DEST,C = IN1 + ~IN2 + C. */
tcg_gen_not_i64(cb, in2);
- tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
- get_psw_carry(ctx, d), ctx->zero);
- tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
+ tcg_gen_addcio_i64(dest, cb_msb, in1, cb, get_psw_carry(ctx, d));
tcg_gen_xor_i64(cb, cb, in1);
tcg_gen_xor_i64(cb, cb, dest);
} else {
@@ -3008,9 +3005,7 @@ static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
tcg_gen_xor_i64(add2, in2, addc);
tcg_gen_andi_i64(addc, addc, 1);
- tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
- tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
- addc, ctx->zero);
+ tcg_gen_addcio_i64(dest, cpu_psw_cb_msb, add1, add2, addc);
/* Write back the result register. */
save_gpr(ctx, a->t, dest);
@@ -3553,8 +3548,7 @@ static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
TCGv_i64 cb = tcg_temp_new_i64();
TCGv_i64 cb_msb = tcg_temp_new_i64();
- tcg_gen_movi_i64(cb_msb, 0);
- tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
+ tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
tcg_gen_xor_i64(cb, in1, in2);
tcg_gen_xor_i64(cb, cb, dest);
cb_cond = get_carry(ctx, d, cb, cb_msb);
diff --git a/target/i386/confidential-guest.h b/target/i386/confidential-guest.h
index 164be76..48b88db 100644
--- a/target/i386/confidential-guest.h
+++ b/target/i386/confidential-guest.h
@@ -39,8 +39,10 @@ struct X86ConfidentialGuestClass {
/* <public> */
int (*kvm_type)(X86ConfidentialGuest *cg);
- uint32_t (*mask_cpuid_features)(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index,
- int reg, uint32_t value);
+ void (*cpu_instance_init)(X86ConfidentialGuest *cg, CPUState *cpu);
+ uint32_t (*adjust_cpuid_features)(X86ConfidentialGuest *cg, uint32_t feature,
+ uint32_t index, int reg, uint32_t value);
+ int (*check_features)(X86ConfidentialGuest *cg, CPUState *cs);
};
/**
@@ -59,25 +61,47 @@ static inline int x86_confidential_guest_kvm_type(X86ConfidentialGuest *cg)
}
}
+static inline void x86_confidential_guest_cpu_instance_init(X86ConfidentialGuest *cg,
+ CPUState *cpu)
+{
+ X86ConfidentialGuestClass *klass = X86_CONFIDENTIAL_GUEST_GET_CLASS(cg);
+
+ if (klass->cpu_instance_init) {
+ klass->cpu_instance_init(cg, cpu);
+ }
+}
+
/**
- * x86_confidential_guest_mask_cpuid_features:
+ * x86_confidential_guest_adjust_cpuid_features:
*
- * Removes unsupported features from a confidential guest's CPUID values, returns
- * the value with the bits removed. The bits removed should be those that KVM
- * provides independent of host-supported CPUID features, but are not supported by
- * the confidential computing firmware.
+ * Adjusts the supported features in a confidential guest's CPUID values and
+ * returns the adjusted value.  Bits may be removed when they are not
+ * supported by the confidential computing firmware, or added when the
+ * firmware forcibly exposes them to the guest.
*/
-static inline int x86_confidential_guest_mask_cpuid_features(X86ConfidentialGuest *cg,
+static inline int x86_confidential_guest_adjust_cpuid_features(X86ConfidentialGuest *cg,
uint32_t feature, uint32_t index,
int reg, uint32_t value)
{
X86ConfidentialGuestClass *klass = X86_CONFIDENTIAL_GUEST_GET_CLASS(cg);
- if (klass->mask_cpuid_features) {
- return klass->mask_cpuid_features(cg, feature, index, reg, value);
+ if (klass->adjust_cpuid_features) {
+ return klass->adjust_cpuid_features(cg, feature, index, reg, value);
} else {
return value;
}
}
+static inline int x86_confidential_guest_check_features(X86ConfidentialGuest *cg,
+ CPUState *cs)
+{
+ X86ConfidentialGuestClass *klass = X86_CONFIDENTIAL_GUEST_GET_CLASS(cg);
+
+ if (klass->check_features) {
+ return klass->check_features(cg, cs);
+ }
+
+ return 0;
+}
+
#endif
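
A confidential-guest implementation opts into the new hooks from its
class_init. A hypothetical sketch (the foo_* names are illustrative, and it
assumes the usual QOM class-cast macro X86_CONFIDENTIAL_GUEST_CLASS):

    static void foo_guest_class_init(ObjectClass *oc, const void *data)
    {
        X86ConfidentialGuestClass *x86_klass =
            X86_CONFIDENTIAL_GUEST_CLASS(oc);

        x86_klass->cpu_instance_init = foo_guest_cpu_instance_init;
        x86_klass->adjust_cpuid_features = foo_guest_adjust_cpuid_features;
        x86_klass->check_features = foo_guest_check_features;
    }
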
diff --git a/target/i386/cpu-system.c b/target/i386/cpu-system.c
index 55f192e..b1494aa 100644
--- a/target/i386/cpu-system.c
+++ b/target/i386/cpu-system.c
@@ -24,7 +24,7 @@
#include "qobject/qdict.h"
#include "qapi/qobject-input-visitor.h"
#include "qom/qom-qobject.h"
-#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/qapi-commands-machine.h"
#include "cpu-internal.h"
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 6f21d5e..0d35e95 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -37,8 +37,9 @@
#include "hw/i386/topology.h"
#include "exec/watchpoint.h"
#ifndef CONFIG_USER_ONLY
+#include "confidential-guest.h"
#include "system/reset.h"
-#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/qapi-commands-machine.h"
#include "system/address-spaces.h"
#include "hw/boards.h"
#include "hw/i386/sgx-epc.h"
@@ -776,11 +777,12 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
- CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
+ CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE | \
+ CPUID_HT)
/* partly implemented:
CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
/* missing:
- CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
+ CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_TM, CPUID_PBE */
/*
* Kernel-only features that can be shown to usermode programs even if
@@ -848,7 +850,8 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A | \
- CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_KERNEL_FEATURES)
+ CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_KERNEL_FEATURES | \
+ CPUID_EXT3_CMP_LEG)
#define TCG_EXT4_FEATURES 0
@@ -897,6 +900,7 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
#define TCG_7_1_EAX_FEATURES (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | \
CPUID_7_1_EAX_FSRC | CPUID_7_1_EAX_CMPCCXADD)
+#define TCG_7_1_ECX_FEATURES 0
#define TCG_7_1_EDX_FEATURES 0
#define TCG_7_2_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
@@ -922,6 +926,17 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
#define TCG_8000_0008_EBX (CPUID_8000_0008_EBX_XSAVEERPTR | \
CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_KERNEL_FEATURES)
+#if defined CONFIG_USER_ONLY
+#define CPUID_8000_0021_EAX_KERNEL_FEATURES CPUID_8000_0021_EAX_AUTO_IBRS
+#else
+#define CPUID_8000_0021_EAX_KERNEL_FEATURES 0
+#endif
+
+#define TCG_8000_0021_EAX_FEATURES ( \
+ CPUID_8000_0021_EAX_NO_NESTED_DATA_BP | \
+ CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE | \
+ CPUID_8000_0021_EAX_KERNEL_FEATURES)
+
FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
[FEAT_1_EDX] = {
.type = CPUID_FEATURE_WORD,
@@ -1136,6 +1151,25 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
},
.tcg_features = TCG_7_1_EAX_FEATURES,
},
+ [FEAT_7_1_ECX] = {
+ .type = CPUID_FEATURE_WORD,
+ .feat_names = {
+ NULL, NULL, NULL, NULL,
+ NULL, "msr-imm", NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid = {
+ .eax = 7,
+ .needs_ecx = true, .ecx = 1,
+ .reg = R_ECX,
+ },
+ .tcg_features = TCG_7_1_ECX_FEATURES,
+ },
[FEAT_7_1_EDX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
@@ -1239,17 +1273,17 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
[FEAT_8000_0021_EAX] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
- "no-nested-data-bp", NULL, "lfence-always-serializing", NULL,
+ "no-nested-data-bp", "fs-gs-base-ns", "lfence-always-serializing", NULL,
NULL, NULL, "null-sel-clr-base", NULL,
"auto-ibrs", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
+ "prefetchi", NULL, NULL, NULL,
"eraps", NULL, NULL, "sbpb",
"ibpb-brtype", "srso-no", "srso-user-kernel-no", NULL,
},
.cpuid = { .eax = 0x80000021, .reg = R_EAX, },
- .tcg_features = 0,
+ .tcg_features = TCG_8000_0021_EAX_FEATURES,
.unmigratable_flags = 0,
},
[FEAT_8000_0021_EBX] = {
@@ -1372,6 +1406,14 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
"bhi-no", NULL, NULL, NULL,
"pbrsb-no", NULL, "gds-no", "rfds-no",
"rfds-clear", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, "its-no", NULL,
},
.msr = {
.index = MSR_IA32_ARCH_CAPABILITIES,
@@ -1656,14 +1698,21 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
},
};
-typedef struct FeatureMask {
- FeatureWord index;
- uint64_t mask;
-} FeatureMask;
+bool is_feature_word_cpuid(uint32_t feature, uint32_t index, int reg)
+{
+ FeatureWordInfo *wi;
+ FeatureWord w;
-typedef struct FeatureDep {
- FeatureMask from, to;
-} FeatureDep;
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ wi = &feature_word_info[w];
+ if (wi->type == CPUID_FEATURE_WORD && wi->cpuid.eax == feature &&
+ (!wi->cpuid.needs_ecx || wi->cpuid.ecx == index) &&
+ wi->cpuid.reg == reg) {
+ return true;
+ }
+ }
+ return false;
+}
static FeatureDep feature_dependencies[] = {
{
@@ -1775,10 +1824,6 @@ static FeatureDep feature_dependencies[] = {
.to = { FEAT_7_1_EAX, CPUID_7_1_EAX_FRED },
},
{
- .from = { FEAT_7_1_EAX, CPUID_7_1_EAX_WRMSRNS },
- .to = { FEAT_7_1_EAX, CPUID_7_1_EAX_FRED },
- },
- {
.from = { FEAT_7_0_EBX, CPUID_7_0_EBX_SGX },
.to = { FEAT_7_0_ECX, CPUID_7_0_ECX_SGX_LC },
},
@@ -1833,9 +1878,6 @@ static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
};
#undef REGISTER
-/* CPUID feature bits available in XSS */
-#define CPUID_XSTATE_XSS_MASK (XSTATE_ARCH_LBR_MASK)
-
ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
[XSTATE_FP_BIT] = {
/* x87 FP state component is always enabled if XSAVE is supported */
@@ -2185,6 +2227,60 @@ static CPUCaches epyc_v4_cache_info = {
},
};
+static CPUCaches epyc_v5_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 64 * KiB,
+ .line_size = 64,
+ .associativity = 4,
+ .partitions = 1,
+ .sets = 256,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 512 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 8 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 8192,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
static const CPUCaches epyc_rome_cache_info = {
.l1d_cache = &(CPUCacheInfo) {
.type = DATA_CACHE,
@@ -2293,6 +2389,60 @@ static const CPUCaches epyc_rome_v3_cache_info = {
},
};
+static const CPUCaches epyc_rome_v5_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 512 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 16 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 16384,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
static const CPUCaches epyc_milan_cache_info = {
.l1d_cache = &(CPUCacheInfo) {
.type = DATA_CACHE,
@@ -2401,6 +2551,60 @@ static const CPUCaches epyc_milan_v2_cache_info = {
},
};
+static const CPUCaches epyc_milan_v3_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 512 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 32 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 32768,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
static const CPUCaches epyc_genoa_cache_info = {
.l1d_cache = &(CPUCacheInfo) {
.type = DATA_CACHE,
@@ -2455,6 +2659,114 @@ static const CPUCaches epyc_genoa_cache_info = {
},
};
+static const CPUCaches epyc_genoa_v2_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 1 * MiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 2048,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 32 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 32768,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
+static const CPUCaches epyc_turin_cache_info = {
+ .l1d_cache = &(CPUCacheInfo) {
+ .type = DATA_CACHE,
+ .level = 1,
+ .size = 48 * KiB,
+ .line_size = 64,
+ .associativity = 12,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l1i_cache = &(CPUCacheInfo) {
+ .type = INSTRUCTION_CACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .line_size = 64,
+ .associativity = 8,
+ .partitions = 1,
+ .sets = 64,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l2_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 1 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 1024,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
+ },
+ .l3_cache = &(CPUCacheInfo) {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 32 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .partitions = 1,
+ .sets = 32768,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .no_invd_sharing = true,
+ .complex_indexing = false,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
+ },
+};
+
/* The following VMX features are not supported by KVM and are left out in the
* CPU definitions:
*
@@ -5212,6 +5524,25 @@ static const X86CPUDefinition builtin_x86_defs[] = {
},
.cache_info = &epyc_v4_cache_info
},
+ {
+ .version = 5,
+ .props = (PropValue[]) {
+ { "overflow-recov", "on" },
+ { "succor", "on" },
+ { "lbrv", "on" },
+ { "tsc-scale", "on" },
+ { "vmcb-clean", "on" },
+ { "flushbyasid", "on" },
+ { "pause-filter", "on" },
+ { "pfthreshold", "on" },
+ { "v-vmsave-vmload", "on" },
+ { "vgif", "on" },
+ { "model-id",
+ "AMD EPYC-v5 Processor" },
+ { /* end of list */ }
+ },
+ .cache_info = &epyc_v5_cache_info
+ },
{ /* end of list */ }
}
},
@@ -5350,6 +5681,25 @@ static const X86CPUDefinition builtin_x86_defs[] = {
{ /* end of list */ }
},
},
+ {
+ .version = 5,
+ .props = (PropValue[]) {
+ { "overflow-recov", "on" },
+ { "succor", "on" },
+ { "lbrv", "on" },
+ { "tsc-scale", "on" },
+ { "vmcb-clean", "on" },
+ { "flushbyasid", "on" },
+ { "pause-filter", "on" },
+ { "pfthreshold", "on" },
+ { "v-vmsave-vmload", "on" },
+ { "vgif", "on" },
+ { "model-id",
+ "AMD EPYC-Rome-v5 Processor" },
+ { /* end of list */ }
+ },
+ .cache_info = &epyc_rome_v5_cache_info
+ },
{ /* end of list */ }
}
},
@@ -5425,6 +5775,25 @@ static const X86CPUDefinition builtin_x86_defs[] = {
},
.cache_info = &epyc_milan_v2_cache_info
},
+ {
+ .version = 3,
+ .props = (PropValue[]) {
+ { "overflow-recov", "on" },
+ { "succor", "on" },
+ { "lbrv", "on" },
+ { "tsc-scale", "on" },
+ { "vmcb-clean", "on" },
+ { "flushbyasid", "on" },
+ { "pause-filter", "on" },
+ { "pfthreshold", "on" },
+ { "v-vmsave-vmload", "on" },
+ { "vgif", "on" },
+ { "model-id",
+ "AMD EPYC-Milan-v3 Processor" },
+ { /* end of list */ }
+ },
+ .cache_info = &epyc_milan_v3_cache_info
+ },
{ /* end of list */ }
}
},
@@ -5499,6 +5868,31 @@ static const X86CPUDefinition builtin_x86_defs[] = {
.xlevel = 0x80000022,
.model_id = "AMD EPYC-Genoa Processor",
.cache_info = &epyc_genoa_cache_info,
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
+ {
+ .version = 2,
+ .props = (PropValue[]) {
+ { "overflow-recov", "on" },
+ { "succor", "on" },
+ { "lbrv", "on" },
+ { "tsc-scale", "on" },
+ { "vmcb-clean", "on" },
+ { "flushbyasid", "on" },
+ { "pause-filter", "on" },
+ { "pfthreshold", "on" },
+ { "v-vmsave-vmload", "on" },
+ { "vgif", "on" },
+ { "fs-gs-base-ns", "on" },
+ { "perfmon-v2", "on" },
+ { "model-id",
+ "AMD EPYC-Genoa-v2 Processor" },
+ { /* end of list */ }
+ },
+ .cache_info = &epyc_genoa_v2_cache_info
+ },
+ { /* end of list */ }
+ }
},
{
.name = "YongFeng",
@@ -5636,6 +6030,89 @@ static const X86CPUDefinition builtin_x86_defs[] = {
{ /* end of list */ }
}
},
+ {
+ .name = "EPYC-Turin",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 26,
+ .model = 0,
+ .stepping = 0,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
+ CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
+ CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_PCID | CPUID_EXT_CX16 | CPUID_EXT_FMA |
+ CPUID_EXT_SSSE3 | CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ |
+ CPUID_EXT_SSE3,
+ .features[FEAT_1_EDX] =
+ CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
+ CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
+ CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
+ CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
+ CPUID_VME | CPUID_FP87,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
+ CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
+ CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_AVX512F |
+ CPUID_7_0_EBX_AVX512DQ | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_AVX512IFMA |
+ CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB |
+ CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_SHA_NI |
+ CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL,
+ .features[FEAT_7_0_ECX] =
+ CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
+ CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
+ CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
+ CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
+ CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57 |
+ CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_MOVDIRI |
+ CPUID_7_0_ECX_MOVDIR64B,
+ .features[FEAT_7_0_EDX] =
+ CPUID_7_0_EDX_FSRM | CPUID_7_0_EDX_AVX512_VP2INTERSECT,
+ .features[FEAT_7_1_EAX] =
+ CPUID_7_1_EAX_AVX_VNNI | CPUID_7_1_EAX_AVX512_BF16,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
+ CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
+ CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
+ CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
+ CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0007_EBX] =
+ CPUID_8000_0007_EBX_OVERFLOW_RECOV | CPUID_8000_0007_EBX_SUCCOR,
+ .features[FEAT_8000_0008_EBX] =
+ CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR |
+ CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB |
+ CPUID_8000_0008_EBX_IBRS | CPUID_8000_0008_EBX_STIBP |
+ CPUID_8000_0008_EBX_STIBP_ALWAYS_ON |
+ CPUID_8000_0008_EBX_AMD_SSBD | CPUID_8000_0008_EBX_AMD_PSFD,
+ .features[FEAT_8000_0021_EAX] =
+ CPUID_8000_0021_EAX_NO_NESTED_DATA_BP |
+ CPUID_8000_0021_EAX_FS_GS_BASE_NS |
+ CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING |
+ CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE |
+ CPUID_8000_0021_EAX_AUTO_IBRS | CPUID_8000_0021_EAX_PREFETCHI |
+ CPUID_8000_0021_EAX_SBPB | CPUID_8000_0021_EAX_IBPB_BRTYPE |
+ CPUID_8000_0021_EAX_SRSO_USER_KERNEL_NO,
+ .features[FEAT_8000_0022_EAX] =
+ CPUID_8000_0022_EAX_PERFMON_V2,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES,
+ .features[FEAT_SVM] =
+ CPUID_SVM_NPT | CPUID_SVM_LBRV | CPUID_SVM_NRIPSAVE |
+ CPUID_SVM_TSCSCALE | CPUID_SVM_VMCBCLEAN | CPUID_SVM_FLUSHASID |
+ CPUID_SVM_PAUSEFILTER | CPUID_SVM_PFTHRESHOLD |
+ CPUID_SVM_V_VMSAVE_VMLOAD | CPUID_SVM_VGIF |
+ CPUID_SVM_VNMI | CPUID_SVM_SVME_ADDR_CHK,
+ .xlevel = 0x80000022,
+ .model_id = "AMD EPYC-Turin Processor",
+ .cache_info = &epyc_turin_cache_info,
+ },
};
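
With this definition in place, the new model can be selected like any other
built-in CPU; a minimal invocation (all options besides -cpu are
illustrative):

    qemu-system-x86_64 -accel kvm -cpu EPYC-Turin -m 4G
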
/*
@@ -5745,7 +6222,7 @@ static const TypeInfo max_x86_cpu_type_info = {
.class_init = max_x86_cpu_class_init,
};
-static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
+static char *feature_word_description(FeatureWordInfo *f)
{
assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
@@ -5754,11 +6231,15 @@ static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
{
const char *reg = get_register_name_32(f->cpuid.reg);
assert(reg);
- return g_strdup_printf("CPUID.%02XH:%s",
- f->cpuid.eax, reg);
+ if (!f->cpuid.needs_ecx) {
+ return g_strdup_printf("CPUID[eax=%02Xh].%s", f->cpuid.eax, reg);
+ } else {
+ return g_strdup_printf("CPUID[eax=%02Xh,ecx=%02Xh].%s",
+ f->cpuid.eax, f->cpuid.ecx, reg);
+ }
}
case MSR_FEATURE_WORD:
- return g_strdup_printf("MSR(%02XH)",
+ return g_strdup_printf("MSR(%02Xh)",
f->msr.index);
}
@@ -5778,12 +6259,13 @@ static bool x86_cpu_have_filtered_features(X86CPU *cpu)
return false;
}
-static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
- const char *verbose_prefix)
+void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix)
{
CPUX86State *env = &cpu->env;
FeatureWordInfo *f = &feature_word_info[w];
int i;
+ g_autofree char *feat_word_str = feature_word_description(f);
if (!cpu->force_features) {
env->features[w] &= ~mask;
@@ -5796,7 +6278,35 @@ static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
for (i = 0; i < 64; ++i) {
if ((1ULL << i) & mask) {
- g_autofree char *feat_word_str = feature_word_description(f, i);
+ warn_report("%s: %s%s%s [bit %d]",
+ verbose_prefix,
+ feat_word_str,
+ f->feat_names[i] ? "." : "",
+ f->feat_names[i] ? f->feat_names[i] : "", i);
+ }
+ }
+}
+
+void mark_forced_on_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix)
+{
+ CPUX86State *env = &cpu->env;
+ FeatureWordInfo *f = &feature_word_info[w];
+ int i;
+
+ if (!cpu->force_features) {
+ env->features[w] |= mask;
+ }
+
+ cpu->forced_on_features[w] |= mask;
+
+ if (!verbose_prefix) {
+ return;
+ }
+
+ for (i = 0; i < 64; ++i) {
+ if ((1ULL << i) & mask) {
+ g_autofree char *feat_word_str = feature_word_description(f);
warn_report("%s: %s%s%s [bit %d]",
verbose_prefix,
feat_word_str,
@@ -6240,7 +6750,7 @@ static void listflags(GList *features)
}
/* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
-static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
+static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b, gpointer d)
{
ObjectClass *class_a = (ObjectClass *)a;
ObjectClass *class_b = (ObjectClass *)b;
@@ -6261,7 +6771,7 @@ static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
static GSList *get_sorted_cpu_model_list(void)
{
GSList *list = object_class_get_list(TYPE_X86_CPU, false);
- list = g_slist_sort(list, x86_cpu_list_compare);
+ list = g_slist_sort_with_data(list, x86_cpu_list_compare, NULL);
return list;
}
@@ -6318,6 +6828,11 @@ static void x86_cpu_list_entry(gpointer data, gpointer user_data)
qemu_printf(" %-20s %s\n", name, desc);
}
+static gint strcmp_wrap(gconstpointer a, gconstpointer b, gpointer d)
+{
+ return strcmp(a, b);
+}
+
/* list available CPU models and flags */
static void x86_cpu_list(void)
{
@@ -6340,7 +6855,7 @@ static void x86_cpu_list(void)
}
}
- names = g_list_sort(names, (GCompareFunc)strcmp);
+ names = g_list_sort_with_data(names, strcmp_wrap, NULL);
qemu_printf("\nRecognized CPUID flags:\n");
listflags(names);
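
The two sort changes above eliminate a function-pointer cast:
(GCompareFunc)strcmp calls strcmp through an incompatible pointer type, which
is undefined behavior in C and faults under control-flow-integrity
instrumentation, and x86_cpu_list_compare is converted to the three-argument
GCompareDataFunc form so both call sites can use the _with_data API. The
before/after at the strcmp call site:

    /* old: g_list_sort(names, (GCompareFunc)strcmp);          UB cast */
    /* new: g_list_sort_with_data(names, strcmp_wrap, NULL);   typed   */
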
@@ -6834,9 +7349,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
if (threads_per_pkg > 1) {
*ebx |= threads_per_pkg << 16;
}
- if (!cpu->enable_pmu) {
- *ecx &= ~CPUID_EXT_PDCM;
- }
break;
case 2:
/* cache info: needed for Pentium Pro compatibility */
@@ -6947,9 +7459,9 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
} else if (count == 1) {
*eax = env->features[FEAT_7_1_EAX];
+ *ecx = env->features[FEAT_7_1_ECX];
*edx = env->features[FEAT_7_1_EDX];
*ebx = 0;
- *ecx = 0;
} else if (count == 2) {
*edx = env->features[FEAT_7_2_EDX];
*eax = 0;
@@ -7018,7 +7530,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
break;
case 0x1F:
/* V2 Extended Topology Enumeration Leaf */
- if (!x86_has_extended_topo(env->avail_cpu_topo)) {
+ if (!x86_has_cpuid_0x1f(cpu)) {
*eax = *ebx = *ecx = *edx = 0;
break;
}
@@ -7826,6 +8338,13 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
}
}
+ if (!cpu->enable_pmu) {
+ mark_unavailable_features(cpu, FEAT_1_ECX,
+ env->user_features[FEAT_1_ECX] & CPUID_EXT_PDCM,
+ "This feature is not available due to PMU being disabled");
+ env->features[FEAT_1_ECX] &= ~CPUID_EXT_PDCM;
+ }
+
for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
FeatureDep *d = &feature_dependencies[i];
if (!(env->features[d->from.index] & d->from.mask)) {
@@ -7854,6 +8373,7 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_7_1_ECX);
x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EDX);
x86_cpu_adjust_feat_level(cpu, FEAT_7_2_EDX);
x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
@@ -7882,7 +8402,7 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
* cpu->vendor_cpuid_only has been unset for compatibility with older
* machine types.
*/
- if (x86_has_extended_topo(env->avail_cpu_topo) &&
+ if (x86_has_cpuid_0x1f(cpu) &&
(IS_INTEL_CPU(env) || !cpu->vendor_cpuid_only)) {
x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
}
@@ -8517,6 +9037,13 @@ static void x86_cpu_post_initfn(Object *obj)
}
accel_cpu_instance_init(CPU(obj));
+
+#ifndef CONFIG_USER_ONLY
+ if (current_machine && current_machine->cgs) {
+ x86_confidential_guest_cpu_instance_init(
+ X86_CONFIDENTIAL_GUEST(current_machine->cgs), (CPU(obj)));
+ }
+#endif
}
static void x86_cpu_init_default_topo(X86CPU *cpu)
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 54bf963..51e1013 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -35,10 +35,6 @@
#define XEN_NR_VIRQS 24
-/* support for self modifying code even if the modified instruction is
- close to the modifying instruction */
-#define TARGET_HAS_PRECISE_SMC
-
#ifdef TARGET_X86_64
#define I386_ELF_MACHINE EM_X86_64
#define ELF_MACHINE_UNAME "x86_64"
@@ -588,6 +584,7 @@ typedef enum X86Seg {
#define XSTATE_OPMASK_BIT 5
#define XSTATE_ZMM_Hi256_BIT 6
#define XSTATE_Hi16_ZMM_BIT 7
+#define XSTATE_PT_BIT 8
#define XSTATE_PKRU_BIT 9
#define XSTATE_ARCH_LBR_BIT 15
#define XSTATE_XTILE_CFG_BIT 17
@@ -601,6 +598,7 @@ typedef enum X86Seg {
#define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT)
#define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT)
#define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT)
+#define XSTATE_PT_MASK (1ULL << XSTATE_PT_BIT)
#define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT)
#define XSTATE_ARCH_LBR_MASK (1ULL << XSTATE_ARCH_LBR_BIT)
#define XSTATE_XTILE_CFG_MASK (1ULL << XSTATE_XTILE_CFG_BIT)
@@ -623,6 +621,11 @@ typedef enum X86Seg {
XSTATE_Hi16_ZMM_MASK | XSTATE_PKRU_MASK | \
XSTATE_XTILE_CFG_MASK | XSTATE_XTILE_DATA_MASK)
+/* CPUID feature bits available in XSS */
+#define CPUID_XSTATE_XSS_MASK (XSTATE_ARCH_LBR_MASK)
+
+#define CPUID_XSTATE_MASK (CPUID_XSTATE_XCR0_MASK | CPUID_XSTATE_XSS_MASK)
+
/* CPUID feature words */
typedef enum FeatureWord {
FEAT_1_EDX, /* CPUID[1].EDX */
@@ -665,12 +668,22 @@ typedef enum FeatureWord {
FEAT_SGX_12_1_EAX, /* CPUID[EAX=0x12,ECX=1].EAX (SGX ATTRIBUTES[31:0]) */
FEAT_XSAVE_XSS_LO, /* CPUID[EAX=0xd,ECX=1].ECX */
FEAT_XSAVE_XSS_HI, /* CPUID[EAX=0xd,ECX=1].EDX */
+ FEAT_7_1_ECX, /* CPUID[EAX=7,ECX=1].ECX */
FEAT_7_1_EDX, /* CPUID[EAX=7,ECX=1].EDX */
FEAT_7_2_EDX, /* CPUID[EAX=7,ECX=2].EDX */
FEAT_24_0_EBX, /* CPUID[EAX=0x24,ECX=0].EBX */
FEATURE_WORDS,
} FeatureWord;
+typedef struct FeatureMask {
+ FeatureWord index;
+ uint64_t mask;
+} FeatureMask;
+
+typedef struct FeatureDep {
+ FeatureMask from, to;
+} FeatureDep;
+
typedef uint64_t FeatureWordArray[FEATURE_WORDS];
uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
@@ -903,6 +916,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_0_ECX_LA57 (1U << 16)
/* Read Processor ID */
#define CPUID_7_0_ECX_RDPID (1U << 22)
+/* KeyLocker */
+#define CPUID_7_0_ECX_KeyLocker (1U << 23)
/* Bus Lock Debug Exception */
#define CPUID_7_0_ECX_BUS_LOCK_DETECT (1U << 24)
/* Cache Line Demote Instruction */
@@ -924,6 +939,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_0_EDX_FSRM (1U << 4)
/* AVX512 Vector Pair Intersection to a Pair of Mask Registers */
#define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8)
+ /* "md_clear" VERW clears CPU buffers */
+#define CPUID_7_0_EDX_MD_CLEAR (1U << 10)
/* SERIALIZE instruction */
#define CPUID_7_0_EDX_SERIALIZE (1U << 14)
/* TSX Suspend Load Address Tracking instruction */
@@ -961,6 +978,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_1_EAX_AVX_VNNI (1U << 4)
/* AVX512 BFloat16 Instruction */
#define CPUID_7_1_EAX_AVX512_BF16 (1U << 5)
+/* Linear address space separation */
+#define CPUID_7_1_EAX_LASS (1U << 6)
/* CMPCCXADD Instructions */
#define CPUID_7_1_EAX_CMPCCXADD (1U << 7)
/* Fast Zero REP MOVS */
@@ -982,6 +1001,9 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
/* Linear Address Masking */
#define CPUID_7_1_EAX_LAM (1U << 26)
+/* The immediate form of MSR access instructions */
+#define CPUID_7_1_ECX_MSR_IMM (1U << 5)
+
/* Support for VPDPB[SU,UU,SS]D[,S] */
#define CPUID_7_1_EDX_AVX_VNNI_INT8 (1U << 4)
/* AVX NE CONVERT Instructions */
@@ -1005,6 +1027,7 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_7_2_EDX_DDPD_U (1U << 3)
/* Indicate bit 10 of the IA32_SPEC_CTRL MSR is supported */
#define CPUID_7_2_EDX_BHI_CTRL (1U << 4)
+
/* Do not exhibit MXCSR Configuration Dependent Timing (MCDT) behavior */
#define CPUID_7_2_EDX_MCDT_NO (1U << 5)
@@ -1074,12 +1097,16 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
/* Processor ignores nested data breakpoints */
#define CPUID_8000_0021_EAX_NO_NESTED_DATA_BP (1U << 0)
+/* WRMSR to FS_BASE, GS_BASE, or KERNEL_GS_BASE is non-serializing */
+#define CPUID_8000_0021_EAX_FS_GS_BASE_NS (1U << 1)
/* LFENCE is always serializing */
#define CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING (1U << 2)
/* Null Selector Clears Base */
#define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6)
/* Automatic IBRS */
#define CPUID_8000_0021_EAX_AUTO_IBRS (1U << 8)
+/* Indicates support for IC prefetch */
+#define CPUID_8000_0021_EAX_PREFETCHI (1U << 20)
/* Enhanced Return Address Predictor Security */
#define CPUID_8000_0021_EAX_ERAPS (1U << 24)
/* Selective Branch Predictor Barrier */
@@ -1104,6 +1131,7 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w);
#define CPUID_XSAVE_XSAVEC (1U << 1)
#define CPUID_XSAVE_XGETBV1 (1U << 2)
#define CPUID_XSAVE_XSAVES (1U << 3)
+#define CPUID_XSAVE_XFD (1U << 4)
#define CPUID_6_EAX_ARAT (1U << 2)
@@ -1745,12 +1773,6 @@ typedef enum TPRAccess {
/* Cache information data structures: */
-enum CacheType {
- DATA_CACHE,
- INSTRUCTION_CACHE,
- UNIFIED_CACHE
-};
-
typedef struct CPUCacheInfo {
enum CacheType type;
uint8_t level;
@@ -1809,11 +1831,6 @@ typedef struct CPUCaches {
CPUCacheInfo *l3_cache;
} CPUCaches;
-typedef struct X86LazyFlags {
- target_ulong result;
- target_ulong auxbits;
-} X86LazyFlags;
-
typedef struct CPUArchState {
/* standard registers */
target_ulong regs[CPU_NB_REGS];
@@ -2106,7 +2123,6 @@ typedef struct CPUArchState {
QemuMutex xen_timers_lock;
#endif
#if defined(CONFIG_HVF)
- X86LazyFlags lflags;
void *emu_mmio_buf;
#endif
@@ -2202,6 +2218,9 @@ struct ArchCPU {
/* Features that were filtered out because of missing host capabilities */
FeatureWordArray filtered_features;
+ /* Features that are forcibly enabled by the underlying hypervisor, e.g., TDX */
+ FeatureWordArray forced_on_features;
+
/* Enable PMU CPUID bits. This can't be enabled by default yet because
* it doesn't have ABI stability guarantees, as it passes all PMU CPUID
* bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
@@ -2249,6 +2268,9 @@ struct ArchCPU {
/* Compatibility bits for old machine types: */
bool enable_cpuid_0xb;
+ /* Force CPUID leaf 0x1f to be exposed */
+ bool force_cpuid_0x1f;
+
/* Enable auto level-increase for all CPUID leaves */
bool full_cpuid_auto_level;
@@ -2509,6 +2531,17 @@ void cpu_set_apic_feature(CPUX86State *env);
void host_cpuid(uint32_t function, uint32_t count,
uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
bool cpu_has_x2apic_feature(CPUX86State *env);
+bool is_feature_word_cpuid(uint32_t feature, uint32_t index, int reg);
+void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix);
+void mark_forced_on_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
+ const char *verbose_prefix);
+
+static inline bool x86_has_cpuid_0x1f(X86CPU *cpu)
+{
+ return cpu->force_cpuid_0x1f ||
+ x86_has_extended_topo(cpu->env.avail_cpu_topo);
+}
/* helper.c */
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
@@ -2603,20 +2636,6 @@ static inline bool is_mmu_index_32(int mmu_index)
#include "hw/i386/apic.h"
#endif
-static inline void cpu_get_tb_cpu_state(CPUX86State *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *flags = env->hflags |
- (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
- if (env->hflags & HF_CS64_MASK) {
- *cs_base = 0;
- *pc = env->eip;
- } else {
- *cs_base = env->segs[R_CS].base;
- *pc = (uint32_t)(*cs_base + env->eip);
- }
-}
-
void do_cpu_init(X86CPU *cpu);
#define MCE_INJECT_BROADCAST 1
diff --git a/target/i386/emulate/x86_decode.c b/target/i386/emulate/x86_decode.c
index 7fee219..2eca398 100644
--- a/target/i386/emulate/x86_decode.c
+++ b/target/i386/emulate/x86_decode.c
@@ -26,7 +26,7 @@
static void decode_invalid(CPUX86State *env, struct x86_decode *decode)
{
- printf("%llx: failed to decode instruction ", env->eip);
+ printf(TARGET_FMT_lx ": failed to decode instruction ", env->eip);
for (int i = 0; i < decode->opcode_len; i++) {
printf("%x ", decode->opcode[i]);
}
@@ -109,8 +109,8 @@ static void decode_modrm_reg(CPUX86State *env, struct x86_decode *decode,
{
op->type = X86_VAR_REG;
op->reg = decode->modrm.reg;
- op->ptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.r,
- decode->operand_size);
+ op->regptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.r,
+ decode->operand_size);
}
static void decode_rax(CPUX86State *env, struct x86_decode *decode,
@@ -119,8 +119,8 @@ static void decode_rax(CPUX86State *env, struct x86_decode *decode,
op->type = X86_VAR_REG;
op->reg = R_EAX;
/* Since reg is always AX, REX prefix has no impact. */
- op->ptr = get_reg_ref(env, op->reg, false, 0,
- decode->operand_size);
+ op->regptr = get_reg_ref(env, op->reg, false, 0,
+ decode->operand_size);
}
static inline void decode_immediate(CPUX86State *env, struct x86_decode *decode,
@@ -262,16 +262,16 @@ static void decode_incgroup(CPUX86State *env, struct x86_decode *decode)
{
decode->op[0].type = X86_VAR_REG;
decode->op[0].reg = decode->opcode[0] - 0x40;
- decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
- decode->rex.b, decode->operand_size);
+ decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
}
static void decode_decgroup(CPUX86State *env, struct x86_decode *decode)
{
decode->op[0].type = X86_VAR_REG;
decode->op[0].reg = decode->opcode[0] - 0x48;
- decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
- decode->rex.b, decode->operand_size);
+ decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
}
static void decode_incgroup2(CPUX86State *env, struct x86_decode *decode)
@@ -287,16 +287,16 @@ static void decode_pushgroup(CPUX86State *env, struct x86_decode *decode)
{
decode->op[0].type = X86_VAR_REG;
decode->op[0].reg = decode->opcode[0] - 0x50;
- decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
- decode->rex.b, decode->operand_size);
+ decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
}
static void decode_popgroup(CPUX86State *env, struct x86_decode *decode)
{
decode->op[0].type = X86_VAR_REG;
decode->op[0].reg = decode->opcode[0] - 0x58;
- decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
- decode->rex.b, decode->operand_size);
+ decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
}
static void decode_jxx(CPUX86State *env, struct x86_decode *decode)
@@ -377,16 +377,16 @@ static void decode_xchgroup(CPUX86State *env, struct x86_decode *decode)
{
decode->op[0].type = X86_VAR_REG;
decode->op[0].reg = decode->opcode[0] - 0x90;
- decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
- decode->rex.b, decode->operand_size);
+ decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
}
static void decode_movgroup(CPUX86State *env, struct x86_decode *decode)
{
decode->op[0].type = X86_VAR_REG;
decode->op[0].reg = decode->opcode[0] - 0xb8;
- decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
- decode->rex.b, decode->operand_size);
+ decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
decode_immediate(env, decode, &decode->op[1], decode->operand_size);
}
@@ -394,15 +394,15 @@ static void fetch_moffs(CPUX86State *env, struct x86_decode *decode,
struct x86_decode_op *op)
{
op->type = X86_VAR_OFFSET;
- op->ptr = decode_bytes(env, decode, decode->addressing_size);
+ op->addr = decode_bytes(env, decode, decode->addressing_size);
}
static void decode_movgroup8(CPUX86State *env, struct x86_decode *decode)
{
decode->op[0].type = X86_VAR_REG;
decode->op[0].reg = decode->opcode[0] - 0xb0;
- decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
- decode->rex.b, decode->operand_size);
+ decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
decode_immediate(env, decode, &decode->op[1], decode->operand_size);
}
@@ -411,8 +411,8 @@ static void decode_rcx(CPUX86State *env, struct x86_decode *decode,
{
op->type = X86_VAR_REG;
op->reg = R_ECX;
- op->ptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.b,
- decode->operand_size);
+ op->regptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.b,
+ decode->operand_size);
}
struct decode_tbl {
@@ -631,8 +631,8 @@ static void decode_bswap(CPUX86State *env, struct x86_decode *decode)
{
decode->op[0].type = X86_VAR_REG;
decode->op[0].reg = decode->opcode[1] - 0xc8;
- decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
- decode->rex.b, decode->operand_size);
+ decode->op[0].regptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
}
static void decode_d9_4(CPUX86State *env, struct x86_decode *decode)
@@ -1408,7 +1408,7 @@ struct decode_tbl _2op_inst[] = {
};
struct decode_x87_tbl invl_inst_x87 = {0x0, 0, 0, 0, 0, false, false, NULL,
- NULL, decode_invalid, 0};
+ NULL, decode_invalid};
struct decode_x87_tbl _x87_inst[] = {
{0xd8, 0, 3, X86_DECODE_CMD_FADD, 10, false, false,
@@ -1456,8 +1456,7 @@ struct decode_x87_tbl _x87_inst[] = {
decode_x87_modrm_st0, NULL, decode_d9_4},
{0xd9, 4, 0, X86_DECODE_CMD_INVL, 4, false, false,
decode_x87_modrm_bytep, NULL, NULL},
- {0xd9, 5, 3, X86_DECODE_CMD_FLDxx, 10, false, false, NULL, NULL, NULL,
- RFLAGS_MASK_NONE},
+ {0xd9, 5, 3, X86_DECODE_CMD_FLDxx, 10, false, false, NULL, NULL, NULL},
{0xd9, 5, 0, X86_DECODE_CMD_FLDCW, 2, false, false,
decode_x87_modrm_bytep, NULL, NULL},
@@ -1478,20 +1477,17 @@ struct decode_x87_tbl _x87_inst[] = {
decode_x87_modrm_st0, NULL},
{0xda, 3, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,
decode_x87_modrm_st0, NULL},
- {0xda, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,
- RFLAGS_MASK_NONE},
+ {0xda, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL},
{0xda, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0,
decode_x87_modrm_intp, NULL},
{0xda, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true, decode_x87_modrm_st0,
decode_decode_x87_modrm_st0, NULL},
{0xda, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0,
decode_x87_modrm_intp, NULL},
- {0xda, 6, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,
- RFLAGS_MASK_NONE},
+ {0xda, 6, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL},
{0xda, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0,
decode_x87_modrm_intp, NULL},
- {0xda, 7, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,
- RFLAGS_MASK_NONE},
+ {0xda, 7, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL},
{0xda, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0,
decode_x87_modrm_intp, NULL},
@@ -1511,8 +1507,7 @@ struct decode_x87_tbl _x87_inst[] = {
decode_x87_modrm_intp, NULL, NULL},
{0xdb, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL,
decode_db_4},
- {0xdb, 4, 0, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,
- RFLAGS_MASK_NONE},
+ {0xdb, 4, 0, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL},
{0xdb, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, false,
decode_x87_modrm_st0, decode_x87_modrm_st0, NULL},
{0xdb, 5, 0, X86_DECODE_CMD_FLD, 10, false, false,
@@ -1661,16 +1656,16 @@ void calc_modrm_operand16(CPUX86State *env, struct x86_decode *decode,
}
calc_addr:
if (X86_DECODE_CMD_LEA == decode->cmd) {
- op->ptr = (uint16_t)ptr;
+ op->addr = (uint16_t)ptr;
} else {
- op->ptr = decode_linear_addr(env, decode, (uint16_t)ptr, seg);
+ op->addr = decode_linear_addr(env, decode, (uint16_t)ptr, seg);
}
}
-target_ulong get_reg_ref(CPUX86State *env, int reg, int rex_present,
+void *get_reg_ref(CPUX86State *env, int reg, int rex_present,
int is_extended, int size)
{
- target_ulong ptr = 0;
+ void *ptr = NULL;
if (is_extended) {
reg |= R_R8;
@@ -1679,13 +1674,13 @@ target_ulong get_reg_ref(CPUX86State *env, int reg, int rex_present,
switch (size) {
case 1:
if (is_extended || reg < 4 || rex_present) {
- ptr = (target_ulong)&RL(env, reg);
+ ptr = &RL(env, reg);
} else {
- ptr = (target_ulong)&RH(env, reg - 4);
+ ptr = &RH(env, reg - 4);
}
break;
default:
- ptr = (target_ulong)&RRX(env, reg);
+ ptr = &RRX(env, reg);
break;
}
return ptr;
@@ -1696,7 +1691,7 @@ target_ulong get_reg_val(CPUX86State *env, int reg, int rex_present,
{
target_ulong val = 0;
memcpy(&val,
- (void *)get_reg_ref(env, reg, rex_present, is_extended, size),
+ get_reg_ref(env, reg, rex_present, is_extended, size),
size);
return val;
}
@@ -1763,9 +1758,9 @@ void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode,
}
if (X86_DECODE_CMD_LEA == decode->cmd) {
- op->ptr = (uint32_t)ptr;
+ op->addr = (uint32_t)ptr;
} else {
- op->ptr = decode_linear_addr(env, decode, (uint32_t)ptr, seg);
+ op->addr = decode_linear_addr(env, decode, (uint32_t)ptr, seg);
}
}
@@ -1793,9 +1788,9 @@ void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode,
}
if (X86_DECODE_CMD_LEA == decode->cmd) {
- op->ptr = ptr;
+ op->addr = ptr;
} else {
- op->ptr = decode_linear_addr(env, decode, ptr, seg);
+ op->addr = decode_linear_addr(env, decode, ptr, seg);
}
}
@@ -1806,8 +1801,8 @@ void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,
if (3 == decode->modrm.mod) {
op->reg = decode->modrm.reg;
op->type = X86_VAR_REG;
- op->ptr = get_reg_ref(env, decode->modrm.rm, decode->rex.rex,
- decode->rex.b, decode->operand_size);
+ op->regptr = get_reg_ref(env, decode->modrm.rm, decode->rex.rex,
+ decode->rex.b, decode->operand_size);
return;
}
diff --git a/target/i386/emulate/x86_decode.h b/target/i386/emulate/x86_decode.h
index 87cc728..927645a 100644
--- a/target/i386/emulate/x86_decode.h
+++ b/target/i386/emulate/x86_decode.h
@@ -266,7 +266,10 @@ typedef struct x86_decode_op {
int reg;
target_ulong val;
- target_ulong ptr;
+ union {
+ target_ulong addr;
+ void *regptr;
+ };
} x86_decode_op;
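
A minimal sketch of the intended use (illustrative, built from the helpers changed elsewhere in this patch): the union is discriminated by the operand's type field, so register operands carry a host pointer while memory operands carry a guest linear address:

    struct x86_decode_op op = { .type = X86_VAR_REG };
    /* host pointer into env->regs[], per the new get_reg_ref() */
    op.regptr = get_reg_ref(env, R_EAX, false, 0, 4);
    /* read_val_ext() now dispatches on op.type: regptr for X86_VAR_REG,
     * addr (a guest linear address) otherwise */
    target_ulong val = read_val_ext(env, &op, 4);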
typedef struct x86_decode {
@@ -301,8 +304,8 @@ uint64_t sign(uint64_t val, int size);
uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode);
-target_ulong get_reg_ref(CPUX86State *env, int reg, int rex_present,
- int is_extended, int size);
+void *get_reg_ref(CPUX86State *env, int reg, int rex_present,
+ int is_extended, int size);
target_ulong get_reg_val(CPUX86State *env, int reg, int rex_present,
int is_extended, int size);
void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,
diff --git a/target/i386/emulate/x86_emu.c b/target/i386/emulate/x86_emu.c
index 26a4876..db7a7f7 100644
--- a/target/i386/emulate/x86_emu.c
+++ b/target/i386/emulate/x86_emu.c
@@ -31,8 +31,8 @@
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA B 02110-1301 USA
+// License along with this library; if not, see
+// <https://www.gnu.org/licenses/>.
/////////////////////////////////////////////////////////////////////////
#include "qemu/osdep.h"
@@ -52,7 +52,7 @@
uint8_t v2 = (uint8_t)decode->op[1].val; \
uint8_t diff = v1 cmd v2; \
if (save_res) { \
- write_val_ext(env, decode->op[0].ptr, diff, 1); \
+ write_val_ext(env, &decode->op[0], diff, 1); \
} \
FLAGS_FUNC##8(env, v1, v2, diff); \
break; \
@@ -63,7 +63,7 @@
uint16_t v2 = (uint16_t)decode->op[1].val; \
uint16_t diff = v1 cmd v2; \
if (save_res) { \
- write_val_ext(env, decode->op[0].ptr, diff, 2); \
+ write_val_ext(env, &decode->op[0], diff, 2); \
} \
FLAGS_FUNC##16(env, v1, v2, diff); \
break; \
@@ -74,7 +74,7 @@
uint32_t v2 = (uint32_t)decode->op[1].val; \
uint32_t diff = v1 cmd v2; \
if (save_res) { \
- write_val_ext(env, decode->op[0].ptr, diff, 4); \
+ write_val_ext(env, &decode->op[0], diff, 4); \
} \
FLAGS_FUNC##32(env, v1, v2, diff); \
break; \
@@ -121,7 +121,7 @@ void write_reg(CPUX86State *env, int reg, target_ulong val, int size)
}
}
-target_ulong read_val_from_reg(target_ulong reg_ptr, int size)
+target_ulong read_val_from_reg(void *reg_ptr, int size)
{
target_ulong val;
@@ -144,7 +144,7 @@ target_ulong read_val_from_reg(target_ulong reg_ptr, int size)
return val;
}
-void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size)
+void write_val_to_reg(void *reg_ptr, target_ulong val, int size)
{
switch (size) {
case 1:
@@ -164,18 +164,18 @@ void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size)
}
}
-static bool is_host_reg(CPUX86State *env, target_ulong ptr)
+static void write_val_to_mem(CPUX86State *env, target_ulong ptr, target_ulong val, int size)
{
- return (ptr - (target_ulong)&env->regs[0]) < sizeof(env->regs);
+ emul_ops->write_mem(env_cpu(env), &val, ptr, size);
}
-void write_val_ext(CPUX86State *env, target_ulong ptr, target_ulong val, int size)
+void write_val_ext(CPUX86State *env, struct x86_decode_op *decode, target_ulong val, int size)
{
- if (is_host_reg(env, ptr)) {
- write_val_to_reg(ptr, val, size);
- return;
+ if (decode->type == X86_VAR_REG) {
+ write_val_to_reg(decode->regptr, val, size);
+ } else {
+ write_val_to_mem(env, decode->addr, val, size);
}
- emul_ops->write_mem(env_cpu(env), &val, ptr, size);
}
uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes)
@@ -185,15 +185,11 @@ uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes)
}
-target_ulong read_val_ext(CPUX86State *env, target_ulong ptr, int size)
+static target_ulong read_val_from_mem(CPUX86State *env, target_long ptr, int size)
{
target_ulong val;
uint8_t *mmio_ptr;
- if (is_host_reg(env, ptr)) {
- return read_val_from_reg(ptr, size);
- }
-
mmio_ptr = read_mmio(env, ptr, size);
switch (size) {
case 1:
@@ -215,6 +211,15 @@ target_ulong read_val_ext(CPUX86State *env, target_ulong ptr, int size)
return val;
}
+target_ulong read_val_ext(CPUX86State *env, struct x86_decode_op *decode, int size)
+{
+ if (decode->type == X86_VAR_REG) {
+ return read_val_from_reg(decode->regptr, size);
+ } else {
+ return read_val_from_mem(env, decode->addr, size);
+ }
+}
+
static void fetch_operands(CPUX86State *env, struct x86_decode *decode,
int n, bool val_op0, bool val_op1, bool val_op2)
{
@@ -226,25 +231,25 @@ static void fetch_operands(CPUX86State *env, struct x86_decode *decode,
case X86_VAR_IMMEDIATE:
break;
case X86_VAR_REG:
- VM_PANIC_ON(!decode->op[i].ptr);
+ VM_PANIC_ON(!decode->op[i].regptr);
if (calc_val[i]) {
- decode->op[i].val = read_val_from_reg(decode->op[i].ptr,
+ decode->op[i].val = read_val_from_reg(decode->op[i].regptr,
decode->operand_size);
}
break;
case X86_VAR_RM:
calc_modrm_operand(env, decode, &decode->op[i]);
if (calc_val[i]) {
- decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
+ decode->op[i].val = read_val_ext(env, &decode->op[i],
decode->operand_size);
}
break;
case X86_VAR_OFFSET:
- decode->op[i].ptr = decode_linear_addr(env, decode,
- decode->op[i].ptr,
- R_DS);
+ decode->op[i].addr = decode_linear_addr(env, decode,
+ decode->op[i].addr,
+ R_DS);
if (calc_val[i]) {
- decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
+ decode->op[i].val = read_val_ext(env, &decode->op[i],
decode->operand_size);
}
break;
@@ -257,7 +262,7 @@ static void fetch_operands(CPUX86State *env, struct x86_decode *decode,
static void exec_mov(CPUX86State *env, struct x86_decode *decode)
{
fetch_operands(env, decode, 2, false, true, false);
- write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
+ write_val_ext(env, &decode->op[0], decode->op[1].val,
decode->operand_size);
env->eip += decode->len;
@@ -312,7 +317,7 @@ static void exec_neg(CPUX86State *env, struct x86_decode *decode)
fetch_operands(env, decode, 2, true, true, false);
val = 0 - sign(decode->op[1].val, decode->operand_size);
- write_val_ext(env, decode->op[1].ptr, val, decode->operand_size);
+ write_val_ext(env, &decode->op[1], val, decode->operand_size);
if (4 == decode->operand_size) {
SET_FLAGS_OSZAPC_SUB32(env, 0, 0 - val, val);
@@ -363,7 +368,7 @@ static void exec_not(CPUX86State *env, struct x86_decode *decode)
{
fetch_operands(env, decode, 1, true, false, false);
- write_val_ext(env, decode->op[0].ptr, ~decode->op[0].val,
+ write_val_ext(env, &decode->op[0], ~decode->op[0].val,
decode->operand_size);
env->eip += decode->len;
}
@@ -382,8 +387,8 @@ void exec_movzx(CPUX86State *env, struct x86_decode *decode)
}
decode->operand_size = src_op_size;
calc_modrm_operand(env, decode, &decode->op[1]);
- decode->op[1].val = read_val_ext(env, decode->op[1].ptr, src_op_size);
- write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);
+ decode->op[1].val = read_val_ext(env, &decode->op[1], src_op_size);
+ write_val_ext(env, &decode->op[0], decode->op[1].val, op_size);
env->eip += decode->len;
}
@@ -469,10 +474,10 @@ static inline void string_rep(CPUX86State *env, struct x86_decode *decode,
while (rcx--) {
func(env, decode);
write_reg(env, R_ECX, rcx, decode->addressing_size);
- if ((PREFIX_REP == rep) && !get_ZF(env)) {
+ if ((PREFIX_REP == rep) && !env->cc_dst) {
break;
}
- if ((PREFIX_REPN == rep) && get_ZF(env)) {
+ if ((PREFIX_REPN == rep) && env->cc_dst) {
break;
}
}
@@ -535,8 +540,8 @@ static void exec_movs_single(CPUX86State *env, struct x86_decode *decode)
dst_addr = linear_addr_size(env_cpu(env), RDI(env),
decode->addressing_size, R_ES);
- val = read_val_ext(env, src_addr, decode->operand_size);
- write_val_ext(env, dst_addr, val, decode->operand_size);
+ val = read_val_from_mem(env, src_addr, decode->operand_size);
+ write_val_to_mem(env, dst_addr, val, decode->operand_size);
string_increment_reg(env, R_ESI, decode);
string_increment_reg(env, R_EDI, decode);
@@ -563,9 +568,9 @@ static void exec_cmps_single(CPUX86State *env, struct x86_decode *decode)
decode->addressing_size, R_ES);
decode->op[0].type = X86_VAR_IMMEDIATE;
- decode->op[0].val = read_val_ext(env, src_addr, decode->operand_size);
+ decode->op[0].val = read_val_from_mem(env, src_addr, decode->operand_size);
decode->op[1].type = X86_VAR_IMMEDIATE;
- decode->op[1].val = read_val_ext(env, dst_addr, decode->operand_size);
+ decode->op[1].val = read_val_from_mem(env, dst_addr, decode->operand_size);
EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
@@ -697,15 +702,15 @@ static void do_bt(CPUX86State *env, struct x86_decode *decode, int flag)
if (decode->op[0].type != X86_VAR_REG) {
if (4 == decode->operand_size) {
displacement = ((int32_t) (decode->op[1].val & 0xffffffe0)) / 32;
- decode->op[0].ptr += 4 * displacement;
+ decode->op[0].addr += 4 * displacement;
} else if (2 == decode->operand_size) {
displacement = ((int16_t) (decode->op[1].val & 0xfff0)) / 16;
- decode->op[0].ptr += 2 * displacement;
+ decode->op[0].addr += 2 * displacement;
} else {
VM_PANIC("bt 64bit\n");
}
}
- decode->op[0].val = read_val_ext(env, decode->op[0].ptr,
+ decode->op[0].val = read_val_ext(env, &decode->op[0],
decode->operand_size);
cf = (decode->op[0].val >> index) & 0x01;
@@ -723,7 +728,7 @@ static void do_bt(CPUX86State *env, struct x86_decode *decode, int flag)
decode->op[0].val &= ~(1u << index);
break;
}
- write_val_ext(env, decode->op[0].ptr, decode->op[0].val,
+ write_val_ext(env, &decode->op[0], decode->op[0].val,
decode->operand_size);
set_CF(env, cf);
}
@@ -775,7 +780,7 @@ void exec_shl(CPUX86State *env, struct x86_decode *decode)
of = cf ^ (res >> 7);
}
- write_val_ext(env, decode->op[0].ptr, res, 1);
+ write_val_ext(env, &decode->op[0], res, 1);
SET_FLAGS_OSZAPC_LOGIC8(env, 0, 0, res);
SET_FLAGS_OxxxxC(env, of, cf);
break;
@@ -791,7 +796,7 @@ void exec_shl(CPUX86State *env, struct x86_decode *decode)
of = cf ^ (res >> 15); /* of = cf ^ result15 */
}
- write_val_ext(env, decode->op[0].ptr, res, 2);
+ write_val_ext(env, &decode->op[0], res, 2);
SET_FLAGS_OSZAPC_LOGIC16(env, 0, 0, res);
SET_FLAGS_OxxxxC(env, of, cf);
break;
@@ -800,7 +805,7 @@ void exec_shl(CPUX86State *env, struct x86_decode *decode)
{
uint32_t res = decode->op[0].val << count;
- write_val_ext(env, decode->op[0].ptr, res, 4);
+ write_val_ext(env, &decode->op[0], res, 4);
SET_FLAGS_OSZAPC_LOGIC32(env, 0, 0, res);
cf = (decode->op[0].val >> (32 - count)) & 0x1;
of = cf ^ (res >> 31); /* of = cf ^ result31 */
@@ -831,10 +836,10 @@ void exec_movsx(CPUX86State *env, struct x86_decode *decode)
decode->operand_size = src_op_size;
calc_modrm_operand(env, decode, &decode->op[1]);
- decode->op[1].val = sign(read_val_ext(env, decode->op[1].ptr, src_op_size),
+ decode->op[1].val = sign(read_val_ext(env, &decode->op[1], src_op_size),
src_op_size);
- write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);
+ write_val_ext(env, &decode->op[0], decode->op[1].val, op_size);
env->eip += decode->len;
}
@@ -862,7 +867,7 @@ void exec_ror(CPUX86State *env, struct x86_decode *decode)
count &= 0x7; /* use only bottom 3 bits */
res = ((uint8_t)decode->op[0].val >> count) |
((uint8_t)decode->op[0].val << (8 - count));
- write_val_ext(env, decode->op[0].ptr, res, 1);
+ write_val_ext(env, &decode->op[0], res, 1);
bit6 = (res >> 6) & 1;
bit7 = (res >> 7) & 1;
/* set eflags: ROR count affects the following flags: C, O */
@@ -886,7 +891,7 @@ void exec_ror(CPUX86State *env, struct x86_decode *decode)
count &= 0x0f; /* use only 4 LSB's */
res = ((uint16_t)decode->op[0].val >> count) |
((uint16_t)decode->op[0].val << (16 - count));
- write_val_ext(env, decode->op[0].ptr, res, 2);
+ write_val_ext(env, &decode->op[0], res, 2);
bit14 = (res >> 14) & 1;
bit15 = (res >> 15) & 1;
@@ -904,7 +909,7 @@ void exec_ror(CPUX86State *env, struct x86_decode *decode)
if (count) {
res = ((uint32_t)decode->op[0].val >> count) |
((uint32_t)decode->op[0].val << (32 - count));
- write_val_ext(env, decode->op[0].ptr, res, 4);
+ write_val_ext(env, &decode->op[0], res, 4);
bit31 = (res >> 31) & 1;
bit30 = (res >> 30) & 1;
@@ -941,7 +946,7 @@ void exec_rol(CPUX86State *env, struct x86_decode *decode)
res = ((uint8_t)decode->op[0].val << count) |
((uint8_t)decode->op[0].val >> (8 - count));
- write_val_ext(env, decode->op[0].ptr, res, 1);
+ write_val_ext(env, &decode->op[0], res, 1);
/* set eflags:
* ROL count affects the following flags: C, O
*/
@@ -968,7 +973,7 @@ void exec_rol(CPUX86State *env, struct x86_decode *decode)
res = ((uint16_t)decode->op[0].val << count) |
((uint16_t)decode->op[0].val >> (16 - count));
- write_val_ext(env, decode->op[0].ptr, res, 2);
+ write_val_ext(env, &decode->op[0], res, 2);
bit0 = (res & 0x1);
bit15 = (res >> 15);
/* of = cf ^ result15 */
@@ -986,7 +991,7 @@ void exec_rol(CPUX86State *env, struct x86_decode *decode)
res = ((uint32_t)decode->op[0].val << count) |
((uint32_t)decode->op[0].val >> (32 - count));
- write_val_ext(env, decode->op[0].ptr, res, 4);
+ write_val_ext(env, &decode->op[0], res, 4);
bit0 = (res & 0x1);
bit31 = (res >> 31);
/* of = cf ^ result31 */
@@ -1024,7 +1029,7 @@ void exec_rcl(CPUX86State *env, struct x86_decode *decode)
(op1_8 >> (9 - count));
}
- write_val_ext(env, decode->op[0].ptr, res, 1);
+ write_val_ext(env, &decode->op[0], res, 1);
cf = (op1_8 >> (8 - count)) & 0x01;
of = cf ^ (res >> 7); /* of = cf ^ result7 */
@@ -1050,7 +1055,7 @@ void exec_rcl(CPUX86State *env, struct x86_decode *decode)
(op1_16 >> (17 - count));
}
- write_val_ext(env, decode->op[0].ptr, res, 2);
+ write_val_ext(env, &decode->op[0], res, 2);
cf = (op1_16 >> (16 - count)) & 0x1;
of = cf ^ (res >> 15); /* of = cf ^ result15 */
@@ -1073,7 +1078,7 @@ void exec_rcl(CPUX86State *env, struct x86_decode *decode)
(op1_32 >> (33 - count));
}
- write_val_ext(env, decode->op[0].ptr, res, 4);
+ write_val_ext(env, &decode->op[0], res, 4);
cf = (op1_32 >> (32 - count)) & 0x1;
of = cf ^ (res >> 31); /* of = cf ^ result31 */
@@ -1105,7 +1110,7 @@ void exec_rcr(CPUX86State *env, struct x86_decode *decode)
res = (op1_8 >> count) | (get_CF(env) << (8 - count)) |
(op1_8 << (9 - count));
- write_val_ext(env, decode->op[0].ptr, res, 1);
+ write_val_ext(env, &decode->op[0], res, 1);
cf = (op1_8 >> (count - 1)) & 0x1;
of = (((res << 1) ^ res) >> 7) & 0x1; /* of = result6 ^ result7 */
@@ -1124,7 +1129,7 @@ void exec_rcr(CPUX86State *env, struct x86_decode *decode)
res = (op1_16 >> count) | (get_CF(env) << (16 - count)) |
(op1_16 << (17 - count));
- write_val_ext(env, decode->op[0].ptr, res, 2);
+ write_val_ext(env, &decode->op[0], res, 2);
cf = (op1_16 >> (count - 1)) & 0x1;
of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1; /* of = result15 ^
@@ -1148,7 +1153,7 @@ void exec_rcr(CPUX86State *env, struct x86_decode *decode)
(op1_32 << (33 - count));
}
- write_val_ext(env, decode->op[0].ptr, res, 4);
+ write_val_ext(env, &decode->op[0], res, 4);
cf = (op1_32 >> (count - 1)) & 0x1;
of = ((res << 1) ^ res) >> 31; /* of = result30 ^ result31 */
@@ -1163,9 +1168,9 @@ static void exec_xchg(CPUX86State *env, struct x86_decode *decode)
{
fetch_operands(env, decode, 2, true, true, false);
- write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
+ write_val_ext(env, &decode->op[0], decode->op[1].val,
decode->operand_size);
- write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
+ write_val_ext(env, &decode->op[1], decode->op[0].val,
decode->operand_size);
env->eip += decode->len;
@@ -1174,7 +1179,7 @@ static void exec_xchg(CPUX86State *env, struct x86_decode *decode)
static void exec_xadd(CPUX86State *env, struct x86_decode *decode)
{
EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
- write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
+ write_val_ext(env, &decode->op[1], decode->op[0].val,
decode->operand_size);
env->eip += decode->len;
@@ -1241,7 +1246,7 @@ static void init_cmd_handler(void)
bool exec_instruction(CPUX86State *env, struct x86_decode *ins)
{
if (!_cmd_handler[ins->cmd].handler) {
- printf("Unimplemented handler (%llx) for %d (%x %x) \n", env->eip,
+ printf("Unimplemented handler (" TARGET_FMT_lx ") for %d (%x %x) \n", env->eip,
ins->cmd, ins->opcode[0],
ins->opcode_len > 1 ? ins->opcode[1] : 0);
env->eip += ins->len;
diff --git a/target/i386/emulate/x86_emu.h b/target/i386/emulate/x86_emu.h
index 555b567..a1a9612 100644
--- a/target/i386/emulate/x86_emu.h
+++ b/target/i386/emulate/x86_emu.h
@@ -42,11 +42,11 @@ void x86_emul_raise_exception(CPUX86State *env, int exception_index, int error_c
target_ulong read_reg(CPUX86State *env, int reg, int size);
void write_reg(CPUX86State *env, int reg, target_ulong val, int size);
-target_ulong read_val_from_reg(target_ulong reg_ptr, int size);
-void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size);
-void write_val_ext(CPUX86State *env, target_ulong ptr, target_ulong val, int size);
+target_ulong read_val_from_reg(void *reg_ptr, int size);
+void write_val_to_reg(void *reg_ptr, target_ulong val, int size);
+void write_val_ext(CPUX86State *env, struct x86_decode_op *decode, target_ulong val, int size);
uint8_t *read_mmio(CPUX86State *env, target_ulong ptr, int bytes);
-target_ulong read_val_ext(CPUX86State *env, target_ulong ptr, int size);
+target_ulong read_val_ext(CPUX86State *env, struct x86_decode_op *decode, int size);
void exec_movzx(CPUX86State *env, struct x86_decode *decode);
void exec_shl(CPUX86State *env, struct x86_decode *decode);
diff --git a/target/i386/emulate/x86_flags.c b/target/i386/emulate/x86_flags.c
index 84e2736..6592193 100644
--- a/target/i386/emulate/x86_flags.c
+++ b/target/i386/emulate/x86_flags.c
@@ -14,8 +14,8 @@
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA B 02110-1301 USA
+// License along with this library; if not, see
+// <https://www.gnu.org/licenses/>.
/////////////////////////////////////////////////////////////////////////
/*
* flags functions
@@ -29,41 +29,50 @@
#include "x86.h"
-/* this is basically bocsh code */
+/*
+ * The algorithms here are similar to those in Bochs. After an ALU
+ * operation, CC_DST can be used to compute ZF, SF and PF, whereas
+ * CC_SRC is used to compute AF, CF and OF. In reality, SF and PF are the
+ * XOR of the value computed from CC_DST and the value found in bits 7 and 2
+ * of CC_SRC; this way the same logic can be used to compute the flags
+ * both before and after an ALU operation.
+ *
+ * Compared to the TCG CC_OP codes, this avoids conditionals when converting
+ * to and from the RFLAGS representation.
+ */
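
To make the division of labor concrete, here is a toy model of the state described above (a sketch assuming a 64-bit target_ulong; names are illustrative and mirror the accessors added further down in this file):

    #include <stdint.h>
    #include <stdbool.h>

    typedef struct { uint64_t cc_dst, cc_src; } lazy_flags;

    /* ZF comes from the sign-extended result alone. */
    static bool lf_zf(const lazy_flags *lf) { return lf->cc_dst == 0; }

    /* SF = sign bit of the result XOR the delta kept in bit 7 of cc_src,
     * mirroring get_SF() below. */
    static bool lf_sf(const lazy_flags *lf)
    {
        return (((lf->cc_dst >> 63) ^ (lf->cc_src >> 7)) & 1) != 0;
    }

    /* CF = top bit of cc_src, mirroring get_CF() below. */
    static bool lf_cf(const lazy_flags *lf)
    {
        return (int64_t)lf->cc_src < 0;
    }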
-#define LF_SIGN_BIT 31
+#define LF_SIGN_BIT (TARGET_LONG_BITS - 1)
-#define LF_BIT_SD (0) /* lazy Sign Flag Delta */
-#define LF_BIT_AF (3) /* lazy Adjust flag */
-#define LF_BIT_PDB (8) /* lazy Parity Delta Byte (8 bits) */
-#define LF_BIT_CF (31) /* lazy Carry Flag */
-#define LF_BIT_PO (30) /* lazy Partial Overflow = CF ^ OF */
+#define LF_BIT_PD (2) /* lazy Parity Delta, same bit as PF */
+#define LF_BIT_AF (3) /* lazy Adjust flag */
+#define LF_BIT_SD (7) /* lazy Sign Flag Delta, same bit as SF */
+#define LF_BIT_CF (TARGET_LONG_BITS - 1) /* lazy Carry Flag */
+#define LF_BIT_PO (TARGET_LONG_BITS - 2) /* lazy Partial Overflow = CF ^ OF */
-#define LF_MASK_SD (0x01 << LF_BIT_SD)
-#define LF_MASK_AF (0x01 << LF_BIT_AF)
-#define LF_MASK_PDB (0xFF << LF_BIT_PDB)
-#define LF_MASK_CF (0x01 << LF_BIT_CF)
-#define LF_MASK_PO (0x01 << LF_BIT_PO)
+#define LF_MASK_PD ((target_ulong)0x01 << LF_BIT_PD)
+#define LF_MASK_AF ((target_ulong)0x01 << LF_BIT_AF)
+#define LF_MASK_SD ((target_ulong)0x01 << LF_BIT_SD)
+#define LF_MASK_CF ((target_ulong)0x01 << LF_BIT_CF)
+#define LF_MASK_PO ((target_ulong)0x01 << LF_BIT_PO)
/* ******************* */
/* OSZAPC */
/* ******************* */
-/* size, carries, result */
+/* use carries to fill in AF, PO and CF, while ensuring PD and SD are clear.
+ * for full-word operations just clear PD and SD; for smaller operand
+ * sizes only keep AF in the low byte and shift the carries left to
+ * place PO and CF in the top two bits.
+ */
#define SET_FLAGS_OSZAPC_SIZE(size, lf_carries, lf_result) { \
- target_ulong temp = ((lf_carries) & (LF_MASK_AF)) | \
- (((lf_carries) >> (size - 2)) << LF_BIT_PO); \
- env->lflags.result = (target_ulong)(int##size##_t)(lf_result); \
- if ((size) == 32) { \
- temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \
- } else if ((size) == 16) { \
- temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \
- } else if ((size) == 8) { \
- temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \
+ env->cc_dst = (target_ulong)(int##size##_t)(lf_result); \
+ target_ulong temp = (lf_carries); \
+ if ((size) == TARGET_LONG_BITS) { \
+ temp = temp & ~(LF_MASK_PD | LF_MASK_SD); \
} else { \
- VM_PANIC("unimplemented"); \
+ temp = (temp & LF_MASK_AF) | (temp << (TARGET_LONG_BITS - (size))); \
} \
- env->lflags.auxbits = (target_ulong)(uint32_t)temp; \
+ env->cc_src = temp; \
}
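
For example, with size == 8 the else branch places the carry bits exactly where the lazy scheme expects them (assuming TARGET_LONG_BITS == 64):

    /* 8-bit carry vector: carry-out of bit 7 at bit 7, carry-in to
     * bit 7 at bit 6, AF carry at bit 3 */
    uint64_t carries = 0xc8;                        /* bits 7, 6, 3 set */
    uint64_t cc_src  = (carries & 0x08) | (carries << 56);
    /* result: AF at bit 3, PO (= CF ^ OF) at bit 62, CF at bit 63 */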
/* carries, result */
@@ -77,23 +86,18 @@
/* ******************* */
/* OSZAP */
/* ******************* */
-/* size, carries, result */
+/* same as setting OSZAPC, but preserve CF and flip PO if the old value of CF
+ * did not match the high bit of lf_carries. */
#define SET_FLAGS_OSZAP_SIZE(size, lf_carries, lf_result) { \
- target_ulong temp = ((lf_carries) & (LF_MASK_AF)) | \
- (((lf_carries) >> (size - 2)) << LF_BIT_PO); \
- if ((size) == 32) { \
- temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \
- } else if ((size) == 16) { \
- temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \
- } else if ((size) == 8) { \
- temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \
+ env->cc_dst = (target_ulong)(int##size##_t)(lf_result); \
+ target_ulong temp = (lf_carries); \
+ if ((size) == TARGET_LONG_BITS) { \
+ temp = (temp & ~(LF_MASK_PD | LF_MASK_SD)); \
} else { \
- VM_PANIC("unimplemented"); \
+ temp = (temp & LF_MASK_AF) | (temp << (TARGET_LONG_BITS - (size))); \
} \
- env->lflags.result = (target_ulong)(int##size##_t)(lf_result); \
- target_ulong delta_c = (env->lflags.auxbits ^ temp) & LF_MASK_CF; \
- delta_c ^= (delta_c >> 1); \
- env->lflags.auxbits = (target_ulong)(uint32_t)(temp ^ delta_c); \
+ target_ulong cf_changed = ((target_long)(env->cc_src ^ temp)) < 0; \
+ env->cc_src = temp ^ (cf_changed * (LF_MASK_PO | LF_MASK_CF)); \
}
/* carries, result */
@@ -104,11 +108,11 @@
#define SET_FLAGS_OSZAP_32(carries, result) \
SET_FLAGS_OSZAP_SIZE(32, carries, result)
-void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf)
+void SET_FLAGS_OxxxxC(CPUX86State *env, bool new_of, bool new_cf)
{
- uint32_t temp_po = new_of ^ new_cf;
- env->lflags.auxbits &= ~(LF_MASK_PO | LF_MASK_CF);
- env->lflags.auxbits |= (temp_po << LF_BIT_PO) | (new_cf << LF_BIT_CF);
+ env->cc_src &= ~(LF_MASK_PO | LF_MASK_CF);
+ env->cc_src |= (-(target_ulong)new_cf << LF_BIT_PO);
+ env->cc_src ^= ((target_ulong)new_of << LF_BIT_PO);
}
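
This keeps the invariant PO == CF ^ OF without any branch; checking all four cases of the update above:

    /* new_cf new_of | bit 63 (CF)  bit 62 (PO = CF ^ OF)
     *    0      0   |     0             0
     *    0      1   |     0             1
     *    1      0   |     1             1
     *    1      1   |     1             0
     */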
void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
@@ -202,104 +206,68 @@ void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t v1, uint8_t v2,
SET_FLAGS_OSZAPC_8(0, diff);
}
-bool get_PF(CPUX86State *env)
-{
- uint32_t temp = (255 & env->lflags.result);
- temp = temp ^ (255 & (env->lflags.auxbits >> LF_BIT_PDB));
- temp = (temp ^ (temp >> 4)) & 0x0F;
- return (0x9669U >> temp) & 1;
-}
-
-void set_PF(CPUX86State *env, bool val)
+static inline uint32_t get_PF(CPUX86State *env)
{
- uint32_t temp = (255 & env->lflags.result) ^ (!val);
- env->lflags.auxbits &= ~(LF_MASK_PDB);
- env->lflags.auxbits |= (temp << LF_BIT_PDB);
+ return ((parity8(env->cc_dst) - 1) ^ env->cc_src) & CC_P;
}
-bool get_OF(CPUX86State *env)
+static inline uint32_t get_OF(CPUX86State *env)
{
- return ((env->lflags.auxbits + (1U << LF_BIT_PO)) >> LF_BIT_CF) & 1;
+ return ((env->cc_src >> (LF_BIT_CF - 11)) + CC_O / 2) & CC_O;
}
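
The get_OF() expression is another carry trick: after the shift, CF sits at bit 11 (the CC_O position) and PO = CF ^ OF at bit 10, and adding CC_O / 2 (bit 10) carries into bit 11 exactly when PO is set, leaving CF ^ PO = OF in bit 11:

    /* CF=1, PO=0 (OF=1): 0x800 + 0x400 = 0x0c00 -> bit 11 set
     * CF=1, PO=1 (OF=0): 0xc00 + 0x400 = 0x1000 -> bit 11 clear */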
bool get_CF(CPUX86State *env)
{
- return (env->lflags.auxbits >> LF_BIT_CF) & 1;
-}
-
-void set_OF(CPUX86State *env, bool val)
-{
- bool old_cf = get_CF(env);
- SET_FLAGS_OxxxxC(env, val, old_cf);
+ return ((target_long)env->cc_src) < 0;
}
void set_CF(CPUX86State *env, bool val)
{
- bool old_of = get_OF(env);
- SET_FLAGS_OxxxxC(env, old_of, val);
+ /* If CF changes, flip PO and CF */
+ target_ulong temp = -(target_ulong)val;
+ target_ulong cf_changed = ((target_long)(env->cc_src ^ temp)) < 0;
+ env->cc_src ^= cf_changed * (LF_MASK_PO | LF_MASK_CF);
}
-bool get_AF(CPUX86State *env)
+static inline uint32_t get_ZF(CPUX86State *env)
{
- return (env->lflags.auxbits >> LF_BIT_AF) & 1;
+ return env->cc_dst ? 0 : CC_Z;
}
-void set_AF(CPUX86State *env, bool val)
+static inline uint32_t get_SF(CPUX86State *env)
{
- env->lflags.auxbits &= ~(LF_MASK_AF);
- env->lflags.auxbits |= val << LF_BIT_AF;
+ return ((env->cc_dst >> (LF_SIGN_BIT - LF_BIT_SD)) ^
+ env->cc_src) & CC_S;
}
-bool get_ZF(CPUX86State *env)
+void lflags_to_rflags(CPUX86State *env)
{
- return !env->lflags.result;
+ env->eflags &= ~(CC_C|CC_P|CC_A|CC_Z|CC_S|CC_O);
+ /* rotate left by one to move carry-out bits into CF and AF */
+ env->eflags |= (
+ (env->cc_src << 1) |
+ (env->cc_src >> (TARGET_LONG_BITS - 1))) & (CC_C | CC_A);
+ env->eflags |= get_SF(env);
+ env->eflags |= get_PF(env);
+ env->eflags |= get_ZF(env);
+ env->eflags |= get_OF(env);
}
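
The rotate-left-by-one is what moves the stored carries into their architectural slots: CF travels from bit 63 of cc_src to bit 0 (CC_C) and the AF carry from bit 3 to bit 4 (CC_A). A sketch, again assuming a 64-bit target_ulong:

    uint64_t rol1 = (cc_src << 1) | (cc_src >> 63);
    /* bit 0 of rol1 = old bit 63 (CF -> CC_C = 0x01),
     * bit 4 of rol1 = old bit 3  (AF -> CC_A = 0x10) */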
-void set_ZF(CPUX86State *env, bool val)
+void rflags_to_lflags(CPUX86State *env)
{
- if (val) {
- env->lflags.auxbits ^=
- (((env->lflags.result >> LF_SIGN_BIT) & 1) << LF_BIT_SD);
- /* merge the parity bits into the Parity Delta Byte */
- uint32_t temp_pdb = (255 & env->lflags.result);
- env->lflags.auxbits ^= (temp_pdb << LF_BIT_PDB);
- /* now zero the .result value */
- env->lflags.result = 0;
- } else {
- env->lflags.result |= (1 << 8);
- }
-}
+ target_ulong cf_af, cf_xor_of;
-bool get_SF(CPUX86State *env)
-{
- return ((env->lflags.result >> LF_SIGN_BIT) ^
- (env->lflags.auxbits >> LF_BIT_SD)) & 1;
-}
+ /* Leave the low byte zero so that parity is always even... */
+ env->cc_dst = !(env->eflags & CC_Z) << 8;
-void set_SF(CPUX86State *env, bool val)
-{
- bool temp_sf = get_SF(env);
- env->lflags.auxbits ^= (temp_sf ^ val) << LF_BIT_SD;
-}
+ /* ... and therefore cc_src always uses opposite polarity. */
+ env->cc_src = CC_P;
+ env->cc_src ^= env->eflags & (CC_S | CC_P);
-void lflags_to_rflags(CPUX86State *env)
-{
- env->eflags &= ~(CC_C|CC_P|CC_A|CC_Z|CC_S|CC_O);
- env->eflags |= get_CF(env) ? CC_C : 0;
- env->eflags |= get_PF(env) ? CC_P : 0;
- env->eflags |= get_AF(env) ? CC_A : 0;
- env->eflags |= get_ZF(env) ? CC_Z : 0;
- env->eflags |= get_SF(env) ? CC_S : 0;
- env->eflags |= get_OF(env) ? CC_O : 0;
-}
+ /* rotate right by one to move CF and AF into the carry-out positions */
+ cf_af = env->eflags & (CC_C | CC_A);
+ env->cc_src |= ((cf_af >> 1) | (cf_af << (TARGET_LONG_BITS - 1)));
-void rflags_to_lflags(CPUX86State *env)
-{
- env->lflags.auxbits = env->lflags.result = 0;
- set_OF(env, env->eflags & CC_O);
- set_SF(env, env->eflags & CC_S);
- set_ZF(env, env->eflags & CC_Z);
- set_AF(env, env->eflags & CC_A);
- set_PF(env, env->eflags & CC_P);
- set_CF(env, env->eflags & CC_C);
+ cf_xor_of = ((env->eflags & (CC_C | CC_O)) + (CC_O - CC_C)) & CC_O;
+ env->cc_src |= -cf_xor_of & LF_MASK_PO;
}
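
The closing cf_xor_of line uses carry propagation once more: with CC_C = 0x001 and CC_O = 0x800, adding CC_O - CC_C = 0x7ff turns a set CF into a carry into bit 11, so bit 11 of the sum is OF ^ CF:

    /* CF=1, OF=0: 0x001 + 0x7ff = 0x0800 -> bit 11 set   (OF ^ CF = 1)
     * CF=0, OF=1: 0x800 + 0x7ff = 0x0fff -> bit 11 set   (OF ^ CF = 1)
     * CF=1, OF=1: 0x801 + 0x7ff = 0x1000 -> bit 11 clear (OF ^ CF = 0) */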
diff --git a/target/i386/emulate/x86_flags.h b/target/i386/emulate/x86_flags.h
index 6c17500..a395c83 100644
--- a/target/i386/emulate/x86_flags.h
+++ b/target/i386/emulate/x86_flags.h
@@ -14,8 +14,8 @@
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
-// License along with this library; if not, write to the Free Software
-// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA B 02110-1301 USA
+// License along with this library; if not, see
+// <https://www.gnu.org/licenses/>.
/////////////////////////////////////////////////////////////////////////
/*
* x86 eflags functions
@@ -28,20 +28,10 @@
void lflags_to_rflags(CPUX86State *env);
void rflags_to_lflags(CPUX86State *env);
-bool get_PF(CPUX86State *env);
-void set_PF(CPUX86State *env, bool val);
bool get_CF(CPUX86State *env);
void set_CF(CPUX86State *env, bool val);
-bool get_AF(CPUX86State *env);
-void set_AF(CPUX86State *env, bool val);
-bool get_ZF(CPUX86State *env);
-void set_ZF(CPUX86State *env, bool val);
-bool get_SF(CPUX86State *env);
-void set_SF(CPUX86State *env, bool val);
-bool get_OF(CPUX86State *env);
-void set_OF(CPUX86State *env, bool val);
-void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf);
+void SET_FLAGS_OxxxxC(CPUX86State *env, bool new_of, bool new_cf);
void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
uint32_t diff);
diff --git a/target/i386/helper.c b/target/i386/helper.c
index 197fdac..e0aaed3 100644
--- a/target/i386/helper.c
+++ b/target/i386/helper.c
@@ -526,7 +526,7 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
static inline target_ulong get_memio_eip(CPUX86State *env)
{
#ifdef CONFIG_TCG
- uint64_t data[TARGET_INSN_START_WORDS];
+ uint64_t data[INSN_START_WORDS];
CPUState *cs = env_cpu(env);
if (!cpu_unwind_state_data(cs, cs->mem_io_pc, data)) {
diff --git a/target/i386/host-cpu.c b/target/i386/host-cpu.c
index a2d3830..7512567 100644
--- a/target/i386/host-cpu.c
+++ b/target/i386/host-cpu.c
@@ -15,7 +15,7 @@
#include "system/system.h"
/* Note: Only safe for use on x86(-64) hosts */
-static uint32_t host_cpu_phys_bits(void)
+uint32_t host_cpu_phys_bits(void)
{
uint32_t eax;
uint32_t host_phys_bits;
diff --git a/target/i386/host-cpu.h b/target/i386/host-cpu.h
index 6a9bc91..b97ec01 100644
--- a/target/i386/host-cpu.h
+++ b/target/i386/host-cpu.h
@@ -10,6 +10,7 @@
#ifndef HOST_CPU_H
#define HOST_CPU_H
+uint32_t host_cpu_phys_bits(void);
void host_cpu_instance_init(X86CPU *cpu);
void host_cpu_max_instance_init(X86CPU *cpu);
bool host_cpu_realizefn(CPUState *cs, Error **errp);
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index 23ebf25..99e37a3 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -76,6 +76,7 @@
#include "qemu/main-loop.h"
#include "qemu/accel.h"
#include "target/i386/cpu.h"
+#include "exec/target_page.h"
static Error *invtsc_mig_blocker;
diff --git a/target/i386/hvf/x86_cpuid.c b/target/i386/hvf/x86_cpuid.c
index fa131b1..0798a0c 100644
--- a/target/i386/hvf/x86_cpuid.c
+++ b/target/i386/hvf/x86_cpuid.c
@@ -73,7 +73,7 @@ uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX |
- CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS;
+ CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_HT;
ecx &= CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSSE3 |
CPUID_EXT_FMA | CPUID_EXT_CX16 | CPUID_EXT_PCID |
CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_MOVBE |
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index c9a3c02..234878c 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -38,6 +38,7 @@
#include "kvm_i386.h"
#include "../confidential-guest.h"
#include "sev.h"
+#include "tdx.h"
#include "xen-emu.h"
#include "hyperv.h"
#include "hyperv-proto.h"
@@ -192,6 +193,7 @@ static const char *vm_type_name[] = {
[KVM_X86_SEV_VM] = "SEV",
[KVM_X86_SEV_ES_VM] = "SEV-ES",
[KVM_X86_SNP_VM] = "SEV-SNP",
+ [KVM_X86_TDX_VM] = "TDX",
};
bool kvm_is_vm_type_supported(int type)
@@ -326,7 +328,7 @@ void kvm_synchronize_all_tsc(void)
{
CPUState *cpu;
- if (kvm_enabled()) {
+ if (kvm_enabled() && !is_tdx_vm()) {
CPU_FOREACH(cpu) {
run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
}
@@ -392,7 +394,7 @@ static bool host_tsx_broken(void)
/* Returns the value for a specific register on the cpuid entry
*/
-static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
+uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
uint32_t ret = 0;
switch (reg) {
@@ -414,9 +416,9 @@ static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
/* Find matching entry for function/index on kvm_cpuid2 struct
*/
-static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
- uint32_t function,
- uint32_t index)
+struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
+ uint32_t function,
+ uint32_t index)
{
int i;
for (i = 0; i < cpuid->nent; ++i) {
@@ -572,7 +574,7 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
}
if (current_machine->cgs) {
- ret = x86_confidential_guest_mask_cpuid_features(
+ ret = x86_confidential_guest_adjust_cpuid_features(
X86_CONFIDENTIAL_GUEST(current_machine->cgs),
function, index, reg, ret);
}
@@ -868,6 +870,15 @@ static int kvm_arch_set_tsc_khz(CPUState *cs)
int r, cur_freq;
bool set_ioctl = false;
+ /*
+     * The TSC of a TD vCPU is immutable: it cannot be set or changed via the
+     * vCPU-scope KVM_SET_TSC_KHZ, only initialized via the VM-scope
+     * KVM_SET_TSC_KHZ before the KVM_TDX_INIT_VM ioctl in tdx_pre_create_vcpu()
+ */
+ if (is_tdx_vm()) {
+ return 0;
+ }
+
if (!env->tsc_khz) {
return 0;
}
@@ -1779,8 +1790,6 @@ static int hyperv_init_vcpu(X86CPU *cpu)
static Error *invtsc_mig_blocker;
-#define KVM_MAX_CPUID_ENTRIES 100
-
static void kvm_init_xsave(CPUX86State *env)
{
if (has_xsave2) {
@@ -1823,9 +1832,8 @@ static void kvm_init_nested_state(CPUX86State *env)
}
}
-static uint32_t kvm_x86_build_cpuid(CPUX86State *env,
- struct kvm_cpuid_entry2 *entries,
- uint32_t cpuid_i)
+uint32_t kvm_x86_build_cpuid(CPUX86State *env, struct kvm_cpuid_entry2 *entries,
+ uint32_t cpuid_i)
{
uint32_t limit, i, j;
uint32_t unused;
@@ -1864,7 +1872,7 @@ static uint32_t kvm_x86_build_cpuid(CPUX86State *env,
break;
}
case 0x1f:
- if (!x86_has_extended_topo(env->avail_cpu_topo)) {
+ if (!x86_has_cpuid_0x1f(env_archcpu(env))) {
cpuid_i--;
break;
}
@@ -2052,6 +2060,15 @@ full:
abort();
}
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ if (is_tdx_vm()) {
+ return tdx_pre_create_vcpu(cpu, errp);
+ }
+
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
struct {
@@ -2076,6 +2093,14 @@ int kvm_arch_init_vcpu(CPUState *cs)
int r;
Error *local_err = NULL;
+ if (current_machine->cgs) {
+ r = x86_confidential_guest_check_features(
+ X86_CONFIDENTIAL_GUEST(current_machine->cgs), cs);
+ if (r < 0) {
+ return r;
+ }
+ }
+
memset(&cpuid_data, 0, sizeof(cpuid_data));
cpuid_i = 0;
@@ -3206,16 +3231,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
Error *local_err = NULL;
/*
- * Initialize SEV context, if required
- *
- * If no memory encryption is requested (ms->cgs == NULL) this is
- * a no-op.
- *
- * It's also a no-op if a non-SEV confidential guest support
- * mechanism is selected. SEV is the only mechanism available to
- * select on x86 at present, so this doesn't arise, but if new
- * mechanisms are supported in future (e.g. TDX), they'll need
- * their own initialization either here or elsewhere.
+ * Initialize confidential guest (SEV/TDX) context, if required
*/
if (ms->cgs) {
ret = confidential_guest_kvm_init(ms->cgs, &local_err);
@@ -3856,32 +3872,34 @@ static void kvm_init_msrs(X86CPU *cpu)
CPUX86State *env = &cpu->env;
kvm_msr_buf_reset(cpu);
- if (has_msr_arch_capabs) {
- kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
- env->features[FEAT_ARCH_CAPABILITIES]);
- }
- if (has_msr_core_capabs) {
- kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
- env->features[FEAT_CORE_CAPABILITY]);
- }
+ if (!is_tdx_vm()) {
+ if (has_msr_arch_capabs) {
+ kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
+ env->features[FEAT_ARCH_CAPABILITIES]);
+ }
- if (has_msr_perf_capabs && cpu->enable_pmu) {
- kvm_msr_entry_add_perf(cpu, env->features);
+ if (has_msr_core_capabs) {
+ kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
+ env->features[FEAT_CORE_CAPABILITY]);
+ }
+
+ if (has_msr_perf_capabs && cpu->enable_pmu) {
+ kvm_msr_entry_add_perf(cpu, env->features);
+ }
+
+ /*
+ * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
+ * all kernels with MSR features should have them.
+ */
+ if (kvm_feature_msrs && cpu_has_vmx(env)) {
+ kvm_msr_entry_add_vmx(cpu, env->features);
+ }
}
if (has_msr_ucode_rev) {
kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev);
}
-
- /*
- * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
- * all kernels with MSR features should have them.
- */
- if (kvm_feature_msrs && cpu_has_vmx(env)) {
- kvm_msr_entry_add_vmx(cpu, env->features);
- }
-
assert(kvm_buf_set_msrs(cpu) == 0);
}
@@ -6000,9 +6018,11 @@ static bool host_supports_vmx(void)
* because private/shared page tracking is already provided through other
* means, these 2 use-cases should be treated as being mutually-exclusive.
*/
-static int kvm_handle_hc_map_gpa_range(struct kvm_run *run)
+static int kvm_handle_hc_map_gpa_range(X86CPU *cpu, struct kvm_run *run)
{
+ struct kvm_pre_fault_memory mem;
uint64_t gpa, size, attributes;
+ int ret;
if (!machine_require_guest_memfd(current_machine))
return -EINVAL;
@@ -6013,13 +6033,32 @@ static int kvm_handle_hc_map_gpa_range(struct kvm_run *run)
trace_kvm_hc_map_gpa_range(gpa, size, attributes, run->hypercall.flags);
- return kvm_convert_memory(gpa, size, attributes & KVM_MAP_GPA_RANGE_ENCRYPTED);
+ ret = kvm_convert_memory(gpa, size, attributes & KVM_MAP_GPA_RANGE_ENCRYPTED);
+ if (ret || !kvm_pre_fault_memory_supported) {
+ return ret;
+ }
+
+ /*
+     * Opportunistically pre-fault memory in. Failures are ignored so that any
+     * errors in faulting in the memory are captured in the KVM page-fault path
+     * when the guest first accesses the page. On partial success KVM advances
+     * mem.gpa and mem.size, so the loop below resumes where it left off.
+ */
+ memset(&mem, 0, sizeof(mem));
+ mem.gpa = gpa;
+ mem.size = size;
+ while (mem.size) {
+ if (kvm_vcpu_ioctl(CPU(cpu), KVM_PRE_FAULT_MEMORY, &mem)) {
+ break;
+ }
+ }
+
+ return 0;
}
-static int kvm_handle_hypercall(struct kvm_run *run)
+static int kvm_handle_hypercall(X86CPU *cpu, struct kvm_run *run)
{
if (run->hypercall.nr == KVM_HC_MAP_GPA_RANGE)
- return kvm_handle_hc_map_gpa_range(run);
+ return kvm_handle_hc_map_gpa_range(cpu, run);
return -EINVAL;
}
@@ -6119,7 +6158,32 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
break;
#endif
case KVM_EXIT_HYPERCALL:
- ret = kvm_handle_hypercall(run);
+ ret = kvm_handle_hypercall(cpu, run);
+ break;
+ case KVM_EXIT_SYSTEM_EVENT:
+ switch (run->system_event.type) {
+ case KVM_SYSTEM_EVENT_TDX_FATAL:
+ ret = tdx_handle_report_fatal_error(cpu, run);
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ break;
+ case KVM_EXIT_TDX:
+ /*
+ * run->tdx is already set up for the case where userspace
+ * does not handle the TDVMCALL.
+ */
+ switch (run->tdx.nr) {
+ case TDVMCALL_GET_QUOTE:
+ tdx_handle_get_quote(cpu, run);
+ break;
+ case TDVMCALL_GET_TD_VM_CALL_INFO:
+ tdx_handle_get_tdvmcall_info(cpu, run);
+ break;
+ }
+ ret = 0;
break;
default:
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h
index 88565e8..5f83e88 100644
--- a/target/i386/kvm/kvm_i386.h
+++ b/target/i386/kvm/kvm_i386.h
@@ -13,6 +13,8 @@
#include "system/kvm.h"
+#define KVM_MAX_CPUID_ENTRIES 100
+
/* always false if !CONFIG_KVM */
#define kvm_pit_in_kernel() \
(kvm_irqchip_in_kernel() && !kvm_irqchip_is_split())
@@ -42,6 +44,13 @@ void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask);
#ifdef CONFIG_KVM
+#include <linux/kvm.h>
+
+typedef struct KvmCpuidInfo {
+ struct kvm_cpuid2 cpuid;
+ struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
+} KvmCpuidInfo;
+
bool kvm_is_vm_type_supported(int type);
bool kvm_has_adjust_clock_stable(void);
bool kvm_has_exception_payload(void);
@@ -57,6 +66,12 @@ uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address);
void kvm_update_msi_routes_all(void *private, bool global,
uint32_t index, uint32_t mask);
+struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
+ uint32_t function,
+ uint32_t index);
+uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg);
+uint32_t kvm_x86_build_cpuid(CPUX86State *env, struct kvm_cpuid_entry2 *entries,
+ uint32_t cpuid_i);
#endif /* CONFIG_KVM */
void kvm_pc_setup_irq_routing(bool pci_enabled);
diff --git a/target/i386/kvm/meson.build b/target/i386/kvm/meson.build
index 3996caf..2675bf8 100644
--- a/target/i386/kvm/meson.build
+++ b/target/i386/kvm/meson.build
@@ -8,6 +8,8 @@ i386_kvm_ss.add(files(
i386_kvm_ss.add(when: 'CONFIG_XEN_EMU', if_true: files('xen-emu.c'))
+i386_kvm_ss.add(when: 'CONFIG_TDX', if_true: files('tdx.c', 'tdx-quote-generator.c'), if_false: files('tdx-stub.c'))
+
i386_system_ss.add(when: 'CONFIG_HYPERV', if_true: files('hyperv.c'), if_false: files('hyperv-stub.c'))
i386_system_ss.add_all(when: 'CONFIG_KVM', if_true: i386_kvm_ss)
diff --git a/target/i386/kvm/tdx-quote-generator.c b/target/i386/kvm/tdx-quote-generator.c
new file mode 100644
index 0000000..f59715f
--- /dev/null
+++ b/target/i386/kvm/tdx-quote-generator.c
@@ -0,0 +1,300 @@
+/*
+ * QEMU TDX Quote Generation Support
+ *
+ * Copyright (c) 2025 Intel Corporation
+ *
+ * Author:
+ * Xiaoyao Li <xiaoyao.li@intel.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "qapi/qapi-visit-sockets.h"
+
+#include "tdx-quote-generator.h"
+
+#define QGS_MSG_LIB_MAJOR_VER 1
+#define QGS_MSG_LIB_MINOR_VER 1
+
+typedef enum _qgs_msg_type_t {
+ GET_QUOTE_REQ = 0,
+ GET_QUOTE_RESP = 1,
+ GET_COLLATERAL_REQ = 2,
+ GET_COLLATERAL_RESP = 3,
+ GET_PLATFORM_INFO_REQ = 4,
+ GET_PLATFORM_INFO_RESP = 5,
+ QGS_MSG_TYPE_MAX
+} qgs_msg_type_t;
+
+typedef struct _qgs_msg_header_t {
+ uint16_t major_version;
+ uint16_t minor_version;
+ uint32_t type;
+    uint32_t size; // size of the whole message, including this header, in bytes
+ uint32_t error_code; // used in response only
+} qgs_msg_header_t;
+
+typedef struct _qgs_msg_get_quote_req_t {
+ qgs_msg_header_t header; // header.type = GET_QUOTE_REQ
+ uint32_t report_size; // cannot be 0
+    uint32_t id_list_size; // length of id_list, in bytes; can be 0
+} qgs_msg_get_quote_req_t;
+
+typedef struct _qgs_msg_get_quote_resp_s {
+ qgs_msg_header_t header; // header.type = GET_QUOTE_RESP
+    uint32_t selected_id_size; // can be 0 if only one id was sent in the request
+    uint32_t quote_size; // length of quote_data, in bytes
+ uint8_t id_quote[]; // selected id followed by quote
+} qgs_msg_get_quote_resp_t;
+
+#define HEADER_SIZE 4
+
+static uint32_t decode_header(const char *buf, size_t len) {
+ if (len < HEADER_SIZE) {
+ return 0;
+ }
+ uint32_t msg_size = 0;
+ for (uint32_t i = 0; i < HEADER_SIZE; ++i) {
+ msg_size = msg_size * 256 + (buf[i] & 0xFF);
+ }
+ return msg_size;
+}
+
+static void encode_header(char *buf, size_t len, uint32_t size) {
+ assert(len >= HEADER_SIZE);
+ buf[0] = ((size >> 24) & 0xFF);
+ buf[1] = ((size >> 16) & 0xFF);
+ buf[2] = ((size >> 8) & 0xFF);
+ buf[3] = (size & 0xFF);
+}
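
Together these helpers implement the QGS transport framing: every message on the socket is a 4-byte big-endian length prelude followed by a qgs_msg payload. For a quote request, the wire layout assembled later in tdx_generate_quote() is:

    /* +----------------+--------------------------+------------------+
     * | 4-byte BE size | qgs_msg_get_quote_req_t  | TDREPORT payload |
     * +----------------+--------------------------+------------------+
     * where size = sizeof(qgs_msg_get_quote_req_t) + report_size      */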
+
+static void tdx_generate_quote_cleanup(TdxGenerateQuoteTask *task)
+{
+ timer_del(&task->timer);
+
+ g_source_remove(task->watch);
+ qio_channel_close(QIO_CHANNEL(task->sioc), NULL);
+ object_unref(OBJECT(task->sioc));
+
+ task->completion(task);
+}
+
+static gboolean tdx_get_quote_read(QIOChannel *ioc, GIOCondition condition,
+ gpointer opaque)
+{
+ TdxGenerateQuoteTask *task = opaque;
+ Error *err = NULL;
+ int ret;
+
+ ret = qio_channel_read(ioc, task->receive_buf + task->receive_buf_received,
+ task->payload_len - task->receive_buf_received, &err);
+ if (ret < 0) {
+ if (ret == QIO_CHANNEL_ERR_BLOCK) {
+ return G_SOURCE_CONTINUE;
+ } else {
+ error_report_err(err);
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ goto end;
+ }
+ }
+
+ if (ret == 0) {
+ error_report("End of file before reply received");
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ goto end;
+ }
+
+ task->receive_buf_received += ret;
+ if (task->receive_buf_received >= HEADER_SIZE) {
+ uint32_t len = decode_header(task->receive_buf,
+ task->receive_buf_received);
+ if (len == 0 ||
+ len > (task->payload_len - HEADER_SIZE)) {
+ error_report("Message len %u must be non-zero & less than %zu",
+ len, (task->payload_len - HEADER_SIZE));
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ goto end;
+ }
+
+ /* Now we know the size, shrink to fit */
+ task->payload_len = HEADER_SIZE + len;
+ task->receive_buf = g_renew(char,
+ task->receive_buf,
+ task->payload_len);
+ }
+
+ if (task->receive_buf_received >= (sizeof(qgs_msg_header_t) + HEADER_SIZE)) {
+ qgs_msg_header_t *hdr = (qgs_msg_header_t *)(task->receive_buf + HEADER_SIZE);
+ if (hdr->major_version != QGS_MSG_LIB_MAJOR_VER ||
+ hdr->minor_version != QGS_MSG_LIB_MINOR_VER) {
+ error_report("Invalid QGS message header version %d.%d",
+ hdr->major_version,
+ hdr->minor_version);
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ goto end;
+ }
+ if (hdr->type != GET_QUOTE_RESP) {
+ error_report("Invalid QGS message type %d",
+ hdr->type);
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ goto end;
+ }
+ if (hdr->size > (task->payload_len - HEADER_SIZE)) {
+ error_report("QGS message size %d exceeds payload capacity %zu",
+ hdr->size, task->payload_len);
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ goto end;
+ }
+ if (hdr->error_code != 0) {
+ error_report("QGS message error code %d",
+ hdr->error_code);
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ goto end;
+ }
+ }
+ if (task->receive_buf_received >= (sizeof(qgs_msg_get_quote_resp_t) + HEADER_SIZE)) {
+ qgs_msg_get_quote_resp_t *msg = (qgs_msg_get_quote_resp_t *)(task->receive_buf + HEADER_SIZE);
+ if (msg->selected_id_size != 0) {
+ error_report("QGS message selected ID was %d not 0",
+ msg->selected_id_size);
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ goto end;
+ }
+
+ if ((task->payload_len - HEADER_SIZE - sizeof(qgs_msg_get_quote_resp_t)) !=
+ msg->quote_size) {
+ error_report("QGS quote size %d should be %zu",
+ msg->quote_size,
+ (task->payload_len - sizeof(qgs_msg_get_quote_resp_t)));
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ goto end;
+ }
+ }
+
+ if (task->receive_buf_received == task->payload_len) {
+ size_t strip = HEADER_SIZE + sizeof(qgs_msg_get_quote_resp_t);
+ memmove(task->receive_buf,
+ task->receive_buf + strip,
+ task->receive_buf_received - strip);
+ task->receive_buf_received -= strip;
+ task->status_code = TDX_VP_GET_QUOTE_SUCCESS;
+ goto end;
+ }
+
+ return G_SOURCE_CONTINUE;
+
+end:
+ tdx_generate_quote_cleanup(task);
+ return G_SOURCE_REMOVE;
+}
+
+static gboolean tdx_send_report(QIOChannel *ioc, GIOCondition condition,
+ gpointer opaque)
+{
+ TdxGenerateQuoteTask *task = opaque;
+ Error *err = NULL;
+ int ret;
+
+ ret = qio_channel_write(ioc, task->send_data + task->send_data_sent,
+ task->send_data_size - task->send_data_sent, &err);
+ if (ret < 0) {
+ if (ret == QIO_CHANNEL_ERR_BLOCK) {
+ ret = 0;
+ } else {
+ error_report_err(err);
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ tdx_generate_quote_cleanup(task);
+ goto end;
+ }
+ }
+ task->send_data_sent += ret;
+
+ if (task->send_data_sent == task->send_data_size) {
+ task->watch = qio_channel_add_watch(QIO_CHANNEL(task->sioc), G_IO_IN,
+ tdx_get_quote_read, task, NULL);
+ goto end;
+ }
+
+ return G_SOURCE_CONTINUE;
+
+end:
+ return G_SOURCE_REMOVE;
+}
+
+static void tdx_quote_generator_connected(QIOTask *qio_task, gpointer opaque)
+{
+ TdxGenerateQuoteTask *task = opaque;
+ Error *err = NULL;
+ int ret;
+
+ ret = qio_task_propagate_error(qio_task, &err);
+ if (ret) {
+ error_report_err(err);
+ task->status_code = TDX_VP_GET_QUOTE_QGS_UNAVAILABLE;
+ tdx_generate_quote_cleanup(task);
+ return;
+ }
+
+ task->watch = qio_channel_add_watch(QIO_CHANNEL(task->sioc), G_IO_OUT,
+ tdx_send_report, task, NULL);
+}
+
+#define TRANSACTION_TIMEOUT 30000
+
+static void getquote_expired(void *opaque)
+{
+ TdxGenerateQuoteTask *task = opaque;
+
+ task->status_code = TDX_VP_GET_QUOTE_ERROR;
+ tdx_generate_quote_cleanup(task);
+}
+
+static void setup_get_quote_timer(TdxGenerateQuoteTask *task)
+{
+ int64_t time;
+
+ timer_init_ms(&task->timer, QEMU_CLOCK_VIRTUAL, getquote_expired, task);
+ time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
+ timer_mod(&task->timer, time + TRANSACTION_TIMEOUT);
+}
+
+void tdx_generate_quote(TdxGenerateQuoteTask *task,
+ SocketAddress *qg_sock_addr)
+{
+ QIOChannelSocket *sioc;
+ qgs_msg_get_quote_req_t msg;
+
+ /* Prepare a QGS message prelude */
+ msg.header.major_version = QGS_MSG_LIB_MAJOR_VER;
+ msg.header.minor_version = QGS_MSG_LIB_MINOR_VER;
+ msg.header.type = GET_QUOTE_REQ;
+ msg.header.size = sizeof(msg) + task->send_data_size;
+ msg.header.error_code = 0;
+ msg.report_size = task->send_data_size;
+ msg.id_list_size = 0;
+
+ /* Make room to add the QGS message prelude */
+ task->send_data = g_renew(char,
+ task->send_data,
+ task->send_data_size + sizeof(msg) + HEADER_SIZE);
+ memmove(task->send_data + sizeof(msg) + HEADER_SIZE,
+ task->send_data,
+ task->send_data_size);
+ memcpy(task->send_data + HEADER_SIZE,
+ &msg,
+ sizeof(msg));
+ encode_header(task->send_data, HEADER_SIZE, task->send_data_size + sizeof(msg));
+ task->send_data_size += sizeof(msg) + HEADER_SIZE;
+
+ sioc = qio_channel_socket_new();
+ task->sioc = sioc;
+
+ setup_get_quote_timer(task);
+
+ qio_channel_socket_connect_async(sioc, qg_sock_addr,
+ tdx_quote_generator_connected, task,
+ NULL, NULL);
+}
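
A hedged sketch of how a caller drives this (the callback and report variables are illustrative): the task completes asynchronously, either from tdx_get_quote_read() or from the 30-second QEMU_CLOCK_VIRTUAL timeout set up above:

    task->send_data = report;           /* g_malloc'ed TDREPORT bytes;       */
    task->send_data_size = report_size; /* tdx_generate_quote() g_renew()s it */
    task->completion = my_quote_done;   /* illustrative completion callback  */
    tdx_generate_quote(task, qg_sock_addr);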
diff --git a/target/i386/kvm/tdx-quote-generator.h b/target/i386/kvm/tdx-quote-generator.h
new file mode 100644
index 0000000..3bd9b8e
--- /dev/null
+++ b/target/i386/kvm/tdx-quote-generator.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef QEMU_I386_TDX_QUOTE_GENERATOR_H
+#define QEMU_I386_TDX_QUOTE_GENERATOR_H
+
+#include "qom/object_interfaces.h"
+#include "io/channel-socket.h"
+#include "exec/hwaddr.h"
+
+#define TDX_GET_QUOTE_STRUCTURE_VERSION 1ULL
+
+#define TDX_VP_GET_QUOTE_SUCCESS 0ULL
+#define TDX_VP_GET_QUOTE_IN_FLIGHT (-1ULL)
+#define TDX_VP_GET_QUOTE_ERROR 0x8000000000000000ULL
+#define TDX_VP_GET_QUOTE_QGS_UNAVAILABLE 0x8000000000000001ULL
+
+/* Limit to avoid resource starvation. */
+#define TDX_GET_QUOTE_MAX_BUF_LEN (128 * 1024)
+#define TDX_MAX_GET_QUOTE_REQUEST 16
+
+#define TDX_GET_QUOTE_HDR_SIZE 24
+
+/* Format of pages shared with guest. */
+struct tdx_get_quote_header {
+ /* Format version: must be 1 in little endian. */
+ uint64_t structure_version;
+
+ /*
+ * GetQuote status code in little endian:
+ * Guest must set error_code to 0 to avoid information leak.
+     * QEMU sets this before interrupting the guest.
+ */
+ uint64_t error_code;
+
+ /*
+     * in-message size in little endian: The message follows this header.
+     * The in-message will be sent to QGS.
+ */
+ uint32_t in_len;
+
+ /*
+ * out-message size in little endian:
+ * On request, out_len must be zero to avoid information leak.
+     * On return, the size of the message from QGS. QEMU overwrites this field.
+     * The message follows this header, overwriting the in-message.
+ */
+ uint32_t out_len;
+
+ /*
+ * Message buffer follows.
+ * The guest sets the message to be sent to QGS. If out_len > in_len, the
+ * guest should zero the remaining buffer to avoid information leak.
+ * QEMU overwrites this buffer with the message returned from QGS.
+ */
+};
+
+typedef struct TdxGenerateQuoteTask {
+ hwaddr buf_gpa;
+ hwaddr payload_gpa;
+ uint64_t payload_len;
+
+ char *send_data;
+ uint64_t send_data_size;
+ uint64_t send_data_sent;
+
+ char *receive_buf;
+ uint64_t receive_buf_received;
+
+ uint64_t status_code;
+ struct tdx_get_quote_header hdr;
+
+ QIOChannelSocket *sioc;
+ guint watch;
+ QEMUTimer timer;
+
+ void (*completion)(struct TdxGenerateQuoteTask *task);
+ void *opaque;
+} TdxGenerateQuoteTask;
+
+void tdx_generate_quote(TdxGenerateQuoteTask *task, SocketAddress *qg_sock_addr);
+
+#endif /* QEMU_I386_TDX_QUOTE_GENERATOR_H */
diff --git a/target/i386/kvm/tdx-stub.c b/target/i386/kvm/tdx-stub.c
new file mode 100644
index 0000000..76fee49
--- /dev/null
+++ b/target/i386/kvm/tdx-stub.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "qemu/osdep.h"
+
+#include "tdx.h"
+
+int tdx_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return -EINVAL;
+}
+
+int tdx_parse_tdvf(void *flash_ptr, int size)
+{
+ return -EINVAL;
+}
+
+int tdx_handle_report_fatal_error(X86CPU *cpu, struct kvm_run *run)
+{
+ return -EINVAL;
+}
+
+void tdx_handle_get_quote(X86CPU *cpu, struct kvm_run *run)
+{
+}
+
+void tdx_handle_get_tdvmcall_info(X86CPU *cpu, struct kvm_run *run)
+{
+}
diff --git a/target/i386/kvm/tdx.c b/target/i386/kvm/tdx.c
new file mode 100644
index 0000000..e809e4b
--- /dev/null
+++ b/target/i386/kvm/tdx.c
@@ -0,0 +1,1487 @@
+/*
+ * QEMU TDX support
+ *
+ * Copyright (c) 2025 Intel Corporation
+ *
+ * Author:
+ * Xiaoyao Li <xiaoyao.li@intel.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qemu/base64.h"
+#include "qemu/mmap-alloc.h"
+#include "qapi/error.h"
+#include "qapi/qapi-visit-sockets.h"
+#include "qom/object_interfaces.h"
+#include "crypto/hash.h"
+#include "system/kvm_int.h"
+#include "system/runstate.h"
+#include "system/system.h"
+#include "system/ramblock.h"
+#include "system/address-spaces.h"
+
+#include <linux/kvm_para.h>
+
+#include "cpu.h"
+#include "cpu-internal.h"
+#include "host-cpu.h"
+#include "hw/i386/e820_memory_layout.h"
+#include "hw/i386/tdvf.h"
+#include "hw/i386/x86.h"
+#include "hw/i386/tdvf-hob.h"
+#include "kvm_i386.h"
+#include "tdx.h"
+#include "tdx-quote-generator.h"
+
+#include "standard-headers/asm-x86/kvm_para.h"
+
+#define TDX_MIN_TSC_FREQUENCY_KHZ (100 * 1000)
+#define TDX_MAX_TSC_FREQUENCY_KHZ (10 * 1000 * 1000)
+
+#define TDX_TD_ATTRIBUTES_DEBUG BIT_ULL(0)
+#define TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE BIT_ULL(28)
+#define TDX_TD_ATTRIBUTES_PKS BIT_ULL(30)
+#define TDX_TD_ATTRIBUTES_PERFMON BIT_ULL(63)
+
+#define TDX_SUPPORTED_TD_ATTRS (TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE |\
+ TDX_TD_ATTRIBUTES_PKS | \
+ TDX_TD_ATTRIBUTES_PERFMON)
+
+#define TDX_SUPPORTED_KVM_FEATURES ((1U << KVM_FEATURE_NOP_IO_DELAY) | \
+ (1U << KVM_FEATURE_PV_UNHALT) | \
+ (1U << KVM_FEATURE_PV_TLB_FLUSH) | \
+ (1U << KVM_FEATURE_PV_SEND_IPI) | \
+ (1U << KVM_FEATURE_POLL_CONTROL) | \
+ (1U << KVM_FEATURE_PV_SCHED_YIELD) | \
+ (1U << KVM_FEATURE_MSI_EXT_DEST_ID))
+
+static TdxGuest *tdx_guest;
+
+static struct kvm_tdx_capabilities *tdx_caps;
+static struct kvm_cpuid2 *tdx_supported_cpuid;
+
+/* Valid after kvm_arch_init()->confidential_guest_kvm_init()->tdx_kvm_init() */
+bool is_tdx_vm(void)
+{
+ return !!tdx_guest;
+}
+
+enum tdx_ioctl_level {
+ TDX_VM_IOCTL,
+ TDX_VCPU_IOCTL,
+};
+
+static int tdx_ioctl_internal(enum tdx_ioctl_level level, void *state,
+ int cmd_id, __u32 flags, void *data,
+ Error **errp)
+{
+ struct kvm_tdx_cmd tdx_cmd = {};
+ int r;
+
+ const char *tdx_ioctl_name[] = {
+ [KVM_TDX_CAPABILITIES] = "KVM_TDX_CAPABILITIES",
+ [KVM_TDX_INIT_VM] = "KVM_TDX_INIT_VM",
+ [KVM_TDX_INIT_VCPU] = "KVM_TDX_INIT_VCPU",
+ [KVM_TDX_INIT_MEM_REGION] = "KVM_TDX_INIT_MEM_REGION",
+ [KVM_TDX_FINALIZE_VM] = "KVM_TDX_FINALIZE_VM",
+ [KVM_TDX_GET_CPUID] = "KVM_TDX_GET_CPUID",
+ };
+
+ tdx_cmd.id = cmd_id;
+ tdx_cmd.flags = flags;
+ tdx_cmd.data = (__u64)(unsigned long)data;
+
+ switch (level) {
+ case TDX_VM_IOCTL:
+ r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd);
+ break;
+ case TDX_VCPU_IOCTL:
+ r = kvm_vcpu_ioctl(state, KVM_MEMORY_ENCRYPT_OP, &tdx_cmd);
+ break;
+ default:
+ error_setg(errp, "Invalid tdx_ioctl_level %d", level);
+ return -EINVAL;
+ }
+
+ if (r < 0) {
+        error_setg_errno(errp, -r, "TDX ioctl %s failed, hw_error: 0x%llx",
+ tdx_ioctl_name[cmd_id], tdx_cmd.hw_error);
+ }
+ return r;
+}
+
+static inline int tdx_vm_ioctl(int cmd_id, __u32 flags, void *data,
+ Error **errp)
+{
+ return tdx_ioctl_internal(TDX_VM_IOCTL, NULL, cmd_id, flags, data, errp);
+}
+
+static inline int tdx_vcpu_ioctl(CPUState *cpu, int cmd_id, __u32 flags,
+ void *data, Error **errp)
+{
+ return tdx_ioctl_internal(TDX_VCPU_IOCTL, cpu, cmd_id, flags, data, errp);
+}
+
+static int get_tdx_capabilities(Error **errp)
+{
+ struct kvm_tdx_capabilities *caps;
+ /* 1st generation of TDX reports 6 cpuid configs */
+ int nr_cpuid_configs = 6;
+ size_t size;
+ int r;
+
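+    /* KVM returns -E2BIG when the buffer is too small; double it and retry. */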
+ do {
+ Error *local_err = NULL;
+ size = sizeof(struct kvm_tdx_capabilities) +
+ nr_cpuid_configs * sizeof(struct kvm_cpuid_entry2);
+ caps = g_malloc0(size);
+ caps->cpuid.nent = nr_cpuid_configs;
+
+ r = tdx_vm_ioctl(KVM_TDX_CAPABILITIES, 0, caps, &local_err);
+ if (r == -E2BIG) {
+ g_free(caps);
+ nr_cpuid_configs *= 2;
+ if (nr_cpuid_configs > KVM_MAX_CPUID_ENTRIES) {
+                error_report("KVM TDX seems broken: the number of CPUID entries"
+                             " in kvm_tdx_capabilities exceeds the limit %d",
+ KVM_MAX_CPUID_ENTRIES);
+ error_propagate(errp, local_err);
+ return r;
+ }
+ error_free(local_err);
+ } else if (r < 0) {
+ g_free(caps);
+ error_propagate(errp, local_err);
+ return r;
+ }
+ } while (r == -E2BIG);
+
+ tdx_caps = caps;
+
+ return 0;
+}
+
+void tdx_set_tdvf_region(MemoryRegion *tdvf_mr)
+{
+ assert(!tdx_guest->tdvf_mr);
+ tdx_guest->tdvf_mr = tdvf_mr;
+}
+
+static TdxFirmwareEntry *tdx_get_hob_entry(TdxGuest *tdx)
+{
+ TdxFirmwareEntry *entry;
+
+ for_each_tdx_fw_entry(&tdx->tdvf, entry) {
+ if (entry->type == TDVF_SECTION_TYPE_TD_HOB) {
+ return entry;
+ }
+ }
+ error_report("TDVF metadata doesn't specify TD_HOB location.");
+ exit(1);
+}
+
+static void tdx_add_ram_entry(uint64_t address, uint64_t length,
+ enum TdxRamType type)
+{
+ uint32_t nr_entries = tdx_guest->nr_ram_entries;
+ tdx_guest->ram_entries = g_renew(TdxRamEntry, tdx_guest->ram_entries,
+ nr_entries + 1);
+
+ tdx_guest->ram_entries[nr_entries].address = address;
+ tdx_guest->ram_entries[nr_entries].length = length;
+ tdx_guest->ram_entries[nr_entries].type = type;
+ tdx_guest->nr_ram_entries++;
+}
+
+static int tdx_accept_ram_range(uint64_t address, uint64_t length)
+{
+ uint64_t head_start, tail_start, head_length, tail_length;
+ uint64_t tmp_address, tmp_length;
+ TdxRamEntry *e;
+ int i = 0;
+
+ do {
+ if (i == tdx_guest->nr_ram_entries) {
+ return -1;
+ }
+
+ e = &tdx_guest->ram_entries[i++];
+ } while (address + length <= e->address || address >= e->address + e->length);
+
+ /*
+ * The to-be-accepted ram range must be fully contained by one
+ * RAM entry.
+ */
+ if (e->address > address ||
+ e->address + e->length < address + length) {
+ return -1;
+ }
+
+ if (e->type == TDX_RAM_ADDED) {
+ return 0;
+ }
+
+ tmp_address = e->address;
+ tmp_length = e->length;
+
+ e->address = address;
+ e->length = length;
+ e->type = TDX_RAM_ADDED;
+
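+    /* Split off any not-yet-accepted head and tail around the accepted range. */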
+ head_length = address - tmp_address;
+ if (head_length > 0) {
+ head_start = tmp_address;
+ tdx_add_ram_entry(head_start, head_length, TDX_RAM_UNACCEPTED);
+ }
+
+ tail_start = address + length;
+ if (tail_start < tmp_address + tmp_length) {
+ tail_length = tmp_address + tmp_length - tail_start;
+ tdx_add_ram_entry(tail_start, tail_length, TDX_RAM_UNACCEPTED);
+ }
+
+ return 0;
+}
+
+static int tdx_ram_entry_compare(const void *lhs_, const void *rhs_)
+{
+ const TdxRamEntry *lhs = lhs_;
+ const TdxRamEntry *rhs = rhs_;
+
+ if (lhs->address == rhs->address) {
+ return 0;
+ }
+ if (le64_to_cpu(lhs->address) > le64_to_cpu(rhs->address)) {
+ return 1;
+ }
+ return -1;
+}
+
+static void tdx_init_ram_entries(void)
+{
+ unsigned i, j, nr_e820_entries;
+
+ nr_e820_entries = e820_get_table(NULL);
+ tdx_guest->ram_entries = g_new(TdxRamEntry, nr_e820_entries);
+
+ for (i = 0, j = 0; i < nr_e820_entries; i++) {
+ uint64_t addr, len;
+
+ if (e820_get_entry(i, E820_RAM, &addr, &len)) {
+ tdx_guest->ram_entries[j].address = addr;
+ tdx_guest->ram_entries[j].length = len;
+ tdx_guest->ram_entries[j].type = TDX_RAM_UNACCEPTED;
+ j++;
+ }
+ }
+ tdx_guest->nr_ram_entries = j;
+}
+
+static void tdx_post_init_vcpus(void)
+{
+ TdxFirmwareEntry *hob;
+ CPUState *cpu;
+
+ hob = tdx_get_hob_entry(tdx_guest);
+ CPU_FOREACH(cpu) {
+ tdx_vcpu_ioctl(cpu, KVM_TDX_INIT_VCPU, 0, (void *)(uintptr_t)hob->address,
+ &error_fatal);
+ }
+}
+
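+/*
+ * Machine-init-done notifier: map the TDVF sections, build the TD HOB,
+ * initialize the vCPUs, populate the initial memory and finalize the TD.
+ */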
+static void tdx_finalize_vm(Notifier *notifier, void *unused)
+{
+ TdxFirmware *tdvf = &tdx_guest->tdvf;
+ TdxFirmwareEntry *entry;
+ RAMBlock *ram_block;
+ Error *local_err = NULL;
+ int r;
+
+ tdx_init_ram_entries();
+
+ for_each_tdx_fw_entry(tdvf, entry) {
+ switch (entry->type) {
+ case TDVF_SECTION_TYPE_BFV:
+ case TDVF_SECTION_TYPE_CFV:
+ entry->mem_ptr = tdvf->mem_ptr + entry->data_offset;
+ break;
+ case TDVF_SECTION_TYPE_TD_HOB:
+ case TDVF_SECTION_TYPE_TEMP_MEM:
+ entry->mem_ptr = qemu_ram_mmap(-1, entry->size,
+ qemu_real_host_page_size(), 0, 0);
+ if (entry->mem_ptr == MAP_FAILED) {
+ error_report("Failed to mmap memory for TDVF section %d",
+ entry->type);
+ exit(1);
+ }
+ if (tdx_accept_ram_range(entry->address, entry->size)) {
+ error_report("Failed to accept memory for TDVF section %d",
+ entry->type);
+ qemu_ram_munmap(-1, entry->mem_ptr, entry->size);
+ exit(1);
+ }
+ break;
+ default:
+ error_report("Unsupported TDVF section %d", entry->type);
+ exit(1);
+ }
+ }
+
+ qsort(tdx_guest->ram_entries, tdx_guest->nr_ram_entries,
+ sizeof(TdxRamEntry), &tdx_ram_entry_compare);
+
+ tdvf_hob_create(tdx_guest, tdx_get_hob_entry(tdx_guest));
+
+ tdx_post_init_vcpus();
+
+ for_each_tdx_fw_entry(tdvf, entry) {
+ struct kvm_tdx_init_mem_region region;
+ uint32_t flags;
+
+ region = (struct kvm_tdx_init_mem_region) {
+ .source_addr = (uintptr_t)entry->mem_ptr,
+ .gpa = entry->address,
+ .nr_pages = entry->size >> 12,
+ };
+
+ flags = entry->attributes & TDVF_SECTION_ATTRIBUTES_MR_EXTEND ?
+ KVM_TDX_MEASURE_MEMORY_REGION : 0;
+
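+        /* KVM_TDX_INIT_MEM_REGION may be interrupted; retry on -EAGAIN/-EINTR. */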
+ do {
+ error_free(local_err);
+ local_err = NULL;
+ r = tdx_vcpu_ioctl(first_cpu, KVM_TDX_INIT_MEM_REGION, flags,
+ &region, &local_err);
+ } while (r == -EAGAIN || r == -EINTR);
+ if (r < 0) {
+ error_report_err(local_err);
+ exit(1);
+ }
+
+ if (entry->type == TDVF_SECTION_TYPE_TD_HOB ||
+ entry->type == TDVF_SECTION_TYPE_TEMP_MEM) {
+ qemu_ram_munmap(-1, entry->mem_ptr, entry->size);
+ entry->mem_ptr = NULL;
+ }
+ }
+
+ /*
+     * The TDVF image has been copied into the private region above via
+     * KVM_TDX_INIT_MEM_REGION and is no longer needed; discard it.
+ */
+ ram_block = tdx_guest->tdvf_mr->ram_block;
+ ram_block_discard_range(ram_block, 0, ram_block->max_length);
+
+ tdx_vm_ioctl(KVM_TDX_FINALIZE_VM, 0, NULL, &error_fatal);
+ CONFIDENTIAL_GUEST_SUPPORT(tdx_guest)->ready = true;
+}
+
+static Notifier tdx_machine_done_notify = {
+ .notify = tdx_finalize_vm,
+};
+
+/*
+ * Some CPUID bits change from fixed1 to configurable when the TDX module
+ * supports TDX_FEATURES0.VE_REDUCTION, e.g., MCA/MCE/MTRR/CORE_CAPABILITY.
+ *
+ * To make QEMU work with all versions of the TDX module, keep a bit here if
+ * it is fixed1 in any version, even when it is not fixed1 in the latest one.
+ * Otherwise, with an older TDX module, QEMU may treat such a fixed1 bit as
+ * unsupported.
+ *
+ * With a newer TDX module it does no harm to keep these bits in
+ * tdx_fixed1_bits even though they became configurable, because
+ * tdx_fixed1_bits is only used to set up the supported bits.
+ */
+KvmCpuidInfo tdx_fixed1_bits = {
+ .cpuid.nent = 8,
+ .entries[0] = {
+ .function = 0x1,
+ .index = 0,
+ .ecx = CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_DTES64 |
+ CPUID_EXT_DSCPL | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 |
+ CPUID_EXT_PDCM | CPUID_EXT_PCID | CPUID_EXT_SSE41 |
+ CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
+ CPUID_EXT_POPCNT | CPUID_EXT_AES | CPUID_EXT_XSAVE |
+ CPUID_EXT_RDRAND | CPUID_EXT_HYPERVISOR,
+ .edx = CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
+ CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
+ CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
+ CPUID_PAT | CPUID_CLFLUSH | CPUID_DTS | CPUID_MMX | CPUID_FXSR |
+ CPUID_SSE | CPUID_SSE2,
+ },
+ .entries[1] = {
+ .function = 0x6,
+ .index = 0,
+ .eax = CPUID_6_EAX_ARAT,
+ },
+ .entries[2] = {
+ .function = 0x7,
+ .index = 0,
+ .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX,
+ .ebx = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_FDP_EXCPTN_ONLY |
+ CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_ZERO_FCS_FDS | CPUID_7_0_EBX_RDSEED |
+ CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
+ CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_SHA_NI,
+ .ecx = CPUID_7_0_ECX_BUS_LOCK_DETECT | CPUID_7_0_ECX_MOVDIRI |
+ CPUID_7_0_ECX_MOVDIR64B,
+ .edx = CPUID_7_0_EDX_MD_CLEAR | CPUID_7_0_EDX_SPEC_CTRL |
+ CPUID_7_0_EDX_STIBP | CPUID_7_0_EDX_FLUSH_L1D |
+ CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_CORE_CAPABILITY |
+ CPUID_7_0_EDX_SPEC_CTRL_SSBD,
+ },
+ .entries[3] = {
+ .function = 0x7,
+ .index = 2,
+ .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX,
+ .edx = CPUID_7_2_EDX_PSFD | CPUID_7_2_EDX_IPRED_CTRL |
+ CPUID_7_2_EDX_RRSBA_CTRL | CPUID_7_2_EDX_BHI_CTRL,
+ },
+ .entries[4] = {
+ .function = 0xD,
+ .index = 0,
+ .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX,
+ .eax = XSTATE_FP_MASK | XSTATE_SSE_MASK,
+ },
+ .entries[5] = {
+ .function = 0xD,
+ .index = 1,
+ .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX,
+        .eax = CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES,
+ },
+ .entries[6] = {
+ .function = 0x80000001,
+ .index = 0,
+ .ecx = CPUID_EXT3_LAHF_LM | CPUID_EXT3_ABM | CPUID_EXT3_3DNOWPREFETCH,
+ /*
+         * Strictly speaking, SYSCALL is not a fixed1 bit since it depends on
+         * the CPU being in 64-bit mode. But here fixed1 serves the purpose of
+         * the supported bits for TDX, and in that sense SYSCALL is always
+         * supported.
+ */
+ .edx = CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB |
+ CPUID_EXT2_RDTSCP | CPUID_EXT2_LM,
+ },
+ .entries[7] = {
+ .function = 0x80000007,
+ .index = 0,
+ .edx = CPUID_APM_INVTSC,
+ },
+};
+
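+/* Maps a TD attribute bit (by index) to the CPUID feature bit it enumerates. */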
+typedef struct TdxAttrsMap {
+ uint32_t attr_index;
+ uint32_t cpuid_leaf;
+ uint32_t cpuid_subleaf;
+ int cpuid_reg;
+ uint32_t feat_mask;
+} TdxAttrsMap;
+
+static TdxAttrsMap tdx_attrs_maps[] = {
+ {.attr_index = 27,
+ .cpuid_leaf = 7,
+ .cpuid_subleaf = 1,
+ .cpuid_reg = R_EAX,
+ .feat_mask = CPUID_7_1_EAX_LASS,},
+
+ {.attr_index = 30,
+ .cpuid_leaf = 7,
+ .cpuid_subleaf = 0,
+ .cpuid_reg = R_ECX,
+ .feat_mask = CPUID_7_0_ECX_PKS,},
+
+ {.attr_index = 31,
+ .cpuid_leaf = 7,
+ .cpuid_subleaf = 0,
+ .cpuid_reg = R_ECX,
+ .feat_mask = CPUID_7_0_ECX_KeyLocker,},
+};
+
+typedef struct TdxXFAMDep {
+ int xfam_bit;
+ FeatureMask feat_mask;
+} TdxXFAMDep;
+
+/*
+ * Note, only the CPUID bits whose virtualization type is "XFAM & Native" are
+ * defined here.
+ *
+ * Bits whose virtualization type is "XFAM & Configured & Native" are reported
+ * as configurable bits, and they are not supported unless they appear in the
+ * configurable bits list from KVM, even if the corresponding XFAM bit is
+ * supported.
+ */
+TdxXFAMDep tdx_xfam_deps[] = {
+ { XSTATE_YMM_BIT, { FEAT_1_ECX, CPUID_EXT_FMA }},
+ { XSTATE_YMM_BIT, { FEAT_7_0_EBX, CPUID_7_0_EBX_AVX2 }},
+ { XSTATE_OPMASK_BIT, { FEAT_7_0_ECX, CPUID_7_0_ECX_AVX512_VBMI}},
+ { XSTATE_OPMASK_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AVX512_FP16}},
+ { XSTATE_PT_BIT, { FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT}},
+ { XSTATE_PKRU_BIT, { FEAT_7_0_ECX, CPUID_7_0_ECX_PKU}},
+ { XSTATE_XTILE_CFG_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AMX_BF16 }},
+ { XSTATE_XTILE_CFG_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AMX_TILE }},
+ { XSTATE_XTILE_CFG_BIT, { FEAT_7_0_EDX, CPUID_7_0_EDX_AMX_INT8 }},
+};
+
+static struct kvm_cpuid_entry2 *find_in_supported_entry(uint32_t function,
+ uint32_t index)
+{
+ struct kvm_cpuid_entry2 *e;
+
+ e = cpuid_find_entry(tdx_supported_cpuid, function, index);
+ if (!e) {
+ if (tdx_supported_cpuid->nent >= KVM_MAX_CPUID_ENTRIES) {
+            error_report("tdx_supported_cpuid requires more space than %d entries",
+ KVM_MAX_CPUID_ENTRIES);
+ exit(1);
+ }
+ e = &tdx_supported_cpuid->entries[tdx_supported_cpuid->nent++];
+ e->function = function;
+ e->index = index;
+ }
+
+ return e;
+}
+
+static void tdx_add_supported_cpuid_by_fixed1_bits(void)
+{
+ struct kvm_cpuid_entry2 *e, *e1;
+ int i;
+
+ for (i = 0; i < tdx_fixed1_bits.cpuid.nent; i++) {
+ e = &tdx_fixed1_bits.entries[i];
+
+ e1 = find_in_supported_entry(e->function, e->index);
+ e1->eax |= e->eax;
+ e1->ebx |= e->ebx;
+ e1->ecx |= e->ecx;
+ e1->edx |= e->edx;
+ }
+}
+
+static void tdx_add_supported_cpuid_by_attrs(void)
+{
+ struct kvm_cpuid_entry2 *e;
+ TdxAttrsMap *map;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tdx_attrs_maps); i++) {
+ map = &tdx_attrs_maps[i];
+ if (!((1ULL << map->attr_index) & tdx_caps->supported_attrs)) {
+ continue;
+ }
+
+ e = find_in_supported_entry(map->cpuid_leaf, map->cpuid_subleaf);
+
+        switch (map->cpuid_reg) {
+ case R_EAX:
+ e->eax |= map->feat_mask;
+ break;
+ case R_EBX:
+ e->ebx |= map->feat_mask;
+ break;
+ case R_ECX:
+ e->ecx |= map->feat_mask;
+ break;
+ case R_EDX:
+ e->edx |= map->feat_mask;
+ break;
+ }
+ }
+}
+
+static void tdx_add_supported_cpuid_by_xfam(void)
+{
+ struct kvm_cpuid_entry2 *e;
+    int i;
+    const TdxXFAMDep *xfam_dep;
+    const FeatureWordInfo *f;
+
+    for (i = 0; i < ARRAY_SIZE(tdx_xfam_deps); i++) {
+ xfam_dep = &tdx_xfam_deps[i];
+ if (!((1ULL << xfam_dep->xfam_bit) & tdx_caps->supported_xfam)) {
+ continue;
+ }
+
+ f = &feature_word_info[xfam_dep->feat_mask.index];
+ if (f->type != CPUID_FEATURE_WORD) {
+ continue;
+ }
+
+ e = find_in_supported_entry(f->cpuid.eax, f->cpuid.ecx);
+        switch (f->cpuid.reg) {
+ case R_EAX:
+ e->eax |= xfam_dep->feat_mask.mask;
+ break;
+ case R_EBX:
+ e->ebx |= xfam_dep->feat_mask.mask;
+ break;
+ case R_ECX:
+ e->ecx |= xfam_dep->feat_mask.mask;
+ break;
+ case R_EDX:
+ e->edx |= xfam_dep->feat_mask.mask;
+ break;
+ }
+ }
+
+ e = find_in_supported_entry(0xd, 0);
+ e->eax |= (tdx_caps->supported_xfam & CPUID_XSTATE_XCR0_MASK);
+ e->edx |= (tdx_caps->supported_xfam & CPUID_XSTATE_XCR0_MASK) >> 32;
+
+ e = find_in_supported_entry(0xd, 1);
+ /*
+     * Always mark XFD as supported for TDX; it is cleared later in
+     * tdx_adjust_cpuid_features() if XFD is unavailable on the hardware,
+     * because in that case the original data has it as 0.
+ */
+ e->eax |= CPUID_XSAVE_XFD;
+ e->ecx |= (tdx_caps->supported_xfam & CPUID_XSTATE_XSS_MASK);
+ e->edx |= (tdx_caps->supported_xfam & CPUID_XSTATE_XSS_MASK) >> 32;
+}
+
+static void tdx_add_supported_kvm_features(void)
+{
+ struct kvm_cpuid_entry2 *e;
+
+ e = find_in_supported_entry(0x40000001, 0);
+ e->eax = TDX_SUPPORTED_KVM_FEATURES;
+}
+
+static void tdx_setup_supported_cpuid(void)
+{
+ if (tdx_supported_cpuid) {
+ return;
+ }
+
+ tdx_supported_cpuid = g_malloc0(sizeof(*tdx_supported_cpuid) +
+ KVM_MAX_CPUID_ENTRIES * sizeof(struct kvm_cpuid_entry2));
+
+ memcpy(tdx_supported_cpuid->entries, tdx_caps->cpuid.entries,
+ tdx_caps->cpuid.nent * sizeof(struct kvm_cpuid_entry2));
+ tdx_supported_cpuid->nent = tdx_caps->cpuid.nent;
+
+ tdx_add_supported_cpuid_by_fixed1_bits();
+ tdx_add_supported_cpuid_by_attrs();
+ tdx_add_supported_cpuid_by_xfam();
+
+ tdx_add_supported_kvm_features();
+}
+
+static int tdx_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ X86MachineState *x86ms = X86_MACHINE(ms);
+ TdxGuest *tdx = TDX_GUEST(cgs);
+ int r = 0;
+
+ kvm_mark_guest_state_protected();
+
+ if (x86ms->smm == ON_OFF_AUTO_AUTO) {
+ x86ms->smm = ON_OFF_AUTO_OFF;
+ } else if (x86ms->smm == ON_OFF_AUTO_ON) {
+ error_setg(errp, "TDX VM doesn't support SMM");
+ return -EINVAL;
+ }
+
+ if (x86ms->pic == ON_OFF_AUTO_AUTO) {
+ x86ms->pic = ON_OFF_AUTO_OFF;
+ } else if (x86ms->pic == ON_OFF_AUTO_ON) {
+ error_setg(errp, "TDX VM doesn't support PIC");
+ return -EINVAL;
+ }
+
+ if (kvm_state->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
+ kvm_state->kernel_irqchip_split = ON_OFF_AUTO_ON;
+ } else if (kvm_state->kernel_irqchip_split != ON_OFF_AUTO_ON) {
+ error_setg(errp, "TDX VM requires kernel_irqchip to be split");
+ return -EINVAL;
+ }
+
+ if (!tdx_caps) {
+ r = get_tdx_capabilities(errp);
+ if (r) {
+ return r;
+ }
+ }
+
+ tdx_setup_supported_cpuid();
+
+ /* TDX relies on KVM_HC_MAP_GPA_RANGE to handle TDG.VP.VMCALL<MapGPA> */
+ if (!kvm_enable_hypercall(BIT_ULL(KVM_HC_MAP_GPA_RANGE))) {
+ return -EOPNOTSUPP;
+ }
+
+ /*
+     * Set kvm_readonly_mem_allowed to false, because TDX only supports
+     * read-only memslots for shared memory, not for private memory. Besides,
+     * whether a memslot is private or shared is not determined by QEMU.
+     *
+     * Thus, just mark read-only memory as unsupported for simplicity.
+ */
+ kvm_readonly_mem_allowed = false;
+
+ qemu_add_machine_init_done_notifier(&tdx_machine_done_notify);
+
+ tdx_guest = tdx;
+ return 0;
+}
+
+static int tdx_kvm_type(X86ConfidentialGuest *cg)
+{
+ /* Do the object check */
+ TDX_GUEST(cg);
+
+ return KVM_X86_TDX_VM;
+}
+
+static void tdx_cpu_instance_init(X86ConfidentialGuest *cg, CPUState *cpu)
+{
+ X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
+ X86CPU *x86cpu = X86_CPU(cpu);
+
+ if (xcc->model) {
+ error_report("Named cpu model is not supported for TDX yet!");
+ exit(1);
+ }
+
+ object_property_set_bool(OBJECT(cpu), "pmu", false, &error_abort);
+
+ /* invtsc is fixed1 for TD guest */
+ object_property_set_bool(OBJECT(cpu), "invtsc", true, &error_abort);
+
+ x86cpu->force_cpuid_0x1f = true;
+}
+
+static uint32_t tdx_adjust_cpuid_features(X86ConfidentialGuest *cg,
+ uint32_t feature, uint32_t index,
+ int reg, uint32_t value)
+{
+ struct kvm_cpuid_entry2 *e;
+
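+    /* Force on the fixed1 bits, then clamp feature words to what TDX supports. */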
+ e = cpuid_find_entry(&tdx_fixed1_bits.cpuid, feature, index);
+ if (e) {
+ value |= cpuid_entry_get_reg(e, reg);
+ }
+
+ if (is_feature_word_cpuid(feature, index, reg)) {
+ e = cpuid_find_entry(tdx_supported_cpuid, feature, index);
+ if (e) {
+ value &= cpuid_entry_get_reg(e, reg);
+ }
+ }
+
+ return value;
+}
+
+static struct kvm_cpuid2 *tdx_fetch_cpuid(CPUState *cpu, int *ret)
+{
+ struct kvm_cpuid2 *fetch_cpuid;
+ int size = KVM_MAX_CPUID_ENTRIES;
+ Error *local_err = NULL;
+ int r;
+
+ do {
+ error_free(local_err);
+ local_err = NULL;
+
+ fetch_cpuid = g_malloc0(sizeof(*fetch_cpuid) +
+ sizeof(struct kvm_cpuid_entry2) * size);
+ fetch_cpuid->nent = size;
+ r = tdx_vcpu_ioctl(cpu, KVM_TDX_GET_CPUID, 0, fetch_cpuid, &local_err);
+ if (r == -E2BIG) {
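+            /* KVM updated nent to the required size; remember it before retrying. */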
+            size = fetch_cpuid->nent;
+            g_free(fetch_cpuid);
+ }
+ } while (r == -E2BIG);
+
+ if (r < 0) {
+ error_report_err(local_err);
+ *ret = r;
+ return NULL;
+ }
+
+ return fetch_cpuid;
+}
+
+static int tdx_check_features(X86ConfidentialGuest *cg, CPUState *cs)
+{
+ uint64_t actual, requested, unavailable, forced_on;
+    g_autofree struct kvm_cpuid2 *fetch_cpuid = NULL;
+ const char *forced_on_prefix = NULL;
+ const char *unav_prefix = NULL;
+ struct kvm_cpuid_entry2 *entry;
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ FeatureWordInfo *wi;
+ FeatureWord w;
+ bool mismatch = false;
+ int r;
+
+ fetch_cpuid = tdx_fetch_cpuid(cs, &r);
+ if (!fetch_cpuid) {
+ return r;
+ }
+
+ if (cpu->check_cpuid || cpu->enforce_cpuid) {
+ unav_prefix = "TDX doesn't support requested feature";
+ forced_on_prefix = "TDX forcibly sets the feature";
+ }
+
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ wi = &feature_word_info[w];
+ actual = 0;
+
+ switch (wi->type) {
+ case CPUID_FEATURE_WORD:
+ entry = cpuid_find_entry(fetch_cpuid, wi->cpuid.eax, wi->cpuid.ecx);
+ if (!entry) {
+ /*
+                 * If KVM doesn't report it, the leaf is totally configurable
+                 * by QEMU.
+ */
+ continue;
+ }
+
+ actual = cpuid_entry_get_reg(entry, wi->cpuid.reg);
+ break;
+ case MSR_FEATURE_WORD:
+ /*
+ * TODO:
+             * validate MSR features when KVM has an interface to report them.
+ */
+ continue;
+ }
+
+ /* Fixup for special cases */
+ switch (w) {
+ case FEAT_8000_0001_EDX:
+ /*
+             * Intel enumerates the SYSCALL bit as 1 only when the processor
+             * is in 64-bit mode; before the vCPU runs, it is not in 64-bit mode.
+ */
+ actual |= CPUID_EXT2_SYSCALL;
+ break;
+ default:
+ break;
+ }
+
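+        /* Compare requested vs. actual features in both directions. */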
+ requested = env->features[w];
+ unavailable = requested & ~actual;
+ mark_unavailable_features(cpu, w, unavailable, unav_prefix);
+ if (unavailable) {
+ mismatch = true;
+ }
+
+ forced_on = actual & ~requested;
+ mark_forced_on_features(cpu, w, forced_on, forced_on_prefix);
+ if (forced_on) {
+ mismatch = true;
+ }
+ }
+
+ if (cpu->enforce_cpuid && mismatch) {
+ return -EINVAL;
+ }
+
+ if (cpu->phys_bits != host_cpu_phys_bits()) {
+ error_report("TDX requires guest CPU physical bits (%u) "
+ "to match host CPU physical bits (%u)",
+ cpu->phys_bits, host_cpu_phys_bits());
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tdx_validate_attributes(TdxGuest *tdx, Error **errp)
+{
+ if ((tdx->attributes & ~tdx_caps->supported_attrs)) {
+ error_setg(errp, "Invalid attributes 0x%"PRIx64" for TDX VM "
+ "(KVM supported: 0x%"PRIx64")", tdx->attributes,
+ (uint64_t)tdx_caps->supported_attrs);
+ return -1;
+ }
+
+ if (tdx->attributes & ~TDX_SUPPORTED_TD_ATTRS) {
+        error_setg(errp, "Requested TD attribute bits are not supported by "
+                   "QEMU: 0x%"PRIx64" (QEMU supported: 0x%"PRIx64")",
+ tdx->attributes, (uint64_t)TDX_SUPPORTED_TD_ATTRS);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int setup_td_guest_attributes(X86CPU *x86cpu, Error **errp)
+{
+ CPUX86State *env = &x86cpu->env;
+
+ tdx_guest->attributes |= (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS) ?
+ TDX_TD_ATTRIBUTES_PKS : 0;
+ tdx_guest->attributes |= x86cpu->enable_pmu ? TDX_TD_ATTRIBUTES_PERFMON : 0;
+
+ return tdx_validate_attributes(tdx_guest, errp);
+}
+
+static int setup_td_xfam(X86CPU *x86cpu, Error **errp)
+{
+ CPUX86State *env = &x86cpu->env;
+ uint64_t xfam;
+
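+    /* XFAM is the combined mask of the XCR0 and XSS feature bits. */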
+ xfam = env->features[FEAT_XSAVE_XCR0_LO] |
+ env->features[FEAT_XSAVE_XCR0_HI] |
+ env->features[FEAT_XSAVE_XSS_LO] |
+ env->features[FEAT_XSAVE_XSS_HI];
+
+ if (xfam & ~tdx_caps->supported_xfam) {
+        error_setg(errp, "Invalid XFAM 0x%"PRIx64" for TDX VM (supported: 0x%"PRIx64")",
+ xfam, (uint64_t)tdx_caps->supported_xfam);
+ return -1;
+ }
+
+ tdx_guest->xfam = xfam;
+ return 0;
+}
+
+static void tdx_filter_cpuid(struct kvm_cpuid2 *cpuids)
+{
+ int i, dest_cnt = 0;
+ struct kvm_cpuid_entry2 *src, *dest, *conf;
+
+ for (i = 0; i < cpuids->nent; i++) {
+ src = cpuids->entries + i;
+ conf = cpuid_find_entry(&tdx_caps->cpuid, src->function, src->index);
+ if (!conf) {
+ continue;
+ }
+ dest = cpuids->entries + dest_cnt;
+
+ dest->function = src->function;
+ dest->index = src->index;
+ dest->flags = src->flags;
+ dest->eax = src->eax & conf->eax;
+ dest->ebx = src->ebx & conf->ebx;
+ dest->ecx = src->ecx & conf->ecx;
+ dest->edx = src->edx & conf->edx;
+
+ dest_cnt++;
+ }
+    cpuids->nent = dest_cnt;
+}
+
+int tdx_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ X86CPU *x86cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86cpu->env;
+ g_autofree struct kvm_tdx_init_vm *init_vm = NULL;
+ Error *local_err = NULL;
+ size_t data_len;
+ int retry = 10000;
+ int r = 0;
+
+ QEMU_LOCK_GUARD(&tdx_guest->lock);
+ if (tdx_guest->initialized) {
+ return r;
+ }
+
+ init_vm = g_malloc0(sizeof(struct kvm_tdx_init_vm) +
+ sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);
+
+ if (!kvm_check_extension(kvm_state, KVM_CAP_X86_APIC_BUS_CYCLES_NS)) {
+ error_setg(errp, "KVM doesn't support KVM_CAP_X86_APIC_BUS_CYCLES_NS");
+ return -EOPNOTSUPP;
+ }
+
+ r = kvm_vm_enable_cap(kvm_state, KVM_CAP_X86_APIC_BUS_CYCLES_NS,
+ 0, TDX_APIC_BUS_CYCLES_NS);
+ if (r < 0) {
+ error_setg_errno(errp, -r,
+ "Unable to set core crystal clock frequency to 25MHz");
+ return r;
+ }
+
+ if (env->tsc_khz && (env->tsc_khz < TDX_MIN_TSC_FREQUENCY_KHZ ||
+ env->tsc_khz > TDX_MAX_TSC_FREQUENCY_KHZ)) {
+ error_setg(errp, "Invalid TSC %"PRId64" KHz, must specify cpu_frequency "
+ "between [%d, %d] kHz", env->tsc_khz,
+ TDX_MIN_TSC_FREQUENCY_KHZ, TDX_MAX_TSC_FREQUENCY_KHZ);
+ return -EINVAL;
+ }
+
+ if (env->tsc_khz % (25 * 1000)) {
+ error_setg(errp, "Invalid TSC %"PRId64" KHz, it must be multiple of 25MHz",
+ env->tsc_khz);
+ return -EINVAL;
+ }
+
+    /* It's safe even if env->tsc_khz is 0; KVM uses the host's tsc_khz in that case. */
+ r = kvm_vm_ioctl(kvm_state, KVM_SET_TSC_KHZ, env->tsc_khz);
+ if (r < 0) {
+ error_setg_errno(errp, -r, "Unable to set TSC frequency to %"PRId64" kHz",
+ env->tsc_khz);
+ return r;
+ }
+
+ if (tdx_guest->mrconfigid) {
+ g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrconfigid,
+ strlen(tdx_guest->mrconfigid), &data_len, errp);
+ if (!data) {
+ return -1;
+ }
+ if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
+            error_setg(errp, "TDX 'mrconfigid' sha384 digest was %zu bytes, "
+ "expected %d bytes", data_len,
+ QCRYPTO_HASH_DIGEST_LEN_SHA384);
+ return -1;
+ }
+ memcpy(init_vm->mrconfigid, data, data_len);
+ }
+
+ if (tdx_guest->mrowner) {
+ g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrowner,
+ strlen(tdx_guest->mrowner), &data_len, errp);
+ if (!data) {
+ return -1;
+ }
+ if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
+            error_setg(errp, "TDX 'mrowner' sha384 digest was %zu bytes, "
+ "expected %d bytes", data_len,
+ QCRYPTO_HASH_DIGEST_LEN_SHA384);
+ return -1;
+ }
+ memcpy(init_vm->mrowner, data, data_len);
+ }
+
+ if (tdx_guest->mrownerconfig) {
+ g_autofree uint8_t *data = qbase64_decode(tdx_guest->mrownerconfig,
+ strlen(tdx_guest->mrownerconfig), &data_len, errp);
+ if (!data) {
+ return -1;
+ }
+ if (data_len != QCRYPTO_HASH_DIGEST_LEN_SHA384) {
+            error_setg(errp, "TDX 'mrownerconfig' sha384 digest was %zu bytes, "
+ "expected %d bytes", data_len,
+ QCRYPTO_HASH_DIGEST_LEN_SHA384);
+ return -1;
+ }
+ memcpy(init_vm->mrownerconfig, data, data_len);
+ }
+
+ r = setup_td_guest_attributes(x86cpu, errp);
+ if (r) {
+ return r;
+ }
+
+ r = setup_td_xfam(x86cpu, errp);
+ if (r) {
+ return r;
+ }
+
+ init_vm->cpuid.nent = kvm_x86_build_cpuid(env, init_vm->cpuid.entries, 0);
+ tdx_filter_cpuid(&init_vm->cpuid);
+
+ init_vm->attributes = tdx_guest->attributes;
+ init_vm->xfam = tdx_guest->xfam;
+
+ /*
+     * KVM_TDX_INIT_VM gets -EAGAIN when the KVM-side SEAMCALL(TDH_MNG_CREATE)
+     * gets TDX_RND_NO_ENTROPY because random number generation (e.g., RDRAND
+     * or RDSEED) is busy.
+     *
+     * Retry in that case.
+ */
+ do {
+ error_free(local_err);
+ local_err = NULL;
+ r = tdx_vm_ioctl(KVM_TDX_INIT_VM, 0, init_vm, &local_err);
+ } while (r == -EAGAIN && --retry);
+
+ if (r < 0) {
+ if (!retry) {
+            error_append_hint(&local_err, "The hardware RNG (Random Number "
+                "Generator) is kept busy, possibly maliciously via "
+                "RDRAND/RDSEED, so KVM_TDX_INIT_VM keeps failing due to "
+                "lack of entropy.\n");
+ }
+ error_propagate(errp, local_err);
+ return r;
+ }
+
+ tdx_guest->initialized = true;
+
+ return 0;
+}
+
+int tdx_parse_tdvf(void *flash_ptr, int size)
+{
+ return tdvf_parse_metadata(&tdx_guest->tdvf, flash_ptr, size);
+}
+
+static void tdx_get_quote_completion(TdxGenerateQuoteTask *task)
+{
+ TdxGuest *tdx = task->opaque;
+ int ret;
+
+    /* This request is complete; drop it from the in-flight count. */
+ qemu_mutex_lock(&tdx->lock);
+ tdx->num--;
+ qemu_mutex_unlock(&tdx->lock);
+
+ if (task->status_code == TDX_VP_GET_QUOTE_SUCCESS) {
+ ret = address_space_write(&address_space_memory, task->payload_gpa,
+ MEMTXATTRS_UNSPECIFIED, task->receive_buf,
+ task->receive_buf_received);
+ if (ret != MEMTX_OK) {
+ error_report("TDX: get-quote: failed to write quote data.");
+ } else {
+            task->hdr.out_len = cpu_to_le32(task->receive_buf_received);
+ }
+ }
+ task->hdr.error_code = cpu_to_le64(task->status_code);
+
+ /* Publish the response contents before marking this request completed. */
+ smp_wmb();
+ ret = address_space_write(&address_space_memory, task->buf_gpa,
+ MEMTXATTRS_UNSPECIFIED, &task->hdr,
+ TDX_GET_QUOTE_HDR_SIZE);
+ if (ret != MEMTX_OK) {
+ error_report("TDX: get-quote: failed to update GetQuote header.");
+ }
+
+ g_free(task->send_data);
+ g_free(task->receive_buf);
+ g_free(task);
+ object_unref(tdx);
+}
+
+void tdx_handle_get_quote(X86CPU *cpu, struct kvm_run *run)
+{
+ TdxGenerateQuoteTask *task;
+ struct tdx_get_quote_header hdr;
+ hwaddr buf_gpa = run->tdx.get_quote.gpa;
+ uint64_t buf_len = run->tdx.get_quote.size;
+
+ QEMU_BUILD_BUG_ON(sizeof(struct tdx_get_quote_header) != TDX_GET_QUOTE_HDR_SIZE);
+
+ run->tdx.get_quote.ret = TDG_VP_VMCALL_INVALID_OPERAND;
+
+ if (buf_len == 0) {
+ return;
+ }
+
+ if (!QEMU_IS_ALIGNED(buf_gpa, 4096) || !QEMU_IS_ALIGNED(buf_len, 4096)) {
+ run->tdx.get_quote.ret = TDG_VP_VMCALL_ALIGN_ERROR;
+ return;
+ }
+
+ if (address_space_read(&address_space_memory, buf_gpa, MEMTXATTRS_UNSPECIFIED,
+ &hdr, TDX_GET_QUOTE_HDR_SIZE) != MEMTX_OK) {
+ error_report("TDX: get-quote: failed to read GetQuote header.");
+ return;
+ }
+
+ if (le64_to_cpu(hdr.structure_version) != TDX_GET_QUOTE_STRUCTURE_VERSION) {
+ return;
+ }
+
+    /* Sanity check only, to reject overly large buffer sizes. */
+ if (buf_len > TDX_GET_QUOTE_MAX_BUF_LEN ||
+ le32_to_cpu(hdr.in_len) > buf_len - TDX_GET_QUOTE_HDR_SIZE) {
+ return;
+ }
+
+ if (!tdx_guest->qg_sock_addr) {
+ hdr.error_code = cpu_to_le64(TDX_VP_GET_QUOTE_QGS_UNAVAILABLE);
+ if (address_space_write(&address_space_memory, buf_gpa,
+ MEMTXATTRS_UNSPECIFIED,
+ &hdr, TDX_GET_QUOTE_HDR_SIZE) != MEMTX_OK) {
+ error_report("TDX: failed to update GetQuote header.");
+ return;
+ }
+ run->tdx.get_quote.ret = TDG_VP_VMCALL_SUCCESS;
+ return;
+ }
+
+ qemu_mutex_lock(&tdx_guest->lock);
+ if (tdx_guest->num >= TDX_MAX_GET_QUOTE_REQUEST) {
+ qemu_mutex_unlock(&tdx_guest->lock);
+ run->tdx.get_quote.ret = TDG_VP_VMCALL_RETRY;
+ return;
+ }
+ tdx_guest->num++;
+ qemu_mutex_unlock(&tdx_guest->lock);
+
+ task = g_new(TdxGenerateQuoteTask, 1);
+ task->buf_gpa = buf_gpa;
+ task->payload_gpa = buf_gpa + TDX_GET_QUOTE_HDR_SIZE;
+ task->payload_len = buf_len - TDX_GET_QUOTE_HDR_SIZE;
+ task->hdr = hdr;
+ task->completion = tdx_get_quote_completion;
+
+ task->send_data_size = le32_to_cpu(hdr.in_len);
+ task->send_data = g_malloc(task->send_data_size);
+ task->send_data_sent = 0;
+
+ if (address_space_read(&address_space_memory, task->payload_gpa,
+ MEMTXATTRS_UNSPECIFIED, task->send_data,
+ task->send_data_size) != MEMTX_OK) {
+ goto out_free;
+ }
+
+ /* Mark the buffer in-flight. */
+ hdr.error_code = cpu_to_le64(TDX_VP_GET_QUOTE_IN_FLIGHT);
+ if (address_space_write(&address_space_memory, buf_gpa,
+ MEMTXATTRS_UNSPECIFIED,
+ &hdr, TDX_GET_QUOTE_HDR_SIZE) != MEMTX_OK) {
+ goto out_free;
+ }
+
+ task->receive_buf = g_malloc0(task->payload_len);
+ task->receive_buf_received = 0;
+ task->opaque = tdx_guest;
+
+ object_ref(tdx_guest);
+ tdx_generate_quote(task, tdx_guest->qg_sock_addr);
+ run->tdx.get_quote.ret = TDG_VP_VMCALL_SUCCESS;
+ return;
+
+out_free:
+    g_free(task->send_data);
+    g_free(task);
+    /* The request never became in-flight; undo the counter increment. */
+    qemu_mutex_lock(&tdx_guest->lock);
+    tdx_guest->num--;
+    qemu_mutex_unlock(&tdx_guest->lock);
+}
+
+void tdx_handle_get_tdvmcall_info(X86CPU *cpu, struct kvm_run *run)
+{
+ if (run->tdx.get_tdvmcall_info.leaf != 1) {
+ return;
+ }
+
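+    /* Leaf 1: report the supported TDVMCALL subfunctions; only GetQuote here. */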
+ run->tdx.get_tdvmcall_info.r11 = TDG_VP_VMCALL_SUBFUNC_GET_QUOTE;
+ run->tdx.get_tdvmcall_info.r12 = 0;
+ run->tdx.get_tdvmcall_info.r13 = 0;
+ run->tdx.get_tdvmcall_info.r14 = 0;
+}
+
+static void tdx_panicked_on_fatal_error(X86CPU *cpu, uint64_t error_code,
+ char *message, uint64_t gpa)
+{
+ GuestPanicInformation *panic_info;
+
+ panic_info = g_new0(GuestPanicInformation, 1);
+ panic_info->type = GUEST_PANIC_INFORMATION_TYPE_TDX;
+ panic_info->u.tdx.error_code = (uint32_t) error_code;
+ panic_info->u.tdx.message = message;
+ panic_info->u.tdx.gpa = gpa;
+
+ qemu_system_guest_panicked(panic_info);
+}
+
+/*
+ * Only 8 registers can carry a valid ASCII byte stream forming the fatal
+ * message; their sequence is: R14, R15, RBX, RDI, RSI, R8, R9, RDX.
+ */
+#define TDX_FATAL_MESSAGE_MAX 64
+
+#define TDX_REPORT_FATAL_ERROR_GPA_VALID BIT_ULL(63)
+
+int tdx_handle_report_fatal_error(X86CPU *cpu, struct kvm_run *run)
+{
+ uint64_t error_code = run->system_event.data[R_R12];
+ uint64_t reg_mask = run->system_event.data[R_ECX];
+ char *message = NULL;
+ uint64_t *tmp;
+ uint64_t gpa = -1ull;
+
+ if (error_code & 0xffff) {
+ error_report("TDX: REPORT_FATAL_ERROR: invalid error code: 0x%"PRIx64,
+ error_code);
+ return -1;
+ }
+
+ if (reg_mask) {
+ message = g_malloc0(TDX_FATAL_MESSAGE_MAX + 1);
+ tmp = (uint64_t *)message;
+
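+/*
+ * COPY_REG copies a register's raw 8 bytes into the message buffer when its
+ * bit is set in reg_mask, in the architected order listed below.
+ */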
+#define COPY_REG(REG) \
+ do { \
+ if (reg_mask & BIT_ULL(REG)) { \
+ *(tmp++) = run->system_event.data[REG]; \
+ } \
+ } while (0)
+
+ COPY_REG(R_R14);
+ COPY_REG(R_R15);
+ COPY_REG(R_EBX);
+ COPY_REG(R_EDI);
+ COPY_REG(R_ESI);
+ COPY_REG(R_R8);
+ COPY_REG(R_R9);
+ COPY_REG(R_EDX);
+ *((char *)tmp) = '\0';
+ }
+#undef COPY_REG
+
+ if (error_code & TDX_REPORT_FATAL_ERROR_GPA_VALID) {
+ gpa = run->system_event.data[R_R13];
+ }
+
+ tdx_panicked_on_fatal_error(cpu, error_code, message, gpa);
+
+ return -1;
+}
+
+static bool tdx_guest_get_sept_ve_disable(Object *obj, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ return !!(tdx->attributes & TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE);
+}
+
+static void tdx_guest_set_sept_ve_disable(Object *obj, bool value, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ if (value) {
+ tdx->attributes |= TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;
+ } else {
+ tdx->attributes &= ~TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;
+ }
+}
+
+static char *tdx_guest_get_mrconfigid(Object *obj, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ return g_strdup(tdx->mrconfigid);
+}
+
+static void tdx_guest_set_mrconfigid(Object *obj, const char *value, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ g_free(tdx->mrconfigid);
+ tdx->mrconfigid = g_strdup(value);
+}
+
+static char *tdx_guest_get_mrowner(Object *obj, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ return g_strdup(tdx->mrowner);
+}
+
+static void tdx_guest_set_mrowner(Object *obj, const char *value, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ g_free(tdx->mrowner);
+ tdx->mrowner = g_strdup(value);
+}
+
+static char *tdx_guest_get_mrownerconfig(Object *obj, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ return g_strdup(tdx->mrownerconfig);
+}
+
+static void tdx_guest_set_mrownerconfig(Object *obj, const char *value, Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ g_free(tdx->mrownerconfig);
+ tdx->mrownerconfig = g_strdup(value);
+}
+
+static void tdx_guest_get_qgs(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ if (!tdx->qg_sock_addr) {
+ error_setg(errp, "quote-generation-socket is not set");
+ return;
+ }
+ visit_type_SocketAddress(v, name, &tdx->qg_sock_addr, errp);
+}
+
+static void tdx_guest_set_qgs(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ TdxGuest *tdx = TDX_GUEST(obj);
+ SocketAddress *sock = NULL;
+
+ if (!visit_type_SocketAddress(v, name, &sock, errp)) {
+ return;
+ }
+
+ if (tdx->qg_sock_addr) {
+ qapi_free_SocketAddress(tdx->qg_sock_addr);
+ }
+
+ tdx->qg_sock_addr = sock;
+}
+
+/* tdx guest */
+OBJECT_DEFINE_TYPE_WITH_INTERFACES(TdxGuest,
+ tdx_guest,
+ TDX_GUEST,
+ X86_CONFIDENTIAL_GUEST,
+ { TYPE_USER_CREATABLE },
+ { NULL })
+
+static void tdx_guest_init(Object *obj)
+{
+ ConfidentialGuestSupport *cgs = CONFIDENTIAL_GUEST_SUPPORT(obj);
+ TdxGuest *tdx = TDX_GUEST(obj);
+
+ qemu_mutex_init(&tdx->lock);
+
+ cgs->require_guest_memfd = true;
+ tdx->attributes = TDX_TD_ATTRIBUTES_SEPT_VE_DISABLE;
+
+ object_property_add_uint64_ptr(obj, "attributes", &tdx->attributes,
+ OBJ_PROP_FLAG_READWRITE);
+ object_property_add_bool(obj, "sept-ve-disable",
+ tdx_guest_get_sept_ve_disable,
+ tdx_guest_set_sept_ve_disable);
+ object_property_add_str(obj, "mrconfigid",
+ tdx_guest_get_mrconfigid,
+ tdx_guest_set_mrconfigid);
+ object_property_add_str(obj, "mrowner",
+ tdx_guest_get_mrowner, tdx_guest_set_mrowner);
+ object_property_add_str(obj, "mrownerconfig",
+ tdx_guest_get_mrownerconfig,
+ tdx_guest_set_mrownerconfig);
+
+ object_property_add(obj, "quote-generation-socket", "SocketAddress",
+ tdx_guest_get_qgs,
+ tdx_guest_set_qgs,
+ NULL, NULL);
+}
+
+static void tdx_guest_finalize(Object *obj)
+{
+}
+
+static void tdx_guest_class_init(ObjectClass *oc, const void *data)
+{
+ ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc);
+ X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc);
+
+ klass->kvm_init = tdx_kvm_init;
+ x86_klass->kvm_type = tdx_kvm_type;
+ x86_klass->cpu_instance_init = tdx_cpu_instance_init;
+ x86_klass->adjust_cpuid_features = tdx_adjust_cpuid_features;
+ x86_klass->check_features = tdx_check_features;
+}
diff --git a/target/i386/kvm/tdx.h b/target/i386/kvm/tdx.h
new file mode 100644
index 0000000..35a09c1
--- /dev/null
+++ b/target/i386/kvm/tdx.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef QEMU_I386_TDX_H
+#define QEMU_I386_TDX_H
+
+#ifndef CONFIG_USER_ONLY
+#include CONFIG_DEVICES /* CONFIG_TDX */
+#endif
+
+#include "confidential-guest.h"
+#include "cpu.h"
+#include "hw/i386/tdvf.h"
+
+#include "tdx-quote-generator.h"
+
+#define TYPE_TDX_GUEST "tdx-guest"
+#define TDX_GUEST(obj) OBJECT_CHECK(TdxGuest, (obj), TYPE_TDX_GUEST)
+
+typedef struct TdxGuestClass {
+ X86ConfidentialGuestClass parent_class;
+} TdxGuestClass;
+
+/* TDX requires the APIC bus frequency to be 25 MHz, i.e. 40 ns per cycle. */
+#define TDX_APIC_BUS_CYCLES_NS 40
+
+#define TDVMCALL_GET_TD_VM_CALL_INFO 0x10000
+#define TDVMCALL_GET_QUOTE 0x10002
+
+#define TDG_VP_VMCALL_SUCCESS 0x0000000000000000ULL
+#define TDG_VP_VMCALL_RETRY 0x0000000000000001ULL
+#define TDG_VP_VMCALL_INVALID_OPERAND 0x8000000000000000ULL
+#define TDG_VP_VMCALL_GPA_INUSE 0x8000000000000001ULL
+#define TDG_VP_VMCALL_ALIGN_ERROR 0x8000000000000002ULL
+
+#define TDG_VP_VMCALL_SUBFUNC_GET_QUOTE 0x0000000000000001ULL
+
+enum TdxRamType {
+ TDX_RAM_UNACCEPTED,
+ TDX_RAM_ADDED,
+};
+
+typedef struct TdxRamEntry {
+ uint64_t address;
+ uint64_t length;
+ enum TdxRamType type;
+} TdxRamEntry;
+
+typedef struct TdxGuest {
+ X86ConfidentialGuest parent_obj;
+
+ QemuMutex lock;
+
+ bool initialized;
+ uint64_t attributes; /* TD attributes */
+ uint64_t xfam;
+ char *mrconfigid; /* base64 encoded sha384 digest */
+ char *mrowner; /* base64 encoded sha384 digest */
+ char *mrownerconfig; /* base64 encoded sha384 digest */
+
+ MemoryRegion *tdvf_mr;
+ TdxFirmware tdvf;
+
+ uint32_t nr_ram_entries;
+ TdxRamEntry *ram_entries;
+
+ /* GetQuote */
+ SocketAddress *qg_sock_addr;
+ int num;
+} TdxGuest;
+
+#ifdef CONFIG_TDX
+bool is_tdx_vm(void);
+#else
+#define is_tdx_vm() 0
+#endif /* CONFIG_TDX */
+
+int tdx_pre_create_vcpu(CPUState *cpu, Error **errp);
+void tdx_set_tdvf_region(MemoryRegion *tdvf_mr);
+int tdx_parse_tdvf(void *flash_ptr, int size);
+int tdx_handle_report_fatal_error(X86CPU *cpu, struct kvm_run *run);
+void tdx_handle_get_quote(X86CPU *cpu, struct kvm_run *run);
+void tdx_handle_get_tdvmcall_info(X86CPU *cpu, struct kvm_run *run);
+
+#endif /* QEMU_I386_TDX_H */
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 6cb561c..dd2dac1 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -1060,9 +1060,8 @@ static bool tsc_khz_needed(void *opaque)
{
X86CPU *cpu = opaque;
CPUX86State *env = &cpu->env;
- MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
- X86MachineClass *x86mc = X86_MACHINE_CLASS(mc);
- return env->tsc_khz && x86mc->save_tsc_khz;
+
+ return env->tsc_khz;
}
static const VMStateDescription vmstate_tsc_khz = {
diff --git a/target/i386/monitor.c b/target/i386/monitor.c
index 3ea92b0..3c9b6ca 100644
--- a/target/i386/monitor.c
+++ b/target/i386/monitor.c
@@ -29,7 +29,6 @@
#include "monitor/hmp.h"
#include "qobject/qdict.h"
#include "qapi/error.h"
-#include "qapi/qapi-commands-misc-target.h"
#include "qapi/qapi-commands-misc.h"
/* Perform linear address sign extension */
diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h
index f0aa189..a2e4d48 100644
--- a/target/i386/ops_sse.h
+++ b/target/i386/ops_sse.h
@@ -842,7 +842,7 @@ int64_t helper_cvttsd2sq(CPUX86State *env, ZMMReg *s)
void glue(helper_rsqrtps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ int old_flags = get_float_exception_flags(&env->sse_status);
int i;
for (i = 0; i < 2 << SHIFT; i++) {
d->ZMM_S(i) = float32_div(float32_one,
@@ -855,7 +855,7 @@ void glue(helper_rsqrtps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s)
#if SHIFT == 1
void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *v, ZMMReg *s)
{
- uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ int old_flags = get_float_exception_flags(&env->sse_status);
int i;
d->ZMM_S(0) = float32_div(float32_one,
float32_sqrt(s->ZMM_S(0), &env->sse_status),
@@ -869,7 +869,7 @@ void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *v, ZMMReg *s)
void glue(helper_rcpps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ int old_flags = get_float_exception_flags(&env->sse_status);
int i;
for (i = 0; i < 2 << SHIFT; i++) {
d->ZMM_S(i) = float32_div(float32_one, s->ZMM_S(i), &env->sse_status);
@@ -880,7 +880,7 @@ void glue(helper_rcpps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s)
#if SHIFT == 1
void helper_rcpss(CPUX86State *env, ZMMReg *d, ZMMReg *v, ZMMReg *s)
{
- uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ int old_flags = get_float_exception_flags(&env->sse_status);
int i;
d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status);
for (i = 1; i < 2 << SHIFT; i++) {
@@ -1714,7 +1714,7 @@ void glue(helper_phminposuw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
uint32_t mode)
{
- uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ int old_flags = get_float_exception_flags(&env->sse_status);
signed char prev_rounding_mode;
int i;
@@ -1738,7 +1738,7 @@ void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
uint32_t mode)
{
- uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ int old_flags = get_float_exception_flags(&env->sse_status);
signed char prev_rounding_mode;
int i;
@@ -1763,7 +1763,7 @@ void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s,
uint32_t mode)
{
- uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ int old_flags = get_float_exception_flags(&env->sse_status);
signed char prev_rounding_mode;
int i;
@@ -1788,7 +1788,7 @@ void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s,
void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s,
uint32_t mode)
{
- uint8_t old_flags = get_float_exception_flags(&env->sse_status);
+ int old_flags = get_float_exception_flags(&env->sse_status);
signed char prev_rounding_mode;
int i;
diff --git a/target/i386/sev-system-stub.c b/target/i386/sev-system-stub.c
index d5bf886..7c5c02a 100644
--- a/target/i386/sev-system-stub.c
+++ b/target/i386/sev-system-stub.c
@@ -14,34 +14,9 @@
#include "qemu/osdep.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
-#include "qapi/qapi-commands-misc-target.h"
#include "qapi/error.h"
#include "sev.h"
-SevInfo *qmp_query_sev(Error **errp)
-{
- error_setg(errp, "SEV is not available in this QEMU");
- return NULL;
-}
-
-SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)
-{
- error_setg(errp, "SEV is not available in this QEMU");
- return NULL;
-}
-
-SevCapability *qmp_query_sev_capabilities(Error **errp)
-{
- error_setg(errp, "SEV is not available in this QEMU");
- return NULL;
-}
-
-void qmp_sev_inject_launch_secret(const char *packet_header, const char *secret,
- bool has_gpa, uint64_t gpa, Error **errp)
-{
- error_setg(errp, "SEV is not available in this QEMU");
-}
-
int sev_encrypt_flash(hwaddr gpa, uint8_t *ptr, uint64_t len, Error **errp)
{
g_assert_not_reached();
@@ -56,13 +31,6 @@ int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size)
g_assert_not_reached();
}
-SevAttestationReport *qmp_query_sev_attestation_report(const char *mnonce,
- Error **errp)
-{
- error_setg(errp, "SEV is not available in this QEMU");
- return NULL;
-}
-
void hmp_info_sev(Monitor *mon, const QDict *qdict)
{
monitor_printf(mon, "SEV is not available in this QEMU\n");
diff --git a/target/i386/sev.c b/target/i386/sev.c
index 7ee700d..1a12f06 100644
--- a/target/i386/sev.c
+++ b/target/i386/sev.c
@@ -37,7 +37,7 @@
#include "qom/object.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
-#include "qapi/qapi-commands-misc-target.h"
+#include "qapi/qapi-commands-misc-i386.h"
#include "confidential-guest.h"
#include "hw/i386/pc.h"
#include "system/address-spaces.h"
@@ -212,14 +212,6 @@ static const char *const sev_fw_errlist[] = {
#define SEV_FW_MAX_ERROR ARRAY_SIZE(sev_fw_errlist)
-/* <linux/kvm.h> doesn't expose this, so re-use the max from kvm.c */
-#define KVM_MAX_CPUID_ENTRIES 100
-
-typedef struct KvmCpuidInfo {
- struct kvm_cpuid2 cpuid;
- struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
-} KvmCpuidInfo;
-
#define SNP_CPUID_FUNCTION_MAXCOUNT 64
#define SNP_CPUID_FUNCTION_UNKNOWN 0xFFFFFFFF
@@ -947,7 +939,7 @@ out:
}
static uint32_t
-sev_snp_mask_cpuid_features(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index,
+sev_snp_adjust_cpuid_features(X86ConfidentialGuest *cg, uint32_t feature, uint32_t index,
int reg, uint32_t value)
{
switch (feature) {
@@ -2405,7 +2397,7 @@ sev_snp_guest_class_init(ObjectClass *oc, const void *data)
klass->launch_finish = sev_snp_launch_finish;
klass->launch_update_data = sev_snp_launch_update_data;
klass->kvm_init = sev_snp_kvm_init;
- x86_klass->mask_cpuid_features = sev_snp_mask_cpuid_features;
+ x86_klass->adjust_cpuid_features = sev_snp_adjust_cpuid_features;
x86_klass->kvm_type = sev_snp_kvm_type;
object_class_property_add(oc, "policy", "uint64",
diff --git a/target/i386/tcg/access.c b/target/i386/tcg/access.c
index 0fdd587..97e3f0e 100644
--- a/target/i386/tcg/access.c
+++ b/target/i386/tcg/access.c
@@ -4,7 +4,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "accel/tcg/cpu-ldst.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/probe.h"
#include "exec/target_page.h"
#include "access.h"
diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc
index cda32ee..55216e0 100644
--- a/target/i386/tcg/decode-new.c.inc
+++ b/target/i386/tcg/decode-new.c.inc
@@ -2542,7 +2542,13 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
s->has_modrm = false;
s->prefix = 0;
- next_byte:
+ next_byte:;
+#ifdef TARGET_X86_64
+    /* A REX prefix that is followed by another prefix is dropped; reset it here. */
+ int rex;
+ rex = -1;
+ next_byte_rex:
+#endif
b = x86_ldub_code(env, s);
/* Collect prefixes. */
@@ -2585,13 +2591,12 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
#ifdef TARGET_X86_64
case 0x40 ... 0x4f:
if (CODE64(s)) {
- /* REX prefix */
- s->prefix |= PREFIX_REX;
- s->vex_w = (b >> 3) & 1;
- s->rex_r = (b & 0x4) << 1;
- s->rex_x = (b & 0x2) << 2;
- s->rex_b = (b & 0x1) << 3;
- goto next_byte;
+ /*
+ * REX prefix; ignored unless it is the last prefix, so
+ * for now just stash it
+ */
+ rex = b;
+ goto next_byte_rex;
}
break;
#endif
@@ -2618,10 +2623,13 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
/* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ
- | PREFIX_LOCK | PREFIX_DATA | PREFIX_REX)) {
+ | PREFIX_LOCK | PREFIX_DATA)) {
goto illegal_op;
}
#ifdef TARGET_X86_64
+ if (rex != -1) {
+ goto illegal_op;
+ }
s->rex_r = (~vex2 >> 4) & 8;
#endif
if (b == 0xc5) {
@@ -2661,6 +2669,16 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
/* Post-process prefixes. */
if (CODE64(s)) {
+#ifdef TARGET_X86_64
+ if (rex != -1) {
+ s->prefix |= PREFIX_REX;
+ s->vex_w = (rex >> 3) & 1;
+ s->rex_r = (rex & 0x4) << 1;
+ s->rex_x = (rex & 0x2) << 2;
+ s->rex_b = (rex & 0x1) << 3;
+ }
+#endif
+
/*
* In 64-bit mode, the default data size is 32-bit. Select 64-bit
* data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
index ca6bc2e..1a7fab93 100644
--- a/target/i386/tcg/emit.c.inc
+++ b/target/i386/tcg/emit.c.inc
@@ -19,16 +19,6 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-/*
- * Sometimes, knowing what the backend has can produce better code.
- * The exact opcode to check depends on 32- vs. 64-bit.
- */
-#ifdef TARGET_X86_64
-#define INDEX_op_extract2_tl INDEX_op_extract2_i64
-#else
-#define INDEX_op_extract2_tl INDEX_op_extract2_i32
-#endif
-
#define MMX_OFFSET(reg) \
({ assert((reg) >= 0 && (reg) <= 7); \
offsetof(CPUX86State, fpregs[reg].mmx); })
@@ -352,7 +342,7 @@ static void gen_writeback(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv
break;
case X86_OP_SEG:
/* Note that gen_movl_seg takes care of interrupt shadow and TF. */
- gen_movl_seg(s, op->n, s->T0);
+ gen_movl_seg(s, op->n, v, op->n == R_SS);
break;
case X86_OP_INT:
if (op->has_ea) {
@@ -2392,7 +2382,7 @@ static void gen_lxx_seg(DisasContext *s, X86DecodedInsn *decode, int seg)
gen_op_ld_v(s, MO_16, s->T1, s->A0);
/* load the segment here to handle exceptions properly */
- gen_movl_seg(s, seg, s->T1);
+ gen_movl_seg(s, seg, s->T1, false);
}
static void gen_LDS(DisasContext *s, X86DecodedInsn *decode)
@@ -3023,7 +3013,7 @@ static void gen_PMOVMSKB(DisasContext *s, X86DecodedInsn *decode)
tcg_gen_ld8u_tl(s->T0, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len - 1)));
while (vec_len > 8) {
vec_len -= 8;
- if (tcg_op_supported(INDEX_op_extract2_tl, TCG_TYPE_TL, 0)) {
+ if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_TL, 0)) {
/*
* Load the next byte of the result into the high byte of T.
* TCG does a similar expansion of deposit to shl+extract2; by
diff --git a/target/i386/tcg/excp_helper.c b/target/i386/tcg/excp_helper.c
index de71e68..6fb8036 100644
--- a/target/i386/tcg/excp_helper.c
+++ b/target/i386/tcg/excp_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "qemu/log.h"
#include "system/runstate.h"
#include "exec/helper-proto.h"
diff --git a/target/i386/tcg/fpu_helper.c b/target/i386/tcg/fpu_helper.c
index 1cbadb1..b3b2382 100644
--- a/target/i386/tcg/fpu_helper.c
+++ b/target/i386/tcg/fpu_helper.c
@@ -189,25 +189,25 @@ void cpu_init_fp_statuses(CPUX86State *env)
set_float_default_nan_pattern(0b11000000, &env->mmx_status);
set_float_default_nan_pattern(0b11000000, &env->sse_status);
/*
- * TODO: x86 does flush-to-zero detection after rounding (the SDM
+ * x86 does flush-to-zero detection after rounding (the SDM
* section 10.2.3.3 on the FTZ bit of MXCSR says that we flush
* when we detect underflow, which x86 does after rounding).
*/
- set_float_ftz_detection(float_ftz_before_rounding, &env->fp_status);
- set_float_ftz_detection(float_ftz_before_rounding, &env->mmx_status);
- set_float_ftz_detection(float_ftz_before_rounding, &env->sse_status);
+ set_float_ftz_detection(float_ftz_after_rounding, &env->fp_status);
+ set_float_ftz_detection(float_ftz_after_rounding, &env->mmx_status);
+ set_float_ftz_detection(float_ftz_after_rounding, &env->sse_status);
}
-static inline uint8_t save_exception_flags(CPUX86State *env)
+static inline int save_exception_flags(CPUX86State *env)
{
- uint8_t old_flags = get_float_exception_flags(&env->fp_status);
+ int old_flags = get_float_exception_flags(&env->fp_status);
set_float_exception_flags(0, &env->fp_status);
return old_flags;
}
-static void merge_exception_flags(CPUX86State *env, uint8_t old_flags)
+static void merge_exception_flags(CPUX86State *env, int old_flags)
{
- uint8_t new_flags = get_float_exception_flags(&env->fp_status);
+ int new_flags = get_float_exception_flags(&env->fp_status);
float_raise(old_flags, &env->fp_status);
fpu_set_exception(env,
((new_flags & float_flag_invalid ? FPUS_IE : 0) |
@@ -215,12 +215,12 @@ static void merge_exception_flags(CPUX86State *env, uint8_t old_flags)
(new_flags & float_flag_overflow ? FPUS_OE : 0) |
(new_flags & float_flag_underflow ? FPUS_UE : 0) |
(new_flags & float_flag_inexact ? FPUS_PE : 0) |
- (new_flags & float_flag_input_denormal_flushed ? FPUS_DE : 0)));
+ (new_flags & float_flag_input_denormal_used ? FPUS_DE : 0)));
}
static inline floatx80 helper_fdiv(CPUX86State *env, floatx80 a, floatx80 b)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
floatx80 ret = floatx80_div(a, b, &env->fp_status);
merge_exception_flags(env, old_flags);
return ret;
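
The widening from uint8_t to int is needed because the softfloat exception-flag set no longer fits in eight bits; in particular float_flag_input_denormal_used, consumed in merge_exception_flags() above, is assumed here to live above bit 7. A sketch of the truncation the old type would cause (flag values assumed, not taken from softfloat.h):

    int flags = get_float_exception_flags(&env->fp_status);
    uint8_t truncated = (uint8_t)flags;        /* high flag bits lost */
    if ((flags & float_flag_input_denormal_used) &&
        !(truncated & float_flag_input_denormal_used)) {
        /* With the old uint8_t prototype, FPUS_DE could never be set. */
    }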
@@ -240,7 +240,7 @@ static void fpu_raise_exception(CPUX86State *env, uintptr_t retaddr)
void helper_flds_FT0(CPUX86State *env, uint32_t val)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
union {
float32 f;
uint32_t i;
@@ -253,7 +253,7 @@ void helper_flds_FT0(CPUX86State *env, uint32_t val)
void helper_fldl_FT0(CPUX86State *env, uint64_t val)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
union {
float64 f;
uint64_t i;
@@ -271,7 +271,7 @@ void helper_fildl_FT0(CPUX86State *env, int32_t val)
void helper_flds_ST0(CPUX86State *env, uint32_t val)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int new_fpstt;
union {
float32 f;
@@ -288,7 +288,7 @@ void helper_flds_ST0(CPUX86State *env, uint32_t val)
void helper_fldl_ST0(CPUX86State *env, uint64_t val)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int new_fpstt;
union {
float64 f;
@@ -338,7 +338,7 @@ void helper_fildll_ST0(CPUX86State *env, int64_t val)
uint32_t helper_fsts_ST0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
union {
float32 f;
uint32_t i;
@@ -351,7 +351,7 @@ uint32_t helper_fsts_ST0(CPUX86State *env)
uint64_t helper_fstl_ST0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
union {
float64 f;
uint64_t i;
@@ -364,7 +364,7 @@ uint64_t helper_fstl_ST0(CPUX86State *env)
int32_t helper_fist_ST0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int32_t val;
val = floatx80_to_int32(ST0, &env->fp_status);
@@ -378,7 +378,7 @@ int32_t helper_fist_ST0(CPUX86State *env)
int32_t helper_fistl_ST0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int32_t val;
val = floatx80_to_int32(ST0, &env->fp_status);
@@ -391,7 +391,7 @@ int32_t helper_fistl_ST0(CPUX86State *env)
int64_t helper_fistll_ST0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int64_t val;
val = floatx80_to_int64(ST0, &env->fp_status);
@@ -404,7 +404,7 @@ int64_t helper_fistll_ST0(CPUX86State *env)
int32_t helper_fistt_ST0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int32_t val;
val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
@@ -418,7 +418,7 @@ int32_t helper_fistt_ST0(CPUX86State *env)
int32_t helper_fisttl_ST0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int32_t val;
val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
@@ -431,7 +431,7 @@ int32_t helper_fisttl_ST0(CPUX86State *env)
int64_t helper_fisttll_ST0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int64_t val;
val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
@@ -527,7 +527,7 @@ static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
void helper_fcom_ST0_FT0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
FloatRelation ret;
ret = floatx80_compare(ST0, FT0, &env->fp_status);
@@ -537,7 +537,7 @@ void helper_fcom_ST0_FT0(CPUX86State *env)
void helper_fucom_ST0_FT0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
FloatRelation ret;
ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
@@ -549,7 +549,7 @@ static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
void helper_fcomi_ST0_FT0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int eflags;
FloatRelation ret;
@@ -562,7 +562,7 @@ void helper_fcomi_ST0_FT0(CPUX86State *env)
void helper_fucomi_ST0_FT0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int eflags;
FloatRelation ret;
@@ -575,28 +575,28 @@ void helper_fucomi_ST0_FT0(CPUX86State *env)
void helper_fadd_ST0_FT0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
ST0 = floatx80_add(ST0, FT0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fmul_ST0_FT0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fsub_ST0_FT0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fsubr_ST0_FT0(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
@@ -615,28 +615,28 @@ void helper_fdivr_ST0_FT0(CPUX86State *env)
void helper_fadd_STN_ST0(CPUX86State *env, int st_index)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fmul_STN_ST0(CPUX86State *env, int st_index)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fsub_STN_ST0(CPUX86State *env, int st_index)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fsubr_STN_ST0(CPUX86State *env, int st_index)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
merge_exception_flags(env, old_flags);
}
@@ -861,7 +861,7 @@ void helper_fbld_ST0(CPUX86State *env, target_ulong ptr)
void helper_fbst_ST0(CPUX86State *env, target_ulong ptr)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
int v;
target_ulong mem_ref, mem_end;
int64_t val;
@@ -1136,7 +1136,7 @@ static const struct f2xm1_data f2xm1_table[65] = {
void helper_f2xm1(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
uint64_t sig = extractFloatx80Frac(ST0);
int32_t exp = extractFloatx80Exp(ST0);
bool sign = extractFloatx80Sign(ST0);
@@ -1369,7 +1369,7 @@ static const struct fpatan_data fpatan_table[9] = {
void helper_fpatan(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
uint64_t arg0_sig = extractFloatx80Frac(ST0);
int32_t arg0_exp = extractFloatx80Exp(ST0);
bool arg0_sign = extractFloatx80Sign(ST0);
@@ -1808,7 +1808,7 @@ void helper_fpatan(CPUX86State *env)
void helper_fxtract(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
CPU_LDoubleU temp;
temp.d = ST0;
@@ -1857,7 +1857,7 @@ void helper_fxtract(CPUX86State *env)
static void helper_fprem_common(CPUX86State *env, bool mod)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
uint64_t quotient;
CPU_LDoubleU temp0, temp1;
int exp0, exp1, expdiff;
@@ -2053,7 +2053,7 @@ static void helper_fyl2x_common(CPUX86State *env, floatx80 arg, int32_t *exp,
void helper_fyl2xp1(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
uint64_t arg0_sig = extractFloatx80Frac(ST0);
int32_t arg0_exp = extractFloatx80Exp(ST0);
bool arg0_sign = extractFloatx80Sign(ST0);
@@ -2151,7 +2151,7 @@ void helper_fyl2xp1(CPUX86State *env)
void helper_fyl2x(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
uint64_t arg0_sig = extractFloatx80Frac(ST0);
int32_t arg0_exp = extractFloatx80Exp(ST0);
bool arg0_sign = extractFloatx80Sign(ST0);
@@ -2298,7 +2298,7 @@ void helper_fyl2x(CPUX86State *env)
void helper_fsqrt(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
if (floatx80_is_neg(ST0)) {
env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
env->fpus |= 0x400;
@@ -2324,14 +2324,14 @@ void helper_fsincos(CPUX86State *env)
void helper_frndint(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
ST0 = floatx80_round_to_int(ST0, &env->fp_status);
merge_exception_flags(env, old_flags);
}
void helper_fscale(CPUX86State *env)
{
- uint8_t old_flags = save_exception_flags(env);
+ int old_flags = save_exception_flags(env);
if (floatx80_invalid_encoding(ST1, &env->fp_status) ||
floatx80_invalid_encoding(ST0, &env->fp_status)) {
float_raise(float_flag_invalid, &env->fp_status);
@@ -2369,7 +2369,7 @@ void helper_fscale(CPUX86State *env)
} else {
int n;
FloatX80RoundPrec save = env->fp_status.floatx80_rounding_precision;
- uint8_t save_flags = get_float_exception_flags(&env->fp_status);
+ int save_flags = get_float_exception_flags(&env->fp_status);
set_float_exception_flags(0, &env->fp_status);
n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
set_float_exception_flags(save_flags, &env->fp_status);
@@ -3254,6 +3254,7 @@ void update_mxcsr_status(CPUX86State *env)
/* Set exception flags. */
set_float_exception_flags((mxcsr & FPUS_IE ? float_flag_invalid : 0) |
+ (mxcsr & FPUS_DE ? float_flag_input_denormal_used : 0) |
(mxcsr & FPUS_ZE ? float_flag_divbyzero : 0) |
(mxcsr & FPUS_OE ? float_flag_overflow : 0) |
(mxcsr & FPUS_UE ? float_flag_underflow : 0) |
@@ -3269,15 +3270,9 @@ void update_mxcsr_status(CPUX86State *env)
void update_mxcsr_from_sse_status(CPUX86State *env)
{
- uint8_t flags = get_float_exception_flags(&env->sse_status);
- /*
- * The MXCSR denormal flag has opposite semantics to
- * float_flag_input_denormal_flushed (the softfloat code sets that flag
- * only when flushing input denormals to zero, but SSE sets it
- * only when not flushing them to zero), so is not converted
- * here.
- */
+ int flags = get_float_exception_flags(&env->sse_status);
env->mxcsr |= ((flags & float_flag_invalid ? FPUS_IE : 0) |
+ (flags & float_flag_input_denormal_used ? FPUS_DE : 0) |
(flags & float_flag_divbyzero ? FPUS_ZE : 0) |
(flags & float_flag_overflow ? FPUS_OE : 0) |
(flags & float_flag_underflow ? FPUS_UE : 0) |
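
With float_flag_input_denormal_used now matching MXCSR's DE semantics in both directions, the bit survives a save/restore cycle. A hedged round-trip sketch (helper names from this file, control flow assumed):

    env->mxcsr |= FPUS_DE;
    update_mxcsr_status(env);            /* DE -> input_denormal_used */
    env->mxcsr &= ~FPUS_DE;
    update_mxcsr_from_sse_status(env);   /* input_denormal_used -> DE */
    assert(env->mxcsr & FPUS_DE);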
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
index 54d8453..be011b0 100644
--- a/target/i386/tcg/helper-tcg.h
+++ b/target/i386/tcg/helper-tcg.h
@@ -20,7 +20,6 @@
#ifndef I386_HELPER_TCG_H
#define I386_HELPER_TCG_H
-#include "exec/exec-all.h"
#include "qemu/host-utils.h"
/* Maximum instruction code size */
@@ -98,7 +97,7 @@ static inline unsigned int compute_pf(uint8_t x)
/* misc_helper.c */
void cpu_load_eflags(CPUX86State *env, int eflags, int update_mask);
-/* sysemu/svm_helper.c */
+/* system/svm_helper.c */
#ifndef CONFIG_USER_ONLY
G_NORETURN void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code,
uint64_t exit_info_1, uintptr_t retaddr);
@@ -116,7 +115,7 @@ int exception_has_error_code(int intno);
/* smm_helper.c */
void do_smm_enter(X86CPU *cpu);
-/* sysemu/bpt_helper.c */
+/* system/bpt_helper.c */
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update);
/*
diff --git a/target/i386/tcg/int_helper.c b/target/i386/tcg/int_helper.c
index 1a02e9d..46741d9 100644
--- a/target/i386/tcg/int_helper.c
+++ b/target/i386/tcg/int_helper.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "qapi/error.h"
diff --git a/target/i386/tcg/mem_helper.c b/target/i386/tcg/mem_helper.c
index 84a0815..9e7c2d8 100644
--- a/target/i386/tcg/mem_helper.c
+++ b/target/i386/tcg/mem_helper.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
diff --git a/target/i386/tcg/mpx_helper.c b/target/i386/tcg/mpx_helper.c
index a0f816d..fa8abcc 100644
--- a/target/i386/tcg/mpx_helper.c
+++ b/target/i386/tcg/mpx_helper.c
@@ -21,7 +21,6 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "accel/tcg/cpu-ldst.h"
-#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "helper-tcg.h"
diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c
index 3af902e..071f3fb 100644
--- a/target/i386/tcg/seg_helper.c
+++ b/target/i386/tcg/seg_helper.c
@@ -22,8 +22,8 @@
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"
@@ -326,10 +326,10 @@ static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
-/* return 0 if switching to a 16-bit selector */
-static int switch_tss_ra(CPUX86State *env, int tss_selector,
- uint32_t e1, uint32_t e2, int source,
- uint32_t next_eip, uintptr_t retaddr)
+static void switch_tss_ra(CPUX86State *env, int tss_selector,
+ uint32_t e1, uint32_t e2, int source,
+ uint32_t next_eip, bool has_error_code,
+ uint32_t error_code, uintptr_t retaddr)
{
int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i;
target_ulong tss_base;
@@ -473,10 +473,6 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector,
new_segs[R_GS] = 0;
new_trap = 0;
}
- /* XXX: avoid a compiler warning, see
- http://support.amd.com/us/Processor_TechDocs/24593.pdf
- chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
- (void)new_trap;
/* clear busy bit (it is restartable) */
if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
@@ -599,14 +595,43 @@ static int switch_tss_ra(CPUX86State *env, int tss_selector,
cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
}
#endif
- return type >> 3;
+
+ if (has_error_code) {
+ int cpl = env->hflags & HF_CPL_MASK;
+ StackAccess sa;
+
+ /* push the error code */
+ sa.env = env;
+ sa.ra = retaddr;
+ sa.mmu_index = x86_mmu_index_pl(env, cpl);
+ sa.sp = env->regs[R_ESP];
+ if (env->segs[R_SS].flags & DESC_B_MASK) {
+ sa.sp_mask = 0xffffffff;
+ } else {
+ sa.sp_mask = 0xffff;
+ }
+ sa.ss_base = env->segs[R_SS].base;
+ if (type & 8) {
+ pushl(&sa, error_code);
+ } else {
+ pushw(&sa, error_code);
+ }
+ SET_ESP(sa.sp, sa.sp_mask);
+ }
+
+ if (new_trap) {
+ env->dr[6] |= DR6_BT;
+ raise_exception_ra(env, EXCP01_DB, retaddr);
+ }
}
-static int switch_tss(CPUX86State *env, int tss_selector,
- uint32_t e1, uint32_t e2, int source,
- uint32_t next_eip)
+static void switch_tss(CPUX86State *env, int tss_selector,
+ uint32_t e1, uint32_t e2, int source,
+ uint32_t next_eip, bool has_error_code,
+ int error_code)
{
- return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
+ switch_tss_ra(env, tss_selector, e1, e2, source, next_eip,
+ has_error_code, error_code, 0);
}
static inline unsigned int get_sp_mask(unsigned int e2)
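
Moving the error-code push into switch_tss_ra() means it lands on the stack of the incoming task, and the new TSS type (bit 3 distinguishes a 386 TSS from a 286 one) selects a 32- or 16-bit push. For reference, a sketch of the masked push driven by the StackAccess fields (mirroring pushl(); exact body assumed):

    static void pushl(StackAccess *sa, uint32_t val)
    {
        sa->sp -= 4;
        /* sp wraps within sp_mask, so a 16-bit SS leaves ESP's high
         * bits untouched; the store goes through SS.base. */
        cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
                          val, sa->mmu_index, sa->ra);
    }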
@@ -719,25 +744,8 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
if (!(e2 & DESC_P_MASK)) {
raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
}
- shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
- if (has_error_code) {
- /* push the error code on the destination stack */
- cpl = env->hflags & HF_CPL_MASK;
- sa.mmu_index = x86_mmu_index_pl(env, cpl);
- if (env->segs[R_SS].flags & DESC_B_MASK) {
- sa.sp_mask = 0xffffffff;
- } else {
- sa.sp_mask = 0xffff;
- }
- sa.sp = env->regs[R_ESP];
- sa.ss_base = env->segs[R_SS].base;
- if (shift) {
- pushl(&sa, error_code);
- } else {
- pushw(&sa, error_code);
- }
- SET_ESP(sa.sp, sa.sp_mask);
- }
+ switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip,
+ has_error_code, error_code);
return;
}
@@ -1533,7 +1541,8 @@ void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
if (dpl < cpl || dpl < rpl) {
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
- switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
+ switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip,
+ false, 0, GETPC());
break;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
@@ -1745,7 +1754,8 @@ void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
if (dpl < cpl || dpl < rpl) {
raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
}
- switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
+ switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip,
+ false, 0, GETPC());
return;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
@@ -2256,7 +2266,8 @@ void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
if (type != 3) {
raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
}
- switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
+ switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip,
+ false, 0, GETPC());
} else {
helper_ret_protected(env, shift, 1, 0, GETPC());
}
diff --git a/target/i386/tcg/system/bpt_helper.c b/target/i386/tcg/system/bpt_helper.c
index 08ccd3f..aebb5ca 100644
--- a/target/i386/tcg/system/bpt_helper.c
+++ b/target/i386/tcg/system/bpt_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/watchpoint.h"
#include "tcg/helper-tcg.h"
diff --git a/target/i386/tcg/system/excp_helper.c b/target/i386/tcg/system/excp_helper.c
index 93614aa..c162621 100644
--- a/target/i386/tcg/system/excp_helper.c
+++ b/target/i386/tcg/system/excp_helper.c
@@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index e53aaa3..6f5dc06 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -24,6 +24,7 @@
#include "accel/accel-cpu-target.h"
#include "exec/translation-block.h"
#include "exec/target_page.h"
+#include "accel/tcg/cpu-ops.h"
#include "tcg-cpu.h"
/* Frob eflags into and out of the CPU temporary format. */
@@ -47,6 +48,25 @@ static void x86_cpu_exec_exit(CPUState *cs)
env->eflags = cpu_compute_eflags(env);
}
+static TCGTBCPUState x86_get_tb_cpu_state(CPUState *cs)
+{
+ CPUX86State *env = cpu_env(cs);
+ uint32_t flags, cs_base;
+ vaddr pc;
+
+ flags = env->hflags |
+ (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
+ if (env->hflags & HF_CS64_MASK) {
+ cs_base = 0;
+ pc = env->eip;
+ } else {
+ cs_base = env->segs[R_CS].base;
+ pc = (uint32_t)(cs_base + env->eip);
+ }
+
+ return (TCGTBCPUState){ .pc = pc, .flags = flags, .cs_base = cs_base };
+}
+
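
The triple returned here keys the translation-block cache, so translated code is reused only when pc, cs_base and flags all match the current CPU state. A worked example (values illustrative; cs and env assumed in scope):

    /* Outside 64-bit mode, the segment base stays in cs_base and the
     * lookup pc is the truncated linear address; 64-bit code uses the
     * flat EIP directly with cs_base == 0. */
    env->segs[R_CS].base = 0x10000;
    env->eip = 0x1234;
    TCGTBCPUState key = x86_get_tb_cpu_state(cs);
    assert(key.pc == 0x11234 && key.cs_base == 0x10000);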
static void x86_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -120,18 +140,33 @@ static bool x86_debug_check_breakpoint(CPUState *cs)
/* RF disables all architectural breakpoints. */
return !(env->eflags & RF_MASK);
}
-#endif
-#include "accel/tcg/cpu-ops.h"
+static void x86_cpu_exec_reset(CPUState *cs)
+{
+ CPUArchState *env = cpu_env(cs);
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
+ do_cpu_init(env_archcpu(env));
+ cs->exception_index = EXCP_HALTED;
+}
+
+static vaddr x86_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ return cpu_env(cs)->hflags & HF_CS64_MASK ? result : (uint32_t)result;
+}
+#endif
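
x86_pointer_wrap() lets the generic TLB code truncate computed addresses: outside 64-bit code, an effective address that overflows 32 bits must wrap inside the 4 GiB space rather than escape it. A small illustration (assuming the CPU is not in a 64-bit code segment):

    vaddr base = 0xfffffff0;
    vaddr result = base + 0x20;                       /* 0x1_0000_0010 */
    assert(x86_pointer_wrap(cs, mmu_idx, result, base) == 0x10);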
const TCGCPUOps x86_tcg_ops = {
.mttcg_supported = true,
+ .precise_smc = true,
/*
* The x86 has a strong memory model with some store-after-load re-ordering
*/
.guest_default_memory_order = TCG_MO_ALL & ~TCG_MO_ST_LD,
.initialize = tcg_x86_init,
.translate_code = x86_translate_code,
+ .get_tb_cpu_state = x86_get_tb_cpu_state,
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
.restore_state_to_opc = x86_restore_state_to_opc,
.mmu_index = x86_cpu_mmu_index,
@@ -143,9 +178,11 @@ const TCGCPUOps x86_tcg_ops = {
.record_sigbus = x86_cpu_record_sigbus,
#else
.tlb_fill = x86_cpu_tlb_fill,
+ .pointer_wrap = x86_pointer_wrap,
.do_interrupt = x86_cpu_do_interrupt,
.cpu_exec_halt = x86_cpu_exec_halt,
.cpu_exec_interrupt = x86_cpu_exec_interrupt,
+ .cpu_exec_reset = x86_cpu_exec_reset,
.do_unaligned_access = x86_cpu_do_unaligned_access,
.debug_excp_handler = breakpoint_handler,
.debug_check_breakpoint = x86_debug_check_breakpoint,
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index 8a64195..0cb87d0 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -21,7 +21,6 @@
#include "qemu/host-utils.h"
#include "cpu.h"
#include "accel/tcg/cpu-mmu-index.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
@@ -2026,27 +2025,39 @@ static void gen_op_movl_seg_real(DisasContext *s, X86Seg seg_reg, TCGv seg)
/* move SRC to seg_reg and compute if the CPU state may change. Never
call this function with seg_reg == R_CS */
-static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src)
+static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src, bool inhibit_irq)
{
if (PE(s) && !VM86(s)) {
TCGv_i32 sel = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(sel, src);
gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), sel);
- /* abort translation because the addseg value may change or
- because ss32 may change. For R_SS, translation must always
- stop as a special handling must be done to disable hardware
- interrupts for the next instruction */
- if (seg_reg == R_SS) {
- s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
- } else if (CODE32(s) && seg_reg < R_FS) {
+
+ /*
+ * For moves to SS, the SS32 flag may change. For CODE32 only, changes
+ * to SS, DS and ES may change the ADDSEG flags.
+ */
+ if (seg_reg == R_SS || (CODE32(s) && seg_reg < R_FS)) {
s->base.is_jmp = DISAS_EOB_NEXT;
}
} else {
gen_op_movl_seg_real(s, seg_reg, src);
- if (seg_reg == R_SS) {
- s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
- }
+ }
+
+ /*
+ * For MOV or POP to SS (but not LSS) translation must always
+ * stop as a special handling must be done to disable hardware
+ * interrupts for the next instruction.
+ *
+ * This is the last instruction, so it's okay to overwrite
+ * HF_TF_MASK; the next TB will start with the flag set.
+ *
+ * DISAS_EOB_INHIBIT_IRQ is a superset of DISAS_EOB_NEXT which
+ * might have been set above.
+ */
+ if (inhibit_irq) {
+ s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
+ s->flags &= ~HF_TF_MASK;
}
}
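
The inhibit_irq parameter keeps the architectural one-instruction interrupt shadow for MOV SS and POP SS only; LSS also loads SS but has no shadow, which is why gen_lxx_seg() earlier in this series passes false. Hypothetical call sites, mirroring the hunks above:

    gen_movl_seg(s, R_SS, s->T0, true);    /* MOV SS / POP SS: shadow */
    gen_movl_seg(s, R_DS, s->T0, false);   /* MOV DS: no shadow */
    gen_movl_seg(s, seg, s->T1, false);    /* LSS/LDS/LES: no shadow */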
@@ -2297,7 +2308,7 @@ gen_eob(DisasContext *s, int mode)
if (mode == DISAS_EOB_RECHECK_TF) {
gen_helper_rechecking_single_step(tcg_env);
tcg_gen_exit_tb(NULL, 0);
- } else if ((s->flags & HF_TF_MASK) && mode != DISAS_EOB_INHIBIT_IRQ) {
+ } else if (s->flags & HF_TF_MASK) {
gen_helper_single_step(tcg_env);
} else if (mode == DISAS_JUMP &&
/* give irqs a chance to happen */
diff --git a/target/i386/tcg/user/excp_helper.c b/target/i386/tcg/user/excp_helper.c
index b3bdb78..98fab4cb 100644
--- a/target/i386/tcg/user/excp_helper.c
+++ b/target/i386/tcg/user/excp_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "tcg/helper-tcg.h"
void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr,
diff --git a/target/i386/tcg/user/seg_helper.c b/target/i386/tcg/user/seg_helper.c
index 5692dd5..263f599 100644
--- a/target/i386/tcg/user/seg_helper.c
+++ b/target/i386/tcg/user/seg_helper.c
@@ -21,7 +21,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "tcg/helper-tcg.h"
#include "tcg/seg_helper.h"
diff --git a/target/loongarch/README b/target/loongarch/README
index 0b9dc0d..1ffd342 100644
--- a/target/loongarch/README
+++ b/target/loongarch/README
@@ -11,7 +11,7 @@
- System emulation
- You can reference docs/system/loongarch/loongson3.rst to get the information about system emulation of LoongArch.
+ You can refer to docs/system/loongarch/virt.rst for information about system emulation of LoongArch.
- Linux-user emulation
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
index 8ad45b4..abad84c 100644
--- a/target/loongarch/cpu.c
+++ b/target/loongarch/cpu.c
@@ -15,7 +15,6 @@
#include "system/kvm.h"
#include "kvm/kvm_loongarch.h"
#include "hw/qdev-properties.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "cpu.h"
#include "internals.h"
@@ -30,6 +29,7 @@
#endif
#ifdef CONFIG_TCG
#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/cpu-ops.h"
#include "tcg/tcg.h"
#endif
#include "tcg/tcg_loongarch.h"
@@ -334,8 +334,28 @@ static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
return false;
}
+
+static vaddr loongarch_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ return is_va32(cpu_env(cs)) ? (uint32_t)result : result;
+}
#endif
+static TCGTBCPUState loongarch_get_tb_cpu_state(CPUState *cs)
+{
+ CPULoongArchState *env = cpu_env(cs);
+ uint32_t flags;
+
+ flags = env->CSR_CRMD & (R_CSR_CRMD_PLV_MASK | R_CSR_CRMD_PG_MASK);
+ flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, FPE) * HW_FLAGS_EUEN_FPE;
+ flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE) * HW_FLAGS_EUEN_SXE;
+ flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE) * HW_FLAGS_EUEN_ASXE;
+ flags |= is_va32(env) * HW_FLAGS_VA32;
+
+ return (TCGTBCPUState){ .pc = env->pc, .flags = flags };
+}
+
static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -862,22 +882,23 @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
#ifdef CONFIG_TCG
-#include "accel/tcg/cpu-ops.h"
-
static const TCGCPUOps loongarch_tcg_ops = {
.guest_default_memory_order = 0,
.mttcg_supported = true,
.initialize = loongarch_translate_init,
.translate_code = loongarch_translate_code,
+ .get_tb_cpu_state = loongarch_get_tb_cpu_state,
.synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
.restore_state_to_opc = loongarch_restore_state_to_opc,
.mmu_index = loongarch_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
.tlb_fill = loongarch_cpu_tlb_fill,
+ .pointer_wrap = loongarch_pointer_wrap,
.cpu_exec_interrupt = loongarch_cpu_exec_interrupt,
.cpu_exec_halt = loongarch_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = loongarch_cpu_do_interrupt,
.do_transaction_failed = loongarch_cpu_do_transaction_failed,
#endif
diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h
index 70ff56e..9538e8d 100644
--- a/target/loongarch/cpu.h
+++ b/target/loongarch/cpu.h
@@ -492,18 +492,6 @@ static inline void set_pc(CPULoongArchState *env, uint64_t value)
#define HW_FLAGS_VA32 0x20
#define HW_FLAGS_EUEN_ASXE 0x40
-static inline void cpu_get_tb_cpu_state(CPULoongArchState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *pc = env->pc;
- *cs_base = 0;
- *flags = env->CSR_CRMD & (R_CSR_CRMD_PLV_MASK | R_CSR_CRMD_PG_MASK);
- *flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, FPE) * HW_FLAGS_EUEN_FPE;
- *flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE) * HW_FLAGS_EUEN_SXE;
- *flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE) * HW_FLAGS_EUEN_ASXE;
- *flags |= is_va32(env) * HW_FLAGS_VA32;
-}
-
#define CPU_RESOLVING_TYPE TYPE_LOONGARCH_CPU
void loongarch_cpu_post_init(Object *obj);
@@ -515,5 +503,6 @@ static inline void kvm_loongarch_cpu_post_init(LoongArchCPU *cpu)
{
}
#endif
+void kvm_loongarch_init_irq_routing(void);
#endif /* LOONGARCH_CPU_H */
diff --git a/target/loongarch/kvm/kvm.c b/target/loongarch/kvm/kvm.c
index 1bda570..e5ea2db 100644
--- a/target/loongarch/kvm/kvm.c
+++ b/target/loongarch/kvm/kvm.c
@@ -1071,7 +1071,11 @@ static int kvm_cpu_check_pv_features(CPUState *cs, Error **errp)
env->pv_features |= BIT(KVM_FEATURE_VIRT_EXTIOI);
}
}
+ return 0;
+}
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
return 0;
}
@@ -1236,6 +1240,22 @@ void kvm_arch_init_irq_routing(KVMState *s)
{
}
+void kvm_loongarch_init_irq_routing(void)
+{
+ int i;
+
+ kvm_async_interrupts_allowed = true;
+ kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled();
+ if (kvm_has_gsi_routing()) {
+ for (i = 0; i < KVM_IRQCHIP_NUM_PINS; ++i) {
+ kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
+ }
+
+ kvm_gsi_routing_allowed = true;
+ kvm_irqchip_commit_routes(kvm_state);
+ }
+}
+
int kvm_arch_get_default_type(MachineState *ms)
{
return 0;
@@ -1249,7 +1269,12 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
int kvm_arch_irqchip_create(KVMState *s)
{
- return 0;
+ if (kvm_kernel_irqchip_split()) {
+ error_report("kernel_irqchip=split is not supported on LoongArch");
+ exit(1);
+ }
+
+ return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
}
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
diff --git a/target/loongarch/loongarch-qmp-cmds.c b/target/loongarch/loongarch-qmp-cmds.c
index 6f732d8..f5f1cd0 100644
--- a/target/loongarch/loongarch-qmp-cmds.c
+++ b/target/loongarch/loongarch-qmp-cmds.c
@@ -8,7 +8,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/qapi-commands-machine.h"
#include "cpu.h"
#include "qobject/qdict.h"
#include "qapi/qobject-input-visitor.h"
diff --git a/target/loongarch/tcg/fpu_helper.c b/target/loongarch/tcg/fpu_helper.c
index fc3fd05..fc9c64c 100644
--- a/target/loongarch/tcg/fpu_helper.c
+++ b/target/loongarch/tcg/fpu_helper.c
@@ -8,7 +8,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "fpu/softfloat.h"
#include "internals.h"
diff --git a/target/loongarch/tcg/insn_trans/trans_fcmp.c.inc b/target/loongarch/tcg/insn_trans/trans_fcmp.c.inc
index 3babf69..6a2c030 100644
--- a/target/loongarch/tcg/insn_trans/trans_fcmp.c.inc
+++ b/target/loongarch/tcg/insn_trans/trans_fcmp.c.inc
@@ -4,10 +4,15 @@
*/
/* bit0(signaling/quiet) bit1(lt) bit2(eq) bit3(un) bit4(neq) */
-static uint32_t get_fcmp_flags(int cond)
+static uint32_t get_fcmp_flags(DisasContext *ctx, int cond)
{
uint32_t flags = 0;
+ /* Check cond: valid values are [0-8], 10 and 12. */
+ if ((cond > 8) && (cond != 10) && (cond != 12)) {
+ return -1;
+ }
+
if (cond & 0x1) {
flags |= FCMP_LT;
}
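
get_fcmp_flags() now doubles as a validator: reserved cond encodings return (uint32_t)-1, and every caller turns that into an INE exception before doing any other work. The resulting caller pattern, as used in the hunks below:

    uint32_t flags = get_fcmp_flags(ctx, a->fcond >> 1);
    if (flags == (uint32_t)-1) {
        generate_exception(ctx, EXCCODE_INE);
        return true;
    }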
@@ -26,9 +31,14 @@ static uint32_t get_fcmp_flags(int cond)
static bool trans_fcmp_cond_s(DisasContext *ctx, arg_fcmp_cond_s *a)
{
TCGv var, src1, src2;
- uint32_t flags;
+ uint32_t flags = get_fcmp_flags(ctx, a->fcond >> 1);
void (*fn)(TCGv, TCGv_env, TCGv, TCGv, TCGv_i32);
+ if (flags == -1) {
+ generate_exception(ctx, EXCCODE_INE);
+ return true;
+ }
+
if (!avail_FP_SP(ctx)) {
return false;
}
@@ -39,8 +49,6 @@ static bool trans_fcmp_cond_s(DisasContext *ctx, arg_fcmp_cond_s *a)
src1 = get_fpr(ctx, a->fj);
src2 = get_fpr(ctx, a->fk);
fn = (a->fcond & 1 ? gen_helper_fcmp_s_s : gen_helper_fcmp_c_s);
- flags = get_fcmp_flags(a->fcond >> 1);
-
fn(var, tcg_env, src1, src2, tcg_constant_i32(flags));
tcg_gen_st8_tl(var, tcg_env, offsetof(CPULoongArchState, cf[a->cd]));
@@ -50,9 +58,14 @@ static bool trans_fcmp_cond_s(DisasContext *ctx, arg_fcmp_cond_s *a)
static bool trans_fcmp_cond_d(DisasContext *ctx, arg_fcmp_cond_d *a)
{
TCGv var, src1, src2;
- uint32_t flags;
+ uint32_t flags = get_fcmp_flags(ctx, a->fcond >> 1);
void (*fn)(TCGv, TCGv_env, TCGv, TCGv, TCGv_i32);
+ if (flags == -1) {
+ generate_exception(ctx, EXCCODE_INE);
+ return true;
+ }
+
if (!avail_FP_DP(ctx)) {
return false;
}
@@ -63,8 +76,6 @@ static bool trans_fcmp_cond_d(DisasContext *ctx, arg_fcmp_cond_d *a)
src1 = get_fpr(ctx, a->fj);
src2 = get_fpr(ctx, a->fk);
fn = (a->fcond & 1 ? gen_helper_fcmp_s_d : gen_helper_fcmp_c_d);
- flags = get_fcmp_flags(a->fcond >> 1);
-
fn(var, tcg_env, src1, src2, tcg_constant_i32(flags));
tcg_gen_st8_tl(var, tcg_env, offsetof(CPULoongArchState, cf[a->cd]));
diff --git a/target/loongarch/tcg/insn_trans/trans_vec.c.inc b/target/loongarch/tcg/insn_trans/trans_vec.c.inc
index dff9277..7873002 100644
--- a/target/loongarch/tcg/insn_trans/trans_vec.c.inc
+++ b/target/loongarch/tcg/insn_trans/trans_vec.c.inc
@@ -3465,7 +3465,7 @@ TRANS(xvmsknz_b, LASX, gen_xx, gen_helper_vmsknz_b)
static uint64_t vldi_get_value(DisasContext *ctx, uint32_t imm)
{
int mode;
- uint64_t data, t;
+ uint64_t data = 0, t;
/*
* imm bit [11:8] is mode, mode value is 0-12.
@@ -3570,17 +3570,26 @@ static uint64_t vldi_get_value(DisasContext *ctx, uint32_t imm)
}
break;
default:
- generate_exception(ctx, EXCCODE_INE);
g_assert_not_reached();
}
return data;
}
+static bool check_valid_vldi_mode(arg_vldi *a)
+{
+ return extract32(a->imm, 8, 4) <= 12;
+}
+
static bool gen_vldi(DisasContext *ctx, arg_vldi *a, uint32_t oprsz)
{
int sel, vece;
uint64_t value;
+ if (!check_valid_vldi_mode(a)) {
+ generate_exception(ctx, EXCCODE_INE);
+ return true;
+ }
+
if (!check_vec(ctx, oprsz)) {
return true;
}
@@ -4655,19 +4664,23 @@ TRANS(xvslti_du, LASX, do_xcmpi, MO_64, TCG_COND_LTU)
static bool do_vfcmp_cond_s(DisasContext *ctx, arg_vvv_fcond *a, uint32_t sz)
{
- uint32_t flags;
+ uint32_t flags = get_fcmp_flags(ctx, a->fcond >> 1);
void (*fn)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
TCGv_i32 vd = tcg_constant_i32(a->vd);
TCGv_i32 vj = tcg_constant_i32(a->vj);
TCGv_i32 vk = tcg_constant_i32(a->vk);
TCGv_i32 oprsz = tcg_constant_i32(sz);
+ if (flags == -1) {
+ generate_exception(ctx, EXCCODE_INE);
+ return true;
+ }
+
if (!check_vec(ctx, sz)) {
return true;
}
fn = (a->fcond & 1 ? gen_helper_vfcmp_s_s : gen_helper_vfcmp_c_s);
- flags = get_fcmp_flags(a->fcond >> 1);
fn(tcg_env, oprsz, vd, vj, vk, tcg_constant_i32(flags));
return true;
@@ -4675,19 +4688,23 @@ static bool do_vfcmp_cond_s(DisasContext *ctx, arg_vvv_fcond *a, uint32_t sz)
static bool do_vfcmp_cond_d(DisasContext *ctx, arg_vvv_fcond *a, uint32_t sz)
{
- uint32_t flags;
+ uint32_t flags = get_fcmp_flags(ctx, a->fcond >> 1);
void (*fn)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
TCGv_i32 vd = tcg_constant_i32(a->vd);
TCGv_i32 vj = tcg_constant_i32(a->vj);
TCGv_i32 vk = tcg_constant_i32(a->vk);
TCGv_i32 oprsz = tcg_constant_i32(sz);
+ if (flags == -1) {
+ generate_exception(ctx, EXCCODE_INE);
+ return true;
+ }
+
if (!check_vec(ctx, sz)) {
return true;
}
fn = (a->fcond & 1 ? gen_helper_vfcmp_s_d : gen_helper_vfcmp_c_d);
- flags = get_fcmp_flags(a->fcond >> 1);
fn(tcg_env, oprsz, vd, vj, vk, tcg_constant_i32(flags));
return true;
diff --git a/target/loongarch/tcg/iocsr_helper.c b/target/loongarch/tcg/iocsr_helper.c
index e62170d..c155f48 100644
--- a/target/loongarch/tcg/iocsr_helper.c
+++ b/target/loongarch/tcg/iocsr_helper.c
@@ -9,7 +9,6 @@
#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#define GET_MEMTXATTRS(cas) \
diff --git a/target/loongarch/tcg/op_helper.c b/target/loongarch/tcg/op_helper.c
index 94e3b28..16ac0d4 100644
--- a/target/loongarch/tcg/op_helper.c
+++ b/target/loongarch/tcg/op_helper.c
@@ -10,7 +10,6 @@
#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "internals.h"
#include "qemu/crc32c.h"
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index af208d7..dc48b0f 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -13,7 +13,6 @@
#include "internals.h"
#include "exec/helper-proto.h"
#include "exec/cputlb.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "accel/tcg/cpu-ldst.h"
diff --git a/target/loongarch/tcg/vec_helper.c b/target/loongarch/tcg/vec_helper.c
index 3faf52c..a270998 100644
--- a/target/loongarch/tcg/vec_helper.c
+++ b/target/loongarch/tcg/vec_helper.c
@@ -7,7 +7,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
#include "internals.h"
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index 6f33b86..6a09db3 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -23,6 +23,8 @@
#include "cpu.h"
#include "migration/vmstate.h"
#include "fpu/softfloat.h"
+#include "exec/translation-block.h"
+#include "accel/tcg/cpu-ops.h"
static void m68k_cpu_set_pc(CPUState *cs, vaddr value)
{
@@ -38,6 +40,24 @@ static vaddr m68k_cpu_get_pc(CPUState *cs)
return cpu->env.pc;
}
+static TCGTBCPUState m68k_get_tb_cpu_state(CPUState *cs)
+{
+ CPUM68KState *env = cpu_env(cs);
+ uint32_t flags;
+
+ flags = (env->macsr >> 4) & TB_FLAGS_MACSR;
+ if (env->sr & SR_S) {
+ flags |= TB_FLAGS_MSR_S;
+ flags |= (env->sfc << (TB_FLAGS_SFC_S_BIT - 2)) & TB_FLAGS_SFC_S;
+ flags |= (env->dfc << (TB_FLAGS_DFC_S_BIT - 2)) & TB_FLAGS_DFC_S;
+ }
+ if (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS) {
+ flags |= TB_FLAGS_TRACE;
+ }
+
+ return (TCGTBCPUState){ .pc = env->pc, .flags = flags };
+}
+
static void m68k_restore_state_to_opc(CPUState *cs,
const TranslationBlock *tb,
const uint64_t *data)
@@ -586,8 +606,6 @@ static const struct SysemuCPUOps m68k_sysemu_ops = {
};
#endif /* !CONFIG_USER_ONLY */
-#include "accel/tcg/cpu-ops.h"
-
static const TCGCPUOps m68k_tcg_ops = {
/* MTTCG not yet supported: require strict ordering */
.guest_default_memory_order = TCG_MO_ALL,
@@ -595,13 +613,16 @@ static const TCGCPUOps m68k_tcg_ops = {
.initialize = m68k_tcg_init,
.translate_code = m68k_translate_code,
+ .get_tb_cpu_state = m68k_get_tb_cpu_state,
.restore_state_to_opc = m68k_restore_state_to_opc,
.mmu_index = m68k_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
.tlb_fill = m68k_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = m68k_cpu_exec_interrupt,
.cpu_exec_halt = m68k_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = m68k_cpu_do_interrupt,
.do_transaction_failed = m68k_cpu_transaction_failed,
#endif /* !CONFIG_USER_ONLY */
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
index 39d0b9d..d9db6a4 100644
--- a/target/m68k/cpu.h
+++ b/target/m68k/cpu.h
@@ -605,22 +605,6 @@ void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
#define TB_FLAGS_TRACE 16
#define TB_FLAGS_TRACE_BIT (1 << TB_FLAGS_TRACE)
-static inline void cpu_get_tb_cpu_state(CPUM68KState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *pc = env->pc;
- *cs_base = 0;
- *flags = (env->macsr >> 4) & TB_FLAGS_MACSR;
- if (env->sr & SR_S) {
- *flags |= TB_FLAGS_MSR_S;
- *flags |= (env->sfc << (TB_FLAGS_SFC_S_BIT - 2)) & TB_FLAGS_SFC_S;
- *flags |= (env->dfc << (TB_FLAGS_DFC_S_BIT - 2)) & TB_FLAGS_DFC_S;
- }
- if (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS) {
- *flags |= TB_FLAGS_TRACE;
- }
-}
-
void dump_mmu(CPUM68KState *env);
#endif
diff --git a/target/m68k/fpu_helper.c b/target/m68k/fpu_helper.c
index ac4a0d8..5601286 100644
--- a/target/m68k/fpu_helper.c
+++ b/target/m68k/fpu_helper.c
@@ -21,7 +21,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "softfloat.h"
diff --git a/target/m68k/helper.c b/target/m68k/helper.c
index 3b880dd..15f110f 100644
--- a/target/m68k/helper.c
+++ b/target/m68k/helper.c
@@ -21,7 +21,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cputlb.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "exec/gdbstub.h"
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
index 242aecc..f29ae12 100644
--- a/target/m68k/op_helper.c
+++ b/target/m68k/op_helper.c
@@ -20,7 +20,6 @@
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "semihosting/semihost.h"
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index b1266a7..97afceb 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "exec/target_page.h"
#include "tcg/tcg-op.h"
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index 00a2730..ee0a869 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -27,11 +27,11 @@
#include "cpu.h"
#include "qemu/module.h"
#include "hw/qdev-properties.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "exec/gdbstub.h"
#include "exec/translation-block.h"
#include "fpu/softfloat-helpers.h"
+#include "accel/tcg/cpu-ops.h"
#include "tcg/tcg.h"
static const struct {
@@ -95,6 +95,17 @@ static vaddr mb_cpu_get_pc(CPUState *cs)
return cpu->env.pc;
}
+static TCGTBCPUState mb_get_tb_cpu_state(CPUState *cs)
+{
+ CPUMBState *env = cpu_env(cs);
+
+ return (TCGTBCPUState){
+ .pc = env->pc,
+ .flags = (env->iflags & IFLAGS_TB_MASK) | (env->msr & MSR_TB_MASK),
+ .cs_base = (env->iflags & IMM_FLAG ? env->imm : 0),
+ };
+}
+
static void mb_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -252,6 +263,11 @@ static void mb_cpu_realizefn(DeviceState *dev, Error **errp)
return;
}
+ gdb_register_coprocessor(cs, mb_cpu_gdb_read_stack_protect,
+ mb_cpu_gdb_write_stack_protect,
+ gdb_find_static_feature("microblaze-stack-protect.xml"),
+ 0);
+
qemu_init_vcpu(cs);
version = cpu->cfg.version ? cpu->cfg.version : DEFAULT_CPU_VERSION;
@@ -324,20 +340,13 @@ static void mb_cpu_realizefn(DeviceState *dev, Error **errp)
static void mb_cpu_initfn(Object *obj)
{
- MicroBlazeCPU *cpu = MICROBLAZE_CPU(obj);
-
- gdb_register_coprocessor(CPU(cpu), mb_cpu_gdb_read_stack_protect,
- mb_cpu_gdb_write_stack_protect,
- gdb_find_static_feature("microblaze-stack-protect.xml"),
- 0);
-
#ifndef CONFIG_USER_ONLY
/* Inbound IRQ and FIR lines */
- qdev_init_gpio_in(DEVICE(cpu), microblaze_cpu_set_irq, 2);
- qdev_init_gpio_in_named(DEVICE(cpu), mb_cpu_ns_axi_dp, "ns_axi_dp", 1);
- qdev_init_gpio_in_named(DEVICE(cpu), mb_cpu_ns_axi_ip, "ns_axi_ip", 1);
- qdev_init_gpio_in_named(DEVICE(cpu), mb_cpu_ns_axi_dc, "ns_axi_dc", 1);
- qdev_init_gpio_in_named(DEVICE(cpu), mb_cpu_ns_axi_ic, "ns_axi_ic", 1);
+ qdev_init_gpio_in(DEVICE(obj), microblaze_cpu_set_irq, 2);
+ qdev_init_gpio_in_named(DEVICE(obj), mb_cpu_ns_axi_dp, "ns_axi_dp", 1);
+ qdev_init_gpio_in_named(DEVICE(obj), mb_cpu_ns_axi_ip, "ns_axi_ip", 1);
+ qdev_init_gpio_in_named(DEVICE(obj), mb_cpu_ns_axi_dc, "ns_axi_dc", 1);
+ qdev_init_gpio_in_named(DEVICE(obj), mb_cpu_ns_axi_ic, "ns_axi_ic", 1);
#endif
/* Restricted 'endianness' property is equivalent of 'little-endian' */
@@ -424,8 +433,6 @@ static const struct SysemuCPUOps mb_sysemu_ops = {
};
#endif
-#include "accel/tcg/cpu-ops.h"
-
static const TCGCPUOps mb_tcg_ops = {
/* MicroBlaze is always in-order. */
.guest_default_memory_order = TCG_MO_ALL,
@@ -433,14 +440,17 @@ static const TCGCPUOps mb_tcg_ops = {
.initialize = mb_tcg_init,
.translate_code = mb_translate_code,
+ .get_tb_cpu_state = mb_get_tb_cpu_state,
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
.restore_state_to_opc = mb_restore_state_to_opc,
.mmu_index = mb_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
.tlb_fill = mb_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = mb_cpu_exec_interrupt,
.cpu_exec_halt = mb_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = mb_cpu_do_interrupt,
.do_transaction_failed = mb_cpu_transaction_failed,
.do_unaligned_access = mb_cpu_do_unaligned_access,
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index d511f22..3ce28b3 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -248,7 +248,7 @@ struct CPUArchState {
uint32_t pc;
uint32_t msr; /* All bits of MSR except MSR[C] and MSR[CC] */
uint32_t msr_c; /* MSR[C], in low bit; other bits must be 0 */
- target_ulong ear;
+ uint64_t ear;
uint32_t esr;
uint32_t fsr;
uint32_t btr;
@@ -419,14 +419,6 @@ static inline bool mb_cpu_is_big_endian(CPUState *cs)
return !cpu->cfg.endi;
}
-static inline void cpu_get_tb_cpu_state(CPUMBState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *pc = env->pc;
- *flags = (env->iflags & IFLAGS_TB_MASK) | (env->msr & MSR_TB_MASK);
- *cs_base = (*flags & IMM_FLAG ? env->imm : 0);
-}
-
#if !defined(CONFIG_USER_ONLY)
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
index 9203192..ef0e2f9 100644
--- a/target/microblaze/helper.c
+++ b/target/microblaze/helper.c
@@ -26,8 +26,51 @@
#include "exec/target_page.h"
#include "qemu/host-utils.h"
#include "exec/log.h"
+#include "exec/helper-proto.h"
+
+
+G_NORETURN
+static void mb_unaligned_access_internal(CPUState *cs, uint64_t addr,
+ uintptr_t retaddr)
+{
+ CPUMBState *env = cpu_env(cs);
+ uint32_t esr, iflags;
+
+ /* Recover the pc and iflags from the corresponding insn_start. */
+ cpu_restore_state(cs, retaddr);
+ iflags = env->iflags;
+
+ qemu_log_mask(CPU_LOG_INT,
+ "Unaligned access addr=0x%" PRIx64 " pc=%x iflags=%x\n",
+ addr, env->pc, iflags);
+
+ esr = ESR_EC_UNALIGNED_DATA;
+ if (likely(iflags & ESR_ESS_FLAG)) {
+ esr |= iflags & ESR_ESS_MASK;
+ } else {
+ qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
+ }
+
+ env->ear = addr;
+ env->esr = esr;
+ cs->exception_index = EXCP_HW_EXCP;
+ cpu_loop_exit(cs);
+}
+
+void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ mb_unaligned_access_internal(cs, addr, retaddr);
+}
#ifndef CONFIG_USER_ONLY
+
+void HELPER(unaligned_access)(CPUMBState *env, uint64_t addr)
+{
+ mb_unaligned_access_internal(env_cpu(env), addr, GETPC());
+}
+
static bool mb_cpu_access_is_secure(MicroBlazeCPU *cpu,
MMUAccessType access_type)
{
@@ -269,31 +312,3 @@ bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
#endif /* !CONFIG_USER_ONLY */
-
-void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
- MMUAccessType access_type,
- int mmu_idx, uintptr_t retaddr)
-{
- MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
- uint32_t esr, iflags;
-
- /* Recover the pc and iflags from the corresponding insn_start. */
- cpu_restore_state(cs, retaddr);
- iflags = cpu->env.iflags;
-
- qemu_log_mask(CPU_LOG_INT,
- "Unaligned access addr=" TARGET_FMT_lx " pc=%x iflags=%x\n",
- (target_ulong)addr, cpu->env.pc, iflags);
-
- esr = ESR_EC_UNALIGNED_DATA;
- if (likely(iflags & ESR_ESS_FLAG)) {
- esr |= iflags & ESR_ESS_MASK;
- } else {
- qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
- }
-
- cpu->env.ear = addr;
- cpu->env.esr = esr;
- cs->exception_index = EXCP_HW_EXCP;
- cpu_loop_exit(cs);
-}
diff --git a/target/microblaze/helper.h b/target/microblaze/helper.h
index f740835..ef4fad9 100644
--- a/target/microblaze/helper.h
+++ b/target/microblaze/helper.h
@@ -20,12 +20,22 @@ DEF_HELPER_FLAGS_3(fcmp_ne, TCG_CALL_NO_WG, i32, env, i32, i32)
DEF_HELPER_FLAGS_3(fcmp_ge, TCG_CALL_NO_WG, i32, env, i32, i32)
DEF_HELPER_FLAGS_2(pcmpbf, TCG_CALL_NO_RWG_SE, i32, i32, i32)
-#if !defined(CONFIG_USER_ONLY)
-DEF_HELPER_FLAGS_3(mmu_read, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_4(mmu_write, TCG_CALL_NO_RWG, void, env, i32, i32, i32)
-#endif
-
DEF_HELPER_FLAGS_2(stackprot, TCG_CALL_NO_WG, void, env, tl)
-
DEF_HELPER_FLAGS_2(get, TCG_CALL_NO_RWG, i32, i32, i32)
DEF_HELPER_FLAGS_3(put, TCG_CALL_NO_RWG, void, i32, i32, i32)
+
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_FLAGS_3(mmu_read, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_4(mmu_write, TCG_CALL_NO_RWG, void, env, i32, i32, i32)
+DEF_HELPER_FLAGS_2(unaligned_access, TCG_CALL_NO_WG, noreturn, env, i64)
+DEF_HELPER_FLAGS_2(lbuea, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_2(lhuea_be, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_2(lhuea_le, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_2(lwea_be, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_2(lwea_le, TCG_CALL_NO_WG, i32, env, i64)
+DEF_HELPER_FLAGS_3(sbea, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_3(shea_be, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_3(shea_le, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_3(swea_be, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_3(swea_le, TCG_CALL_NO_WG, void, env, i32, i64)
+#endif
diff --git a/target/microblaze/machine.c b/target/microblaze/machine.c
index 51705e4..a4cf38d 100644
--- a/target/microblaze/machine.c
+++ b/target/microblaze/machine.c
@@ -93,7 +93,7 @@ static const VMStateDescription vmstate_env = {
};
static const VMStateField vmstate_cpu_fields[] = {
- VMSTATE_CPU(),
+ VMSTATE_STRUCT(parent_obj, MicroBlazeCPU, 0, vmstate_cpu_common, CPUState),
VMSTATE_STRUCT(env, MicroBlazeCPU, 1, vmstate_env, CPUMBState),
VMSTATE_END_OF_LIST()
};
diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c
index 95a12e1..8703ff5 100644
--- a/target/microblaze/mmu.c
+++ b/target/microblaze/mmu.c
@@ -172,7 +172,8 @@ unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu,
}
done:
qemu_log_mask(CPU_LOG_MMU,
- "MMU vaddr=%" PRIx64 " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
+ "MMU vaddr=0x" TARGET_FMT_lx
+ " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
vaddr, rw, tlb_wr, tlb_ex, hit);
return hit;
}
diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c
index 4624ce5..b8365b3 100644
--- a/target/microblaze/op_helper.c
+++ b/target/microblaze/op_helper.c
@@ -23,7 +23,6 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "fpu/softfloat.h"
@@ -383,6 +382,8 @@ void helper_stackprot(CPUMBState *env, target_ulong addr)
}
#if !defined(CONFIG_USER_ONLY)
+#include "system/memory.h"
+
/* Writes/reads to the MMU's special regs end up here. */
uint32_t helper_mmu_read(CPUMBState *env, uint32_t ext, uint32_t rn)
{
@@ -394,38 +395,90 @@ void helper_mmu_write(CPUMBState *env, uint32_t ext, uint32_t rn, uint32_t v)
mmu_write(env, ext, rn, v);
}
+static void mb_transaction_failed_internal(CPUState *cs, hwaddr physaddr,
+ uint64_t addr, unsigned size,
+ MMUAccessType access_type,
+ uintptr_t retaddr)
+{
+ CPUMBState *env = cpu_env(cs);
+ MicroBlazeCPU *cpu = env_archcpu(env);
+ const char *access_name = "INVALID";
+ bool take = env->msr & MSR_EE;
+ uint32_t esr = ESR_EC_DATA_BUS;
+
+ switch (access_type) {
+ case MMU_INST_FETCH:
+ access_name = "INST_FETCH";
+ esr = ESR_EC_INSN_BUS;
+ take &= cpu->cfg.iopb_bus_exception;
+ break;
+ case MMU_DATA_LOAD:
+ access_name = "DATA_LOAD";
+ take &= cpu->cfg.dopb_bus_exception;
+ break;
+ case MMU_DATA_STORE:
+ access_name = "DATA_STORE";
+ take &= cpu->cfg.dopb_bus_exception;
+ break;
+ }
+
+ qemu_log_mask(CPU_LOG_INT, "Transaction failed: addr 0x%" PRIx64
+ "physaddr 0x" HWADDR_FMT_plx " size %d access-type %s (%s)\n",
+ addr, physaddr, size, access_name,
+ take ? "TAKEN" : "DROPPED");
+
+ if (take) {
+ env->esr = esr;
+ env->ear = addr;
+ cs->exception_index = EXCP_HW_EXCP;
+ cpu_loop_exit_restore(cs, retaddr);
+ }
+}
+
void mb_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
unsigned size, MMUAccessType access_type,
int mmu_idx, MemTxAttrs attrs,
MemTxResult response, uintptr_t retaddr)
{
- MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
- CPUMBState *env = &cpu->env;
+ mb_transaction_failed_internal(cs, physaddr, addr, size,
+ access_type, retaddr);
+}
- qemu_log_mask(CPU_LOG_INT, "Transaction failed: vaddr 0x%" VADDR_PRIx
- " physaddr 0x" HWADDR_FMT_plx " size %d access type %s\n",
- addr, physaddr, size,
- access_type == MMU_INST_FETCH ? "INST_FETCH" :
- (access_type == MMU_DATA_LOAD ? "DATA_LOAD" : "DATA_STORE"));
+#define LD_EA(NAME, TYPE, FUNC) \
+uint32_t HELPER(NAME)(CPUMBState *env, uint64_t ea) \
+{ \
+ CPUState *cs = env_cpu(env); \
+ MemTxResult txres; \
+ TYPE ret = FUNC(cs->as, ea, MEMTXATTRS_UNSPECIFIED, &txres); \
+ if (unlikely(txres != MEMTX_OK)) { \
+ mb_transaction_failed_internal(cs, ea, ea, sizeof(TYPE), \
+ MMU_DATA_LOAD, GETPC()); \
+ } \
+ return ret; \
+}
- if (!(env->msr & MSR_EE)) {
- return;
- }
+LD_EA(lbuea, uint8_t, address_space_ldub)
+LD_EA(lhuea_be, uint16_t, address_space_lduw_be)
+LD_EA(lhuea_le, uint16_t, address_space_lduw_le)
+LD_EA(lwea_be, uint32_t, address_space_ldl_be)
+LD_EA(lwea_le, uint32_t, address_space_ldl_le)
+
+#define ST_EA(NAME, TYPE, FUNC) \
+void HELPER(NAME)(CPUMBState *env, uint32_t data, uint64_t ea) \
+{ \
+ CPUState *cs = env_cpu(env); \
+ MemTxResult txres; \
+ FUNC(cs->as, ea, data, MEMTXATTRS_UNSPECIFIED, &txres); \
+ if (unlikely(txres != MEMTX_OK)) { \
+ mb_transaction_failed_internal(cs, ea, ea, sizeof(TYPE), \
+ MMU_DATA_STORE, GETPC()); \
+ } \
+}
- if (access_type == MMU_INST_FETCH) {
- if (!cpu->cfg.iopb_bus_exception) {
- return;
- }
- env->esr = ESR_EC_INSN_BUS;
- } else {
- if (!cpu->cfg.dopb_bus_exception) {
- return;
- }
- env->esr = ESR_EC_DATA_BUS;
- }
+ST_EA(sbea, uint8_t, address_space_stb)
+ST_EA(shea_be, uint16_t, address_space_stw_be)
+ST_EA(shea_le, uint16_t, address_space_stw_le)
+ST_EA(swea_be, uint32_t, address_space_stl_be)
+ST_EA(swea_le, uint32_t, address_space_stl_le)
- env->ear = addr;
- cs->exception_index = EXCP_HW_EXCP;
- cpu_loop_exit_restore(cs, retaddr);
-}
#endif
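
The LD_EA/ST_EA macros stamp out one helper per access size and endianness, each going through the CPU's address space with an explicit MemTxResult so bus errors reach mb_transaction_failed_internal(). Hand-expanded, the first instantiation reads as (sketch of the expansion above):

    uint32_t helper_lbuea(CPUMBState *env, uint64_t ea)
    {
        CPUState *cs = env_cpu(env);
        MemTxResult txres;
        uint8_t ret = address_space_ldub(cs->as, ea,
                                         MEMTXATTRS_UNSPECIFIED, &txres);
        if (unlikely(txres != MEMTX_OK)) {
            mb_transaction_failed_internal(cs, ea, ea, sizeof(uint8_t),
                                           MMU_DATA_LOAD, GETPC());
        }
        return ret;
    }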
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 7dcad6c..5098a1d 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
@@ -64,9 +63,6 @@ typedef struct DisasContext {
DisasContextBase base;
const MicroBlazeCPUConfig *cfg;
- TCGv_i32 r0;
- bool r0_set;
-
/* Decoder. */
uint32_t ext_imm;
unsigned int tb_flags;
@@ -180,14 +176,7 @@ static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
if (likely(reg != 0)) {
return cpu_R[reg];
}
- if (!dc->r0_set) {
- if (dc->r0 == NULL) {
- dc->r0 = tcg_temp_new_i32();
- }
- tcg_gen_movi_i32(dc->r0, 0);
- dc->r0_set = true;
- }
- return dc->r0;
+ return tcg_constant_i32(0);
}
static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
@@ -195,10 +184,7 @@ static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
if (likely(reg != 0)) {
return cpu_R[reg];
}
- if (dc->r0 == NULL) {
- dc->r0 = tcg_temp_new_i32();
- }
- return dc->r0;
+ return tcg_temp_new_i32();
}
static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
@@ -311,11 +297,7 @@ static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
- TCGv_i32 zero = tcg_constant_i32(0);
- TCGv_i32 tmp = tcg_temp_new_i32();
-
- tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
- tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
+ tcg_gen_addcio_i32(out, cpu_msr_c, ina, inb, cpu_msr_c);
}
/* Input carry, but no output carry. */
@@ -544,12 +526,10 @@ static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
- TCGv_i32 zero = tcg_constant_i32(0);
TCGv_i32 tmp = tcg_temp_new_i32();
tcg_gen_not_i32(tmp, ina);
- tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
- tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
+ tcg_gen_addcio_i32(out, cpu_msr_c, tmp, inb, cpu_msr_c);
}
/* No input or output carry. */
@@ -626,19 +606,18 @@ DO_TYPEBI(xori, false, tcg_gen_xori_i32)
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
- TCGv ret = tcg_temp_new();
+ TCGv ret;
/* If any of the regs is r0, set t to the value of the other reg. */
if (ra && rb) {
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
- tcg_gen_extu_i32_tl(ret, tmp);
+ ret = tcg_temp_new_i32();
+ tcg_gen_add_i32(ret, cpu_R[ra], cpu_R[rb]);
} else if (ra) {
- tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
+ ret = cpu_R[ra];
} else if (rb) {
- tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
+ ret = cpu_R[rb];
} else {
- tcg_gen_movi_tl(ret, 0);
+ ret = tcg_constant_i32(0);
}
if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
@@ -649,15 +628,16 @@ static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
- TCGv ret = tcg_temp_new();
+ TCGv ret;
/* If any of the regs is r0, set t to the value of the other reg. */
- if (ra) {
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
- tcg_gen_extu_i32_tl(ret, tmp);
+ if (ra && imm) {
+ ret = tcg_temp_new_i32();
+ tcg_gen_addi_i32(ret, cpu_R[ra], imm);
+ } else if (ra) {
+ ret = cpu_R[ra];
} else {
- tcg_gen_movi_tl(ret, (uint32_t)imm);
+ ret = tcg_constant_i32(imm);
}
if (ra == 1 && dc->cfg->stackprot) {
@@ -667,23 +647,23 @@ static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
}
#ifndef CONFIG_USER_ONLY
-static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
+static TCGv_i64 compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
int addr_size = dc->cfg->addr_size;
- TCGv ret = tcg_temp_new();
+ TCGv_i64 ret = tcg_temp_new_i64();
if (addr_size == 32 || ra == 0) {
if (rb) {
- tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
+ tcg_gen_extu_i32_i64(ret, cpu_R[rb]);
} else {
- tcg_gen_movi_tl(ret, 0);
+ return tcg_constant_i64(0);
}
} else {
if (rb) {
tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
} else {
- tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
- tcg_gen_shli_tl(ret, ret, 32);
+ tcg_gen_extu_i32_i64(ret, cpu_R[ra]);
+ tcg_gen_shli_i64(ret, ret, 32);
}
if (addr_size < 64) {
/* Mask off out of range bits. */
@@ -707,6 +687,20 @@ static void record_unaligned_ess(DisasContext *dc, int rd,
tcg_set_insn_start_param(dc->base.insn_start, 1, iflags);
}
+
+static void gen_alignment_check_ea(DisasContext *dc, TCGv_i64 ea, int rb,
+ int rd, MemOp size, bool store)
+{
+ if (rb && (dc->tb_flags & MSR_EE) && dc->cfg->unaligned_exceptions) {
+ TCGLabel *over = gen_new_label();
+
+ record_unaligned_ess(dc, rd, size, store);
+
+ tcg_gen_brcondi_i64(TCG_COND_TSTEQ, ea, (1 << size) - 1, over);
+ gen_helper_unaligned_access(tcg_env, ea);
+ gen_set_label(over);
+ }
+}
#endif
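
gen_alignment_check_ea() above only emits the test when an unaligned result is possible and the exception can actually be delivered (MSR_EE set, unaligned exceptions configured). TCG_COND_TSTEQ takes the branch when (ea & mask) == 0, so aligned accesses skip the helper call entirely. The host-side equivalent of the emitted test, assuming MemOp size encodes log2 of the access width (MO_16 = 1, MO_32 = 2):

    static inline bool ea_is_aligned(uint64_t ea, MemOp size)
    {
        uint64_t mask = (1u << size) - 1;  /* 1 for halfword, 3 for word */
        return (ea & mask) == 0;           /* aligned: skip the slow path */
    }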
static inline MemOp mo_endian(DisasContext *dc)
@@ -772,10 +766,11 @@ static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
return true;
}
#ifdef CONFIG_USER_ONLY
- return true;
+ g_assert_not_reached();
#else
- TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
- return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
+ TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ gen_helper_lbuea(reg_for_write(dc, arg->rd), tcg_env, addr);
+ return true;
#endif
}
@@ -803,10 +798,13 @@ static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
return true;
}
#ifdef CONFIG_USER_ONLY
- return true;
+ g_assert_not_reached();
#else
- TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
- return do_load(dc, arg->rd, addr, MO_UW, MMU_NOMMU_IDX, false);
+ TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_16, false);
+ (mo_endian(dc) == MO_BE ? gen_helper_lhuea_be : gen_helper_lhuea_le)
+ (reg_for_write(dc, arg->rd), tcg_env, addr);
+ return true;
#endif
}
@@ -834,10 +832,13 @@ static bool trans_lwea(DisasContext *dc, arg_typea *arg)
return true;
}
#ifdef CONFIG_USER_ONLY
- return true;
+ g_assert_not_reached();
#else
- TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
- return do_load(dc, arg->rd, addr, MO_UL, MMU_NOMMU_IDX, false);
+ TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_32, false);
+ (mo_endian(dc) == MO_BE ? gen_helper_lwea_be : gen_helper_lwea_le)
+ (reg_for_write(dc, arg->rd), tcg_env, addr);
+ return true;
#endif
}
@@ -925,10 +926,11 @@ static bool trans_sbea(DisasContext *dc, arg_typea *arg)
return true;
}
#ifdef CONFIG_USER_ONLY
- return true;
+ g_assert_not_reached();
#else
- TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
- return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
+ TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ gen_helper_sbea(tcg_env, reg_for_read(dc, arg->rd), addr);
+ return true;
#endif
}
@@ -956,10 +958,13 @@ static bool trans_shea(DisasContext *dc, arg_typea *arg)
return true;
}
#ifdef CONFIG_USER_ONLY
- return true;
+ g_assert_not_reached();
#else
- TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
- return do_store(dc, arg->rd, addr, MO_UW, MMU_NOMMU_IDX, false);
+ TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_16, true);
+ (mo_endian(dc) == MO_BE ? gen_helper_shea_be : gen_helper_shea_le)
+ (tcg_env, reg_for_read(dc, arg->rd), addr);
+ return true;
#endif
}
@@ -987,10 +992,13 @@ static bool trans_swea(DisasContext *dc, arg_typea *arg)
return true;
}
#ifdef CONFIG_USER_ONLY
- return true;
+ g_assert_not_reached();
#else
- TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
- return do_store(dc, arg->rd, addr, MO_UL, MMU_NOMMU_IDX, false);
+ TCGv_i64 addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
+ gen_alignment_check_ea(dc, addr, arg->rb, arg->rd, MO_32, true);
+ (mo_endian(dc) == MO_BE ? gen_helper_swea_be : gen_helper_swea_le)
+ (tcg_env, reg_for_read(dc, arg->rd), addr);
+ return true;
#endif
}
@@ -1614,8 +1622,6 @@ static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
dc->cfg = &cpu->cfg;
dc->tb_flags = dc->base.tb->flags;
dc->ext_imm = dc->base.tb->cs_base;
- dc->r0 = NULL;
- dc->r0_set = false;
dc->mem_index = cpu_mmu_index(cs, false);
dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
dc->jmp_dest = -1;
@@ -1654,11 +1660,6 @@ static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
trap_illegal(dc, true);
}
- if (dc->r0) {
- dc->r0 = NULL;
- dc->r0_set = false;
- }
-
/* Discard the imm global when its contents cannot be used. */
if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
tcg_gen_discard_i32(cpu_imm);
@@ -1836,7 +1837,7 @@ void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
- "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
+ "ear=0x%" PRIx64 " slr=0x%x shr=0x%x\n",
env->esr, env->fsr, env->btr, env->edr,
env->ear, env->slr, env->shr);
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index d13361a..1f6c41f 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -29,7 +29,6 @@
#include "qemu/module.h"
#include "system/kvm.h"
#include "system/qtest.h"
-#include "exec/exec-all.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-clock.h"
#include "fpu_helper.h"
@@ -550,20 +549,42 @@ static int mips_cpu_mmu_index(CPUState *cs, bool ifunc)
return mips_env_mmu_index(cpu_env(cs));
}
+static TCGTBCPUState mips_get_tb_cpu_state(CPUState *cs)
+{
+ CPUMIPSState *env = cpu_env(cs);
+
+ return (TCGTBCPUState){
+ .pc = env->active_tc.PC,
+ .flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK |
+ MIPS_HFLAG_HWRENA_ULR),
+ };
+}
+
+#ifndef CONFIG_USER_ONLY
+static vaddr mips_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ return cpu_env(cs)->hflags & MIPS_HFLAG_AWRAP ? (int32_t)result : result;
+}
+#endif
+
static const TCGCPUOps mips_tcg_ops = {
.mttcg_supported = TARGET_LONG_BITS == 32,
.guest_default_memory_order = 0,
.initialize = mips_tcg_init,
.translate_code = mips_translate_code,
+ .get_tb_cpu_state = mips_get_tb_cpu_state,
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
.restore_state_to_opc = mips_restore_state_to_opc,
.mmu_index = mips_cpu_mmu_index,
#if !defined(CONFIG_USER_ONLY)
.tlb_fill = mips_cpu_tlb_fill,
+ .pointer_wrap = mips_pointer_wrap,
.cpu_exec_interrupt = mips_cpu_exec_interrupt,
.cpu_exec_halt = mips_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = mips_cpu_do_interrupt,
.do_transaction_failed = mips_cpu_do_transaction_failed,
.do_unaligned_access = mips_cpu_do_unaligned_access,
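
The MIPS conversion above is one instance of a pattern repeated across this series: the per-target cpu_get_tb_cpu_state() inline (removed from cpu.h below) becomes a TCGCPUOps::get_tb_cpu_state hook returning a TCGTBCPUState by value, and a pointer_wrap hook canonicalizes computed guest addresses, here sign-extending whenever MIPS_HFLAG_AWRAP flags 32-bit addressing mode. A hedged sketch of the hook contract for a hypothetical target (CPUToyState and its fields are illustrative only):

    static TCGTBCPUState toy_get_tb_cpu_state(CPUState *cs)
    {
        CPUToyState *env = cpu_env(cs);

        /* Pack pc plus whatever state selects distinct translations;
         * fields left out of the initializer stay zero. */
        return (TCGTBCPUState){ .pc = env->pc, .flags = env->mode_bits };
    }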
diff --git a/target/mips/cpu.h b/target/mips/cpu.h
index d16f9a7..5cd4c6c 100644
--- a/target/mips/cpu.h
+++ b/target/mips/cpu.h
@@ -1366,15 +1366,6 @@ void cpu_mips_clock_init(MIPSCPU *cpu);
/* helper.c */
target_ulong exception_resume_pc(CPUMIPSState *env);
-static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *pc = env->active_tc.PC;
- *cs_base = 0;
- *flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK |
- MIPS_HFLAG_HWRENA_ULR);
-}
-
/**
* mips_cpu_create_with_clock:
* @typename: a MIPS CPU type.
diff --git a/target/mips/kvm.c b/target/mips/kvm.c
index d67b7c1..ec53acb 100644
--- a/target/mips/kvm.c
+++ b/target/mips/kvm.c
@@ -61,6 +61,11 @@ int kvm_arch_irqchip_create(KVMState *s)
return 0;
}
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
CPUMIPSState *env = cpu_env(cs);
diff --git a/target/mips/system/mips-qmp-cmds.c b/target/mips/system/mips-qmp-cmds.c
index 7340ac7..d98d662 100644
--- a/target/mips/system/mips-qmp-cmds.c
+++ b/target/mips/system/mips-qmp-cmds.c
@@ -7,9 +7,19 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-machine.h"
#include "cpu.h"
+CpuModelExpansionInfo *
+qmp_query_cpu_model_expansion(CpuModelExpansionType type,
+ CpuModelInfo *model,
+ Error **errp)
+{
+ error_setg(errp, "CPU model expansion is not supported on this target");
+ return NULL;
+}
+
static void mips_cpu_add_definition(gpointer data, gpointer user_data)
{
ObjectClass *oc = data;
diff --git a/target/mips/system/physaddr.c b/target/mips/system/physaddr.c
index 505781d..b8e1a5a 100644
--- a/target/mips/system/physaddr.c
+++ b/target/mips/system/physaddr.c
@@ -18,7 +18,6 @@
*/
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "../internal.h"
diff --git a/target/mips/tcg/exception.c b/target/mips/tcg/exception.c
index 1a8902e..d32bceb 100644
--- a/target/mips/tcg/exception.c
+++ b/target/mips/tcg/exception.c
@@ -23,7 +23,6 @@
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
target_ulong exception_resume_pc(CPUMIPSState *env)
diff --git a/target/mips/tcg/fpu_helper.c b/target/mips/tcg/fpu_helper.c
index 45d593d..36af980 100644
--- a/target/mips/tcg/fpu_helper.c
+++ b/target/mips/tcg/fpu_helper.c
@@ -24,7 +24,6 @@
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "fpu/softfloat.h"
#include "fpu_helper.h"
diff --git a/target/mips/tcg/ldst_helper.c b/target/mips/tcg/ldst_helper.c
index 2fb879f..10319bf 100644
--- a/target/mips/tcg/ldst_helper.c
+++ b/target/mips/tcg/ldst_helper.c
@@ -23,7 +23,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "exec/memop.h"
#include "internal.h"
diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c
index e349344..f554b3d 100644
--- a/target/mips/tcg/msa_helper.c
+++ b/target/mips/tcg/msa_helper.c
@@ -21,8 +21,8 @@
#include "cpu.h"
#include "internal.h"
#include "tcg/tcg.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/helper-proto.h"
#include "exec/memop.h"
#include "exec/target_page.h"
diff --git a/target/mips/tcg/op_helper.c b/target/mips/tcg/op_helper.c
index 65403f1..b906d10 100644
--- a/target/mips/tcg/op_helper.c
+++ b/target/mips/tcg/op_helper.c
@@ -22,7 +22,6 @@
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "exec/memop.h"
#include "fpu_helper.h"
diff --git a/target/mips/tcg/system/special_helper.c b/target/mips/tcg/system/special_helper.c
index 3ce3ae1..b54cbe8 100644
--- a/target/mips/tcg/system/special_helper.c
+++ b/target/mips/tcg/system/special_helper.c
@@ -22,7 +22,6 @@
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "internal.h"
diff --git a/target/mips/tcg/system/tlb_helper.c b/target/mips/tcg/system/tlb_helper.c
index e477ef8..eccaf36 100644
--- a/target/mips/tcg/system/tlb_helper.c
+++ b/target/mips/tcg/system/tlb_helper.c
@@ -22,7 +22,6 @@
#include "cpu.h"
#include "internal.h"
#include "exec/cputlb.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "accel/tcg/cpu-ldst.h"
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index 2ec267e..dfbb2df 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -21,9 +21,9 @@
#include "qapi/error.h"
#include "qemu/qemu-print.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "fpu/softfloat-helpers.h"
+#include "accel/tcg/cpu-ops.h"
#include "tcg/tcg.h"
static void openrisc_cpu_set_pc(CPUState *cs, vaddr value)
@@ -41,6 +41,18 @@ static vaddr openrisc_cpu_get_pc(CPUState *cs)
return cpu->env.pc;
}
+static TCGTBCPUState openrisc_get_tb_cpu_state(CPUState *cs)
+{
+ CPUOpenRISCState *env = cpu_env(cs);
+
+ return (TCGTBCPUState){
+ .pc = env->pc,
+ .flags = ((env->dflag ? TB_FLAGS_DFLAG : 0)
+ | (cpu_get_gpr(env, 0) ? 0 : TB_FLAGS_R0_0)
+ | (env->sr & (SR_SM | SR_DME | SR_IME | SR_OVE))),
+ };
+}
+
static void openrisc_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -240,22 +252,23 @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {
};
#endif
-#include "accel/tcg/cpu-ops.h"
-
static const TCGCPUOps openrisc_tcg_ops = {
.guest_default_memory_order = 0,
.mttcg_supported = true,
.initialize = openrisc_translate_init,
.translate_code = openrisc_translate_code,
+ .get_tb_cpu_state = openrisc_get_tb_cpu_state,
.synchronize_from_tb = openrisc_cpu_synchronize_from_tb,
.restore_state_to_opc = openrisc_restore_state_to_opc,
.mmu_index = openrisc_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
.tlb_fill = openrisc_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = openrisc_cpu_exec_interrupt,
.cpu_exec_halt = openrisc_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = openrisc_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
index 569819b..f4bcf00 100644
--- a/target/openrisc/cpu.h
+++ b/target/openrisc/cpu.h
@@ -349,16 +349,6 @@ static inline void cpu_set_gpr(CPUOpenRISCState *env, int i, uint32_t val)
env->shadow_gpr[0][i] = val;
}
-static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *pc = env->pc;
- *cs_base = 0;
- *flags = (env->dflag ? TB_FLAGS_DFLAG : 0)
- | (cpu_get_gpr(env, 0) ? 0 : TB_FLAGS_R0_0)
- | (env->sr & (SR_SM | SR_DME | SR_IME | SR_OVE));
-}
-
static inline uint32_t cpu_get_sr(const CPUOpenRISCState *env)
{
return (env->sr
diff --git a/target/openrisc/exception.c b/target/openrisc/exception.c
index 8699c3d..e213be3 100644
--- a/target/openrisc/exception.c
+++ b/target/openrisc/exception.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exception.h"
G_NORETURN void raise_exception(OpenRISCCPU *cpu, uint32_t excp)
diff --git a/target/openrisc/exception_helper.c b/target/openrisc/exception_helper.c
index 1f5be4b..c2c9d13 100644
--- a/target/openrisc/exception_helper.c
+++ b/target/openrisc/exception_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exception.h"
diff --git a/target/openrisc/fpu_helper.c b/target/openrisc/fpu_helper.c
index 8b81d2f..dba9972 100644
--- a/target/openrisc/fpu_helper.c
+++ b/target/openrisc/fpu_helper.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
diff --git a/target/openrisc/interrupt.c b/target/openrisc/interrupt.c
index b3b5b40..4868230 100644
--- a/target/openrisc/interrupt.c
+++ b/target/openrisc/interrupt.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "gdbstub/helpers.h"
#include "qemu/host-utils.h"
#ifndef CONFIG_USER_ONLY
diff --git a/target/openrisc/interrupt_helper.c b/target/openrisc/interrupt_helper.c
index ab4ea88..1553ebc 100644
--- a/target/openrisc/interrupt_helper.c
+++ b/target/openrisc/interrupt_helper.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
void HELPER(rfe)(CPUOpenRISCState *env)
diff --git a/target/openrisc/machine.c b/target/openrisc/machine.c
index 3574e57..081c706 100644
--- a/target/openrisc/machine.c
+++ b/target/openrisc/machine.c
@@ -136,7 +136,7 @@ const VMStateDescription vmstate_openrisc_cpu = {
.minimum_version_id = 1,
.post_load = cpu_post_load,
.fields = (const VMStateField[]) {
- VMSTATE_CPU(),
+ VMSTATE_STRUCT(parent_obj, OpenRISCCPU, 0, vmstate_cpu_common, CPUState),
VMSTATE_STRUCT(env, OpenRISCCPU, 1, vmstate_env, CPUOpenRISCState),
VMSTATE_END_OF_LIST()
}
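
The openrisc machine.c hunk open-codes what the retired VMSTATE_CPU() convenience macro used to provide, migrating the embedded CPUState through vmstate_cpu_common. A sketch of the assumed shape of the old wrapper (the exact definition lived in the core CPU headers):

    /* assumed shape of the retired wrapper: */
    #define VMSTATE_CPU() \
        VMSTATE_STRUCT(parent_obj, ArchCPU, 0, vmstate_cpu_common, CPUState)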
diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c
index 92badf0..d96b41a 100644
--- a/target/openrisc/sys_helper.c
+++ b/target/openrisc/sys_helper.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/cputlb.h"
#include "exec/target_page.h"
#include "exec/helper-proto.h"
@@ -219,7 +218,7 @@ target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd,
{
OpenRISCCPU *cpu = env_archcpu(env);
#ifndef CONFIG_USER_ONLY
- uint64_t data[TARGET_INSN_START_WORDS];
+ uint64_t data[INSN_START_WORDS];
MachineState *ms = MACHINE(qdev_get_machine());
CPUState *cs = env_cpu(env);
int idx;
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index d4ce601..5ab3bc7 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -21,7 +21,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "accel/tcg/cpu-mmu-index.h"
-#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
@@ -221,8 +220,7 @@ static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
TCGv t0 = tcg_temp_new();
TCGv res = tcg_temp_new();
- tcg_gen_add2_tl(res, cpu_sr_cy, srca, dc->zero, cpu_sr_cy, dc->zero);
- tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, dc->zero);
+ tcg_gen_addcio_tl(res, cpu_sr_cy, srca, srcb, cpu_sr_cy);
tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
tcg_gen_xor_tl(t0, res, srcb);
tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 13115a8..6b90543 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -2751,19 +2751,6 @@ void cpu_write_xer(CPUPPCState *env, target_ulong xer);
*/
#define is_book3s_arch2x(ctx) (!!((ctx)->insns_flags & PPC_SEGMENT_64B))
-#ifdef CONFIG_DEBUG_TCG
-void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags);
-#else
-static inline void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *pc = env->nip;
- *cs_base = 0;
- *flags = env->hflags;
-}
-#endif
-
G_NORETURN void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
uint32_t error_code, uintptr_t raddr);
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index b0973b6..a0e77f2 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -45,7 +45,6 @@
#include "internal.h"
#include "spr_common.h"
#include "power8-pmu.h"
-
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
#include "hw/intc/intc.h"
@@ -7115,7 +7114,7 @@ PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc)
}
/* Sort by PVR, ordering special case "host" last. */
-static gint ppc_cpu_list_compare(gconstpointer a, gconstpointer b)
+static gint ppc_cpu_list_compare(gconstpointer a, gconstpointer b, gpointer d)
{
ObjectClass *oc_a = (ObjectClass *)a;
ObjectClass *oc_b = (ObjectClass *)b;
@@ -7183,7 +7182,7 @@ static void ppc_cpu_list(void)
qemu_printf("Available CPUs:\n");
list = object_class_get_list(TYPE_POWERPC_CPU, false);
- list = g_slist_sort(list, ppc_cpu_list_compare);
+ list = g_slist_sort_with_data(list, ppc_cpu_list_compare, NULL);
g_slist_foreach(list, ppc_cpu_list_entry, NULL);
g_slist_free(list);
@@ -7387,6 +7386,12 @@ static void ppc_cpu_exec_exit(CPUState *cs)
cpu->vhyp_class->cpu_exec_exit(cpu->vhyp, cpu);
}
}
+
+static vaddr ppc_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ return (cpu_env(cs)->hflags >> HFLAGS_64) & 1 ? result : (uint32_t)result;
+}
#endif /* CONFIG_TCG */
#endif /* !CONFIG_USER_ONLY */
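
ppc_pointer_wrap above differs from the MIPS hook in one detail worth noting: PPC 32-bit mode zero-extends the computed address, while MIPS AWRAP sign-extends it. With an assumed out-of-range value the two policies diverge:

    static void wrap_contrast_demo(void)
    {
        uint64_t v = 0x180000000ULL;            /* assumed computed address */
        uint64_t ppc32  = (uint32_t)v;          /* 0x0000000080000000 */
        uint64_t mips32 = (uint64_t)(int32_t)v; /* 0xffffffff80000000 */
        (void)ppc32; (void)mips32;
    }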
@@ -7483,6 +7488,7 @@ static const TCGCPUOps ppc_tcg_ops = {
.guest_default_memory_order = 0,
.initialize = ppc_translate_init,
.translate_code = ppc_translate_code,
+ .get_tb_cpu_state = ppc_get_tb_cpu_state,
.restore_state_to_opc = ppc_restore_state_to_opc,
.mmu_index = ppc_cpu_mmu_index,
@@ -7490,8 +7496,10 @@ static const TCGCPUOps ppc_tcg_ops = {
.record_sigsegv = ppc_cpu_record_sigsegv,
#else
.tlb_fill = ppc_cpu_tlb_fill,
+ .pointer_wrap = ppc_pointer_wrap,
.cpu_exec_interrupt = ppc_cpu_exec_interrupt,
.cpu_exec_halt = ppc_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = ppc_cpu_do_interrupt,
.cpu_exec_enter = ppc_cpu_exec_enter,
.cpu_exec_exit = ppc_cpu_exec_exit,
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index da8b525..1efdc40 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -24,7 +24,6 @@
#include "system/system.h"
#include "system/runstate.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "internal.h"
#include "helper_regs.h"
#include "hw/ppc/ppc.h"
diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
index d93cfed..07b782f 100644
--- a/target/ppc/fpu_helper.c
+++ b/target/ppc/fpu_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "internal.h"
#include "fpu/softfloat.h"
diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c
index f211bc9..7e57268 100644
--- a/target/ppc/helper_regs.c
+++ b/target/ppc/helper_regs.c
@@ -27,6 +27,8 @@
#include "power8-pmu.h"
#include "cpu-models.h"
#include "spr_common.h"
+#include "accel/tcg/cpu-ops.h"
+#include "internal.h"
/* Swap temporary saved registers with GPRs */
void hreg_swap_gpr_tgpr(CPUPPCState *env)
@@ -255,26 +257,23 @@ void hreg_update_pmu_hflags(CPUPPCState *env)
env->hflags |= hreg_compute_pmu_hflags_value(env);
}
-#ifdef CONFIG_DEBUG_TCG
-void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
+TCGTBCPUState ppc_get_tb_cpu_state(CPUState *cs)
{
+ CPUPPCState *env = cpu_env(cs);
uint32_t hflags_current = env->hflags;
- uint32_t hflags_rebuilt;
-
- *pc = env->nip;
- *cs_base = 0;
- *flags = hflags_current;
- hflags_rebuilt = hreg_compute_hflags_value(env);
+#ifdef CONFIG_DEBUG_TCG
+ uint32_t hflags_rebuilt = hreg_compute_hflags_value(env);
if (unlikely(hflags_current != hflags_rebuilt)) {
cpu_abort(env_cpu(env),
"TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
hflags_current, hflags_rebuilt);
}
-}
#endif
+ return (TCGTBCPUState){ .pc = env->nip, .flags = hflags_current };
+}
+
void cpu_interrupt_exittb(CPUState *cs)
{
/*
diff --git a/target/ppc/internal.h b/target/ppc/internal.h
index 9012d38..7723350 100644
--- a/target/ppc/internal.h
+++ b/target/ppc/internal.h
@@ -21,6 +21,7 @@
#include "exec/breakpoint.h"
#include "hw/registerfields.h"
#include "exec/page-protection.h"
+#include "accel/tcg/tb-cpu-state.h"
/* PM instructions */
typedef enum {
@@ -308,4 +309,6 @@ static inline int ger_pack_masks(int pmsk, int ymsk, int xmsk)
return msk;
}
+TCGTBCPUState ppc_get_tb_cpu_state(CPUState *cs);
+
#endif /* PPC_INTERNAL_H */
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 8a957c3..0156580 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -479,6 +479,11 @@ static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
}
}
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
diff --git a/target/ppc/machine.c b/target/ppc/machine.c
index 98df5b4..d72e5ec 100644
--- a/target/ppc/machine.c
+++ b/target/ppc/machine.c
@@ -1,6 +1,5 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "helper_regs.h"
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
index d7e8d67..6ab71a6 100644
--- a/target/ppc/mem_helper.c
+++ b/target/ppc/mem_helper.c
@@ -19,12 +19,13 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "helper_regs.h"
#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/helper-retaddr.h"
+#include "accel/tcg/probe.h"
#include "internal.h"
#include "qemu/atomic128.h"
diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c
index 46ae454..e7d9462 100644
--- a/target/ppc/misc_helper.c
+++ b/target/ppc/misc_helper.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/cputlb.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
diff --git a/target/ppc/mmu-hash32.c b/target/ppc/mmu-hash32.c
index 5bd3efe..8b980a5 100644
--- a/target/ppc/mmu-hash32.c
+++ b/target/ppc/mmu-hash32.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "system/kvm.h"
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
index 3ba4810..dd33755 100644
--- a/target/ppc/mmu-hash64.c
+++ b/target/ppc/mmu-hash64.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
index 4ab5f3b..33ac341 100644
--- a/target/ppc/mmu-radix64.c
+++ b/target/ppc/mmu-radix64.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "qemu/error-report.h"
#include "system/kvm.h"
diff --git a/target/ppc/mmu_common.c b/target/ppc/mmu_common.c
index 394a0c9..52d4861 100644
--- a/target/ppc/mmu_common.c
+++ b/target/ppc/mmu_common.c
@@ -24,7 +24,6 @@
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "mmu-hash32.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "exec/log.h"
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
index 2138666..ac60705 100644
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -25,7 +25,6 @@
#include "mmu-hash64.h"
#include "mmu-hash32.h"
#include "exec/cputlb.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "exec/log.h"
diff --git a/target/ppc/power8-pmu.c b/target/ppc/power8-pmu.c
index db9ee8e..2a7a5b4 100644
--- a/target/ppc/power8-pmu.c
+++ b/target/ppc/power8-pmu.c
@@ -13,7 +13,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "helper_regs.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
diff --git a/target/ppc/ppc-qmp-cmds.c b/target/ppc/ppc-qmp-cmds.c
index a25d86a..7022564 100644
--- a/target/ppc/ppc-qmp-cmds.c
+++ b/target/ppc/ppc-qmp-cmds.c
@@ -28,7 +28,8 @@
#include "qemu/ctype.h"
#include "monitor/hmp-target.h"
#include "monitor/hmp.h"
-#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-machine.h"
#include "cpu-models.h"
#include "cpu-qom.h"
@@ -175,6 +176,15 @@ int target_get_monitor_def(CPUState *cs, const char *name, uint64_t *pval)
return -EINVAL;
}
+CpuModelExpansionInfo *
+qmp_query_cpu_model_expansion(CpuModelExpansionType type,
+ CpuModelInfo *model,
+ Error **errp)
+{
+ error_setg(errp, "CPU model expansion is not supported on this target");
+ return NULL;
+}
+
static void ppc_cpu_defs_entry(gpointer data, gpointer user_data)
{
ObjectClass *oc = data;
diff --git a/target/ppc/tcg-excp_helper.c b/target/ppc/tcg-excp_helper.c
index 2b15e5f..f835be5 100644
--- a/target/ppc/tcg-excp_helper.c
+++ b/target/ppc/tcg-excp_helper.c
@@ -21,7 +21,6 @@
#include "qemu/log.h"
#include "target/ppc/cpu.h"
#include "accel/tcg/cpu-ldst.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "system/runstate.h"
diff --git a/target/ppc/timebase_helper.c b/target/ppc/timebase_helper.c
index 7312032..7209b41 100644
--- a/target/ppc/timebase_helper.c
+++ b/target/ppc/timebase_helper.c
@@ -20,7 +20,6 @@
#include "cpu.h"
#include "hw/ppc/ppc.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index fea2f2c..27f90c3 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -21,7 +21,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
-#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
@@ -1746,11 +1745,10 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
tcg_gen_mov_tl(ca32, ca);
}
} else {
- TCGv zero = tcg_constant_tl(0);
if (add_ca) {
- tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero);
- tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero);
+ tcg_gen_addcio_tl(t0, ca, arg1, arg2, ca);
} else {
+ TCGv zero = tcg_constant_tl(0);
tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero);
}
gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0);
@@ -1949,11 +1947,9 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
tcg_gen_mov_tl(cpu_ca32, cpu_ca);
}
} else if (add_ca) {
- TCGv zero, inv1 = tcg_temp_new();
+ TCGv inv1 = tcg_temp_new();
tcg_gen_not_tl(inv1, arg1);
- zero = tcg_constant_tl(0);
- tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
- tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
+ tcg_gen_addcio_tl(t0, cpu_ca, arg2, inv1, cpu_ca);
gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0);
} else {
tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
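
The subf path above leans on the standard identity for subtract-with-borrow: arg2 - arg1 equals arg2 + ~arg1 + 1, so feeding the existing CA through tcg_gen_addcio_tl(t0, ca, arg2, inv1, ca) yields t0 = arg2 + ~arg1 + CA with the PPC carry semantics preserved. A quick 8-bit check with assumed values a = 3, b = 10, CA = 1:

    static void subf_carry_demo(void)
    {
        uint8_t a = 3, b = 10, ca_in = 1;
        unsigned sum = (uint8_t)~a + b + ca_in; /* 0xfc + 10 + 1 = 0x107 */
        uint8_t t0 = (uint8_t)sum;              /* 7, i.e. 10 - 3 */
        unsigned ca_out = sum >> 8;             /* 1: no borrow occurred */
        (void)t0; (void)ca_out;
    }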
diff --git a/target/ppc/user_only_helper.c b/target/ppc/user_only_helper.c
index a4d07a0..ae210eb 100644
--- a/target/ppc/user_only_helper.c
+++ b/target/ppc/user_only_helper.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "internal.h"
void ppc_cpu_record_sigsegv(CPUState *cs, vaddr address,
diff --git a/target/riscv/cpu-qom.h b/target/riscv/cpu-qom.h
index 4cfdb74..1ee05eb 100644
--- a/target/riscv/cpu-qom.h
+++ b/target/riscv/cpu-qom.h
@@ -44,9 +44,11 @@
#define TYPE_RISCV_CPU_RVA23S64 RISCV_CPU_TYPE_NAME("rva23s64")
#define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex")
#define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c")
+#define TYPE_RISCV_CPU_SIFIVE_E RISCV_CPU_TYPE_NAME("sifive-e")
#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31")
#define TYPE_RISCV_CPU_SIFIVE_E34 RISCV_CPU_TYPE_NAME("sifive-e34")
#define TYPE_RISCV_CPU_SIFIVE_E51 RISCV_CPU_TYPE_NAME("sifive-e51")
+#define TYPE_RISCV_CPU_SIFIVE_U RISCV_CPU_TYPE_NAME("sifive-u")
#define TYPE_RISCV_CPU_SIFIVE_U34 RISCV_CPU_TYPE_NAME("sifive-u34")
#define TYPE_RISCV_CPU_SIFIVE_U54 RISCV_CPU_TYPE_NAME("sifive-u54")
#define TYPE_RISCV_CPU_THEAD_C906 RISCV_CPU_TYPE_NAME("thead-c906")
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index e0604f4..629ac37 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -24,7 +24,6 @@
#include "cpu.h"
#include "cpu_vendorid.h"
#include "internals.h"
-#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
@@ -74,6 +73,13 @@ bool riscv_cpu_option_set(const char *optname)
return g_hash_table_contains(general_user_opts, optname);
}
+static void riscv_cpu_cfg_merge(RISCVCPUConfig *dest, const RISCVCPUConfig *src)
+{
+#define BOOL_FIELD(x) dest->x |= src->x;
+#define TYPED_FIELD(type, x, default_) if (src->x != default_) dest->x = src->x;
+#include "cpu_cfg_fields.h.inc"
+}
+
#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
{#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
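
riscv_cpu_cfg_merge() above is driven by the X-macro header cpu_cfg_fields.h.inc: the two #defines give per-field-kind expansions, and the #include stamps them out once per RISCVCPUConfig member, so booleans OR together while typed fields only override when they differ from their declared default. The header's entries are assumed to look like:

    /* assumed shape of cpu_cfg_fields.h.inc entries: */
    BOOL_FIELD(ext_zicsr)
    TYPED_FIELD(uint16_t, cbom_blocksize, 0)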
@@ -357,7 +363,7 @@ void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
- return 16 << mcc->misa_mxl_max;
+ return 16 << mcc->def->misa_mxl_max;
}
#ifndef CONFIG_USER_ONLY
@@ -390,7 +396,7 @@ static uint8_t satp_mode_from_str(const char *satp_mode_str)
g_assert_not_reached();
}
-uint8_t satp_mode_max_from_map(uint32_t map)
+static uint8_t satp_mode_max_from_map(uint32_t map)
{
/*
* 'map = 0' will make us return (31 - 32), which C will
@@ -434,17 +440,23 @@ const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
g_assert_not_reached();
}
-static void set_satp_mode_max_supported(RISCVCPU *cpu,
- uint8_t satp_mode)
+static bool get_satp_mode_supported(RISCVCPU *cpu, uint16_t *supported)
{
- bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
+ bool rv32 = riscv_cpu_is_32bit(cpu);
const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
+ int satp_mode = cpu->cfg.max_satp_mode;
+ if (satp_mode == -1) {
+ return false;
+ }
+
+ *supported = 0;
for (int i = 0; i <= satp_mode; ++i) {
if (valid_vm[i]) {
- cpu->cfg.satp_mode.supported |= (1 << i);
+ *supported |= (1 << i);
}
}
+ return true;
}
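
get_satp_mode_supported() above turns cfg.max_satp_mode into a bitmap with one bit per satp.MODE encoding, skipping reserved encodings via valid_vm[]; max_satp_mode == -1 means the CPU model leaves the choice entirely open and the caller bails out. A worked example using the RV64 encodings (MBARE = 0, SV39 = 8, SV48 = 9):

    static uint16_t sv48_supported_map(void)
    {
        /* max_satp_mode = SV48: bits for MBARE (0), SV39 (8), SV48 (9);
         * satp_mode_max_from_map() of this map is 9, i.e. SV48 again. */
        return (1 << 0) | (1 << 8) | (1 << 9);
    }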
/* Set the satp mode to the max supported */
@@ -453,380 +465,26 @@ static void set_satp_mode_default_map(RISCVCPU *cpu)
/*
* Bare CPUs do not default to the max available.
* Users must set a valid satp_mode in the command
- * line.
+ * line. Otherwise, leave the existing max_satp_mode
+ * in place.
*/
if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
warn_report("No satp mode set. Defaulting to 'bare'");
- cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
- return;
+ cpu->cfg.max_satp_mode = VM_1_10_MBARE;
}
-
- cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
-}
-#endif
-
-static void riscv_max_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
-
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-
- env->priv_ver = PRIV_VERSION_LATEST;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(RISCV_CPU(obj),
- riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
- VM_1_10_SV32 : VM_1_10_SV57);
-#endif
-}
-
-#if defined(TARGET_RISCV64)
-static void rv64_base_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
-
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-
- /* Set latest version of privileged specification */
- env->priv_ver = PRIV_VERSION_LATEST;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
-#endif
-}
-
-static void rv64_sifive_u_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
- env->priv_ver = PRIV_VERSION_1_10_0;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
-#endif
-
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-}
-
-static void rv64_sifive_e_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
- env->priv_ver = PRIV_VERSION_1_10_0;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
-#endif
-
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.pmp = true;
-}
-
-static void rv64_thead_c906_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
- env->priv_ver = PRIV_VERSION_1_11_0;
-
- cpu->cfg.ext_zfa = true;
- cpu->cfg.ext_zfh = true;
- cpu->cfg.mmu = true;
- cpu->cfg.ext_xtheadba = true;
- cpu->cfg.ext_xtheadbb = true;
- cpu->cfg.ext_xtheadbs = true;
- cpu->cfg.ext_xtheadcmo = true;
- cpu->cfg.ext_xtheadcondmov = true;
- cpu->cfg.ext_xtheadfmemidx = true;
- cpu->cfg.ext_xtheadmac = true;
- cpu->cfg.ext_xtheadmemidx = true;
- cpu->cfg.ext_xtheadmempair = true;
- cpu->cfg.ext_xtheadsync = true;
-
- cpu->cfg.mvendorid = THEAD_VENDOR_ID;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_SV39);
- th_register_custom_csrs(cpu);
-#endif
-
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.pmp = true;
-}
-
-static void rv64_veyron_v1_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
- env->priv_ver = PRIV_VERSION_1_12_0;
-
- /* Enable ISA extensions */
- cpu->cfg.mmu = true;
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.pmp = true;
- cpu->cfg.ext_zicbom = true;
- cpu->cfg.cbom_blocksize = 64;
- cpu->cfg.cboz_blocksize = 64;
- cpu->cfg.ext_zicboz = true;
- cpu->cfg.ext_smaia = true;
- cpu->cfg.ext_ssaia = true;
- cpu->cfg.ext_sscofpmf = true;
- cpu->cfg.ext_sstc = true;
- cpu->cfg.ext_svinval = true;
- cpu->cfg.ext_svnapot = true;
- cpu->cfg.ext_svpbmt = true;
- cpu->cfg.ext_smstateen = true;
- cpu->cfg.ext_zba = true;
- cpu->cfg.ext_zbb = true;
- cpu->cfg.ext_zbc = true;
- cpu->cfg.ext_zbs = true;
- cpu->cfg.ext_XVentanaCondOps = true;
-
- cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
- cpu->cfg.marchid = VEYRON_V1_MARCHID;
- cpu->cfg.mimpid = VEYRON_V1_MIMPID;
-
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_SV48);
-#endif
-}
-
-/* Tenstorrent Ascalon */
-static void rv64_tt_ascalon_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH | RVV);
- env->priv_ver = PRIV_VERSION_1_13_0;
-
- /* Enable ISA extensions */
- cpu->cfg.mmu = true;
- cpu->cfg.vlenb = 256 >> 3;
- cpu->cfg.elen = 64;
- cpu->env.vext_ver = VEXT_VERSION_1_00_0;
- cpu->cfg.rvv_ma_all_1s = true;
- cpu->cfg.rvv_ta_all_1s = true;
- cpu->cfg.misa_w = true;
- cpu->cfg.pmp = true;
- cpu->cfg.cbom_blocksize = 64;
- cpu->cfg.cbop_blocksize = 64;
- cpu->cfg.cboz_blocksize = 64;
- cpu->cfg.ext_zic64b = true;
- cpu->cfg.ext_zicbom = true;
- cpu->cfg.ext_zicbop = true;
- cpu->cfg.ext_zicboz = true;
- cpu->cfg.ext_zicntr = true;
- cpu->cfg.ext_zicond = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zihintntl = true;
- cpu->cfg.ext_zihintpause = true;
- cpu->cfg.ext_zihpm = true;
- cpu->cfg.ext_zimop = true;
- cpu->cfg.ext_zawrs = true;
- cpu->cfg.ext_zfa = true;
- cpu->cfg.ext_zfbfmin = true;
- cpu->cfg.ext_zfh = true;
- cpu->cfg.ext_zfhmin = true;
- cpu->cfg.ext_zcb = true;
- cpu->cfg.ext_zcmop = true;
- cpu->cfg.ext_zba = true;
- cpu->cfg.ext_zbb = true;
- cpu->cfg.ext_zbs = true;
- cpu->cfg.ext_zkt = true;
- cpu->cfg.ext_zvbb = true;
- cpu->cfg.ext_zvbc = true;
- cpu->cfg.ext_zvfbfmin = true;
- cpu->cfg.ext_zvfbfwma = true;
- cpu->cfg.ext_zvfh = true;
- cpu->cfg.ext_zvfhmin = true;
- cpu->cfg.ext_zvkng = true;
- cpu->cfg.ext_smaia = true;
- cpu->cfg.ext_smstateen = true;
- cpu->cfg.ext_ssaia = true;
- cpu->cfg.ext_sscofpmf = true;
- cpu->cfg.ext_sstc = true;
- cpu->cfg.ext_svade = true;
- cpu->cfg.ext_svinval = true;
- cpu->cfg.ext_svnapot = true;
- cpu->cfg.ext_svpbmt = true;
-
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_SV57);
-#endif
-}
-
-static void rv64_xiangshan_nanhu_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVG | RVC | RVB | RVS | RVU);
- env->priv_ver = PRIV_VERSION_1_12_0;
-
- /* Enable ISA extensions */
- cpu->cfg.ext_zbc = true;
- cpu->cfg.ext_zbkb = true;
- cpu->cfg.ext_zbkc = true;
- cpu->cfg.ext_zbkx = true;
- cpu->cfg.ext_zknd = true;
- cpu->cfg.ext_zkne = true;
- cpu->cfg.ext_zknh = true;
- cpu->cfg.ext_zksed = true;
- cpu->cfg.ext_zksh = true;
- cpu->cfg.ext_svinval = true;
-
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_SV39);
-#endif
-}
-
-#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
-static void rv128_base_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
-
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-
- /* Set latest version of privileged specification */
- env->priv_ver = PRIV_VERSION_LATEST;
- set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
-}
-#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
-
-static void rv64i_bare_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- riscv_cpu_set_misa_ext(env, RVI);
-}
-
-static void rv64e_bare_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- riscv_cpu_set_misa_ext(env, RVE);
-}
-
-#endif /* !TARGET_RISCV64 */
-
-#if defined(TARGET_RISCV32) || \
- (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
-
-static void rv32_base_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
-
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-
- /* Set latest version of privileged specification */
- env->priv_ver = PRIV_VERSION_LATEST;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
-#endif
-}
-
-static void rv32_sifive_u_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
- CPURISCVState *env = &cpu->env;
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
- env->priv_ver = PRIV_VERSION_1_10_0;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
-#endif
-
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.mmu = true;
- cpu->cfg.pmp = true;
-}
-
-static void rv32_sifive_e_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
- env->priv_ver = PRIV_VERSION_1_10_0;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
-#endif
-
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.pmp = true;
}
-
-static void rv32_ibex_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
- env->priv_ver = PRIV_VERSION_1_12_0;
-#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.pmp = true;
- cpu->cfg.ext_smepmp = true;
-
- cpu->cfg.ext_zba = true;
- cpu->cfg.ext_zbb = true;
- cpu->cfg.ext_zbc = true;
- cpu->cfg.ext_zbs = true;
-}
-static void rv32_imafcu_nommu_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- RISCVCPU *cpu = RISCV_CPU(obj);
-
- riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
- env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
-#endif
-
- /* inherited from parent obj via riscv_cpu_init() */
- cpu->cfg.ext_zifencei = true;
- cpu->cfg.ext_zicsr = true;
- cpu->cfg.pmp = true;
-}
-
-static void rv32i_bare_cpu_init(Object *obj)
-{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- riscv_cpu_set_misa_ext(env, RVI);
-}
-
-static void rv32e_bare_cpu_init(Object *obj)
+static void riscv_register_custom_csrs(RISCVCPU *cpu, const RISCVCSR *csr_list)
{
- CPURISCVState *env = &RISCV_CPU(obj)->env;
- riscv_cpu_set_misa_ext(env, RVE);
+ for (size_t i = 0; csr_list[i].csr_ops.name; i++) {
+ int csrno = csr_list[i].csrno;
+ const riscv_csr_operations *csr_ops = &csr_list[i].csr_ops;
+ if (!csr_list[i].insertion_test || csr_list[i].insertion_test(cpu)) {
+ riscv_set_csr_ops(csrno, csr_ops);
+ }
+ }
}
#endif
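
riscv_register_custom_csrs() walks a table terminated by a NULL csr_ops.name and installs each entry unless its optional insertion_test callback rejects the CPU, which is how vendor CSRs (such as the T-Head set referenced elsewhere in this patch) stay conditional on the model. A hedged sketch of such a table (the CSR number and name are purely illustrative):

    static const RISCVCSR example_csrs[] = {
        { .csrno = 0x7c0,                 /* hypothetical vendor CSR */
          .insertion_test = NULL,         /* NULL: always registered */
          .csr_ops = { .name = "xvendorcfg" /* plus read/write ops */ } },
        { 0 }                             /* sentinel: .csr_ops.name == NULL */
    };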
@@ -1034,7 +692,7 @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
mcc->parent_phases.hold(obj, type);
}
#ifndef CONFIG_USER_ONLY
- env->misa_mxl = mcc->misa_mxl_max;
+ env->misa_mxl = mcc->def->misa_mxl_max;
env->priv = PRV_M;
env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
if (env->misa_mxl > MXL_RV32) {
@@ -1171,18 +829,16 @@ static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
bool rv32 = riscv_cpu_is_32bit(cpu);
- uint8_t satp_mode_map_max, satp_mode_supported_max;
+ uint16_t supported;
+ uint8_t satp_mode_map_max;
- /* The CPU wants the OS to decide which satp mode to use */
- if (cpu->cfg.satp_mode.supported == 0) {
+ if (!get_satp_mode_supported(cpu, &supported)) {
+ /* The CPU wants the hypervisor to decide which satp mode to allow */
return;
}
- satp_mode_supported_max =
- satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
-
- if (cpu->cfg.satp_mode.map == 0) {
- if (cpu->cfg.satp_mode.init == 0) {
+ if (cpu->satp_modes.map == 0) {
+ if (cpu->satp_modes.init == 0) {
            /* If unset by the user, we fall back to the default satp mode. */
set_satp_mode_default_map(cpu);
} else {
@@ -1192,27 +848,27 @@ static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
* valid_vm_1_10_32/64.
*/
for (int i = 1; i < 16; ++i) {
- if ((cpu->cfg.satp_mode.init & (1 << i)) &&
- (cpu->cfg.satp_mode.supported & (1 << i))) {
+ if ((cpu->satp_modes.init & (1 << i)) &&
+ supported & (1 << i)) {
for (int j = i - 1; j >= 0; --j) {
- if (cpu->cfg.satp_mode.supported & (1 << j)) {
- cpu->cfg.satp_mode.map |= (1 << j);
- break;
+ if (supported & (1 << j)) {
+ cpu->cfg.max_satp_mode = j;
+ return;
}
}
- break;
}
}
}
+ return;
}
- satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
+ satp_mode_map_max = satp_mode_max_from_map(cpu->satp_modes.map);
/* Make sure the user asked for a supported configuration (HW and qemu) */
- if (satp_mode_map_max > satp_mode_supported_max) {
+ if (satp_mode_map_max > cpu->cfg.max_satp_mode) {
error_setg(errp, "satp_mode %s is higher than hw max capability %s",
satp_mode_str(satp_mode_map_max, rv32),
- satp_mode_str(satp_mode_supported_max, rv32));
+ satp_mode_str(cpu->cfg.max_satp_mode, rv32));
return;
}
@@ -1222,9 +878,9 @@ static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
*/
if (!rv32) {
for (int i = satp_mode_map_max - 1; i >= 0; --i) {
- if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
- (cpu->cfg.satp_mode.init & (1 << i)) &&
- (cpu->cfg.satp_mode.supported & (1 << i))) {
+ if (!(cpu->satp_modes.map & (1 << i)) &&
+ (cpu->satp_modes.init & (1 << i)) &&
+ (supported & (1 << i))) {
error_setg(errp, "cannot disable %s satp mode if %s "
"is enabled", satp_mode_str(i, false),
satp_mode_str(satp_mode_map_max, false));
@@ -1233,12 +889,7 @@ static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
}
}
- /* Finally expand the map so that all valid modes are set */
- for (int i = satp_mode_map_max - 1; i >= 0; --i) {
- if (cpu->cfg.satp_mode.supported & (1 << i)) {
- cpu->cfg.satp_mode.map |= (1 << i);
- }
- }
+ cpu->cfg.max_satp_mode = satp_mode_map_max;
}
#endif
@@ -1316,11 +967,11 @@ bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- RISCVSATPMap *satp_map = opaque;
+ RISCVSATPModes *satp_modes = opaque;
uint8_t satp = satp_mode_from_str(name);
bool value;
- value = satp_map->map & (1 << satp);
+ value = satp_modes->map & (1 << satp);
visit_type_bool(v, name, &value, errp);
}
@@ -1328,7 +979,7 @@ static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- RISCVSATPMap *satp_map = opaque;
+ RISCVSATPModes *satp_modes = opaque;
uint8_t satp = satp_mode_from_str(name);
bool value;
@@ -1336,8 +987,8 @@ static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
return;
}
- satp_map->map = deposit32(satp_map->map, satp, 1, value);
- satp_map->init |= 1 << satp;
+ satp_modes->map = deposit32(satp_modes->map, satp, 1, value);
+ satp_modes->init |= 1 << satp;
}
void riscv_add_satp_mode_properties(Object *obj)
@@ -1346,16 +997,16 @@ void riscv_add_satp_mode_properties(Object *obj)
if (cpu->env.misa_mxl == MXL_RV32) {
object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
- cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
+ cpu_riscv_set_satp, NULL, &cpu->satp_modes);
} else {
object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
- cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
+ cpu_riscv_set_satp, NULL, &cpu->satp_modes);
object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
- cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
+ cpu_riscv_set_satp, NULL, &cpu->satp_modes);
object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
- cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
+ cpu_riscv_set_satp, NULL, &cpu->satp_modes);
object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
- cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
+ cpu_riscv_set_satp, NULL, &cpu->satp_modes);
}
}
@@ -1432,18 +1083,13 @@ static bool riscv_cpu_is_dynamic(Object *cpu_obj)
return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}
-static void riscv_cpu_post_init(Object *obj)
-{
- accel_cpu_instance_init(CPU(obj));
-}
-
static void riscv_cpu_init(Object *obj)
{
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
RISCVCPU *cpu = RISCV_CPU(obj);
CPURISCVState *env = &cpu->env;
- env->misa_mxl = mcc->misa_mxl_max;
+ env->misa_mxl = mcc->def->misa_mxl_max;
#ifndef CONFIG_USER_ONLY
qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
@@ -1461,8 +1107,8 @@ static void riscv_cpu_init(Object *obj)
* for all CPUs. Each accelerator will decide what to do when
* users disable them.
*/
- RISCV_CPU(obj)->cfg.ext_zicntr = true;
- RISCV_CPU(obj)->cfg.ext_zihpm = true;
+ RISCV_CPU(obj)->cfg.ext_zicntr = !mcc->def->bare;
+ RISCV_CPU(obj)->cfg.ext_zihpm = !mcc->def->bare;
/* Default values for non-bool cpu properties */
cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
@@ -1472,34 +1118,28 @@ static void riscv_cpu_init(Object *obj)
cpu->cfg.cbop_blocksize = 64;
cpu->cfg.cboz_blocksize = 64;
cpu->env.vext_ver = VEXT_VERSION_1_00_0;
-}
-
-static void riscv_bare_cpu_init(Object *obj)
-{
- RISCVCPU *cpu = RISCV_CPU(obj);
+ cpu->cfg.max_satp_mode = -1;
- /*
- * Bare CPUs do not inherit the timer and performance
- * counters from the parent class (see riscv_cpu_init()
- * for info on why the parent enables them).
- *
- * Users have to explicitly enable these counters for
- * bare CPUs.
- */
- cpu->cfg.ext_zicntr = false;
- cpu->cfg.ext_zihpm = false;
+ if (mcc->def->profile) {
+ mcc->def->profile->enabled = true;
+ }
- /* Set to QEMU's first supported priv version */
- cpu->env.priv_ver = PRIV_VERSION_1_10_0;
+ env->misa_ext_mask = env->misa_ext = mcc->def->misa_ext;
+ riscv_cpu_cfg_merge(&cpu->cfg, &mcc->def->cfg);
- /*
- * Support all available satp_mode settings. The default
- * value will be set to MBARE if the user doesn't set
- * satp_mode manually (see set_satp_mode_default()).
- */
+ if (mcc->def->priv_spec != RISCV_PROFILE_ATTR_UNUSED) {
+ cpu->env.priv_ver = mcc->def->priv_spec;
+ }
+ if (mcc->def->vext_spec != RISCV_PROFILE_ATTR_UNUSED) {
+ cpu->env.vext_ver = mcc->def->vext_spec;
+ }
#ifndef CONFIG_USER_ONLY
- set_satp_mode_max_supported(cpu, VM_1_10_SV64);
+ if (mcc->def->custom_csrs) {
+ riscv_register_custom_csrs(cpu, mcc->def->custom_csrs);
+ }
#endif
+
+ accel_cpu_instance_init(CPU(obj));
}
typedef struct misa_ext_info {
@@ -1534,7 +1174,7 @@ static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
CPUClass *cc = CPU_CLASS(mcc);
/* Validate that MISA_MXL is set properly. */
- switch (mcc->misa_mxl_max) {
+ switch (mcc->def->misa_mxl_max) {
#ifdef TARGET_RISCV64
case MXL_RV64:
case MXL_RV128:
@@ -2963,36 +2603,6 @@ static const Property riscv_cpu_properties[] = {
DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
};
-#if defined(TARGET_RISCV64)
-static void rva22u64_profile_cpu_init(Object *obj)
-{
- rv64i_bare_cpu_init(obj);
-
- RVA22U64.enabled = true;
-}
-
-static void rva22s64_profile_cpu_init(Object *obj)
-{
- rv64i_bare_cpu_init(obj);
-
- RVA22S64.enabled = true;
-}
-
-static void rva23u64_profile_cpu_init(Object *obj)
-{
- rv64i_bare_cpu_init(obj);
-
- RVA23U64.enabled = true;
-}
-
-static void rva23s64_profile_cpu_init(Object *obj)
-{
- rv64i_bare_cpu_init(obj);
-
- RVA23S64.enabled = true;
-}
-#endif
-
static const gchar *riscv_gdb_arch_name(CPUState *cs)
{
RISCVCPU *cpu = RISCV_CPU(cs);
@@ -3061,12 +2671,87 @@ static void riscv_cpu_common_class_init(ObjectClass *c, const void *data)
device_class_set_props(dc, riscv_cpu_properties);
}
-static void riscv_cpu_class_init(ObjectClass *c, const void *data)
+static bool profile_extends(RISCVCPUProfile *trial, RISCVCPUProfile *parent)
+{
+ RISCVCPUProfile *curr;
+ if (!parent) {
+ return true;
+ }
+
+ curr = trial;
+ while (curr) {
+ if (curr == parent) {
+ return true;
+ }
+ curr = curr->u_parent;
+ }
+
+ curr = trial;
+ while (curr) {
+ if (curr == parent) {
+ return true;
+ }
+ curr = curr->s_parent;
+ }
+
+ return false;
+}
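
profile_extends() accepts the trial profile when the requested parent is reachable along either the user-mode (u_parent) or supervisor-mode (s_parent) lineage, or when no parent is required at all. A toy check under assumed profile objects:

    static void profile_extends_demo(void)
    {
        /* hypothetical lineage: the S profile chains to its predecessor */
        static RISCVCPUProfile rva22s64_ex;
        static RISCVCPUProfile rva23s64_ex = { .s_parent = &rva22s64_ex };

        assert(profile_extends(&rva23s64_ex, &rva22s64_ex)); /* reachable */
        assert(profile_extends(&rva23s64_ex, NULL));  /* no parent needed */
    }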
+
+static void riscv_cpu_class_base_init(ObjectClass *c, const void *data)
{
RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
+ RISCVCPUClass *pcc = RISCV_CPU_CLASS(object_class_get_parent(c));
+
+ if (pcc->def) {
+ mcc->def = g_memdup2(pcc->def, sizeof(*pcc->def));
+ } else {
+ mcc->def = g_new0(RISCVCPUDef, 1);
+ }
- mcc->misa_mxl_max = (RISCVMXL)GPOINTER_TO_UINT(data);
- riscv_cpu_validate_misa_mxl(mcc);
+ if (data) {
+ const RISCVCPUDef *def = data;
+ mcc->def->bare |= def->bare;
+ if (def->profile) {
+ assert(profile_extends(def->profile, mcc->def->profile));
+ assert(mcc->def->bare);
+ mcc->def->profile = def->profile;
+ }
+ if (def->misa_mxl_max) {
+ assert(def->misa_mxl_max <= MXL_RV128);
+ mcc->def->misa_mxl_max = def->misa_mxl_max;
+
+#ifndef CONFIG_USER_ONLY
+ /*
+ * Hack to simplify CPU class hierarchies that include both 32- and
+ * 64-bit models: reduce SV39/48/57/64 to SV32 for 32-bit models.
+ */
+ if (mcc->def->misa_mxl_max == MXL_RV32 &&
+ !valid_vm_1_10_32[mcc->def->cfg.max_satp_mode]) {
+ mcc->def->cfg.max_satp_mode = VM_1_10_SV32;
+ }
+#endif
+ }
+ if (def->priv_spec != RISCV_PROFILE_ATTR_UNUSED) {
+ assert(def->priv_spec <= PRIV_VERSION_LATEST);
+ mcc->def->priv_spec = def->priv_spec;
+ }
+ if (def->vext_spec != RISCV_PROFILE_ATTR_UNUSED) {
+ assert(def->vext_spec != 0);
+ mcc->def->vext_spec = def->vext_spec;
+ }
+ mcc->def->misa_ext |= def->misa_ext;
+
+ riscv_cpu_cfg_merge(&mcc->def->cfg, &def->cfg);
+
+ if (def->custom_csrs) {
+ assert(!mcc->def->custom_csrs);
+ mcc->def->custom_csrs = def->custom_csrs;
+ }
+ }
+
+ if (!object_class_is_abstract(c)) {
+ riscv_cpu_validate_misa_mxl(mcc);
+ }
}
static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
@@ -3161,41 +2846,34 @@ void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
}
#endif
-#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
+#define DEFINE_ABSTRACT_RISCV_CPU(type_name, parent_type_name, ...) \
{ \
.name = (type_name), \
- .parent = TYPE_RISCV_DYNAMIC_CPU, \
- .instance_init = (initfn), \
- .class_init = riscv_cpu_class_init, \
- .class_data = GUINT_TO_POINTER(misa_mxl_max) \
+ .parent = (parent_type_name), \
+ .abstract = true, \
+ .class_data = &(const RISCVCPUDef) { \
+ .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \
+ .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \
+ .cfg.max_satp_mode = -1, \
+ __VA_ARGS__ \
+ }, \
}
-#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \
+#define DEFINE_RISCV_CPU(type_name, parent_type_name, ...) \
{ \
.name = (type_name), \
- .parent = TYPE_RISCV_VENDOR_CPU, \
- .instance_init = (initfn), \
- .class_init = riscv_cpu_class_init, \
- .class_data = GUINT_TO_POINTER(misa_mxl_max) \
+ .parent = (parent_type_name), \
+ .class_data = &(const RISCVCPUDef) { \
+ .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \
+ .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \
+ .cfg.max_satp_mode = -1, \
+ __VA_ARGS__ \
+ }, \
}
-#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \
- { \
- .name = (type_name), \
- .parent = TYPE_RISCV_BARE_CPU, \
- .instance_init = (initfn), \
- .class_init = riscv_cpu_class_init, \
- .class_data = GUINT_TO_POINTER(misa_mxl_max) \
- }
-
-#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
- { \
- .name = (type_name), \
- .parent = TYPE_RISCV_BARE_CPU, \
- .instance_init = (initfn), \
- .class_init = riscv_cpu_class_init, \
- .class_data = GUINT_TO_POINTER(misa_mxl_max) \
- }
+#define DEFINE_PROFILE_CPU(type_name, parent_type_name, profile_) \
+ DEFINE_RISCV_CPU(type_name, parent_type_name, \
+ .profile = &(profile_))
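+
+/*
+ * Illustrative expansion: DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV64I,
+ * TYPE_RISCV_BARE_CPU, .misa_mxl_max = MXL_RV64, .misa_ext = RVI)
+ * produces a TypeInfo whose class_data points at a compound-literal
+ * RISCVCPUDef in which priv_spec/vext_spec are RISCV_PROFILE_ATTR_UNUSED
+ * and cfg.max_satp_mode is -1 unless overridden by the extra arguments.
+ */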
static const TypeInfo riscv_cpu_type_infos[] = {
{
@@ -3204,67 +2882,310 @@ static const TypeInfo riscv_cpu_type_infos[] = {
.instance_size = sizeof(RISCVCPU),
.instance_align = __alignof(RISCVCPU),
.instance_init = riscv_cpu_init,
- .instance_post_init = riscv_cpu_post_init,
.abstract = true,
.class_size = sizeof(RISCVCPUClass),
.class_init = riscv_cpu_common_class_init,
+ .class_base_init = riscv_cpu_class_base_init,
},
- {
- .name = TYPE_RISCV_DYNAMIC_CPU,
- .parent = TYPE_RISCV_CPU,
- .abstract = true,
- },
- {
- .name = TYPE_RISCV_VENDOR_CPU,
- .parent = TYPE_RISCV_CPU,
- .abstract = true,
- },
- {
- .name = TYPE_RISCV_BARE_CPU,
- .parent = TYPE_RISCV_CPU,
- .instance_init = riscv_bare_cpu_init,
- .abstract = true,
- },
+
+ DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_DYNAMIC_CPU, TYPE_RISCV_CPU,
+ .cfg.mmu = true,
+ .cfg.pmp = true,
+ .priv_spec = PRIV_VERSION_LATEST,
+ ),
+
+ DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_VENDOR_CPU, TYPE_RISCV_CPU),
+ DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_BARE_CPU, TYPE_RISCV_CPU,
+ /*
+ * Bare CPUs do not inherit the timer and performance
+ * counters from the parent class (see riscv_cpu_init()
+ * for info on why the parent enables them).
+ *
+ * Users have to explicitly enable these counters for
+ * bare CPUs.
+ */
+ .bare = true,
+
+ /* Set to QEMU's first supported priv version */
+ .priv_spec = PRIV_VERSION_1_10_0,
+
+ /*
+ * Support all available satp_mode settings. By default
+ * only MBARE will be available if the user doesn't enable
+ * a mode manually (see riscv_cpu_satp_mode_finalize()).
+ */
+#ifdef TARGET_RISCV32
+ .cfg.max_satp_mode = VM_1_10_SV32,
+#else
+ .cfg.max_satp_mode = VM_1_10_SV57,
+#endif
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_MAX, TYPE_RISCV_DYNAMIC_CPU,
#if defined(TARGET_RISCV32)
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init),
+ .misa_mxl_max = MXL_RV32,
+ .cfg.max_satp_mode = VM_1_10_SV32,
#elif defined(TARGET_RISCV64)
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
+ .misa_mxl_max = MXL_RV64,
+ .cfg.max_satp_mode = VM_1_10_SV57,
#endif
+ ),
+
+ DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E, TYPE_RISCV_VENDOR_CPU,
+ .misa_ext = RVI | RVM | RVA | RVC | RVU,
+ .priv_spec = PRIV_VERSION_1_10_0,
+ .cfg.max_satp_mode = VM_1_10_MBARE,
+ .cfg.ext_zifencei = true,
+ .cfg.ext_zicsr = true,
+ .cfg.pmp = true
+ ),
+
+ DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U, TYPE_RISCV_VENDOR_CPU,
+ .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU,
+ .priv_spec = PRIV_VERSION_1_10_0,
+
+ .cfg.max_satp_mode = VM_1_10_SV39,
+ .cfg.ext_zifencei = true,
+ .cfg.ext_zicsr = true,
+ .cfg.mmu = true,
+ .cfg.pmp = true
+ ),
#if defined(TARGET_RISCV32) || \
(defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init),
- DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init),
- DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init),
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE32, TYPE_RISCV_DYNAMIC_CPU,
+ .cfg.max_satp_mode = VM_1_10_SV32,
+ .misa_mxl_max = MXL_RV32,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_IBEX, TYPE_RISCV_VENDOR_CPU,
+ .misa_mxl_max = MXL_RV32,
+ .misa_ext = RVI | RVM | RVC | RVU,
+ .priv_spec = PRIV_VERSION_1_12_0,
+ .cfg.max_satp_mode = VM_1_10_MBARE,
+ .cfg.ext_zifencei = true,
+ .cfg.ext_zicsr = true,
+ .cfg.pmp = true,
+ .cfg.ext_smepmp = true,
+
+ .cfg.ext_zba = true,
+ .cfg.ext_zbb = true,
+ .cfg.ext_zbc = true,
+ .cfg.ext_zbs = true
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E31, TYPE_RISCV_CPU_SIFIVE_E,
+ .misa_mxl_max = MXL_RV32
+ ),
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E34, TYPE_RISCV_CPU_SIFIVE_E,
+ .misa_mxl_max = MXL_RV32,
+ .misa_ext = RVF, /* IMAFCU */
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U34, TYPE_RISCV_CPU_SIFIVE_U,
+ .misa_mxl_max = MXL_RV32,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV32I, TYPE_RISCV_BARE_CPU,
+ .misa_mxl_max = MXL_RV32,
+ .misa_ext = RVI
+ ),
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV32E, TYPE_RISCV_BARE_CPU,
+ .misa_mxl_max = MXL_RV32,
+ .misa_ext = RVE
+ ),
#endif
#if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY))
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init),
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_MAX32, TYPE_RISCV_DYNAMIC_CPU,
+ .cfg.max_satp_mode = VM_1_10_SV32,
+ .misa_mxl_max = MXL_RV32,
+ ),
#endif
#if defined(TARGET_RISCV64)
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_TT_ASCALON, MXL_RV64, rv64_tt_ascalon_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init),
- DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU,
- MXL_RV64, rv64_xiangshan_nanhu_cpu_init),
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE64, TYPE_RISCV_DYNAMIC_CPU,
+ .cfg.max_satp_mode = VM_1_10_SV57,
+ .misa_mxl_max = MXL_RV64,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E51, TYPE_RISCV_CPU_SIFIVE_E,
+ .misa_mxl_max = MXL_RV64
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U54, TYPE_RISCV_CPU_SIFIVE_U,
+ .misa_mxl_max = MXL_RV64,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SHAKTI_C, TYPE_RISCV_CPU_SIFIVE_U,
+ .misa_mxl_max = MXL_RV64,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_THEAD_C906, TYPE_RISCV_VENDOR_CPU,
+ .misa_mxl_max = MXL_RV64,
+ .misa_ext = RVG | RVC | RVS | RVU,
+ .priv_spec = PRIV_VERSION_1_11_0,
+
+ .cfg.ext_zfa = true,
+ .cfg.ext_zfh = true,
+ .cfg.mmu = true,
+ .cfg.ext_xtheadba = true,
+ .cfg.ext_xtheadbb = true,
+ .cfg.ext_xtheadbs = true,
+ .cfg.ext_xtheadcmo = true,
+ .cfg.ext_xtheadcondmov = true,
+ .cfg.ext_xtheadfmemidx = true,
+ .cfg.ext_xtheadmac = true,
+ .cfg.ext_xtheadmemidx = true,
+ .cfg.ext_xtheadmempair = true,
+ .cfg.ext_xtheadsync = true,
+ .cfg.pmp = true,
+
+ .cfg.mvendorid = THEAD_VENDOR_ID,
+
+ .cfg.max_satp_mode = VM_1_10_SV39,
+#ifndef CONFIG_USER_ONLY
+ .custom_csrs = th_csr_list,
+#endif
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_TT_ASCALON, TYPE_RISCV_VENDOR_CPU,
+ .misa_mxl_max = MXL_RV64,
+ .misa_ext = RVG | RVC | RVS | RVU | RVH | RVV,
+ .priv_spec = PRIV_VERSION_1_13_0,
+ .vext_spec = VEXT_VERSION_1_00_0,
+
+ /* ISA extensions */
+ .cfg.mmu = true,
+ .cfg.vlenb = 256 >> 3,
+ .cfg.elen = 64,
+ .cfg.rvv_ma_all_1s = true,
+ .cfg.rvv_ta_all_1s = true,
+ .cfg.misa_w = true,
+ .cfg.pmp = true,
+ .cfg.cbom_blocksize = 64,
+ .cfg.cbop_blocksize = 64,
+ .cfg.cboz_blocksize = 64,
+ .cfg.ext_zic64b = true,
+ .cfg.ext_zicbom = true,
+ .cfg.ext_zicbop = true,
+ .cfg.ext_zicboz = true,
+ .cfg.ext_zicntr = true,
+ .cfg.ext_zicond = true,
+ .cfg.ext_zicsr = true,
+ .cfg.ext_zifencei = true,
+ .cfg.ext_zihintntl = true,
+ .cfg.ext_zihintpause = true,
+ .cfg.ext_zihpm = true,
+ .cfg.ext_zimop = true,
+ .cfg.ext_zawrs = true,
+ .cfg.ext_zfa = true,
+ .cfg.ext_zfbfmin = true,
+ .cfg.ext_zfh = true,
+ .cfg.ext_zfhmin = true,
+ .cfg.ext_zcb = true,
+ .cfg.ext_zcmop = true,
+ .cfg.ext_zba = true,
+ .cfg.ext_zbb = true,
+ .cfg.ext_zbs = true,
+ .cfg.ext_zkt = true,
+ .cfg.ext_zvbb = true,
+ .cfg.ext_zvbc = true,
+ .cfg.ext_zvfbfmin = true,
+ .cfg.ext_zvfbfwma = true,
+ .cfg.ext_zvfh = true,
+ .cfg.ext_zvfhmin = true,
+ .cfg.ext_zvkng = true,
+ .cfg.ext_smaia = true,
+ .cfg.ext_smstateen = true,
+ .cfg.ext_ssaia = true,
+ .cfg.ext_sscofpmf = true,
+ .cfg.ext_sstc = true,
+ .cfg.ext_svade = true,
+ .cfg.ext_svinval = true,
+ .cfg.ext_svnapot = true,
+ .cfg.ext_svpbmt = true,
+
+ .cfg.max_satp_mode = VM_1_10_SV57,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_VEYRON_V1, TYPE_RISCV_VENDOR_CPU,
+ .misa_mxl_max = MXL_RV64,
+ .misa_ext = RVG | RVC | RVS | RVU | RVH,
+ .priv_spec = PRIV_VERSION_1_12_0,
+
+ /* ISA extensions */
+ .cfg.mmu = true,
+ .cfg.ext_zifencei = true,
+ .cfg.ext_zicsr = true,
+ .cfg.pmp = true,
+ .cfg.ext_zicbom = true,
+ .cfg.cbom_blocksize = 64,
+ .cfg.cboz_blocksize = 64,
+ .cfg.ext_zicboz = true,
+ .cfg.ext_smaia = true,
+ .cfg.ext_ssaia = true,
+ .cfg.ext_sscofpmf = true,
+ .cfg.ext_sstc = true,
+ .cfg.ext_svinval = true,
+ .cfg.ext_svnapot = true,
+ .cfg.ext_svpbmt = true,
+ .cfg.ext_smstateen = true,
+ .cfg.ext_zba = true,
+ .cfg.ext_zbb = true,
+ .cfg.ext_zbc = true,
+ .cfg.ext_zbs = true,
+ .cfg.ext_XVentanaCondOps = true,
+
+ .cfg.mvendorid = VEYRON_V1_MVENDORID,
+ .cfg.marchid = VEYRON_V1_MARCHID,
+ .cfg.mimpid = VEYRON_V1_MIMPID,
+
+ .cfg.max_satp_mode = VM_1_10_SV48,
+ ),
+
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU, TYPE_RISCV_VENDOR_CPU,
+ .misa_mxl_max = MXL_RV64,
+ .misa_ext = RVG | RVC | RVB | RVS | RVU,
+ .priv_spec = PRIV_VERSION_1_12_0,
+
+ /* ISA extensions */
+ .cfg.ext_zbc = true,
+ .cfg.ext_zbkb = true,
+ .cfg.ext_zbkc = true,
+ .cfg.ext_zbkx = true,
+ .cfg.ext_zknd = true,
+ .cfg.ext_zkne = true,
+ .cfg.ext_zknh = true,
+ .cfg.ext_zksed = true,
+ .cfg.ext_zksh = true,
+ .cfg.ext_svinval = true,
+
+ .cfg.mmu = true,
+ .cfg.pmp = true,
+
+ .cfg.max_satp_mode = VM_1_10_SV39,
+ ),
+
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
- DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init),
-#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
- DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init),
- DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init),
- DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
- DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
- DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, MXL_RV64, rva23u64_profile_cpu_init),
- DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, MXL_RV64, rva23s64_profile_cpu_init),
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE128, TYPE_RISCV_DYNAMIC_CPU,
+ .cfg.max_satp_mode = VM_1_10_SV57,
+ .misa_mxl_max = MXL_RV128,
+ ),
+#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV64I, TYPE_RISCV_BARE_CPU,
+ .misa_mxl_max = MXL_RV64,
+ .misa_ext = RVI
+ ),
+ DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV64E, TYPE_RISCV_BARE_CPU,
+ .misa_mxl_max = MXL_RV64,
+ .misa_ext = RVE
+ ),
+
+ DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, TYPE_RISCV_CPU_RV64I, RVA22U64),
+ DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, TYPE_RISCV_CPU_RV64I, RVA22S64),
+ DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, TYPE_RISCV_CPU_RV64I, RVA23U64),
+ DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, TYPE_RISCV_CPU_RV64I, RVA23S64),
#endif /* TARGET_RISCV64 */
};
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 167909c..229ade9 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -75,6 +75,7 @@ const char *riscv_get_misa_ext_name(uint32_t bit);
const char *riscv_get_misa_ext_description(uint32_t bit);
#define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
+#define ENV_CSR_OFFSET(_csr) offsetof(CPURISCVState, _csr)
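+/* e.g. ENV_CSR_OFFSET(mscratch) expands to offsetof(CPURISCVState, mscratch) */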
typedef struct riscv_cpu_profile {
struct riscv_cpu_profile *u_parent;
@@ -499,6 +500,19 @@ struct CPUArchState {
};
/*
+ * map is a 16-bit bitmap: the most significant set bit in map is the maximum
+ * satp mode that is supported. It may be chosen by the user and must respect
+ * what QEMU implements (valid_vm_1_10_32/64) and what the hardware is
+ * capable of (the CPU's cfg.max_satp_mode).
+ *
+ * init is a 16-bit bitmap used to make sure the user selected a correct
+ * configuration as per the specification.
+ */
+typedef struct {
+ uint16_t map, init;
+} RISCVSATPModes;
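+
+/*
+ * For example, map == (1 << VM_1_10_SV39) | (1 << VM_1_10_SV48) means
+ * SV48 is the maximum supported satp mode, SV48 being the most
+ * significant set bit.
+ */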
+
+/*
* RISCVCPU:
* @env: #CPURISCVState
*
@@ -514,6 +528,7 @@ struct ArchCPU {
/* Configuration Settings */
RISCVCPUConfig cfg;
+ RISCVSATPModes satp_modes;
QEMUTimer *pmu_timer;
/* A bitmask of Available programmable counters */
@@ -523,6 +538,19 @@ struct ArchCPU {
const GPtrArray *decoders;
};
+typedef struct RISCVCSR RISCVCSR;
+
+typedef struct RISCVCPUDef {
+ RISCVMXL misa_mxl_max; /* max mxl for this cpu */
+ RISCVCPUProfile *profile;
+ uint32_t misa_ext;
+ int priv_spec;
+ int32_t vext_spec;
+ RISCVCPUConfig cfg;
+ bool bare;
+ const RISCVCSR *custom_csrs;
+} RISCVCPUDef;
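+
+/*
+ * priv_spec/vext_spec left at RISCV_PROFILE_ATTR_UNUSED and
+ * cfg.max_satp_mode left at -1 act as "not set" sentinels: see
+ * riscv_cpu_class_base_init() and riscv_cpu_cfg_merge(), which only
+ * override an inherited value when a field was explicitly provided.
+ */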
+
/**
* RISCVCPUClass:
* @parent_realize: The parent class' realize handler.
@@ -535,7 +563,7 @@ struct RISCVCPUClass {
DeviceRealize parent_realize;
ResettablePhases parent_phases;
- RISCVMXL misa_mxl_max; /* max mxl for this cpu */
+ RISCVCPUDef *def;
};
static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
@@ -802,9 +830,6 @@ static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
return vlen >> (vsew + 3 - lmul);
}
-void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags);
-
bool riscv_cpu_is_32bit(RISCVCPU *cpu);
bool riscv_cpu_virt_mem_enabled(CPURISCVState *env);
@@ -816,8 +841,8 @@ RISCVException riscv_csrr(CPURISCVState *env, int csrno,
target_ulong *ret_value);
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
- target_ulong *ret_value,
- target_ulong new_value, target_ulong write_mask);
+ target_ulong *ret_value, target_ulong new_value,
+ target_ulong write_mask, uintptr_t ra);
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
target_ulong *ret_value,
target_ulong new_value,
@@ -826,13 +851,13 @@ RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
static inline void riscv_csr_write(CPURISCVState *env, int csrno,
target_ulong val)
{
- riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
+ riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS), 0);
}
static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
{
target_ulong val = 0;
- riscv_csrrw(env, csrno, &val, 0, 0);
+ riscv_csrrw(env, csrno, &val, 0, 0, 0);
return val;
}
@@ -841,7 +866,8 @@ typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
target_ulong *ret_value);
typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
- target_ulong new_value);
+ target_ulong new_value,
+ uintptr_t ra);
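+/*
+ * 'ra' is the host return address of the calling helper (GETPC() when
+ * invoked from translated code, 0 otherwise); write handlers that need
+ * the guest PC can unwind through it, as get_next_pc() does for
+ * write_misa().
+ */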
typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
target_ulong *ret_value,
target_ulong new_value,
@@ -850,8 +876,8 @@ typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
Int128 *ret_value);
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
- Int128 *ret_value,
- Int128 new_value, Int128 write_mask);
+ Int128 *ret_value, Int128 new_value,
+ Int128 write_mask, uintptr_t ra);
typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
Int128 *ret_value);
@@ -870,6 +896,12 @@ typedef struct {
uint32_t min_priv_ver;
} riscv_csr_operations;
+struct RISCVCSR {
+ int csrno;
+ bool (*insertion_test)(RISCVCPU *cpu);
+ riscv_csr_operations csr_ops;
+};
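+
+/*
+ * Illustrative: a custom CSR entry names the CSR number, a predicate
+ * evaluated per-CPU to decide whether the CSR exists, and the
+ * riscv_csr_operations to install (presumably via riscv_set_csr_ops()).
+ */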
+
/* CSR function table constants */
enum {
CSR_TABLE_SIZE = 0x1000
@@ -924,18 +956,17 @@ extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];
extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
-void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);
+void riscv_set_csr_ops(int csrno, const riscv_csr_operations *ops);
void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);
target_ulong riscv_new_csr_seed(target_ulong new_value,
target_ulong write_mask);
-uint8_t satp_mode_max_from_map(uint32_t map);
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);
-/* Implemented in th_csr.c */
-void th_register_custom_csrs(RISCVCPU *cpu);
+/* In th_csr.c */
+extern const RISCVCSR th_csr_list[];
const char *priv_spec_to_str(int priv_version);
#endif /* RISCV_CPU_H */
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
index cfe371b..aa28dc8 100644
--- a/target/riscv/cpu_cfg.h
+++ b/target/riscv/cpu_cfg.h
@@ -21,182 +21,10 @@
#ifndef RISCV_CPU_CFG_H
#define RISCV_CPU_CFG_H
-/*
- * map is a 16-bit bitmap: the most significant set bit in map is the maximum
- * satp mode that is supported. It may be chosen by the user and must respect
- * what qemu implements (valid_1_10_32/64) and what the hw is capable of
- * (supported bitmap below).
- *
- * init is a 16-bit bitmap used to make sure the user selected a correct
- * configuration as per the specification.
- *
- * supported is a 16-bit bitmap used to reflect the hw capabilities.
- */
-typedef struct {
- uint16_t map, init, supported;
-} RISCVSATPMap;
-
struct RISCVCPUConfig {
- bool ext_zba;
- bool ext_zbb;
- bool ext_zbc;
- bool ext_zbkb;
- bool ext_zbkc;
- bool ext_zbkx;
- bool ext_zbs;
- bool ext_zca;
- bool ext_zcb;
- bool ext_zcd;
- bool ext_zce;
- bool ext_zcf;
- bool ext_zcmp;
- bool ext_zcmt;
- bool ext_zk;
- bool ext_zkn;
- bool ext_zknd;
- bool ext_zkne;
- bool ext_zknh;
- bool ext_zkr;
- bool ext_zks;
- bool ext_zksed;
- bool ext_zksh;
- bool ext_zkt;
- bool ext_zifencei;
- bool ext_zicntr;
- bool ext_zicsr;
- bool ext_zicbom;
- bool ext_zicbop;
- bool ext_zicboz;
- bool ext_zicfilp;
- bool ext_zicfiss;
- bool ext_zicond;
- bool ext_zihintntl;
- bool ext_zihintpause;
- bool ext_zihpm;
- bool ext_zimop;
- bool ext_zcmop;
- bool ext_ztso;
- bool ext_smstateen;
- bool ext_sstc;
- bool ext_smcdeleg;
- bool ext_ssccfg;
- bool ext_smcntrpmf;
- bool ext_smcsrind;
- bool ext_sscsrind;
- bool ext_ssdbltrp;
- bool ext_smdbltrp;
- bool ext_svadu;
- bool ext_svinval;
- bool ext_svnapot;
- bool ext_svpbmt;
- bool ext_svvptc;
- bool ext_svukte;
- bool ext_zdinx;
- bool ext_zaamo;
- bool ext_zacas;
- bool ext_zama16b;
- bool ext_zabha;
- bool ext_zalrsc;
- bool ext_zawrs;
- bool ext_zfa;
- bool ext_zfbfmin;
- bool ext_zfh;
- bool ext_zfhmin;
- bool ext_zfinx;
- bool ext_zhinx;
- bool ext_zhinxmin;
- bool ext_zve32f;
- bool ext_zve32x;
- bool ext_zve64f;
- bool ext_zve64d;
- bool ext_zve64x;
- bool ext_zvbb;
- bool ext_zvbc;
- bool ext_zvkb;
- bool ext_zvkg;
- bool ext_zvkned;
- bool ext_zvknha;
- bool ext_zvknhb;
- bool ext_zvksed;
- bool ext_zvksh;
- bool ext_zvkt;
- bool ext_zvkn;
- bool ext_zvknc;
- bool ext_zvkng;
- bool ext_zvks;
- bool ext_zvksc;
- bool ext_zvksg;
- bool ext_zmmul;
- bool ext_zvfbfmin;
- bool ext_zvfbfwma;
- bool ext_zvfh;
- bool ext_zvfhmin;
- bool ext_smaia;
- bool ext_ssaia;
- bool ext_smctr;
- bool ext_ssctr;
- bool ext_sscofpmf;
- bool ext_smepmp;
- bool ext_smrnmi;
- bool ext_ssnpm;
- bool ext_smnpm;
- bool ext_smmpm;
- bool ext_sspm;
- bool ext_supm;
- bool rvv_ta_all_1s;
- bool rvv_ma_all_1s;
- bool rvv_vl_half_avl;
-
- uint32_t mvendorid;
- uint64_t marchid;
- uint64_t mimpid;
-
- /* Named features */
- bool ext_svade;
- bool ext_zic64b;
- bool ext_ssstateen;
- bool ext_sha;
-
- /*
- * Always 'true' booleans for named features
- * TCG always implement/can't be user disabled,
- * based on spec version.
- */
- bool has_priv_1_13;
- bool has_priv_1_12;
- bool has_priv_1_11;
-
- /* Always enabled for TCG if has_priv_1_11 */
- bool ext_ziccrse;
-
- /* Vendor-specific custom extensions */
- bool ext_xtheadba;
- bool ext_xtheadbb;
- bool ext_xtheadbs;
- bool ext_xtheadcmo;
- bool ext_xtheadcondmov;
- bool ext_xtheadfmemidx;
- bool ext_xtheadfmv;
- bool ext_xtheadmac;
- bool ext_xtheadmemidx;
- bool ext_xtheadmempair;
- bool ext_xtheadsync;
- bool ext_XVentanaCondOps;
-
- uint32_t pmu_mask;
- uint16_t vlenb;
- uint16_t elen;
- uint16_t cbom_blocksize;
- uint16_t cbop_blocksize;
- uint16_t cboz_blocksize;
- bool mmu;
- bool pmp;
- bool debug;
- bool misa_w;
-
- bool short_isa_string;
-
- RISCVSATPMap satp_mode;
+#define BOOL_FIELD(x) bool x;
+#define TYPED_FIELD(type, x, default) type x;
+#include "cpu_cfg_fields.h.inc"
};
typedef struct RISCVCPUConfig RISCVCPUConfig;
diff --git a/target/riscv/cpu_cfg_fields.h.inc b/target/riscv/cpu_cfg_fields.h.inc
new file mode 100644
index 0000000..59f134a
--- /dev/null
+++ b/target/riscv/cpu_cfg_fields.h.inc
@@ -0,0 +1,170 @@
+/*
+ * Required definitions before including this file:
+ *
+ * #define BOOL_FIELD(x)
+ * #define TYPED_FIELD(type, x, default)
+ */
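+
+/*
+ * cpu_cfg.h expands this list into the RISCVCPUConfig struct fields;
+ * other consumers may expand the same list into, e.g., property tables
+ * or default-value initializers by redefining the two macros before
+ * including this file.
+ */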
+
+BOOL_FIELD(ext_zba)
+BOOL_FIELD(ext_zbb)
+BOOL_FIELD(ext_zbc)
+BOOL_FIELD(ext_zbkb)
+BOOL_FIELD(ext_zbkc)
+BOOL_FIELD(ext_zbkx)
+BOOL_FIELD(ext_zbs)
+BOOL_FIELD(ext_zca)
+BOOL_FIELD(ext_zcb)
+BOOL_FIELD(ext_zcd)
+BOOL_FIELD(ext_zce)
+BOOL_FIELD(ext_zcf)
+BOOL_FIELD(ext_zcmp)
+BOOL_FIELD(ext_zcmt)
+BOOL_FIELD(ext_zk)
+BOOL_FIELD(ext_zkn)
+BOOL_FIELD(ext_zknd)
+BOOL_FIELD(ext_zkne)
+BOOL_FIELD(ext_zknh)
+BOOL_FIELD(ext_zkr)
+BOOL_FIELD(ext_zks)
+BOOL_FIELD(ext_zksed)
+BOOL_FIELD(ext_zksh)
+BOOL_FIELD(ext_zkt)
+BOOL_FIELD(ext_zifencei)
+BOOL_FIELD(ext_zicntr)
+BOOL_FIELD(ext_zicsr)
+BOOL_FIELD(ext_zicbom)
+BOOL_FIELD(ext_zicbop)
+BOOL_FIELD(ext_zicboz)
+BOOL_FIELD(ext_zicfilp)
+BOOL_FIELD(ext_zicfiss)
+BOOL_FIELD(ext_zicond)
+BOOL_FIELD(ext_zihintntl)
+BOOL_FIELD(ext_zihintpause)
+BOOL_FIELD(ext_zihpm)
+BOOL_FIELD(ext_zimop)
+BOOL_FIELD(ext_zcmop)
+BOOL_FIELD(ext_ztso)
+BOOL_FIELD(ext_smstateen)
+BOOL_FIELD(ext_sstc)
+BOOL_FIELD(ext_smcdeleg)
+BOOL_FIELD(ext_ssccfg)
+BOOL_FIELD(ext_smcntrpmf)
+BOOL_FIELD(ext_smcsrind)
+BOOL_FIELD(ext_sscsrind)
+BOOL_FIELD(ext_ssdbltrp)
+BOOL_FIELD(ext_smdbltrp)
+BOOL_FIELD(ext_svadu)
+BOOL_FIELD(ext_svinval)
+BOOL_FIELD(ext_svnapot)
+BOOL_FIELD(ext_svpbmt)
+BOOL_FIELD(ext_svvptc)
+BOOL_FIELD(ext_svukte)
+BOOL_FIELD(ext_zdinx)
+BOOL_FIELD(ext_zaamo)
+BOOL_FIELD(ext_zacas)
+BOOL_FIELD(ext_zama16b)
+BOOL_FIELD(ext_zabha)
+BOOL_FIELD(ext_zalrsc)
+BOOL_FIELD(ext_zawrs)
+BOOL_FIELD(ext_zfa)
+BOOL_FIELD(ext_zfbfmin)
+BOOL_FIELD(ext_zfh)
+BOOL_FIELD(ext_zfhmin)
+BOOL_FIELD(ext_zfinx)
+BOOL_FIELD(ext_zhinx)
+BOOL_FIELD(ext_zhinxmin)
+BOOL_FIELD(ext_zve32f)
+BOOL_FIELD(ext_zve32x)
+BOOL_FIELD(ext_zve64f)
+BOOL_FIELD(ext_zve64d)
+BOOL_FIELD(ext_zve64x)
+BOOL_FIELD(ext_zvbb)
+BOOL_FIELD(ext_zvbc)
+BOOL_FIELD(ext_zvkb)
+BOOL_FIELD(ext_zvkg)
+BOOL_FIELD(ext_zvkned)
+BOOL_FIELD(ext_zvknha)
+BOOL_FIELD(ext_zvknhb)
+BOOL_FIELD(ext_zvksed)
+BOOL_FIELD(ext_zvksh)
+BOOL_FIELD(ext_zvkt)
+BOOL_FIELD(ext_zvkn)
+BOOL_FIELD(ext_zvknc)
+BOOL_FIELD(ext_zvkng)
+BOOL_FIELD(ext_zvks)
+BOOL_FIELD(ext_zvksc)
+BOOL_FIELD(ext_zvksg)
+BOOL_FIELD(ext_zmmul)
+BOOL_FIELD(ext_zvfbfmin)
+BOOL_FIELD(ext_zvfbfwma)
+BOOL_FIELD(ext_zvfh)
+BOOL_FIELD(ext_zvfhmin)
+BOOL_FIELD(ext_smaia)
+BOOL_FIELD(ext_ssaia)
+BOOL_FIELD(ext_smctr)
+BOOL_FIELD(ext_ssctr)
+BOOL_FIELD(ext_sscofpmf)
+BOOL_FIELD(ext_smepmp)
+BOOL_FIELD(ext_smrnmi)
+BOOL_FIELD(ext_ssnpm)
+BOOL_FIELD(ext_smnpm)
+BOOL_FIELD(ext_smmpm)
+BOOL_FIELD(ext_sspm)
+BOOL_FIELD(ext_supm)
+BOOL_FIELD(rvv_ta_all_1s)
+BOOL_FIELD(rvv_ma_all_1s)
+BOOL_FIELD(rvv_vl_half_avl)
+/* Named features */
+BOOL_FIELD(ext_svade)
+BOOL_FIELD(ext_zic64b)
+BOOL_FIELD(ext_ssstateen)
+BOOL_FIELD(ext_sha)
+
+/*
+ * Always 'true' booleans for named features
+ * TCG always implement/can't be user disabled,
+ * based on spec version.
+ */
+BOOL_FIELD(has_priv_1_13)
+BOOL_FIELD(has_priv_1_12)
+BOOL_FIELD(has_priv_1_11)
+
+/* Always enabled for TCG if has_priv_1_11 */
+BOOL_FIELD(ext_ziccrse)
+
+/* Vendor-specific custom extensions */
+BOOL_FIELD(ext_xtheadba)
+BOOL_FIELD(ext_xtheadbb)
+BOOL_FIELD(ext_xtheadbs)
+BOOL_FIELD(ext_xtheadcmo)
+BOOL_FIELD(ext_xtheadcondmov)
+BOOL_FIELD(ext_xtheadfmemidx)
+BOOL_FIELD(ext_xtheadfmv)
+BOOL_FIELD(ext_xtheadmac)
+BOOL_FIELD(ext_xtheadmemidx)
+BOOL_FIELD(ext_xtheadmempair)
+BOOL_FIELD(ext_xtheadsync)
+BOOL_FIELD(ext_XVentanaCondOps)
+
+BOOL_FIELD(mmu)
+BOOL_FIELD(pmp)
+BOOL_FIELD(debug)
+BOOL_FIELD(misa_w)
+
+BOOL_FIELD(short_isa_string)
+
+TYPED_FIELD(uint32_t, mvendorid, 0)
+TYPED_FIELD(uint64_t, marchid, 0)
+TYPED_FIELD(uint64_t, mimpid, 0)
+
+TYPED_FIELD(uint32_t, pmu_mask, 0)
+TYPED_FIELD(uint16_t, vlenb, 0)
+TYPED_FIELD(uint16_t, elen, 0)
+TYPED_FIELD(uint16_t, cbom_blocksize, 0)
+TYPED_FIELD(uint16_t, cbop_blocksize, 0)
+TYPED_FIELD(uint16_t, cboz_blocksize, 0)
+
+TYPED_FIELD(int8_t, max_satp_mode, -1)
+
+#undef BOOL_FIELD
+#undef TYPED_FIELD
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 619c76c..2ed69d7 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -24,7 +24,6 @@
#include "internals.h"
#include "pmu.h"
#include "exec/cputlb.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "system/memory.h"
@@ -136,103 +135,6 @@ bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt)
#endif
}
-void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags)
-{
- RISCVCPU *cpu = env_archcpu(env);
- RISCVExtStatus fs, vs;
- uint32_t flags = 0;
- bool pm_signext = riscv_cpu_virt_mem_enabled(env);
-
- *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
- *cs_base = 0;
-
- if (cpu->cfg.ext_zve32x) {
- /*
- * If env->vl equals to VLMAX, we can use generic vector operation
- * expanders (GVEC) to accerlate the vector operations.
- * However, as LMUL could be a fractional number. The maximum
- * vector size can be operated might be less than 8 bytes,
- * which is not supported by GVEC. So we set vl_eq_vlmax flag to true
- * only when maxsz >= 8 bytes.
- */
-
- /* lmul encoded as in DisasContext::lmul */
- int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
- uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
- uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
- uint32_t maxsz = vlmax << vsew;
- bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
- (maxsz >= 8);
- flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
- flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
- flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
- FIELD_EX64(env->vtype, VTYPE, VLMUL));
- flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
- flags = FIELD_DP32(flags, TB_FLAGS, VTA,
- FIELD_EX64(env->vtype, VTYPE, VTA));
- flags = FIELD_DP32(flags, TB_FLAGS, VMA,
- FIELD_EX64(env->vtype, VTYPE, VMA));
- flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
- } else {
- flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
- }
-
- if (cpu_get_fcfien(env)) {
- /*
- * For Forward CFI, only the expectation of a lpad at
- * the start of the block is tracked via env->elp. env->elp
- * is turned on during jalr translation.
- */
- flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp);
- flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1);
- }
-
- if (cpu_get_bcfien(env)) {
- flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1);
- }
-
-#ifdef CONFIG_USER_ONLY
- fs = EXT_STATUS_DIRTY;
- vs = EXT_STATUS_DIRTY;
-#else
- flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);
-
- flags |= riscv_env_mmu_index(env, 0);
- fs = get_field(env->mstatus, MSTATUS_FS);
- vs = get_field(env->mstatus, MSTATUS_VS);
-
- if (env->virt_enabled) {
- flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
- /*
- * Merge DISABLED and !DIRTY states using MIN.
- * We will set both fields when dirtying.
- */
- fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
- vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
- }
-
- /* With Zfinx, floating point is enabled/disabled by Smstateen. */
- if (!riscv_has_ext(env, RVF)) {
- fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
- ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
- }
-
- if (cpu->cfg.debug && !icount_enabled()) {
- flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
- }
-#endif
-
- flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
- flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
- flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
- flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
- flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env));
- flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext);
-
- *pflags = flags;
-}
-
RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env)
{
#ifndef CONFIG_USER_ONLY
@@ -1664,9 +1566,11 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
target_ulong old_pte;
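+            /*
+             * PTEs live in guest RAM in little-endian byte order while
+             * qatomic_cmpxchg() works on host-endian values, so convert
+             * the expected and new values to LE and the result back;
+             * otherwise the compare-and-swap misfires on big-endian hosts.
+             */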
if (riscv_cpu_sxl(env) == MXL_RV32) {
- old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, pte, updated_pte);
+ old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, cpu_to_le32(pte),
+                           cpu_to_le32(updated_pte));
+ old_pte = le32_to_cpu(old_pte);
 } else {
- old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte);
+ old_pte = qatomic_cmpxchg(pte_pa, cpu_to_le64(pte),
+                           cpu_to_le64(updated_pte));
+ old_pte = le64_to_cpu(old_pte);
}
if (old_pte != pte) {
goto restart;
diff --git a/target/riscv/crypto_helper.c b/target/riscv/crypto_helper.c
index bb084e0..a0fb54b 100644
--- a/target/riscv/crypto_helper.c
+++ b/target/riscv/crypto_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "crypto/aes.h"
#include "crypto/aes-round.h"
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index c52c87f..fb14972 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -24,12 +24,14 @@
#include "tcg/tcg-cpu.h"
#include "pmu.h"
#include "time_helper.h"
-#include "exec/exec-all.h"
#include "exec/cputlb.h"
#include "exec/tb-flush.h"
#include "exec/icount.h"
+#include "accel/tcg/getpc.h"
#include "qemu/guest-random.h"
#include "qapi/error.h"
+#include "tcg/insn-start-words.h"
+#include "internals.h"
#include <stdbool.h>
/* CSR function table public API */
@@ -38,7 +40,7 @@ void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
*ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
}
-void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
+void riscv_set_csr_ops(int csrno, const riscv_csr_operations *ops)
{
csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
}
@@ -830,13 +832,15 @@ static RISCVException seed(CPURISCVState *env, int csrno)
}
/* zicfiss CSR_SSP read and write */
-static int read_ssp(CPURISCVState *env, int csrno, target_ulong *val)
+static RISCVException read_ssp(CPURISCVState *env, int csrno,
+ target_ulong *val)
{
*val = env->ssp;
return RISCV_EXCP_NONE;
}
-static int write_ssp(CPURISCVState *env, int csrno, target_ulong val)
+static RISCVException write_ssp(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
{
env->ssp = val;
return RISCV_EXCP_NONE;
@@ -851,7 +855,7 @@ static RISCVException read_fflags(CPURISCVState *env, int csrno,
}
static RISCVException write_fflags(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
if (riscv_has_ext(env, RVF)) {
@@ -870,7 +874,7 @@ static RISCVException read_frm(CPURISCVState *env, int csrno,
}
static RISCVException write_frm(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
if (riscv_has_ext(env, RVF)) {
@@ -890,7 +894,7 @@ static RISCVException read_fcsr(CPURISCVState *env, int csrno,
}
static RISCVException write_fcsr(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
if (riscv_has_ext(env, RVF)) {
@@ -942,7 +946,7 @@ static RISCVException read_vxrm(CPURISCVState *env, int csrno,
}
static RISCVException write_vxrm(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
@@ -959,7 +963,7 @@ static RISCVException read_vxsat(CPURISCVState *env, int csrno,
}
static RISCVException write_vxsat(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
@@ -976,7 +980,7 @@ static RISCVException read_vstart(CPURISCVState *env, int csrno,
}
static RISCVException write_vstart(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
@@ -997,7 +1001,7 @@ static RISCVException read_vcsr(CPURISCVState *env, int csrno,
}
static RISCVException write_vcsr(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
env->mstatus |= MSTATUS_VS;
@@ -1055,7 +1059,7 @@ static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno,
}
static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t inh_avail_mask;
@@ -1084,7 +1088,7 @@ static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno,
}
static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
MCYCLECFGH_BIT_MINH);
@@ -1109,7 +1113,7 @@ static RISCVException read_minstretcfg(CPURISCVState *env, int csrno,
}
static RISCVException write_minstretcfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t inh_avail_mask;
@@ -1136,7 +1140,7 @@ static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno,
}
static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
MINSTRETCFGH_BIT_MINH);
@@ -1163,7 +1167,7 @@ static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
}
static RISCVException write_mhpmevent(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
int evt_index = csrno - CSR_MCOUNTINHIBIT;
uint64_t mhpmevt_val = val;
@@ -1201,7 +1205,7 @@ static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno,
}
static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
int evt_index = csrno - CSR_MHPMEVENT3H + 3;
uint64_t mhpmevth_val;
@@ -1343,14 +1347,16 @@ static RISCVException riscv_pmu_write_ctrh(CPURISCVState *env, target_ulong val,
return RISCV_EXCP_NONE;
}
-static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
+static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
{
int ctr_idx = csrno - CSR_MCYCLE;
return riscv_pmu_write_ctr(env, val, ctr_idx);
}
-static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val)
+static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
{
int ctr_idx = csrno - CSR_MCYCLEH;
@@ -1661,7 +1667,7 @@ static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
}
static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (riscv_cpu_mxl(env) == MXL_RV32) {
env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
@@ -1676,7 +1682,7 @@ static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
}
static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
@@ -1710,13 +1716,13 @@ static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
}
static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (env->virt_enabled) {
if (env->hvictl & HVICTL_VTI) {
return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
}
- return write_vstimecmp(env, csrno, val);
+ return write_vstimecmp(env, csrno, val, ra);
}
if (riscv_cpu_mxl(env) == MXL_RV32) {
@@ -1731,13 +1737,13 @@ static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
}
static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (env->virt_enabled) {
if (env->hvictl & HVICTL_VTI) {
return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
}
- return write_vstimecmph(env, csrno, val);
+ return write_vstimecmph(env, csrno, val, ra);
}
env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
@@ -1842,7 +1848,7 @@ static RISCVException read_zero(CPURISCVState *env, int csrno,
}
static RISCVException write_ignore(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
return RISCV_EXCP_NONE;
}
@@ -1906,8 +1912,13 @@ static RISCVException read_mstatus(CPURISCVState *env, int csrno,
static bool validate_vm(CPURISCVState *env, target_ulong vm)
{
- uint64_t mode_supported = riscv_cpu_cfg(env)->satp_mode.map;
- return get_field(mode_supported, (1 << vm));
+ bool rv32 = riscv_cpu_mxl(env) == MXL_RV32;
+ RISCVCPU *cpu = env_archcpu(env);
+ int satp_mode_supported_max = cpu->cfg.max_satp_mode;
+ const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
+
+ assert(satp_mode_supported_max >= 0);
+ return vm <= satp_mode_supported_max && valid_vm[vm];
}
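+/*
+ * Example: with cfg.max_satp_mode == VM_1_10_SV48, SV39 and SV48 are
+ * accepted, SV57 fails the upper-bound check, and reserved encodings
+ * below the maximum fail the valid_vm[] lookup.
+ */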
static target_ulong legalize_xatp(CPURISCVState *env, target_ulong old_xatp,
@@ -1963,7 +1974,7 @@ static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp,
}
static RISCVException write_mstatus(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t mstatus = env->mstatus;
uint64_t mask = 0;
@@ -2042,7 +2053,7 @@ static RISCVException read_mstatush(CPURISCVState *env, int csrno,
}
static RISCVException write_mstatush(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t valh = (uint64_t)val << 32;
uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0;
@@ -2095,8 +2106,21 @@ static RISCVException read_misa(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
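+/*
+ * cpu_unwind_state_data() recovers the insn_start words recorded at
+ * translation time: data[0] is the guest pc, data[1] the first insn
+ * word. insn_len() (internals.h) maps a compressed opcode to 2 bytes
+ * and a full-size one to 4.
+ */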
+static target_ulong get_next_pc(CPURISCVState *env, uintptr_t ra)
+{
+ uint64_t data[INSN_START_WORDS];
+
+ /* Outside of a running cpu, env contains the next pc. */
+ if (ra == 0 || !cpu_unwind_state_data(env_cpu(env), ra, data)) {
+ return env->pc;
+ }
+
+ /* Within unwind data, [0] is pc and [1] is the opcode. */
+ return data[0] + insn_len(data[1]);
+}
+
static RISCVException write_misa(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
RISCVCPU *cpu = env_archcpu(env);
uint32_t orig_misa_ext = env->misa_ext;
@@ -2110,11 +2134,8 @@ static RISCVException write_misa(CPURISCVState *env, int csrno,
/* Mask extensions that are not supported by this hart */
val &= env->misa_ext_mask;
- /*
- * Suppress 'C' if next instruction is not aligned
- * TODO: this should check next_pc
- */
- if ((val & RVC) && (GETPC() & ~3) != 0) {
+ /* Suppress 'C' if next instruction is not aligned. */
+ if ((val & RVC) && (get_next_pc(env, ra) & 3) != 0) {
val &= ~RVC;
}
@@ -2160,7 +2181,7 @@ static RISCVException read_medeleg(CPURISCVState *env, int csrno,
}
static RISCVException write_medeleg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
return RISCV_EXCP_NONE;
@@ -2955,7 +2976,7 @@ static RISCVException read_mtvec(CPURISCVState *env, int csrno,
}
static RISCVException write_mtvec(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
/* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
if ((val & 3) < 2) {
@@ -2974,7 +2995,7 @@ static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
}
static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
int cidx;
PMUCTRState *counter;
@@ -3049,10 +3070,9 @@ static RISCVException read_scountinhibit(CPURISCVState *env, int csrno,
}
static RISCVException write_scountinhibit(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
- write_mcountinhibit(env, csrno, val & env->mcounteren);
- return RISCV_EXCP_NONE;
+ return write_mcountinhibit(env, csrno, val & env->mcounteren, ra);
}
static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
@@ -3063,7 +3083,7 @@ static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
}
static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
RISCVCPU *cpu = env_archcpu(env);
@@ -3097,7 +3117,7 @@ static RISCVException read_mscratch(CPURISCVState *env, int csrno,
}
static RISCVException write_mscratch(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->mscratch = val;
return RISCV_EXCP_NONE;
@@ -3111,7 +3131,7 @@ static RISCVException read_mepc(CPURISCVState *env, int csrno,
}
static RISCVException write_mepc(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->mepc = val;
return RISCV_EXCP_NONE;
@@ -3125,7 +3145,7 @@ static RISCVException read_mcause(CPURISCVState *env, int csrno,
}
static RISCVException write_mcause(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->mcause = val;
return RISCV_EXCP_NONE;
@@ -3139,7 +3159,7 @@ static RISCVException read_mtval(CPURISCVState *env, int csrno,
}
static RISCVException write_mtval(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->mtval = val;
return RISCV_EXCP_NONE;
@@ -3154,9 +3174,9 @@ static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
}
static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
- target_ulong val);
+ target_ulong val, uintptr_t ra);
static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE |
@@ -3188,9 +3208,7 @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
}
}
env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
- write_henvcfg(env, CSR_HENVCFG, env->henvcfg);
-
- return RISCV_EXCP_NONE;
+ return write_henvcfg(env, CSR_HENVCFG, env->henvcfg, ra);
}
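+/*
+ * Note that menvcfg writes chain into write_henvcfg(): the writable bits
+ * of henvcfg (PBMTE/STCE/ADUE/DTE) are gated by menvcfg, and passing 'ra'
+ * along preserves unwind information if the nested write faults.
+ */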
static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
@@ -3201,9 +3219,9 @@ static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
}
static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
- target_ulong val);
+ target_ulong val, uintptr_t ra);
static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
@@ -3218,9 +3236,7 @@ static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
}
env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
- write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32);
-
- return RISCV_EXCP_NONE;
+ return write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32, ra);
}
static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
@@ -3238,7 +3254,7 @@ static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
}
static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
RISCVException ret;
@@ -3295,7 +3311,7 @@ static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
}
static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
RISCVException ret;
@@ -3350,7 +3366,7 @@ static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
}
static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
HENVCFG_ADUE | HENVCFG_DTE);
@@ -3388,7 +3404,7 @@ static RISCVException write_mstateen(CPURISCVState *env, int csrno,
}
static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
if (!riscv_has_ext(env, RVF)) {
@@ -3420,7 +3436,7 @@ static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
}
static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@@ -3447,7 +3463,7 @@ static RISCVException write_mstateenh(CPURISCVState *env, int csrno,
}
static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
@@ -3463,7 +3479,7 @@ static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
}
static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@@ -3492,7 +3508,7 @@ static RISCVException write_hstateen(CPURISCVState *env, int csrno,
}
static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
@@ -3521,7 +3537,7 @@ static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
}
static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@@ -3552,7 +3568,7 @@ static RISCVException write_hstateenh(CPURISCVState *env, int csrno,
}
static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
@@ -3564,7 +3580,7 @@ static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
}
static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@@ -3603,7 +3619,7 @@ static RISCVException write_sstateen(CPURISCVState *env, int csrno,
}
static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
@@ -3615,7 +3631,7 @@ static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
}
static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno,
- target_ulong new_val)
+ target_ulong new_val, uintptr_t ra)
{
return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
}
@@ -3866,7 +3882,7 @@ static RISCVException read_sstatus(CPURISCVState *env, int csrno,
}
static RISCVException write_sstatus(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
target_ulong mask = (sstatus_v1_10_mask);
@@ -3883,7 +3899,7 @@ static RISCVException write_sstatus(CPURISCVState *env, int csrno,
mask |= SSTATUS_SDT;
}
target_ulong newval = (env->mstatus & ~mask) | (val & mask);
- return write_mstatus(env, CSR_MSTATUS, newval);
+ return write_mstatus(env, CSR_MSTATUS, newval, ra);
}
static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
@@ -4035,7 +4051,7 @@ static RISCVException read_stvec(CPURISCVState *env, int csrno,
}
static RISCVException write_stvec(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
/* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
if ((val & 3) < 2) {
@@ -4054,7 +4070,7 @@ static RISCVException read_scounteren(CPURISCVState *env, int csrno,
}
static RISCVException write_scounteren(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
RISCVCPU *cpu = env_archcpu(env);
@@ -4088,7 +4104,7 @@ static RISCVException read_sscratch(CPURISCVState *env, int csrno,
}
static RISCVException write_sscratch(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->sscratch = val;
return RISCV_EXCP_NONE;
@@ -4102,7 +4118,7 @@ static RISCVException read_sepc(CPURISCVState *env, int csrno,
}
static RISCVException write_sepc(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->sepc = val;
return RISCV_EXCP_NONE;
@@ -4116,7 +4132,7 @@ static RISCVException read_scause(CPURISCVState *env, int csrno,
}
static RISCVException write_scause(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->scause = val;
return RISCV_EXCP_NONE;
@@ -4130,7 +4146,7 @@ static RISCVException read_stval(CPURISCVState *env, int csrno,
}
static RISCVException write_stval(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->stval = val;
return RISCV_EXCP_NONE;
@@ -4270,7 +4286,7 @@ static RISCVException read_satp(CPURISCVState *env, int csrno,
}
static RISCVException write_satp(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (!riscv_cpu_cfg(env)->mmu) {
return RISCV_EXCP_NONE;
@@ -4492,7 +4508,7 @@ static RISCVException read_hstatus(CPURISCVState *env, int csrno,
}
static RISCVException write_hstatus(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t mask = (target_ulong)-1;
if (!env_archcpu(env)->cfg.ext_svukte) {
@@ -4524,7 +4540,7 @@ static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
}
static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->hedeleg = val & vs_delegable_excps;
return RISCV_EXCP_NONE;
@@ -4545,7 +4561,7 @@ static RISCVException read_hedelegh(CPURISCVState *env, int csrno,
}
static RISCVException write_hedelegh(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
RISCVException ret;
ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
@@ -4808,7 +4824,7 @@ static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
}
static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
RISCVCPU *cpu = env_archcpu(env);
@@ -4828,7 +4844,7 @@ static RISCVException read_hgeie(CPURISCVState *env, int csrno,
}
static RISCVException write_hgeie(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
/* Only GEILEN:1 bits implemented and BIT0 is never implemented */
val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
@@ -4847,7 +4863,7 @@ static RISCVException read_htval(CPURISCVState *env, int csrno,
}
static RISCVException write_htval(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->htval = val;
return RISCV_EXCP_NONE;
@@ -4861,7 +4877,7 @@ static RISCVException read_htinst(CPURISCVState *env, int csrno,
}
static RISCVException write_htinst(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
return RISCV_EXCP_NONE;
}
@@ -4883,7 +4899,7 @@ static RISCVException read_hgatp(CPURISCVState *env, int csrno,
}
static RISCVException write_hgatp(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->hgatp = legalize_xatp(env, env->hgatp, val);
return RISCV_EXCP_NONE;
@@ -4901,7 +4917,7 @@ static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
}
static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (!env->rdtime_fn) {
return RISCV_EXCP_ILLEGAL_INST;
@@ -4933,7 +4949,7 @@ static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
}
static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (!env->rdtime_fn) {
return RISCV_EXCP_ILLEGAL_INST;
@@ -4957,7 +4973,7 @@ static RISCVException read_hvictl(CPURISCVState *env, int csrno,
}
static RISCVException write_hvictl(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->hvictl = val & HVICTL_VALID_MASK;
return RISCV_EXCP_NONE;
@@ -5022,7 +5038,7 @@ static RISCVException read_hviprio1(CPURISCVState *env, int csrno,
}
static RISCVException write_hviprio1(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
return write_hvipriox(env, 0, env->hviprio, val);
}
@@ -5034,7 +5050,7 @@ static RISCVException read_hviprio1h(CPURISCVState *env, int csrno,
}
static RISCVException write_hviprio1h(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
return write_hvipriox(env, 4, env->hviprio, val);
}
@@ -5046,7 +5062,7 @@ static RISCVException read_hviprio2(CPURISCVState *env, int csrno,
}
static RISCVException write_hviprio2(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
return write_hvipriox(env, 8, env->hviprio, val);
}
@@ -5058,7 +5074,7 @@ static RISCVException read_hviprio2h(CPURISCVState *env, int csrno,
}
static RISCVException write_hviprio2h(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
return write_hvipriox(env, 12, env->hviprio, val);
}
@@ -5072,7 +5088,7 @@ static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
}
static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint64_t mask = (target_ulong)-1;
if ((val & VSSTATUS64_UXL) == 0) {
@@ -5097,7 +5113,7 @@ static RISCVException read_vstvec(CPURISCVState *env, int csrno,
}
static RISCVException write_vstvec(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
/* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
if ((val & 3) < 2) {
@@ -5116,7 +5132,7 @@ static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
}
static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->vsscratch = val;
return RISCV_EXCP_NONE;
@@ -5130,7 +5146,7 @@ static RISCVException read_vsepc(CPURISCVState *env, int csrno,
}
static RISCVException write_vsepc(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->vsepc = val;
return RISCV_EXCP_NONE;
@@ -5144,7 +5160,7 @@ static RISCVException read_vscause(CPURISCVState *env, int csrno,
}
static RISCVException write_vscause(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->vscause = val;
return RISCV_EXCP_NONE;
@@ -5158,7 +5174,7 @@ static RISCVException read_vstval(CPURISCVState *env, int csrno,
}
static RISCVException write_vstval(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->vstval = val;
return RISCV_EXCP_NONE;
@@ -5172,7 +5188,7 @@ static RISCVException read_vsatp(CPURISCVState *env, int csrno,
}
static RISCVException write_vsatp(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->vsatp = legalize_xatp(env, env->vsatp, val);
return RISCV_EXCP_NONE;
@@ -5186,7 +5202,7 @@ static RISCVException read_mtval2(CPURISCVState *env, int csrno,
}
static RISCVException write_mtval2(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->mtval2 = val;
return RISCV_EXCP_NONE;
@@ -5200,7 +5216,7 @@ static RISCVException read_mtinst(CPURISCVState *env, int csrno,
}
static RISCVException write_mtinst(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->mtinst = val;
return RISCV_EXCP_NONE;
@@ -5215,7 +5231,7 @@ static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
}
static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
mseccfg_csr_write(env, val);
return RISCV_EXCP_NONE;
@@ -5231,7 +5247,7 @@ static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
}
static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
uint32_t reg_index = csrno - CSR_PMPCFG0;
@@ -5247,7 +5263,7 @@ static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
}
static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
return RISCV_EXCP_NONE;
@@ -5261,7 +5277,7 @@ static RISCVException read_tselect(CPURISCVState *env, int csrno,
}
static RISCVException write_tselect(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
tselect_csr_write(env, val);
return RISCV_EXCP_NONE;
@@ -5285,7 +5301,7 @@ static RISCVException read_tdata(CPURISCVState *env, int csrno,
}
static RISCVException write_tdata(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
if (!tdata_available(env, csrno - CSR_TDATA1)) {
return RISCV_EXCP_ILLEGAL_INST;
@@ -5310,7 +5326,7 @@ static RISCVException read_mcontext(CPURISCVState *env, int csrno,
}
static RISCVException write_mcontext(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
int32_t mask;
@@ -5334,43 +5350,50 @@ static RISCVException read_mnscratch(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
-static int write_mnscratch(CPURISCVState *env, int csrno, target_ulong val)
+static RISCVException write_mnscratch(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
{
env->mnscratch = val;
return RISCV_EXCP_NONE;
}
-static int read_mnepc(CPURISCVState *env, int csrno, target_ulong *val)
+static RISCVException read_mnepc(CPURISCVState *env, int csrno,
+ target_ulong *val)
{
*val = env->mnepc;
return RISCV_EXCP_NONE;
}
-static int write_mnepc(CPURISCVState *env, int csrno, target_ulong val)
+static RISCVException write_mnepc(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
{
env->mnepc = val;
return RISCV_EXCP_NONE;
}
-static int read_mncause(CPURISCVState *env, int csrno, target_ulong *val)
+static RISCVException read_mncause(CPURISCVState *env, int csrno,
+ target_ulong *val)
{
*val = env->mncause;
return RISCV_EXCP_NONE;
}
-static int write_mncause(CPURISCVState *env, int csrno, target_ulong val)
+static RISCVException write_mncause(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
{
env->mncause = val;
return RISCV_EXCP_NONE;
}
-static int read_mnstatus(CPURISCVState *env, int csrno, target_ulong *val)
+static RISCVException read_mnstatus(CPURISCVState *env, int csrno,
+ target_ulong *val)
{
*val = env->mnstatus;
return RISCV_EXCP_NONE;
}
-static int write_mnstatus(CPURISCVState *env, int csrno, target_ulong val)
+static RISCVException write_mnstatus(CPURISCVState *env, int csrno,
+ target_ulong val, uintptr_t ra)
{
target_ulong mask = (MNSTATUS_NMIE | MNSTATUS_MNPP);
@@ -5510,7 +5533,8 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
target_ulong *ret_value,
target_ulong new_value,
- target_ulong write_mask)
+ target_ulong write_mask,
+ uintptr_t ra)
{
RISCVException ret;
target_ulong old_value = 0;
@@ -5540,7 +5564,7 @@ static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
if (write_mask) {
new_value = (old_value & ~write_mask) | (new_value & write_mask);
if (csr_ops[csrno].write) {
- ret = csr_ops[csrno].write(env, csrno, new_value);
+ ret = csr_ops[csrno].write(env, csrno, new_value, ra);
if (ret != RISCV_EXCP_NONE) {
return ret;
}
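
The merge performed in the hunk above is the CSR read-modify-write step: only the bits selected by write_mask are replaced, everything else keeps its old value. A self-contained sketch with illustrative values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t old_value = 0xf0, new_value = 0x0f, write_mask = 0xff;
    /* Only the bits selected by write_mask come from new_value. */
    uint64_t merged = (old_value & ~write_mask) | (new_value & write_mask);
    assert(merged == 0x0f);
    return 0;
}
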
@@ -5563,25 +5587,25 @@ RISCVException riscv_csrr(CPURISCVState *env, int csrno,
return ret;
}
- return riscv_csrrw_do64(env, csrno, ret_value, 0, 0);
+ return riscv_csrrw_do64(env, csrno, ret_value, 0, 0, 0);
}
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
- target_ulong *ret_value,
- target_ulong new_value, target_ulong write_mask)
+ target_ulong *ret_value, target_ulong new_value,
+ target_ulong write_mask, uintptr_t ra)
{
RISCVException ret = riscv_csrrw_check(env, csrno, true);
if (ret != RISCV_EXCP_NONE) {
return ret;
}
- return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
+ return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask, ra);
}
static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
Int128 *ret_value,
Int128 new_value,
- Int128 write_mask)
+ Int128 write_mask, uintptr_t ra)
{
RISCVException ret;
Int128 old_value;
@@ -5603,7 +5627,7 @@ static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
}
} else if (csr_ops[csrno].write) {
/* avoids having to write wrappers for all registers */
- ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
+ ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value), ra);
if (ret != RISCV_EXCP_NONE) {
return ret;
}
@@ -5630,7 +5654,7 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
if (csr_ops[csrno].read128) {
return riscv_csrrw_do128(env, csrno, ret_value,
- int128_zero(), int128_zero());
+ int128_zero(), int128_zero(), 0);
}
/*
@@ -5641,9 +5665,7 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
* accesses
*/
target_ulong old_value;
- ret = riscv_csrrw_do64(env, csrno, &old_value,
- (target_ulong)0,
- (target_ulong)0);
+ ret = riscv_csrrw_do64(env, csrno, &old_value, 0, 0, 0);
if (ret == RISCV_EXCP_NONE && ret_value) {
*ret_value = int128_make64(old_value);
}
@@ -5651,8 +5673,8 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
}
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
- Int128 *ret_value,
- Int128 new_value, Int128 write_mask)
+ Int128 *ret_value, Int128 new_value,
+ Int128 write_mask, uintptr_t ra)
{
RISCVException ret;
@@ -5662,7 +5684,8 @@ RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
}
if (csr_ops[csrno].read128) {
- return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
+ return riscv_csrrw_do128(env, csrno, ret_value,
+ new_value, write_mask, ra);
}
/*
@@ -5675,7 +5698,7 @@ RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
target_ulong old_value;
ret = riscv_csrrw_do64(env, csrno, &old_value,
int128_getlo(new_value),
- int128_getlo(write_mask));
+ int128_getlo(write_mask), ra);
if (ret == RISCV_EXCP_NONE && ret_value) {
*ret_value = int128_make64(old_value);
}
@@ -5698,7 +5721,7 @@ RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
if (!write_mask) {
ret = riscv_csrr(env, csrno, ret_value);
} else {
- ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
+ ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask, 0);
}
#if !defined(CONFIG_USER_ONLY)
env->debugger = false;
@@ -5714,7 +5737,7 @@ static RISCVException read_jvt(CPURISCVState *env, int csrno,
}
static RISCVException write_jvt(CPURISCVState *env, int csrno,
- target_ulong val)
+ target_ulong val, uintptr_t ra)
{
env->jvt = val;
return RISCV_EXCP_NONE;
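
For reference, a minimal sketch (not part of the patch) of the callback shape this series converges on: every CSR write hook now takes an extra uintptr_t ra, presumably the host return address for callbacks that need to unwind. The function name below is hypothetical.

static RISCVException write_example_csr(CPURISCVState *env, int csrno,
                                        target_ulong val, uintptr_t ra)
{
    env->mscratch = val;    /* plain state update; ra is unused here */
    return RISCV_EXCP_NONE;
}
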
diff --git a/target/riscv/debug.c b/target/riscv/debug.c
index 8564f0b..5664466 100644
--- a/target/riscv/debug.c
+++ b/target/riscv/debug.c
@@ -28,7 +28,6 @@
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/watchpoint.h"
#include "system/cpu-timers.h"
diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c
index 91b1a56..706bdfa 100644
--- a/target/riscv/fpu_helper.c
+++ b/target/riscv/fpu_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
#include "internals.h"
diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c
index 18e88f4..1934f91 100644
--- a/target/riscv/gdbstub.c
+++ b/target/riscv/gdbstub.c
@@ -62,7 +62,7 @@ int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
return 0;
}
- switch (mcc->misa_mxl_max) {
+ switch (mcc->def->misa_mxl_max) {
case MXL_RV32:
return gdb_get_reg32(mem_buf, tmp);
case MXL_RV64:
@@ -82,7 +82,7 @@ int riscv_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
int length = 0;
target_ulong tmp;
- switch (mcc->misa_mxl_max) {
+ switch (mcc->def->misa_mxl_max) {
case MXL_RV32:
tmp = (int32_t)ldl_p(mem_buf);
length = 4;
@@ -359,7 +359,7 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs)
ricsv_gen_dynamic_vector_feature(cs, cs->gdb_num_regs),
0);
}
- switch (mcc->misa_mxl_max) {
+ switch (mcc->def->misa_mxl_max) {
case MXL_RV32:
gdb_register_coprocessor(cs, riscv_gdb_get_virtual,
riscv_gdb_set_virtual,
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 6d1a13c..cd23b1f 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -703,14 +703,14 @@ vfredmax_vs 000111 . ..... ..... 001 ..... 1010111 @r_vm
# Vector widening ordered and unordered float reduction sum
vfwredusum_vs 110001 . ..... ..... 001 ..... 1010111 @r_vm
vfwredosum_vs 110011 . ..... ..... 001 ..... 1010111 @r_vm
-vmand_mm 011001 - ..... ..... 010 ..... 1010111 @r
-vmnand_mm 011101 - ..... ..... 010 ..... 1010111 @r
-vmandn_mm 011000 - ..... ..... 010 ..... 1010111 @r
-vmxor_mm 011011 - ..... ..... 010 ..... 1010111 @r
-vmor_mm 011010 - ..... ..... 010 ..... 1010111 @r
-vmnor_mm 011110 - ..... ..... 010 ..... 1010111 @r
-vmorn_mm 011100 - ..... ..... 010 ..... 1010111 @r
-vmxnor_mm 011111 - ..... ..... 010 ..... 1010111 @r
+vmand_mm 011001 1 ..... ..... 010 ..... 1010111 @r
+vmnand_mm 011101 1 ..... ..... 010 ..... 1010111 @r
+vmandn_mm 011000 1 ..... ..... 010 ..... 1010111 @r
+vmxor_mm 011011 1 ..... ..... 010 ..... 1010111 @r
+vmor_mm 011010 1 ..... ..... 010 ..... 1010111 @r
+vmnor_mm 011110 1 ..... ..... 010 ..... 1010111 @r
+vmorn_mm 011100 1 ..... ..... 010 ..... 1010111 @r
+vmxnor_mm 011111 1 ..... ..... 010 ..... 1010111 @r
vcpop_m 010000 . ..... 10000 010 ..... 1010111 @r2_vm
vfirst_m 010000 . ..... 10001 010 ..... 1010111 @r2_vm
vmsbf_m 010100 . ..... 00001 010 ..... 1010111 @r2_vm
@@ -732,7 +732,7 @@ vrgather_vv 001100 . ..... ..... 000 ..... 1010111 @r_vm
vrgatherei16_vv 001110 . ..... ..... 000 ..... 1010111 @r_vm
vrgather_vx 001100 . ..... ..... 100 ..... 1010111 @r_vm
vrgather_vi 001100 . ..... ..... 011 ..... 1010111 @r_vm
-vcompress_vm 010111 - ..... ..... 010 ..... 1010111 @r
+vcompress_vm 010111 1 ..... ..... 010 ..... 1010111 @r
vmv1r_v 100111 1 ..... 00000 011 ..... 1010111 @r2rd
vmv2r_v 100111 1 ..... 00001 011 ..... 1010111 @r2rd
vmv4r_v 100111 1 ..... 00011 011 ..... 1010111 @r2rd
diff --git a/target/riscv/insn_trans/trans_rvbf16.c.inc b/target/riscv/insn_trans/trans_rvbf16.c.inc
index 0a9cd1e..066dc36 100644
--- a/target/riscv/insn_trans/trans_rvbf16.c.inc
+++ b/target/riscv/insn_trans/trans_rvbf16.c.inc
@@ -119,8 +119,11 @@ static bool trans_vfwmaccbf16_vv(DisasContext *ctx, arg_vfwmaccbf16_vv *a)
REQUIRE_FPU;
REQUIRE_ZVFBFWMA(ctx);
+ uint8_t sew = ctx->sew;
if (require_rvv(ctx) && vext_check_isa_ill(ctx) && (ctx->sew == MO_16) &&
- vext_check_dss(ctx, a->rd, a->rs1, a->rs2, a->vm)) {
+ vext_check_dss(ctx, a->rd, a->rs1, a->rs2, a->vm) &&
+ vext_check_input_eew(ctx, a->rd, sew + 1, a->rs1, sew, a->vm) &&
+ vext_check_input_eew(ctx, a->rd, sew + 1, a->rs2, sew, a->vm)) {
uint32_t data = 0;
gen_set_rm_chkfrm(ctx, RISCV_FRM_DYN);
@@ -146,8 +149,10 @@ static bool trans_vfwmaccbf16_vf(DisasContext *ctx, arg_vfwmaccbf16_vf *a)
REQUIRE_FPU;
REQUIRE_ZVFBFWMA(ctx);
+ uint8_t sew = ctx->sew;
if (require_rvv(ctx) && (ctx->sew == MO_16) && vext_check_isa_ill(ctx) &&
- vext_check_ds(ctx, a->rd, a->rs2, a->vm)) {
+ vext_check_ds(ctx, a->rd, a->rs2, a->vm) &&
+ vext_check_input_eew(ctx, a->rd, sew + 1, a->rs2, sew, a->vm)) {
uint32_t data = 0;
gen_set_rm(ctx, RISCV_FRM_DYN);
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index b9883a5..2b6077a 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -100,10 +100,33 @@ static bool require_scale_rvfmin(DisasContext *s)
}
}
-/* Destination vector register group cannot overlap source mask register. */
-static bool require_vm(int vm, int vd)
+/*
+ * Source and destination vector register groups cannot overlap source mask
+ * register:
+ *
+ * A vector register cannot be used to provide source operands with more than
+ * one EEW for a single instruction. A mask register source is considered to
+ * have EEW=1 for this constraint. An encoding that would result in the same
+ * vector register being read with two or more different EEWs, including when
+ * the vector register appears at different positions within two or more vector
+ * register groups, is reserved.
+ * (Section 5.2)
+ *
+ * A destination vector register group can overlap a source vector
+ * register group only if one of the following holds:
+ * 1. The destination EEW equals the source EEW.
+ * 2. The destination EEW is smaller than the source EEW and the overlap
+ * is in the lowest-numbered part of the source register group.
+ * 3. The destination EEW is greater than the source EEW, the source EMUL
+ * is at least 1, and the overlap is in the highest-numbered part of
+ * the destination register group.
+ * For the purpose of determining register group overlap constraints, mask
+ * elements have EEW=1.
+ * (Section 5.2)
+ */
+static bool require_vm(int vm, int v)
{
- return (vm != 0 || vd != 0);
+ return (vm != 0 || v != 0);
}
static bool require_nf(int vd, int nf, int lmul)
@@ -356,11 +379,41 @@ static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
return ret;
}
+/*
+ * Check whether a vector register is used to provide source operands with
+ * more than one EEW for the vector instruction.
+ * Returns true if the instruction has a valid encoding.
+ * Returns false if the encoding violates the mismatched-input-EEW constraint.
+ */
+static bool vext_check_input_eew(DisasContext *s, int vs1, uint8_t eew_vs1,
+ int vs2, uint8_t eew_vs2, int vm)
+{
+ bool is_valid = true;
+ int8_t emul_vs1 = eew_vs1 - s->sew + s->lmul;
+ int8_t emul_vs2 = eew_vs2 - s->sew + s->lmul;
+
+ /* When vm is 0, vs1 & vs2(EEW!=1) group can't overlap v0 (EEW=1) */
+ if ((vs1 != -1 && !require_vm(vm, vs1)) ||
+ (vs2 != -1 && !require_vm(vm, vs2))) {
+ is_valid = false;
+ }
+
+ /* When eew_vs1 != eew_vs2, check whether vs1 and vs2 are overlapped */
+ if ((vs1 != -1 && vs2 != -1) && (eew_vs1 != eew_vs2) &&
+ is_overlapped(vs1, 1 << MAX(emul_vs1, 0),
+ vs2, 1 << MAX(emul_vs2, 0))) {
+ is_valid = false;
+ }
+
+ return is_valid;
+}
+
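
A worked example of the new check, with assumed register numbers: take SEW = 16 (s->sew = 1), LMUL = 1 (s->lmul = 0), and a widening source pair with eew_vs1 = 2 (EEW = 32) and eew_vs2 = 1 (EEW = 16).

/*
 * emul_vs1 = 2 - 1 + 0 = 1  -> vs1 group spans 1 << 1 = 2 registers
 * emul_vs2 = 1 - 1 + 0 = 0  -> vs2 group spans 1 << 0 = 1 register
 * With vs1 = 8 and vs2 = 9, is_overlapped(8, 2, 9, 1) is true and the
 * EEWs differ, so v9 would be read with two different EEWs and the
 * encoding is rejected as reserved.
 */
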
static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
{
return require_vm(vm, vd) &&
require_align(vd, s->lmul) &&
- require_align(vs, s->lmul);
+ require_align(vs, s->lmul) &&
+ vext_check_input_eew(s, vs, s->sew, -1, s->sew, vm);
}
/*
@@ -379,6 +432,7 @@ static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
return vext_check_ss(s, vd, vs2, vm) &&
+ vext_check_input_eew(s, vs1, s->sew, vs2, s->sew, vm) &&
require_align(vs1, s->lmul);
}
@@ -474,6 +528,7 @@ static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2,
static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
{
return vext_wide_check_common(s, vd, vm) &&
+ vext_check_input_eew(s, vs, s->sew, -1, 0, vm) &&
require_align(vs, s->lmul) &&
require_noover(vd, s->lmul + 1, vs, s->lmul);
}
@@ -481,6 +536,7 @@ static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
{
return vext_wide_check_common(s, vd, vm) &&
+ vext_check_input_eew(s, vs, s->sew + 1, -1, 0, vm) &&
require_align(vs, s->lmul + 1);
}
@@ -499,6 +555,7 @@ static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
return vext_check_ds(s, vd, vs2, vm) &&
+ vext_check_input_eew(s, vs1, s->sew, vs2, s->sew, vm) &&
require_align(vs1, s->lmul) &&
require_noover(vd, s->lmul + 1, vs1, s->lmul);
}
@@ -521,12 +578,14 @@ static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
return vext_check_ds(s, vd, vs1, vm) &&
+ vext_check_input_eew(s, vs1, s->sew, vs2, s->sew + 1, vm) &&
require_align(vs2, s->lmul + 1);
}
static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
{
- bool ret = vext_narrow_check_common(s, vd, vs, vm);
+ bool ret = vext_narrow_check_common(s, vd, vs, vm) &&
+ vext_check_input_eew(s, vs, s->sew + 1, -1, 0, vm);
if (vd != vs) {
ret &= require_noover(vd, s->lmul, vs, s->lmul + 1);
}
@@ -549,6 +608,7 @@ static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
return vext_check_sd(s, vd, vs2, vm) &&
+ vext_check_input_eew(s, vs1, s->sew, vs2, s->sew + 1, vm) &&
require_align(vs1, s->lmul);
}
@@ -584,7 +644,9 @@ static bool vext_check_slide(DisasContext *s, int vd, int vs2,
{
bool ret = require_align(vs2, s->lmul) &&
require_align(vd, s->lmul) &&
- require_vm(vm, vd);
+ require_vm(vm, vd) &&
+ vext_check_input_eew(s, -1, 0, vs2, s->sew, vm);
+
if (is_over) {
ret &= (vd != vs2);
}
@@ -802,32 +864,286 @@ GEN_VEXT_TRANS(vlm_v, MO_8, vlm_v, ld_us_mask_op, ld_us_mask_check)
GEN_VEXT_TRANS(vsm_v, MO_8, vsm_v, st_us_mask_op, st_us_mask_check)
/*
- *** stride load and store
+ * MAXSZ returns the maximum vector size that can be operated on, in bytes,
+ * which is used in GVEC IR when the vl_eq_vlmax flag is set to true
+ * to accelerate vector operations.
+ */
+static inline uint32_t MAXSZ(DisasContext *s)
+{
+ int max_sz = s->cfg_ptr->vlenb << 3;
+ return max_sz >> (3 - s->lmul);
+}
+
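A quick arithmetic check of MAXSZ with assumed parameters:

/*
 * VLEN = 128 bits (vlenb = 16), LMUL = 2 (s->lmul = 1):
 *   max_sz = 16 << 3 = 128
 *   MAXSZ  = 128 >> (3 - 1) = 32 bytes, i.e. a group of two
 *   16-byte vector registers, as expected for LMUL = 2.
 */
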
+static inline uint32_t get_log2(uint32_t a)
+{
+ uint32_t i = 0;
+ /* floor(log2(a)); exact for the power-of-two sizes used by callers */
+ for (; a > 1;) {
+ a >>= 1;
+ i++;
+ }
+ return i;
+}
+
+typedef void gen_tl_ldst(TCGv, TCGv_ptr, tcg_target_long);
+
+/*
+ * Simulate the strided load/store main loop:
+ *
+ * for (i = env->vstart; i < env->vl; env->vstart = ++i) {
+ * k = 0;
+ * while (k < nf) {
+ * if (!vm && !vext_elem_mask(v0, i)) {
+ * vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
+ * (i + k * max_elems + 1) * esz);
+ * k++;
+ * continue;
+ * }
+ * target_ulong addr = base + stride * i + (k << log2_esz);
+ * ldst(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
+ * k++;
+ * }
+ * }
*/
-typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
- TCGv, TCGv_env, TCGv_i32);
+static void gen_ldst_stride_main_loop(DisasContext *s, TCGv dest, uint32_t rs1,
+ uint32_t rs2, uint32_t vm, uint32_t nf,
+ gen_tl_ldst *ld_fn, gen_tl_ldst *st_fn,
+ bool is_load)
+{
+ TCGv addr = tcg_temp_new();
+ TCGv base = get_gpr(s, rs1, EXT_NONE);
+ TCGv stride = get_gpr(s, rs2, EXT_NONE);
+
+ TCGv i = tcg_temp_new();
+ TCGv i_esz = tcg_temp_new();
+ TCGv k = tcg_temp_new();
+ TCGv k_esz = tcg_temp_new();
+ TCGv k_max = tcg_temp_new();
+ TCGv mask = tcg_temp_new();
+ TCGv mask_offs = tcg_temp_new();
+ TCGv mask_offs_64 = tcg_temp_new();
+ TCGv mask_elem = tcg_temp_new();
+ TCGv mask_offs_rem = tcg_temp_new();
+ TCGv vreg = tcg_temp_new();
+ TCGv dest_offs = tcg_temp_new();
+ TCGv stride_offs = tcg_temp_new();
+
+ uint32_t max_elems = MAXSZ(s) >> s->sew;
+
+ TCGLabel *start = gen_new_label();
+ TCGLabel *end = gen_new_label();
+ TCGLabel *start_k = gen_new_label();
+ TCGLabel *inc_k = gen_new_label();
+ TCGLabel *end_k = gen_new_label();
+
+ MemOp atomicity = MO_ATOM_NONE;
+ if (s->sew == 0) {
+ atomicity = MO_ATOM_NONE;
+ } else {
+ atomicity = MO_ATOM_IFALIGN_PAIR;
+ }
+
+ mark_vs_dirty(s);
+
+ tcg_gen_addi_tl(mask, (TCGv)tcg_env, vreg_ofs(s, 0));
+
+ /* Start of outer loop. */
+ tcg_gen_mov_tl(i, cpu_vstart);
+ gen_set_label(start);
+ tcg_gen_brcond_tl(TCG_COND_GE, i, cpu_vl, end);
+ tcg_gen_shli_tl(i_esz, i, s->sew);
+ /* Start of inner loop. */
+ tcg_gen_movi_tl(k, 0);
+ gen_set_label(start_k);
+ tcg_gen_brcond_tl(TCG_COND_GE, k, tcg_constant_tl(nf), end_k);
+ /*
+ * If we are in the mask agnostic regime and the operation is masked, we
+ * set the inactive elements to 1s.
+ */
+ if (!vm && s->vma) {
+ TCGLabel *active_element = gen_new_label();
+ /* (i + k * max_elems) * esz */
+ tcg_gen_shli_tl(mask_offs, k, get_log2(max_elems << s->sew));
+ tcg_gen_add_tl(mask_offs, mask_offs, i_esz);
+
+ /*
+ * Check whether the i bit of the mask is 0 or 1.
+ *
+ * static inline int vext_elem_mask(void *v0, int index)
+ * {
+ * int idx = index / 64;
+ * int pos = index % 64;
+ * return (((uint64_t *)v0)[idx] >> pos) & 1;
+ * }
+ */
+ tcg_gen_shri_tl(mask_offs_64, mask_offs, 3);
+ tcg_gen_add_tl(mask_offs_64, mask_offs_64, mask);
+ tcg_gen_ld_i64((TCGv_i64)mask_elem, (TCGv_ptr)mask_offs_64, 0);
+ tcg_gen_rem_tl(mask_offs_rem, mask_offs, tcg_constant_tl(8));
+ tcg_gen_shr_tl(mask_elem, mask_elem, mask_offs_rem);
+ tcg_gen_andi_tl(mask_elem, mask_elem, 1);
+ tcg_gen_brcond_tl(TCG_COND_NE, mask_elem, tcg_constant_tl(0),
+ active_element);
+ /*
+ * Set masked-off elements in the destination vector register to 1s.
+ * Store instructions simply skip this bit as memory ops access memory
+ * only for active elements.
+ */
+ if (is_load) {
+ tcg_gen_shli_tl(mask_offs, mask_offs, s->sew);
+ tcg_gen_add_tl(mask_offs, mask_offs, dest);
+ st_fn(tcg_constant_tl(-1), (TCGv_ptr)mask_offs, 0);
+ }
+ tcg_gen_br(inc_k);
+ gen_set_label(active_element);
+ }
+ /*
+ * The element is active, calculate the address with stride:
+ * target_ulong addr = base + stride * i + (k << log2_esz);
+ */
+ tcg_gen_mul_tl(stride_offs, stride, i);
+ tcg_gen_shli_tl(k_esz, k, s->sew);
+ tcg_gen_add_tl(stride_offs, stride_offs, k_esz);
+ tcg_gen_add_tl(addr, base, stride_offs);
+ /* Calculate the offset in the dst/src vector register. */
+ tcg_gen_shli_tl(k_max, k, get_log2(max_elems));
+ tcg_gen_add_tl(dest_offs, i, k_max);
+ tcg_gen_shli_tl(dest_offs, dest_offs, s->sew);
+ tcg_gen_add_tl(dest_offs, dest_offs, dest);
+ if (is_load) {
+ tcg_gen_qemu_ld_tl(vreg, addr, s->mem_idx, MO_LE | s->sew | atomicity);
+ st_fn((TCGv)vreg, (TCGv_ptr)dest_offs, 0);
+ } else {
+ ld_fn((TCGv)vreg, (TCGv_ptr)dest_offs, 0);
+ tcg_gen_qemu_st_tl(vreg, addr, s->mem_idx, MO_LE | s->sew | atomicity);
+ }
+ /*
+ * We don't execute the load/store above if the element was inactive.
+ * We jump instead directly to incrementing k and continuing the loop.
+ */
+ if (!vm && s->vma) {
+ gen_set_label(inc_k);
+ }
+ tcg_gen_addi_tl(k, k, 1);
+ tcg_gen_br(start_k);
+ /* End of the inner loop. */
+ gen_set_label(end_k);
+
+ tcg_gen_addi_tl(i, i, 1);
+ tcg_gen_mov_tl(cpu_vstart, i);
+ tcg_gen_br(start);
+
+ /* End of the outer loop. */
+ gen_set_label(end);
+
+ return;
+}
+
+
+/*
+ * Set the tail bytes of the strided loads/stores to 1:
+ *
+ * for (k = 0; k < nf; ++k) {
+ * cnt = (k * max_elems + vl) * esz;
+ * tot = (k * max_elems + max_elems) * esz;
+ * for (i = cnt; i < tot; i += esz) {
+ * store_1s(-1, vd[vl+i]);
+ * }
+ * }
+ */
+static void gen_ldst_stride_tail_loop(DisasContext *s, TCGv dest, uint32_t nf,
+ gen_tl_ldst *st_fn)
+{
+ TCGv i = tcg_temp_new();
+ TCGv k = tcg_temp_new();
+ TCGv tail_cnt = tcg_temp_new();
+ TCGv tail_tot = tcg_temp_new();
+ TCGv tail_addr = tcg_temp_new();
+
+ TCGLabel *start = gen_new_label();
+ TCGLabel *end = gen_new_label();
+ TCGLabel *start_i = gen_new_label();
+ TCGLabel *end_i = gen_new_label();
+
+ uint32_t max_elems_b = MAXSZ(s);
+ uint32_t esz = 1 << s->sew;
+
+ /* Start of the outer loop. */
+ tcg_gen_movi_tl(k, 0);
+ tcg_gen_shli_tl(tail_cnt, cpu_vl, s->sew);
+ tcg_gen_movi_tl(tail_tot, max_elems_b);
+ tcg_gen_add_tl(tail_addr, dest, tail_cnt);
+ gen_set_label(start);
+ tcg_gen_brcond_tl(TCG_COND_GE, k, tcg_constant_tl(nf), end);
+ /* Start of the inner loop. */
+ tcg_gen_mov_tl(i, tail_cnt);
+ gen_set_label(start_i);
+ tcg_gen_brcond_tl(TCG_COND_GE, i, tail_tot, end_i);
+ /* store_1s(-1, vd[vl+i]); */
+ st_fn(tcg_constant_tl(-1), (TCGv_ptr)tail_addr, 0);
+ tcg_gen_addi_tl(tail_addr, tail_addr, esz);
+ tcg_gen_addi_tl(i, i, esz);
+ tcg_gen_br(start_i);
+ /* End of the inner loop. */
+ gen_set_label(end_i);
+ /* Update the counts */
+ tcg_gen_addi_tl(tail_cnt, tail_cnt, max_elems_b);
+ tcg_gen_addi_tl(tail_tot, tail_cnt, max_elems_b);
+ tcg_gen_addi_tl(k, k, 1);
+ tcg_gen_br(start);
+ /* End of the outer loop. */
+ gen_set_label(end);
+
+ return;
+}
static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
- uint32_t data, gen_helper_ldst_stride *fn,
- DisasContext *s)
+ uint32_t data, DisasContext *s, bool is_load)
{
- TCGv_ptr dest, mask;
- TCGv base, stride;
- TCGv_i32 desc;
+ if (!s->vstart_eq_zero) {
+ return false;
+ }
- dest = tcg_temp_new_ptr();
- mask = tcg_temp_new_ptr();
- base = get_gpr(s, rs1, EXT_NONE);
- stride = get_gpr(s, rs2, EXT_NONE);
- desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
- s->cfg_ptr->vlenb, data));
+ TCGv dest = tcg_temp_new();
- tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
- tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
+ uint32_t nf = FIELD_EX32(data, VDATA, NF);
+ uint32_t vm = FIELD_EX32(data, VDATA, VM);
+
+ /* Destination register and mask register */
+ tcg_gen_addi_tl(dest, (TCGv)tcg_env, vreg_ofs(s, vd));
+
+ /*
+ * Select the appropriate load/store to retrieve data from the vector
+ * register given a specific sew.
+ */
+ static gen_tl_ldst * const ld_fns[4] = {
+ tcg_gen_ld8u_tl, tcg_gen_ld16u_tl,
+ tcg_gen_ld32u_tl, tcg_gen_ld_tl
+ };
+
+ static gen_tl_ldst * const st_fns[4] = {
+ tcg_gen_st8_tl, tcg_gen_st16_tl,
+ tcg_gen_st32_tl, tcg_gen_st_tl
+ };
+
+ gen_tl_ldst *ld_fn = ld_fns[s->sew];
+ gen_tl_ldst *st_fn = st_fns[s->sew];
+
+ if (ld_fn == NULL || st_fn == NULL) {
+ return false;
+ }
mark_vs_dirty(s);
- fn(dest, mask, base, stride, tcg_env, desc);
+ gen_ldst_stride_main_loop(s, dest, rs1, rs2, vm, nf, ld_fn, st_fn, is_load);
+
+ tcg_gen_movi_tl(cpu_vstart, 0);
+
+ /*
+ * Set the tail bytes to 1s if tail agnostic.
+ */
+ if (s->vta != 0 && is_load) {
+ gen_ldst_stride_tail_loop(s, dest, nf, st_fn);
+ }
finalize_rvv_inst(s);
return true;
@@ -836,16 +1152,6 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
uint32_t data = 0;
- gen_helper_ldst_stride *fn;
- static gen_helper_ldst_stride * const fns[4] = {
- gen_helper_vlse8_v, gen_helper_vlse16_v,
- gen_helper_vlse32_v, gen_helper_vlse64_v
- };
-
- fn = fns[eew];
- if (fn == NULL) {
- return false;
- }
uint8_t emul = vext_get_emul(s, eew);
data = FIELD_DP32(data, VDATA, VM, a->vm);
@@ -853,7 +1159,7 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
data = FIELD_DP32(data, VDATA, NF, a->nf);
data = FIELD_DP32(data, VDATA, VTA, s->vta);
data = FIELD_DP32(data, VDATA, VMA, s->vma);
- return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
+ return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, s, true);
}
static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@@ -871,23 +1177,13 @@ GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check)
static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
uint32_t data = 0;
- gen_helper_ldst_stride *fn;
- static gen_helper_ldst_stride * const fns[4] = {
- /* masked stride store */
- gen_helper_vsse8_v, gen_helper_vsse16_v,
- gen_helper_vsse32_v, gen_helper_vsse64_v
- };
uint8_t emul = vext_get_emul(s, eew);
data = FIELD_DP32(data, VDATA, VM, a->vm);
data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
- fn = fns[eew];
- if (fn == NULL) {
- return false;
- }
- return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
+ return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, s, false);
}
static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@@ -981,7 +1277,8 @@ static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
- vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);
+ vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew) &&
+ vext_check_input_eew(s, -1, 0, a->rs2, eew, a->vm);
}
GEN_VEXT_TRANS(vlxei8_v, MO_8, rnfvm, ld_index_op, ld_index_check)
@@ -1033,7 +1330,8 @@ static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
- vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);
+ vext_check_st_index(s, a->rd, a->rs2, a->nf, eew) &&
+ vext_check_input_eew(s, a->rd, s->sew, a->rs2, eew, a->vm);
}
GEN_VEXT_TRANS(vsxei8_v, MO_8, rnfvm, st_index_op, st_index_check)
@@ -1100,25 +1398,86 @@ GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check)
typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32);
static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
- gen_helper_ldst_whole *fn,
- DisasContext *s)
+ uint32_t log2_esz, gen_helper_ldst_whole *fn,
+ DisasContext *s, bool is_load)
{
- TCGv_ptr dest;
- TCGv base;
- TCGv_i32 desc;
-
- uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
- data = FIELD_DP32(data, VDATA, VM, 1);
- dest = tcg_temp_new_ptr();
- desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
- s->cfg_ptr->vlenb, data));
-
- base = get_gpr(s, rs1, EXT_NONE);
- tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
-
mark_vs_dirty(s);
- fn(dest, base, tcg_env, desc);
+ /*
+ * Load/store multiple bytes per iteration.
+ * When possible do this atomically.
+ * Update vstart with the number of processed elements.
+ * Use the helper function if either:
+ * - vstart is not 0.
+ * - the target has 32-bit registers and we are loading/storing 64-bit
+ * elements. This is to ensure that we process every element with a single
+ * memory instruction.
+ */
+
+ bool use_helper_fn = !(s->vstart_eq_zero) ||
+ (TCG_TARGET_REG_BITS == 32 && log2_esz == 3);
+
+ if (!use_helper_fn) {
+ TCGv addr = tcg_temp_new();
+ uint32_t size = s->cfg_ptr->vlenb * nf;
+ TCGv_i64 t8 = tcg_temp_new_i64();
+ TCGv_i32 t4 = tcg_temp_new_i32();
+ MemOp atomicity = MO_ATOM_NONE;
+ if (log2_esz == 0) {
+ atomicity = MO_ATOM_NONE;
+ } else {
+ atomicity = MO_ATOM_IFALIGN_PAIR;
+ }
+ if (TCG_TARGET_REG_BITS == 64) {
+ for (int i = 0; i < size; i += 8) {
+ addr = get_address(s, rs1, i);
+ if (is_load) {
+ tcg_gen_qemu_ld_i64(t8, addr, s->mem_idx,
+ MO_LE | MO_64 | atomicity);
+ tcg_gen_st_i64(t8, tcg_env, vreg_ofs(s, vd) + i);
+ } else {
+ tcg_gen_ld_i64(t8, tcg_env, vreg_ofs(s, vd) + i);
+ tcg_gen_qemu_st_i64(t8, addr, s->mem_idx,
+ MO_LE | MO_64 | atomicity);
+ }
+ if (i == size - 8) {
+ tcg_gen_movi_tl(cpu_vstart, 0);
+ } else {
+ tcg_gen_addi_tl(cpu_vstart, cpu_vstart, 8 >> log2_esz);
+ }
+ }
+ } else {
+ for (int i = 0; i < size; i += 4) {
+ addr = get_address(s, rs1, i);
+ if (is_load) {
+ tcg_gen_qemu_ld_i32(t4, addr, s->mem_idx,
+ MO_LE | MO_32 | atomicity);
+ tcg_gen_st_i32(t4, tcg_env, vreg_ofs(s, vd) + i);
+ } else {
+ tcg_gen_ld_i32(t4, tcg_env, vreg_ofs(s, vd) + i);
+ tcg_gen_qemu_st_i32(t4, addr, s->mem_idx,
+ MO_LE | MO_32 | atomicity);
+ }
+ if (i == size - 4) {
+ tcg_gen_movi_tl(cpu_vstart, 0);
+ } else {
+ tcg_gen_addi_tl(cpu_vstart, cpu_vstart, 4 >> log2_esz);
+ }
+ }
+ }
+ } else {
+ TCGv_ptr dest;
+ TCGv base;
+ TCGv_i32 desc;
+ uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
+ data = FIELD_DP32(data, VDATA, VM, 1);
+ dest = tcg_temp_new_ptr();
+ desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
+ s->cfg_ptr->vlenb, data));
+ base = get_gpr(s, rs1, EXT_NONE);
+ tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
+ fn(dest, base, tcg_env, desc);
+ }
finalize_rvv_inst(s);
return true;
@@ -1128,58 +1487,47 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
* load and store whole register instructions ignore vtype and vl setting.
* Thus, we don't need to check vill bit. (Section 7.9)
*/
-#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF) \
-static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
-{ \
- if (require_rvv(s) && \
- QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \
- return ldst_whole_trans(a->rd, a->rs1, ARG_NF, \
- gen_helper_##NAME, s); \
- } \
- return false; \
-}
-
-GEN_LDST_WHOLE_TRANS(vl1re8_v, 1)
-GEN_LDST_WHOLE_TRANS(vl1re16_v, 1)
-GEN_LDST_WHOLE_TRANS(vl1re32_v, 1)
-GEN_LDST_WHOLE_TRANS(vl1re64_v, 1)
-GEN_LDST_WHOLE_TRANS(vl2re8_v, 2)
-GEN_LDST_WHOLE_TRANS(vl2re16_v, 2)
-GEN_LDST_WHOLE_TRANS(vl2re32_v, 2)
-GEN_LDST_WHOLE_TRANS(vl2re64_v, 2)
-GEN_LDST_WHOLE_TRANS(vl4re8_v, 4)
-GEN_LDST_WHOLE_TRANS(vl4re16_v, 4)
-GEN_LDST_WHOLE_TRANS(vl4re32_v, 4)
-GEN_LDST_WHOLE_TRANS(vl4re64_v, 4)
-GEN_LDST_WHOLE_TRANS(vl8re8_v, 8)
-GEN_LDST_WHOLE_TRANS(vl8re16_v, 8)
-GEN_LDST_WHOLE_TRANS(vl8re32_v, 8)
-GEN_LDST_WHOLE_TRANS(vl8re64_v, 8)
+#define GEN_LDST_WHOLE_TRANS(NAME, ETYPE, ARG_NF, IS_LOAD) \
+static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
+{ \
+ if (require_rvv(s) && \
+ QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \
+ return ldst_whole_trans(a->rd, a->rs1, ARG_NF, ctzl(sizeof(ETYPE)), \
+ gen_helper_##NAME, s, IS_LOAD); \
+ } \
+ return false; \
+}
+
+GEN_LDST_WHOLE_TRANS(vl1re8_v, int8_t, 1, true)
+GEN_LDST_WHOLE_TRANS(vl1re16_v, int16_t, 1, true)
+GEN_LDST_WHOLE_TRANS(vl1re32_v, int32_t, 1, true)
+GEN_LDST_WHOLE_TRANS(vl1re64_v, int64_t, 1, true)
+GEN_LDST_WHOLE_TRANS(vl2re8_v, int8_t, 2, true)
+GEN_LDST_WHOLE_TRANS(vl2re16_v, int16_t, 2, true)
+GEN_LDST_WHOLE_TRANS(vl2re32_v, int32_t, 2, true)
+GEN_LDST_WHOLE_TRANS(vl2re64_v, int64_t, 2, true)
+GEN_LDST_WHOLE_TRANS(vl4re8_v, int8_t, 4, true)
+GEN_LDST_WHOLE_TRANS(vl4re16_v, int16_t, 4, true)
+GEN_LDST_WHOLE_TRANS(vl4re32_v, int32_t, 4, true)
+GEN_LDST_WHOLE_TRANS(vl4re64_v, int64_t, 4, true)
+GEN_LDST_WHOLE_TRANS(vl8re8_v, int8_t, 8, true)
+GEN_LDST_WHOLE_TRANS(vl8re16_v, int16_t, 8, true)
+GEN_LDST_WHOLE_TRANS(vl8re32_v, int32_t, 8, true)
+GEN_LDST_WHOLE_TRANS(vl8re64_v, int64_t, 8, true)
/*
* The vector whole register store instructions are encoded similar to
* unmasked unit-stride store of elements with EEW=8.
*/
-GEN_LDST_WHOLE_TRANS(vs1r_v, 1)
-GEN_LDST_WHOLE_TRANS(vs2r_v, 2)
-GEN_LDST_WHOLE_TRANS(vs4r_v, 4)
-GEN_LDST_WHOLE_TRANS(vs8r_v, 8)
+GEN_LDST_WHOLE_TRANS(vs1r_v, int8_t, 1, false)
+GEN_LDST_WHOLE_TRANS(vs2r_v, int8_t, 2, false)
+GEN_LDST_WHOLE_TRANS(vs4r_v, int8_t, 4, false)
+GEN_LDST_WHOLE_TRANS(vs8r_v, int8_t, 8, false)
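
For illustration, one expansion of the updated macro (vl1re32_v: NF = 1, ETYPE = int32_t, so log2_esz = ctzl(4) = 2):

static bool trans_vl1re32_v(DisasContext *s, arg_vl1re32_v *a)
{
    if (require_rvv(s) &&
        QEMU_IS_ALIGNED(a->rd, 1)) {
        return ldst_whole_trans(a->rd, a->rs1, 1, ctzl(sizeof(int32_t)),
                                gen_helper_vl1re32_v, s, true);
    }
    return false;
}
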
/*
*** Vector Integer Arithmetic Instructions
*/
-/*
- * MAXSZ returns the maximum vector size can be operated in bytes,
- * which is used in GVEC IR when vl_eq_vlmax flag is set to true
- * to accelerate vector operation.
- */
-static inline uint32_t MAXSZ(DisasContext *s)
-{
- int max_sz = s->cfg_ptr->vlenb * 8;
- return max_sz >> (3 - s->lmul);
-}
-
static bool opivv_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
@@ -1475,6 +1823,16 @@ static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}
+/* OPIVV with overwrite and WIDEN */
+static bool opivv_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return require_rvv(s) &&
+ vext_check_isa_ill(s) &&
+ vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) &&
+ vext_check_input_eew(s, a->rd, s->sew + 1, a->rs1, s->sew, a->vm) &&
+ vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
+}
+
static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
gen_helper_gvec_4_ptr *fn,
bool (*checkfn)(DisasContext *, arg_rmrr *))
@@ -1522,6 +1880,14 @@ static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_ds(s, a->rd, a->rs2, a->vm);
}
+static bool opivx_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return require_rvv(s) &&
+ vext_check_isa_ill(s) &&
+ vext_check_ds(s, a->rd, a->rs2, a->vm) &&
+ vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
+}
+
#define GEN_OPIVX_WIDEN_TRANS(NAME, CHECK) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{ \
@@ -1993,13 +2359,13 @@ GEN_OPIVX_TRANS(vmadd_vx, opivx_check)
GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)
/* Vector Widening Integer Multiply-Add Instructions */
-GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check)
-GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check)
-GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check)
-GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_widen_check)
-GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_widen_check)
-GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_widen_check)
-GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_widen_check)
+GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_overwrite_widen_check)
+GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_overwrite_widen_check)
+GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_overwrite_widen_check)
+GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_overwrite_widen_check)
+GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_overwrite_widen_check)
+GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_overwrite_widen_check)
+GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_overwrite_widen_check)
/* Vector Integer Merge and Move Instructions */
static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
@@ -2340,6 +2706,17 @@ static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}
+static bool opfvv_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return require_rvv(s) &&
+ require_rvf(s) &&
+ require_scale_rvf(s) &&
+ vext_check_isa_ill(s) &&
+ vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) &&
+ vext_check_input_eew(s, a->rd, s->sew + 1, a->rs1, s->sew, a->vm) &&
+ vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
+}
+
/* OPFVV with WIDEN */
#define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
@@ -2379,11 +2756,21 @@ static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
vext_check_ds(s, a->rd, a->rs2, a->vm);
}
+static bool opfvf_overwrite_widen_check(DisasContext *s, arg_rmrr *a)
+{
+ return require_rvv(s) &&
+ require_rvf(s) &&
+ require_scale_rvf(s) &&
+ vext_check_isa_ill(s) &&
+ vext_check_ds(s, a->rd, a->rs2, a->vm) &&
+ vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm);
+}
+
/* OPFVF with WIDEN */
-#define GEN_OPFVF_WIDEN_TRANS(NAME) \
+#define GEN_OPFVF_WIDEN_TRANS(NAME, CHECK) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{ \
- if (opfvf_widen_check(s, a)) { \
+ if (CHECK(s, a)) { \
uint32_t data = 0; \
static gen_helper_opfvf *const fns[2] = { \
gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
@@ -2399,8 +2786,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
return false; \
}
-GEN_OPFVF_WIDEN_TRANS(vfwadd_vf)
-GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)
+GEN_OPFVF_WIDEN_TRANS(vfwadd_vf, opfvf_widen_check)
+GEN_OPFVF_WIDEN_TRANS(vfwsub_vf, opfvf_widen_check)
static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
{
@@ -2482,7 +2869,7 @@ GEN_OPFVF_TRANS(vfrdiv_vf, opfvf_check)
/* Vector Widening Floating-Point Multiply */
GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check)
-GEN_OPFVF_WIDEN_TRANS(vfwmul_vf)
+GEN_OPFVF_WIDEN_TRANS(vfwmul_vf, opfvf_widen_check)
/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check)
@@ -2503,14 +2890,14 @@ GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)
/* Vector Widening Floating-Point Fused Multiply-Add Instructions */
-GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check)
-GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check)
-GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check)
-GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check)
-GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf)
-GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf)
-GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf)
-GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf)
+GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_overwrite_widen_check)
+GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_overwrite_widen_check)
+GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_overwrite_widen_check)
+GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_overwrite_widen_check)
+GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf, opfvf_overwrite_widen_check)
+GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf, opfvf_overwrite_widen_check)
+GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf, opfvf_overwrite_widen_check)
+GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf, opfvf_overwrite_widen_check)
/* Vector Floating-Point Square-Root Instruction */
@@ -3426,6 +3813,7 @@ static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
+ vext_check_input_eew(s, a->rs1, s->sew, a->rs2, s->sew, a->vm) &&
require_align(a->rd, s->lmul) &&
require_align(a->rs1, s->lmul) &&
require_align(a->rs2, s->lmul) &&
@@ -3438,6 +3826,7 @@ static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a)
int8_t emul = MO_16 - s->sew + s->lmul;
return require_rvv(s) &&
vext_check_isa_ill(s) &&
+ vext_check_input_eew(s, a->rs1, MO_16, a->rs2, s->sew, a->vm) &&
(emul >= -3 && emul <= 3) &&
require_align(a->rd, s->lmul) &&
require_align(a->rs1, emul) &&
@@ -3457,6 +3846,7 @@ static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
vext_check_isa_ill(s) &&
+ vext_check_input_eew(s, -1, MO_64, a->rs2, s->sew, a->vm) &&
require_align(a->rd, s->lmul) &&
require_align(a->rs2, s->lmul) &&
(a->rd != a->rs2) &&
@@ -3600,7 +3990,9 @@ static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div)
require_align(a->rd, s->lmul) &&
require_align(a->rs2, s->lmul - div) &&
require_vm(a->vm, a->rd) &&
- require_noover(a->rd, s->lmul, a->rs2, s->lmul - div);
+ require_noover(a->rd, s->lmul, a->rs2, s->lmul - div) &&
+ vext_check_input_eew(s, -1, 0, a->rs2, s->sew, a->vm);
+
return ret;
}
diff --git a/target/riscv/internals.h b/target/riscv/internals.h
index 213aff3..4570bd5 100644
--- a/target/riscv/internals.h
+++ b/target/riscv/internals.h
@@ -201,4 +201,9 @@ static inline target_ulong adjust_addr_virt(CPURISCVState *env,
return adjust_addr_body(env, addr, true);
}
+static inline int insn_len(uint16_t first_word)
+{
+ return (first_word & 3) == 3 ? 4 : 2;
+}
+
#endif
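
A sketch of the intended use of insn_len(), assuming a fetch helper such as cpu_lduw_code(): the two low bits of the first halfword are 0b11 for a standard 32-bit instruction, and any other value marks a 16-bit compressed one.

static target_ulong next_pc(CPURISCVState *env, target_ulong pc)
{
    uint16_t first_word = cpu_lduw_code(env, pc);   /* assumed fetch helper */
    return pc + insn_len(first_word);               /* advances by 2 or 4 */
}
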
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
index 75724b6..e1a04be 100644
--- a/target/riscv/kvm/kvm-cpu.c
+++ b/target/riscv/kvm/kvm-cpu.c
@@ -58,33 +58,17 @@ void riscv_kvm_aplic_request(void *opaque, int irq, int level)
static bool cap_has_mp_state;
-static uint64_t kvm_riscv_reg_id_ulong(CPURISCVState *env, uint64_t type,
- uint64_t idx)
-{
- uint64_t id = KVM_REG_RISCV | type | idx;
+#define KVM_RISCV_REG_ID_U32(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U32 | \
+ type | idx)
- switch (riscv_cpu_mxl(env)) {
- case MXL_RV32:
- id |= KVM_REG_SIZE_U32;
- break;
- case MXL_RV64:
- id |= KVM_REG_SIZE_U64;
- break;
- default:
- g_assert_not_reached();
- }
- return id;
-}
-
-static uint64_t kvm_riscv_reg_id_u32(uint64_t type, uint64_t idx)
-{
- return KVM_REG_RISCV | KVM_REG_SIZE_U32 | type | idx;
-}
+#define KVM_RISCV_REG_ID_U64(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U64 | \
+ type | idx)
-static uint64_t kvm_riscv_reg_id_u64(uint64_t type, uint64_t idx)
-{
- return KVM_REG_RISCV | KVM_REG_SIZE_U64 | type | idx;
-}
+#if defined(TARGET_RISCV64)
+#define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U64(type, idx)
+#else
+#define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U32(type, idx)
+#endif
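
For clarity, what the new compile-time macros produce, using the core register macro below as an example:

/*
 * On a TARGET_RISCV64 build,
 *     RISCV_CORE_REG(regs.pc)
 * expands to the constant
 *     KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CORE |
 *     KVM_REG_RISCV_CORE_REG(regs.pc)
 * so no CPURISCVState pointer is needed to build a register ID anymore.
 */
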
static uint64_t kvm_encode_reg_size_id(uint64_t id, size_t size_b)
{
@@ -107,45 +91,29 @@ static uint64_t kvm_riscv_vector_reg_id(RISCVCPU *cpu,
return kvm_encode_reg_size_id(id, size_b);
}
-#define RISCV_CORE_REG(env, name) \
- kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, \
+#define RISCV_CORE_REG(name) \
+ KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, \
KVM_REG_RISCV_CORE_REG(name))
-#define RISCV_CSR_REG(env, name) \
- kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CSR, \
+#define RISCV_CSR_REG(name) \
+ KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CSR, \
KVM_REG_RISCV_CSR_REG(name))
-#define RISCV_CONFIG_REG(env, name) \
- kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, \
+#define RISCV_CONFIG_REG(name) \
+ KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, \
KVM_REG_RISCV_CONFIG_REG(name))
-#define RISCV_TIMER_REG(name) kvm_riscv_reg_id_u64(KVM_REG_RISCV_TIMER, \
+#define RISCV_TIMER_REG(name) KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_TIMER, \
KVM_REG_RISCV_TIMER_REG(name))
-#define RISCV_FP_F_REG(idx) kvm_riscv_reg_id_u32(KVM_REG_RISCV_FP_F, idx)
+#define RISCV_FP_F_REG(idx) KVM_RISCV_REG_ID_U32(KVM_REG_RISCV_FP_F, idx)
-#define RISCV_FP_D_REG(idx) kvm_riscv_reg_id_u64(KVM_REG_RISCV_FP_D, idx)
+#define RISCV_FP_D_REG(idx) KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_FP_D, idx)
-#define RISCV_VECTOR_CSR_REG(env, name) \
- kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_VECTOR, \
+#define RISCV_VECTOR_CSR_REG(name) \
+ KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_VECTOR, \
KVM_REG_RISCV_VECTOR_CSR_REG(name))
-#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
- do { \
- int _ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
- if (_ret) { \
- return _ret; \
- } \
- } while (0)
-
-#define KVM_RISCV_SET_CSR(cs, env, csr, reg) \
- do { \
- int _ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
- if (_ret) { \
- return _ret; \
- } \
- } while (0)
-
#define KVM_RISCV_GET_TIMER(cs, name, reg) \
do { \
int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(name), &reg); \
@@ -167,6 +135,7 @@ typedef struct KVMCPUConfig {
const char *description;
target_ulong offset;
uint64_t kvm_reg_id;
+ uint32_t prop_size;
bool user_set;
bool supported;
} KVMCPUConfig;
@@ -248,7 +217,7 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
/* If we're here we're going to disable the MISA bit */
reg = 0;
- id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
+ id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
misa_cfg->kvm_reg_id);
ret = kvm_set_one_reg(cs, id, &reg);
if (ret != 0) {
@@ -267,6 +236,56 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
}
}
+#define KVM_CSR_CFG(_name, _env_prop, reg_id) \
+ {.name = _name, .offset = ENV_CSR_OFFSET(_env_prop), \
+ .prop_size = sizeof(((CPURISCVState *)0)->_env_prop), \
+ .kvm_reg_id = reg_id}
+
+static KVMCPUConfig kvm_csr_cfgs[] = {
+ KVM_CSR_CFG("sstatus", mstatus, RISCV_CSR_REG(sstatus)),
+ KVM_CSR_CFG("sie", mie, RISCV_CSR_REG(sie)),
+ KVM_CSR_CFG("stvec", stvec, RISCV_CSR_REG(stvec)),
+ KVM_CSR_CFG("sscratch", sscratch, RISCV_CSR_REG(sscratch)),
+ KVM_CSR_CFG("sepc", sepc, RISCV_CSR_REG(sepc)),
+ KVM_CSR_CFG("scause", scause, RISCV_CSR_REG(scause)),
+ KVM_CSR_CFG("stval", stval, RISCV_CSR_REG(stval)),
+ KVM_CSR_CFG("sip", mip, RISCV_CSR_REG(sip)),
+ KVM_CSR_CFG("satp", satp, RISCV_CSR_REG(satp)),
+ KVM_CSR_CFG("scounteren", scounteren, RISCV_CSR_REG(scounteren)),
+ KVM_CSR_CFG("senvcfg", senvcfg, RISCV_CSR_REG(senvcfg)),
+};
+
+static void *kvmconfig_get_env_addr(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
+{
+ return (void *)&cpu->env + csr_cfg->offset;
+}
+
+static uint32_t kvm_cpu_csr_get_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
+{
+ uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg);
+ return *val32;
+}
+
+static uint64_t kvm_cpu_csr_get_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg)
+{
+ uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg);
+ return *val64;
+}
+
+static void kvm_cpu_csr_set_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg,
+ uint32_t val)
+{
+ uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg);
+ *val32 = val;
+}
+
+static void kvm_cpu_csr_set_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg,
+ uint64_t val)
+{
+ uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg);
+ *val64 = val;
+}
+
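A hedged sketch of how the table and the width-dispatched accessors fit together (assumes a 64-bit target, where sepc is eight bytes; the function name is hypothetical):

static void csr_cfg_roundtrip_example(RISCVCPU *cpu)
{
    KVMCPUConfig *cfg = &kvm_csr_cfgs[4];       /* the "sepc" entry above */
    kvm_cpu_csr_set_u64(cpu, cfg, 0x80000000);
    g_assert(kvm_cpu_csr_get_u64(cpu, cfg) == cpu->env.sepc);
}
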
#define KVM_EXT_CFG(_name, _prop, _reg_id) \
{.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
.kvm_reg_id = _reg_id}
@@ -434,7 +453,6 @@ static KVMCPUConfig kvm_sbi_dbcn = {
static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
{
- CPURISCVState *env = &cpu->env;
uint64_t id, reg;
int i, ret;
@@ -445,7 +463,7 @@ static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
continue;
}
- id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
+ id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
multi_ext_cfg->kvm_reg_id);
reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
ret = kvm_set_one_reg(cs, id, &reg);
@@ -570,14 +588,14 @@ static int kvm_riscv_get_regs_core(CPUState *cs)
target_ulong reg;
CPURISCVState *env = &RISCV_CPU(cs)->env;
- ret = kvm_get_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
+ ret = kvm_get_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg);
if (ret) {
return ret;
}
env->pc = reg;
for (i = 1; i < 32; i++) {
- uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i);
+ uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i);
ret = kvm_get_one_reg(cs, id, &reg);
if (ret) {
return ret;
@@ -596,13 +614,13 @@ static int kvm_riscv_put_regs_core(CPUState *cs)
CPURISCVState *env = &RISCV_CPU(cs)->env;
reg = env->pc;
- ret = kvm_set_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
+ ret = kvm_set_one_reg(cs, RISCV_CORE_REG(regs.pc), &reg);
if (ret) {
return ret;
}
for (i = 1; i < 32; i++) {
- uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i);
+ uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i);
reg = env->gpr[i];
ret = kvm_set_one_reg(cs, id, &reg);
if (ret) {
@@ -613,53 +631,81 @@ static int kvm_riscv_put_regs_core(CPUState *cs)
return ret;
}
-static void kvm_riscv_reset_regs_csr(CPURISCVState *env)
-{
- env->mstatus = 0;
- env->mie = 0;
- env->stvec = 0;
- env->sscratch = 0;
- env->sepc = 0;
- env->scause = 0;
- env->stval = 0;
- env->mip = 0;
- env->satp = 0;
-}
-
static int kvm_riscv_get_regs_csr(CPUState *cs)
{
- CPURISCVState *env = &RISCV_CPU(cs)->env;
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ uint64_t reg;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
+ KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
+
+ if (!csr_cfg->supported) {
+ continue;
+ }
+
+ ret = kvm_get_one_reg(cs, csr_cfg->kvm_reg_id, &reg);
+ if (ret) {
+ return ret;
+ }
- KVM_RISCV_GET_CSR(cs, env, sstatus, env->mstatus);
- KVM_RISCV_GET_CSR(cs, env, sie, env->mie);
- KVM_RISCV_GET_CSR(cs, env, stvec, env->stvec);
- KVM_RISCV_GET_CSR(cs, env, sscratch, env->sscratch);
- KVM_RISCV_GET_CSR(cs, env, sepc, env->sepc);
- KVM_RISCV_GET_CSR(cs, env, scause, env->scause);
- KVM_RISCV_GET_CSR(cs, env, stval, env->stval);
- KVM_RISCV_GET_CSR(cs, env, sip, env->mip);
- KVM_RISCV_GET_CSR(cs, env, satp, env->satp);
+ if (csr_cfg->prop_size == sizeof(uint32_t)) {
+ kvm_cpu_csr_set_u32(cpu, csr_cfg, (uint32_t)reg);
+ } else if (csr_cfg->prop_size == sizeof(uint64_t)) {
+ kvm_cpu_csr_set_u64(cpu, csr_cfg, reg);
+ } else {
+ g_assert_not_reached();
+ }
+ }
return 0;
}
static int kvm_riscv_put_regs_csr(CPUState *cs)
{
- CPURISCVState *env = &RISCV_CPU(cs)->env;
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ uint64_t reg;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
+ KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
+
+ if (!csr_cfg->supported) {
+ continue;
+ }
+
+ if (csr_cfg->prop_size == sizeof(uint32_t)) {
+ reg = kvm_cpu_csr_get_u32(cpu, csr_cfg);
+ } else if (csr_cfg->prop_size == sizeof(uint64_t)) {
+ reg = kvm_cpu_csr_get_u64(cpu, csr_cfg);
+ } else {
+ g_assert_not_reached();
+ }
- KVM_RISCV_SET_CSR(cs, env, sstatus, env->mstatus);
- KVM_RISCV_SET_CSR(cs, env, sie, env->mie);
- KVM_RISCV_SET_CSR(cs, env, stvec, env->stvec);
- KVM_RISCV_SET_CSR(cs, env, sscratch, env->sscratch);
- KVM_RISCV_SET_CSR(cs, env, sepc, env->sepc);
- KVM_RISCV_SET_CSR(cs, env, scause, env->scause);
- KVM_RISCV_SET_CSR(cs, env, stval, env->stval);
- KVM_RISCV_SET_CSR(cs, env, sip, env->mip);
- KVM_RISCV_SET_CSR(cs, env, satp, env->satp);
+ ret = kvm_set_one_reg(cs, csr_cfg->kvm_reg_id, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
return 0;
}
+static void kvm_riscv_reset_regs_csr(CPURISCVState *env)
+{
+ env->mstatus = 0;
+ env->mie = 0;
+ env->stvec = 0;
+ env->sscratch = 0;
+ env->sepc = 0;
+ env->scause = 0;
+ env->stval = 0;
+ env->mip = 0;
+ env->satp = 0;
+ env->scounteren = 0;
+ env->senvcfg = 0;
+}
+
static int kvm_riscv_get_regs_fp(CPUState *cs)
{
int ret = 0;
@@ -800,26 +846,26 @@ static int kvm_riscv_get_regs_vector(CPUState *cs)
return 0;
}
- ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), &reg);
+ ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), &reg);
if (ret) {
return ret;
}
env->vstart = reg;
- ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), &reg);
+ ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), &reg);
if (ret) {
return ret;
}
env->vl = reg;
- ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), &reg);
+ ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), &reg);
if (ret) {
return ret;
}
env->vtype = reg;
if (kvm_v_vlenb.supported) {
- ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), &reg);
+ ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), &reg);
if (ret) {
return ret;
}
@@ -857,26 +903,26 @@ static int kvm_riscv_put_regs_vector(CPUState *cs)
}
reg = env->vstart;
- ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), &reg);
+ ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), &reg);
if (ret) {
return ret;
}
reg = env->vl;
- ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), &reg);
+ ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), &reg);
if (ret) {
return ret;
}
reg = env->vtype;
- ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), &reg);
+ ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), &reg);
if (ret) {
return ret;
}
if (kvm_v_vlenb.supported) {
reg = cpu->cfg.vlenb;
- ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), &reg);
+ ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), &reg);
for (int i = 0; i < 32; i++) {
/*
@@ -955,25 +1001,24 @@ static void kvm_riscv_destroy_scratch_vcpu(KVMScratchCPU *scratch)
static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
{
- CPURISCVState *env = &cpu->env;
struct kvm_one_reg reg;
int ret;
- reg.id = RISCV_CONFIG_REG(env, mvendorid);
+ reg.id = RISCV_CONFIG_REG(mvendorid);
reg.addr = (uint64_t)&cpu->cfg.mvendorid;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
error_report("Unable to retrieve mvendorid from host, error %d", ret);
}
- reg.id = RISCV_CONFIG_REG(env, marchid);
+ reg.id = RISCV_CONFIG_REG(marchid);
reg.addr = (uint64_t)&cpu->cfg.marchid;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
error_report("Unable to retrieve marchid from host, error %d", ret);
}
- reg.id = RISCV_CONFIG_REG(env, mimpid);
+ reg.id = RISCV_CONFIG_REG(mimpid);
reg.addr = (uint64_t)&cpu->cfg.mimpid;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
if (ret != 0) {
@@ -988,7 +1033,7 @@ static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu,
struct kvm_one_reg reg;
int ret;
- reg.id = RISCV_CONFIG_REG(env, isa);
+ reg.id = RISCV_CONFIG_REG(isa);
reg.addr = (uint64_t)&env->misa_ext_mask;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
@@ -1005,11 +1050,10 @@ static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu,
static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
KVMCPUConfig *cbomz_cfg)
{
- CPURISCVState *env = &cpu->env;
struct kvm_one_reg reg;
int ret;
- reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG,
+ reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
cbomz_cfg->kvm_reg_id);
reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg);
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
@@ -1023,7 +1067,6 @@ static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
KVMScratchCPU *kvmcpu)
{
- CPURISCVState *env = &cpu->env;
uint64_t val;
int i, ret;
@@ -1031,7 +1074,7 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
struct kvm_one_reg reg;
- reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
+ reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
multi_ext_cfg->kvm_reg_id);
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
@@ -1061,6 +1104,32 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
}
}
+static void kvm_riscv_read_csr_cfg_legacy(KVMScratchCPU *kvmcpu)
+{
+ uint64_t val;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
+ KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
+ struct kvm_one_reg reg;
+
+ reg.id = csr_cfg->kvm_reg_id;
+ reg.addr = (uint64_t)&val;
+ ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
+ if (ret != 0) {
+ if (errno == EINVAL) {
+ csr_cfg->supported = false;
+ } else {
+ error_report("Unable to read KVM CSR %s: %s",
+ csr_cfg->name, strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ } else {
+ csr_cfg->supported = true;
+ }
+ }
+}
+
static int uint64_cmp(const void *a, const void *b)
{
uint64_t val1 = *(const uint64_t *)a;
@@ -1078,7 +1147,6 @@ static int uint64_cmp(const void *a, const void *b)
}
static void kvm_riscv_check_sbi_dbcn_support(RISCVCPU *cpu,
- KVMScratchCPU *kvmcpu,
struct kvm_reg_list *reglist)
{
struct kvm_reg_list *reg_search;
@@ -1118,12 +1186,31 @@ static void kvm_riscv_read_vlenb(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
}
}
-static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
+static void kvm_riscv_read_csr_cfg(struct kvm_reg_list *reglist)
+{
+ struct kvm_reg_list *reg_search;
+ uint64_t reg_id;
+
+ for (int i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) {
+ KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i];
+
+ reg_id = csr_cfg->kvm_reg_id;
+ reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
+ sizeof(uint64_t), uint64_cmp);
+ if (!reg_search) {
+ continue;
+ }
+
+ csr_cfg->supported = true;
+ }
+}
+
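bsearch() requires a sorted array, so the register list fetched from KVM_GET_REG_LIST is presumably sorted once with the same comparator before these lookups run. A hedged sketch of that assumed precondition:

    /* Assumed to happen once after fetching the list (not shown here). */
    qsort(reglist->reg, reglist->n, sizeof(uint64_t), uint64_cmp);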
+static void kvm_riscv_init_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
{
+ g_autofree struct kvm_reg_list *reglist = NULL;
KVMCPUConfig *multi_ext_cfg;
struct kvm_one_reg reg;
struct kvm_reg_list rl_struct;
- struct kvm_reg_list *reglist;
uint64_t val, reg_id, *reg_search;
int i, ret;
@@ -1135,7 +1222,9 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
* (EINVAL). Use read_legacy() in this case.
*/
if (errno == EINVAL) {
- return kvm_riscv_read_multiext_legacy(cpu, kvmcpu);
+ kvm_riscv_read_multiext_legacy(cpu, kvmcpu);
+ kvm_riscv_read_csr_cfg_legacy(kvmcpu);
+ return;
} else if (errno != E2BIG) {
/*
* E2BIG is an expected error message for the API since we
@@ -1164,7 +1253,7 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
multi_ext_cfg = &kvm_multi_ext_cfgs[i];
- reg_id = kvm_riscv_reg_id_ulong(&cpu->env, KVM_REG_RISCV_ISA_EXT,
+ reg_id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT,
multi_ext_cfg->kvm_reg_id);
reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
sizeof(uint64_t), uint64_cmp);
@@ -1197,7 +1286,8 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
kvm_riscv_read_vlenb(cpu, kvmcpu, reglist);
}
- kvm_riscv_check_sbi_dbcn_support(cpu, kvmcpu, reglist);
+ kvm_riscv_check_sbi_dbcn_support(cpu, reglist);
+ kvm_riscv_read_csr_cfg(reglist);
}
static void riscv_init_kvm_registers(Object *cpu_obj)
@@ -1211,7 +1301,7 @@ static void riscv_init_kvm_registers(Object *cpu_obj)
kvm_riscv_init_machine_ids(cpu, &kvmcpu);
kvm_riscv_init_misa_ext_mask(cpu, &kvmcpu);
- kvm_riscv_init_multiext_cfg(cpu, &kvmcpu);
+ kvm_riscv_init_cfg(cpu, &kvmcpu);
kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
}
@@ -1343,12 +1433,11 @@ void kvm_arch_init_irq_routing(KVMState *s)
static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
{
- CPURISCVState *env = &cpu->env;
target_ulong reg;
uint64_t id;
int ret;
- id = RISCV_CONFIG_REG(env, mvendorid);
+ id = RISCV_CONFIG_REG(mvendorid);
/*
* cfg.mvendorid is an uint32 but a target_ulong will
* be written. Assign it to a target_ulong var to avoid
@@ -1360,13 +1449,13 @@ static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
return ret;
}
- id = RISCV_CONFIG_REG(env, marchid);
+ id = RISCV_CONFIG_REG(marchid);
ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid);
if (ret != 0) {
return ret;
}
- id = RISCV_CONFIG_REG(env, mimpid);
+ id = RISCV_CONFIG_REG(mimpid);
ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid);
return ret;
@@ -1383,6 +1472,11 @@ static int kvm_vcpu_enable_sbi_dbcn(RISCVCPU *cpu, CPUState *cs)
return kvm_set_one_reg(cs, kvm_sbi_dbcn.kvm_reg_id, &reg);
}
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
int ret = 0;
@@ -1916,7 +2010,7 @@ void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
if (cpu->cfg.ext_zicbom &&
riscv_cpu_option_set(kvm_cbom_blocksize.name)) {
- reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG,
+ reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
kvm_cbom_blocksize.kvm_reg_id);
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
@@ -1935,7 +2029,7 @@ void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
if (cpu->cfg.ext_zicboz &&
riscv_cpu_option_set(kvm_cboz_blocksize.name)) {
- reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG,
+ reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG,
kvm_cboz_blocksize.kvm_reg_id);
reg.addr = (uint64_t)&val;
ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
@@ -1997,22 +2091,25 @@ static void kvm_cpu_accel_register_types(void)
}
type_init(kvm_cpu_accel_register_types);
-static void riscv_host_cpu_class_init(ObjectClass *c, const void *data)
-{
- RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
-
-#if defined(TARGET_RISCV32)
- mcc->misa_mxl_max = MXL_RV32;
-#elif defined(TARGET_RISCV64)
- mcc->misa_mxl_max = MXL_RV64;
-#endif
-}
-
static const TypeInfo riscv_kvm_cpu_type_infos[] = {
{
.name = TYPE_RISCV_CPU_HOST,
.parent = TYPE_RISCV_CPU,
- .class_init = riscv_host_cpu_class_init,
+#if defined(TARGET_RISCV32)
+ .class_data = &(const RISCVCPUDef) {
+ .misa_mxl_max = MXL_RV32,
+ .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
+ .vext_spec = RISCV_PROFILE_ATTR_UNUSED,
+ .cfg.max_satp_mode = -1,
+ },
+#elif defined(TARGET_RISCV64)
+ .class_data = &(const RISCVCPUDef) {
+ .misa_mxl_max = MXL_RV64,
+ .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
+ .vext_spec = RISCV_PROFILE_ATTR_UNUSED,
+ .cfg.max_satp_mode = -1,
+ },
+#endif
}
};
diff --git a/target/riscv/m128_helper.c b/target/riscv/m128_helper.c
index ec14aaa..7d9b83b 100644
--- a/target/riscv/m128_helper.c
+++ b/target/riscv/m128_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
target_ulong HELPER(divu_i128)(CPURISCVState *env,
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
index a1f70cc..c97e9ce 100644
--- a/target/riscv/machine.c
+++ b/target/riscv/machine.c
@@ -170,7 +170,7 @@ static bool rv128_needed(void *opaque)
{
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(opaque);
- return mcc->misa_mxl_max == MXL_RV128;
+ return mcc->def->misa_mxl_max == MXL_RV128;
}
static const VMStateDescription vmstate_rv128 = {
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index 5b0db2c..557807b 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -21,9 +21,9 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
-#include "exec/exec-all.h"
#include "exec/cputlb.h"
#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/helper-proto.h"
#include "exec/tlb-flags.h"
#include "trace.h"
@@ -71,7 +71,7 @@ target_ulong helper_csrr(CPURISCVState *env, int csr)
void helper_csrw(CPURISCVState *env, int csr, target_ulong src)
{
target_ulong mask = env->xl == MXL_RV32 ? UINT32_MAX : (target_ulong)-1;
- RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask);
+ RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask, GETPC());
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());
@@ -82,7 +82,7 @@ target_ulong helper_csrrw(CPURISCVState *env, int csr,
target_ulong src, target_ulong write_mask)
{
target_ulong val = 0;
- RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask);
+ RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask, GETPC());
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());
@@ -108,7 +108,7 @@ void helper_csrw_i128(CPURISCVState *env, int csr,
{
RISCVException ret = riscv_csrrw_i128(env, csr, NULL,
int128_make128(srcl, srch),
- UINT128_MAX);
+ UINT128_MAX, GETPC());
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());
@@ -116,13 +116,14 @@ void helper_csrw_i128(CPURISCVState *env, int csr,
}
target_ulong helper_csrrw_i128(CPURISCVState *env, int csr,
- target_ulong srcl, target_ulong srch,
- target_ulong maskl, target_ulong maskh)
+ target_ulong srcl, target_ulong srch,
+ target_ulong maskl, target_ulong maskh)
{
Int128 rv = int128_zero();
RISCVException ret = riscv_csrrw_i128(env, csr, &rv,
int128_make128(srcl, srch),
- int128_make128(maskl, maskh));
+ int128_make128(maskl, maskh),
+ GETPC());
if (ret != RISCV_EXCP_NONE) {
riscv_raise_exception(env, ret, GETPC());
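Threading GETPC() through these helpers implies riscv_csrrw() and riscv_csrrw_i128() now take the host return address, so the CSR code itself can raise guest exceptions with correct translation-block unwinding. Assumed new prototype (hedged sketch, not shown in this diff):

    RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                               target_ulong *ret_value, target_ulong new_value,
                               target_ulong write_mask, uintptr_t ra);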
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index c13a117..5af295e 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -33,6 +33,15 @@ static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
/*
+ * Convert the PMP permissions to match the truth table in the Smepmp spec.
+ */
+static inline uint8_t pmp_get_smepmp_operation(uint8_t cfg)
+{
+ return ((cfg & PMP_LOCK) >> 4) | ((cfg & PMP_READ) << 2) |
+ (cfg & PMP_WRITE) | ((cfg & PMP_EXEC) >> 2);
+}
+
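A worked example of the conversion, assuming pmp.h's usual bit values PMP_READ=0x1, PMP_WRITE=0x2, PMP_EXEC=0x4 and PMP_LOCK=0x80, so the helper packs {L, R, W, X} into a 4-bit truth-table index:

    uint8_t cfg = PMP_LOCK | PMP_EXEC;           /* locked, execute-only */
    /* (0x80 >> 4) | (0x04 >> 2) = 0x8 | 0x1 = 9 */
    assert(pmp_get_smepmp_operation(cfg) == 9);
    /* ...and operation 9 is one of the cases rejected by
     * pmp_is_invalid_smepmp_cfg() below when mseccfg.MML is set. */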
+/*
* Accessor method to extract address matching type 'a field' from cfg reg
*/
static inline uint8_t pmp_get_a_field(uint8_t cfg)
@@ -46,21 +55,58 @@ static inline uint8_t pmp_get_a_field(uint8_t cfg)
*/
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
- /* mseccfg.RLB is set */
- if (MSECCFG_RLB_ISSET(env)) {
- return 0;
- }
-
if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
return 1;
}
- /* Top PMP has no 'next' to check */
- if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
+ return 0;
+}
+
+/*
+ * Check whether a PMP entry is read-only, i.e. it has the LOCK flag set
+ * and mseccfg.RLB is unset.
+ */
+static int pmp_is_readonly(CPURISCVState *env, uint32_t pmp_index)
+{
+ return pmp_is_locked(env, pmp_index) && !MSECCFG_RLB_ISSET(env);
+}
+
+/*
+ * Check whether `val` is an invalid Smepmp config value
+ */
+static int pmp_is_invalid_smepmp_cfg(CPURISCVState *env, uint8_t val)
+{
+ /* No check if mseccfg.MML is not set or if mseccfg.RLB is set */
+ if (!MSECCFG_MML_ISSET(env) || MSECCFG_RLB_ISSET(env)) {
return 0;
}
- return 0;
+ /*
+     * It is not possible to add a rule with executable privileges that is
+     * either M-mode-only or a locked Shared-Region
+ */
+ switch (pmp_get_smepmp_operation(val)) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ case 12:
+ case 14:
+ case 15:
+ return 0;
+ case 9:
+ case 10:
+ case 11:
+ case 13:
+ return 1;
+ default:
+ g_assert_not_reached();
+ }
}
/*
@@ -91,45 +137,18 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
if (pmp_index < MAX_RISCV_PMPS) {
- bool locked = true;
-
- if (riscv_cpu_cfg(env)->ext_smepmp) {
- /* mseccfg.RLB is set */
- if (MSECCFG_RLB_ISSET(env)) {
- locked = false;
- }
-
- /* mseccfg.MML is not set */
- if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
- locked = false;
- }
-
- /* mseccfg.MML is set */
- if (MSECCFG_MML_ISSET(env)) {
- /* not adding execute bit */
- if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
- locked = false;
- }
- /* shared region and not adding X bit */
- if ((val & PMP_LOCK) != PMP_LOCK &&
- (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
- locked = false;
- }
- }
- } else {
- if (!pmp_is_locked(env, pmp_index)) {
- locked = false;
- }
+ if (env->pmp_state.pmp[pmp_index].cfg_reg == val) {
+ /* no change */
+ return false;
}
- if (locked) {
- qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
- } else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
- /* If !mseccfg.MML then ignore writes with encoding RW=01 */
- if ((val & PMP_WRITE) && !(val & PMP_READ) &&
- !MSECCFG_MML_ISSET(env)) {
- return false;
- }
+ if (pmp_is_readonly(env, pmp_index)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ignoring pmpcfg write - read only\n");
+ } else if (pmp_is_invalid_smepmp_cfg(env, val)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ignoring pmpcfg write - invalid\n");
+ } else {
env->pmp_state.pmp[pmp_index].cfg_reg = val;
pmp_update_rule_addr(env, pmp_index);
return true;
@@ -353,16 +372,6 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
const uint8_t a_field =
pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
- /*
- * Convert the PMP permissions to match the truth table in the
- * Smepmp spec.
- */
- const uint8_t smepmp_operation =
- ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
- ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
- (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
- ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);
-
if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
/*
* If the PMP entry is not off and the address is in range,
@@ -381,6 +390,9 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
/*
* If mseccfg.MML Bit set, do the enhanced pmp priv check
*/
+ const uint8_t smepmp_operation =
+ pmp_get_smepmp_operation(env->pmp_state.pmp[i].cfg_reg);
+
if (mode == PRV_M) {
switch (smepmp_operation) {
case 0:
@@ -517,6 +529,11 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
bool is_next_cfg_tor = false;
if (addr_index < MAX_RISCV_PMPS) {
+ if (env->pmp_state.pmp[addr_index].addr_reg == val) {
+ /* no change */
+ return;
+ }
+
/*
* In TOR mode, need to check the lock bit of the next pmp
* (if there is a next).
@@ -525,25 +542,23 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);
- if (pmp_is_locked(env, addr_index + 1) && is_next_cfg_tor) {
+ if (pmp_is_readonly(env, addr_index + 1) && is_next_cfg_tor) {
qemu_log_mask(LOG_GUEST_ERROR,
- "ignoring pmpaddr write - pmpcfg + 1 locked\n");
+ "ignoring pmpaddr write - pmpcfg+1 read only\n");
return;
}
}
- if (!pmp_is_locked(env, addr_index)) {
- if (env->pmp_state.pmp[addr_index].addr_reg != val) {
- env->pmp_state.pmp[addr_index].addr_reg = val;
- pmp_update_rule_addr(env, addr_index);
- if (is_next_cfg_tor) {
- pmp_update_rule_addr(env, addr_index + 1);
- }
- tlb_flush(env_cpu(env));
+ if (!pmp_is_readonly(env, addr_index)) {
+ env->pmp_state.pmp[addr_index].addr_reg = val;
+ pmp_update_rule_addr(env, addr_index);
+ if (is_next_cfg_tor) {
+ pmp_update_rule_addr(env, addr_index + 1);
}
+ tlb_flush(env_cpu(env));
} else {
qemu_log_mask(LOG_GUEST_ERROR,
- "ignoring pmpaddr write - locked\n");
+ "ignoring pmpaddr write - read only\n");
}
} else {
qemu_log_mask(LOG_GUEST_ERROR,
diff --git a/target/riscv/riscv-qmp-cmds.c b/target/riscv/riscv-qmp-cmds.c
index d0a3243..8ba8aa0 100644
--- a/target/riscv/riscv-qmp-cmds.c
+++ b/target/riscv/riscv-qmp-cmds.c
@@ -25,7 +25,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/qapi-commands-machine.h"
#include "qobject/qbool.h"
#include "qobject/qdict.h"
#include "qapi/qobject-input-visitor.h"
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index 54ac54f..55fd9e5 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -18,7 +18,6 @@
*/
#include "qemu/osdep.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "tcg-cpu.h"
#include "cpu.h"
@@ -37,6 +36,7 @@
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
#include "system/tcg.h"
+#include "exec/icount.h"
#endif
/* Hash that stores user set extensions */
@@ -98,6 +98,103 @@ static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
return riscv_env_mmu_index(cpu_env(cs), ifetch);
}
+static TCGTBCPUState riscv_get_tb_cpu_state(CPUState *cs)
+{
+ CPURISCVState *env = cpu_env(cs);
+ RISCVCPU *cpu = env_archcpu(env);
+ RISCVExtStatus fs, vs;
+ uint32_t flags = 0;
+ bool pm_signext = riscv_cpu_virt_mem_enabled(env);
+
+ if (cpu->cfg.ext_zve32x) {
+ /*
+         * If env->vl equals VLMAX, we can use the generic vector operation
+         * expanders (GVEC) to accelerate vector operations. However, since
+         * LMUL can be fractional, the maximum vector size that can be
+         * operated on might be less than 8 bytes, which GVEC does not
+         * support. So we set the vl_eq_vlmax flag to true only when
+         * maxsz >= 8 bytes.
+ */
+
+ /* lmul encoded as in DisasContext::lmul */
+ int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
+ uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
+ uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
+ uint32_t maxsz = vlmax << vsew;
+ bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
+ (maxsz >= 8);
+ flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
+ flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
+ flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
+ FIELD_EX64(env->vtype, VTYPE, VLMUL));
+ flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
+ flags = FIELD_DP32(flags, TB_FLAGS, VTA,
+ FIELD_EX64(env->vtype, VTYPE, VTA));
+ flags = FIELD_DP32(flags, TB_FLAGS, VMA,
+ FIELD_EX64(env->vtype, VTYPE, VMA));
+ flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
+ } else {
+ flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
+ }
+
+ if (cpu_get_fcfien(env)) {
+ /*
+ * For Forward CFI, only the expectation of a lpad at
+ * the start of the block is tracked via env->elp. env->elp
+ * is turned on during jalr translation.
+ */
+ flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp);
+ flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1);
+ }
+
+ if (cpu_get_bcfien(env)) {
+ flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1);
+ }
+
+#ifdef CONFIG_USER_ONLY
+ fs = EXT_STATUS_DIRTY;
+ vs = EXT_STATUS_DIRTY;
+#else
+ flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);
+
+ flags |= riscv_env_mmu_index(env, 0);
+ fs = get_field(env->mstatus, MSTATUS_FS);
+ vs = get_field(env->mstatus, MSTATUS_VS);
+
+ if (env->virt_enabled) {
+ flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
+ /*
+ * Merge DISABLED and !DIRTY states using MIN.
+ * We will set both fields when dirtying.
+ */
+ fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
+ vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
+ }
+
+ /* With Zfinx, floating point is enabled/disabled by Smstateen. */
+ if (!riscv_has_ext(env, RVF)) {
+ fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
+ ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
+ }
+
+ if (cpu->cfg.debug && !icount_enabled()) {
+ flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
+ }
+#endif
+
+ flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
+ flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
+ flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
+ flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
+ flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env));
+ flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext);
+
+ return (TCGTBCPUState){
+ .pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc,
+ .flags = flags
+ };
+}
+
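The vl_eq_vlmax gate above is easiest to sanity-check with numbers. A hedged example, assuming vext_get_vlmax() computes (vlenb << 3) >> (vsew + 3 - lmul):

    /* vlenb = 16 (VLEN = 128), SEW = 8 (vsew = 0), LMUL = 1/2 (lmul = -1):
     *   vlmax = 128 >> 4 = 8, maxsz = 8 << 0 = 8   -> GVEC may be used
     * Same VLEN and SEW with LMUL = 1/4 (lmul = -2):
     *   vlmax = 128 >> 5 = 4, maxsz = 4 < 8        -> GVEC path disabled
     */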
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -140,20 +237,48 @@ static void riscv_restore_state_to_opc(CPUState *cs,
env->excp_uw2 = data[2];
}
+#ifndef CONFIG_USER_ONLY
+static vaddr riscv_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ CPURISCVState *env = cpu_env(cs);
+ uint32_t pm_len;
+ bool pm_signext;
+
+ if (cpu_address_xl(env) == MXL_RV32) {
+ return (uint32_t)result;
+ }
+
+ pm_len = riscv_pm_get_pmlen(riscv_pm_get_pmm(env));
+ if (pm_len == 0) {
+ return result;
+ }
+
+ pm_signext = riscv_cpu_virt_mem_enabled(env);
+ if (pm_signext) {
+ return sextract64(result, 0, 64 - pm_len);
+ }
+ return extract64(result, 0, 64 - pm_len);
+}
+#endif
+
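A hedged numeric illustration of riscv_pointer_wrap(): with pointer masking active and pm_len = 16 on RV64, 48 address bits survive; virtual addresses are sign-extended from bit 47, physical ones zero-extended:

    /* result = 0xabcd800000001234, pm_len = 16 (illustrative values):
     *   sextract64(result, 0, 48) -> 0xffff800000001234   (virtual)
     *   extract64(result, 0, 48)  -> 0x0000800000001234   (physical)
     */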
const TCGCPUOps riscv_tcg_ops = {
.mttcg_supported = true,
.guest_default_memory_order = 0,
.initialize = riscv_translate_init,
.translate_code = riscv_translate_code,
+ .get_tb_cpu_state = riscv_get_tb_cpu_state,
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
.restore_state_to_opc = riscv_restore_state_to_opc,
.mmu_index = riscv_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
.tlb_fill = riscv_cpu_tlb_fill,
+ .pointer_wrap = riscv_pointer_wrap,
.cpu_exec_interrupt = riscv_cpu_exec_interrupt,
.cpu_exec_halt = riscv_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = riscv_cpu_do_interrupt,
.do_transaction_failed = riscv_cpu_do_transaction_failed,
.do_unaligned_access = riscv_cpu_do_unaligned_access,
@@ -592,7 +717,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
return;
}
- if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
+ if (mcc->def->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
error_setg(errp, "Zcf extension is only relevant to RV32");
return;
}
@@ -689,7 +814,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
return;
}
- if (mcc->misa_mxl_max == MXL_RV32 && cpu->cfg.ext_svukte) {
+ if (mcc->def->misa_mxl_max == MXL_RV32 && cpu->cfg.ext_svukte) {
error_setg(errp, "svukte is not supported for RV32");
return;
}
@@ -717,8 +842,9 @@ static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
RISCVCPUProfile *profile,
bool send_warn)
{
- int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
+ int satp_max = cpu->cfg.max_satp_mode;
+ assert(satp_max >= 0);
if (profile->satp_mode > satp_max) {
if (send_warn) {
bool is_32bit = riscv_cpu_is_32bit(cpu);
@@ -926,7 +1052,7 @@ static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);
- if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
+ if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) {
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
}
}
@@ -935,7 +1061,7 @@ static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
- if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
+ if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) {
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
}
@@ -1061,7 +1187,7 @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
#ifndef CONFIG_USER_ONLY
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
- if (mcc->misa_mxl_max >= MXL_RV128 && qemu_tcg_mttcg_enabled()) {
+ if (mcc->def->misa_mxl_max >= MXL_RV128 && qemu_tcg_mttcg_enabled()) {
/* Missing 128-bit aligned atomics */
error_setg(errp,
"128-bit RISC-V currently does not work with Multi "
diff --git a/target/riscv/th_csr.c b/target/riscv/th_csr.c
index 6c970d4..49eb7bb 100644
--- a/target/riscv/th_csr.c
+++ b/target/riscv/th_csr.c
@@ -27,12 +27,6 @@
#define TH_SXSTATUS_MAEE BIT(21)
#define TH_SXSTATUS_THEADISAEE BIT(22)
-typedef struct {
- int csrno;
- int (*insertion_test)(RISCVCPU *cpu);
- riscv_csr_operations csr_ops;
-} riscv_csr;
-
static RISCVException smode(CPURISCVState *env, int csrno)
{
if (riscv_has_ext(env, RVS)) {
@@ -42,13 +36,9 @@ static RISCVException smode(CPURISCVState *env, int csrno)
return RISCV_EXCP_ILLEGAL_INST;
}
-static int test_thead_mvendorid(RISCVCPU *cpu)
+static bool test_thead_mvendorid(RISCVCPU *cpu)
{
- if (cpu->cfg.mvendorid != THEAD_VENDOR_ID) {
- return -1;
- }
-
- return 0;
+ return cpu->cfg.mvendorid == THEAD_VENDOR_ID;
}
static RISCVException read_th_sxstatus(CPURISCVState *env, int csrno,
@@ -59,21 +49,11 @@ static RISCVException read_th_sxstatus(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
-static riscv_csr th_csr_list[] = {
+const RISCVCSR th_csr_list[] = {
{
.csrno = CSR_TH_SXSTATUS,
.insertion_test = test_thead_mvendorid,
.csr_ops = { "th.sxstatus", smode, read_th_sxstatus }
- }
+ },
+ { }
};
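With th_register_custom_csrs() removed and the table exported with an empty sentinel entry, registration presumably moves to generic CPU code that walks the list until the sentinel. A hedged sketch of such a consumer (the real registration site is not shown in this diff):

    for (size_t i = 0; th_csr_list[i].csr_ops.name; i++) {
        if (th_csr_list[i].insertion_test(cpu)) {
            riscv_csr_operations ops = th_csr_list[i].csr_ops;
            riscv_set_csr_ops(th_csr_list[i].csrno, &ops);
        }
    }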
-
-void th_register_custom_csrs(RISCVCPU *cpu)
-{
- for (size_t i = 0; i < ARRAY_SIZE(th_csr_list); i++) {
- int csrno = th_csr_list[i].csrno;
- riscv_csr_operations *csr_ops = &th_csr_list[i].csr_ops;
- if (!th_csr_list[i].insertion_test(cpu)) {
- riscv_set_csr_ops(csrno, csr_ops);
- }
- }
-}
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index cef61b5..d7a6de0 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -20,7 +20,6 @@
#include "qemu/log.h"
#include "cpu.h"
#include "tcg/tcg-op.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/target_page.h"
@@ -1210,11 +1209,6 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
/* The specification allows for longer insns, but they are not supported by QEMU. */
#define MAX_INSN_LEN 4
-static inline int insn_len(uint16_t first_word)
-{
- return (first_word & 3) == 3 ? 4 : 2;
-}
-
const RISCVDecoder decoder_table[] = {
{ always_true_p, decode_insn32 },
{ has_xthead_p, decode_xthead},
@@ -1282,7 +1276,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->cfg_vta_all_1s = cpu->cfg.rvv_ta_all_1s;
ctx->vstart_eq_zero = FIELD_EX32(tb_flags, TB_FLAGS, VSTART_EQ_ZERO);
ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
- ctx->misa_mxl_max = mcc->misa_mxl_max;
+ ctx->misa_mxl_max = mcc->def->misa_mxl_max;
ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
ctx->address_xl = FIELD_EX32(tb_flags, TB_FLAGS, AXL);
ctx->cs = cs;
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
index 1526de9..9a0d9b4 100644
--- a/target/riscv/vcrypto_helper.c
+++ b/target/riscv/vcrypto_helper.c
@@ -26,7 +26,6 @@
#include "crypto/aes-round.h"
#include "crypto/sm4.h"
#include "exec/memop.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "vector_internals.h"
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index b8ae704..5dc1c10 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -21,8 +21,8 @@
#include "qemu/bitops.h"
#include "cpu.h"
#include "exec/memop.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/page-protection.h"
#include "exec/helper-proto.h"
#include "exec/tlb-flags.h"
@@ -117,25 +117,42 @@ static inline uint32_t vext_max_elems(uint32_t desc, uint32_t log2_esz)
 * It will trigger an exception if there is no mapping in the TLB
 * and the page table walk can't fill the TLB entry. The guest
 * software can then return here after processing the exception,
 * or never return.
+ *
+ * This function can also be used when direct access to the probe flags is
+ * needed. If a pointer to a flags operand is provided, the function calls
+ * probe_access_flags() instead, honouring @nonfault and updating @host
+ * and @flags.
*/
-static void probe_pages(CPURISCVState *env, target_ulong addr,
- target_ulong len, uintptr_t ra,
- MMUAccessType access_type)
+static void probe_pages(CPURISCVState *env, target_ulong addr, target_ulong len,
+ uintptr_t ra, MMUAccessType access_type, int mmu_index,
+ void **host, int *flags, bool nonfault)
{
target_ulong pagelen = -(addr | TARGET_PAGE_MASK);
target_ulong curlen = MIN(pagelen, len);
- int mmu_index = riscv_env_mmu_index(env, false);
- probe_access(env, adjust_addr(env, addr), curlen, access_type,
- mmu_index, ra);
+ if (flags != NULL) {
+ *flags = probe_access_flags(env, adjust_addr(env, addr), curlen,
+ access_type, mmu_index, nonfault, host, ra);
+ } else {
+ probe_access(env, adjust_addr(env, addr), curlen, access_type,
+ mmu_index, ra);
+ }
+
if (len > curlen) {
addr += curlen;
curlen = len - curlen;
- probe_access(env, adjust_addr(env, addr), curlen, access_type,
- mmu_index, ra);
+ if (flags != NULL) {
+ *flags = probe_access_flags(env, adjust_addr(env, addr), curlen,
+ access_type, mmu_index, nonfault,
+ host, ra);
+ } else {
+ probe_access(env, adjust_addr(env, addr), curlen, access_type,
+ mmu_index, ra);
+ }
}
}
+
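Both calling conventions of the reworked probe_pages() appear in the call sites below; in brief (hedged paraphrase of those sites):

    void *host;
    int flags;

    /* Nonfault probe: flags filled in via probe_access_flags(). */
    probe_pages(env, addr, size, ra, MMU_DATA_LOAD, mmu_index,
                &host, &flags, true);

    /* Faulting probe: flags == NULL selects the probe_access() path. */
    probe_pages(env, addr, size, ra, MMU_DATA_LOAD, mmu_index,
                &host, NULL, false);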
static inline void vext_set_elem_mask(void *v0, int index,
uint8_t value)
{
@@ -335,8 +352,8 @@ vext_page_ldst_us(CPURISCVState *env, void *vd, target_ulong addr,
MMUAccessType access_type = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE;
/* Check page permission/pmp/watchpoint/etc. */
- flags = probe_access_flags(env, adjust_addr(env, addr), size, access_type,
- mmu_index, true, &host, ra);
+ probe_pages(env, addr, size, ra, access_type, mmu_index, &host, &flags,
+ true);
if (flags == 0) {
if (nf == 1) {
@@ -635,7 +652,7 @@ vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env,
uint32_t vma = vext_vma(desc);
target_ulong addr, addr_probe, addr_i, offset, remain, page_split, elems;
int mmu_index = riscv_env_mmu_index(env, false);
- int flags;
+ int flags, probe_flags;
void *host;
VSTART_CHECK_EARLY_EXIT(env, env->vl);
@@ -649,15 +666,15 @@ vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env,
}
/* Check page permission/pmp/watchpoint/etc. */
- flags = probe_access_flags(env, adjust_addr(env, addr), elems * msize,
- MMU_DATA_LOAD, mmu_index, true, &host, ra);
+ probe_pages(env, addr, elems * msize, ra, MMU_DATA_LOAD, mmu_index, &host,
+ &flags, true);
/* If we are crossing a page check also the second page. */
if (env->vl > elems) {
addr_probe = addr + (elems << log2_esz);
- flags |= probe_access_flags(env, adjust_addr(env, addr_probe),
- elems * msize, MMU_DATA_LOAD, mmu_index,
- true, &host, ra);
+ probe_pages(env, addr_probe, elems * msize, ra, MMU_DATA_LOAD,
+ mmu_index, &host, &probe_flags, true);
+ flags |= probe_flags;
}
if (flags & ~TLB_WATCHPOINT) {
@@ -669,16 +686,16 @@ vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env,
addr_i = adjust_addr(env, base + i * (nf << log2_esz));
if (i == 0) {
/* Allow fault on first element. */
- probe_pages(env, addr_i, nf << log2_esz, ra, MMU_DATA_LOAD);
+ probe_pages(env, addr_i, nf << log2_esz, ra, MMU_DATA_LOAD,
+ mmu_index, &host, NULL, false);
} else {
remain = nf << log2_esz;
while (remain > 0) {
offset = -(addr_i | TARGET_PAGE_MASK);
/* Probe nonfault on subsequent elements. */
- flags = probe_access_flags(env, addr_i, offset,
- MMU_DATA_LOAD, mmu_index, true,
- &host, 0);
+ probe_pages(env, addr_i, offset, 0, MMU_DATA_LOAD,
+ mmu_index, &host, &flags, true);
/*
* Stop if invalid (unmapped) or mmio (transaction may
@@ -5116,9 +5133,11 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
} \
\
for (i = i_max; i < vl; ++i) { \
- if (vm || vext_elem_mask(v0, i)) { \
- *((ETYPE *)vd + H(i)) = 0; \
+ if (!vm && !vext_elem_mask(v0, i)) { \
+ vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
+ continue; \
} \
+ *((ETYPE *)vd + H(i)) = 0; \
} \
\
env->vstart = 0; \
diff --git a/target/riscv/zce_helper.c b/target/riscv/zce_helper.c
index 50d65f3..55221f5 100644
--- a/target/riscv/zce_helper.c
+++ b/target/riscv/zce_helper.c
@@ -18,7 +18,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "accel/tcg/cpu-ldst.h"
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index a51b543..c6dd5d6 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -28,6 +28,7 @@
#include "hw/loader.h"
#include "fpu/softfloat.h"
#include "tcg/debug-assert.h"
+#include "accel/tcg/cpu-ops.h"
static void rx_cpu_set_pc(CPUState *cs, vaddr value)
{
@@ -43,6 +44,17 @@ static vaddr rx_cpu_get_pc(CPUState *cs)
return cpu->env.pc;
}
+static TCGTBCPUState rx_get_tb_cpu_state(CPUState *cs)
+{
+ CPURXState *env = cpu_env(cs);
+ uint32_t flags = 0;
+
+ flags = FIELD_DP32(flags, PSW, PM, env->psw_pm);
+ flags = FIELD_DP32(flags, PSW, U, env->psw_u);
+
+ return (TCGTBCPUState){ .pc = env->pc, .flags = flags };
+}
+
static void rx_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -201,8 +213,6 @@ static const struct SysemuCPUOps rx_sysemu_ops = {
.get_phys_page_debug = rx_cpu_get_phys_page_debug,
};
-#include "accel/tcg/cpu-ops.h"
-
static const TCGCPUOps rx_tcg_ops = {
/* MTTCG not yet supported: require strict ordering */
.guest_default_memory_order = TCG_MO_ALL,
@@ -210,13 +220,16 @@ static const TCGCPUOps rx_tcg_ops = {
.initialize = rx_translate_init,
.translate_code = rx_translate_code,
+ .get_tb_cpu_state = rx_get_tb_cpu_state,
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
.restore_state_to_opc = rx_restore_state_to_opc,
.mmu_index = rx_cpu_mmu_index,
.tlb_fill = rx_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = rx_cpu_exec_interrupt,
.cpu_exec_halt = rx_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = rx_cpu_do_interrupt,
};
diff --git a/target/rx/cpu.h b/target/rx/cpu.h
index 5c19c83..ba5761b 100644
--- a/target/rx/cpu.h
+++ b/target/rx/cpu.h
@@ -153,15 +153,6 @@ void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);
#define RX_CPU_IRQ 0
#define RX_CPU_FIR 1
-static inline void cpu_get_tb_cpu_state(CPURXState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *pc = env->pc;
- *cs_base = 0;
- *flags = FIELD_DP32(0, PSW, PM, env->psw_pm);
- *flags = FIELD_DP32(*flags, PSW, U, env->psw_u);
-}
-
static inline uint32_t rx_cpu_pack_psw(CPURXState *env)
{
uint32_t psw = 0;
diff --git a/target/rx/op_helper.c b/target/rx/op_helper.c
index a2f1f38..2b190a4 100644
--- a/target/rx/op_helper.c
+++ b/target/rx/op_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "accel/tcg/cpu-ldst.h"
#include "fpu/softfloat.h"
diff --git a/target/rx/translate.c b/target/rx/translate.c
index bbda703..19a9584 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -20,7 +20,6 @@
#include "qemu/bswap.h"
#include "qemu/qemu-print.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index 3d644f5..f05ce31 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -302,15 +302,16 @@ static const Property s390x_cpu_properties[] = {
#ifdef CONFIG_TCG
#include "accel/tcg/cpu-ops.h"
+#include "tcg/tcg_s390x.h"
static int s390x_cpu_mmu_index(CPUState *cs, bool ifetch)
{
return s390x_env_mmu_index(cpu_env(cs), ifetch);
}
-void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags)
+static TCGTBCPUState s390x_get_tb_cpu_state(CPUState *cs)
{
+ CPUS390XState *env = cpu_env(cs);
uint32_t flags;
if (env->psw.addr & 1) {
@@ -322,9 +323,6 @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, 0);
}
- *pc = env->psw.addr;
- *cs_base = env->ex_value;
-
flags = (env->psw.mask >> FLAG_MASK_PSW_SHIFT) & FLAG_MASK_PSW;
if (env->psw.mask & PSW_MASK_PER) {
flags |= env->cregs[9] & (FLAG_MASK_PER_BRANCH |
@@ -341,11 +339,25 @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
if (env->cregs[0] & CR0_VECTOR) {
flags |= FLAG_MASK_VECTOR;
}
- *pflags = flags;
+
+ return (TCGTBCPUState){
+ .pc = env->psw.addr,
+ .flags = flags,
+ .cs_base = env->ex_value,
+ };
}
+#ifndef CONFIG_USER_ONLY
+static vaddr s390_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ return wrap_address(cpu_env(cs), result);
+}
+#endif
+
static const TCGCPUOps s390_tcg_ops = {
.mttcg_supported = true,
+ .precise_smc = true,
/*
* The z/Architecture has a strong memory model with some
* store-after-load re-ordering.
@@ -354,6 +366,7 @@ static const TCGCPUOps s390_tcg_ops = {
.initialize = s390x_translate_init,
.translate_code = s390x_translate_code,
+ .get_tb_cpu_state = s390x_get_tb_cpu_state,
.restore_state_to_opc = s390x_restore_state_to_opc,
.mmu_index = s390x_cpu_mmu_index,
@@ -362,8 +375,10 @@ static const TCGCPUOps s390_tcg_ops = {
.record_sigbus = s390_cpu_record_sigbus,
#else
.tlb_fill = s390_cpu_tlb_fill,
+ .pointer_wrap = s390_pointer_wrap,
.cpu_exec_interrupt = s390_cpu_exec_interrupt,
.cpu_exec_halt = s390_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = s390_cpu_do_interrupt,
.debug_excp_handler = s390x_cpu_debug_excp_handler,
.do_unaligned_access = s390x_cpu_do_unaligned_access,
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index d9ca250..aa931cb 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -35,8 +35,6 @@
#define ELF_MACHINE_UNAME "S390X"
-#define TARGET_HAS_PRECISE_SMC
-
#define MMU_USER_IDX 0
#define S390_MAX_CPUS 248
@@ -413,15 +411,6 @@ static inline int s390x_env_mmu_index(CPUS390XState *env, bool ifetch)
#endif
}
-#ifdef CONFIG_TCG
-
-#include "tcg/tcg_s390x.h"
-
-void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags);
-
-#endif /* CONFIG_TCG */
-
/* PER bits from control register 9 */
#define PER_CR9_EVENT_BRANCH 0x80000000
#define PER_CR9_EVENT_IFETCH 0x40000000
diff --git a/target/s390x/cpu_features_def.h.inc b/target/s390x/cpu_features_def.h.inc
index e23e603..c017bff 100644
--- a/target/s390x/cpu_features_def.h.inc
+++ b/target/s390x/cpu_features_def.h.inc
@@ -186,7 +186,7 @@ DEF_FEAT(PLO_CSO, "plo-cso", PLO, 25, "PLO Compare and swap (256 bit in paramete
DEF_FEAT(PLO_DCSO, "plo-dcso", PLO, 26, "PLO Double compare and swap (256 bit in parameter list)")
DEF_FEAT(PLO_CSSTO, "plo-cssto", PLO, 27, "PLO Compare and swap and store (256 bit in parameter list)")
DEF_FEAT(PLO_CSDSTO, "plo-csdsto", PLO, 28, "PLO Compare and swap and double store (256 bit in parameter list)")
-DEF_FEAT(PLO_CSTSTO, "plo-cststo", PLO, 29, "PLO Compare and swap and trible store (256 bit in parameter list)")
+DEF_FEAT(PLO_CSTSTO, "plo-cststo", PLO, 29, "PLO Compare and swap and triple store (256 bit in parameter list)")
DEF_FEAT(PLO_TCS, "plo-tcs", PLO, 30, "Triple compare and swap (32 bit in parameter list)")
DEF_FEAT(PLO_TCSG, "plo-tcsg", PLO, 31, "Triple compare and swap (64 bit in parameter list)")
DEF_FEAT(PLO_TCSX, "plo-tcsx", PLO, 32, "Triple compare and swap (128 bit in parameter list)")
diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c
index 8951f1b..954a7a9 100644
--- a/target/s390x/cpu_models.c
+++ b/target/s390x/cpu_models.c
@@ -373,7 +373,7 @@ static void s390_print_cpu_model_list_entry(gpointer data, gpointer user_data)
g_free(name);
}
-static gint s390_cpu_list_compare(gconstpointer a, gconstpointer b)
+static gint s390_cpu_list_compare(gconstpointer a, gconstpointer b, gpointer d)
{
const S390CPUClass *cc_a = S390_CPU_CLASS((ObjectClass *)a);
const S390CPUClass *cc_b = S390_CPU_CLASS((ObjectClass *)b);
@@ -415,7 +415,7 @@ void s390_cpu_list(void)
qemu_printf("Available CPUs:\n");
list = object_class_get_list(TYPE_S390_CPU, false);
- list = g_slist_sort(list, s390_cpu_list_compare);
+ list = g_slist_sort_with_data(list, s390_cpu_list_compare, NULL);
g_slist_foreach(list, s390_print_cpu_model_list_entry, NULL);
g_slist_free(list);
diff --git a/target/s390x/cpu_models_system.c b/target/s390x/cpu_models_system.c
index 4351182..5b84604 100644
--- a/target/s390x/cpu_models_system.c
+++ b/target/s390x/cpu_models_system.c
@@ -19,7 +19,7 @@
#include "qapi/visitor.h"
#include "qapi/qobject-input-visitor.h"
#include "qobject/qdict.h"
-#include "qapi/qapi-commands-machine-target.h"
+#include "qapi/qapi-commands-machine.h"
static void list_add_feat(const char *name, void *opaque);
@@ -252,6 +252,9 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type,
s390_feat_bitmap_to_ascii(deprecated_feats,
&expansion_info->deprecated_props, list_add_feat);
+
+ expansion_info->has_deprecated_props = !!expansion_info->deprecated_props;
+
return expansion_info;
}
diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c
index 4184067..8218e64 100644
--- a/target/s390x/gen-features.c
+++ b/target/s390x/gen-features.c
@@ -844,13 +844,11 @@ static uint16_t default_GEN17_GA1[] = {
/* QEMU (CPU model) features */
-static uint16_t qemu_V2_11[] = {
+static uint16_t qemu_MIN[] = {
+ /* Features supported by the default CPU of the oldest machine type */
S390_FEAT_GROUP_PLO,
S390_FEAT_ESAN3,
S390_FEAT_ZARCH,
-};
-
-static uint16_t qemu_V3_1[] = {
S390_FEAT_DAT_ENH,
S390_FEAT_IDTE_SEGMENT,
S390_FEAT_STFLE,
@@ -880,18 +878,12 @@ static uint16_t qemu_V3_1[] = {
S390_FEAT_ADAPTER_INT_SUPPRESSION,
S390_FEAT_MSA_EXT_3,
S390_FEAT_MSA_EXT_4,
-};
-
-static uint16_t qemu_V4_0[] = {
/*
* Only BFP bits are implemented (HFP, DFP, PFPO and DIVIDE TO INTEGER not
* implemented yet).
*/
S390_FEAT_FLOATING_POINT_EXT,
S390_FEAT_ZPCI,
-};
-
-static uint16_t qemu_V4_1[] = {
S390_FEAT_STFLE_53,
S390_FEAT_VECTOR,
};
@@ -1053,10 +1045,7 @@ static FeatGroupDefSpec FeatGroupDef[] = {
* QEMU (CPU model) features
*******************************/
static FeatGroupDefSpec QemuFeatDef[] = {
- QEMU_FEAT_INITIALIZER(V2_11),
- QEMU_FEAT_INITIALIZER(V3_1),
- QEMU_FEAT_INITIALIZER(V4_0),
- QEMU_FEAT_INITIALIZER(V4_1),
+ QEMU_FEAT_INITIALIZER(MIN),
QEMU_FEAT_INITIALIZER(V6_0),
QEMU_FEAT_INITIALIZER(V6_2),
QEMU_FEAT_INITIALIZER(V7_0),
diff --git a/target/s390x/interrupt.c b/target/s390x/interrupt.c
index 4ae6e2d..1dca835 100644
--- a/target/s390x/interrupt.c
+++ b/target/s390x/interrupt.c
@@ -11,7 +11,6 @@
#include "cpu.h"
#include "kvm/kvm_s390x.h"
#include "s390x-internal.h"
-#include "exec/exec-all.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "hw/s390x/ioinst.h"
diff --git a/target/s390x/ioinst.c b/target/s390x/ioinst.c
index fe62ba5..2320dd4 100644
--- a/target/s390x/ioinst.c
+++ b/target/s390x/ioinst.c
@@ -18,6 +18,7 @@
#include "trace.h"
#include "hw/s390x/s390-pci-bus.h"
#include "target/s390x/kvm/pv.h"
+#include "hw/s390x/ap-bridge.h"
/* All I/O instructions but chsc use the s format */
static uint64_t get_address_from_regs(CPUS390XState *env, uint32_t ipb,
@@ -574,13 +575,19 @@ out:
static int chsc_sei_nt0_get_event(void *res)
{
- /* no events yet */
+ if (s390_has_feat(S390_FEAT_AP)) {
+ return ap_chsc_sei_nt0_get_event(res);
+ }
+
return 1;
}
static int chsc_sei_nt0_have_event(void)
{
- /* no events yet */
+ if (s390_has_feat(S390_FEAT_AP)) {
+ return ap_chsc_sei_nt0_have_event();
+ }
+
return 0;
}
diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c
index b9f1422..67d9a19 100644
--- a/target/s390x/kvm/kvm.c
+++ b/target/s390x/kvm/kvm.c
@@ -298,12 +298,6 @@ void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp)
return;
}
- if (!hpage_1m_allowed()) {
- error_setg(errp, "This QEMU machine does not support huge page "
- "mappings");
- return;
- }
-
if (pagesize != 1 * MiB) {
error_setg(errp, "Memory backing with 2G pages was specified, "
"but KVM does not support this memory backing");
@@ -404,6 +398,11 @@ unsigned long kvm_arch_vcpu_id(CPUState *cpu)
return cpu->cpu_index;
}
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
diff --git a/target/s390x/kvm/pv.c b/target/s390x/kvm/pv.c
index fe0a72c..2bc916a 100644
--- a/target/s390x/kvm/pv.c
+++ b/target/s390x/kvm/pv.c
@@ -30,7 +30,7 @@ static struct kvm_s390_pv_info_vm info_vm;
static struct kvm_s390_pv_info_dump info_dump;
static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data,
- int *pvrc)
+ struct S390PVResponse *pv_resp)
{
struct kvm_pv_cmd pv_cmd = {
.cmd = cmd,
@@ -47,8 +47,10 @@ static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data,
"IOCTL rc: %d", cmd, cmdname, pv_cmd.rc, pv_cmd.rrc,
rc);
}
- if (pvrc) {
- *pvrc = pv_cmd.rc;
+ if (pv_resp) {
+ pv_resp->cmd = cmd;
+ pv_resp->rc = pv_cmd.rc;
+ pv_resp->rrc = pv_cmd.rrc;
}
return rc;
}
@@ -57,16 +59,15 @@ static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data,
* This macro lets us pass the command as a string to the function so
* we can print it on an error.
*/
-#define s390_pv_cmd(cmd, data) __s390_pv_cmd(cmd, #cmd, data, NULL)
-#define s390_pv_cmd_pvrc(cmd, data, pvrc) __s390_pv_cmd(cmd, #cmd, data, pvrc)
-#define s390_pv_cmd_exit(cmd, data) \
-{ \
- int rc; \
- \
- rc = __s390_pv_cmd(cmd, #cmd, data, NULL); \
- if (rc) { \
- exit(1); \
- } \
+#define s390_pv_cmd(cmd, data) __s390_pv_cmd(cmd, #cmd, data, NULL)
+#define s390_pv_cmd_pv_resp(cmd, data, pv_resp) \
+ __s390_pv_cmd(cmd, #cmd, data, pv_resp)
+
+static void s390_pv_cmd_exit(uint32_t cmd, void *data)
+{
+ if (s390_pv_cmd(cmd, data)) {
+ exit(1);
+ }
}
int s390_pv_query_info(void)
@@ -147,18 +148,20 @@ bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms)
return true;
}
-int s390_pv_set_sec_parms(uint64_t origin, uint64_t length, Error **errp)
+#define UV_RC_SSC_INVAL_HOSTKEY 0x0108
+int s390_pv_set_sec_parms(uint64_t origin, uint64_t length,
+ struct S390PVResponse *pv_resp, Error **errp)
{
- int ret, pvrc;
+ int ret;
struct kvm_s390_pv_sec_parm args = {
.origin = origin,
.length = length,
};
- ret = s390_pv_cmd_pvrc(KVM_PV_SET_SEC_PARMS, &args, &pvrc);
+ ret = s390_pv_cmd_pv_resp(KVM_PV_SET_SEC_PARMS, &args, pv_resp);
if (ret) {
error_setg(errp, "Failed to set secure execution parameters");
- if (pvrc == 0x108) {
+ if (pv_resp->rc == UV_RC_SSC_INVAL_HOSTKEY) {
error_append_hint(errp, "Please check whether the image is "
"correctly encrypted for this host\n");
}
@@ -170,7 +173,8 @@ int s390_pv_set_sec_parms(uint64_t origin, uint64_t length, Error **errp)
/*
* Called for each component in the SE type IPL parameter block 0.
*/
-int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak)
+int s390_pv_unpack(uint64_t addr, uint64_t size,
+ uint64_t tweak, struct S390PVResponse *pv_resp)
{
struct kvm_s390_pv_unp args = {
.addr = addr,
@@ -178,7 +182,7 @@ int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak)
.tweak = tweak,
};
- return s390_pv_cmd(KVM_PV_UNPACK, &args);
+ return s390_pv_cmd_pv_resp(KVM_PV_UNPACK, &args, pv_resp);
}
void s390_pv_prep_reset(void)
@@ -186,9 +190,9 @@ void s390_pv_prep_reset(void)
s390_pv_cmd_exit(KVM_PV_PREP_RESET, NULL);
}
-int s390_pv_verify(void)
+int s390_pv_verify(struct S390PVResponse *pv_resp)
{
- return s390_pv_cmd(KVM_PV_VERIFY, NULL);
+ return s390_pv_cmd_pv_resp(KVM_PV_VERIFY, NULL, pv_resp);
}
void s390_pv_unshare(void)
@@ -196,13 +200,29 @@ void s390_pv_unshare(void)
s390_pv_cmd_exit(KVM_PV_UNSHARE_ALL, NULL);
}
-void s390_pv_inject_reset_error(CPUState *cs)
+void s390_pv_inject_reset_error(CPUState *cs,
+ struct S390PVResponse pv_resp)
{
int r1 = (cs->kvm_run->s390_sieic.ipa & 0x00f0) >> 4;
CPUS390XState *env = &S390_CPU(cs)->env;
+ union {
+ struct {
+ uint16_t pv_cmd;
+ uint16_t pv_rrc;
+ uint16_t pv_rc;
+ uint16_t diag_rc;
+ };
+ uint64_t regs;
+ } resp = {
+ .pv_cmd = pv_resp.cmd,
+ .pv_rrc = pv_resp.rrc,
+ .pv_rc = pv_resp.rc,
+ .diag_rc = DIAG_308_RC_INVAL_FOR_PV
+ };
+
/* Report that we are unable to enter protected mode */
- env->regs[r1 + 1] = DIAG_308_RC_INVAL_FOR_PV;
+ env->regs[r1 + 1] = resp.regs;
}
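s390x is big-endian, so the first struct member lands in the most significant bytes of resp.regs; the register value the guest reads back decodes as (hedged illustration):

    /* bits 63..48  pv_cmd   (UV command that failed)
     * bits 47..32  pv_rrc   (return reason code)
     * bits 31..16  pv_rc    (return code)
     * bits 15..0   diag_rc  (DIAG_308_RC_INVAL_FOR_PV)
     */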
uint64_t kvm_s390_pv_dmp_get_size_cpu(void)
diff --git a/target/s390x/kvm/pv.h b/target/s390x/kvm/pv.h
index 5e9c8bd..94e885e 100644
--- a/target/s390x/kvm/pv.h
+++ b/target/s390x/kvm/pv.h
@@ -16,6 +16,12 @@
#include "system/kvm.h"
#include "hw/s390x/s390-virtio-ccw.h"
+struct S390PVResponse {
+ uint16_t cmd;
+ uint16_t rrc;
+ uint16_t rc;
+};
+
#ifdef CONFIG_KVM
#include "cpu.h"
@@ -42,12 +48,15 @@ int s390_pv_query_info(void);
int s390_pv_vm_enable(void);
void s390_pv_vm_disable(void);
bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms);
-int s390_pv_set_sec_parms(uint64_t origin, uint64_t length, Error **errp);
-int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak);
+int s390_pv_set_sec_parms(uint64_t origin, uint64_t length,
+ struct S390PVResponse *pv_resp, Error **errp);
+int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak,
+ struct S390PVResponse *pv_resp);
void s390_pv_prep_reset(void);
-int s390_pv_verify(void);
+int s390_pv_verify(struct S390PVResponse *pv_resp);
void s390_pv_unshare(void);
-void s390_pv_inject_reset_error(CPUState *cs);
+void s390_pv_inject_reset_error(CPUState *cs,
+ struct S390PVResponse pv_resp);
uint64_t kvm_s390_pv_dmp_get_size_cpu(void);
uint64_t kvm_s390_pv_dmp_get_size_mem_state(void);
uint64_t kvm_s390_pv_dmp_get_size_completion_data(void);
@@ -63,12 +72,15 @@ static inline int s390_pv_vm_enable(void) { return 0; }
static inline void s390_pv_vm_disable(void) {}
static inline bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms) { return false; }
static inline int s390_pv_set_sec_parms(uint64_t origin, uint64_t length,
+ struct S390PVResponse *pv_resp,
Error **errp) { return 0; }
-static inline int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak) { return 0; }
+static inline int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak,
+ struct S390PVResponse *pv_resp) { return 0; }
static inline void s390_pv_prep_reset(void) {}
-static inline int s390_pv_verify(void) { return 0; }
+static inline int s390_pv_verify(struct S390PVResponse *pv_resp) { return 0; }
static inline void s390_pv_unshare(void) {}
-static inline void s390_pv_inject_reset_error(CPUState *cs) {};
+static inline void s390_pv_inject_reset_error(CPUState *cs,
+ struct S390PVResponse pv_resp) {};
static inline uint64_t kvm_s390_pv_dmp_get_size_cpu(void) { return 0; }
static inline uint64_t kvm_s390_pv_dmp_get_size_mem_state(void) { return 0; }
static inline uint64_t kvm_s390_pv_dmp_get_size_completion_data(void) { return 0; }
diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
index 0e133cb..00946e9 100644
--- a/target/s390x/mmu_helper.c
+++ b/target/s390x/mmu_helper.c
@@ -23,7 +23,6 @@
#include "kvm/kvm_s390x.h"
#include "system/kvm.h"
#include "system/tcg.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "hw/hw.h"
diff --git a/target/s390x/sigp.c b/target/s390x/sigp.c
index a3347f1..5e95c497 100644
--- a/target/s390x/sigp.c
+++ b/target/s390x/sigp.c
@@ -16,7 +16,6 @@
#include "system/runstate.h"
#include "system/address-spaces.h"
#include "exec/cputlb.h"
-#include "exec/exec-all.h"
#include "system/tcg.h"
#include "trace.h"
#include "qapi/qapi-types-machine.h"
diff --git a/target/s390x/tcg/cc_helper.c b/target/s390x/tcg/cc_helper.c
index b36f8cd..6595ac7 100644
--- a/target/s390x/tcg/cc_helper.c
+++ b/target/s390x/tcg/cc_helper.c
@@ -22,7 +22,6 @@
#include "cpu.h"
#include "s390x-internal.h"
#include "tcg_s390x.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
diff --git a/target/s390x/tcg/crypto_helper.c b/target/s390x/tcg/crypto_helper.c
index 642c1b1..4447bb6 100644
--- a/target/s390x/tcg/crypto_helper.c
+++ b/target/s390x/tcg/crypto_helper.c
@@ -17,7 +17,6 @@
#include "s390x-internal.h"
#include "tcg_s390x.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
static uint64_t R(uint64_t x, int c)
diff --git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c
index 6cd813e..e4c75d0 100644
--- a/target/s390x/tcg/excp_helper.c
+++ b/target/s390x/tcg/excp_helper.c
@@ -23,7 +23,6 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cputlb.h"
-#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "exec/watchpoint.h"
#include "s390x-internal.h"
diff --git a/target/s390x/tcg/fpu_helper.c b/target/s390x/tcg/fpu_helper.c
index 5041c13..1ba4371 100644
--- a/target/s390x/tcg/fpu_helper.c
+++ b/target/s390x/tcg/fpu_helper.c
@@ -22,7 +22,6 @@
#include "cpu.h"
#include "s390x-internal.h"
#include "tcg_s390x.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
diff --git a/target/s390x/tcg/int_helper.c b/target/s390x/tcg/int_helper.c
index 253c036..fbda396 100644
--- a/target/s390x/tcg/int_helper.c
+++ b/target/s390x/tcg/int_helper.c
@@ -22,7 +22,6 @@
#include "cpu.h"
#include "s390x-internal.h"
#include "tcg_s390x.h"
-#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "accel/tcg/cpu-ldst.h"
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
index 0cdfd38..a03609a 100644
--- a/target/s390x/tcg/mem_helper.c
+++ b/target/s390x/tcg/mem_helper.c
@@ -25,13 +25,14 @@
#include "tcg_s390x.h"
#include "exec/helper-proto.h"
#include "exec/cpu-common.h"
-#include "exec/exec-all.h"
#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/target_page.h"
#include "exec/tlb-flags.h"
#include "accel/tcg/cpu-ops.h"
+#include "accel/tcg/helper-retaddr.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
diff --git a/target/s390x/tcg/misc_helper.c b/target/s390x/tcg/misc_helper.c
index d508849..f7101be 100644
--- a/target/s390x/tcg/misc_helper.c
+++ b/target/s390x/tcg/misc_helper.c
@@ -26,7 +26,6 @@
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
-#include "exec/exec-all.h"
#include "exec/cputlb.h"
#include "accel/tcg/cpu-ldst.h"
#include "exec/target_page.h"
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index 00073c5..c7e8574 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -31,7 +31,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
-#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
@@ -1250,11 +1249,7 @@ static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
compute_carry(s);
-
- TCGv_i64 zero = tcg_constant_i64(0);
- tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
- tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
-
+ tcg_gen_addcio_i64(o->out, cc_src, o->in1, o->in2, cc_src);
return DISAS_NEXT;
}
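tcg_gen_addcio_i64() fuses the removed two-step add2 sequence into a single add-with-carry-in/carry-out; as used here its operands appear to read as (hedged):

    /* tcg_gen_addcio_i64(res, cout, a, b, cin):
     *   res  = a + b + cin          (low 64 bits)
     *   cout = carry out of the sum (0 or 1)
     * i.e. one op replacing the two tcg_gen_add2_i64() calls above. */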
diff --git a/target/s390x/tcg/vec_fpu_helper.c b/target/s390x/tcg/vec_fpu_helper.c
index 1bbaa82..744f800 100644
--- a/target/s390x/tcg/vec_fpu_helper.c
+++ b/target/s390x/tcg/vec_fpu_helper.c
@@ -15,7 +15,6 @@
#include "vec.h"
#include "tcg_s390x.h"
#include "tcg/tcg-gvec-desc.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
diff --git a/target/s390x/tcg/vec_helper.c b/target/s390x/tcg/vec_helper.c
index 781ccc5..46ec4a9 100644
--- a/target/s390x/tcg/vec_helper.c
+++ b/target/s390x/tcg/vec_helper.c
@@ -17,7 +17,6 @@
#include "tcg/tcg-gvec-desc.h"
#include "exec/helper-proto.h"
#include "accel/tcg/cpu-ldst.h"
-#include "exec/exec-all.h"
void HELPER(gvec_vbperm)(void *v1, const void *v2, const void *v3,
uint32_t desc)
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index 57d7b5f..4f561e8 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -24,9 +24,9 @@
#include "qemu/qemu-print.h"
#include "cpu.h"
#include "migration/vmstate.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "fpu/softfloat-helpers.h"
+#include "accel/tcg/cpu-ops.h"
#include "tcg/tcg.h"
static void superh_cpu_set_pc(CPUState *cs, vaddr value)
@@ -43,6 +43,29 @@ static vaddr superh_cpu_get_pc(CPUState *cs)
return cpu->env.pc;
}
+static TCGTBCPUState superh_get_tb_cpu_state(CPUState *cs)
+{
+ CPUSH4State *env = cpu_env(cs);
+ uint32_t flags;
+
+ flags = env->flags
+ | (env->fpscr & TB_FLAG_FPSCR_MASK)
+ | (env->sr & TB_FLAG_SR_MASK)
+ | (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 3 */
+#ifdef CONFIG_USER_ONLY
+ flags |= TB_FLAG_UNALIGN * !cs->prctl_unalign_sigbus;
+#endif
+
+ return (TCGTBCPUState){
+ .pc = env->pc,
+ .flags = flags,
+#ifdef CONFIG_USER_ONLY
+ /* For a gUSA region, notice the end of the region. */
+ .cs_base = flags & TB_FLAG_GUSA_MASK ? env->gregs[0] : 0,
+#endif
+ };
+}
+
static void superh_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -259,8 +282,6 @@ static const struct SysemuCPUOps sh4_sysemu_ops = {
};
#endif
-#include "accel/tcg/cpu-ops.h"
-
static const TCGCPUOps superh_tcg_ops = {
/* MTTCG not yet supported: require strict ordering */
.guest_default_memory_order = TCG_MO_ALL,
@@ -268,14 +289,17 @@ static const TCGCPUOps superh_tcg_ops = {
.initialize = sh4_translate_init,
.translate_code = sh4_translate_code,
+ .get_tb_cpu_state = superh_get_tb_cpu_state,
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
.restore_state_to_opc = superh_restore_state_to_opc,
.mmu_index = sh4_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
.tlb_fill = superh_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_notreached,
.cpu_exec_interrupt = superh_cpu_exec_interrupt,
.cpu_exec_halt = superh_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = superh_cpu_do_interrupt,
.do_unaligned_access = superh_cpu_do_unaligned_access,
.io_recompile_replay_branch = superh_io_recompile_replay_branch,
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index 906f99d..c41ab70 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -380,19 +380,4 @@ static inline void cpu_write_sr(CPUSH4State *env, target_ulong sr)
env->sr = sr & ~((1u << SR_M) | (1u << SR_Q) | (1u << SR_T));
}
-static inline void cpu_get_tb_cpu_state(CPUSH4State *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *pc = env->pc;
- /* For a gUSA region, notice the end of the region. */
- *cs_base = env->flags & TB_FLAG_GUSA_MASK ? env->gregs[0] : 0;
- *flags = env->flags
- | (env->fpscr & TB_FLAG_FPSCR_MASK)
- | (env->sr & TB_FLAG_SR_MASK)
- | (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 3 */
-#ifdef CONFIG_USER_ONLY
- *flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
-#endif
-}
-
#endif /* SH4_CPU_H */
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index b41d14d..fb7642b 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -21,7 +21,6 @@
#include "cpu.h"
#include "exec/cputlb.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "exec/log.h"
diff --git a/target/sh4/op_helper.c b/target/sh4/op_helper.c
index e7fcad3..557b1bf 100644
--- a/target/sh4/op_helper.c
+++ b/target/sh4/op_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "fpu/softfloat.h"
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index d796ad5..70fd13a 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
@@ -55,7 +54,7 @@ typedef struct DisasContext {
#define UNALIGN(C) (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN)
#else
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
-#define UNALIGN(C) 0
+#define UNALIGN(C) MO_ALIGN
#endif
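In MemOp terms the old system-mode value 0 is MO_UNALN, i.e. no alignment checking, so unaligned guest accesses were silently allowed; MO_ALIGN makes each access trap through do_unaligned_access, as SH-4 hardware does. An illustrative use, modeled on the loads in this file rather than quoted from the diff:

    /* The macro's result is OR-ed into the MemOp of each guest access. */
    tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_TESL | UNALIGN(ctx));
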
/* Target-specific values for ctx->base.is_jmp. */
@@ -695,14 +694,8 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
return;
case 0x300e: /* addc Rm,Rn */
- {
- TCGv t0, t1;
- t0 = tcg_constant_tl(0);
- t1 = tcg_temp_new();
- tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
- tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
- REG(B11_8), t0, t1, cpu_sr_t);
- }
+ tcg_gen_addcio_i32(REG(B11_8), cpu_sr_t,
+ REG(B11_8), REG(B7_4), cpu_sr_t);
return;
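tcg_gen_addcio_i32() is a carry-in/carry-out add helper; the deleted block above is exactly the expansion it folds away. A sketch of the assumed semantics of tcg_gen_addcio_i32(sum, cout, a, b, ci), reconstructed from the removed lines (variable names illustrative):

    /* sum = low 32 bits of a + b + ci; cout = carry out (0 or 1). */
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 t = tcg_temp_new_i32();
    tcg_gen_add2_i32(t, cout, a, zero, ci, zero);   /* t:cout = a + ci */
    tcg_gen_add2_i32(sum, cout, t, cout, b, zero);  /* sum:cout = t + b */
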
case 0x300f: /* addv Rm,Rn */
{
@@ -1940,16 +1933,16 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
NEXT_INSN;
switch (ctx->opcode & 0xf00f) {
case 0x300c: /* add Rm,Rn */
- op_opc = INDEX_op_add_i32;
+ op_opc = INDEX_op_add;
goto do_reg_op;
case 0x2009: /* and Rm,Rn */
- op_opc = INDEX_op_and_i32;
+ op_opc = INDEX_op_and;
goto do_reg_op;
case 0x200a: /* xor Rm,Rn */
- op_opc = INDEX_op_xor_i32;
+ op_opc = INDEX_op_xor;
goto do_reg_op;
case 0x200b: /* or Rm,Rn */
- op_opc = INDEX_op_or_i32;
+ op_opc = INDEX_op_or;
do_reg_op:
/* The operation register should be as expected, and the
other input cannot depend on the load. */
@@ -1976,7 +1969,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
goto fail;
}
op_dst = B11_8;
- op_opc = INDEX_op_xor_i32;
+ op_opc = INDEX_op_xor;
op_arg = tcg_constant_i32(-1);
break;
@@ -1984,7 +1977,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
if (op_dst != B11_8 || mv_src >= 0) {
goto fail;
}
- op_opc = INDEX_op_add_i32;
+ op_opc = INDEX_op_add;
op_arg = tcg_constant_i32(B7_0s);
break;
@@ -1995,7 +1988,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
goto fail;
}
- op_opc = INDEX_op_setcond_i32; /* placeholder */
+ op_opc = INDEX_op_setcond; /* placeholder */
op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
op_arg = REG(op_src);
@@ -2030,7 +2023,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
goto fail;
}
- op_opc = INDEX_op_setcond_i32;
+ op_opc = INDEX_op_setcond;
op_arg = tcg_constant_i32(0);
NEXT_INSN;
@@ -2087,7 +2080,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
ctx->memidx, ld_mop);
break;
- case INDEX_op_add_i32:
+ case INDEX_op_add:
if (op_dst != st_src) {
goto fail;
}
@@ -2105,7 +2098,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
}
break;
- case INDEX_op_and_i32:
+ case INDEX_op_and:
if (op_dst != st_src) {
goto fail;
}
@@ -2119,7 +2112,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
}
break;
- case INDEX_op_or_i32:
+ case INDEX_op_or:
if (op_dst != st_src) {
goto fail;
}
@@ -2133,7 +2126,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
}
break;
- case INDEX_op_xor_i32:
+ case INDEX_op_xor:
if (op_dst != st_src) {
goto fail;
}
@@ -2147,7 +2140,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
}
break;
- case INDEX_op_setcond_i32:
+ case INDEX_op_setcond:
if (st_src == ld_dst) {
goto fail;
}
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index bc753d5..ed7701b 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -23,7 +23,6 @@
#include "qemu/module.h"
#include "qemu/qemu-print.h"
#include "accel/tcg/cpu-mmu-index.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "hw/qdev-properties.h"
#include "qapi/visitor.h"
@@ -717,13 +716,11 @@ static void sparc_cpu_synchronize_from_tb(CPUState *cs,
cpu->env.npc = tb->cs_base;
}
-void cpu_get_tb_cpu_state(CPUSPARCState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags)
+static TCGTBCPUState sparc_get_tb_cpu_state(CPUState *cs)
{
- uint32_t flags;
- *pc = env->pc;
- *cs_base = env->npc;
- flags = cpu_mmu_index(env_cpu(env), false);
+ CPUSPARCState *env = cpu_env(cs);
+ uint32_t flags = cpu_mmu_index(cs, false);
+
#ifndef CONFIG_USER_ONLY
if (cpu_supervisor_mode(env)) {
flags |= TB_FLAG_SUPER;
@@ -752,7 +749,12 @@ void cpu_get_tb_cpu_state(CPUSPARCState *env, vaddr *pc,
}
#endif /* !CONFIG_USER_ONLY */
#endif /* TARGET_SPARC64 */
- *pflags = flags;
+
+ return (TCGTBCPUState){
+ .pc = env->pc,
+ .flags = flags,
+ .cs_base = env->npc,
+ };
}
static void sparc_restore_state_to_opc(CPUState *cs,
@@ -1000,6 +1002,18 @@ static const struct SysemuCPUOps sparc_sysemu_ops = {
#ifdef CONFIG_TCG
#include "accel/tcg/cpu-ops.h"
+#ifndef CONFIG_USER_ONLY
+static vaddr sparc_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+#ifdef TARGET_SPARC64
+ return cpu_env(cs)->pstate & PS_AM ? (uint32_t)result : result;
+#else
+ return (uint32_t)result;
+#endif
+}
+#endif
+
static const TCGCPUOps sparc_tcg_ops = {
/*
* From Oracle SPARC Architecture 2015:
@@ -1027,14 +1041,17 @@ static const TCGCPUOps sparc_tcg_ops = {
.initialize = sparc_tcg_init,
.translate_code = sparc_translate_code,
+ .get_tb_cpu_state = sparc_get_tb_cpu_state,
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
.restore_state_to_opc = sparc_restore_state_to_opc,
.mmu_index = sparc_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
.tlb_fill = sparc_cpu_tlb_fill,
+ .pointer_wrap = sparc_pointer_wrap,
.cpu_exec_interrupt = sparc_cpu_exec_interrupt,
.cpu_exec_halt = sparc_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = sparc_cpu_do_interrupt,
.do_transaction_failed = sparc_cpu_do_transaction_failed,
.do_unaligned_access = sparc_cpu_do_unaligned_access,
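sparc_pointer_wrap truncates a computed address to 32 bits except on a 64-bit CPU running without PS_AM. A hedged sketch of what the two generic helpers wired up elsewhere in this series plausibly do; the bodies below are assumptions inferred from the per-target code above, not quoted from the diff:

    vaddr cpu_pointer_wrap_uint32(CPUState *cs, int mmu_idx,
                                  vaddr result, vaddr base)
    {
        return (uint32_t)result;    /* 32-bit guests wrap addresses mod 2^32 */
    }

    vaddr cpu_pointer_wrap_notreached(CPUState *cs, int mmu_idx,
                                      vaddr result, vaddr base)
    {
        g_assert_not_reached();     /* target never wraps a guest pointer */
    }
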
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index 37fd1e0..31cb3d9 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -741,9 +741,6 @@ trap_state* cpu_tsptr(CPUSPARCState* env);
#define TB_FLAG_FSR_QNE (1 << 8)
#define TB_FLAG_ASI_SHIFT 24
-void cpu_get_tb_cpu_state(CPUSPARCState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags);
-
static inline bool tb_fpu_enabled(int tb_flags)
{
#if defined(CONFIG_USER_ONLY)
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
index c25097d..29fd166 100644
--- a/target/sparc/fop_helper.c
+++ b/target/sparc/fop_helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
@@ -446,7 +445,6 @@ static uint32_t finish_fcmp(CPUSPARCState *env, FloatRelation r, uintptr_t ra)
case float_relation_greater:
return 2;
case float_relation_unordered:
- env->fsr |= FSR_NVA;
return 3;
}
g_assert_not_reached();
diff --git a/target/sparc/helper.c b/target/sparc/helper.c
index 7846ddd..9163b9d 100644
--- a/target/sparc/helper.c
+++ b/target/sparc/helper.c
@@ -19,7 +19,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "qemu/timer.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
index 4c5dba1..2c63eb9 100644
--- a/target/sparc/ldst_helper.c
+++ b/target/sparc/ldst_helper.c
@@ -23,7 +23,6 @@
#include "cpu.h"
#include "tcg/tcg.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
diff --git a/target/sparc/machine.c b/target/sparc/machine.c
index 222e570..4dd75af 100644
--- a/target/sparc/machine.c
+++ b/target/sparc/machine.c
@@ -1,6 +1,5 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "qemu/timer.h"
#include "migration/cpu.h"
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index adebddf..b922e53 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -22,7 +22,6 @@
#include "cpu.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
@@ -396,8 +395,7 @@ static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
TCGv z = tcg_constant_tl(0);
if (cin) {
- tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
- tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
+ tcg_gen_addcio_tl(cpu_cc_N, cpu_cc_C, src1, src2, cin);
} else {
tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
}
diff --git a/target/sparc/win_helper.c b/target/sparc/win_helper.c
index 0c4b09f..9ad9d01 100644
--- a/target/sparc/win_helper.c
+++ b/target/sparc/win_helper.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "trace.h"
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index 098cd06..4f035b6 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -20,10 +20,10 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "qemu/error-report.h"
#include "tcg/debug-assert.h"
+#include "accel/tcg/cpu-ops.h"
static inline void set_feature(CPUTriCoreState *env, int feature)
{
@@ -45,6 +45,16 @@ static vaddr tricore_cpu_get_pc(CPUState *cs)
return cpu_env(cs)->PC;
}
+static TCGTBCPUState tricore_get_tb_cpu_state(CPUState *cs)
+{
+ CPUTriCoreState *env = cpu_env(cs);
+
+ return (TCGTBCPUState){
+ .pc = env->PC,
+ .flags = FIELD_DP32(0, TB_FLAGS, PRIV, extract32(env->PSW, 10, 2)),
+ };
+}
+
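FIELD_DP32(container, REG, FIELD, val) deposits val into the bit range declared by a matching FIELD(REG, FIELD, shift, length) definition. Assuming tricore declares FIELD(TB_FLAGS, PRIV, 0, 2) (an assumption; the declaration is not shown in this diff), the flags computation above reduces to:

    uint32_t flags = deposit32(0, 0, 2, extract32(env->PSW, 10, 2));
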
static void tricore_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -169,20 +179,21 @@ static const struct SysemuCPUOps tricore_sysemu_ops = {
.get_phys_page_debug = tricore_cpu_get_phys_page_debug,
};
-#include "accel/tcg/cpu-ops.h"
-
static const TCGCPUOps tricore_tcg_ops = {
/* MTTCG not yet supported: require strict ordering */
.guest_default_memory_order = TCG_MO_ALL,
.mttcg_supported = false,
.initialize = tricore_tcg_init,
.translate_code = tricore_translate_code,
+ .get_tb_cpu_state = tricore_get_tb_cpu_state,
.synchronize_from_tb = tricore_cpu_synchronize_from_tb,
.restore_state_to_opc = tricore_restore_state_to_opc,
.mmu_index = tricore_cpu_mmu_index,
.tlb_fill = tricore_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = tricore_cpu_exec_interrupt,
.cpu_exec_halt = tricore_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
};
static void tricore_cpu_class_init(ObjectClass *c, const void *data)
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
index c76e65f..82085fb 100644
--- a/target/tricore/cpu.h
+++ b/target/tricore/cpu.h
@@ -258,18 +258,6 @@ void tricore_tcg_init(void);
void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
int *max_insns, vaddr pc, void *host_pc);
-static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- uint32_t new_flags = 0;
- *pc = env->PC;
- *cs_base = 0;
-
- new_flags |= FIELD_DP32(new_flags, TB_FLAGS, PRIV,
- extract32(env->PSW, 10, 2));
- *flags = new_flags;
-}
-
#define CPU_RESOLVING_TYPE TYPE_TRICORE_CPU
/* helpers.c */
diff --git a/target/tricore/op_helper.c b/target/tricore/op_helper.c
index ae559b6..9910c13 100644
--- a/target/tricore/op_helper.c
+++ b/target/tricore/op_helper.c
@@ -18,7 +18,6 @@
#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include <zlib.h> /* for crc32 */
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index 7cd26d8..3d0e7a1 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "accel/tcg/cpu-ldst.h"
#include "qemu/qemu-print.h"
@@ -1346,15 +1345,11 @@ static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
{
- TCGv carry = tcg_temp_new_i32();
- TCGv t0 = tcg_temp_new_i32();
+ TCGv t0 = tcg_temp_new_i32();
TCGv result = tcg_temp_new_i32();
- tcg_gen_movi_tl(t0, 0);
- tcg_gen_setcondi_tl(TCG_COND_NE, carry, cpu_PSW_C, 0);
/* Addition, carry and set C/V/SV bits */
- tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, carry, t0);
- tcg_gen_add2_i32(result, cpu_PSW_C, result, cpu_PSW_C, r2, t0);
+ tcg_gen_addcio_i32(result, cpu_PSW_C, r1, r2, cpu_PSW_C);
/* calc V bit */
tcg_gen_xor_tl(cpu_PSW_V, result, r1);
tcg_gen_xor_tl(t0, r1, r2);
@@ -3981,7 +3976,7 @@ static void decode_bit_andacc(DisasContext *ctx)
pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_and_tl);
break;
case OPC2_32_BIT_AND_NOR_T:
- if (tcg_op_supported(INDEX_op_andc_i32, TCG_TYPE_I32, 0)) {
+ if (tcg_op_supported(INDEX_op_andc, TCG_TYPE_I32, 0)) {
gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
pos1, pos2, &tcg_gen_or_tl, &tcg_gen_andc_tl);
} else {
@@ -4114,7 +4109,7 @@ static void decode_bit_orand(DisasContext *ctx)
pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_or_tl);
break;
case OPC2_32_BIT_OR_NOR_T:
- if (tcg_op_supported(INDEX_op_orc_i32, TCG_TYPE_I32, 0)) {
+ if (tcg_op_supported(INDEX_op_orc, TCG_TYPE_I32, 0)) {
gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
pos1, pos2, &tcg_gen_or_tl, &tcg_gen_orc_tl);
} else {
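The INDEX_op_* constants lose their _i32/_i64 suffixes throughout this patch: TCG opcodes are now typeless, with the operating type passed separately, as in the tcg_op_supported(INDEX_op_andc, TCG_TYPE_I32, 0) probes above. A sketch of the probe-and-fallback pattern those calls support (variable names illustrative):

    if (tcg_op_supported(INDEX_op_andc, TCG_TYPE_I32, 0)) {
        tcg_gen_andc_i32(dst, a, b);      /* host has and-with-complement */
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_not_i32(t, b);            /* otherwise expand as NOT + AND */
        tcg_gen_and_i32(dst, a, t);
    }
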
diff --git a/target/xtensa/core-dc232b/gdb-config.c.inc b/target/xtensa/core-dc232b/gdb-config.c.inc
index d871686..8c88cae 100644
--- a/target/xtensa/core-dc232b/gdb-config.c.inc
+++ b/target/xtensa/core-dc232b/gdb-config.c.inc
@@ -15,9 +15,8 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor,
- Boston, MA 02110-1301, USA. */
+ along with this program; if not, see
+ <https://www.gnu.org/licenses/>. */
XTREG(0, 0, 32, 4, 4, 0x0020, 0x0006, -2, 9, 0x0100, pc,
0, 0, 0, 0, 0, 0)
diff --git a/target/xtensa/core-dc232b/xtensa-modules.c.inc b/target/xtensa/core-dc232b/xtensa-modules.c.inc
index 164df3b..bb9ebd2 100644
--- a/target/xtensa/core-dc232b/xtensa-modules.c.inc
+++ b/target/xtensa/core-dc232b/xtensa-modules.c.inc
@@ -14,9 +14,8 @@
General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with this program; if not, see
+ <https://www.gnu.org/licenses/>. */
#include "qemu/osdep.h"
#include "xtensa-isa.h"
diff --git a/target/xtensa/core-fsf/xtensa-modules.c.inc b/target/xtensa/core-fsf/xtensa-modules.c.inc
index c32683f..531f5e2 100644
--- a/target/xtensa/core-fsf/xtensa-modules.c.inc
+++ b/target/xtensa/core-fsf/xtensa-modules.c.inc
@@ -14,9 +14,8 @@
General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with this program; if not, see
+ <https://www.gnu.org/licenses/>. */
#include "qemu/osdep.h"
#include "xtensa-isa.h"
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index 27d6e40..ea9b6df 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -35,6 +35,7 @@
#include "qemu/module.h"
#include "migration/vmstate.h"
#include "hw/qdev-clock.h"
+#include "accel/tcg/cpu-ops.h"
#ifndef CONFIG_USER_ONLY
#include "system/memory.h"
#endif
@@ -54,6 +55,80 @@ static vaddr xtensa_cpu_get_pc(CPUState *cs)
return cpu->env.pc;
}
+static TCGTBCPUState xtensa_get_tb_cpu_state(CPUState *cs)
+{
+ CPUXtensaState *env = cpu_env(cs);
+ uint32_t flags = 0;
+ target_ulong cs_base = 0;
+
+ flags |= xtensa_get_ring(env);
+ if (env->sregs[PS] & PS_EXCM) {
+ flags |= XTENSA_TBFLAG_EXCM;
+ } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_LOOP)) {
+ target_ulong lend_dist =
+ env->sregs[LEND] - (env->pc & -(1u << TARGET_PAGE_BITS));
+
+ /*
+ * 0 in the csbase_lend field means that there may not be a loopback
+ * for any instruction that starts inside this page. Any other value
+ * means that an instruction that ends at this offset from the page
+ * start may loop back and will need loopback code to be generated.
+ *
+ * lend_dist is 0 when LEND points to the start of the page, but
+ * no instruction that starts inside this page may end at offset 0,
+ * so it's still correct.
+ *
+ * When an instruction ends at a page boundary it may only start in
+ * the previous page. lend_dist will be encoded as TARGET_PAGE_SIZE
+ * for the TB that contains this instruction.
+ */
+ if (lend_dist < (1u << TARGET_PAGE_BITS) + env->config->max_insn_size) {
+ target_ulong lbeg_off = env->sregs[LEND] - env->sregs[LBEG];
+
+ cs_base = lend_dist;
+ if (lbeg_off < 256) {
+ cs_base |= lbeg_off << XTENSA_CSBASE_LBEG_OFF_SHIFT;
+ }
+ }
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_EXTENDED_L32R) &&
+ (env->sregs[LITBASE] & 1)) {
+ flags |= XTENSA_TBFLAG_LITBASE;
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_DEBUG)) {
+ if (xtensa_get_cintlevel(env) < env->config->debug_level) {
+ flags |= XTENSA_TBFLAG_DEBUG;
+ }
+ if (xtensa_get_cintlevel(env) < env->sregs[ICOUNTLEVEL]) {
+ flags |= XTENSA_TBFLAG_ICOUNT;
+ }
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_COPROCESSOR)) {
+ flags |= env->sregs[CPENABLE] << XTENSA_TBFLAG_CPENABLE_SHIFT;
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_WINDOWED_REGISTER) &&
+ (env->sregs[PS] & (PS_WOE | PS_EXCM)) == PS_WOE) {
+ uint32_t windowstart = xtensa_replicate_windowstart(env) >>
+ (env->sregs[WINDOW_BASE] + 1);
+ uint32_t w = ctz32(windowstart | 0x8);
+
+ flags |= (w << XTENSA_TBFLAG_WINDOW_SHIFT) | XTENSA_TBFLAG_CWOE;
+ flags |= extract32(env->sregs[PS], PS_CALLINC_SHIFT,
+ PS_CALLINC_LEN) << XTENSA_TBFLAG_CALLINC_SHIFT;
+ } else {
+ flags |= 3 << XTENSA_TBFLAG_WINDOW_SHIFT;
+ }
+ if (env->yield_needed) {
+ flags |= XTENSA_TBFLAG_YIELD;
+ }
+
+ return (TCGTBCPUState){
+ .pc = env->pc,
+ .flags = flags,
+ .cs_base = cs_base,
+ };
+}
+
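A hedged decode sketch for the loop state packed into cs_base above. Only the LBEG_OFF mask and shift are visible in the cpu.h hunk below; XTENSA_CSBASE_LEND_MASK is assumed to cover the low 16 bits:

    uint32_t lend_dist = cs_base & XTENSA_CSBASE_LEND_MASK;
    uint32_t lbeg_off = (cs_base & XTENSA_CSBASE_LBEG_OFF_MASK)
                        >> XTENSA_CSBASE_LBEG_OFF_SHIFT;
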
static void xtensa_restore_state_to_opc(CPUState *cs,
const TranslationBlock *tb,
const uint64_t *data)
@@ -229,8 +304,6 @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {
};
#endif
-#include "accel/tcg/cpu-ops.h"
-
static const TCGCPUOps xtensa_tcg_ops = {
/* Xtensa processors have a weak memory model */
.guest_default_memory_order = 0,
@@ -239,13 +312,16 @@ static const TCGCPUOps xtensa_tcg_ops = {
.initialize = xtensa_translate_init,
.translate_code = xtensa_translate_code,
.debug_excp_handler = xtensa_breakpoint_handler,
+ .get_tb_cpu_state = xtensa_get_tb_cpu_state,
.restore_state_to_opc = xtensa_restore_state_to_opc,
.mmu_index = xtensa_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
.tlb_fill = xtensa_cpu_tlb_fill,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = xtensa_cpu_exec_interrupt,
.cpu_exec_halt = xtensa_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = xtensa_cpu_do_interrupt,
.do_transaction_failed = xtensa_cpu_do_transaction_failed,
.do_unaligned_access = xtensa_cpu_do_unaligned_access,
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
index c03ed71..74122eb 100644
--- a/target/xtensa/cpu.h
+++ b/target/xtensa/cpu.h
@@ -733,74 +733,6 @@ static inline uint32_t xtensa_replicate_windowstart(CPUXtensaState *env)
#define XTENSA_CSBASE_LBEG_OFF_MASK 0x00ff0000
#define XTENSA_CSBASE_LBEG_OFF_SHIFT 16
-static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
-{
- *pc = env->pc;
- *cs_base = 0;
- *flags = 0;
- *flags |= xtensa_get_ring(env);
- if (env->sregs[PS] & PS_EXCM) {
- *flags |= XTENSA_TBFLAG_EXCM;
- } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_LOOP)) {
- target_ulong lend_dist =
- env->sregs[LEND] - (env->pc & -(1u << TARGET_PAGE_BITS));
-
- /*
- * 0 in the csbase_lend field means that there may not be a loopback
- * for any instruction that starts inside this page. Any other value
- * means that an instruction that ends at this offset from the page
- * start may loop back and will need loopback code to be generated.
- *
- * lend_dist is 0 when LEND points to the start of the page, but
- * no instruction that starts inside this page may end at offset 0,
- * so it's still correct.
- *
- * When an instruction ends at a page boundary it may only start in
- * the previous page. lend_dist will be encoded as TARGET_PAGE_SIZE
- * for the TB that contains this instruction.
- */
- if (lend_dist < (1u << TARGET_PAGE_BITS) + env->config->max_insn_size) {
- target_ulong lbeg_off = env->sregs[LEND] - env->sregs[LBEG];
-
- *cs_base = lend_dist;
- if (lbeg_off < 256) {
- *cs_base |= lbeg_off << XTENSA_CSBASE_LBEG_OFF_SHIFT;
- }
- }
- }
- if (xtensa_option_enabled(env->config, XTENSA_OPTION_EXTENDED_L32R) &&
- (env->sregs[LITBASE] & 1)) {
- *flags |= XTENSA_TBFLAG_LITBASE;
- }
- if (xtensa_option_enabled(env->config, XTENSA_OPTION_DEBUG)) {
- if (xtensa_get_cintlevel(env) < env->config->debug_level) {
- *flags |= XTENSA_TBFLAG_DEBUG;
- }
- if (xtensa_get_cintlevel(env) < env->sregs[ICOUNTLEVEL]) {
- *flags |= XTENSA_TBFLAG_ICOUNT;
- }
- }
- if (xtensa_option_enabled(env->config, XTENSA_OPTION_COPROCESSOR)) {
- *flags |= env->sregs[CPENABLE] << XTENSA_TBFLAG_CPENABLE_SHIFT;
- }
- if (xtensa_option_enabled(env->config, XTENSA_OPTION_WINDOWED_REGISTER) &&
- (env->sregs[PS] & (PS_WOE | PS_EXCM)) == PS_WOE) {
- uint32_t windowstart = xtensa_replicate_windowstart(env) >>
- (env->sregs[WINDOW_BASE] + 1);
- uint32_t w = ctz32(windowstart | 0x8);
-
- *flags |= (w << XTENSA_TBFLAG_WINDOW_SHIFT) | XTENSA_TBFLAG_CWOE;
- *flags |= extract32(env->sregs[PS], PS_CALLINC_SHIFT,
- PS_CALLINC_LEN) << XTENSA_TBFLAG_CALLINC_SHIFT;
- } else {
- *flags |= 3 << XTENSA_TBFLAG_WINDOW_SHIFT;
- }
- if (env->yield_needed) {
- *flags |= XTENSA_TBFLAG_YIELD;
- }
-}
-
XtensaCPU *xtensa_cpu_create_with_clock(const char *cpu_type,
Clock *cpu_refclk);
diff --git a/target/xtensa/dbg_helper.c b/target/xtensa/dbg_helper.c
index c4f4298..3b91f7c 100644
--- a/target/xtensa/dbg_helper.c
+++ b/target/xtensa/dbg_helper.c
@@ -30,7 +30,6 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
#include "exec/watchpoint.h"
#include "system/address-spaces.h"
diff --git a/target/xtensa/exc_helper.c b/target/xtensa/exc_helper.c
index ca629f0..b611c9b 100644
--- a/target/xtensa/exc_helper.c
+++ b/target/xtensa/exc_helper.c
@@ -32,7 +32,6 @@
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/atomic.h"
-#include "exec/exec-all.h"
void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
{
diff --git a/target/xtensa/fpu_helper.c b/target/xtensa/fpu_helper.c
index 53fc7cf..5358060 100644
--- a/target/xtensa/fpu_helper.c
+++ b/target/xtensa/fpu_helper.c
@@ -30,7 +30,6 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
#include "fpu/softfloat.h"
enum {
diff --git a/target/xtensa/mmu_helper.c b/target/xtensa/mmu_helper.c
index a7dd810..71330fc 100644
--- a/target/xtensa/mmu_helper.c
+++ b/target/xtensa/mmu_helper.c
@@ -34,7 +34,7 @@
#include "qemu/host-utils.h"
#include "exec/cputlb.h"
#include "accel/tcg/cpu-mmu-index.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/probe.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "system/memory.h"
diff --git a/target/xtensa/op_helper.c b/target/xtensa/op_helper.c
index c125fa4..fc47eba 100644
--- a/target/xtensa/op_helper.c
+++ b/target/xtensa/op_helper.c
@@ -30,7 +30,6 @@
#include "exec/helper-proto.h"
#include "exec/page-protection.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
#include "system/memory.h"
#include "qemu/atomic.h"
#include "qemu/timer.h"
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 2af83c0..34ae2f4 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -31,7 +31,6 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
diff --git a/target/xtensa/win_helper.c b/target/xtensa/win_helper.c
index ec9ff44..4b25f8f 100644
--- a/target/xtensa/win_helper.c
+++ b/target/xtensa/win_helper.c
@@ -30,7 +30,6 @@
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
-#include "exec/exec-all.h"
static void copy_window_from_phys(CPUXtensaState *env,
uint32_t window, uint32_t phys, uint32_t n)