Diffstat (limited to 'target')
-rw-r--r--  target/arm/cpregs-gcs.c | 156
-rw-r--r--  target/arm/cpregs.h | 42
-rw-r--r--  target/arm/cpu-features.h | 20
-rw-r--r--  target/arm/cpu.c | 20
-rw-r--r--  target/arm/cpu.h | 253
-rw-r--r--  target/arm/gdbstub64.c | 2
-rw-r--r--  target/arm/helper.c | 391
-rw-r--r--  target/arm/internals.h | 151
-rw-r--r--  target/arm/machine.c | 113
-rw-r--r--  target/arm/meson.build | 9
-rw-r--r--  target/arm/mmuidx-internal.h | 113
-rw-r--r--  target/arm/mmuidx.c | 66
-rw-r--r--  target/arm/mmuidx.h | 241
-rw-r--r--  target/arm/ptw.c | 365
-rw-r--r--  target/arm/syndrome.h | 35
-rw-r--r--  target/arm/tcg-stubs.c | 2
-rw-r--r--  target/arm/tcg/a64.decode | 5
-rw-r--r--  target/arm/tcg/cpu64.c | 4
-rw-r--r--  target/arm/tcg/helper-a64.c | 35
-rw-r--r--  target/arm/tcg/helper-a64.h | 5
-rw-r--r--  target/arm/tcg/hflags.c | 38
-rw-r--r--  target/arm/tcg/mte_helper.c | 2
-rw-r--r--  target/arm/tcg/op_helper.c | 11
-rw-r--r--  target/arm/tcg/tlb-insns.c | 47
-rw-r--r--  target/arm/tcg/tlb_helper.c | 18
-rw-r--r--  target/arm/tcg/translate-a64.c | 438
-rw-r--r--  target/arm/tcg/translate.c | 78
-rw-r--r--  target/arm/tcg/translate.h | 46
-rw-r--r--  target/i386/cpu.c | 32
-rw-r--r--  target/i386/cpu.h | 16
-rw-r--r--  target/i386/emulate/meson.build | 7
-rw-r--r--  target/i386/emulate/x86_decode.c | 27
-rw-r--r--  target/i386/emulate/x86_decode.h | 9
-rw-r--r--  target/i386/emulate/x86_emu.c | 3
-rw-r--r--  target/i386/emulate/x86_emu.h | 2
-rw-r--r--  target/i386/kvm/kvm.c | 6
-rw-r--r--  target/i386/meson.build | 2
-rw-r--r--  target/i386/mshv/meson.build | 8
-rw-r--r--  target/i386/mshv/mshv-cpu.c | 1763
-rw-r--r--  target/i386/mshv/x86.c | 297
-rw-r--r--  target/loongarch/cpu.c | 318
-rw-r--r--  target/loongarch/internals.h | 4
-rw-r--r--  target/loongarch/tcg/meson.build | 1
-rw-r--r--  target/loongarch/tcg/tcg_cpu.c | 322
-rw-r--r--  target/loongarch/tcg/tcg_loongarch.h | 1
-rw-r--r--  target/s390x/helper.c | 122
-rw-r--r--  target/s390x/s390x-internal.h | 5
-rw-r--r--  target/s390x/sigp.c | 117
-rw-r--r--  target/s390x/tcg/excp_helper.c | 19
-rw-r--r--  target/s390x/tcg/misc_helper.c | 4
50 files changed, 4743 insertions, 1048 deletions
diff --git a/target/arm/cpregs-gcs.c b/target/arm/cpregs-gcs.c
new file mode 100644
index 0000000..1ed52a2
--- /dev/null
+++ b/target/arm/cpregs-gcs.c
@@ -0,0 +1,156 @@
+/*
+ * QEMU ARM CP register definitions for GCS registers and instructions
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/timer.h"
+#include "exec/icount.h"
+#include "hw/irq.h"
+#include "cpu.h"
+#include "cpu-features.h"
+#include "cpregs.h"
+#include "internals.h"
+
+
+static CPAccessResult access_gcs(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) < 3
+ && arm_feature(env, ARM_FEATURE_EL3)
+ && !(env->cp15.scr_el3 & SCR_GCSEN)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_gcs_el0(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 0 && !(env->cp15.gcscr_el[0] & GCSCRE0_NTR)) {
+ return CP_ACCESS_TRAP_EL1;
+ }
+ return access_gcs(env, ri, isread);
+}
+
+static void gcspr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * Bits [2:0] are RES0, so we might as well clear them now,
+ * rather than upon each usage a-la GetCurrentGCSPointer.
+ */
+ raw_write(env, ri, value & ~7);
+}
+
+static CPAccessResult access_gcspushm(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+ if (!(env->cp15.gcscr_el[el] & GCSCR_PUSHMEN)) {
+ return CP_ACCESS_TRAP_BIT | (el ? el : 1);
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_gcspushx(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* Trap if lock not taken, and enabled. */
+ if (!(env->pstate & PSTATE_EXLOCK)) {
+ int el = arm_current_el(env);
+ if (env->cp15.gcscr_el[el] & GCSCR_EXLOCKEN) {
+ return CP_ACCESS_EXLOCK;
+ }
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_gcspopcx(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* Trap if lock taken, and enabled. */
+ if (env->pstate & PSTATE_EXLOCK) {
+ int el = arm_current_el(env);
+ if (env->cp15.gcscr_el[el] & GCSCR_EXLOCKEN) {
+ return CP_ACCESS_EXLOCK;
+ }
+ }
+ return CP_ACCESS_OK;
+}
+
+static const ARMCPRegInfo gcs_reginfo[] = {
+ { .name = "GCSCRE0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 5, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_gcs, .fgt = FGT_NGCS_EL0,
+ .fieldoffset = offsetof(CPUARMState, cp15.gcscr_el[0]) },
+ { .name = "GCSCR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 5, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_gcs, .fgt = FGT_NGCS_EL1,
+ .nv2_redirect_offset = 0x8d0 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 5, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 5, 0),
+ .fieldoffset = offsetof(CPUARMState, cp15.gcscr_el[1]) },
+ { .name = "GCSCR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 5, .opc2 = 0,
+ .access = PL2_RW, .accessfn = access_gcs,
+ .fieldoffset = offsetof(CPUARMState, cp15.gcscr_el[2]) },
+ { .name = "GCSCR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 5, .opc2 = 0,
+ .access = PL3_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.gcscr_el[3]) },
+
+ { .name = "GCSPR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 5, .opc2 = 1,
+ .access = PL0_R | PL1_W, .accessfn = access_gcs_el0,
+ .fgt = FGT_NGCS_EL0, .writefn = gcspr_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.gcspr_el[0]) },
+ { .name = "GCSPR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 5, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_gcs,
+ .fgt = FGT_NGCS_EL1, .writefn = gcspr_write,
+ .nv2_redirect_offset = 0x8c0 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 5, 1),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 5, 1),
+ .fieldoffset = offsetof(CPUARMState, cp15.gcspr_el[1]) },
+ { .name = "GCSPR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 5, .opc2 = 1,
+ .access = PL2_RW, .accessfn = access_gcs, .writefn = gcspr_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.gcspr_el[2]) },
+ { .name = "GCSPR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 5, .opc2 = 1,
+ .access = PL3_RW, .writefn = gcspr_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.gcspr_el[3]) },
+
+ { .name = "GCSPUSHM", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 7, .opc2 = 0,
+ .access = PL0_W, .accessfn = access_gcspushm,
+ .fgt = FGT_NGCSPUSHM_EL1, .type = ARM_CP_GCSPUSHM },
+ { .name = "GCSPOPM", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 7, .opc2 = 1,
+ .access = PL0_R, .type = ARM_CP_GCSPOPM },
+ { .name = "GCSSS1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 7, .opc2 = 2,
+ .access = PL0_W, .type = ARM_CP_GCSSS1 },
+ { .name = "GCSSS2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 7, .opc2 = 3,
+ .access = PL0_R, .type = ARM_CP_GCSSS2 },
+ { .name = "GCSPUSHX", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 7, .opc2 = 4,
+ .access = PL1_W, .accessfn = access_gcspushx, .fgt = FGT_NGCSEPP,
+ .type = ARM_CP_GCSPUSHX },
+ { .name = "GCSPOPCX", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 7, .opc2 = 5,
+ .access = PL1_W, .accessfn = access_gcspopcx, .fgt = FGT_NGCSEPP,
+ .type = ARM_CP_GCSPOPCX },
+ { .name = "GCSPOPX", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 7, .opc2 = 6,
+ .access = PL1_W, .type = ARM_CP_GCSPOPX },
+};
+
+void define_gcs_cpregs(ARMCPU *cpu)
+{
+ if (cpu_isar_feature(aa64_gcs, cpu)) {
+ define_arm_cp_regs(cpu, gcs_reginfo);
+ }
+}
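As a standalone illustration of the PSTATE.EXLOCK handshake that access_gcspushx()/access_gcspopcx() above enforce (armed by the exception-entry change in helper.c, which sets EXLOCK when GCSCR_ELx.EXLOCKEN is set), here is a plain-C sketch. It is illustrative only: the names are made up, and the clearing/setting of EXLOCK is the effect of the instructions themselves, not of the access functions shown above.

    #include <assert.h>
    #include <stdbool.h>

    static bool exlock;                    /* models PSTATE.EXLOCK */
    static const bool exlocken = true;     /* models GCSCR_ELx.EXLOCKEN */

    /* Returns false where the accessfn would return CP_ACCESS_EXLOCK. */
    static bool gcspushx(void)
    {
        if (!exlock && exlocken) {
            return false;                  /* push with nothing locked: trap */
        }
        exlock = false;                    /* record pushed; ELR/SPSR writable */
        return true;
    }

    static bool gcspopcx(void)
    {
        if (exlock && exlocken) {
            return false;                  /* pop while still locked: trap */
        }
        exlock = true;                     /* re-lock before ERET */
        return true;
    }

    int main(void)
    {
        exlock = true;                     /* exception entry with EXLOCKEN */
        assert(gcspushx());                /* push while locked: OK */
        assert(!gcspushx());               /* double push: EXLOCK trap */
        assert(gcspopcx());                /* matching pop: OK */
        assert(!gcspopcx());               /* pop without push: EXLOCK trap */
        return 0;
    }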
diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
index 57fde5f..763de5e 100644
--- a/target/arm/cpregs.h
+++ b/target/arm/cpregs.h
@@ -47,6 +47,14 @@ enum {
ARM_CP_DC_ZVA = 0x0005,
ARM_CP_DC_GVA = 0x0006,
ARM_CP_DC_GZVA = 0x0007,
+ /* Special: gcs instructions */
+ ARM_CP_GCSPUSHM = 0x0008,
+ ARM_CP_GCSPOPM = 0x0009,
+ ARM_CP_GCSPUSHX = 0x000a,
+ ARM_CP_GCSPOPX = 0x000b,
+ ARM_CP_GCSPOPCX = 0x000c,
+ ARM_CP_GCSSS1 = 0x000d,
+ ARM_CP_GCSSS2 = 0x000e,
/* Flag: reads produce resetvalue; writes ignored. */
ARM_CP_CONST = 1 << 4,
@@ -136,6 +144,11 @@ enum {
* identically to the normal one, other than FGT trapping handling.)
*/
ARM_CP_ADD_TLBI_NXS = 1 << 21,
+ /*
+ * Flag: even though this sysreg has opc1 == 4 or 5, it
+ * should not trap to EL2 when HCR_EL2.NV is set.
+ */
+ ARM_CP_NV_NO_TRAP = 1 << 22,
};
/*
@@ -351,6 +364,14 @@ typedef enum CPAccessResult {
* specified target EL.
*/
CP_ACCESS_UNDEFINED = (2 << 2),
+
+ /*
+ * Access fails with EXLOCK, a GCS exception syndrome.
+ * These traps are always to the current execution EL,
+ * which is the same as the usual target EL because
+ * they cannot occur from EL0.
+ */
+ CP_ACCESS_EXLOCK = (3 << 2),
} CPAccessResult;
/* Indexes into fgt_read[] */
@@ -779,8 +800,12 @@ typedef enum FGTBit {
DO_BIT(HFGRTR, VBAR_EL1),
DO_BIT(HFGRTR, ICC_IGRPENN_EL1),
DO_BIT(HFGRTR, ERRIDR_EL1),
+ DO_REV_BIT(HFGRTR, NGCS_EL0),
+ DO_REV_BIT(HFGRTR, NGCS_EL1),
DO_REV_BIT(HFGRTR, NSMPRI_EL1),
DO_REV_BIT(HFGRTR, NTPIDR2_EL0),
+ DO_REV_BIT(HFGRTR, NPIRE0_EL1),
+ DO_REV_BIT(HFGRTR, NPIR_EL1),
/* Trap bits in HDFGRTR_EL2 / HDFGWTR_EL2, starting from bit 0. */
DO_BIT(HDFGRTR, DBGBCRN_EL1),
@@ -859,6 +884,8 @@ typedef enum FGTBit {
DO_BIT(HFGITR, DVPRCTX),
DO_BIT(HFGITR, CPPRCTX),
DO_BIT(HFGITR, DCCVAC),
+ DO_REV_BIT(HFGITR, NGCSPUSHM_EL1),
+ DO_REV_BIT(HFGITR, NGCSEPP),
DO_BIT(HFGITR, ATS1E1A),
} FGTBit;
@@ -1156,12 +1183,17 @@ static inline bool arm_cpreg_traps_in_nv(const ARMCPRegInfo *ri)
* fragile to future new sysregs, but this seems the least likely
* to break.
*
- * In particular, note that the released sysreg XML defines that
- * the FEAT_MEC sysregs and instructions do not follow this FEAT_NV
- * trapping rule, so we will need to add an ARM_CP_* flag to indicate
- * "register does not trap on NV" to handle those if/when we implement
- * FEAT_MEC.
+ * In particular, note that the FEAT_MEC sysregs and instructions
+ * are exceptions to this trapping rule, so they are marked as
+ * ARM_CP_NV_NO_TRAP to indicate that they should not be trapped
+ * to EL2. (They are an exception because the FEAT_MEC sysregs UNDEF
+ * unless in Realm, and Realm is not expected to be virtualized.)
*/
+
+ if (ri->type & ARM_CP_NV_NO_TRAP) {
+ return false;
+ }
+
return ri->opc1 == 4 || ri->opc1 == 5;
}
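A minimal standalone restatement of the rule above (plain C, not QEMU code; the helper name and reduced argument list are illustrative):

    #include <assert.h>
    #include <stdbool.h>

    #define ARM_CP_NV_NO_TRAP (1 << 22)

    /* Mirrors arm_cpreg_traps_in_nv(): opc1 == 4/5 encodings trap to EL2
     * under HCR_EL2.NV unless the reginfo carries ARM_CP_NV_NO_TRAP. */
    static bool traps_in_nv(int opc1, int type)
    {
        if (type & ARM_CP_NV_NO_TRAP) {
            return false;
        }
        return opc1 == 4 || opc1 == 5;
    }

    int main(void)
    {
        assert(traps_in_nv(4, 0));                   /* e.g. an _EL2 sysreg */
        assert(!traps_in_nv(4, ARM_CP_NV_NO_TRAP));  /* e.g. MECIDR_EL2 below */
        assert(!traps_in_nv(0, 0));                  /* _EL1 encodings */
        return 0;
    }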
diff --git a/target/arm/cpu-features.h b/target/arm/cpu-features.h
index f59c18b..37f1eca 100644
--- a/target/arm/cpu-features.h
+++ b/target/arm/cpu-features.h
@@ -1149,6 +1149,11 @@ static inline bool isar_feature_aa64_nmi(const ARMISARegisters *id)
return FIELD_EX64_IDREG(id, ID_AA64PFR1, NMI) != 0;
}
+static inline bool isar_feature_aa64_gcs(const ARMISARegisters *id)
+{
+ return FIELD_EX64_IDREG(id, ID_AA64PFR1, GCS) != 0;
+}
+
static inline bool isar_feature_aa64_tgran4_lpa2(const ARMISARegisters *id)
{
return FIELD_SEX64_IDREG(id, ID_AA64MMFR0, TGRAN4) >= 1;
@@ -1349,6 +1354,21 @@ static inline bool isar_feature_aa64_sctlr2(const ARMISARegisters *id)
return FIELD_EX64_IDREG(id, ID_AA64MMFR3, SCTLRX) != 0;
}
+static inline bool isar_feature_aa64_s1pie(const ARMISARegisters *id)
+{
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR3, S1PIE) != 0;
+}
+
+static inline bool isar_feature_aa64_s2pie(const ARMISARegisters *id)
+{
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR3, S2PIE) != 0;
+}
+
+static inline bool isar_feature_aa64_mec(const ARMISARegisters *id)
+{
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR3, MEC) != 0;
+}
+
static inline bool isar_feature_aa64_pmuv3p1(const ARMISARegisters *id)
{
return FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) >= 4 &&
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 30e29fd..3b556f1 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -311,6 +311,10 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type)
env->cp15.mdscr_el1 |= 1 << 12;
/* Enable FEAT_MOPS */
env->cp15.sctlr_el[1] |= SCTLR_MSCEN;
+ /* For Linux, GCSPR_EL0 is always readable. */
+ if (cpu_isar_feature(aa64_gcs, cpu)) {
+ env->cp15.gcscr_el[0] = GCSCRE0_NTR;
+ }
#else
/* Reset into the highest available EL */
if (arm_feature(env, ARM_FEATURE_EL3)) {
@@ -635,12 +639,22 @@ void arm_emulate_firmware_reset(CPUState *cpustate, int target_el)
if (cpu_isar_feature(aa64_fgt, cpu)) {
env->cp15.scr_el3 |= SCR_FGTEN;
}
+ if (cpu_isar_feature(aa64_gcs, cpu)) {
+ env->cp15.scr_el3 |= SCR_GCSEN;
+ }
if (cpu_isar_feature(aa64_tcr2, cpu)) {
env->cp15.scr_el3 |= SCR_TCR2EN;
}
if (cpu_isar_feature(aa64_sctlr2, cpu)) {
env->cp15.scr_el3 |= SCR_SCTLR2EN;
}
+ if (cpu_isar_feature(aa64_s1pie, cpu) ||
+ cpu_isar_feature(aa64_s2pie, cpu)) {
+ env->cp15.scr_el3 |= SCR_PIEN;
+ }
+ if (cpu_isar_feature(aa64_mec, cpu)) {
+ env->cp15.scr_el3 |= SCR_MECEN;
+ }
}
if (target_el == 2) {
@@ -819,7 +833,7 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
- uint32_t psr = pstate_read(env);
+ uint64_t psr = pstate_read(env);
int i, j;
int el = arm_current_el(env);
uint64_t hcr = arm_hcr_el2_eff(env);
@@ -841,7 +855,7 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
} else {
ns_status = "";
}
- qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
+ qemu_fprintf(f, "PSTATE=%016" PRIx64 " %c%c%c%c %sEL%d%c",
psr,
psr & PSTATE_N ? 'N' : '-',
psr & PSTATE_Z ? 'Z' : '-',
@@ -858,7 +872,7 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
(FIELD_EX64(env->svcr, SVCR, SM) ? 'S' : '-'));
}
if (cpu_isar_feature(aa64_bti, cpu)) {
- qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
+ qemu_fprintf(f, " BTYPE=%d", (int)(psr & PSTATE_BTYPE) >> 10);
}
qemu_fprintf(f, "%s%s%s",
(hcr & HCR_NV) ? " NV" : "",
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 41414ac..1d4e133 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -33,6 +33,7 @@
#include "target/arm/multiprocessing.h"
#include "target/arm/gtimer.h"
#include "target/arm/cpu-sysregs.h"
+#include "target/arm/mmuidx.h"
#define EXCP_UDEF 1 /* undefined instruction */
#define EXCP_SWI 2 /* software interrupt */
@@ -267,7 +268,7 @@ typedef struct CPUArchState {
uint64_t xregs[32];
uint64_t pc;
/* PSTATE isn't an architectural register for ARMv8. However, it is
- * convenient for us to assemble the underlying state into a 32 bit format
+ * convenient for us to assemble the underlying state into a 64 bit format
* identical to the architectural format used for the SPSR. (This is also
* what the Linux kernel's 'pstate' field in signal handlers and KVM's
* 'pstate' register are.) Of the PSTATE bits:
@@ -279,7 +280,7 @@ typedef struct CPUArchState {
* SM and ZA are kept in env->svcr
* all other bits are stored in their correct places in env->pstate
*/
- uint32_t pstate;
+ uint64_t pstate;
bool aarch64; /* True if CPU is in aarch64 state; inverse of PSTATE.nRW */
bool thumb; /* True if CPU is in thumb mode; cpsr[5] */
@@ -368,6 +369,9 @@ typedef struct CPUArchState {
uint64_t tcr2_el[3];
uint64_t vtcr_el2; /* Virtualization Translation Control. */
uint64_t vstcr_el2; /* Secure Virtualization Translation Control. */
+ uint64_t pir_el[4]; /* PIRE0_EL1, PIR_EL1, PIR_EL2, PIR_EL3 */
+ uint64_t pire0_el2;
+ uint64_t s2pir_el2;
uint32_t c2_data; /* MPU data cacheable bits. */
uint32_t c2_insn; /* MPU instruction cacheable bits. */
union { /* MMU domain access control register
@@ -576,6 +580,18 @@ typedef struct CPUArchState {
/* NV2 register */
uint64_t vncr_el2;
+
+ uint64_t gcscr_el[4]; /* GCSCRE0_EL1, GCSCR_EL[123] */
+ uint64_t gcspr_el[4]; /* GCSPR_EL[0123] */
+
+ /* MEC registers */
+ uint64_t mecid_p0_el2;
+ uint64_t mecid_a0_el2;
+ uint64_t mecid_p1_el2;
+ uint64_t mecid_a1_el2;
+ uint64_t mecid_rl_a_el3;
+ uint64_t vmecid_p_el2;
+ uint64_t vmecid_a_el2;
} cp15;
struct {
@@ -630,13 +646,10 @@ typedef struct CPUArchState {
* entry process.
*/
struct {
- uint32_t syndrome; /* AArch64 format syndrome register */
- uint32_t fsr; /* AArch32 format fault status register info */
+ uint64_t syndrome; /* AArch64 format syndrome register */
uint64_t vaddress; /* virtual addr associated with exception, if any */
+ uint32_t fsr; /* AArch32 format fault status register info */
uint32_t target_el; /* EL the exception should be targeted for */
- /* If we implement EL2 we will also need to store information
- * about the intermediate physical address for stage 2 faults.
- */
} exception;
/* Information associated with an SError */
@@ -1498,6 +1511,7 @@ void pmu_init(ARMCPU *cpu);
#define PSTATE_C (1U << 29)
#define PSTATE_Z (1U << 30)
#define PSTATE_N (1U << 31)
+#define PSTATE_EXLOCK (1ULL << 34)
#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V)
#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F)
#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF | PSTATE_BTYPE)
@@ -1534,7 +1548,7 @@ static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
* interprocessing, so we don't attempt to sync with the cpsr state used by
* the 32 bit decoder.
*/
-static inline uint32_t pstate_read(CPUARMState *env)
+static inline uint64_t pstate_read(CPUARMState *env)
{
int ZF;
@@ -1544,7 +1558,7 @@ static inline uint32_t pstate_read(CPUARMState *env)
| env->pstate | env->daif | (env->btype << 10);
}
-static inline void pstate_write(CPUARMState *env, uint32_t val)
+static inline void pstate_write(CPUARMState *env, uint64_t val)
{
env->ZF = (~val) & PSTATE_Z;
env->NF = val;
@@ -1716,13 +1730,24 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
#define SCR_ENAS0 (1ULL << 36)
#define SCR_ADEN (1ULL << 37)
#define SCR_HXEN (1ULL << 38)
+#define SCR_GCSEN (1ULL << 39)
#define SCR_TRNDR (1ULL << 40)
#define SCR_ENTP2 (1ULL << 41)
#define SCR_TCR2EN (1ULL << 43)
#define SCR_SCTLR2EN (1ULL << 44)
+#define SCR_PIEN (1ULL << 45)
#define SCR_GPF (1ULL << 48)
+#define SCR_MECEN (1ULL << 49)
#define SCR_NSE (1ULL << 62)
+/* GCSCR_ELx fields */
+#define GCSCR_PCRSEL (1ULL << 0)
+#define GCSCR_RVCHKEN (1ULL << 5)
+#define GCSCR_EXLOCKEN (1ULL << 6)
+#define GCSCR_PUSHMEN (1ULL << 8)
+#define GCSCR_STREN (1ULL << 9)
+#define GCSCRE0_NTR (1ULL << 10)
+
/* Return the current FPSCR value. */
uint32_t vfp_get_fpscr(CPUARMState *env);
void vfp_set_fpscr(CPUARMState *env, uint32_t val);
@@ -2221,6 +2246,7 @@ static inline bool arm_is_el2_enabled(CPUARMState *env)
*/
uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space);
uint64_t arm_hcr_el2_eff(CPUARMState *env);
+uint64_t arm_hcr_el2_nvx_eff(CPUARMState *env);
uint64_t arm_hcrx_el2_eff(CPUARMState *env);
/*
@@ -2300,212 +2326,6 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU
-/* ARM has the following "translation regimes" (as the ARM ARM calls them):
- *
- * If EL3 is 64-bit:
- * + NonSecure EL1 & 0 stage 1
- * + NonSecure EL1 & 0 stage 2
- * + NonSecure EL2
- * + NonSecure EL2 & 0 (ARMv8.1-VHE)
- * + Secure EL1 & 0 stage 1
- * + Secure EL1 & 0 stage 2 (FEAT_SEL2)
- * + Secure EL2 (FEAT_SEL2)
- * + Secure EL2 & 0 (FEAT_SEL2)
- * + Realm EL1 & 0 stage 1 (FEAT_RME)
- * + Realm EL1 & 0 stage 2 (FEAT_RME)
- * + Realm EL2 (FEAT_RME)
- * + EL3
- * If EL3 is 32-bit:
- * + NonSecure PL1 & 0 stage 1
- * + NonSecure PL1 & 0 stage 2
- * + NonSecure PL2
- * + Secure PL1 & 0
- * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
- *
- * For QEMU, an mmu_idx is not quite the same as a translation regime because:
- * 1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes,
- * because they may differ in access permissions even if the VA->PA map is
- * the same
- * 2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
- * translation, which means that we have one mmu_idx that deals with two
- * concatenated translation regimes [this sort of combined s1+2 TLB is
- * architecturally permitted]
- * 3. we don't need to allocate an mmu_idx to translations that we won't be
- * handling via the TLB. The only way to do a stage 1 translation without
- * the immediate stage 2 translation is via the ATS or AT system insns,
- * which can be slow-pathed and always do a page table walk.
- * The only use of stage 2 translations is either as part of an s1+2
- * lookup or when loading the descriptors during a stage 1 page table walk,
- * and in both those cases we don't use the TLB.
- * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
- * translation regimes, because they map reasonably well to each other
- * and they can't both be active at the same time.
- * 5. we want to be able to use the TLB for accesses done as part of a
- * stage1 page table walk, rather than having to walk the stage2 page
- * table over and over.
- * 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
- * Never (PAN) bit within PSTATE.
- * 7. we fold together most secure and non-secure regimes for A-profile,
- * because there are no banked system registers for aarch64, so the
- * process of switching between secure and non-secure is
- * already heavyweight.
- * 8. we cannot fold together Stage 2 Secure and Stage 2 NonSecure,
- * because both are in use simultaneously for Secure EL2.
- *
- * This gives us the following list of cases:
- *
- * EL0 EL1&0 stage 1+2 (aka NS PL0 PL1&0 stage 1+2)
- * EL1 EL1&0 stage 1+2 (aka NS PL1 PL1&0 stage 1+2)
- * EL1 EL1&0 stage 1+2 +PAN (aka NS PL1 P1&0 stage 1+2 +PAN)
- * EL0 EL2&0
- * EL2 EL2&0
- * EL2 EL2&0 +PAN
- * EL2 (aka NS PL2)
- * EL3 (aka AArch32 S PL1 PL1&0)
- * AArch32 S PL0 PL1&0 (we call this EL30_0)
- * AArch32 S PL1 PL1&0 +PAN (we call this EL30_3_PAN)
- * Stage2 Secure
- * Stage2 NonSecure
- * plus one TLB per Physical address space: S, NS, Realm, Root
- *
- * for a total of 16 different mmu_idx.
- *
- * R profile CPUs have an MPU, but can use the same set of MMU indexes
- * as A profile. They only need to distinguish EL0 and EL1 (and
- * EL2 for cores like the Cortex-R52).
- *
- * M profile CPUs are rather different as they do not have a true MMU.
- * They have the following different MMU indexes:
- * User
- * Privileged
- * User, execution priority negative (ie the MPU HFNMIENA bit may apply)
- * Privileged, execution priority negative (ditto)
- * If the CPU supports the v8M Security Extension then there are also:
- * Secure User
- * Secure Privileged
- * Secure User, execution priority negative
- * Secure Privileged, execution priority negative
- *
- * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code
- * are not quite the same -- different CPU types (most notably M profile
- * vs A/R profile) would like to use MMU indexes with different semantics,
- * but since we don't ever need to use all of those in a single CPU we
- * can avoid having to set NB_MMU_MODES to "total number of A profile MMU
- * modes + total number of M profile MMU modes". The lower bits of
- * ARMMMUIdx are the core TLB mmu index, and the higher bits are always
- * the same for any particular CPU.
- * Variables of type ARMMUIdx are always full values, and the core
- * index values are in variables of type 'int'.
- *
- * Our enumeration includes at the end some entries which are not "true"
- * mmu_idx values in that they don't have corresponding TLBs and are only
- * valid for doing slow path page table walks.
- *
- * The constant names here are patterned after the general style of the names
- * of the AT/ATS operations.
- * The values used are carefully arranged to make mmu_idx => EL lookup easy.
- * For M profile we arrange them to have a bit for priv, a bit for negpri
- * and a bit for secure.
- */
-#define ARM_MMU_IDX_A 0x10 /* A profile */
-#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
-#define ARM_MMU_IDX_M 0x40 /* M profile */
-
-/* Meanings of the bits for M profile mmu idx values */
-#define ARM_MMU_IDX_M_PRIV 0x1
-#define ARM_MMU_IDX_M_NEGPRI 0x2
-#define ARM_MMU_IDX_M_S 0x4 /* Secure */
-
-#define ARM_MMU_IDX_TYPE_MASK \
- (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
-#define ARM_MMU_IDX_COREIDX_MASK 0xf
-
-typedef enum ARMMMUIdx {
- /*
- * A-profile.
- */
- ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
- ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A,
- ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
- ARMMMUIdx_E20_2 = 3 | ARM_MMU_IDX_A,
- ARMMMUIdx_E10_1_PAN = 4 | ARM_MMU_IDX_A,
- ARMMMUIdx_E20_2_PAN = 5 | ARM_MMU_IDX_A,
- ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A,
- ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A,
- ARMMMUIdx_E30_0 = 8 | ARM_MMU_IDX_A,
- ARMMMUIdx_E30_3_PAN = 9 | ARM_MMU_IDX_A,
-
- /*
- * Used for second stage of an S12 page table walk, or for descriptor
- * loads during first stage of an S1 page table walk. Note that both
- * are in use simultaneously for SecureEL2: the security state for
- * the S2 ptw is selected by the NS bit from the S1 ptw.
- */
- ARMMMUIdx_Stage2_S = 10 | ARM_MMU_IDX_A,
- ARMMMUIdx_Stage2 = 11 | ARM_MMU_IDX_A,
-
- /* TLBs with 1-1 mapping to the physical address spaces. */
- ARMMMUIdx_Phys_S = 12 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_NS = 13 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_Root = 14 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_Realm = 15 | ARM_MMU_IDX_A,
-
- /*
- * These are not allocated TLBs and are used only for AT system
- * instructions or for the first stage of an S12 page table walk.
- */
- ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
- ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
- ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
-
- /*
- * M-profile.
- */
- ARMMMUIdx_MUser = ARM_MMU_IDX_M,
- ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV,
- ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI,
- ARMMMUIdx_MPrivNegPri = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_NEGPRI,
- ARMMMUIdx_MSUser = ARMMMUIdx_MUser | ARM_MMU_IDX_M_S,
- ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S,
- ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S,
- ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S,
-} ARMMMUIdx;
-
-/*
- * Bit macros for the core-mmu-index values for each index,
- * for use when calling tlb_flush_by_mmuidx() and friends.
- */
-#define TO_CORE_BIT(NAME) \
- ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK)
-
-typedef enum ARMMMUIdxBit {
- TO_CORE_BIT(E10_0),
- TO_CORE_BIT(E20_0),
- TO_CORE_BIT(E10_1),
- TO_CORE_BIT(E10_1_PAN),
- TO_CORE_BIT(E2),
- TO_CORE_BIT(E20_2),
- TO_CORE_BIT(E20_2_PAN),
- TO_CORE_BIT(E3),
- TO_CORE_BIT(E30_0),
- TO_CORE_BIT(E30_3_PAN),
- TO_CORE_BIT(Stage2),
- TO_CORE_BIT(Stage2_S),
-
- TO_CORE_BIT(MUser),
- TO_CORE_BIT(MPriv),
- TO_CORE_BIT(MUserNegPri),
- TO_CORE_BIT(MPrivNegPri),
- TO_CORE_BIT(MSUser),
- TO_CORE_BIT(MSPriv),
- TO_CORE_BIT(MSUserNegPri),
- TO_CORE_BIT(MSPrivNegPri),
-} ARMMMUIdxBit;
-
-#undef TO_CORE_BIT
-
-#define MMU_USER_IDX 0
-
/* Indexes used when registering address spaces with cpu_address_space_init */
typedef enum ARMASIdx {
ARMASIdx_NS = 0,
@@ -2667,6 +2487,9 @@ FIELD(TBFLAG_A64, NV2_MEM_BE, 36, 1)
FIELD(TBFLAG_A64, AH, 37, 1) /* FPCR.AH */
FIELD(TBFLAG_A64, NEP, 38, 1) /* FPCR.NEP */
FIELD(TBFLAG_A64, ZT0EXC_EL, 39, 2)
+FIELD(TBFLAG_A64, GCS_EN, 41, 1)
+FIELD(TBFLAG_A64, GCS_RVCEN, 42, 1)
+FIELD(TBFLAG_A64, GCSSTR_EL, 43, 2)
/*
* Helpers for using the above. Note that only the A64 accessors use
diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c
index 3bccde2..65d6bbe 100644
--- a/target/arm/gdbstub64.c
+++ b/target/arm/gdbstub64.c
@@ -47,6 +47,7 @@ int aarch64_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
case 32:
return gdb_get_reg64(mem_buf, env->pc);
case 33:
+ /* pstate is now a 64-bit value; can we simply adjust the xml? */
return gdb_get_reg32(mem_buf, pstate_read(env));
}
/* Unknown register. */
@@ -75,6 +76,7 @@ int aarch64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return 8;
case 33:
/* CPSR */
+ /* pstate is now a 64-bit value; can we simply adjust the xml? */
pstate_write(env, tmp);
return 4;
}
diff --git a/target/arm/helper.c b/target/arm/helper.c
index b7bf45a..167f290 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -420,7 +420,9 @@ int alle1_tlbmask(CPUARMState *env)
*/
return (ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_1_GCS |
ARMMMUIdxBit_E10_0 |
+ ARMMMUIdxBit_E10_0_GCS |
ARMMMUIdxBit_Stage2 |
ARMMMUIdxBit_Stage2_S);
}
@@ -764,12 +766,22 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
if (cpu_isar_feature(aa64_ecv, cpu)) {
valid_mask |= SCR_ECVEN;
}
+ if (cpu_isar_feature(aa64_gcs, cpu)) {
+ valid_mask |= SCR_GCSEN;
+ }
if (cpu_isar_feature(aa64_tcr2, cpu)) {
valid_mask |= SCR_TCR2EN;
}
if (cpu_isar_feature(aa64_sctlr2, cpu)) {
valid_mask |= SCR_SCTLR2EN;
}
+ if (cpu_isar_feature(aa64_s1pie, cpu) ||
+ cpu_isar_feature(aa64_s2pie, cpu)) {
+ valid_mask |= SCR_PIEN;
+ }
+ if (cpu_isar_feature(aa64_mec, cpu)) {
+ valid_mask |= SCR_MECEN;
+ }
} else {
valid_mask &= ~(SCR_RW | SCR_ST);
if (cpu_isar_feature(aa32_ras, cpu)) {
@@ -804,12 +816,17 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
*/
if (changed & (SCR_NS | SCR_NSE)) {
tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
+ ARMMMUIdxBit_E10_0_GCS |
ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_0_GCS |
ARMMMUIdxBit_E10_1 |
- ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_1_GCS |
+ ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E2));
+ ARMMMUIdxBit_E20_2_GCS |
+ ARMMMUIdxBit_E2 |
+ ARMMMUIdxBit_E2_GCS));
}
}
@@ -2783,7 +2800,9 @@ static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
(arm_hcr_el2_eff(env) & HCR_E2H)) {
uint16_t mask = ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E20_0;
+ ARMMMUIdxBit_E20_2_GCS |
+ ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_0_GCS;
tlb_flush_by_mmuidx(env_cpu(env), mask);
}
raw_write(env, ri, value);
@@ -3407,15 +3426,71 @@ static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
}
+static CPAccessResult access_nv1_with_nvx(uint64_t hcr_nv)
+{
+ return hcr_nv == (HCR_NV | HCR_NV1) ? CP_ACCESS_TRAP_EL2 : CP_ACCESS_OK;
+}
+
static CPAccessResult access_nv1(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
if (arm_current_el(env) == 1) {
- uint64_t hcr_nv = arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1 | HCR_NV2);
+ return access_nv1_with_nvx(arm_hcr_el2_nvx_eff(env));
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_nv1_or_exlock_el1(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 1) {
+ uint64_t nvx = arm_hcr_el2_nvx_eff(env);
- if (hcr_nv == (HCR_NV | HCR_NV1)) {
- return CP_ACCESS_TRAP_EL2;
+ if (!isread &&
+ (env->pstate & PSTATE_EXLOCK) &&
+ (env->cp15.gcscr_el[1] & GCSCR_EXLOCKEN) &&
+ !(nvx & HCR_NV1)) {
+ return CP_ACCESS_EXLOCK;
}
+ return access_nv1_with_nvx(nvx);
+ }
+
+ /*
+ * At EL2, since VHE redirection is done at translation time,
+ * el_is_in_host is always false here, so EXLOCK does not apply.
+ */
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_exlock_el2(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ int el = arm_current_el(env);
+
+ if (el == 3) {
+ return CP_ACCESS_OK;
+ }
+
+ /*
+ * Access to the EL2 register from EL1 means NV is set, and
+ * EXLOCK has priority over an NV1 trap to EL2.
+ */
+ if (!isread &&
+ (env->pstate & PSTATE_EXLOCK) &&
+ (env->cp15.gcscr_el[el] & GCSCR_EXLOCKEN)) {
+ return CP_ACCESS_EXLOCK;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_exlock_el3(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ if (!isread &&
+ (env->pstate & PSTATE_EXLOCK) &&
+ (env->cp15.gcscr_el[3] & GCSCR_EXLOCKEN)) {
+ return CP_ACCESS_EXLOCK;
}
return CP_ACCESS_OK;
}
@@ -3591,7 +3666,7 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
{ .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
- .access = PL1_RW, .accessfn = access_nv1,
+ .access = PL1_RW, .accessfn = access_nv1_or_exlock_el1,
.nv2_redirect_offset = 0x230 | NV2_REDIR_NV1,
.vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 4, 0, 1),
.vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 4, 0, 1),
@@ -3599,7 +3674,7 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
{ .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
- .access = PL1_RW, .accessfn = access_nv1,
+ .access = PL1_RW, .accessfn = access_nv1_or_exlock_el1,
.nv2_redirect_offset = 0x160 | NV2_REDIR_NV1,
.vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 4, 0, 0),
.vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 4, 0, 0),
@@ -3888,6 +3963,16 @@ uint64_t arm_hcr_el2_eff(CPUARMState *env)
return arm_hcr_el2_eff_secstate(env, arm_security_space_below_el3(env));
}
+uint64_t arm_hcr_el2_nvx_eff(CPUARMState *env)
+{
+ uint64_t hcr = arm_hcr_el2_eff(env);
+
+ if (!(hcr & HCR_NV)) {
+ return 0; /* CONSTRAINED UNPREDICTABLE wrt NV1 */
+ }
+ return hcr & (HCR_NV2 | HCR_NV1 | HCR_NV);
+}
+
/*
* Corresponds to ARM pseudocode function ELIsInHost().
*/
@@ -3940,6 +4025,9 @@ static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (cpu_isar_feature(aa64_sctlr2, cpu)) {
valid_mask |= HCRX_SCTLR2EN;
}
+ if (cpu_isar_feature(aa64_gcs, cpu)) {
+ valid_mask |= HCRX_GCSEN;
+ }
/* Clear RES0 bits. */
env->cp15.hcrx_el2 = value & valid_mask;
@@ -4010,6 +4098,9 @@ uint64_t arm_hcrx_el2_eff(CPUARMState *env)
if (cpu_isar_feature(aa64_sctlr2, cpu)) {
hcrx |= HCRX_SCTLR2EN;
}
+ if (cpu_isar_feature(aa64_gcs, cpu)) {
+ hcrx |= HCRX_GCSEN;
+ }
return hcrx;
}
if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) {
@@ -4067,7 +4158,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
{ .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
.opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
- .access = PL2_RW,
+ .access = PL2_RW, .accessfn = access_exlock_el2,
.fieldoffset = offsetof(CPUARMState, elr_el[2]) },
{ .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
.type = ARM_CP_NV2_REDIRECT,
@@ -4085,7 +4176,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
{ .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
.opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
- .access = PL2_RW,
+ .access = PL2_RW, .accessfn = access_exlock_el2,
.fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
{ .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
@@ -4367,7 +4458,7 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
{ .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
- .access = PL3_RW,
+ .access = PL3_RW, .accessfn = access_exlock_el3,
.fieldoffset = offsetof(CPUARMState, elr_el[3]) },
{ .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
@@ -4378,7 +4469,7 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
{ .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
- .access = PL3_RW,
+ .access = PL3_RW, .accessfn = access_exlock_el3,
.fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
{ .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
@@ -5000,6 +5091,96 @@ static const ARMCPRegInfo nmi_reginfo[] = {
.resetfn = arm_cp_reset_ignore },
};
+static CPAccessResult mecid_access(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ int el = arm_current_el(env);
+
+ if (el == 2) {
+ if (arm_security_space(env) != ARMSS_Realm) {
+ return CP_ACCESS_UNDEFINED;
+ }
+
+ if (!(env->cp15.scr_el3 & SCR_MECEN)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ }
+
+ return CP_ACCESS_OK;
+}
+
+static void mecid_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ value = extract64(value, 0, MECID_WIDTH);
+ raw_write(env, ri, value);
+}
+
+static CPAccessResult cipae_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ switch (arm_security_space(env)) {
+ case ARMSS_Root: /* EL3 */
+ case ARMSS_Realm: /* Realm EL2 */
+ return CP_ACCESS_OK;
+ default:
+ return CP_ACCESS_UNDEFINED;
+ }
+}
+
+static const ARMCPRegInfo mec_reginfo[] = {
+ { .name = "MECIDR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 7, .crn = 10, .crm = 8,
+ .access = PL2_R, .type = ARM_CP_CONST | ARM_CP_NV_NO_TRAP,
+ .resetvalue = MECID_WIDTH - 1 },
+ { .name = "MECID_P0_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 0, .crn = 10, .crm = 8,
+ .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
+ .accessfn = mecid_access, .writefn = mecid_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.mecid_p0_el2) },
+ { .name = "MECID_A0_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 1, .crn = 10, .crm = 8,
+ .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
+ .accessfn = mecid_access, .writefn = mecid_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.mecid_a0_el2) },
+ { .name = "MECID_P1_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 2, .crn = 10, .crm = 8,
+ .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
+ .accessfn = mecid_access, .writefn = mecid_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.mecid_p1_el2) },
+ { .name = "MECID_A1_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 10, .crm = 8,
+ .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
+ .accessfn = mecid_access, .writefn = mecid_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.mecid_a1_el2) },
+ { .name = "MECID_RL_A_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .opc2 = 1, .crn = 10, .crm = 10,
+ .access = PL3_RW, .accessfn = mecid_access,
+ .writefn = mecid_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.mecid_rl_a_el3) },
+ { .name = "VMECID_P_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 0, .crn = 10, .crm = 9,
+ .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
+ .accessfn = mecid_access, .writefn = mecid_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.vmecid_p_el2) },
+ { .name = "VMECID_A_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 1, .crn = 10, .crm = 9,
+ .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
+ .accessfn = mecid_access, .writefn = mecid_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.vmecid_a_el2) },
+ { .name = "DC_CIPAE", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 14, .opc2 = 0,
+ .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_NV_NO_TRAP,
+ .accessfn = cipae_access },
+};
+
+static const ARMCPRegInfo mec_mte_reginfo[] = {
+ { .name = "DC_CIGDPAE", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 14, .opc2 = 7,
+ .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_NV_NO_TRAP,
+ .accessfn = cipae_access },
+};
+
#ifndef CONFIG_USER_ONLY
/*
* We don't know until after realize whether there's a GICv3
@@ -5842,6 +6023,9 @@ static void sctlr2_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
uint64_t valid_mask = 0;
+ if (cpu_isar_feature(aa64_mec, env_archcpu(env))) {
+ valid_mask |= SCTLR2_EMEC;
+ }
value &= valid_mask;
raw_write(env, ri, value);
}
@@ -5851,6 +6035,9 @@ static void sctlr2_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
uint64_t valid_mask = 0;
+ if (cpu_isar_feature(aa64_mec, env_archcpu(env))) {
+ valid_mask |= SCTLR2_EMEC;
+ }
value &= valid_mask;
raw_write(env, ri, value);
}
@@ -5902,8 +6089,12 @@ static CPAccessResult tcr2_el1_access(CPUARMState *env, const ARMCPRegInfo *ri,
static void tcr2_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
+ ARMCPU *cpu = env_archcpu(env);
uint64_t valid_mask = 0;
+ if (cpu_isar_feature(aa64_s1pie, cpu)) {
+ valid_mask |= TCR2_PIE;
+ }
value &= valid_mask;
raw_write(env, ri, value);
}
@@ -5911,8 +6102,15 @@ static void tcr2_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void tcr2_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
+ ARMCPU *cpu = env_archcpu(env);
uint64_t valid_mask = 0;
+ if (cpu_isar_feature(aa64_s1pie, cpu)) {
+ valid_mask |= TCR2_PIE;
+ }
+ if (cpu_isar_feature(aa64_mec, cpu)) {
+ valid_mask |= TCR2_AMEC0 | TCR2_AMEC1;
+ }
value &= valid_mask;
raw_write(env, ri, value);
}
@@ -5933,6 +6131,64 @@ static const ARMCPRegInfo tcr2_reginfo[] = {
.fieldoffset = offsetof(CPUARMState, cp15.tcr2_el[2]) },
};
+static CPAccessResult pien_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_feature(env, ARM_FEATURE_EL3)
+ && !(env->cp15.scr_el3 & SCR_PIEN)
+ && arm_current_el(env) < 3) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult pien_el1_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ CPAccessResult ret = access_tvm_trvm(env, ri, isread);
+ if (ret == CP_ACCESS_OK) {
+ ret = pien_access(env, ri, isread);
+ }
+ return ret;
+}
+
+static const ARMCPRegInfo s1pie_reginfo[] = {
+ { .name = "PIR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .opc2 = 3, .crn = 10, .crm = 2,
+ .access = PL1_RW, .accessfn = pien_el1_access,
+ .fgt = FGT_NPIR_EL1, .nv2_redirect_offset = 0x2a0 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 2, 3),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 3),
+ .fieldoffset = offsetof(CPUARMState, cp15.pir_el[1]) },
+ { .name = "PIR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 10, .crm = 2,
+ .access = PL2_RW, .accessfn = pien_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.pir_el[2]) },
+ { .name = "PIR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .opc2 = 3, .crn = 10, .crm = 2,
+ .access = PL3_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.pir_el[3]) },
+ { .name = "PIRE0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 10, .crm = 2,
+ .access = PL1_RW, .accessfn = pien_el1_access,
+ .fgt = FGT_NPIRE0_EL1, .nv2_redirect_offset = 0x290 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 2, 2),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 2),
+ .fieldoffset = offsetof(CPUARMState, cp15.pir_el[0]) },
+ { .name = "PIRE0_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 2, .crn = 10, .crm = 2,
+ .access = PL2_RW, .accessfn = pien_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.pire0_el2) },
+};
+
+static const ARMCPRegInfo s2pie_reginfo[] = {
+ { .name = "S2PIR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 5, .crn = 10, .crm = 2,
+ .access = PL2_RW, .accessfn = pien_access,
+ .nv2_redirect_offset = 0x2b0,
+ .fieldoffset = offsetof(CPUARMState, cp15.s2pir_el2) },
+};
+
void register_cp_regs_for_features(ARMCPU *cpu)
{
/* Register all the coprocessor registers based on feature bits */
@@ -7165,6 +7421,19 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, tcr2_reginfo);
}
+ if (cpu_isar_feature(aa64_s1pie, cpu)) {
+ define_arm_cp_regs(cpu, s1pie_reginfo);
+ }
+ if (cpu_isar_feature(aa64_s2pie, cpu)) {
+ define_arm_cp_regs(cpu, s2pie_reginfo);
+ }
+ if (cpu_isar_feature(aa64_mec, cpu)) {
+ define_arm_cp_regs(cpu, mec_reginfo);
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ define_arm_cp_regs(cpu, mec_mte_reginfo);
+ }
+ }
+
if (cpu_isar_feature(any_predinv, cpu)) {
define_arm_cp_regs(cpu, predinv_reginfo);
}
@@ -7174,6 +7443,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
}
define_pm_cpregs(cpu);
+ define_gcs_cpregs(cpu);
}
/*
@@ -8800,7 +9070,7 @@ static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
}
}
-static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
+uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
{
uint32_t ret = cpsr_read(env);
@@ -8815,6 +9085,24 @@ static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
return ret;
}
+void cpsr_write_from_spsr_elx(CPUARMState *env, uint32_t val)
+{
+ uint32_t mask;
+
+ /* Save SPSR_ELx.SS into PSTATE. */
+ env->pstate = (env->pstate & ~PSTATE_SS) | (val & PSTATE_SS);
+ val &= ~PSTATE_SS;
+
+ /* Move DIT to the correct location for CPSR */
+ if (val & PSTATE_DIT) {
+ val &= ~PSTATE_DIT;
+ val |= CPSR_DIT;
+ }
+
+ mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
+ cpsr_write(env, val, mask, CPSRWriteRaw);
+}
+
static bool syndrome_is_sync_extabt(uint32_t syndrome)
{
/* Return true if this syndrome value is a synchronous external abort */
@@ -8847,8 +9135,8 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
CPUARMState *env = &cpu->env;
unsigned int new_el = env->exception.target_el;
vaddr addr = env->cp15.vbar_el[new_el];
- unsigned int new_mode = aarch64_pstate_mode(new_el, true);
- unsigned int old_mode;
+ uint64_t new_mode = aarch64_pstate_mode(new_el, true);
+ uint64_t old_mode;
unsigned int cur_el = arm_current_el(env);
int rt;
@@ -8891,8 +9179,13 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
} else {
addr += 0x600;
}
- } else if (pstate_read(env) & PSTATE_SP) {
- addr += 0x200;
+ } else {
+ if (pstate_read(env) & PSTATE_SP) {
+ addr += 0x200;
+ }
+ if (is_a64(env) && (env->cp15.gcscr_el[new_el] & GCSCR_EXLOCKEN)) {
+ new_mode |= PSTATE_EXLOCK;
+ }
}
switch (cs->exception_index) {
@@ -8996,7 +9289,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
* If NV2 is disabled, change SPSR when NV,NV1 == 1,0 (I_ZJRNN)
* If NV2 is enabled, change SPSR when NV is 1 (I_DBTLM)
*/
- old_mode = deposit32(old_mode, 2, 2, 2);
+ old_mode = deposit64(old_mode, 2, 2, 2);
}
}
} else {
@@ -9009,7 +9302,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
}
env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
- qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%x\n", old_mode);
+ qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%" PRIx64 "\n", old_mode);
qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
env->elr_el[new_el]);
@@ -9063,7 +9356,8 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
env->pc = addr;
- qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
+ qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64
+ " PSTATE 0x%" PRIx64 "\n",
new_el, env->pc, pstate_read(env));
}
@@ -9119,7 +9413,7 @@ void arm_cpu_do_interrupt(CPUState *cs)
new_el);
if (qemu_loglevel_mask(CPU_LOG_INT)
&& !excp_is_internal(cs->exception_index)) {
- qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
+ qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx64 "\n",
syn_get_ec(env->exception.syndrome),
env->exception.syndrome);
}
@@ -9309,21 +9603,34 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
bool el1_is_aa32)
{
uint64_t tcr = regime_tcr(env, mmu_idx);
- bool epd, hpd, tsz_oob, ds, ha, hd;
+ bool epd, hpd, tsz_oob, ds, ha, hd, pie = false;
int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
ARMGranuleSize gran;
ARMCPU *cpu = env_archcpu(env);
bool stage2 = regime_is_stage2(mmu_idx);
+ int r_el = regime_el(mmu_idx);
if (!regime_has_2_ranges(mmu_idx)) {
select = 0;
tsz = extract32(tcr, 0, 6);
gran = tg0_to_gran_size(extract32(tcr, 14, 2));
if (stage2) {
- /* VTCR_EL2 */
- hpd = false;
+ /*
+ * Stage2 does not have hierarchical permissions.
+ * Thus disabling them makes things easier during ptw.
+ */
+ hpd = true;
+ pie = extract64(tcr, 36, 1) && cpu_isar_feature(aa64_s2pie, cpu);
} else {
hpd = extract32(tcr, 24, 1);
+ if (r_el == 3) {
+ pie = (extract64(tcr, 35, 1)
+ && cpu_isar_feature(aa64_s1pie, cpu));
+ } else {
+ pie = ((env->cp15.tcr2_el[2] & TCR2_PIE)
+ && (!arm_feature(env, ARM_FEATURE_EL3)
+ || (env->cp15.scr_el3 & SCR_TCR2EN)));
+ }
}
epd = false;
sh = extract32(tcr, 12, 2);
@@ -9360,10 +9667,16 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
ds = extract64(tcr, 59, 1);
if (e0pd && cpu_isar_feature(aa64_e0pd, cpu) &&
- regime_is_user(env, mmu_idx)) {
+ regime_is_user(mmu_idx)) {
epd = true;
}
+
+ pie = ((env->cp15.tcr2_el[r_el] & TCR2_PIE)
+ && (!arm_feature(env, ARM_FEATURE_EL3)
+ || (env->cp15.scr_el3 & SCR_TCR2EN))
+ && (r_el == 2 || (arm_hcrx_el2_eff(env) & HCRX_TCR2EN)));
}
+ hpd |= pie;
gran = sanitize_gran_size(cpu, gran, stage2);
@@ -9442,6 +9755,7 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
.ha = ha,
.hd = ha && hd,
.gran = gran,
+ .pie = pie,
};
}
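The S1PIE enable computed above for the two-range regimes reduces to three gates. A standalone sketch, with illustrative parameter names standing in for the register reads in the code above:

    #include <assert.h>
    #include <stdbool.h>

    static bool s1pie_enabled(bool tcr2_pie, bool have_el3, bool scr_tcr2en,
                              bool regime_is_el2, bool hcrx_tcr2en)
    {
        return tcr2_pie                          /* TCR2_ELx.PIE */
            && (!have_el3 || scr_tcr2en)         /* not disabled by SCR_EL3.TCR2EN */
            && (regime_is_el2 || hcrx_tcr2en);   /* nor, for EL1&0, by HCRX_EL2.TCR2EN */
    }

    int main(void)
    {
        assert(s1pie_enabled(true, false, false, false, true));
        assert(!s1pie_enabled(true, true, false, false, true)); /* SCR_EL3.TCR2EN = 0 */
        return 0;
    }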
@@ -9556,33 +9870,6 @@ int fp_exception_el(CPUARMState *env, int cur_el)
return 0;
}
-/* Return the exception level we're running at if this is our mmu_idx */
-int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
-{
- if (mmu_idx & ARM_MMU_IDX_M) {
- return mmu_idx & ARM_MMU_IDX_M_PRIV;
- }
-
- switch (mmu_idx) {
- case ARMMMUIdx_E10_0:
- case ARMMMUIdx_E20_0:
- case ARMMMUIdx_E30_0:
- return 0;
- case ARMMMUIdx_E10_1:
- case ARMMMUIdx_E10_1_PAN:
- return 1;
- case ARMMMUIdx_E2:
- case ARMMMUIdx_E20_2:
- case ARMMMUIdx_E20_2_PAN:
- return 2;
- case ARMMMUIdx_E3:
- case ARMMMUIdx_E30_3_PAN:
- return 3;
- default:
- g_assert_not_reached();
- }
-}
-
#ifndef CONFIG_TCG
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 1d958db..f539bbe 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -34,6 +34,7 @@
#include "system/memory.h"
#include "syndrome.h"
#include "cpu-features.h"
+#include "mmuidx-internal.h"
/* register banks for CPU modes */
#define BANK_USRSYS 0
@@ -250,6 +251,7 @@ FIELD(VSTCR, SA, 30, 1)
#define HCRX_MSCEN (1ULL << 11)
#define HCRX_TCR2EN (1ULL << 14)
#define HCRX_SCTLR2EN (1ULL << 15)
+#define HCRX_GCSEN (1ULL << 22)
#define HPFAR_NS (1ULL << 63)
@@ -304,14 +306,14 @@ FIELD(CNTHCTL, CNTPMASK, 19, 1)
* and never returns because we will longjump back up to the CPU main loop.
*/
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
- uint32_t syndrome, uint32_t target_el);
+ uint64_t syndrome, uint32_t target_el);
/*
* Similarly, but also use unwinding to restore cpu state.
*/
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
- uint32_t syndrome, uint32_t target_el,
- uintptr_t ra);
+ uint64_t syndrome, uint32_t target_el,
+ uintptr_t ra);
/*
* For AArch64, map a given EL to an index in the banked_spsr array.
@@ -752,6 +754,7 @@ struct ARMMMUFaultInfo {
bool s1ptw;
bool s1ns;
bool ea;
+ bool dirtybit; /* FEAT_S1PIE, FEAT_S2PIE */
};
/**
@@ -983,8 +986,6 @@ static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
return mmu_idx | ARM_MMU_IDX_A;
}
-int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
-
/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
@@ -1027,108 +1028,10 @@ static inline void arm_call_el_change_hook(ARMCPU *cpu)
}
}
-/*
- * Return true if this address translation regime has two ranges.
- * Note that this will not return the correct answer for AArch32
- * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is
- * never called from a context where EL3 can be AArch32. (The
- * correct return value for ARMMMUIdx_E3 would be different for
- * that case, so we can't just make the function return the
- * correct value anyway; we would need an extra "bool e3_is_aarch32"
- * argument which all the current callsites would pass as 'false'.)
- */
-static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
-{
- switch (mmu_idx) {
- case ARMMMUIdx_Stage1_E0:
- case ARMMMUIdx_Stage1_E1:
- case ARMMMUIdx_Stage1_E1_PAN:
- case ARMMMUIdx_E10_0:
- case ARMMMUIdx_E10_1:
- case ARMMMUIdx_E10_1_PAN:
- case ARMMMUIdx_E20_0:
- case ARMMMUIdx_E20_2:
- case ARMMMUIdx_E20_2_PAN:
- return true;
- default:
- return false;
- }
-}
-
-static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
-{
- switch (mmu_idx) {
- case ARMMMUIdx_Stage1_E1_PAN:
- case ARMMMUIdx_E10_1_PAN:
- case ARMMMUIdx_E20_2_PAN:
- case ARMMMUIdx_E30_3_PAN:
- return true;
- default:
- return false;
- }
-}
-
-static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
-{
- return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
-}
-
-/* Return the exception level which controls this address translation regime */
-static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
-{
- switch (mmu_idx) {
- case ARMMMUIdx_E20_0:
- case ARMMMUIdx_E20_2:
- case ARMMMUIdx_E20_2_PAN:
- case ARMMMUIdx_Stage2:
- case ARMMMUIdx_Stage2_S:
- case ARMMMUIdx_E2:
- return 2;
- case ARMMMUIdx_E3:
- case ARMMMUIdx_E30_0:
- case ARMMMUIdx_E30_3_PAN:
- return 3;
- case ARMMMUIdx_E10_0:
- case ARMMMUIdx_Stage1_E0:
- case ARMMMUIdx_Stage1_E1:
- case ARMMMUIdx_Stage1_E1_PAN:
- case ARMMMUIdx_E10_1:
- case ARMMMUIdx_E10_1_PAN:
- case ARMMMUIdx_MPrivNegPri:
- case ARMMMUIdx_MUserNegPri:
- case ARMMMUIdx_MPriv:
- case ARMMMUIdx_MUser:
- case ARMMMUIdx_MSPrivNegPri:
- case ARMMMUIdx_MSUserNegPri:
- case ARMMMUIdx_MSPriv:
- case ARMMMUIdx_MSUser:
- return 1;
- default:
- g_assert_not_reached();
- }
-}
-
-static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
-{
- switch (mmu_idx) {
- case ARMMMUIdx_E10_0:
- case ARMMMUIdx_E20_0:
- case ARMMMUIdx_E30_0:
- case ARMMMUIdx_Stage1_E0:
- case ARMMMUIdx_MUser:
- case ARMMMUIdx_MSUser:
- case ARMMMUIdx_MUserNegPri:
- case ARMMMUIdx_MSUserNegPri:
- return true;
- default:
- return false;
- }
-}
-
/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
- return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
+ return env->cp15.sctlr_el[regime_el(mmu_idx)];
}
/*
@@ -1160,13 +1063,13 @@ static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
return v;
}
- return env->cp15.tcr_el[regime_el(env, mmu_idx)];
+ return env->cp15.tcr_el[regime_el(mmu_idx)];
}
/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
- int el = regime_el(env, mmu_idx);
+ int el = regime_el(mmu_idx);
if (el == 2 || arm_el_is_aa64(env, el)) {
return true;
}
@@ -1378,25 +1281,6 @@ ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif
-/**
- * arm_mmu_idx_is_stage1_of_2:
- * @mmu_idx: The ARMMMUIdx to test
- *
- * Return true if @mmu_idx is a NOTLB mmu_idx that is the
- * first stage of a two stage regime.
- */
-static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
-{
- switch (mmu_idx) {
- case ARMMMUIdx_Stage1_E0:
- case ARMMMUIdx_Stage1_E1:
- case ARMMMUIdx_Stage1_E1_PAN:
- return true;
- default:
- return false;
- }
-}
-
static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
const ARMISARegisters *id)
{
@@ -1491,7 +1375,7 @@ static inline int arm_granule_bits(ARMGranuleSize gran)
/*
* Parameters of a given virtual address, as extracted from the
- * translation control register (TCR) for a given regime.
+ * translation controls for a given regime.
*/
typedef struct ARMVAParameters {
unsigned tsz : 8;
@@ -1506,6 +1390,7 @@ typedef struct ARMVAParameters {
bool ha : 1;
bool hd : 1;
ARMGranuleSize gran : 2;
+ bool pie : 1;
} ARMVAParameters;
/**
@@ -1576,6 +1461,13 @@ typedef struct ARMCacheAttrs {
typedef struct GetPhysAddrResult {
CPUTLBEntryFull f;
ARMCacheAttrs cacheattrs;
+ /*
+ * For ARMMMUIdx_Stage2*, the protection installed into f.prot
+ * is the result for AccessType_TTW, i.e. the page table walk itself.
+ * The protection installed into s2prot is the one to be merged
+ * with the stage1 protection.
+ */
+ int s2prot;
} GetPhysAddrResult;
/**
@@ -1892,6 +1784,8 @@ void define_tlb_insn_regs(ARMCPU *cpu);
void define_at_insn_regs(ARMCPU *cpu);
/* Add the cpreg definitions for PM cpregs */
void define_pm_cpregs(ARMCPU *cpu);
+/* Add the cpreg definitions for GCS cpregs */
+void define_gcs_cpregs(ARMCPU *cpu);
/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
@@ -2003,8 +1897,13 @@ void vfp_clear_float_status_exc_flags(CPUARMState *env);
*/
void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask);
bool arm_pan_enabled(CPUARMState *env);
+uint32_t cpsr_read_for_spsr_elx(CPUARMState *env);
+void cpsr_write_from_spsr_elx(CPUARMState *env, uint32_t val);
/* Compare uint64_t for qsort and bsearch. */
int compare_u64(const void *a, const void *b);
+/* Used in FEAT_MEC to set the MECIDWidthm1 field in the MECIDR_EL2 register. */
+#define MECID_WIDTH 16
+
#endif
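A standalone sketch of what MECID_WIDTH implies for the mecid_write()/MECIDR_EL2 code added in helper.c: writes keep only the low 16 bits, and the ID register resets to the width minus one (plain C; extract64() is modelled with a mask):

    #include <assert.h>
    #include <stdint.h>

    #define MECID_WIDTH 16

    static uint64_t mecid_write_mask(uint64_t value)
    {
        return value & ((1ULL << MECID_WIDTH) - 1);  /* extract64(value, 0, 16) */
    }

    int main(void)
    {
        assert(mecid_write_mask(0xffff0123) == 0x0123); /* high bits discarded */
        assert(MECID_WIDTH - 1 == 15);                  /* MECIDR_EL2.MECIDWidthm1 */
        return 0;
    }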
diff --git a/target/arm/machine.c b/target/arm/machine.c
index 6666a0c..44a0cf8 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -816,6 +816,80 @@ static const VMStateInfo vmstate_cpsr = {
.put = put_cpsr,
};
+static int get_pstate64(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+ uint64_t val = qemu_get_be64(f);
+
+ env->aarch64 = ((val & PSTATE_nRW) == 0);
+ if (is_a64(env)) {
+ pstate_write(env, val);
+ } else {
+ cpsr_write_from_spsr_elx(env, val);
+ }
+ return 0;
+}
+
+static int put_pstate64(QEMUFile *f, void *opaque, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+ uint64_t val;
+
+ if (is_a64(env)) {
+ val = pstate_read(env);
+ } else {
+ val = cpsr_read_for_spsr_elx(env);
+ }
+ qemu_put_be64(f, val);
+ return 0;
+}
+
+static bool pstate64_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+ uint64_t val;
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return false;
+ }
+ if (is_a64(env)) {
+ val = pstate_read(env);
+ } else {
+ val = cpsr_read_for_spsr_elx(env);
+ if (val & PSTATE_SS) {
+ return true;
+ }
+ }
+ return val > UINT32_MAX;
+}
+
+static const VMStateDescription vmstate_pstate64 = {
+ .name = "cpu/pstate64",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = pstate64_needed,
+ .fields = (const VMStateField[]) {
+ {
+ .name = "pstate64",
+ .version_id = 0,
+ .size = sizeof(uint64_t),
+ .info = &(const VMStateInfo) {
+ .name = "pstate64",
+ .get = get_pstate64,
+ .put = put_pstate64,
+ },
+ .flags = VMS_SINGLE,
+ .offset = 0,
+ },
+ VMSTATE_END_OF_LIST()
+ },
+};
+
static int get_power(QEMUFile *f, void *opaque, size_t size,
const VMStateField *field)
{
@@ -848,6 +922,23 @@ static const VMStateInfo vmstate_powered_off = {
.put = put_power,
};
+static bool syndrome64_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ return cpu->env.exception.syndrome > UINT32_MAX;
+}
+
+static const VMStateDescription vmstate_syndrome64 = {
+ .name = "cpu/syndrome64",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = syndrome64_needed,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT64(env.exception.syndrome, ARMCPU),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
static int cpu_pre_save(void *opaque)
{
ARMCPU *cpu = opaque;
@@ -1035,6 +1126,12 @@ const VMStateDescription vmstate_arm_cpu = {
VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
VMSTATE_UINT64(env.pc, ARMCPU),
+ /*
+ * If any bits are set in the upper 32 bits of cpsr/pstate,
+ * or if the cpu is in aa32 mode and PSTATE.SS is set, then
+ * the cpu/pstate64 subsection will override this with the
+ * full 64 bit state.
+ */
{
.name = "cpsr",
.version_id = 0,
@@ -1065,7 +1162,19 @@ const VMStateDescription vmstate_arm_cpu = {
VMSTATE_UINT64(env.exclusive_val, ARMCPU),
VMSTATE_UINT64(env.exclusive_high, ARMCPU),
VMSTATE_UNUSED(sizeof(uint64_t)),
- VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
+ /*
+ * If any bits are set in the upper 32 bits of syndrome,
+ * then the cpu/syndrome64 subsection will override this
+ * with the full 64 bit state.
+ */
+ {
+ .name = "env.exception.syndrome",
+ .version_id = 0,
+ .size = sizeof(uint32_t),
+ .info = &vmstate_info_uint32,
+ .flags = VMS_SINGLE,
+ .offset = offsetoflow32(ARMCPU, env.exception.syndrome),
+ },
VMSTATE_UINT32(env.exception.fsr, ARMCPU),
VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
@@ -1098,6 +1207,8 @@ const VMStateDescription vmstate_arm_cpu = {
&vmstate_serror,
&vmstate_irq_line_state,
&vmstate_wfxt_timer,
+ &vmstate_syndrome64,
+ &vmstate_pstate64,
NULL
}
};
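
[Editor's illustrative sketch, not part of the patch: pstate64_needed() above can be read as two conditions; the same logic written straight-line, assuming the same helpers and includes as machine.c.]

static bool sketch_pstate64_needed(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return false;                          /* M-profile never needs it */
    }
    if (is_a64(env)) {
        return pstate_read(env) > UINT32_MAX;  /* upper 32 bits in use */
    }
    /* AArch32: PSTATE.SS has no CPSR encoding, so it forces the subsection */
    return (cpsr_read_for_spsr_elx(env) & PSTATE_SS) != 0;
}
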
diff --git a/target/arm/meson.build b/target/arm/meson.build
index 638ee62..3df7e03 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -6,7 +6,12 @@ arm_ss.add(files(
arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
'cpu64.c',
- 'gdbstub64.c'))
+ 'gdbstub64.c'
+))
+
+arm_common_ss.add(files(
+ 'mmuidx.c',
+))
arm_system_ss = ss.source_set()
arm_common_system_ss = ss.source_set()
@@ -22,6 +27,7 @@ arm_user_ss.add(when: 'TARGET_AARCH64', if_false: files(
'cpu32-stubs.c',
))
arm_user_ss.add(files(
+ 'cpregs-gcs.c',
'cpregs-pmu.c',
'debug_helper.c',
'helper.c',
@@ -42,6 +48,7 @@ arm_common_system_ss.add(files(
'arch_dump.c',
'arm-powerctl.c',
'cortex-regs.c',
+ 'cpregs-gcs.c',
'cpregs-pmu.c',
'cpu-irq.c',
'debug_helper.c',
diff --git a/target/arm/mmuidx-internal.h b/target/arm/mmuidx-internal.h
new file mode 100644
index 0000000..962b053
--- /dev/null
+++ b/target/arm/mmuidx-internal.h
@@ -0,0 +1,113 @@
+/*
+ * QEMU Arm software mmu index internal definitions
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef TARGET_ARM_MMUIDX_INTERNAL_H
+#define TARGET_ARM_MMUIDX_INTERNAL_H
+
+#include "mmuidx.h"
+#include "tcg/debug-assert.h"
+#include "hw/registerfields.h"
+
+
+FIELD(MMUIDXINFO, EL, 0, 2)
+FIELD(MMUIDXINFO, ELVALID, 2, 1)
+FIELD(MMUIDXINFO, REL, 3, 2)
+FIELD(MMUIDXINFO, RELVALID, 5, 1)
+FIELD(MMUIDXINFO, 2RANGES, 6, 1)
+FIELD(MMUIDXINFO, PAN, 7, 1)
+FIELD(MMUIDXINFO, USER, 8, 1)
+FIELD(MMUIDXINFO, STAGE1, 9, 1)
+FIELD(MMUIDXINFO, STAGE2, 10, 1)
+FIELD(MMUIDXINFO, GCS, 11, 1)
+FIELD(MMUIDXINFO, TG, 12, 5)
+
+extern const uint32_t arm_mmuidx_table[ARM_MMU_IDX_M + 8];
+
+#define arm_mmuidx_is_valid(x) ((unsigned)(x) < ARRAY_SIZE(arm_mmuidx_table))
+
+/* Return the exception level associated with this mmu index. */
+static inline int arm_mmu_idx_to_el(ARMMMUIdx idx)
+{
+ tcg_debug_assert(arm_mmuidx_is_valid(idx));
+ tcg_debug_assert(FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, ELVALID));
+ return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, EL);
+}
+
+/*
+ * Return the exception level for the address translation regime
+ * associated with this mmu index.
+ */
+static inline uint32_t regime_el(ARMMMUIdx idx)
+{
+ tcg_debug_assert(arm_mmuidx_is_valid(idx));
+ tcg_debug_assert(FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, RELVALID));
+ return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, REL);
+}
+
+/*
+ * Return true if this address translation regime has two ranges.
+ * Note that this will not return the correct answer for AArch32
+ * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is
+ * never called from a context where EL3 can be AArch32. (The
+ * correct return value for ARMMMUIdx_E3 would be different for
+ * that case, so we can't just make the function return the
+ * correct value anyway; we would need an extra "bool e3_is_aarch32"
+ * argument which all the current callsites would pass as 'false'.)
+ */
+static inline bool regime_has_2_ranges(ARMMMUIdx idx)
+{
+ tcg_debug_assert(arm_mmuidx_is_valid(idx));
+ return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, 2RANGES);
+}
+
+/* Return true if Privileged Access Never is enabled for this mmu index. */
+static inline bool regime_is_pan(ARMMMUIdx idx)
+{
+ tcg_debug_assert(arm_mmuidx_is_valid(idx));
+ return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, PAN);
+}
+
+/*
+ * Return true if the exception level associated with this mmu index is 0.
+ * Differs from arm_mmu_idx_to_el(idx) == 0 in that this allows querying
+ * Stage1 and Stage2 mmu indexes.
+ */
+static inline bool regime_is_user(ARMMMUIdx idx)
+{
+ tcg_debug_assert(arm_mmuidx_is_valid(idx));
+ return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, USER);
+}
+
+/* Return true if this mmu index is stage 1 of a 2-stage translation. */
+static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx idx)
+{
+ tcg_debug_assert(arm_mmuidx_is_valid(idx));
+ return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, STAGE1);
+}
+
+/* Return true if this mmu index is stage 2 of a 2-stage translation. */
+static inline bool regime_is_stage2(ARMMMUIdx idx)
+{
+ tcg_debug_assert(arm_mmuidx_is_valid(idx));
+ return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, STAGE2);
+}
+
+/* Return true if this mmu index implies AccessType_GCS. */
+static inline bool regime_is_gcs(ARMMMUIdx idx)
+{
+ tcg_debug_assert(arm_mmuidx_is_valid(idx));
+ return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, GCS);
+}
+
+/* Return the GCS MMUIdx for a given regime. */
+static inline ARMMMUIdx regime_to_gcs(ARMMMUIdx idx)
+{
+ tcg_debug_assert(arm_mmuidx_is_valid(idx));
+ uint32_t core = FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, TG);
+ tcg_debug_assert(core != 0); /* core 0 is E10_0, not a GCS index */
+ return core | ARM_MMU_IDX_A;
+}
+
+#endif /* TARGET_ARM_MMUIDX_INTERNAL_H */
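
[Editor's illustrative sketch, not part of the patch: a short usage example for the accessors above, assuming the table values defined in mmuidx.c and the usual QEMU includes for assert.]

static void sketch_mmuidx_queries(void)
{
    ARMMMUIdx idx = ARMMMUIdx_E10_1;
    ARMMMUIdx gcs = regime_to_gcs(idx);      /* TG field -> ARMMMUIdx_E10_1_GCS */

    assert(regime_el(idx) == 1);
    assert(regime_is_gcs(gcs));
    assert(regime_el(gcs) == regime_el(idx));
    assert(!regime_is_stage2(idx));
}
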
diff --git a/target/arm/mmuidx.c b/target/arm/mmuidx.c
new file mode 100644
index 0000000..a4663c8
--- /dev/null
+++ b/target/arm/mmuidx.c
@@ -0,0 +1,66 @@
+/*
+ * QEMU Arm software mmu index definitions
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "mmuidx-internal.h"
+
+
+#define EL(X) ((X << R_MMUIDXINFO_EL_SHIFT) | R_MMUIDXINFO_ELVALID_MASK | \
+ ((X == 0) << R_MMUIDXINFO_USER_SHIFT))
+#define REL(X) ((X << R_MMUIDXINFO_REL_SHIFT) | R_MMUIDXINFO_RELVALID_MASK)
+#define R2 R_MMUIDXINFO_2RANGES_MASK
+#define PAN R_MMUIDXINFO_PAN_MASK
+#define USER R_MMUIDXINFO_USER_MASK
+#define S1 R_MMUIDXINFO_STAGE1_MASK
+#define S2 R_MMUIDXINFO_STAGE2_MASK
+#define GCS R_MMUIDXINFO_GCS_MASK
+#define TG(X) \
+ ((ARMMMUIdx_##X##_GCS & ARM_MMU_IDX_COREIDX_MASK) << R_MMUIDXINFO_TG_SHIFT)
+
+const uint32_t arm_mmuidx_table[ARM_MMU_IDX_M + 8] = {
+ /*
+ * A-profile.
+ */
+ [ARMMMUIdx_E10_0] = EL(0) | REL(1) | R2 | TG(E10_0),
+ [ARMMMUIdx_E10_0_GCS] = EL(0) | REL(1) | R2 | GCS,
+ [ARMMMUIdx_E10_1] = EL(1) | REL(1) | R2 | TG(E10_1),
+ [ARMMMUIdx_E10_1_PAN] = EL(1) | REL(1) | R2 | TG(E10_1) | PAN,
+ [ARMMMUIdx_E10_1_GCS] = EL(1) | REL(1) | R2 | GCS,
+
+ [ARMMMUIdx_E20_0] = EL(0) | REL(2) | R2 | TG(E20_0),
+ [ARMMMUIdx_E20_0_GCS] = EL(0) | REL(2) | R2 | GCS,
+ [ARMMMUIdx_E20_2] = EL(2) | REL(2) | R2 | TG(E20_2),
+ [ARMMMUIdx_E20_2_PAN] = EL(2) | REL(2) | R2 | TG(E20_2) | PAN,
+ [ARMMMUIdx_E20_2_GCS] = EL(2) | REL(2) | R2 | GCS,
+
+ [ARMMMUIdx_E2] = EL(2) | REL(2) | TG(E2),
+ [ARMMMUIdx_E2_GCS] = EL(2) | REL(2) | GCS,
+
+ [ARMMMUIdx_E3] = EL(3) | REL(3) | TG(E3),
+ [ARMMMUIdx_E3_GCS] = EL(3) | REL(3) | GCS,
+ [ARMMMUIdx_E30_0] = EL(0) | REL(3),
+ [ARMMMUIdx_E30_3_PAN] = EL(3) | REL(3) | PAN,
+
+ [ARMMMUIdx_Stage2_S] = REL(2) | S2,
+ [ARMMMUIdx_Stage2] = REL(2) | S2,
+
+ [ARMMMUIdx_Stage1_E0] = REL(1) | R2 | S1 | USER | TG(Stage1_E0),
+ [ARMMMUIdx_Stage1_E0_GCS] = REL(1) | R2 | S1 | USER | GCS,
+ [ARMMMUIdx_Stage1_E1] = REL(1) | R2 | S1 | TG(Stage1_E1),
+ [ARMMMUIdx_Stage1_E1_PAN] = REL(1) | R2 | S1 | TG(Stage1_E1) | PAN,
+ [ARMMMUIdx_Stage1_E1_GCS] = REL(1) | R2 | S1 | GCS,
+
+ /*
+ * M-profile.
+ */
+ [ARMMMUIdx_MUser] = EL(0) | REL(1),
+ [ARMMMUIdx_MPriv] = EL(1) | REL(1),
+ [ARMMMUIdx_MUserNegPri] = EL(0) | REL(1),
+ [ARMMMUIdx_MPrivNegPri] = EL(1) | REL(1),
+ [ARMMMUIdx_MSUser] = EL(0) | REL(1),
+ [ARMMMUIdx_MSPriv] = EL(1) | REL(1),
+ [ARMMMUIdx_MSUserNegPri] = EL(0) | REL(1),
+ [ARMMMUIdx_MSPrivNegPri] = EL(1) | REL(1),
+};
diff --git a/target/arm/mmuidx.h b/target/arm/mmuidx.h
new file mode 100644
index 0000000..8d8d273
--- /dev/null
+++ b/target/arm/mmuidx.h
@@ -0,0 +1,241 @@
+/*
+ * QEMU Arm software mmu index definitions
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef TARGET_ARM_MMUIDX_H
+#define TARGET_ARM_MMUIDX_H
+
+/*
+ * Arm has the following "translation regimes" (as the Arm ARM calls them):
+ *
+ * If EL3 is 64-bit:
+ * + NonSecure EL1 & 0 stage 1
+ * + NonSecure EL1 & 0 stage 2
+ * + NonSecure EL2
+ * + NonSecure EL2 & 0 (ARMv8.1-VHE)
+ * + Secure EL1 & 0 stage 1
+ * + Secure EL1 & 0 stage 2 (FEAT_SEL2)
+ * + Secure EL2 (FEAT_SEL2)
+ * + Secure EL2 & 0 (FEAT_SEL2)
+ * + Realm EL1 & 0 stage 1 (FEAT_RME)
+ * + Realm EL1 & 0 stage 2 (FEAT_RME)
+ * + Realm EL2 (FEAT_RME)
+ * + EL3
+ * If EL3 is 32-bit:
+ * + NonSecure PL1 & 0 stage 1
+ * + NonSecure PL1 & 0 stage 2
+ * + NonSecure PL2
+ * + Secure PL1 & 0
+ * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
+ *
+ * For QEMU, an mmu_idx is not quite the same as a translation regime because:
+ * 1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes,
+ * because they may differ in access permissions even if the VA->PA map is
+ * the same
+ * 2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
+ * translation, which means that we have one mmu_idx that deals with two
+ * concatenated translation regimes [this sort of combined s1+2 TLB is
+ * architecturally permitted]
+ * 3. we don't need to allocate an mmu_idx to translations that we won't be
+ * handling via the TLB. The only way to do a stage 1 translation without
+ * the immediate stage 2 translation is via the ATS or AT system insns,
+ * which can be slow-pathed and always do a page table walk.
+ * The only use of stage 2 translations is either as part of an s1+2
+ * lookup or when loading the descriptors during a stage 1 page table walk,
+ * and in both those cases we don't use the TLB.
+ * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
+ * translation regimes, because they map reasonably well to each other
+ * and they can't both be active at the same time.
+ * 5. we want to be able to use the TLB for accesses done as part of a
+ * stage1 page table walk, rather than having to walk the stage2 page
+ * table over and over.
+ * 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
+ * Never (PAN) bit within PSTATE.
+ * 7. we fold together most secure and non-secure regimes for A-profile,
+ * because there are no banked system registers for aarch64, so the
+ * process of switching between secure and non-secure is
+ * already heavyweight.
+ * 8. we cannot fold together Stage 2 Secure and Stage 2 NonSecure,
+ * because both are in use simultaneously for Secure EL2.
+ * 9. we need separate indexes for handling AccessType_GCS.
+ *
+ * This gives us the following list of cases:
+ *
+ * EL0 EL1&0 stage 1+2 (aka NS PL0 PL1&0 stage 1+2)
+ * EL0 EL1&0 stage 1+2 +GCS
+ * EL1 EL1&0 stage 1+2 (aka NS PL1 PL1&0 stage 1+2)
+ * EL1 EL1&0 stage 1+2 +PAN (aka NS PL1 PL1&0 stage 1+2 +PAN)
+ * EL1 EL1&0 stage 1+2 +GCS
+ * EL0 EL2&0
+ * EL0 EL2&0 +GCS
+ * EL2 EL2&0
+ * EL2 EL2&0 +PAN
+ * EL2 EL2&0 +GCS
+ * EL2 (aka NS PL2)
+ * EL2 +GCS
+ * EL3 (aka AArch32 S PL1 PL1&0)
+ * EL3 +GCS
+ * AArch32 S PL0 PL1&0 (we call this EL30_0)
+ * AArch32 S PL1 PL1&0 +PAN (we call this EL30_3_PAN)
+ * Stage2 Secure
+ * Stage2 NonSecure
+ * plus one TLB per Physical address space: S, NS, Realm, Root
+ *
+ * for a total of 22 different mmu_idx.
+ *
+ * R profile CPUs have an MPU, but can use the same set of MMU indexes
+ * as A profile. They only need to distinguish EL0 and EL1 (and
+ * EL2 for cores like the Cortex-R52).
+ *
+ * M profile CPUs are rather different as they do not have a true MMU.
+ * They have the following different MMU indexes:
+ * User
+ * Privileged
+ * User, execution priority negative (ie the MPU HFNMIENA bit may apply)
+ * Privileged, execution priority negative (ditto)
+ * If the CPU supports the v8M Security Extension then there are also:
+ * Secure User
+ * Secure Privileged
+ * Secure User, execution priority negative
+ * Secure Privileged, execution priority negative
+ *
+ * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code
+ * are not quite the same -- different CPU types (most notably M profile
+ * vs A/R profile) would like to use MMU indexes with different semantics,
+ * but since we don't ever need to use all of those in a single CPU we
+ * can avoid having to set NB_MMU_MODES to "total number of A profile MMU
+ * modes + total number of M profile MMU modes". The lower bits of
+ * ARMMMUIdx are the core TLB mmu index, and the higher bits are always
+ * the same for any particular CPU.
+ * Variables of type ARMMMUIdx are always full values, and the core
+ * index values are in variables of type 'int'.
+ *
+ * Our enumeration includes at the end some entries which are not "true"
+ * mmu_idx values in that they don't have corresponding TLBs and are only
+ * valid for doing slow path page table walks.
+ *
+ * The constant names here are patterned after the general style of the names
+ * of the AT/ATS operations.
+ * The values used are carefully arranged to make mmu_idx => EL lookup easy.
+ * For M profile we arrange them to have a bit for priv, a bit for negpri
+ * and a bit for secure.
+ */
+#define ARM_MMU_IDX_A 0x20 /* A profile */
+#define ARM_MMU_IDX_NOTLB 0x40 /* does not have a TLB */
+#define ARM_MMU_IDX_M 0x80 /* M profile */
+
+/* Meanings of the bits for M profile mmu idx values */
+#define ARM_MMU_IDX_M_PRIV 0x1
+#define ARM_MMU_IDX_M_NEGPRI 0x2
+#define ARM_MMU_IDX_M_S 0x4 /* Secure */
+
+#define ARM_MMU_IDX_TYPE_MASK \
+ (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
+#define ARM_MMU_IDX_COREIDX_MASK 0x1f
+
+typedef enum ARMMMUIdx {
+ /*
+ * A-profile.
+ */
+
+ ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_0_GCS = 1 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_1_PAN = 3 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_1_GCS = 4 | ARM_MMU_IDX_A,
+
+ ARMMMUIdx_E20_0 = 5 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_0_GCS = 6 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_2 = 7 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_2_PAN = 8 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_2_GCS = 9 | ARM_MMU_IDX_A,
+
+ ARMMMUIdx_E2 = 10 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E2_GCS = 11 | ARM_MMU_IDX_A,
+
+ ARMMMUIdx_E3 = 12 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E3_GCS = 13 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E30_0 = 14 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E30_3_PAN = 15 | ARM_MMU_IDX_A,
+
+ /*
+ * Used for second stage of an S12 page table walk, or for descriptor
+ * loads during first stage of an S1 page table walk. Note that both
+ * are in use simultaneously for SecureEL2: the security state for
+ * the S2 ptw is selected by the NS bit from the S1 ptw.
+ */
+ ARMMMUIdx_Stage2_S = 16 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Stage2 = 17 | ARM_MMU_IDX_A,
+
+ /* TLBs with 1-1 mapping to the physical address spaces. */
+ ARMMMUIdx_Phys_S = 18 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_NS = 19 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_Root = 20 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_Realm = 21 | ARM_MMU_IDX_A,
+
+ /*
+ * These are not allocated TLBs and are used only for AT system
+ * instructions or for the first stage of an S12 page table walk.
+ */
+ ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage1_E0_GCS = 3 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage1_E1_GCS = 4 | ARM_MMU_IDX_NOTLB,
+
+ /*
+ * M-profile.
+ */
+ ARMMMUIdx_MUser = ARM_MMU_IDX_M,
+ ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV,
+ ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI,
+ ARMMMUIdx_MPrivNegPri = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_NEGPRI,
+ ARMMMUIdx_MSUser = ARMMMUIdx_MUser | ARM_MMU_IDX_M_S,
+ ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S,
+ ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S,
+ ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S,
+} ARMMMUIdx;
+
+/*
+ * Bit macros for the core-mmu-index values for each index,
+ * for use when calling tlb_flush_by_mmuidx() and friends.
+ */
+#define TO_CORE_BIT(NAME) \
+ ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK)
+
+typedef enum ARMMMUIdxBit {
+ TO_CORE_BIT(E10_0),
+ TO_CORE_BIT(E10_0_GCS),
+ TO_CORE_BIT(E10_1),
+ TO_CORE_BIT(E10_1_PAN),
+ TO_CORE_BIT(E10_1_GCS),
+ TO_CORE_BIT(E20_0),
+ TO_CORE_BIT(E20_0_GCS),
+ TO_CORE_BIT(E20_2),
+ TO_CORE_BIT(E20_2_PAN),
+ TO_CORE_BIT(E20_2_GCS),
+ TO_CORE_BIT(E2),
+ TO_CORE_BIT(E2_GCS),
+ TO_CORE_BIT(E3),
+ TO_CORE_BIT(E3_GCS),
+ TO_CORE_BIT(E30_0),
+ TO_CORE_BIT(E30_3_PAN),
+ TO_CORE_BIT(Stage2),
+ TO_CORE_BIT(Stage2_S),
+
+ TO_CORE_BIT(MUser),
+ TO_CORE_BIT(MPriv),
+ TO_CORE_BIT(MUserNegPri),
+ TO_CORE_BIT(MPrivNegPri),
+ TO_CORE_BIT(MSUser),
+ TO_CORE_BIT(MSPriv),
+ TO_CORE_BIT(MSUserNegPri),
+ TO_CORE_BIT(MSPrivNegPri),
+} ARMMMUIdxBit;
+
+#undef TO_CORE_BIT
+
+#define MMU_USER_IDX 0
+
+#endif /* TARGET_ARM_MMUIDX_H */
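
[Editor's illustrative sketch, not part of the patch: the TO_CORE_BIT mapping above is just a shift of the low core-index bits; values below are taken from the enum in this header.]

static inline int sketch_core_index(ARMMMUIdx idx)
{
    return idx & ARM_MMU_IDX_COREIDX_MASK;    /* e.g. ARMMMUIdx_E20_2 -> 7 */
}

static inline int sketch_core_bit(ARMMMUIdx idx)
{
    return 1 << sketch_core_index(idx);       /* e.g. ARMMMUIdx_E20_2 -> ARMMMUIdxBit_E20_2 */
}
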
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index e03657f..d4386ed 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -79,6 +79,8 @@ typedef struct S1Translate {
* may be suppressed for debug or AT insns.
*/
uint8_t in_prot_check;
+ /* Cached EffectiveHCR_EL2_NVx() bit */
+ bool in_nv1;
bool out_rw;
bool out_be;
ARMSecuritySpace out_space;
@@ -167,6 +169,10 @@ ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
return ARMMMUIdx_Stage1_E1;
case ARMMMUIdx_E10_1_PAN:
return ARMMMUIdx_Stage1_E1_PAN;
+ case ARMMMUIdx_E10_0_GCS:
+ return ARMMMUIdx_Stage1_E0_GCS;
+ case ARMMMUIdx_E10_1_GCS:
+ return ARMMMUIdx_Stage1_E1_GCS;
default:
return mmu_idx;
}
@@ -233,9 +239,9 @@ static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
return env->cp15.vsttbr_el2;
}
if (ttbrn == 0) {
- return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
+ return env->cp15.ttbr0_el[regime_el(mmu_idx)];
} else {
- return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
+ return env->cp15.ttbr1_el[regime_el(mmu_idx)];
}
}
@@ -274,8 +280,10 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E10_0_GCS:
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_E10_1_GCS:
/* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
if (hcr_el2 & HCR_TGE) {
@@ -284,8 +292,10 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
break;
case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_E0_GCS:
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
+ case ARMMMUIdx_Stage1_E1_GCS:
/* HCR.DC means SCTLR_EL1.M behaves as 0 */
hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
if (hcr_el2 & HCR_DC) {
@@ -294,10 +304,14 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
break;
case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_0_GCS:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_E20_2_GCS:
case ARMMMUIdx_E2:
+ case ARMMMUIdx_E2_GCS:
case ARMMMUIdx_E3:
+ case ARMMMUIdx_E3_GCS:
case ARMMMUIdx_E30_0:
case ARMMMUIdx_E30_3_PAN:
break;
@@ -998,7 +1012,7 @@ static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
int ap, int domain_prot)
{
return ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot,
- regime_is_user(env, mmu_idx));
+ regime_is_user(mmu_idx));
}
/*
@@ -1024,7 +1038,7 @@ static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
- return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
+ return simple_ap_to_rw_prot_is_user(ap, regime_is_user(mmu_idx));
}
static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
@@ -1057,7 +1071,7 @@ static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
}
type = (desc & 3);
domain = (desc >> 5) & 0x0f;
- if (regime_el(env, ptw->in_mmu_idx) == 1) {
+ if (regime_el(ptw->in_mmu_idx) == 1) {
dacr = env->cp15.dacr_ns;
} else {
dacr = env->cp15.dacr_s;
@@ -1196,7 +1210,7 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
/* Page or Section. */
domain = (desc >> 5) & 0x0f;
}
- if (regime_el(env, mmu_idx) == 1) {
+ if (regime_el(mmu_idx) == 1) {
dacr = env->cp15.dacr_ns;
} else {
dacr = env->cp15.dacr_s;
@@ -1314,7 +1328,7 @@ do_fault:
* @xn: XN (execute-never) bits
* @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
*/
-static int get_S2prot_noexecute(int s2ap)
+static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
int prot = 0;
@@ -1324,12 +1338,6 @@ static int get_S2prot_noexecute(int s2ap)
if (s2ap & 2) {
prot |= PAGE_WRITE;
}
- return prot;
-}
-
-static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
-{
- int prot = get_S2prot_noexecute(s2ap);
if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
switch (xn) {
@@ -1361,6 +1369,44 @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
return prot;
}
+static int get_S2prot_indirect(CPUARMState *env, GetPhysAddrResult *result,
+ int pi_index, int po_index, bool s1_is_el0)
+{
+ /* Last index is (priv, unpriv, ttw) */
+ static const uint8_t perm_table[16][3] = {
+ /* 0 */ { 0, 0, 0 }, /* no access */
+ /* 1 */ { 0, 0, 0 }, /* reserved */
+ /* 2 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE },
+ /* 3 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE },
+ /* 4 */ { PAGE_WRITE, PAGE_WRITE, 0 },
+ /* 5 */ { 0, 0, 0 }, /* reserved */
+ /* 6 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE },
+ /* 7 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE },
+ /* 8 */ { PAGE_READ, PAGE_READ, PAGE_READ },
+ /* 9 */ { PAGE_READ, PAGE_READ | PAGE_EXEC, PAGE_READ },
+ /* A */ { PAGE_READ | PAGE_EXEC, PAGE_READ, PAGE_READ },
+ /* B */ { PAGE_READ | PAGE_EXEC, PAGE_READ | PAGE_EXEC, PAGE_READ },
+ /* C */ { PAGE_READ | PAGE_WRITE,
+ PAGE_READ | PAGE_WRITE,
+ PAGE_READ | PAGE_WRITE },
+ /* D */ { PAGE_READ | PAGE_WRITE,
+ PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE },
+ /* E */ { PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE,
+ PAGE_READ | PAGE_WRITE },
+ /* F */ { PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE },
+ };
+
+ uint64_t pir = (env->cp15.scr_el3 & SCR_PIEN ? env->cp15.s2pir_el2 : 0);
+ int s2pi = extract64(pir, pi_index * 4, 4);
+
+ result->f.prot = perm_table[s2pi][2];
+ return perm_table[s2pi][s1_is_el0];
+}
+
/*
* Translate section/page access permissions to protection flags
* @env: CPUARMState
@@ -1378,7 +1424,7 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
{
ARMCPU *cpu = env_archcpu(env);
- bool is_user = regime_is_user(env, mmu_idx);
+ bool is_user = regime_is_user(mmu_idx);
bool have_wxn;
int wxn = 0;
@@ -1395,10 +1441,10 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
* We make the IMPDEF choices that SCR_EL3.SIF and Realm EL2&0
* do not affect EPAN.
*/
- if (user_rw && regime_is_pan(env, mmu_idx)) {
+ if (user_rw && regime_is_pan(mmu_idx)) {
prot_rw = 0;
} else if (cpu_isar_feature(aa64_pan3, cpu) && is_aa64 &&
- regime_is_pan(env, mmu_idx) &&
+ regime_is_pan(mmu_idx) &&
(regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) {
prot_rw = 0;
}
@@ -1455,7 +1501,7 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
xn = pxn || (user_rw & PAGE_WRITE);
}
} else if (arm_feature(env, ARM_FEATURE_V7)) {
- switch (regime_el(env, mmu_idx)) {
+ switch (regime_el(mmu_idx)) {
case 1:
case 3:
if (is_user) {
@@ -1482,11 +1528,115 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
return prot_rw | PAGE_EXEC;
}
+/* Extra page permission bits, during get_S1prot_indirect only. */
+#define PAGE_GCS (1 << 3)
+#define PAGE_WXN (1 << 4)
+#define PAGE_OVERLAY (1 << 5)
+QEMU_BUILD_BUG_ON(PAGE_RWX & (PAGE_GCS | PAGE_WXN | PAGE_OVERLAY));
+
+static int get_S1prot_indirect(CPUARMState *env, S1Translate *ptw,
+ ARMMMUIdx mmu_idx, int pi_index, int po_index,
+ ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
+{
+ static const uint8_t perm_table[16] = {
+ /* 0 */ PAGE_OVERLAY, /* no access */
+ /* 1 */ PAGE_OVERLAY | PAGE_READ,
+ /* 2 */ PAGE_OVERLAY | PAGE_EXEC,
+ /* 3 */ PAGE_OVERLAY | PAGE_READ | PAGE_EXEC,
+ /* 4 */ PAGE_OVERLAY, /* reserved */
+ /* 5 */ PAGE_OVERLAY | PAGE_READ | PAGE_WRITE,
+ /* 6 */ PAGE_OVERLAY | PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_WXN,
+ /* 7 */ PAGE_OVERLAY | PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ /* 8 */ PAGE_READ,
+ /* 9 */ PAGE_READ | PAGE_GCS,
+ /* A */ PAGE_READ | PAGE_EXEC,
+ /* B */ 0, /* reserved */
+ /* C */ PAGE_READ | PAGE_WRITE,
+ /* D */ 0, /* reserved */
+ /* E */ PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ /* F */ 0, /* reserved */
+ };
+
+ uint32_t el = regime_el(mmu_idx);
+ uint64_t pir = env->cp15.pir_el[el];
+ uint64_t pire0 = 0;
+ int perm;
+
+ if (el < 3) {
+ if (arm_feature(env, ARM_FEATURE_EL3)
+ && !(env->cp15.scr_el3 & SCR_PIEN)) {
+ pir = 0;
+ } else if (el == 2) {
+ pire0 = env->cp15.pire0_el2;
+ } else if (!ptw->in_nv1) {
+ pire0 = env->cp15.pir_el[0];
+ }
+ }
+ perm = perm_table[extract64(pir, pi_index * 4, 4)];
+
+ if (regime_has_2_ranges(mmu_idx)) {
+ int p_perm = perm;
+ int u_perm = perm_table[extract64(pire0, pi_index * 4, 4)];
+
+ if ((p_perm & (PAGE_EXEC | PAGE_GCS)) &&
+ (u_perm & (PAGE_WRITE | PAGE_GCS))) {
+ p_perm &= ~(PAGE_RWX | PAGE_GCS);
+ u_perm &= ~(PAGE_RWX | PAGE_GCS);
+ }
+ if ((u_perm & (PAGE_RWX | PAGE_GCS)) && regime_is_pan(mmu_idx)) {
+ p_perm &= ~(PAGE_READ | PAGE_WRITE);
+ }
+ perm = regime_is_user(mmu_idx) ? u_perm : p_perm;
+ }
+
+ if (in_pa != out_pa) {
+ switch (in_pa) {
+ case ARMSS_Root:
+ /*
+ * R_ZWRVD: permission fault for insn fetched from non-Root,
+ * I_WWBFB: SIF has no effect in EL3.
+ */
+ perm &= ~(PAGE_EXEC | PAGE_GCS);
+ break;
+ case ARMSS_Realm:
+ /*
+ * R_PKTDS: permission fault for insn fetched from non-Realm,
+ * for Realm EL2 or EL2&0. The corresponding fault for EL1&0
+ * happens during any stage2 translation.
+ */
+ if (el == 2) {
+ perm &= ~(PAGE_EXEC | PAGE_GCS);
+ }
+ break;
+ case ARMSS_Secure:
+ if (env->cp15.scr_el3 & SCR_SIF) {
+ perm &= ~(PAGE_EXEC | PAGE_GCS);
+ }
+ break;
+ default:
+ /* Input NonSecure must have output NonSecure. */
+ g_assert_not_reached();
+ }
+ }
+
+ if (regime_is_gcs(mmu_idx)) {
+ /*
+ * Note that the one s1perms.gcs bit controls both read and write
+ * access via AccessType_GCS. See AArch64.S1CheckPermissions.
+ */
+ perm = (perm & PAGE_GCS ? PAGE_READ | PAGE_WRITE : 0);
+ } else if (perm & PAGE_WXN) {
+ perm &= ~PAGE_EXEC;
+ }
+
+ return perm & PAGE_RWX;
+}
+
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
ARMMMUIdx mmu_idx)
{
uint64_t tcr = regime_tcr(env, mmu_idx);
- uint32_t el = regime_el(env, mmu_idx);
+ uint32_t el = regime_el(mmu_idx);
int select, tsz;
bool epd, hpd;
@@ -1507,8 +1657,12 @@ static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
}
tsz = sextract32(tcr, 0, 4) + 8;
select = 0;
- hpd = false;
epd = false;
+ /*
+ * Stage2 does not have hierarchical permissions.
+ * Thus disabling them makes things easier during ptw.
+ */
+ hpd = true;
} else if (el == 2) {
/* HTCR */
tsz = extract32(tcr, 0, 3);
@@ -1673,12 +1827,6 @@ static bool lpae_block_desc_valid(ARMCPU *cpu, bool ds,
}
}
-static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw)
-{
- uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
- return (hcr & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1);
-}
-
/**
* get_phys_addr_lpae: perform one stage of page table walk, LPAE format
*
@@ -1713,8 +1861,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
int32_t stride;
int addrsize, inputsize, outputsize;
uint64_t tcr = regime_tcr(env, mmu_idx);
- int ap, xn, pxn;
- uint32_t el = regime_el(env, mmu_idx);
+ int ap, prot;
+ uint32_t el = regime_el(mmu_idx);
uint64_t descaddrmask;
bool aarch64 = arm_el_is_aa64(env, el);
uint64_t descriptor, new_descriptor;
@@ -1731,6 +1879,16 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
level = 0;
/*
+ * Cache NV1 before we adjust ptw->in_space for NSTable.
+ * Note that this is only relevant for EL1&0, and that
+ * computing it would assert for ARMSS_Root.
+ */
+ if (el == 1) {
+ uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
+ ptw->in_nv1 = (hcr & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1);
+ }
+
+ /*
* If TxSZ is programmed to a value larger than the maximum,
* or smaller than the effective minimum, it is IMPLEMENTATION
* DEFINED whether we behave as if the field were programmed
@@ -2014,21 +2172,31 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
* except NSTable (which we have already handled).
*/
attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
- if (!regime_is_stage2(mmu_idx)) {
- if (!param.hpd) {
- attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */
- /*
- * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
- * means "force PL1 access only", which means forcing AP[1] to 0.
- */
- attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */
- attrs |= extract32(tableattrs, 3, 1) << 7; /* APT[1] => AP[2] */
- }
+ if (!param.hpd) {
+ attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */
+ /*
+ * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
+ * means "force PL1 access only", which means forcing AP[1] to 0.
+ */
+ attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */
+ attrs |= extract32(tableattrs, 3, 1) << 7; /* APT[1] => AP[2] */
}
ap = extract32(attrs, 6, 2);
out_space = ptw->cur_space;
if (regime_is_stage2(mmu_idx)) {
+ if (param.pie) {
+ int pi = extract64(attrs, 6, 1)
+ | (extract64(attrs, 51, 1) << 1)
+ | (extract64(attrs, 53, 2) << 2);
+ int po = extract64(attrs, 60, 3);
+ prot = get_S2prot_indirect(env, result, pi, po, ptw->in_s1_is_el0);
+ } else {
+ int xn = extract64(attrs, 53, 2);
+ prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
+ /* Install TTW permissions in f.prot. */
+ result->f.prot = prot & (PAGE_READ | PAGE_WRITE);
+ }
/*
* R_GYNXY: For stage2 in Realm security state, bit 55 is NS.
* The bit remains ignored for other security states.
@@ -2037,11 +2205,9 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
*/
if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) {
out_space = ARMSS_NonSecure;
- result->f.prot = get_S2prot_noexecute(ap);
- } else {
- xn = extract64(attrs, 53, 2);
- result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
+ prot &= ~PAGE_EXEC;
}
+ result->s2prot = prot;
result->cacheattrs.is_s2_format = true;
result->cacheattrs.attrs = extract32(attrs, 2, 4);
@@ -2055,7 +2221,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
int nse, ns = extract32(attrs, 5, 1);
uint8_t attrindx;
uint64_t mair;
- int user_rw, prot_rw;
switch (out_space) {
case ARMSS_Root:
@@ -2104,33 +2269,57 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
default:
g_assert_not_reached();
}
- xn = extract64(attrs, 54, 1);
- pxn = extract64(attrs, 53, 1);
- if (el == 1 && nv_nv1_enabled(env, ptw)) {
+ if (param.pie) {
+ int pi = extract64(attrs, 6, 1)
+ | (extract64(attrs, 51, 1) << 1)
+ | (extract64(attrs, 53, 2) << 2);
+ int po = extract64(attrs, 60, 3);
/*
- * With FEAT_NV, when HCR_EL2.{NV,NV1} == {1,1}, the block/page
- * descriptor bit 54 holds PXN, 53 is RES0, and the effective value
- * of UXN is 0. Similarly for bits 59 and 60 in table descriptors
- * (which we have already folded into bits 53 and 54 of attrs).
- * AP[1] (descriptor bit 6, our ap bit 0) is treated as 0.
- * Similarly, APTable[0] from the table descriptor is treated as 0;
- * we already folded this into AP[1] and squashing that to 0 does
- * the right thing.
+ * Note that we modified ptw->in_space earlier for NSTable, but
+ * result->f.attrs retains a copy of the original security space.
*/
- pxn = xn;
- xn = 0;
- ap &= ~1;
- }
+ prot = get_S1prot_indirect(env, ptw, mmu_idx, pi, po,
+ result->f.attrs.space, out_space);
+ } else if (regime_is_gcs(mmu_idx)) {
+ /*
+ * While one must use indirect permissions to successfully
+ * use GCS instructions, AArch64.S1DirectBasePermissions
+ * faithfully supplies s1perms.gcs = 0, Just In Case.
+ */
+ prot = 0;
+ } else {
+ int xn = extract64(attrs, 54, 1);
+ int pxn = extract64(attrs, 53, 1);
+ int user_rw, prot_rw;
- user_rw = simple_ap_to_rw_prot_is_user(ap, true);
- prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
- result->f.prot = get_S1prot(env, mmu_idx, aarch64, user_rw, prot_rw,
- xn, pxn, ptw->in_space, out_space);
+ if (el == 1 && ptw->in_nv1) {
+ /*
+ * With FEAT_NV, when HCR_EL2.{NV,NV1} == {1,1},
+ * the block/page descriptor bit 54 holds PXN,
+ * 53 is RES0, and the effective value of UXN is 0.
+ * Similarly for bits 59 and 60 in table descriptors
+ * (which we have already folded into bits 53 and 54 of attrs).
+ * AP[1] (descriptor bit 6, our ap bit 0) is treated as 0.
+ * Similarly, APTable[0] from the table descriptor is treated
+ * as 0; we already folded this into AP[1] and squashing
+ * that to 0 does the right thing.
+ */
+ pxn = xn;
+ xn = 0;
+ ap &= ~1;
+ }
+
+ user_rw = simple_ap_to_rw_prot_is_user(ap, true);
+ prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
+ prot = get_S1prot(env, mmu_idx, aarch64, user_rw, prot_rw,
+ xn, pxn, ptw->in_space, out_space);
+ }
+ result->f.prot = prot;
/* Index into MAIR registers for cache attributes */
attrindx = extract32(attrs, 2, 3);
- mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
+ mair = env->cp15.mair_el[regime_el(mmu_idx)];
assert(attrindx <= 7);
result->cacheattrs.is_s2_format = false;
result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
@@ -2172,11 +2361,27 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
result->f.tlb_fill_flags = 0;
}
- if (ptw->in_prot_check & ~result->f.prot) {
+ if (ptw->in_prot_check & ~prot) {
fi->type = ARMFault_Permission;
goto do_fault;
}
+ /* S1PIE and S2PIE both have a bit for software dirty page tracking. */
+ if (access_type == MMU_DATA_STORE && param.pie) {
+ /*
+ * For S1PIE, bit 7 is nDirty and both HA and HD are checked.
+ * For S2PIE, bit 7 is Dirty and only HD is checked.
+ */
+ bool bit7 = extract64(attrs, 7, 1);
+ if (regime_is_stage2(mmu_idx)
+ ? !bit7 && !param.hd
+ : bit7 && !(param.ha && param.hd)) {
+ fi->type = ARMFault_Permission;
+ fi->dirtybit = true;
+ goto do_fault;
+ }
+ }
+
/* If FEAT_HAFDBS has made changes, update the PTE. */
if (new_descriptor != descriptor) {
new_descriptor = arm_casq_ptw(env, descriptor, new_descriptor, ptw, fi);
@@ -2239,7 +2444,7 @@ static bool get_phys_addr_pmsav5(CPUARMState *env,
uint32_t mask;
uint32_t base;
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
- bool is_user = regime_is_user(env, mmu_idx);
+ bool is_user = regime_is_user(mmu_idx);
if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
/* MPU disabled. */
@@ -2406,7 +2611,7 @@ static bool get_phys_addr_pmsav7(CPUARMState *env,
ARMCPU *cpu = env_archcpu(env);
int n;
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
- bool is_user = regime_is_user(env, mmu_idx);
+ bool is_user = regime_is_user(mmu_idx);
bool secure = arm_space_is_secure(ptw->in_space);
result->f.phys_addr = address;
@@ -2592,7 +2797,7 @@ static bool get_phys_addr_pmsav7(CPUARMState *env,
static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx,
uint32_t secure)
{
- if (regime_el(env, mmu_idx) == 2) {
+ if (regime_el(mmu_idx) == 2) {
return env->pmsav8.hprbar;
} else {
return env->pmsav8.rbar[secure];
@@ -2602,7 +2807,7 @@ static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx,
static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx,
uint32_t secure)
{
- if (regime_el(env, mmu_idx) == 2) {
+ if (regime_el(mmu_idx) == 2) {
return env->pmsav8.hprlar;
} else {
return env->pmsav8.rlar[secure];
@@ -2626,7 +2831,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
* memory system to use a subpage.
*/
ARMCPU *cpu = env_archcpu(env);
- bool is_user = regime_is_user(env, mmu_idx);
+ bool is_user = regime_is_user(mmu_idx);
int n;
int matchregion = -1;
bool hit = false;
@@ -2634,7 +2839,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
int region_counter;
- if (regime_el(env, mmu_idx) == 2) {
+ if (regime_el(mmu_idx) == 2) {
region_counter = cpu->pmsav8r_hdregion;
} else {
region_counter = cpu->pmsav7_dregion;
@@ -2760,7 +2965,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
xn = 1;
}
- if (regime_el(env, mmu_idx) == 2) {
+ if (regime_el(mmu_idx) == 2) {
result->f.prot = simple_ap_to_rw_prot_is_user(ap,
mmu_idx != ARMMMUIdx_E2);
} else {
@@ -2769,7 +2974,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
if (!arm_feature(env, ARM_FEATURE_M)) {
uint8_t attrindx = extract32(matched_rlar, 1, 3);
- uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
+ uint64_t mair = env->cp15.mair_el[regime_el(mmu_idx)];
uint8_t sh = extract32(matched_rlar, 3, 2);
if (regime_sctlr(env, mmu_idx) & SCTLR_WXN &&
@@ -2777,7 +2982,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
xn = 0x1;
}
- if ((regime_el(env, mmu_idx) == 1) &&
+ if ((regime_el(mmu_idx) == 1) &&
regime_sctlr(env, mmu_idx) & SCTLR_UWXN && ap == 0x1) {
pxn = 0x1;
}
@@ -3262,7 +3467,7 @@ static bool get_phys_addr_disabled(CPUARMState *env,
break;
default:
- r_el = regime_el(env, mmu_idx);
+ r_el = regime_el(mmu_idx);
if (arm_el_is_aa64(env, r_el)) {
int pamax = arm_pamax(env_archcpu(env));
uint64_t tcr = env->cp15.tcr_el[r_el];
@@ -3370,7 +3575,7 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
fi->s2addr = ipa;
/* Combine the S1 and S2 perms. */
- result->f.prot &= s1_prot;
+ result->f.prot = s1_prot & result->s2prot;
/* If S2 fails, return early. */
if (ret) {
@@ -3507,7 +3712,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
break;
}
- result->f.attrs.user = regime_is_user(env, mmu_idx);
+ result->f.attrs.user = regime_is_user(mmu_idx);
/*
* Fast Context Switch Extension. This doesn't exist at all in v8.
@@ -3515,7 +3720,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
*/
if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
&& !arm_feature(env, ARM_FEATURE_V8)) {
- if (regime_el(env, mmu_idx) == 3) {
+ if (regime_el(mmu_idx) == 3) {
address += env->cp15.fcseidr_s;
} else {
address += env->cp15.fcseidr_ns;
@@ -3617,15 +3822,22 @@ arm_mmu_idx_to_security_space(CPUARMState *env, ARMMMUIdx mmu_idx)
switch (mmu_idx) {
case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E10_0_GCS:
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_E10_1_GCS:
case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_0_GCS:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_E20_2_GCS:
case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_E0_GCS:
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
+ case ARMMMUIdx_Stage1_E1_GCS:
case ARMMMUIdx_E2:
+ case ARMMMUIdx_E2_GCS:
ss = arm_security_space_below_el3(env);
break;
case ARMMMUIdx_Stage2:
@@ -3654,6 +3866,7 @@ arm_mmu_idx_to_security_space(CPUARMState *env, ARMMMUIdx mmu_idx)
ss = ARMSS_Secure;
break;
case ARMMMUIdx_E3:
+ case ARMMMUIdx_E3_GCS:
case ARMMMUIdx_E30_0:
case ARMMMUIdx_E30_3_PAN:
if (arm_feature(env, ARM_FEATURE_AARCH64) &&
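
[Editor's illustrative sketch, not part of the patch: the 4-bit permission-indirection index used by get_S1prot_indirect()/get_S2prot_indirect() above is assembled from descriptor bits 6, 51 and 53..54, then selects a nibble of PIR_ELx/S2PIR_EL2.]

static int sketch_pi_index(uint64_t attrs)
{
    return  (int)((attrs >>  6) & 1)           /* attrs[6]     -> pi[0] */
         | ((int)((attrs >> 51) & 1) << 1)     /* attrs[51]    -> pi[1] */
         | ((int)((attrs >> 53) & 3) << 2);    /* attrs[54:53] -> pi[3:2] */
}

static int sketch_pir_nibble(uint64_t pir, int pi_index)
{
    return (pir >> (pi_index * 4)) & 0xf;      /* same as extract64(pir, pi_index * 4, 4) */
}
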
diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h
index c48d3b8..bff61f0 100644
--- a/target/arm/syndrome.h
+++ b/target/arm/syndrome.h
@@ -63,6 +63,7 @@ enum arm_exception_class {
EC_MOP = 0x27,
EC_AA32_FPTRAP = 0x28,
EC_AA64_FPTRAP = 0x2c,
+ EC_GCS = 0x2d,
EC_SERROR = 0x2f,
EC_BREAKPOINT = 0x30,
EC_BREAKPOINT_SAME_EL = 0x31,
@@ -83,6 +84,23 @@ typedef enum {
SME_ET_InaccessibleZT0,
} SMEExceptionType;
+typedef enum {
+ GCS_ET_DataCheck,
+ GCS_ET_EXLOCK,
+ GCS_ET_GCSSTR_GCSSTTR,
+} GCSExceptionType;
+
+typedef enum {
+ GCS_IT_RET_nPauth = 0,
+ GCS_IT_GCSPOPM = 1,
+ GCS_IT_RET_PauthA = 2,
+ GCS_IT_RET_PauthB = 3,
+ GCS_IT_GCSSS1 = 4,
+ GCS_IT_GCSSS2 = 5,
+ GCS_IT_GCSPOPCX = 8,
+ GCS_IT_GCSPOPX = 9,
+} GCSInstructionType;
+
#define ARM_EL_EC_LENGTH 6
#define ARM_EL_EC_SHIFT 26
#define ARM_EL_IL_SHIFT 25
@@ -351,6 +369,23 @@ static inline uint32_t syn_pcalignment(void)
return (EC_PCALIGNMENT << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}
+static inline uint32_t syn_gcs_data_check(GCSInstructionType it, int rn)
+{
+ return ((EC_GCS << ARM_EL_EC_SHIFT) | ARM_EL_IL |
+ (GCS_ET_DataCheck << 20) | (rn << 5) | it);
+}
+
+static inline uint32_t syn_gcs_exlock(void)
+{
+ return (EC_GCS << ARM_EL_EC_SHIFT) | ARM_EL_IL | (GCS_ET_EXLOCK << 20);
+}
+
+static inline uint32_t syn_gcs_gcsstr(int ra, int rn)
+{
+ return ((EC_GCS << ARM_EL_EC_SHIFT) | ARM_EL_IL |
+ (GCS_ET_GCSSTR_GCSSTTR << 20) | (ra << 10) | (rn << 5));
+}
+
static inline uint32_t syn_serror(uint32_t extra)
{
return (EC_SERROR << ARM_EL_EC_SHIFT) | ARM_EL_IL | extra;
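
[Editor's illustrative sketch, not part of the patch: the syn_gcs_*() helpers above pack EC at bit 26, IL at bit 25, the GCS exception type at bit 20, and register/instruction-type fields in the low bits; a worked example of one value.]

/* syn_gcs_gcsstr(ra = 1, rn = 2) produces: */
static const uint32_t sketch_gcs_syn = (0x2du << 26)   /* EC_GCS */
                                     | (1u    << 25)   /* IL */
                                     | (2u    << 20)   /* GCS_ET_GCSSTR_GCSSTTR */
                                     | (1u    << 10)   /* Ra = 1 */
                                     | (2u    <<  5);  /* Rn = 2 */
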
diff --git a/target/arm/tcg-stubs.c b/target/arm/tcg-stubs.c
index aac99b2..aeeede8 100644
--- a/target/arm/tcg-stubs.c
+++ b/target/arm/tcg-stubs.c
@@ -16,7 +16,7 @@ void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
g_assert_not_reached();
}
-void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
+void raise_exception_ra(CPUARMState *env, uint32_t excp, uint64_t syndrome,
uint32_t target_el, uintptr_t ra)
{
g_assert_not_reached();
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index 55ff6c5..01b1b3e 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -248,6 +248,7 @@ ERETA 1101011 0100 11111 00001 m:1 11111 11111 &reta # ERETAA, ERETAB
AUTIA1716 1101 0101 0000 0011 0010 0001 100 11111
AUTIB1716 1101 0101 0000 0011 0010 0001 110 11111
ESB 1101 0101 0000 0011 0010 0010 000 11111
+ GCSB 1101 0101 0000 0011 0010 0010 011 11111
PACIAZ 1101 0101 0000 0011 0010 0011 000 11111
PACIASP 1101 0101 0000 0011 0010 0011 001 11111
PACIBZ 1101 0101 0000 0011 0010 0011 010 11111
@@ -256,6 +257,7 @@ ERETA 1101011 0100 11111 00001 m:1 11111 11111 &reta # ERETAA, ERETAB
AUTIASP 1101 0101 0000 0011 0010 0011 101 11111
AUTIBZ 1101 0101 0000 0011 0010 0011 110 11111
AUTIBSP 1101 0101 0000 0011 0010 0011 111 11111
+ CHKFEAT 1101 0101 0000 0011 0010 0101 000 11111
]
# The canonical NOP has CRm == op2 == 0, but all of the space
# that isn't specifically allocated to an instruction must NOP
@@ -570,6 +572,9 @@ LDAPR_i 10 011001 10 0 ......... 00 ..... ..... @ldapr_stlr_i sign=1 ext
LDAPR_i 00 011001 11 0 ......... 00 ..... ..... @ldapr_stlr_i sign=1 ext=1 sz=0
LDAPR_i 01 011001 11 0 ......... 00 ..... ..... @ldapr_stlr_i sign=1 ext=1 sz=1
+# GCSSTR, GCSSTTR
+GCSSTR 11011001 000 11111 000 unpriv:1 11 rn:5 rt:5
+
# Load/store multiple structures
# The 4-bit opcode in [15:12] encodes repeat count and structure elements
&ldst_mult rm rn rt sz q p rpt selem
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
index 8c617fe..1bffe66 100644
--- a/target/arm/tcg/cpu64.c
+++ b/target/arm/tcg/cpu64.c
@@ -1280,6 +1280,7 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64PFR1, SME, 2); /* FEAT_SME2 */
t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_3 */
t = FIELD_DP64(t, ID_AA64PFR1, NMI, 1); /* FEAT_NMI */
+ t = FIELD_DP64(t, ID_AA64PFR1, GCS, 1); /* FEAT_GCS */
SET_IDREG(isar, ID_AA64PFR1, t);
t = GET_IDREG(isar, ID_AA64MMFR0);
@@ -1326,7 +1327,10 @@ void aarch64_max_tcg_initfn(Object *obj)
t = GET_IDREG(isar, ID_AA64MMFR3);
t = FIELD_DP64(t, ID_AA64MMFR3, TCRX, 1); /* FEAT_TCR2 */
t = FIELD_DP64(t, ID_AA64MMFR3, SCTLRX, 1); /* FEAT_SCTLR2 */
+ t = FIELD_DP64(t, ID_AA64MMFR3, MEC, 1); /* FEAT_MEC */
t = FIELD_DP64(t, ID_AA64MMFR3, SPEC_FPACC, 1); /* FEAT_FPACC_SPEC */
+ t = FIELD_DP64(t, ID_AA64MMFR3, S1PIE, 1); /* FEAT_S1PIE */
+ t = FIELD_DP64(t, ID_AA64MMFR3, S2PIE, 1); /* FEAT_S2PIE */
SET_IDREG(isar, ID_AA64MMFR3, t);
t = GET_IDREG(isar, ID_AA64ZFR0);
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
index 71c6c44..ba1d775 100644
--- a/target/arm/tcg/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
@@ -576,6 +576,7 @@ uint32_t HELPER(advsimd_rinth)(uint32_t x, float_status *fp_status)
return ret;
}
+#ifndef CONFIG_USER_ONLY
static int el_from_spsr(uint32_t spsr)
{
/* Return the exception level that this SPSR is requesting a return to,
@@ -614,32 +615,12 @@ static int el_from_spsr(uint32_t spsr)
}
}
-static void cpsr_write_from_spsr_elx(CPUARMState *env,
- uint32_t val)
-{
- uint32_t mask;
-
- /* Save SPSR_ELx.SS into PSTATE. */
- env->pstate = (env->pstate & ~PSTATE_SS) | (val & PSTATE_SS);
- val &= ~PSTATE_SS;
-
- /* Move DIT to the correct location for CPSR */
- if (val & PSTATE_DIT) {
- val &= ~PSTATE_DIT;
- val |= CPSR_DIT;
- }
-
- mask = aarch32_cpsr_valid_mask(env->features, \
- &env_archcpu(env)->isar);
- cpsr_write(env, val, mask, CPSRWriteRaw);
-}
-
void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
{
ARMCPU *cpu = env_archcpu(env);
int cur_el = arm_current_el(env);
unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
- uint32_t spsr = env->banked_spsr[spsr_idx];
+ uint64_t spsr = env->banked_spsr[spsr_idx];
int new_el;
bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;
@@ -694,6 +675,17 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
goto illegal_return;
}
+ /*
+ * If GetCurrentEXLOCKEN, the exception return path must use GCSPOPCX,
+ * which will set PSTATE.EXLOCK. We need not explicitly check FEAT_GCS,
+ * because GCSCR_ELx cannot be set without it.
+ */
+ if (new_el == cur_el &&
+ (env->cp15.gcscr_el[cur_el] & GCSCR_EXLOCKEN) &&
+ !(env->pstate & PSTATE_EXLOCK)) {
+ goto illegal_return;
+ }
+
bql_lock();
arm_call_pre_el_change_hook(cpu);
bql_unlock();
@@ -787,6 +779,7 @@ illegal_return:
qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
"resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
}
+#endif /* !CONFIG_USER_ONLY */
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
diff --git a/target/arm/tcg/helper-a64.h b/target/arm/tcg/helper-a64.h
index 8502346..b6008b5 100644
--- a/target/arm/tcg/helper-a64.h
+++ b/target/arm/tcg/helper-a64.h
@@ -80,7 +80,6 @@ DEF_HELPER_3(vfp_ah_maxh, f16, f16, f16, fpst)
DEF_HELPER_3(vfp_ah_maxs, f32, f32, f32, fpst)
DEF_HELPER_3(vfp_ah_maxd, f64, f64, f64, fpst)
-DEF_HELPER_2(exception_return, void, env, i64)
DEF_HELPER_FLAGS_2(dc_zva, TCG_CALL_NO_WG, void, env, i64)
DEF_HELPER_FLAGS_3(pacia, TCG_CALL_NO_WG, i64, env, i64, i64)
@@ -145,3 +144,7 @@ DEF_HELPER_FLAGS_5(gvec_fmulx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32
DEF_HELPER_FLAGS_5(gvec_fmulx_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(gvec_fmulx_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(gvec_fmulx_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_2(exception_return, void, env, i64)
+#endif
diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c
index 17f83f1..5c9b9be 100644
--- a/target/arm/tcg/hflags.c
+++ b/target/arm/tcg/hflags.c
@@ -451,6 +451,44 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
}
+ if (cpu_isar_feature(aa64_gcs, env_archcpu(env))) {
+ /* C.f. GCSEnabled */
+ if (env->cp15.gcscr_el[el] & GCSCR_PCRSEL) {
+ switch (el) {
+ default:
+ if (!el_is_in_host(env, el)
+ && !(arm_hcrx_el2_eff(env) & HCRX_GCSEN)) {
+ break;
+ }
+ /* fall through */
+ case 2:
+ if (arm_feature(env, ARM_FEATURE_EL3)
+ && !(env->cp15.scr_el3 & SCR_GCSEN)) {
+ break;
+ }
+ /* fall through */
+ case 3:
+ DP_TBFLAG_A64(flags, GCS_EN, 1);
+ break;
+ }
+ }
+
+ /* C.f. GCSReturnValueCheckEnabled */
+ if (env->cp15.gcscr_el[el] & GCSCR_RVCHKEN) {
+ DP_TBFLAG_A64(flags, GCS_RVCEN, 1);
+ }
+
+ /* C.f. CheckGCSSTREnabled */
+ if (!(env->cp15.gcscr_el[el] & GCSCR_STREN)) {
+ DP_TBFLAG_A64(flags, GCSSTR_EL, el ? el : 1);
+ } else if (el == 1
+ && EX_TBFLAG_ANY(flags, FGT_ACTIVE)
+ && !FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR],
+ HFGITR_EL2, NGCSSTR_EL1)) {
+ DP_TBFLAG_A64(flags, GCSSTR_EL, 2);
+ }
+ }
+
if (env->vfp.fpcr & FPCR_AH) {
DP_TBFLAG_A64(flags, AH, 1);
}
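
[Editor's illustrative sketch, not part of the patch: the fall-through switch above follows the architectural GCSEnabled() check; the same logic as straight-line conditions, using the same helpers as hflags.c.]

static bool sketch_gcs_enabled(CPUARMState *env, int el)
{
    if (!(env->cp15.gcscr_el[el] & GCSCR_PCRSEL)) {
        return false;
    }
    if (el < 2 && !el_is_in_host(env, el)
        && !(arm_hcrx_el2_eff(env) & HCRX_GCSEN)) {
        return false;                        /* EL0/EL1 gated by HCRX_EL2.GCSEN */
    }
    if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_GCSEN)) {
        return false;                        /* EL0..EL2 gated by SCR_EL3.GCSEN */
    }
    return true;
}
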
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index b96c953..bb48fe3 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -605,7 +605,7 @@ void mte_check_fail(CPUARMState *env, uint32_t desc,
int el, reg_el, tcf;
uint64_t sctlr;
- reg_el = regime_el(env, arm_mmu_idx);
+ reg_el = regime_el(arm_mmu_idx);
sctlr = env->cp15.sctlr_el[reg_el];
switch (arm_mmu_idx) {
diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c
index 5373e0e..4fbd219 100644
--- a/target/arm/tcg/op_helper.c
+++ b/target/arm/tcg/op_helper.c
@@ -46,7 +46,7 @@ int exception_target_el(CPUARMState *env)
}
void raise_exception(CPUARMState *env, uint32_t excp,
- uint32_t syndrome, uint32_t target_el)
+ uint64_t syndrome, uint32_t target_el)
{
CPUState *cs = env_cpu(env);
@@ -70,7 +70,7 @@ void raise_exception(CPUARMState *env, uint32_t excp,
cpu_loop_exit(cs);
}
-void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
+void raise_exception_ra(CPUARMState *env, uint32_t excp, uint64_t syndrome,
uint32_t target_el, uintptr_t ra)
{
CPUState *cs = env_cpu(env);
@@ -881,6 +881,13 @@ const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key,
}
syndrome = syn_uncategorized();
break;
+ case CP_ACCESS_EXLOCK:
+ /*
+ * CP_ACCESS_EXLOCK is always directed to the current EL,
+ * which is going to be the same as the usual target EL.
+ */
+ syndrome = syn_gcs_exlock();
+ break;
default:
g_assert_not_reached();
}
diff --git a/target/arm/tcg/tlb-insns.c b/target/arm/tcg/tlb-insns.c
index 95c26c6..1a0a332 100644
--- a/target/arm/tcg/tlb-insns.c
+++ b/target/arm/tcg/tlb-insns.c
@@ -149,7 +149,8 @@ static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = env_cpu(env);
uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
+ tlb_flush_page_by_mmuidx(cs, pageaddr,
+ ARMMMUIdxBit_E2 | ARMMMUIdxBit_E2_GCS);
}
static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -159,7 +160,8 @@ static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
- ARMMMUIdxBit_E2);
+ ARMMMUIdxBit_E2 |
+ ARMMMUIdxBit_E2_GCS);
}
static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -202,7 +204,7 @@ static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = env_cpu(env);
- tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2 | ARMMMUIdxBit_E2_GCS);
}
static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -210,7 +212,8 @@ static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = env_cpu(env);
- tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2 |
+ ARMMMUIdxBit_E2_GCS);
}
/*
@@ -228,12 +231,16 @@ static int vae1_tlbmask(CPUARMState *env)
if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
mask = ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E20_0;
+ ARMMMUIdxBit_E20_2_GCS |
+ ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_0_GCS;
} else {
/* This is AArch64 only, so we don't need to touch the EL30_x TLBs */
mask = ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
- ARMMMUIdxBit_E10_0;
+ ARMMMUIdxBit_E10_1_GCS |
+ ARMMMUIdxBit_E10_0 |
+ ARMMMUIdxBit_E10_0_GCS;
}
return mask;
}
@@ -246,13 +253,20 @@ static int vae2_tlbmask(CPUARMState *env)
if (hcr & HCR_E2H) {
mask = ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E20_0;
+ ARMMMUIdxBit_E20_2_GCS |
+ ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_0_GCS;
} else {
- mask = ARMMMUIdxBit_E2;
+ mask = ARMMMUIdxBit_E2 | ARMMMUIdxBit_E2_GCS;
}
return mask;
}
+static int vae3_tlbmask(void)
+{
+ return ARMMMUIdxBit_E3 | ARMMMUIdxBit_E3_GCS;
+}
+
/* Return 56 if TBI is enabled, 64 otherwise. */
static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
uint64_t addr)
@@ -325,9 +339,12 @@ static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
static int e2_tlbmask(CPUARMState *env)
{
return (ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_0_GCS |
ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E2);
+ ARMMMUIdxBit_E20_2_GCS |
+ ARMMMUIdxBit_E2 |
+ ARMMMUIdxBit_E2_GCS);
}
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -354,7 +371,7 @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = CPU(cpu);
- tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3);
+ tlb_flush_by_mmuidx(cs, vae3_tlbmask());
}
static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -380,7 +397,7 @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = env_cpu(env);
- tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3);
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, vae3_tlbmask());
}
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -411,7 +428,7 @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3);
+ tlb_flush_page_by_mmuidx(cs, pageaddr, vae3_tlbmask());
}
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -465,7 +482,7 @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr);
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
- ARMMMUIdxBit_E3, bits);
+ vae3_tlbmask(), bits);
}
static int ipas2e1_tlbmask(CPUARMState *env, int64_t value)
@@ -963,7 +980,7 @@ static void tlbi_aa64_rvae3_write(CPUARMState *env,
* flush-last-level-only.
*/
- do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env));
+ do_rvae_write(env, value, vae3_tlbmask(), tlb_force_broadcast(env));
}
static void tlbi_aa64_rvae3is_write(CPUARMState *env,
@@ -977,7 +994,7 @@ static void tlbi_aa64_rvae3is_write(CPUARMState *env,
* flush-last-level-only or inner/outer specific flushes.
*/
- do_rvae_write(env, value, ARMMMUIdxBit_E3, true);
+ do_rvae_write(env, value, vae3_tlbmask(), true);
}
static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c
index 23c72a9..f1983a5 100644
--- a/target/arm/tcg/tlb_helper.c
+++ b/target/arm/tcg/tlb_helper.c
@@ -24,13 +24,13 @@ bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
return regime_using_lpae_format(env, mmu_idx);
}
-static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
+static inline uint64_t merge_syn_data_abort(uint32_t template_syn,
ARMMMUFaultInfo *fi,
unsigned int target_el,
bool same_el, bool is_write,
- int fsc)
+ int fsc, bool gcs)
{
- uint32_t syn;
+ uint64_t syn;
/*
* ISV is only set for stage-2 data aborts routed to EL2 and
@@ -75,6 +75,11 @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
/* Merge the runtime syndrome with the template syndrome. */
syn |= template_syn;
}
+
+ /* Form ISS2 at the top of the syndrome. */
+ syn |= (uint64_t)fi->dirtybit << 37;
+ syn |= (uint64_t)gcs << 40;
+
return syn;
}
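
For reference, the two new ISS2 fields formed here land in the upper syndrome word (ISS2 occupies ESR_ELx bits [55:32]); a minimal sketch of the resulting bit positions, derived from the shifts above (the macro names are invented for illustration, not QEMU definitions):

    /* Sketch only: bit positions implied by the shifts above. */
    #define ESR_ISS2_DIRTYBIT   (1ull << 37)   /* ISS2 bit 5: fi->dirtybit */
    #define ESR_ISS2_GCS        (1ull << 40)   /* ISS2 bit 8: GCS data abort */
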
@@ -176,7 +181,9 @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
int target_el = exception_target_el(env);
int current_el = arm_current_el(env);
bool same_el;
- uint32_t syn, exc, fsr, fsc;
+ uint32_t exc, fsr, fsc;
+ uint64_t syn;
+
/*
* We know this must be a data or insn abort, and that
* env->exception.syndrome contains the template syndrome set
@@ -246,9 +253,10 @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
exc = EXCP_PREFETCH_ABORT;
} else {
+ bool gcs = regime_is_gcs(core_to_arm_mmu_idx(env, mmu_idx));
syn = merge_syn_data_abort(env->exception.syndrome, fi, target_el,
same_el, access_type == MMU_DATA_STORE,
- fsc);
+ fsc, gcs);
if (access_type == MMU_DATA_STORE
&& arm_feature(env, ARM_FEATURE_V6)) {
fsr |= (1 << 11);
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index a0e3300..918d5ed 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -26,6 +26,7 @@
#include "cpregs.h"
static TCGv_i64 cpu_X[32];
+static TCGv_i64 cpu_gcspr[4];
static TCGv_i64 cpu_pc;
/* Load/store exclusive handling */
@@ -77,6 +78,10 @@ static int scale_by_log2_tag_granule(DisasContext *s, int x)
/* initialize TCG globals. */
void a64_translate_init(void)
{
+ static const char gcspr_names[4][12] = {
+ "gcspr_el0", "gcspr_el1", "gcspr_el2", "gcspr_el3"
+ };
+
int i;
cpu_pc = tcg_global_mem_new_i64(tcg_env,
@@ -90,10 +95,17 @@ void a64_translate_init(void)
cpu_exclusive_high = tcg_global_mem_new_i64(tcg_env,
offsetof(CPUARMState, exclusive_high), "exclusive_high");
+
+ for (i = 0; i < 4; i++) {
+ cpu_gcspr[i] =
+ tcg_global_mem_new_i64(tcg_env,
+ offsetof(CPUARMState, cp15.gcspr_el[i]),
+ gcspr_names[i]);
+ }
}
/*
- * Return the core mmu_idx to use for A64 load/store insns which
+ * Return the full arm mmu_idx to use for A64 load/store insns which
* have a "unprivileged load/store" variant. Those insns access
* EL0 if executed from an EL which has control over EL0 (usually
* EL1) but behave like normal loads and stores if executed from
@@ -103,7 +115,7 @@ void a64_translate_init(void)
* normal encoding (in which case we will return the same
* thing as get_mem_index().
*/
-static int get_a64_user_mem_index(DisasContext *s, bool unpriv)
+static ARMMMUIdx full_a64_user_mem_index(DisasContext *s, bool unpriv)
{
/*
* If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
@@ -130,7 +142,19 @@ static int get_a64_user_mem_index(DisasContext *s, bool unpriv)
g_assert_not_reached();
}
}
- return arm_to_core_mmu_idx(useridx);
+ return useridx;
+}
+
+/* Return the core mmu_idx per above. */
+static int core_a64_user_mem_index(DisasContext *s, bool unpriv)
+{
+ return arm_to_core_mmu_idx(full_a64_user_mem_index(s, unpriv));
+}
+
+/* For a given translation regime, return the core mmu_idx for gcs access. */
+static int core_gcs_mem_index(ARMMMUIdx armidx)
+{
+ return arm_to_core_mmu_idx(regime_to_gcs(armidx));
}
static void set_btype_raw(int val)
@@ -408,6 +432,39 @@ static MemOp check_ordered_align(DisasContext *s, int rn, int imm,
return finalize_memop(s, mop);
}
+static void gen_add_gcs_record(DisasContext *s, TCGv_i64 value)
+{
+ TCGv_i64 addr = tcg_temp_new_i64();
+ TCGv_i64 gcspr = cpu_gcspr[s->current_el];
+ int mmuidx = core_gcs_mem_index(s->mmu_idx);
+ MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN);
+
+ tcg_gen_addi_i64(addr, gcspr, -8);
+ tcg_gen_qemu_st_i64(value, clean_data_tbi(s, addr), mmuidx, mop);
+ tcg_gen_mov_i64(gcspr, addr);
+}
+
+static void gen_load_check_gcs_record(DisasContext *s, TCGv_i64 target,
+ GCSInstructionType it, int rt)
+{
+ TCGv_i64 gcspr = cpu_gcspr[s->current_el];
+ int mmuidx = core_gcs_mem_index(s->mmu_idx);
+ MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN);
+ TCGv_i64 rec_va = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld_i64(rec_va, clean_data_tbi(s, gcspr), mmuidx, mop);
+
+ if (s->gcs_rvcen) {
+ TCGLabel *fail_label =
+ delay_exception(s, EXCP_UDEF, syn_gcs_data_check(it, rt));
+
+ tcg_gen_brcond_i64(TCG_COND_NE, rec_va, target, fail_label);
+ }
+
+ gen_a64_set_pc(s, rec_va);
+ tcg_gen_addi_i64(gcspr, gcspr, 8);
+}
+
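
A rough guest-visible model of what these two helpers generate (a sketch for illustration only; the function names below are hypothetical, not QEMU APIs): a branch-with-link pushes the link address just below GCSPR_ELx, while a return loads the top record, optionally compares it against the return target, and then pops it.

    #include <stdbool.h>
    #include <stdint.h>

    /* Conceptual model of the generated code, not QEMU code. */
    static void gcs_push_record(uint64_t **gcspr, uint64_t link)
    {
        *gcspr -= 1;                 /* records grow downwards, 8 bytes each */
        **gcspr = link;              /* store the return address */
    }

    static uint64_t gcs_pop_record(uint64_t **gcspr, uint64_t target,
                                   bool check_enabled)
    {
        uint64_t rec = **gcspr;      /* load the top-of-stack record */
        if (check_enabled && rec != target) {
            /* a GCS data check exception would be raised here */
        }
        *gcspr += 1;                 /* pop the record */
        return rec;                  /* becomes the new PC */
    }
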
typedef struct DisasCompare64 {
TCGCond cond;
TCGv_i64 value;
@@ -1642,7 +1699,14 @@ static bool trans_B(DisasContext *s, arg_i *a)
static bool trans_BL(DisasContext *s, arg_i *a)
{
- gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s));
+ TCGv_i64 link = tcg_temp_new_i64();
+
+ gen_pc_plus_diff(s, link, 4);
+ if (s->gcs_en) {
+ gen_add_gcs_record(s, link);
+ }
+ tcg_gen_mov_i64(cpu_reg(s, 30), link);
+
reset_btype(s);
gen_goto_tb(s, 0, a->imm);
return true;
@@ -1739,15 +1803,15 @@ static bool trans_BR(DisasContext *s, arg_r *a)
static bool trans_BLR(DisasContext *s, arg_r *a)
{
- TCGv_i64 dst = cpu_reg(s, a->rn);
- TCGv_i64 lr = cpu_reg(s, 30);
- if (dst == lr) {
- TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_mov_i64(tmp, dst);
- dst = tmp;
+ TCGv_i64 link = tcg_temp_new_i64();
+
+ gen_pc_plus_diff(s, link, 4);
+ if (s->gcs_en) {
+ gen_add_gcs_record(s, link);
}
- gen_pc_plus_diff(s, lr, curr_insn_len(s));
- gen_a64_set_pc(s, dst);
+ gen_a64_set_pc(s, cpu_reg(s, a->rn));
+ tcg_gen_mov_i64(cpu_reg(s, 30), link);
+
set_btype_for_blr(s);
s->base.is_jmp = DISAS_JUMP;
return true;
@@ -1755,7 +1819,13 @@ static bool trans_BLR(DisasContext *s, arg_r *a)
static bool trans_RET(DisasContext *s, arg_r *a)
{
- gen_a64_set_pc(s, cpu_reg(s, a->rn));
+ TCGv_i64 target = cpu_reg(s, a->rn);
+
+ if (s->gcs_en) {
+ gen_load_check_gcs_record(s, target, GCS_IT_RET_nPauth, a->rn);
+ } else {
+ gen_a64_set_pc(s, target);
+ }
s->base.is_jmp = DISAS_JUMP;
return true;
}
@@ -1799,21 +1869,21 @@ static bool trans_BRAZ(DisasContext *s, arg_braz *a)
static bool trans_BLRAZ(DisasContext *s, arg_braz *a)
{
- TCGv_i64 dst, lr;
+ TCGv_i64 dst, link;
if (!dc_isar_feature(aa64_pauth, s)) {
return false;
}
-
dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
- lr = cpu_reg(s, 30);
- if (dst == lr) {
- TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_mov_i64(tmp, dst);
- dst = tmp;
+
+ link = tcg_temp_new_i64();
+ gen_pc_plus_diff(s, link, 4);
+ if (s->gcs_en) {
+ gen_add_gcs_record(s, link);
}
- gen_pc_plus_diff(s, lr, curr_insn_len(s));
gen_a64_set_pc(s, dst);
+ tcg_gen_mov_i64(cpu_reg(s, 30), link);
+
set_btype_for_blr(s);
s->base.is_jmp = DISAS_JUMP;
return true;
@@ -1828,7 +1898,12 @@ static bool trans_RETA(DisasContext *s, arg_reta *a)
}
dst = auth_branch_target(s, cpu_reg(s, 30), cpu_X[31], !a->m);
- gen_a64_set_pc(s, dst);
+ if (s->gcs_en) {
+ GCSInstructionType it = a->m ? GCS_IT_RET_PauthB : GCS_IT_RET_PauthA;
+ gen_load_check_gcs_record(s, dst, it, 30);
+ } else {
+ gen_a64_set_pc(s, dst);
+ }
s->base.is_jmp = DISAS_JUMP;
return true;
}
@@ -1849,20 +1924,21 @@ static bool trans_BRA(DisasContext *s, arg_bra *a)
static bool trans_BLRA(DisasContext *s, arg_bra *a)
{
- TCGv_i64 dst, lr;
+ TCGv_i64 dst, link;
if (!dc_isar_feature(aa64_pauth, s)) {
return false;
}
dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
- lr = cpu_reg(s, 30);
- if (dst == lr) {
- TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_mov_i64(tmp, dst);
- dst = tmp;
+
+ link = tcg_temp_new_i64();
+ gen_pc_plus_diff(s, link, 4);
+ if (s->gcs_en) {
+ gen_add_gcs_record(s, link);
}
- gen_pc_plus_diff(s, lr, curr_insn_len(s));
gen_a64_set_pc(s, dst);
+ tcg_gen_mov_i64(cpu_reg(s, 30), link);
+
set_btype_for_blr(s);
s->base.is_jmp = DISAS_JUMP;
return true;
@@ -1870,6 +1946,9 @@ static bool trans_BLRA(DisasContext *s, arg_bra *a)
static bool trans_ERET(DisasContext *s, arg_ERET *a)
{
+#ifdef CONFIG_USER_ONLY
+ return false;
+#else
TCGv_i64 dst;
if (s->current_el == 0) {
@@ -1889,10 +1968,14 @@ static bool trans_ERET(DisasContext *s, arg_ERET *a)
/* Must exit loop to check un-masked IRQs */
s->base.is_jmp = DISAS_EXIT;
return true;
+#endif
}
static bool trans_ERETA(DisasContext *s, arg_reta *a)
{
+#ifdef CONFIG_USER_ONLY
+ return false;
+#else
TCGv_i64 dst;
if (!dc_isar_feature(aa64_pauth, s)) {
@@ -1918,6 +2001,7 @@ static bool trans_ERETA(DisasContext *s, arg_reta *a)
/* Must exit loop to check un-masked IRQs */
s->base.is_jmp = DISAS_EXIT;
return true;
+#endif
}
static bool trans_NOP(DisasContext *s, arg_NOP *a)
@@ -2060,6 +2144,14 @@ static bool trans_ESB(DisasContext *s, arg_ESB *a)
return true;
}
+static bool trans_GCSB(DisasContext *s, arg_GCSB *a)
+{
+ if (dc_isar_feature(aa64_gcs, s)) {
+ tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
+ }
+ return true;
+}
+
static bool trans_PACIAZ(DisasContext *s, arg_PACIAZ *a)
{
if (s->pauth_active) {
@@ -2124,6 +2216,20 @@ static bool trans_AUTIBSP(DisasContext *s, arg_AUTIBSP *a)
return true;
}
+static bool trans_CHKFEAT(DisasContext *s, arg_CHKFEAT *a)
+{
+ uint64_t feat_en = 0;
+
+ if (s->gcs_en) {
+ feat_en |= 1 << 0;
+ }
+ if (feat_en) {
+ TCGv_i64 x16 = cpu_reg(s, 16);
+ tcg_gen_andi_i64(x16, x16, ~feat_en);
+ }
+ return true;
+}
+
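
The CHKFEAT convention implemented above, from the guest's point of view (a hedged sketch; the helper name is made up): software sets X16 bits for the features it wants to query, executes CHKFEAT, and bits that come back cleared indicate the corresponding feature is currently enabled. Only bit 0 (GCS) is handled here.

    #include <stdbool.h>
    #include <stdint.h>

    /* Guest-visible CHKFEAT behaviour as generated above (sketch only). */
    static uint64_t chkfeat_x16(uint64_t x16, bool gcs_enabled)
    {
        uint64_t enabled = 0;
        if (gcs_enabled) {
            enabled |= 1u << 0;        /* feature bit 0: GCS */
        }
        return x16 & ~enabled;         /* clear bits for enabled features */
    }
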
static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
{
tcg_gen_movi_i64(cpu_exclusive_addr, -1);
@@ -2455,6 +2561,182 @@ static void gen_sysreg_undef(DisasContext *s, bool isread,
gen_exception_insn(s, 0, EXCP_UDEF, syndrome);
}
+static void gen_gcspopm(DisasContext *s, int rt)
+{
+ TCGv_i64 gcspr = cpu_gcspr[s->current_el];
+ int mmuidx = core_gcs_mem_index(s->mmu_idx);
+ MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN);
+ TCGv_i64 value = tcg_temp_new_i64();
+ TCGLabel *fail_label =
+ delay_exception(s, EXCP_UDEF, syn_gcs_data_check(GCS_IT_GCSPOPM, rt));
+
+ /* The value at top-of-stack must have low 2 bits clear. */
+ tcg_gen_qemu_ld_i64(value, clean_data_tbi(s, gcspr), mmuidx, mop);
+ tcg_gen_brcondi_i64(TCG_COND_TSTNE, value, 3, fail_label);
+
+ /* Complete the pop and return the value. */
+ tcg_gen_addi_i64(gcspr, gcspr, 8);
+ tcg_gen_mov_i64(cpu_reg(s, rt), value);
+}
+
+static void gen_gcspushx(DisasContext *s)
+{
+ TCGv_i64 gcspr = cpu_gcspr[s->current_el];
+ int spsr_idx = aarch64_banked_spsr_index(s->current_el);
+ int spsr_off = offsetof(CPUARMState, banked_spsr[spsr_idx]);
+ int elr_off = offsetof(CPUARMState, elr_el[s->current_el]);
+ int mmuidx = core_gcs_mem_index(s->mmu_idx);
+ MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN);
+ TCGv_i64 addr = tcg_temp_new_i64();
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ tcg_gen_addi_i64(addr, gcspr, -8);
+ tcg_gen_qemu_st_i64(cpu_reg(s, 30), addr, mmuidx, mop);
+
+ tcg_gen_ld_i64(tmp, tcg_env, spsr_off);
+ tcg_gen_addi_i64(addr, addr, -8);
+ tcg_gen_qemu_st_i64(tmp, addr, mmuidx, mop);
+
+ tcg_gen_ld_i64(tmp, tcg_env, elr_off);
+ tcg_gen_addi_i64(addr, addr, -8);
+ tcg_gen_qemu_st_i64(tmp, addr, mmuidx, mop);
+
+ tcg_gen_addi_i64(addr, addr, -8);
+ tcg_gen_qemu_st_i64(tcg_constant_i64(0b1001), addr, mmuidx, mop);
+
+ tcg_gen_mov_i64(gcspr, addr);
+ clear_pstate_bits(PSTATE_EXLOCK);
+}
+
+static void gen_gcspopcx(DisasContext *s)
+{
+ TCGv_i64 gcspr = cpu_gcspr[s->current_el];
+ int spsr_idx = aarch64_banked_spsr_index(s->current_el);
+ int spsr_off = offsetof(CPUARMState, banked_spsr[spsr_idx]);
+ int elr_off = offsetof(CPUARMState, elr_el[s->current_el]);
+ int gcscr_off = offsetof(CPUARMState, cp15.gcscr_el[s->current_el]);
+ int pstate_off = offsetof(CPUARMState, pstate);
+ int mmuidx = core_gcs_mem_index(s->mmu_idx);
+ MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN);
+ TCGv_i64 addr = tcg_temp_new_i64();
+ TCGv_i64 tmp1 = tcg_temp_new_i64();
+ TCGv_i64 tmp2 = tcg_temp_new_i64();
+ TCGLabel *fail_label =
+ delay_exception(s, EXCP_UDEF, syn_gcs_data_check(GCS_IT_GCSPOPCX, 31));
+
+ /* The value at top-of-stack must be an exception token. */
+ tcg_gen_qemu_ld_i64(tmp1, gcspr, mmuidx, mop);
+ tcg_gen_brcondi_i64(TCG_COND_NE, tmp1, 0b1001, fail_label);
+
+ /* Validate in turn, ELR ... */
+ tcg_gen_addi_i64(addr, gcspr, 8);
+ tcg_gen_qemu_ld_i64(tmp1, addr, mmuidx, mop);
+ tcg_gen_ld_i64(tmp2, tcg_env, elr_off);
+ tcg_gen_brcond_i64(TCG_COND_NE, tmp1, tmp2, fail_label);
+
+ /* ... SPSR ... */
+ tcg_gen_addi_i64(addr, addr, 8);
+ tcg_gen_qemu_ld_i64(tmp1, addr, mmuidx, mop);
+ tcg_gen_ld_i64(tmp2, tcg_env, spsr_off);
+ tcg_gen_brcond_i64(TCG_COND_NE, tmp1, tmp2, fail_label);
+
+ /* ... and LR. */
+ tcg_gen_addi_i64(addr, addr, 8);
+ tcg_gen_qemu_ld_i64(tmp1, addr, mmuidx, mop);
+ tcg_gen_brcond_i64(TCG_COND_NE, tmp1, cpu_reg(s, 30), fail_label);
+
+ /* Writeback stack pointer after pop. */
+ tcg_gen_addi_i64(gcspr, addr, 8);
+
+ /* PSTATE.EXLOCK = GetCurrentEXLOCKEN(). */
+ tcg_gen_ld_i64(tmp1, tcg_env, gcscr_off);
+ tcg_gen_ld_i64(tmp2, tcg_env, pstate_off);
+ tcg_gen_shri_i64(tmp1, tmp1, ctz64(GCSCR_EXLOCKEN));
+ tcg_gen_deposit_i64(tmp2, tmp2, tmp1, ctz64(PSTATE_EXLOCK), 1);
+ tcg_gen_st_i64(tmp2, tcg_env, pstate_off);
+}
+
+static void gen_gcspopx(DisasContext *s)
+{
+ TCGv_i64 gcspr = cpu_gcspr[s->current_el];
+ int mmuidx = core_gcs_mem_index(s->mmu_idx);
+ MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN);
+ TCGv_i64 addr = tcg_temp_new_i64();
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ TCGLabel *fail_label =
+ delay_exception(s, EXCP_UDEF, syn_gcs_data_check(GCS_IT_GCSPOPX, 31));
+
+ /* The value at top-of-stack must be an exception token. */
+ tcg_gen_qemu_ld_i64(tmp, gcspr, mmuidx, mop);
+ tcg_gen_brcondi_i64(TCG_COND_NE, tmp, 0b1001, fail_label);
+
+ /*
+ * The other three values in the exception return record
+ * are ignored, but are loaded anyway to raise faults.
+ */
+ tcg_gen_addi_i64(addr, gcspr, 8);
+ tcg_gen_qemu_ld_i64(tmp, addr, mmuidx, mop);
+ tcg_gen_addi_i64(addr, addr, 8);
+ tcg_gen_qemu_ld_i64(tmp, addr, mmuidx, mop);
+ tcg_gen_addi_i64(addr, addr, 8);
+ tcg_gen_qemu_ld_i64(tmp, addr, mmuidx, mop);
+ tcg_gen_addi_i64(gcspr, addr, 8);
+}
+
+static void gen_gcsss1(DisasContext *s, int rt)
+{
+ TCGv_i64 gcspr = cpu_gcspr[s->current_el];
+ int mmuidx = core_gcs_mem_index(s->mmu_idx);
+ MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN);
+ TCGv_i64 inptr = cpu_reg(s, rt);
+ TCGv_i64 cmp = tcg_temp_new_i64();
+ TCGv_i64 new = tcg_temp_new_i64();
+ TCGv_i64 old = tcg_temp_new_i64();
+ TCGLabel *fail_label =
+ delay_exception(s, EXCP_UDEF, syn_gcs_data_check(GCS_IT_GCSSS1, rt));
+
+ /* Compute the valid cap entry that the new stack must have. */
+ tcg_gen_deposit_i64(cmp, inptr, tcg_constant_i64(1), 0, 12);
+ /* Compute the in-progress cap entry for the old stack. */
+ tcg_gen_deposit_i64(new, gcspr, tcg_constant_i64(5), 0, 3);
+
+ /* Swap the valid cap with the in-progress cap. */
+ tcg_gen_atomic_cmpxchg_i64(old, inptr, cmp, new, mmuidx, mop);
+ tcg_gen_brcond_i64(TCG_COND_NE, old, cmp, fail_label);
+
+ /* The new stack had a valid cap: change gcspr. */
+ tcg_gen_andi_i64(gcspr, inptr, ~7);
+}
+
+static void gen_gcsss2(DisasContext *s, int rt)
+{
+ TCGv_i64 gcspr = cpu_gcspr[s->current_el];
+ int mmuidx = core_gcs_mem_index(s->mmu_idx);
+ MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN);
+ TCGv_i64 outptr = tcg_temp_new_i64();
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ TCGLabel *fail_label =
+ delay_exception(s, EXCP_UDEF, syn_gcs_data_check(GCS_IT_GCSSS2, rt));
+
+ /* Validate that the new stack has an in-progress cap. */
+ tcg_gen_qemu_ld_i64(outptr, gcspr, mmuidx, mop);
+ tcg_gen_andi_i64(tmp, outptr, 7);
+ tcg_gen_brcondi_i64(TCG_COND_NE, tmp, 5, fail_label);
+
+ /* Push a valid cap to the old stack. */
+ tcg_gen_andi_i64(outptr, outptr, ~7);
+ tcg_gen_addi_i64(outptr, outptr, -8);
+ tcg_gen_deposit_i64(tmp, outptr, tcg_constant_i64(1), 0, 12);
+ tcg_gen_qemu_st_i64(tmp, outptr, mmuidx, mop);
+ tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
+
+ /* Pop the in-progress cap from the new stack. */
+ tcg_gen_addi_i64(gcspr, gcspr, 8);
+
+ /* Return a pointer to the old stack cap. */
+ tcg_gen_mov_i64(cpu_reg(s, rt), outptr);
+}
+
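
The token encodings that the stack-switch and exception helpers above rely on, spelled out (a sketch; macro names are invented for illustration): GCSSS1 expects the incoming stack to end in a valid cap, replaces it with an in-progress cap pointing back at the old stack, and GCSPUSHX/GCSPOPCX/GCSPOPX use 0b1001 as the exception return token.

    /* Low-bit encodings implied by the deposits and compares above. */
    #define GCS_CAP_VALID(addr)       (((addr) & ~0xfffull) | 0x1)  /* bits [11:0] = 1 */
    #define GCS_CAP_IN_PROGRESS(addr) (((addr) & ~0x7ull)   | 0x5)  /* bits [2:0] = 5 */
    #define GCS_EXCEPTION_TOKEN       0x9ull                        /* 0b1001 */
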
/*
* Look up @key, returning the cpreg, which must exist.
* Additionally, the new cpreg must also be accessible.
@@ -2761,6 +3043,51 @@ static void handle_sys(DisasContext *s, bool isread,
}
}
return;
+ case ARM_CP_GCSPUSHM:
+ if (s->gcs_en) {
+ gen_add_gcs_record(s, cpu_reg(s, rt));
+ }
+ return;
+ case ARM_CP_GCSPOPM:
+ /* Note that X[rt] is unchanged if !GCSEnabled. */
+ if (s->gcs_en) {
+ gen_gcspopm(s, rt);
+ }
+ return;
+ case ARM_CP_GCSPUSHX:
+ /* Choose the CONSTRAINED UNPREDICTABLE for UNDEF. */
+ if (rt != 31) {
+ unallocated_encoding(s);
+ } else if (s->gcs_en) {
+ gen_gcspushx(s);
+ }
+ return;
+ case ARM_CP_GCSPOPCX:
+ /* Choose the CONSTRAINED UNPREDICTABLE for UNDEF. */
+ if (rt != 31) {
+ unallocated_encoding(s);
+ } else if (s->gcs_en) {
+ gen_gcspopcx(s);
+ }
+ return;
+ case ARM_CP_GCSPOPX:
+ /* Choose the CONSTRAINED UNPREDICTABLE for UNDEF. */
+ if (rt != 31) {
+ unallocated_encoding(s);
+ } else if (s->gcs_en) {
+ gen_gcspopx(s);
+ }
+ return;
+ case ARM_CP_GCSSS1:
+ if (s->gcs_en) {
+ gen_gcsss1(s, rt);
+ }
+ return;
+ case ARM_CP_GCSSS2:
+ if (s->gcs_en) {
+ gen_gcsss2(s, rt);
+ }
+ return;
default:
g_assert_not_reached();
}
@@ -3555,7 +3882,7 @@ static void op_addr_ldst_imm_pre(DisasContext *s, arg_ldst_imm *a,
if (!a->p) {
tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset);
}
- memidx = get_a64_user_mem_index(s, a->unpriv);
+ memidx = core_a64_user_mem_index(s, a->unpriv);
*clean_addr = gen_mte_check1_mmuidx(s, *dirty_addr, is_store,
a->w || a->rn != 31,
mop, a->unpriv, memidx);
@@ -3576,7 +3903,7 @@ static bool trans_STR_i(DisasContext *s, arg_ldst_imm *a)
{
bool iss_sf, iss_valid = !a->w;
TCGv_i64 clean_addr, dirty_addr, tcg_rt;
- int memidx = get_a64_user_mem_index(s, a->unpriv);
+ int memidx = core_a64_user_mem_index(s, a->unpriv);
MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop);
@@ -3594,7 +3921,7 @@ static bool trans_LDR_i(DisasContext *s, arg_ldst_imm *a)
{
bool iss_sf, iss_valid = !a->w;
TCGv_i64 clean_addr, dirty_addr, tcg_rt;
- int memidx = get_a64_user_mem_index(s, a->unpriv);
+ int memidx = core_a64_user_mem_index(s, a->unpriv);
MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop);
@@ -3961,6 +4288,42 @@ static bool trans_STLR_i(DisasContext *s, arg_ldapr_stlr_i *a)
return true;
}
+static bool trans_GCSSTR(DisasContext *s, arg_GCSSTR *a)
+{
+ ARMMMUIdx armidx;
+
+ if (!dc_isar_feature(aa64_gcs, s)) {
+ return false;
+ }
+
+ /*
+ * The pseudocode for GCSSTTR is
+ *
+ * effective_el = AArch64.IsUnprivAccessPriv() ? PSTATE.EL : EL0;
+ * if (effective_el == PSTATE.EL) CheckGCSSTREnabled();
+ *
+ * We have cached the result of IsUnprivAccessPriv in DisasContext,
+ * but since we need the result of full_a64_user_mem_index anyway,
+ * use the mmu_idx test as a proxy for the effective_el test.
+ */
+ armidx = full_a64_user_mem_index(s, a->unpriv);
+ if (armidx == s->mmu_idx && s->gcsstr_el != 0) {
+ gen_exception_insn_el(s, 0, EXCP_UDEF,
+ syn_gcs_gcsstr(a->rn, a->rt),
+ s->gcsstr_el);
+ return true;
+ }
+
+ if (a->rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ tcg_gen_qemu_st_i64(cpu_reg(s, a->rt),
+ clean_data_tbi(s, cpu_reg_sp(s, a->rn)),
+ core_gcs_mem_index(armidx),
+ finalize_memop(s, MO_64 | MO_ALIGN));
+ return true;
+}
+
static bool trans_LD_mult(DisasContext *s, arg_ldst_mult *a)
{
TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
@@ -4492,7 +4855,7 @@ static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue,
return false;
}
- memidx = get_a64_user_mem_index(s, a->unpriv);
+ memidx = core_a64_user_mem_index(s, a->unpriv);
/*
* We pass option_a == true, matching our implementation;
@@ -4546,8 +4909,8 @@ static bool do_CPY(DisasContext *s, arg_cpy *a, bool is_epilogue, CpyFn fn)
return false;
}
- rmemidx = get_a64_user_mem_index(s, runpriv);
- wmemidx = get_a64_user_mem_index(s, wunpriv);
+ rmemidx = core_a64_user_mem_index(s, runpriv);
+ wmemidx = core_a64_user_mem_index(s, wunpriv);
/*
* We pass option_a == true, matching our implementation;
@@ -10344,6 +10707,9 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
dc->nv2_mem_be = EX_TBFLAG_A64(tb_flags, NV2_MEM_BE);
dc->fpcr_ah = EX_TBFLAG_A64(tb_flags, AH);
dc->fpcr_nep = EX_TBFLAG_A64(tb_flags, NEP);
+ dc->gcs_en = EX_TBFLAG_A64(tb_flags, GCS_EN);
+ dc->gcs_rvcen = EX_TBFLAG_A64(tb_flags, GCS_RVCEN);
+ dc->gcsstr_el = EX_TBFLAG_A64(tb_flags, GCSSTR_EL);
dc->vec_len = 0;
dc->vec_stride = 0;
dc->cp_regs = arm_cpu->cp_regs;
@@ -10570,6 +10936,8 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
break;
}
}
+
+ emit_delayed_exceptions(dc);
}
const TranslatorOps aarch64_translator_ops = {
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index e62dcc5..3df0bbc 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -1088,6 +1088,57 @@ void gen_exception_insn(DisasContext *s, target_long pc_diff,
s->base.is_jmp = DISAS_NORETURN;
}
+TCGLabel *delay_exception_el(DisasContext *s, int excp,
+ uint32_t syn, uint32_t target_el)
+{
+ /* Use tcg_malloc for automatic release on longjmp out of translation. */
+ DisasDelayException *e = tcg_malloc(sizeof(DisasDelayException));
+
+ memset(e, 0, sizeof(*e));
+
+ /* Save enough of the current state to satisfy gen_exception_insn. */
+ e->pc_curr = s->pc_curr;
+ e->pc_save = s->pc_save;
+ if (!s->aarch64) {
+ e->condexec_cond = s->condexec_cond;
+ e->condexec_mask = s->condexec_mask;
+ }
+
+ e->excp = excp;
+ e->syn = syn;
+ e->target_el = target_el;
+
+ e->next = s->delay_excp_list;
+ s->delay_excp_list = e;
+
+ e->lab = gen_new_label();
+ return e->lab;
+}
+
+TCGLabel *delay_exception(DisasContext *s, int excp, uint32_t syn)
+{
+ return delay_exception_el(s, excp, syn, 0);
+}
+
+void emit_delayed_exceptions(DisasContext *s)
+{
+ for (DisasDelayException *e = s->delay_excp_list; e ; e = e->next) {
+ gen_set_label(e->lab);
+
+ /* Restore the insn state to satisfy gen_exception_insn. */
+ s->pc_curr = e->pc_curr;
+ s->pc_save = e->pc_save;
+ s->condexec_cond = e->condexec_cond;
+ s->condexec_mask = e->condexec_mask;
+
+ if (e->target_el) {
+ gen_exception_insn_el(s, 0, e->excp, e->syn, e->target_el);
+ } else {
+ gen_exception_insn(s, 0, e->excp, e->syn);
+ }
+ }
+}
+
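
A minimal sketch of how the new delayed-exception helpers are meant to be used from a translator function (the condition, operands, and syndrome here are placeholders, not real code from this patch): request a label up front, branch to it on failure, and let emit_delayed_exceptions() at tb_stop generate the out-of-line exception call.

    /* Sketch, with placeholder condition/syndrome values. */
    TCGLabel *fail = delay_exception(s, EXCP_UDEF, syndrome);
    tcg_gen_brcondi_i32(TCG_COND_NE, some_flag, expected, fail);
    /* fall through on success; the exception path is generated later, */
    /* once per translation block, by emit_delayed_exceptions().       */
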
static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
{
gen_set_condexec(s);
@@ -1723,21 +1774,11 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
if (maskbit != 4 && maskbit != 14) {
/* T4 and T14 are RES0 so never cause traps */
- TCGv_i32 t;
- DisasLabel over = gen_disas_label(s);
-
- t = load_cpu_offset(offsetoflow32(CPUARMState, cp15.hstr_el2));
- tcg_gen_andi_i32(t, t, 1u << maskbit);
- tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, over.label);
+ TCGLabel *fail = delay_exception_el(s, EXCP_UDEF, syndrome, 2);
+ TCGv_i32 t =
+ load_cpu_offset(offsetoflow32(CPUARMState, cp15.hstr_el2));
- gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
- /*
- * gen_exception_insn() will set is_jmp to DISAS_NORETURN,
- * but since we're conditionally branching over it, we want
- * to assume continue-to-next-instruction.
- */
- s->base.is_jmp = DISAS_NEXT;
- set_disas_label(s, over);
+ tcg_gen_brcondi_i32(TCG_COND_TSTNE, t, 1u << maskbit, fail);
}
}
@@ -5557,11 +5598,10 @@ static bool trans_LE(DisasContext *s, arg_LE *a)
if (!a->tp && dc_isar_feature(aa32_mve, s) && fpu_active) {
/* Need to do a runtime check for LTPSIZE != 4 */
- DisasLabel skipexc = gen_disas_label(s);
+ TCGLabel *fail = delay_exception(s, EXCP_INVSTATE, syn_uncategorized());
+
tmp = load_cpu_field(v7m.ltpsize);
- tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 4, skipexc.label);
- gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized());
- set_disas_label(s, skipexc);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 4, fail);
}
if (a->f) {
@@ -6791,6 +6831,8 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
gen_goto_tb(dc, 1, curr_insn_len(dc));
}
}
+
+ emit_delayed_exceptions(dc);
}
static const TranslatorOps arm_translator_ops = {
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
index f1a6e5e..9a85ea7 100644
--- a/target/arm/tcg/translate.h
+++ b/target/arm/tcg/translate.h
@@ -21,9 +21,25 @@ typedef struct DisasLabel {
target_ulong pc_save;
} DisasLabel;
+/*
+ * Emit an exception call out of line.
+ */
+typedef struct DisasDelayException {
+ struct DisasDelayException *next;
+ TCGLabel *lab;
+ target_long pc_curr;
+ target_long pc_save;
+ int condexec_mask;
+ int condexec_cond;
+ uint32_t excp;
+ uint32_t syn;
+ uint32_t target_el;
+} DisasDelayException;
+
typedef struct DisasContext {
DisasContextBase base;
const ARMISARegisters *isar;
+ DisasDelayException *delay_excp_list;
/* The address of the current instruction being translated. */
target_ulong pc_curr;
@@ -166,6 +182,12 @@ typedef struct DisasContext {
bool fpcr_ah;
/* True if FPCR.NEP is 1 (FEAT_AFP scalar upper-element result handling) */
bool fpcr_nep;
+ /* True if GCSEnabled. */
+ bool gcs_en;
+ /* True if GCSReturnValueCheckEnabled. */
+ bool gcs_rvcen;
+ /* Target EL for the GCSSTR trap, or 0 if GCSSTR is enabled */
+ uint8_t gcsstr_el;
/*
* >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
* < 0, set by the current instruction.
@@ -359,6 +381,10 @@ void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp,
uint32_t syn, uint32_t target_el);
void gen_exception_insn(DisasContext *s, target_long pc_diff,
int excp, uint32_t syn);
+TCGLabel *delay_exception_el(DisasContext *s, int excp,
+ uint32_t syn, uint32_t target_el);
+TCGLabel *delay_exception(DisasContext *s, int excp, uint32_t syn);
+void emit_delayed_exceptions(DisasContext *s);
/* Return state of Alternate Half-precision flag, caller frees result */
static inline TCGv_i32 get_ahp_flag(void)
@@ -372,27 +398,27 @@ static inline TCGv_i32 get_ahp_flag(void)
}
/* Set bits within PSTATE. */
-static inline void set_pstate_bits(uint32_t bits)
+static inline void set_pstate_bits(uint64_t bits)
{
- TCGv_i32 p = tcg_temp_new_i32();
+ TCGv_i64 p = tcg_temp_new_i64();
tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));
- tcg_gen_ld_i32(p, tcg_env, offsetof(CPUARMState, pstate));
- tcg_gen_ori_i32(p, p, bits);
- tcg_gen_st_i32(p, tcg_env, offsetof(CPUARMState, pstate));
+ tcg_gen_ld_i64(p, tcg_env, offsetof(CPUARMState, pstate));
+ tcg_gen_ori_i64(p, p, bits);
+ tcg_gen_st_i64(p, tcg_env, offsetof(CPUARMState, pstate));
}
/* Clear bits within PSTATE. */
-static inline void clear_pstate_bits(uint32_t bits)
+static inline void clear_pstate_bits(uint64_t bits)
{
- TCGv_i32 p = tcg_temp_new_i32();
+ TCGv_i64 p = tcg_temp_new_i64();
tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));
- tcg_gen_ld_i32(p, tcg_env, offsetof(CPUARMState, pstate));
- tcg_gen_andi_i32(p, p, ~bits);
- tcg_gen_st_i32(p, tcg_env, offsetof(CPUARMState, pstate));
+ tcg_gen_ld_i64(p, tcg_env, offsetof(CPUARMState, pstate));
+ tcg_gen_andi_i64(p, p, ~bits);
+ tcg_gen_st_i64(p, tcg_env, offsetof(CPUARMState, pstate));
}
/* If the singlestep state is Active-not-pending, advance to Active-pending. */
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 6d85149..ab18de8 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -7539,6 +7539,20 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w)
#endif
break;
+ case FEAT_7_0_EDX:
+ /*
+ * Windows does not like ARCH_CAPABILITIES on AMD machines at all.
+ * Do not show the fake ARCH_CAPABILITIES MSR that KVM sets up,
+ * except if needed for migration.
+ *
+ * When arch_cap_always_on is removed, this tweak can move to
+ * kvm_arch_get_supported_cpuid.
+ */
+ if (cpu && IS_AMD_CPU(&cpu->env) && !cpu->arch_cap_always_on) {
+ unavail = CPUID_7_0_EDX_ARCH_CAPABILITIES;
+ }
+ break;
+
default:
break;
}
@@ -7894,6 +7908,11 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
/* Fixup overflow: max value for bits 23-16 is 255. */
*ebx |= MIN(num, 255) << 16;
}
+ if (cpu->pdcm_on_even_without_pmu) {
+ if (!cpu->enable_pmu) {
+ *ecx &= ~CPUID_EXT_PDCM;
+ }
+ }
break;
case 2: { /* cache info: needed for Pentium Pro compatibility */
const CPUCaches *caches;
@@ -8944,9 +8963,11 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
}
}
- /* PDCM is fixed1 bit for TDX */
- if (!cpu->enable_pmu && !is_tdx_vm()) {
- env->features[FEAT_1_ECX] &= ~CPUID_EXT_PDCM;
+ if (!cpu->pdcm_on_even_without_pmu) {
+ /* PDCM is fixed1 bit for TDX */
+ if (!cpu->enable_pmu && !is_tdx_vm()) {
+ env->features[FEAT_1_ECX] &= ~CPUID_EXT_PDCM;
+ }
}
for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
@@ -10004,6 +10025,11 @@ static const Property x86_cpu_properties[] = {
true),
DEFINE_PROP_BOOL("x-l1-cache-per-thread", X86CPU, l1_cache_per_core, true),
DEFINE_PROP_BOOL("x-force-cpuid-0x1f", X86CPU, force_cpuid_0x1f, false),
+
+ DEFINE_PROP_BOOL("x-arch-cap-always-on", X86CPU,
+ arch_cap_always_on, false),
+ DEFINE_PROP_BOOL("x-pdcm-on-even-without-pmu", X86CPU,
+ pdcm_on_even_without_pmu, false),
};
#ifndef CONFIG_USER_ONLY
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index e0be7a7..8b7c173 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -435,9 +435,11 @@ typedef enum X86Seg {
#define MSR_SMI_COUNT 0x34
#define MSR_CORE_THREAD_COUNT 0x35
#define MSR_MTRRcap 0xfe
+#define MSR_MTRR_MEM_TYPE_WB 0x06
#define MSR_MTRRcap_VCNT 8
#define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8)
#define MSR_MTRRcap_WC_SUPPORTED (1 << 10)
+#define MSR_MTRR_ENABLE (1 << 11)
#define MSR_IA32_SYSENTER_CS 0x174
#define MSR_IA32_SYSENTER_ESP 0x175
@@ -2126,7 +2128,7 @@ typedef struct CPUArchState {
QEMUTimer *xen_periodic_timer;
QemuMutex xen_timers_lock;
#endif
-#if defined(CONFIG_HVF)
+#if defined(CONFIG_HVF) || defined(CONFIG_MSHV)
void *emu_mmio_buf;
#endif
@@ -2314,6 +2316,18 @@ struct ArchCPU {
/* Forcefully disable KVM PV features not exposed in guest CPUIDs */
bool kvm_pv_enforce_cpuid;
+ /*
+ * Expose arch-capabilities unconditionally even on AMD models, for backwards
+ * compatibility with QEMU <10.1.
+ */
+ bool arch_cap_always_on;
+
+ /*
+ * Backwards compatibility with QEMU <10.1. The PDCM feature is now disabled when
+ * PMU is not available, but prior to 10.1 it was enabled even if PMU is off.
+ */
+ bool pdcm_on_even_without_pmu;
+
/* Number of physical address bits supported */
uint32_t phys_bits;
diff --git a/target/i386/emulate/meson.build b/target/i386/emulate/meson.build
index 4edd4f4..b6dafb6 100644
--- a/target/i386/emulate/meson.build
+++ b/target/i386/emulate/meson.build
@@ -1,5 +1,8 @@
-i386_system_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files(
+emulator_files = files(
'x86_decode.c',
'x86_emu.c',
'x86_flags.c',
-))
+)
+
+i386_system_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: emulator_files)
+i386_system_ss.add(when: 'CONFIG_MSHV', if_true: emulator_files)
diff --git a/target/i386/emulate/x86_decode.c b/target/i386/emulate/x86_decode.c
index 2eca398..97bd6f1 100644
--- a/target/i386/emulate/x86_decode.c
+++ b/target/i386/emulate/x86_decode.c
@@ -71,10 +71,16 @@ static inline uint64_t decode_bytes(CPUX86State *env, struct x86_decode *decode,
VM_PANIC_EX("%s invalid size %d\n", __func__, size);
break;
}
- target_ulong va = linear_rip(env_cpu(env), env->eip) + decode->len;
- emul_ops->read_mem(env_cpu(env), &val, va, size);
+
+ /* copy the bytes from the instruction stream, if available */
+ if (decode->stream && decode->len + size <= decode->stream->len) {
+ memcpy(&val, decode->stream->bytes + decode->len, size);
+ } else {
+ target_ulong va = linear_rip(env_cpu(env), env->eip) + decode->len;
+ emul_ops->fetch_instruction(env_cpu(env), &val, va, size);
+ }
decode->len += size;
-
+
return val;
}
@@ -2076,9 +2082,10 @@ static void decode_opcodes(CPUX86State *env, struct x86_decode *decode)
}
}
-uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode)
+static uint32_t decode_opcode(CPUX86State *env, struct x86_decode *decode)
{
memset(decode, 0, sizeof(*decode));
+
decode_prefix(env, decode);
set_addressing_size(env, decode);
set_operand_size(env, decode);
@@ -2088,6 +2095,18 @@ uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode)
return decode->len;
}
+uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode)
+{
+ return decode_opcode(env, decode);
+}
+
+uint32_t decode_instruction_stream(CPUX86State *env, struct x86_decode *decode,
+ struct x86_insn_stream *stream)
+{
+ decode->stream = stream;
+ return decode_opcode(env, decode);
+}
+
void init_decoder(void)
{
int i;
diff --git a/target/i386/emulate/x86_decode.h b/target/i386/emulate/x86_decode.h
index 927645a..1cadf36 100644
--- a/target/i386/emulate/x86_decode.h
+++ b/target/i386/emulate/x86_decode.h
@@ -272,6 +272,11 @@ typedef struct x86_decode_op {
};
} x86_decode_op;
+typedef struct x86_insn_stream {
+ const uint8_t *bytes;
+ size_t len;
+} x86_insn_stream;
+
typedef struct x86_decode {
int len;
uint8_t opcode[4];
@@ -298,11 +303,15 @@ typedef struct x86_decode {
struct x86_modrm modrm;
struct x86_decode_op op[4];
bool is_fpu;
+
+ x86_insn_stream *stream;
} x86_decode;
uint64_t sign(uint64_t val, int size);
uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode);
+uint32_t decode_instruction_stream(CPUX86State *env, struct x86_decode *decode,
+ struct x86_insn_stream *stream);
void *get_reg_ref(CPUX86State *env, int reg, int rex_present,
int is_extended, int size);
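
A hedged usage sketch of the new stream-based decode entry point (the surrounding vCPU/emulator setup is omitted and the instruction bytes are arbitrary): callers that already hold the intercepted instruction bytes can decode from that buffer instead of re-fetching from guest memory.

    /* Sketch: decode "mov %ecx, %eax" (0x89 0xc8) from a local buffer. */
    uint8_t bytes[] = { 0x89, 0xc8 };
    x86_insn_stream stream = { .bytes = bytes, .len = sizeof(bytes) };
    struct x86_decode decode;

    decode_instruction_stream(env, &decode, &stream);  /* env: CPUX86State * */
    exec_instruction(env, &decode);
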
diff --git a/target/i386/emulate/x86_emu.c b/target/i386/emulate/x86_emu.c
index db7a7f7..4409f7b 100644
--- a/target/i386/emulate/x86_emu.c
+++ b/target/i386/emulate/x86_emu.c
@@ -1246,7 +1246,8 @@ static void init_cmd_handler(void)
bool exec_instruction(CPUX86State *env, struct x86_decode *ins)
{
if (!_cmd_handler[ins->cmd].handler) {
- printf("Unimplemented handler (" TARGET_FMT_lx ") for %d (%x %x) \n", env->eip,
+ printf("Unimplemented handler (" TARGET_FMT_lx ") for %d (%x %x)\n",
+ env->eip,
ins->cmd, ins->opcode[0],
ins->opcode_len > 1 ? ins->opcode[1] : 0);
env->eip += ins->len;
diff --git a/target/i386/emulate/x86_emu.h b/target/i386/emulate/x86_emu.h
index a1a9612..05686b1 100644
--- a/target/i386/emulate/x86_emu.h
+++ b/target/i386/emulate/x86_emu.h
@@ -24,6 +24,8 @@
#include "cpu.h"
struct x86_emul_ops {
+ void (*fetch_instruction)(CPUState *cpu, void *data, target_ulong addr,
+ int bytes);
void (*read_mem)(CPUState *cpu, void *data, target_ulong addr, int bytes);
void (*write_mem)(CPUState *cpu, void *data, target_ulong addr, int bytes);
void (*read_segment_descriptor)(CPUState *cpu, struct x86_segment_descriptor *desc,
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 6a3a1c1..db40caa 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -503,12 +503,8 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
* Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
* We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
* returned by KVM_GET_MSR_INDEX_LIST.
- *
- * But also, because Windows does not like ARCH_CAPABILITIES on AMD
- * mcahines at all, do not show the fake ARCH_CAPABILITIES MSR that
- * KVM sets up.
*/
- if (!has_msr_arch_capabs || !(edx & CPUID_7_0_EDX_ARCH_CAPABILITIES)) {
+ if (!has_msr_arch_capabs) {
ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
}
} else if (function == 7 && index == 1 && reg == R_EAX) {
diff --git a/target/i386/meson.build b/target/i386/meson.build
index 092af34..89ba491 100644
--- a/target/i386/meson.build
+++ b/target/i386/meson.build
@@ -13,6 +13,7 @@ i386_ss.add(when: 'CONFIG_KVM', if_true: files('host-cpu.c'))
i386_ss.add(when: 'CONFIG_HVF', if_true: files('host-cpu.c'))
i386_ss.add(when: 'CONFIG_WHPX', if_true: files('host-cpu.c'))
i386_ss.add(when: 'CONFIG_NVMM', if_true: files('host-cpu.c'))
+i386_ss.add(when: 'CONFIG_MSHV', if_true: files('host-cpu.c'))
i386_system_ss = ss.source_set()
i386_system_ss.add(files(
@@ -34,6 +35,7 @@ subdir('nvmm')
subdir('hvf')
subdir('tcg')
subdir('emulate')
+subdir('mshv')
target_arch += {'i386': i386_ss}
target_system_arch += {'i386': i386_system_ss}
diff --git a/target/i386/mshv/meson.build b/target/i386/mshv/meson.build
new file mode 100644
index 0000000..647e5da
--- /dev/null
+++ b/target/i386/mshv/meson.build
@@ -0,0 +1,8 @@
+i386_mshv_ss = ss.source_set()
+
+i386_mshv_ss.add(files(
+ 'mshv-cpu.c',
+ 'x86.c',
+))
+
+i386_system_ss.add_all(when: 'CONFIG_MSHV', if_true: i386_mshv_ss)
diff --git a/target/i386/mshv/mshv-cpu.c b/target/i386/mshv/mshv-cpu.c
new file mode 100644
index 0000000..1f7b9cb
--- /dev/null
+++ b/target/i386/mshv/mshv-cpu.c
@@ -0,0 +1,1763 @@
+/*
+ * QEMU MSHV support
+ *
+ * Copyright Microsoft, Corp. 2025
+ *
+ * Authors: Ziqiao Zhou <ziqiaozhou@microsoft.com>
+ * Magnus Kulke <magnuskulke@microsoft.com>
+ * Jinank Jain <jinankjain@microsoft.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qemu/memalign.h"
+#include "qemu/typedefs.h"
+
+#include "system/mshv.h"
+#include "system/mshv_int.h"
+#include "system/address-spaces.h"
+#include "linux/mshv.h"
+#include "hw/hyperv/hvgdk.h"
+#include "hw/hyperv/hvgdk_mini.h"
+#include "hw/hyperv/hvhdk_mini.h"
+#include "hw/i386/apic_internal.h"
+
+#include "cpu.h"
+#include "emulate/x86_decode.h"
+#include "emulate/x86_emu.h"
+#include "emulate/x86_flags.h"
+
+#include "trace-accel_mshv.h"
+#include "trace.h"
+
+#include <sys/ioctl.h>
+
+#define MAX_REGISTER_COUNT (MAX_CONST(ARRAY_SIZE(STANDARD_REGISTER_NAMES), \
+ MAX_CONST(ARRAY_SIZE(SPECIAL_REGISTER_NAMES), \
+ ARRAY_SIZE(FPU_REGISTER_NAMES))))
+
+static enum hv_register_name STANDARD_REGISTER_NAMES[18] = {
+ HV_X64_REGISTER_RAX,
+ HV_X64_REGISTER_RBX,
+ HV_X64_REGISTER_RCX,
+ HV_X64_REGISTER_RDX,
+ HV_X64_REGISTER_RSI,
+ HV_X64_REGISTER_RDI,
+ HV_X64_REGISTER_RSP,
+ HV_X64_REGISTER_RBP,
+ HV_X64_REGISTER_R8,
+ HV_X64_REGISTER_R9,
+ HV_X64_REGISTER_R10,
+ HV_X64_REGISTER_R11,
+ HV_X64_REGISTER_R12,
+ HV_X64_REGISTER_R13,
+ HV_X64_REGISTER_R14,
+ HV_X64_REGISTER_R15,
+ HV_X64_REGISTER_RIP,
+ HV_X64_REGISTER_RFLAGS,
+};
+
+static enum hv_register_name SPECIAL_REGISTER_NAMES[17] = {
+ HV_X64_REGISTER_CS,
+ HV_X64_REGISTER_DS,
+ HV_X64_REGISTER_ES,
+ HV_X64_REGISTER_FS,
+ HV_X64_REGISTER_GS,
+ HV_X64_REGISTER_SS,
+ HV_X64_REGISTER_TR,
+ HV_X64_REGISTER_LDTR,
+ HV_X64_REGISTER_GDTR,
+ HV_X64_REGISTER_IDTR,
+ HV_X64_REGISTER_CR0,
+ HV_X64_REGISTER_CR2,
+ HV_X64_REGISTER_CR3,
+ HV_X64_REGISTER_CR4,
+ HV_X64_REGISTER_CR8,
+ HV_X64_REGISTER_EFER,
+ HV_X64_REGISTER_APIC_BASE,
+};
+
+static enum hv_register_name FPU_REGISTER_NAMES[26] = {
+ HV_X64_REGISTER_XMM0,
+ HV_X64_REGISTER_XMM1,
+ HV_X64_REGISTER_XMM2,
+ HV_X64_REGISTER_XMM3,
+ HV_X64_REGISTER_XMM4,
+ HV_X64_REGISTER_XMM5,
+ HV_X64_REGISTER_XMM6,
+ HV_X64_REGISTER_XMM7,
+ HV_X64_REGISTER_XMM8,
+ HV_X64_REGISTER_XMM9,
+ HV_X64_REGISTER_XMM10,
+ HV_X64_REGISTER_XMM11,
+ HV_X64_REGISTER_XMM12,
+ HV_X64_REGISTER_XMM13,
+ HV_X64_REGISTER_XMM14,
+ HV_X64_REGISTER_XMM15,
+ HV_X64_REGISTER_FP_MMX0,
+ HV_X64_REGISTER_FP_MMX1,
+ HV_X64_REGISTER_FP_MMX2,
+ HV_X64_REGISTER_FP_MMX3,
+ HV_X64_REGISTER_FP_MMX4,
+ HV_X64_REGISTER_FP_MMX5,
+ HV_X64_REGISTER_FP_MMX6,
+ HV_X64_REGISTER_FP_MMX7,
+ HV_X64_REGISTER_FP_CONTROL_STATUS,
+ HV_X64_REGISTER_XMM_CONTROL_STATUS,
+};
+
+static int translate_gva(const CPUState *cpu, uint64_t gva, uint64_t *gpa,
+ uint64_t flags)
+{
+ int ret;
+ int cpu_fd = mshv_vcpufd(cpu);
+ int vp_index = cpu->cpu_index;
+
+ hv_input_translate_virtual_address in = { 0 };
+ hv_output_translate_virtual_address out = { 0 };
+ struct mshv_root_hvcall args = {0};
+ uint64_t gva_page = gva >> HV_HYP_PAGE_SHIFT;
+
+ in.vp_index = vp_index;
+ in.control_flags = flags;
+ in.gva_page = gva_page;
+
+ /* create the hvcall envelope */
+ args.code = HVCALL_TRANSLATE_VIRTUAL_ADDRESS;
+ args.in_sz = sizeof(in);
+ args.in_ptr = (uint64_t) &in;
+ args.out_sz = sizeof(out);
+ args.out_ptr = (uint64_t) &out;
+
+ /* perform the call */
+ ret = mshv_hvcall(cpu_fd, &args);
+ if (ret < 0) {
+ error_report("Failed to invoke gva->gpa translation");
+ return -errno;
+ }
+
+ if (out.translation_result.result_code != HV_TRANSLATE_GVA_SUCCESS) {
+ error_report("Failed to translate gva (" TARGET_FMT_lx ") to gpa", gva);
+ return -1;
+ }
+
+ *gpa = ((out.gpa_page << HV_HYP_PAGE_SHIFT)
+ | (gva & ~(uint64_t)HV_HYP_PAGE_MASK));
+
+ return 0;
+}
+
+int mshv_set_generic_regs(const CPUState *cpu, const hv_register_assoc *assocs,
+ size_t n_regs)
+{
+ int cpu_fd = mshv_vcpufd(cpu);
+ int vp_index = cpu->cpu_index;
+ size_t in_sz, assocs_sz;
+ hv_input_set_vp_registers *in = cpu->accel->hvcall_args.input_page;
+ struct mshv_root_hvcall args = {0};
+ int ret;
+
+ /* find out the size of the struct w/ a flexible array at the tail */
+ assocs_sz = n_regs * sizeof(hv_register_assoc);
+ in_sz = sizeof(hv_input_set_vp_registers) + assocs_sz;
+
+ /* fill the input struct */
+ memset(in, 0, sizeof(hv_input_set_vp_registers));
+ in->vp_index = vp_index;
+ memcpy(in->elements, assocs, assocs_sz);
+
+ /* create the hvcall envelope */
+ args.code = HVCALL_SET_VP_REGISTERS;
+ args.in_sz = in_sz;
+ args.in_ptr = (uint64_t) in;
+ args.reps = (uint16_t) n_regs;
+
+ /* perform the call */
+ ret = mshv_hvcall(cpu_fd, &args);
+ if (ret < 0) {
+ error_report("Failed to set registers");
+ return -1;
+ }
+
+ /* assert we set all registers */
+ if (args.reps != n_regs) {
+ error_report("Failed to set registers: expected %zu elements"
+ ", got %u", n_regs, args.reps);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int get_generic_regs(CPUState *cpu, hv_register_assoc *assocs,
+ size_t n_regs)
+{
+ int cpu_fd = mshv_vcpufd(cpu);
+ int vp_index = cpu->cpu_index;
+ hv_input_get_vp_registers *in = cpu->accel->hvcall_args.input_page;
+ hv_register_value *values = cpu->accel->hvcall_args.output_page;
+ size_t in_sz, names_sz, values_sz;
+ int i, ret;
+ struct mshv_root_hvcall args = {0};
+
+ /* find out the size of the struct w/ a flexible array at the tail */
+ names_sz = n_regs * sizeof(hv_register_name);
+ in_sz = sizeof(hv_input_get_vp_registers) + names_sz;
+
+ /* fill the input struct */
+ memset(in, 0, sizeof(hv_input_get_vp_registers));
+ in->vp_index = vp_index;
+ for (i = 0; i < n_regs; i++) {
+ in->names[i] = assocs[i].name;
+ }
+
+ /* determine size of value output buffer */
+ values_sz = n_regs * sizeof(union hv_register_value);
+
+ /* create the hvcall envelope */
+ args.code = HVCALL_GET_VP_REGISTERS;
+ args.in_sz = in_sz;
+ args.in_ptr = (uint64_t) in;
+ args.out_sz = values_sz;
+ args.out_ptr = (uint64_t) values;
+ args.reps = (uint16_t) n_regs;
+
+ /* perform the call */
+ ret = mshv_hvcall(cpu_fd, &args);
+ if (ret < 0) {
+ error_report("Failed to retrieve registers");
+ return -1;
+ }
+
+ /* assert we got all registers */
+ if (args.reps != n_regs) {
+ error_report("Failed to retrieve registers: expected %zu elements"
+ ", got %u", n_regs, args.reps);
+ return -1;
+ }
+
+ /* copy values into assoc */
+ for (i = 0; i < n_regs; i++) {
+ assocs[i].value = values[i];
+ }
+
+ return 0;
+}
+
+static int set_standard_regs(const CPUState *cpu)
+{
+ X86CPU *x86cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86cpu->env;
+ hv_register_assoc assocs[ARRAY_SIZE(STANDARD_REGISTER_NAMES)];
+ int ret;
+ size_t n_regs = ARRAY_SIZE(STANDARD_REGISTER_NAMES);
+
+ /* set names */
+ for (size_t i = 0; i < ARRAY_SIZE(STANDARD_REGISTER_NAMES); i++) {
+ assocs[i].name = STANDARD_REGISTER_NAMES[i];
+ }
+ assocs[0].value.reg64 = env->regs[R_EAX];
+ assocs[1].value.reg64 = env->regs[R_EBX];
+ assocs[2].value.reg64 = env->regs[R_ECX];
+ assocs[3].value.reg64 = env->regs[R_EDX];
+ assocs[4].value.reg64 = env->regs[R_ESI];
+ assocs[5].value.reg64 = env->regs[R_EDI];
+ assocs[6].value.reg64 = env->regs[R_ESP];
+ assocs[7].value.reg64 = env->regs[R_EBP];
+ assocs[8].value.reg64 = env->regs[R_R8];
+ assocs[9].value.reg64 = env->regs[R_R9];
+ assocs[10].value.reg64 = env->regs[R_R10];
+ assocs[11].value.reg64 = env->regs[R_R11];
+ assocs[12].value.reg64 = env->regs[R_R12];
+ assocs[13].value.reg64 = env->regs[R_R13];
+ assocs[14].value.reg64 = env->regs[R_R14];
+ assocs[15].value.reg64 = env->regs[R_R15];
+ assocs[16].value.reg64 = env->eip;
+ lflags_to_rflags(env);
+ assocs[17].value.reg64 = env->eflags;
+
+ ret = mshv_set_generic_regs(cpu, assocs, n_regs);
+ if (ret < 0) {
+ error_report("failed to set standard registers");
+ return -errno;
+ }
+ return 0;
+}
+
+int mshv_store_regs(CPUState *cpu)
+{
+ int ret;
+
+ ret = set_standard_regs(cpu);
+ if (ret < 0) {
+ error_report("Failed to store standard registers");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void populate_standard_regs(const hv_register_assoc *assocs,
+ CPUX86State *env)
+{
+ env->regs[R_EAX] = assocs[0].value.reg64;
+ env->regs[R_EBX] = assocs[1].value.reg64;
+ env->regs[R_ECX] = assocs[2].value.reg64;
+ env->regs[R_EDX] = assocs[3].value.reg64;
+ env->regs[R_ESI] = assocs[4].value.reg64;
+ env->regs[R_EDI] = assocs[5].value.reg64;
+ env->regs[R_ESP] = assocs[6].value.reg64;
+ env->regs[R_EBP] = assocs[7].value.reg64;
+ env->regs[R_R8] = assocs[8].value.reg64;
+ env->regs[R_R9] = assocs[9].value.reg64;
+ env->regs[R_R10] = assocs[10].value.reg64;
+ env->regs[R_R11] = assocs[11].value.reg64;
+ env->regs[R_R12] = assocs[12].value.reg64;
+ env->regs[R_R13] = assocs[13].value.reg64;
+ env->regs[R_R14] = assocs[14].value.reg64;
+ env->regs[R_R15] = assocs[15].value.reg64;
+
+ env->eip = assocs[16].value.reg64;
+ env->eflags = assocs[17].value.reg64;
+ rflags_to_lflags(env);
+}
+
+int mshv_get_standard_regs(CPUState *cpu)
+{
+ struct hv_register_assoc assocs[ARRAY_SIZE(STANDARD_REGISTER_NAMES)];
+ int ret;
+ X86CPU *x86cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86cpu->env;
+ size_t n_regs = ARRAY_SIZE(STANDARD_REGISTER_NAMES);
+
+ for (size_t i = 0; i < n_regs; i++) {
+ assocs[i].name = STANDARD_REGISTER_NAMES[i];
+ }
+ ret = get_generic_regs(cpu, assocs, n_regs);
+ if (ret < 0) {
+ error_report("failed to get standard registers");
+ return -1;
+ }
+
+ populate_standard_regs(assocs, env);
+ return 0;
+}
+
+static inline void populate_segment_reg(const hv_x64_segment_register *hv_seg,
+ SegmentCache *seg)
+{
+ memset(seg, 0, sizeof(SegmentCache));
+
+ seg->base = hv_seg->base;
+ seg->limit = hv_seg->limit;
+ seg->selector = hv_seg->selector;
+
+ seg->flags = (hv_seg->segment_type << DESC_TYPE_SHIFT)
+ | (hv_seg->present * DESC_P_MASK)
+ | (hv_seg->descriptor_privilege_level << DESC_DPL_SHIFT)
+ | (hv_seg->_default << DESC_B_SHIFT)
+ | (hv_seg->non_system_segment * DESC_S_MASK)
+ | (hv_seg->_long << DESC_L_SHIFT)
+ | (hv_seg->granularity * DESC_G_MASK)
+ | (hv_seg->available * DESC_AVL_MASK);
+
+}
+
+static inline void populate_table_reg(const hv_x64_table_register *hv_seg,
+ SegmentCache *tbl)
+{
+ memset(tbl, 0, sizeof(SegmentCache));
+
+ tbl->base = hv_seg->base;
+ tbl->limit = hv_seg->limit;
+}
+
+static void populate_special_regs(const hv_register_assoc *assocs,
+ X86CPU *x86cpu)
+{
+ CPUX86State *env = &x86cpu->env;
+
+ populate_segment_reg(&assocs[0].value.segment, &env->segs[R_CS]);
+ populate_segment_reg(&assocs[1].value.segment, &env->segs[R_DS]);
+ populate_segment_reg(&assocs[2].value.segment, &env->segs[R_ES]);
+ populate_segment_reg(&assocs[3].value.segment, &env->segs[R_FS]);
+ populate_segment_reg(&assocs[4].value.segment, &env->segs[R_GS]);
+ populate_segment_reg(&assocs[5].value.segment, &env->segs[R_SS]);
+
+ populate_segment_reg(&assocs[6].value.segment, &env->tr);
+ populate_segment_reg(&assocs[7].value.segment, &env->ldt);
+
+ populate_table_reg(&assocs[8].value.table, &env->gdt);
+ populate_table_reg(&assocs[9].value.table, &env->idt);
+
+ env->cr[0] = assocs[10].value.reg64;
+ env->cr[2] = assocs[11].value.reg64;
+ env->cr[3] = assocs[12].value.reg64;
+ env->cr[4] = assocs[13].value.reg64;
+
+ cpu_set_apic_tpr(x86cpu->apic_state, assocs[14].value.reg64);
+ env->efer = assocs[15].value.reg64;
+ cpu_set_apic_base(x86cpu->apic_state, assocs[16].value.reg64);
+}
+
+
+int mshv_get_special_regs(CPUState *cpu)
+{
+ struct hv_register_assoc assocs[ARRAY_SIZE(SPECIAL_REGISTER_NAMES)];
+ int ret;
+ X86CPU *x86cpu = X86_CPU(cpu);
+ size_t n_regs = ARRAY_SIZE(SPECIAL_REGISTER_NAMES);
+
+ for (size_t i = 0; i < n_regs; i++) {
+ assocs[i].name = SPECIAL_REGISTER_NAMES[i];
+ }
+ ret = get_generic_regs(cpu, assocs, n_regs);
+ if (ret < 0) {
+ error_report("failed to get special registers");
+ return -errno;
+ }
+
+ populate_special_regs(assocs, x86cpu);
+ return 0;
+}
+
+int mshv_load_regs(CPUState *cpu)
+{
+ int ret;
+
+ ret = mshv_get_standard_regs(cpu);
+ if (ret < 0) {
+ error_report("Failed to load standard registers");
+ return -1;
+ }
+
+ ret = mshv_get_special_regs(cpu);
+ if (ret < 0) {
+ error_report("Failed to load special registers");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void add_cpuid_entry(GList *cpuid_entries,
+ uint32_t function, uint32_t index,
+ uint32_t eax, uint32_t ebx,
+ uint32_t ecx, uint32_t edx)
+{
+ struct hv_cpuid_entry *entry;
+
+ entry = g_malloc0(sizeof(struct hv_cpuid_entry));
+ entry->function = function;
+ entry->index = index;
+ entry->eax = eax;
+ entry->ebx = ebx;
+ entry->ecx = ecx;
+ entry->edx = edx;
+
+ cpuid_entries = g_list_append(cpuid_entries, entry);
+}
+
+static void collect_cpuid_entries(const CPUState *cpu, GList *cpuid_entries)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ uint32_t eax, ebx, ecx, edx;
+ uint32_t leaf, subleaf;
+ size_t max_leaf = 0x1F;
+ size_t max_subleaf = 0x20;
+
+ uint32_t leaves_with_subleaves[] = {0x4, 0x7, 0xD, 0xF, 0x10};
+ int n_subleaf_leaves = ARRAY_SIZE(leaves_with_subleaves);
+
+ /* Regular leaves without subleaves */
+ for (leaf = 0; leaf <= max_leaf; leaf++) {
+ bool has_subleaves = false;
+ for (int i = 0; i < n_subleaf_leaves; i++) {
+ if (leaf == leaves_with_subleaves[i]) {
+ has_subleaves = true;
+ break;
+ }
+ }
+
+ if (!has_subleaves) {
+ cpu_x86_cpuid(env, leaf, 0, &eax, &ebx, &ecx, &edx);
+ if (eax == 0 && ebx == 0 && ecx == 0 && edx == 0) {
+ /* all zeroes indicates no more leaves */
+ continue;
+ }
+
+ add_cpuid_entry(cpuid_entries, leaf, 0, eax, ebx, ecx, edx);
+ continue;
+ }
+
+ subleaf = 0;
+ while (subleaf < max_subleaf) {
+ cpu_x86_cpuid(env, leaf, subleaf, &eax, &ebx, &ecx, &edx);
+
+ if (eax == 0 && ebx == 0 && ecx == 0 && edx == 0) {
+ /* all zeroes indicates no more leaves */
+ break;
+ }
+ add_cpuid_entry(cpuid_entries, leaf, 0, eax, ebx, ecx, edx);
+ subleaf++;
+ }
+ }
+}
+
+static int register_intercept_result_cpuid_entry(const CPUState *cpu,
+ uint8_t subleaf_specific,
+ uint8_t always_override,
+ struct hv_cpuid_entry *entry)
+{
+ int ret;
+ int vp_index = cpu->cpu_index;
+ int cpu_fd = mshv_vcpufd(cpu);
+
+ struct hv_register_x64_cpuid_result_parameters cpuid_params = {
+ .input.eax = entry->function,
+ .input.ecx = entry->index,
+ .input.subleaf_specific = subleaf_specific,
+ .input.always_override = always_override,
+ .input.padding = 0,
+ /*
+ * With regard to masks: these specify the bits to be overwritten.
+ * The current CpuidEntry structure does not allow carrying the masks
+ * in addition to the actual register values. For this reason, the
+ * masks are set to the exact values of the corresponding register bits
+ * to be registered for an overwrite. To view the resulting values the
+ * hypervisor would return, the HvCallGetVpCpuidValues hypercall can be
+ * used.
+ */
+ .result.eax = entry->eax,
+ .result.eax_mask = entry->eax,
+ .result.ebx = entry->ebx,
+ .result.ebx_mask = entry->ebx,
+ .result.ecx = entry->ecx,
+ .result.ecx_mask = entry->ecx,
+ .result.edx = entry->edx,
+ .result.edx_mask = entry->edx,
+ };
+ union hv_register_intercept_result_parameters parameters = {
+ .cpuid = cpuid_params,
+ };
+
+ hv_input_register_intercept_result in = {0};
+ in.vp_index = vp_index;
+ in.intercept_type = HV_INTERCEPT_TYPE_X64_CPUID;
+ in.parameters = parameters;
+
+ struct mshv_root_hvcall args = {0};
+ args.code = HVCALL_REGISTER_INTERCEPT_RESULT;
+ args.in_sz = sizeof(in);
+ args.in_ptr = (uint64_t)&in;
+
+ ret = mshv_hvcall(cpu_fd, &args);
+ if (ret < 0) {
+ error_report("failed to register intercept result for cpuid");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int register_intercept_result_cpuid(const CPUState *cpu,
+ struct hv_cpuid *cpuid)
+{
+ int ret = 0, entry_ret;
+ struct hv_cpuid_entry *entry;
+ uint8_t subleaf_specific, always_override;
+
+ for (size_t i = 0; i < cpuid->nent; i++) {
+ entry = &cpuid->entries[i];
+
+ /* set defaults */
+ subleaf_specific = 0;
+ always_override = 1;
+
+ /* Intel */
+ /* 0xb - Extended Topology Enumeration Leaf */
+ /* 0x1f - V2 Extended Topology Enumeration Leaf */
+ /* AMD */
+ /* 0x8000_001e - Processor Topology Information */
+ /* 0x8000_0026 - Extended CPU Topology */
+ if (entry->function == 0xb
+ || entry->function == 0x1f
+ || entry->function == 0x8000001e
+ || entry->function == 0x80000026) {
+ subleaf_specific = 1;
+ always_override = 1;
+ } else if (entry->function == 0x00000001
+ || entry->function == 0x80000000
+ || entry->function == 0x80000001
+ || entry->function == 0x80000008) {
+ subleaf_specific = 0;
+ always_override = 1;
+ }
+
+ entry_ret = register_intercept_result_cpuid_entry(cpu, subleaf_specific,
+ always_override,
+ entry);
+ if ((entry_ret < 0) && (ret == 0)) {
+ ret = entry_ret;
+ }
+ }
+
+ return ret;
+}
+
+static int set_cpuid2(const CPUState *cpu)
+{
+ int ret;
+ size_t n_entries, cpuid_size;
+ struct hv_cpuid *cpuid;
+ struct hv_cpuid_entry *entry;
+ GList *entries = NULL;
+
+ collect_cpuid_entries(cpu, entries);
+ n_entries = g_list_length(entries);
+
+ cpuid_size = sizeof(struct hv_cpuid)
+ + n_entries * sizeof(struct hv_cpuid_entry);
+
+ cpuid = g_malloc0(cpuid_size);
+ cpuid->nent = n_entries;
+ cpuid->padding = 0;
+
+ for (size_t i = 0; i < n_entries; i++) {
+ entry = g_list_nth_data(entries, i);
+ cpuid->entries[i] = *entry;
+ g_free(entry);
+ }
+ g_list_free(entries);
+
+ ret = register_intercept_result_cpuid(cpu, cpuid);
+ g_free(cpuid);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline void populate_hv_segment_reg(SegmentCache *seg,
+ hv_x64_segment_register *hv_reg)
+{
+ uint32_t flags = seg->flags;
+
+ hv_reg->base = seg->base;
+ hv_reg->limit = seg->limit;
+ hv_reg->selector = seg->selector;
+ hv_reg->segment_type = (flags >> DESC_TYPE_SHIFT) & 0xF;
+ hv_reg->non_system_segment = (flags & DESC_S_MASK) != 0;
+ hv_reg->descriptor_privilege_level = (flags >> DESC_DPL_SHIFT) & 0x3;
+ hv_reg->present = (flags & DESC_P_MASK) != 0;
+ hv_reg->reserved = 0;
+ hv_reg->available = (flags & DESC_AVL_MASK) != 0;
+ hv_reg->_long = (flags >> DESC_L_SHIFT) & 0x1;
+ hv_reg->_default = (flags >> DESC_B_SHIFT) & 0x1;
+ hv_reg->granularity = (flags & DESC_G_MASK) != 0;
+}
+
+static inline void populate_hv_table_reg(const struct SegmentCache *seg,
+ hv_x64_table_register *hv_reg)
+{
+ memset(hv_reg, 0, sizeof(*hv_reg));
+
+ hv_reg->base = seg->base;
+ hv_reg->limit = seg->limit;
+}
+
+static int set_special_regs(const CPUState *cpu)
+{
+ X86CPU *x86cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86cpu->env;
+ struct hv_register_assoc assocs[ARRAY_SIZE(SPECIAL_REGISTER_NAMES)];
+ size_t n_regs = ARRAY_SIZE(SPECIAL_REGISTER_NAMES);
+ int ret;
+
+ /* set names */
+ for (size_t i = 0; i < n_regs; i++) {
+ assocs[i].name = SPECIAL_REGISTER_NAMES[i];
+ }
+ populate_hv_segment_reg(&env->segs[R_CS], &assocs[0].value.segment);
+ populate_hv_segment_reg(&env->segs[R_DS], &assocs[1].value.segment);
+ populate_hv_segment_reg(&env->segs[R_ES], &assocs[2].value.segment);
+ populate_hv_segment_reg(&env->segs[R_FS], &assocs[3].value.segment);
+ populate_hv_segment_reg(&env->segs[R_GS], &assocs[4].value.segment);
+ populate_hv_segment_reg(&env->segs[R_SS], &assocs[5].value.segment);
+ populate_hv_segment_reg(&env->tr, &assocs[6].value.segment);
+ populate_hv_segment_reg(&env->ldt, &assocs[7].value.segment);
+
+ populate_hv_table_reg(&env->gdt, &assocs[8].value.table);
+ populate_hv_table_reg(&env->idt, &assocs[9].value.table);
+
+ assocs[10].value.reg64 = env->cr[0];
+ assocs[11].value.reg64 = env->cr[2];
+ assocs[12].value.reg64 = env->cr[3];
+ assocs[13].value.reg64 = env->cr[4];
+ assocs[14].value.reg64 = cpu_get_apic_tpr(x86cpu->apic_state);
+ assocs[15].value.reg64 = env->efer;
+ assocs[16].value.reg64 = cpu_get_apic_base(x86cpu->apic_state);
+
+ ret = mshv_set_generic_regs(cpu, assocs, n_regs);
+ if (ret < 0) {
+ error_report("failed to set special registers");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int set_fpu(const CPUState *cpu, const struct MshvFPU *regs)
+{
+ struct hv_register_assoc assocs[ARRAY_SIZE(FPU_REGISTER_NAMES)];
+ union hv_register_value *value;
+ size_t fp_i;
+ union hv_x64_fp_control_status_register *ctrl_status;
+ union hv_x64_xmm_control_status_register *xmm_ctrl_status;
+ int ret;
+ size_t n_regs = ARRAY_SIZE(FPU_REGISTER_NAMES);
+
+ /* first 16 registers are xmm0-xmm15 */
+ for (size_t i = 0; i < 16; i++) {
+ assocs[i].name = FPU_REGISTER_NAMES[i];
+ value = &assocs[i].value;
+ memcpy(&value->reg128, &regs->xmm[i], 16);
+ }
+
+ /* next 8 registers are fp_mmx0-fp_mmx7 */
+ for (size_t i = 16; i < 24; i++) {
+ assocs[i].name = FPU_REGISTER_NAMES[i];
+ fp_i = (i - 16);
+ value = &assocs[i].value;
+ memcpy(&value->reg128, &regs->fpr[fp_i], 16);
+ }
+
+ /* last two registers are fp_control_status and xmm_control_status */
+ assocs[24].name = FPU_REGISTER_NAMES[24];
+ value = &assocs[24].value;
+ ctrl_status = &value->fp_control_status;
+ ctrl_status->fp_control = regs->fcw;
+ ctrl_status->fp_status = regs->fsw;
+ ctrl_status->fp_tag = regs->ftwx;
+ ctrl_status->reserved = 0;
+ ctrl_status->last_fp_op = regs->last_opcode;
+ ctrl_status->last_fp_rip = regs->last_ip;
+
+ assocs[25].name = FPU_REGISTER_NAMES[25];
+ value = &assocs[25].value;
+ xmm_ctrl_status = &value->xmm_control_status;
+ xmm_ctrl_status->xmm_status_control = regs->mxcsr;
+ xmm_ctrl_status->xmm_status_control_mask = 0;
+ xmm_ctrl_status->last_fp_rdp = regs->last_dp;
+
+ ret = mshv_set_generic_regs(cpu, assocs, n_regs);
+ if (ret < 0) {
+ error_report("failed to set fpu registers");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int set_xc_reg(const CPUState *cpu, uint64_t xcr0)
+{
+ int ret;
+ struct hv_register_assoc assoc = {
+ .name = HV_X64_REGISTER_XFEM,
+ .value.reg64 = xcr0,
+ };
+
+ ret = mshv_set_generic_regs(cpu, &assoc, 1);
+ if (ret < 0) {
+ error_report("failed to set xcr0");
+ return -errno;
+ }
+ return 0;
+}
+
+static int set_cpu_state(const CPUState *cpu, const MshvFPU *fpu_regs,
+ uint64_t xcr0)
+{
+ int ret;
+
+ ret = set_standard_regs(cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ ret = set_special_regs(cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ ret = set_fpu(cpu, fpu_regs);
+ if (ret < 0) {
+ return ret;
+ }
+ ret = set_xc_reg(cpu, xcr0);
+ if (ret < 0) {
+ return ret;
+ }
+ return 0;
+}
+
+static int get_vp_state(int cpu_fd, struct mshv_get_set_vp_state *state)
+{
+ int ret;
+
+ ret = ioctl(cpu_fd, MSHV_GET_VP_STATE, state);
+ if (ret < 0) {
+        error_report("failed to get vp state: %s", strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int get_lapic(int cpu_fd,
+ struct hv_local_interrupt_controller_state *state)
+{
+ int ret;
+ size_t size = 4096;
+ /* buffer aligned to 4k, as *state requires that */
+ void *buffer = qemu_memalign(size, size);
+ struct mshv_get_set_vp_state mshv_state = { 0 };
+
+ mshv_state.buf_ptr = (uint64_t) buffer;
+ mshv_state.buf_sz = size;
+ mshv_state.type = MSHV_VP_STATE_LAPIC;
+
+ ret = get_vp_state(cpu_fd, &mshv_state);
+ if (ret == 0) {
+ memcpy(state, buffer, sizeof(*state));
+ }
+ qemu_vfree(buffer);
+ if (ret < 0) {
+ error_report("failed to get lapic");
+ return -1;
+ }
+
+ return 0;
+}
+
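+/*
+ * Bits 10:8 of an APIC LVT register hold the delivery mode; replace that
+ * field and leave the rest of the register untouched.
+ */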
+static uint32_t set_apic_delivery_mode(uint32_t reg, uint32_t mode)
+{
+ return ((reg) & ~0x700) | ((mode) << 8);
+}
+
+static int set_vp_state(int cpu_fd, const struct mshv_get_set_vp_state *state)
+{
+ int ret;
+
+ ret = ioctl(cpu_fd, MSHV_SET_VP_STATE, state);
+ if (ret < 0) {
+        error_report("failed to set vp state: %s", strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int set_lapic(int cpu_fd,
+ const struct hv_local_interrupt_controller_state *state)
+{
+ int ret;
+ size_t size = 4096;
+ /* buffer aligned to 4k, as *state requires that */
+ void *buffer = qemu_memalign(size, size);
+ struct mshv_get_set_vp_state mshv_state = { 0 };
+
+ if (!state) {
+ error_report("lapic state is NULL");
+ return -1;
+ }
+ memcpy(buffer, state, sizeof(*state));
+
+ mshv_state.buf_ptr = (uint64_t) buffer;
+ mshv_state.buf_sz = size;
+ mshv_state.type = MSHV_VP_STATE_LAPIC;
+
+ ret = set_vp_state(cpu_fd, &mshv_state);
+ qemu_vfree(buffer);
+ if (ret < 0) {
+ error_report("failed to set lapic: %s", strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int set_lint(int cpu_fd)
+{
+ int ret;
+ uint32_t *lvt_lint0, *lvt_lint1;
+
+ struct hv_local_interrupt_controller_state lapic_state = { 0 };
+ ret = get_lapic(cpu_fd, &lapic_state);
+ if (ret < 0) {
+ return ret;
+ }
+
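+    /* route LINT0 as ExtINT and LINT1 as NMI, the conventional legacy wiring */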
+ lvt_lint0 = &lapic_state.apic_lvt_lint0;
+ *lvt_lint0 = set_apic_delivery_mode(*lvt_lint0, APIC_DM_EXTINT);
+
+ lvt_lint1 = &lapic_state.apic_lvt_lint1;
+ *lvt_lint1 = set_apic_delivery_mode(*lvt_lint1, APIC_DM_NMI);
+
+ /* TODO: should we skip setting lapic if the values are the same? */
+
+ return set_lapic(cpu_fd, &lapic_state);
+}
+
+static int setup_msrs(const CPUState *cpu)
+{
+ int ret;
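+    /* enable MTRRs and use write-back as the default memory type */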
+ uint64_t default_type = MSR_MTRR_ENABLE | MSR_MTRR_MEM_TYPE_WB;
+
+ /* boot msr entries */
+ MshvMsrEntry msrs[9] = {
+ { .index = IA32_MSR_SYSENTER_CS, .data = 0x0, },
+ { .index = IA32_MSR_SYSENTER_ESP, .data = 0x0, },
+ { .index = IA32_MSR_SYSENTER_EIP, .data = 0x0, },
+ { .index = IA32_MSR_STAR, .data = 0x0, },
+ { .index = IA32_MSR_CSTAR, .data = 0x0, },
+ { .index = IA32_MSR_LSTAR, .data = 0x0, },
+ { .index = IA32_MSR_KERNEL_GS_BASE, .data = 0x0, },
+ { .index = IA32_MSR_SFMASK, .data = 0x0, },
+ { .index = IA32_MSR_MTRR_DEF_TYPE, .data = default_type, },
+ };
+
+    ret = mshv_configure_msr(cpu, msrs, ARRAY_SIZE(msrs));
+ if (ret < 0) {
+ error_report("failed to setup msrs");
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * TODO: populate topology info:
+ *
+ * X86CPU *x86cpu = X86_CPU(cpu);
+ * CPUX86State *env = &x86cpu->env;
+ * X86CPUTopoInfo *topo_info = &env->topo_info;
+ */
+int mshv_configure_vcpu(const CPUState *cpu, const struct MshvFPU *fpu,
+ uint64_t xcr0)
+{
+ int ret;
+ int cpu_fd = mshv_vcpufd(cpu);
+
+ ret = set_cpuid2(cpu);
+ if (ret < 0) {
+ error_report("failed to set cpuid");
+ return -1;
+ }
+
+ ret = setup_msrs(cpu);
+ if (ret < 0) {
+ error_report("failed to setup msrs");
+ return -1;
+ }
+
+ ret = set_cpu_state(cpu, fpu, xcr0);
+ if (ret < 0) {
+ error_report("failed to set cpu state");
+ return -1;
+ }
+
+ ret = set_lint(cpu_fd);
+ if (ret < 0) {
+        error_report("failed to set lapic lint");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int put_regs(const CPUState *cpu)
+{
+ X86CPU *x86cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86cpu->env;
+ MshvFPU fpu = {0};
+ int ret;
+
+ ret = mshv_configure_vcpu(cpu, &fpu, env->xcr0);
+ if (ret < 0) {
+ error_report("failed to configure vcpu");
+ return ret;
+ }
+
+ return 0;
+}
+
+struct MsrPair {
+ uint32_t index;
+ uint64_t value;
+};
+
+static int put_msrs(const CPUState *cpu)
+{
+ int ret = 0;
+ X86CPU *x86cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86cpu->env;
+ MshvMsrEntries *msrs = g_malloc0(sizeof(MshvMsrEntries));
+
+ struct MsrPair pairs[] = {
+ { MSR_IA32_SYSENTER_CS, env->sysenter_cs },
+ { MSR_IA32_SYSENTER_ESP, env->sysenter_esp },
+ { MSR_IA32_SYSENTER_EIP, env->sysenter_eip },
+ { MSR_EFER, env->efer },
+ { MSR_PAT, env->pat },
+ { MSR_STAR, env->star },
+ { MSR_CSTAR, env->cstar },
+ { MSR_LSTAR, env->lstar },
+ { MSR_KERNELGSBASE, env->kernelgsbase },
+ { MSR_FMASK, env->fmask },
+ { MSR_MTRRdefType, env->mtrr_deftype },
+ { MSR_VM_HSAVE_PA, env->vm_hsave },
+ { MSR_SMI_COUNT, env->msr_smi_count },
+ { MSR_IA32_PKRS, env->pkrs },
+ { MSR_IA32_BNDCFGS, env->msr_bndcfgs },
+ { MSR_IA32_XSS, env->xss },
+ { MSR_IA32_UMWAIT_CONTROL, env->umwait },
+ { MSR_IA32_TSX_CTRL, env->tsx_ctrl },
+ { MSR_AMD64_TSC_RATIO, env->amd_tsc_scale_msr },
+ { MSR_TSC_AUX, env->tsc_aux },
+ { MSR_TSC_ADJUST, env->tsc_adjust },
+ { MSR_IA32_SMBASE, env->smbase },
+ { MSR_IA32_SPEC_CTRL, env->spec_ctrl },
+ { MSR_VIRT_SSBD, env->virt_ssbd },
+ };
+
+ if (ARRAY_SIZE(pairs) > MSHV_MSR_ENTRIES_COUNT) {
+ error_report("MSR entries exceed maximum size");
+ g_free(msrs);
+ return -1;
+ }
+
+ for (size_t i = 0; i < ARRAY_SIZE(pairs); i++) {
+ MshvMsrEntry *entry = &msrs->entries[i];
+ entry->index = pairs[i].index;
+ entry->reserved = 0;
+ entry->data = pairs[i].value;
+ msrs->nmsrs++;
+ }
+
+ ret = mshv_configure_msr(cpu, &msrs->entries[0], msrs->nmsrs);
+ g_free(msrs);
+ return ret;
+}
+
+
+int mshv_arch_put_registers(const CPUState *cpu)
+{
+ int ret;
+
+ ret = put_regs(cpu);
+ if (ret < 0) {
+ error_report("Failed to put registers");
+ return -1;
+ }
+
+ ret = put_msrs(cpu);
+ if (ret < 0) {
+ error_report("Failed to put msrs");
+ return -1;
+ }
+
+ return 0;
+}
+
+void mshv_arch_amend_proc_features(
+ union hv_partition_synthetic_processor_features *features)
+{
+ features->access_guest_idle_reg = 1;
+}
+
+static int set_memory_info(const struct hyperv_message *msg,
+ struct hv_x64_memory_intercept_message *info)
+{
+ if (msg->header.message_type != HVMSG_GPA_INTERCEPT
+ && msg->header.message_type != HVMSG_UNMAPPED_GPA
+ && msg->header.message_type != HVMSG_UNACCEPTED_GPA) {
+ error_report("invalid message type");
+ return -1;
+ }
+ memcpy(info, msg->payload, sizeof(*info));
+
+ return 0;
+}
+
+static int emulate_instruction(CPUState *cpu,
+ const uint8_t *insn_bytes, size_t insn_len,
+ uint64_t gva, uint64_t gpa)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ struct x86_decode decode = { 0 };
+ int ret;
+ x86_insn_stream stream = { .bytes = insn_bytes, .len = insn_len };
+
+ ret = mshv_load_regs(cpu);
+ if (ret < 0) {
+ error_report("failed to load registers");
+ return -1;
+ }
+
+ decode_instruction_stream(env, &decode, &stream);
+ exec_instruction(env, &decode);
+
+ ret = mshv_store_regs(cpu);
+ if (ret < 0) {
+ error_report("failed to store registers");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int handle_mmio(CPUState *cpu, const struct hyperv_message *msg,
+ MshvVmExit *exit_reason)
+{
+ struct hv_x64_memory_intercept_message info = { 0 };
+ size_t insn_len;
+ uint8_t access_type;
+ uint8_t *instruction_bytes;
+ int ret;
+
+ ret = set_memory_info(msg, &info);
+ if (ret < 0) {
+ error_report("failed to convert message to memory info");
+ return -1;
+ }
+ insn_len = info.instruction_byte_count;
+ access_type = info.header.intercept_access_type;
+
+ if (access_type == HV_X64_INTERCEPT_ACCESS_TYPE_EXECUTE) {
+ error_report("invalid intercept access type: execute");
+ return -1;
+ }
+
+ if (insn_len > 16) {
+ error_report("invalid mmio instruction length: %zu", insn_len);
+ return -1;
+ }
+
+ trace_mshv_handle_mmio(info.guest_virtual_address,
+ info.guest_physical_address,
+ info.instruction_byte_count, access_type);
+
+ instruction_bytes = info.instruction_bytes;
+
+ ret = emulate_instruction(cpu, instruction_bytes, insn_len,
+ info.guest_virtual_address,
+ info.guest_physical_address);
+ if (ret < 0) {
+ error_report("failed to emulate mmio");
+ return -1;
+ }
+
+ *exit_reason = MshvVmExitIgnore;
+
+ return 0;
+}
+
+static int handle_unmapped_mem(int vm_fd, CPUState *cpu,
+ const struct hyperv_message *msg,
+ MshvVmExit *exit_reason)
+{
+ struct hv_x64_memory_intercept_message info = { 0 };
+ uint64_t gpa;
+ int ret;
+ enum MshvRemapResult remap_result;
+
+ ret = set_memory_info(msg, &info);
+ if (ret < 0) {
+ error_report("failed to convert message to memory info");
+ return -1;
+ }
+
+ gpa = info.guest_physical_address;
+
+ /* attempt to remap the region, in case of overlapping userspace mappings */
+ remap_result = mshv_remap_overlap_region(vm_fd, gpa);
+ *exit_reason = MshvVmExitIgnore;
+
+ switch (remap_result) {
+ case MshvRemapNoMapping:
+ /* if we didn't find a mapping, it is probably mmio */
+ return handle_mmio(cpu, msg, exit_reason);
+ case MshvRemapOk:
+ break;
+ case MshvRemapNoOverlap:
+        /* This should not happen, but tolerate it */
+ warn_report("found no overlap for unmapped region");
+ *exit_reason = MshvVmExitSpecial;
+ break;
+ }
+
+ return 0;
+}
+
+static int set_ioport_info(const struct hyperv_message *msg,
+ hv_x64_io_port_intercept_message *info)
+{
+ if (msg->header.message_type != HVMSG_X64_IO_PORT_INTERCEPT) {
+ error_report("Invalid message type");
+ return -1;
+ }
+ memcpy(info, msg->payload, sizeof(*info));
+
+ return 0;
+}
+
+static int set_x64_registers(const CPUState *cpu, const uint32_t *names,
+ const uint64_t *values)
+{
+
+ hv_register_assoc assocs[2];
+ int ret;
+
+ for (size_t i = 0; i < ARRAY_SIZE(assocs); i++) {
+ assocs[i].name = names[i];
+ assocs[i].value.reg64 = values[i];
+ }
+
+ ret = mshv_set_generic_regs(cpu, assocs, ARRAY_SIZE(assocs));
+ if (ret < 0) {
+ error_report("failed to set x64 registers");
+ return -1;
+ }
+
+ return 0;
+}
+
+static inline MemTxAttrs get_mem_attrs(bool is_secure_mode)
+{
+ MemTxAttrs memattr = {0};
+ memattr.secure = is_secure_mode;
+ return memattr;
+}
+
+static void pio_read(uint64_t port, uint8_t *data, uintptr_t size,
+ bool is_secure_mode)
+{
+ int ret = 0;
+ MemTxAttrs memattr = get_mem_attrs(is_secure_mode);
+ ret = address_space_rw(&address_space_io, port, memattr, (void *)data, size,
+ false);
+ if (ret != MEMTX_OK) {
+        error_report("Failed to read from port 0x%" PRIx64 ": %d", port, ret);
+ abort();
+ }
+}
+
+static int pio_write(uint64_t port, const uint8_t *data, uintptr_t size,
+ bool is_secure_mode)
+{
+    MemTxResult ret;
+    MemTxAttrs memattr = get_mem_attrs(is_secure_mode);
+    ret = address_space_rw(&address_space_io, port, memattr, (void *)data, size,
+                           true);
+    /* callers treat a negative value as failure, so fold MemTx errors into -1 */
+    return ret == MEMTX_OK ? 0 : -1;
+}
+
+static int handle_pio_non_str(const CPUState *cpu,
+ hv_x64_io_port_intercept_message *info)
+{
+ size_t len = info->access_info.access_size;
+ uint8_t access_type = info->header.intercept_access_type;
+ int ret;
+ uint32_t val, eax;
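+    /* mask selecting the low 'len' bytes of EAX */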
+ const uint32_t eax_mask = 0xffffffffu >> (32 - len * 8);
+ size_t insn_len;
+ uint64_t rip, rax;
+ uint32_t reg_names[2];
+ uint64_t reg_values[2];
+ uint16_t port = info->port_number;
+
+ if (access_type == HV_X64_INTERCEPT_ACCESS_TYPE_WRITE) {
+ union {
+ uint32_t u32;
+ uint8_t bytes[4];
+ } conv;
+
+ /* convert the first 4 bytes of rax to bytes */
+ conv.u32 = (uint32_t)info->rax;
+ /* secure mode is set to false */
+ ret = pio_write(port, conv.bytes, len, false);
+ if (ret < 0) {
+ error_report("Failed to write to io port");
+ return -1;
+ }
+ } else {
+ uint8_t data[4] = { 0 };
+ /* secure mode is set to false */
+ pio_read(info->port_number, data, len, false);
+
+ /* Preserve high bits in EAX, but clear out high bits in RAX */
+ val = *(uint32_t *)data;
+ eax = (((uint32_t)info->rax) & ~eax_mask) | (val & eax_mask);
+ info->rax = (uint64_t)eax;
+ }
+
+ insn_len = info->header.instruction_length;
+
+ /* Advance RIP and update RAX */
+ rip = info->header.rip + insn_len;
+ rax = info->rax;
+
+ reg_names[0] = HV_X64_REGISTER_RIP;
+ reg_values[0] = rip;
+ reg_names[1] = HV_X64_REGISTER_RAX;
+ reg_values[1] = rax;
+
+ ret = set_x64_registers(cpu, reg_names, reg_values);
+ if (ret < 0) {
+ error_report("Failed to set x64 registers");
+ return -1;
+ }
+
+ cpu->accel->dirty = false;
+
+ return 0;
+}
+
+static int fetch_guest_state(CPUState *cpu)
+{
+ int ret;
+
+ ret = mshv_get_standard_regs(cpu);
+ if (ret < 0) {
+ error_report("Failed to get standard registers");
+ return -1;
+ }
+
+ ret = mshv_get_special_regs(cpu);
+ if (ret < 0) {
+ error_report("Failed to get special registers");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int read_memory(const CPUState *cpu, uint64_t initial_gva,
+ uint64_t initial_gpa, uint64_t gva, uint8_t *data,
+ size_t len)
+{
+ int ret;
+ uint64_t gpa, flags;
+
+    if (gva == initial_gva) {
+        gpa = initial_gpa;
+    } else {
+        flags = HV_TRANSLATE_GVA_VALIDATE_READ;
+        ret = translate_gva(cpu, gva, &gpa, flags);
+        if (ret < 0) {
+            return -1;
+        }
+    }
+
+    ret = mshv_guest_mem_read(gpa, data, len, false, false);
+    if (ret < 0) {
+        error_report("failed to read guest mem");
+        return -1;
+    }
+
+ return 0;
+}
+
+static int write_memory(const CPUState *cpu, uint64_t initial_gva,
+ uint64_t initial_gpa, uint64_t gva, const uint8_t *data,
+ size_t len)
+{
+ int ret;
+ uint64_t gpa, flags;
+
+ if (gva == initial_gva) {
+ gpa = initial_gpa;
+ } else {
+ flags = HV_TRANSLATE_GVA_VALIDATE_WRITE;
+ ret = translate_gva(cpu, gva, &gpa, flags);
+ if (ret < 0) {
+ error_report("failed to translate gva to gpa");
+ return -1;
+ }
+ }
+ ret = mshv_guest_mem_write(gpa, data, len, false);
+ if (ret != MEMTX_OK) {
+ error_report("failed to write to mmio");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int handle_pio_str_write(CPUState *cpu,
+ hv_x64_io_port_intercept_message *info,
+ size_t repeat, uint16_t port,
+ bool direction_flag)
+{
+ int ret;
+ uint64_t src;
+ uint8_t data[4] = { 0 };
+ size_t len = info->access_info.access_size;
+
+ src = linear_addr(cpu, info->rsi, R_DS);
+
+ for (size_t i = 0; i < repeat; i++) {
+ ret = read_memory(cpu, 0, 0, src, data, len);
+ if (ret < 0) {
+ error_report("Failed to read memory");
+ return -1;
+ }
+ ret = pio_write(port, data, len, false);
+ if (ret < 0) {
+ error_report("Failed to write to io port");
+ return -1;
+ }
+ src += direction_flag ? -len : len;
+ info->rsi += direction_flag ? -len : len;
+ }
+
+ return 0;
+}
+
+static int handle_pio_str_read(CPUState *cpu,
+ hv_x64_io_port_intercept_message *info,
+ size_t repeat, uint16_t port,
+ bool direction_flag)
+{
+ int ret;
+ uint64_t dst;
+ size_t len = info->access_info.access_size;
+ uint8_t data[4] = { 0 };
+
+ dst = linear_addr(cpu, info->rdi, R_ES);
+
+ for (size_t i = 0; i < repeat; i++) {
+ pio_read(port, data, len, false);
+
+ ret = write_memory(cpu, 0, 0, dst, data, len);
+ if (ret < 0) {
+ error_report("Failed to write memory");
+ return -1;
+ }
+ dst += direction_flag ? -len : len;
+ info->rdi += direction_flag ? -len : len;
+ }
+
+ return 0;
+}
+
+static int handle_pio_str(CPUState *cpu, hv_x64_io_port_intercept_message *info)
+{
+ uint8_t access_type = info->header.intercept_access_type;
+ uint16_t port = info->port_number;
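+    /* a REP prefix repeats the string operation RCX times */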
+ bool repop = info->access_info.rep_prefix == 1;
+ size_t repeat = repop ? info->rcx : 1;
+ size_t insn_len = info->header.instruction_length;
+ bool direction_flag;
+ uint32_t reg_names[3];
+ uint64_t reg_values[3];
+ int ret;
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ ret = fetch_guest_state(cpu);
+ if (ret < 0) {
+ error_report("Failed to fetch guest state");
+ return -1;
+ }
+
+    direction_flag = (env->eflags & DF_MASK) != 0;
+
+ if (access_type == HV_X64_INTERCEPT_ACCESS_TYPE_WRITE) {
+ ret = handle_pio_str_write(cpu, info, repeat, port, direction_flag);
+ if (ret < 0) {
+ error_report("Failed to handle pio str write");
+ return -1;
+ }
+ reg_names[0] = HV_X64_REGISTER_RSI;
+ reg_values[0] = info->rsi;
+ } else {
+        ret = handle_pio_str_read(cpu, info, repeat, port, direction_flag);
+        if (ret < 0) {
+            error_report("Failed to handle pio str read");
+            return -1;
+        }
+ reg_names[0] = HV_X64_REGISTER_RDI;
+ reg_values[0] = info->rdi;
+ }
+
+ reg_names[1] = HV_X64_REGISTER_RIP;
+ reg_values[1] = info->header.rip + insn_len;
+ reg_names[2] = HV_X64_REGISTER_RAX;
+ reg_values[2] = info->rax;
+
+ ret = set_x64_registers(cpu, reg_names, reg_values);
+ if (ret < 0) {
+ error_report("Failed to set x64 registers");
+ return -1;
+ }
+
+ cpu->accel->dirty = false;
+
+ return 0;
+}
+
+static int handle_pio(CPUState *cpu, const struct hyperv_message *msg)
+{
+ struct hv_x64_io_port_intercept_message info = { 0 };
+ int ret;
+
+ ret = set_ioport_info(msg, &info);
+ if (ret < 0) {
+ error_report("Failed to convert message to ioport info");
+ return -1;
+ }
+
+ if (info.access_info.string_op) {
+ return handle_pio_str(cpu, &info);
+ }
+
+ return handle_pio_non_str(cpu, &info);
+}
+
+int mshv_run_vcpu(int vm_fd, CPUState *cpu, hv_message *msg, MshvVmExit *exit)
+{
+ int ret;
+ enum MshvVmExit exit_reason;
+ int cpu_fd = mshv_vcpufd(cpu);
+
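+    /* run the vCPU until the hypervisor delivers the next intercept message */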
+ ret = ioctl(cpu_fd, MSHV_RUN_VP, msg);
+ if (ret < 0) {
+ return MshvVmExitShutdown;
+ }
+
+ switch (msg->header.message_type) {
+ case HVMSG_UNRECOVERABLE_EXCEPTION:
+ return MshvVmExitShutdown;
+ case HVMSG_UNMAPPED_GPA:
+ ret = handle_unmapped_mem(vm_fd, cpu, msg, &exit_reason);
+ if (ret < 0) {
+ error_report("failed to handle unmapped memory");
+ return -1;
+ }
+ return exit_reason;
+ case HVMSG_GPA_INTERCEPT:
+ ret = handle_mmio(cpu, msg, &exit_reason);
+ if (ret < 0) {
+ error_report("failed to handle mmio");
+ return -1;
+ }
+ return exit_reason;
+ case HVMSG_X64_IO_PORT_INTERCEPT:
+ ret = handle_pio(cpu, msg);
+ if (ret < 0) {
+ return MshvVmExitSpecial;
+ }
+ return MshvVmExitIgnore;
+ default:
+ break;
+ }
+
+ *exit = MshvVmExitIgnore;
+ return 0;
+}
+
+void mshv_remove_vcpu(int vm_fd, int cpu_fd)
+{
+ close(cpu_fd);
+}
+
+
+int mshv_create_vcpu(int vm_fd, uint8_t vp_index, int *cpu_fd)
+{
+ int ret;
+ struct mshv_create_vp vp_arg = {
+ .vp_index = vp_index,
+ };
+ ret = ioctl(vm_fd, MSHV_CREATE_VP, &vp_arg);
+ if (ret < 0) {
+ error_report("failed to create mshv vcpu: %s", strerror(errno));
+ return -1;
+ }
+
+ *cpu_fd = ret;
+
+ return 0;
+}
+
+static int guest_mem_read_with_gva(const CPUState *cpu, uint64_t gva,
+ uint8_t *data, uintptr_t size,
+ bool fetch_instruction)
+{
+ int ret;
+ uint64_t gpa, flags;
+
+ flags = HV_TRANSLATE_GVA_VALIDATE_READ;
+ ret = translate_gva(cpu, gva, &gpa, flags);
+ if (ret < 0) {
+ error_report("failed to translate gva to gpa");
+ return -1;
+ }
+
+ ret = mshv_guest_mem_read(gpa, data, size, false, fetch_instruction);
+ if (ret < 0) {
+ error_report("failed to read from guest memory");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int guest_mem_write_with_gva(const CPUState *cpu, uint64_t gva,
+ const uint8_t *data, uintptr_t size)
+{
+ int ret;
+ uint64_t gpa, flags;
+
+ flags = HV_TRANSLATE_GVA_VALIDATE_WRITE;
+ ret = translate_gva(cpu, gva, &gpa, flags);
+ if (ret < 0) {
+ error_report("failed to translate gva to gpa");
+ return -1;
+ }
+ ret = mshv_guest_mem_write(gpa, data, size, false);
+ if (ret < 0) {
+ error_report("failed to write to guest memory");
+ return -1;
+ }
+ return 0;
+}
+
+static void write_mem(CPUState *cpu, void *data, target_ulong addr, int bytes)
+{
+ if (guest_mem_write_with_gva(cpu, addr, data, bytes) < 0) {
+ error_report("failed to write memory");
+ abort();
+ }
+}
+
+static void fetch_instruction(CPUState *cpu, void *data,
+ target_ulong addr, int bytes)
+{
+ if (guest_mem_read_with_gva(cpu, addr, data, bytes, true) < 0) {
+ error_report("failed to fetch instruction");
+ abort();
+ }
+}
+
+static void read_mem(CPUState *cpu, void *data, target_ulong addr, int bytes)
+{
+ if (guest_mem_read_with_gva(cpu, addr, data, bytes, false) < 0) {
+ error_report("failed to read memory");
+ abort();
+ }
+}
+
+static void read_segment_descriptor(CPUState *cpu,
+ struct x86_segment_descriptor *desc,
+ enum X86Seg seg_idx)
+{
+ bool ret;
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ SegmentCache *seg = &env->segs[seg_idx];
+ x86_segment_selector sel = { .sel = seg->selector & 0xFFFF };
+
+ ret = x86_read_segment_descriptor(cpu, desc, sel);
+ if (ret == false) {
+ error_report("failed to read segment descriptor");
+ abort();
+ }
+}
+
+static const struct x86_emul_ops mshv_x86_emul_ops = {
+ .fetch_instruction = fetch_instruction,
+ .read_mem = read_mem,
+ .write_mem = write_mem,
+ .read_segment_descriptor = read_segment_descriptor,
+};
+
+void mshv_init_mmio_emu(void)
+{
+ init_decoder();
+ init_emu(&mshv_x86_emul_ops);
+}
+
+void mshv_arch_init_vcpu(CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ AccelCPUState *state = cpu->accel;
+ size_t page = HV_HYP_PAGE_SIZE;
+ void *mem = qemu_memalign(page, 2 * page);
+
+ /* sanity check, to make sure we don't overflow the page */
+ QEMU_BUILD_BUG_ON((MAX_REGISTER_COUNT
+ * sizeof(hv_register_assoc)
+ + sizeof(hv_input_get_vp_registers)
+ > HV_HYP_PAGE_SIZE));
+
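+    /* first page holds the hypercall input arguments, second page the output */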
+ state->hvcall_args.base = mem;
+ state->hvcall_args.input_page = mem;
+ state->hvcall_args.output_page = (uint8_t *)mem + page;
+
+ env->emu_mmio_buf = g_new(char, 4096);
+}
+
+void mshv_arch_destroy_vcpu(CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ AccelCPUState *state = cpu->accel;
+
+ g_free(state->hvcall_args.base);
+ state->hvcall_args = (MshvHvCallArgs){0};
+ g_clear_pointer(&env->emu_mmio_buf, g_free);
+}
+
+/*
+ * The default Microsoft Hypervisor behavior for an unimplemented MSR is to
+ * inject a fault into the guest when it tries to access it. It is possible to
+ * override this behavior with a more suitable option, i.e. ignore writes from
+ * the guest and return zero on attempts to read unimplemented MSRs.
+ */
+static int set_unimplemented_msr_action(int vm_fd)
+{
+ struct hv_input_set_partition_property in = {0};
+ struct mshv_root_hvcall args = {0};
+
+ in.property_code = HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION;
+ in.property_value = HV_UNIMPLEMENTED_MSR_ACTION_IGNORE_WRITE_READ_ZERO;
+
+ args.code = HVCALL_SET_PARTITION_PROPERTY;
+ args.in_sz = sizeof(in);
+ args.in_ptr = (uint64_t)&in;
+
+ trace_mshv_hvcall_args("unimplemented_msr_action", args.code, args.in_sz);
+
+ int ret = mshv_hvcall(vm_fd, &args);
+ if (ret < 0) {
+ error_report("Failed to set unimplemented MSR action");
+ return -1;
+ }
+ return 0;
+}
+
+int mshv_arch_post_init_vm(int vm_fd)
+{
+ int ret;
+
+ ret = set_unimplemented_msr_action(vm_fd);
+ if (ret < 0) {
+ error_report("Failed to set unimplemented MSR action");
+ }
+
+ return ret;
+}
diff --git a/target/i386/mshv/x86.c b/target/i386/mshv/x86.c
new file mode 100644
index 0000000..d574b3b
--- /dev/null
+++ b/target/i386/mshv/x86.c
@@ -0,0 +1,297 @@
+/*
+ * QEMU MSHV support
+ *
+ * Copyright Microsoft, Corp. 2025
+ *
+ * Authors: Magnus Kulke <magnuskulke@microsoft.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "emulate/x86_decode.h"
+#include "emulate/x86_emu.h"
+#include "qemu/typedefs.h"
+#include "qemu/error-report.h"
+#include "system/mshv.h"
+
+/* RW or Exec segment */
+static const uint8_t RWRX_SEGMENT_TYPE = 0x2;
+static const uint8_t CODE_SEGMENT_TYPE = 0x8;
+static const uint8_t EXPAND_DOWN_SEGMENT_TYPE = 0x4;
+
+typedef enum CpuMode {
+ REAL_MODE,
+ PROTECTED_MODE,
+ LONG_MODE,
+} CpuMode;
+
+static CpuMode cpu_mode(CPUState *cpu)
+{
+ enum CpuMode m = REAL_MODE;
+
+ if (x86_is_protected(cpu)) {
+ m = PROTECTED_MODE;
+
+ if (x86_is_long_mode(cpu)) {
+ m = LONG_MODE;
+ }
+ }
+
+ return m;
+}
+
+static bool segment_type_ro(const SegmentCache *seg)
+{
+ uint32_t type_ = (seg->flags >> DESC_TYPE_SHIFT) & 15;
+ return (type_ & (~RWRX_SEGMENT_TYPE)) == 0;
+}
+
+static bool segment_type_code(const SegmentCache *seg)
+{
+ uint32_t type_ = (seg->flags >> DESC_TYPE_SHIFT) & 15;
+ return (type_ & CODE_SEGMENT_TYPE) != 0;
+}
+
+static bool segment_expands_down(const SegmentCache *seg)
+{
+ uint32_t type_ = (seg->flags >> DESC_TYPE_SHIFT) & 15;
+
+ if (segment_type_code(seg)) {
+ return false;
+ }
+
+ return (type_ & EXPAND_DOWN_SEGMENT_TYPE) != 0;
+}
+
+static uint32_t segment_limit(const SegmentCache *seg)
+{
+ uint32_t limit = seg->limit;
+ uint32_t granularity = (seg->flags & DESC_G_MASK) != 0;
+
+ if (granularity != 0) {
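+        /* G bit set: the limit is in 4 KiB units, scale it to a byte limit */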
+ limit = (limit << 12) | 0xFFF;
+ }
+
+ return limit;
+}
+
+static uint8_t segment_db(const SegmentCache *seg)
+{
+ return (seg->flags >> DESC_B_SHIFT) & 1;
+}
+
+static uint32_t segment_max_limit(const SegmentCache *seg)
+{
+ if (segment_db(seg) != 0) {
+ return 0xFFFFFFFF;
+ }
+ return 0xFFFF;
+}
+
+static int linearize(CPUState *cpu,
+ target_ulong logical_addr, target_ulong *linear_addr,
+ X86Seg seg_idx)
+{
+ enum CpuMode mode;
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ SegmentCache *seg = &env->segs[seg_idx];
+ target_ulong base = seg->base;
+ target_ulong logical_addr_32b;
+ uint32_t limit;
+ /* TODO: the emulator will not pass us "write" indicator yet */
+ bool write = false;
+
+ mode = cpu_mode(cpu);
+
+ switch (mode) {
+ case LONG_MODE:
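+        /* long mode: segmentation is essentially flat, only the base is added */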
+ if (__builtin_add_overflow(logical_addr, base, linear_addr)) {
+ error_report("Address overflow");
+ return -1;
+ }
+ break;
+ case PROTECTED_MODE:
+ case REAL_MODE:
+ if (segment_type_ro(seg) && write) {
+ error_report("Cannot write to read-only segment");
+ return -1;
+ }
+
+ logical_addr_32b = logical_addr & 0xFFFFFFFF;
+ limit = segment_limit(seg);
+
+ if (segment_expands_down(seg)) {
+ if (logical_addr_32b >= limit) {
+ error_report("Address exceeds limit (expands down)");
+ return -1;
+ }
+
+ limit = segment_max_limit(seg);
+ }
+
+ if (logical_addr_32b > limit) {
+ error_report("Address exceeds limit %u", limit);
+ return -1;
+ }
+ *linear_addr = logical_addr_32b + base;
+ break;
+ default:
+ error_report("Unknown cpu mode: %d", mode);
+ return -1;
+ }
+
+ return 0;
+}
+
+bool x86_read_segment_descriptor(CPUState *cpu,
+ struct x86_segment_descriptor *desc,
+ x86_segment_selector sel)
+{
+ target_ulong base;
+ uint32_t limit;
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ target_ulong gva;
+
+ memset(desc, 0, sizeof(*desc));
+
+ /* valid gdt descriptors start from index 1 */
+ if (!sel.index && GDT_SEL == sel.ti) {
+ return false;
+ }
+
+ if (GDT_SEL == sel.ti) {
+ base = env->gdt.base;
+ limit = env->gdt.limit;
+ } else {
+ base = env->ldt.base;
+ limit = env->ldt.limit;
+ }
+
+ if (sel.index * 8 >= limit) {
+ return false;
+ }
+
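+    /* descriptor table entries are 8 bytes each */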
+ gva = base + sel.index * 8;
+ emul_ops->read_mem(cpu, desc, gva, sizeof(*desc));
+
+ return true;
+}
+
+bool x86_read_call_gate(CPUState *cpu, struct x86_call_gate *idt_desc,
+ int gate)
+{
+ target_ulong base;
+ uint32_t limit;
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ target_ulong gva;
+
+ base = env->idt.base;
+ limit = env->idt.limit;
+
+ memset(idt_desc, 0, sizeof(*idt_desc));
+ if (gate * 8 >= limit) {
+        error_report("call gate exceeds idt limit");
+ return false;
+ }
+
+ gva = base + gate * 8;
+ emul_ops->read_mem(cpu, idt_desc, gva, sizeof(*idt_desc));
+
+ return true;
+}
+
+bool x86_is_protected(CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ uint64_t cr0 = env->cr[0];
+
+ return cr0 & CR0_PE_MASK;
+}
+
+bool x86_is_real(CPUState *cpu)
+{
+ return !x86_is_protected(cpu);
+}
+
+bool x86_is_v8086(CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ return x86_is_protected(cpu) && (env->eflags & VM_MASK);
+}
+
+bool x86_is_long_mode(CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ uint64_t efer = env->efer;
+ uint64_t lme_lma = (MSR_EFER_LME | MSR_EFER_LMA);
+
+ return ((efer & lme_lma) == lme_lma);
+}
+
+bool x86_is_long64_mode(CPUState *cpu)
+{
+ error_report("unimplemented: is_long64_mode()");
+ abort();
+}
+
+bool x86_is_paging_mode(CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ uint64_t cr0 = env->cr[0];
+
+ return cr0 & CR0_PG_MASK;
+}
+
+bool x86_is_pae_enabled(CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ uint64_t cr4 = env->cr[4];
+
+ return cr4 & CR4_PAE_MASK;
+}
+
+target_ulong linear_addr(CPUState *cpu, target_ulong addr, X86Seg seg)
+{
+ int ret;
+ target_ulong linear_addr;
+
+ ret = linearize(cpu, addr, &linear_addr, seg);
+ if (ret < 0) {
+ error_report("failed to linearize address");
+ abort();
+ }
+
+ return linear_addr;
+}
+
+target_ulong linear_addr_size(CPUState *cpu, target_ulong addr, int size,
+ X86Seg seg)
+{
+ switch (size) {
+ case 2:
+ addr = (uint16_t)addr;
+ break;
+ case 4:
+ addr = (uint32_t)addr;
+ break;
+ default:
+ break;
+ }
+ return linear_addr(cpu, addr, seg);
+}
+
+target_ulong linear_rip(CPUState *cpu, target_ulong rip)
+{
+ return linear_addr(cpu, rip, R_CS);
+}
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
index 993602f..86490e0 100644
--- a/target/loongarch/cpu.c
+++ b/target/loongarch/cpu.c
@@ -28,11 +28,6 @@
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
-#ifdef CONFIG_TCG
-#include "accel/tcg/cpu-ldst.h"
-#include "accel/tcg/cpu-ops.h"
-#include "tcg/tcg.h"
-#endif
#include "tcg/tcg_loongarch.h"
const char * const regnames[32] = {
@@ -49,62 +44,6 @@ const char * const fregnames[32] = {
"f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
};
-struct TypeExcp {
- int32_t exccode;
- const char * const name;
-};
-
-static const struct TypeExcp excp_names[] = {
- {EXCCODE_INT, "Interrupt"},
- {EXCCODE_PIL, "Page invalid exception for load"},
- {EXCCODE_PIS, "Page invalid exception for store"},
- {EXCCODE_PIF, "Page invalid exception for fetch"},
- {EXCCODE_PME, "Page modified exception"},
- {EXCCODE_PNR, "Page Not Readable exception"},
- {EXCCODE_PNX, "Page Not Executable exception"},
- {EXCCODE_PPI, "Page Privilege error"},
- {EXCCODE_ADEF, "Address error for instruction fetch"},
- {EXCCODE_ADEM, "Address error for Memory access"},
- {EXCCODE_SYS, "Syscall"},
- {EXCCODE_BRK, "Break"},
- {EXCCODE_INE, "Instruction Non-Existent"},
- {EXCCODE_IPE, "Instruction privilege error"},
- {EXCCODE_FPD, "Floating Point Disabled"},
- {EXCCODE_FPE, "Floating Point Exception"},
- {EXCCODE_DBP, "Debug breakpoint"},
- {EXCCODE_BCE, "Bound Check Exception"},
- {EXCCODE_SXD, "128 bit vector instructions Disable exception"},
- {EXCCODE_ASXD, "256 bit vector instructions Disable exception"},
- {EXCP_HLT, "EXCP_HLT"},
-};
-
-const char *loongarch_exception_name(int32_t exception)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(excp_names); i++) {
- if (excp_names[i].exccode == exception) {
- return excp_names[i].name;
- }
- }
- return "Unknown";
-}
-
-void G_NORETURN do_raise_exception(CPULoongArchState *env,
- uint32_t exception,
- uintptr_t pc)
-{
- CPUState *cs = env_cpu(env);
-
- qemu_log_mask(CPU_LOG_INT, "%s: exception: %d (%s)\n",
- __func__,
- exception,
- loongarch_exception_name(exception));
- cs->exception_index = exception;
-
- cpu_loop_exit_restore(cs, pc);
-}
-
static void loongarch_cpu_set_pc(CPUState *cs, vaddr value)
{
set_pc(cpu_env(cs), value);
@@ -140,18 +79,8 @@ void loongarch_cpu_set_irq(void *opaque, int irq, int level)
}
}
-static inline bool cpu_loongarch_hw_interrupts_enabled(CPULoongArchState *env)
-{
- bool ret = 0;
-
- ret = (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE) &&
- !(FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)));
-
- return ret;
-}
-
/* Check if there is pending and not masked out interrupt */
-static inline bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env)
+bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env)
{
uint32_t pending;
uint32_t status;
@@ -163,217 +92,8 @@ static inline bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env)
}
#endif
-#ifdef CONFIG_TCG
-#ifndef CONFIG_USER_ONLY
-static void loongarch_cpu_do_interrupt(CPUState *cs)
-{
- CPULoongArchState *env = cpu_env(cs);
- bool update_badinstr = 1;
- int cause = -1;
- bool tlbfill = FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR);
- uint32_t vec_size = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, VS);
-
- if (cs->exception_index != EXCCODE_INT) {
- qemu_log_mask(CPU_LOG_INT,
- "%s enter: pc " TARGET_FMT_lx " ERA " TARGET_FMT_lx
- " TLBRERA " TARGET_FMT_lx " exception: %d (%s)\n",
- __func__, env->pc, env->CSR_ERA, env->CSR_TLBRERA,
- cs->exception_index,
- loongarch_exception_name(cs->exception_index));
- }
-
- switch (cs->exception_index) {
- case EXCCODE_DBP:
- env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DCL, 1);
- env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, ECODE, 0xC);
- goto set_DERA;
- set_DERA:
- env->CSR_DERA = env->pc;
- env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DST, 1);
- set_pc(env, env->CSR_EENTRY + 0x480);
- break;
- case EXCCODE_INT:
- if (FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
- env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DEI, 1);
- goto set_DERA;
- }
- QEMU_FALLTHROUGH;
- case EXCCODE_PIF:
- case EXCCODE_ADEF:
- cause = cs->exception_index;
- update_badinstr = 0;
- break;
- case EXCCODE_SYS:
- case EXCCODE_BRK:
- case EXCCODE_INE:
- case EXCCODE_IPE:
- case EXCCODE_FPD:
- case EXCCODE_FPE:
- case EXCCODE_SXD:
- case EXCCODE_ASXD:
- env->CSR_BADV = env->pc;
- QEMU_FALLTHROUGH;
- case EXCCODE_BCE:
- case EXCCODE_ADEM:
- case EXCCODE_PIL:
- case EXCCODE_PIS:
- case EXCCODE_PME:
- case EXCCODE_PNR:
- case EXCCODE_PNX:
- case EXCCODE_PPI:
- cause = cs->exception_index;
- break;
- default:
- qemu_log("Error: exception(%d) has not been supported\n",
- cs->exception_index);
- abort();
- }
-
- if (update_badinstr) {
- env->CSR_BADI = cpu_ldl_code(env, env->pc);
- }
-
- /* Save PLV and IE */
- if (tlbfill) {
- env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PPLV,
- FIELD_EX64(env->CSR_CRMD,
- CSR_CRMD, PLV));
- env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PIE,
- FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
- /* set the DA mode */
- env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
- env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
- env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA,
- PC, (env->pc >> 2));
- } else {
- env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ECODE,
- EXCODE_MCODE(cause));
- env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ESUBCODE,
- EXCODE_SUBCODE(cause));
- env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PPLV,
- FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV));
- env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PIE,
- FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
- env->CSR_ERA = env->pc;
- }
-
- env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
- env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);
-
- if (vec_size) {
- vec_size = (1 << vec_size) * 4;
- }
-
- if (cs->exception_index == EXCCODE_INT) {
- /* Interrupt */
- uint32_t vector = 0;
- uint32_t pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
- pending &= FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);
-
- /* Find the highest-priority interrupt. */
- vector = 31 - clz32(pending);
- set_pc(env, env->CSR_EENTRY + \
- (EXCCODE_EXTERNAL_INT + vector) * vec_size);
- qemu_log_mask(CPU_LOG_INT,
- "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
- " cause %d\n" " A " TARGET_FMT_lx " D "
- TARGET_FMT_lx " vector = %d ExC " TARGET_FMT_lx "ExS"
- TARGET_FMT_lx "\n",
- __func__, env->pc, env->CSR_ERA,
- cause, env->CSR_BADV, env->CSR_DERA, vector,
- env->CSR_ECFG, env->CSR_ESTAT);
- } else {
- if (tlbfill) {
- set_pc(env, env->CSR_TLBRENTRY);
- } else {
- set_pc(env, env->CSR_EENTRY + EXCODE_MCODE(cause) * vec_size);
- }
- qemu_log_mask(CPU_LOG_INT,
- "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
- " cause %d%s\n, ESTAT " TARGET_FMT_lx
- " EXCFG " TARGET_FMT_lx " BADVA " TARGET_FMT_lx
- "BADI " TARGET_FMT_lx " SYS_NUM " TARGET_FMT_lu
- " cpu %d asid " TARGET_FMT_lx "\n", __func__, env->pc,
- tlbfill ? env->CSR_TLBRERA : env->CSR_ERA,
- cause, tlbfill ? "(refill)" : "", env->CSR_ESTAT,
- env->CSR_ECFG,
- tlbfill ? env->CSR_TLBRBADV : env->CSR_BADV,
- env->CSR_BADI, env->gpr[11], cs->cpu_index,
- env->CSR_ASID);
- }
- cs->exception_index = -1;
-}
-
-static void loongarch_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
- vaddr addr, unsigned size,
- MMUAccessType access_type,
- int mmu_idx, MemTxAttrs attrs,
- MemTxResult response,
- uintptr_t retaddr)
-{
- CPULoongArchState *env = cpu_env(cs);
-
- if (access_type == MMU_INST_FETCH) {
- do_raise_exception(env, EXCCODE_ADEF, retaddr);
- } else {
- do_raise_exception(env, EXCCODE_ADEM, retaddr);
- }
-}
-
-static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
-{
- if (interrupt_request & CPU_INTERRUPT_HARD) {
- CPULoongArchState *env = cpu_env(cs);
-
- if (cpu_loongarch_hw_interrupts_enabled(env) &&
- cpu_loongarch_hw_interrupts_pending(env)) {
- /* Raise it */
- cs->exception_index = EXCCODE_INT;
- loongarch_cpu_do_interrupt(cs);
- return true;
- }
- }
- return false;
-}
-
-static vaddr loongarch_pointer_wrap(CPUState *cs, int mmu_idx,
- vaddr result, vaddr base)
-{
- return is_va32(cpu_env(cs)) ? (uint32_t)result : result;
-}
-#endif
-
-static TCGTBCPUState loongarch_get_tb_cpu_state(CPUState *cs)
-{
- CPULoongArchState *env = cpu_env(cs);
- uint32_t flags;
-
- flags = env->CSR_CRMD & (R_CSR_CRMD_PLV_MASK | R_CSR_CRMD_PG_MASK);
- flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, FPE) * HW_FLAGS_EUEN_FPE;
- flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE) * HW_FLAGS_EUEN_SXE;
- flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE) * HW_FLAGS_EUEN_ASXE;
- flags |= is_va32(env) * HW_FLAGS_VA32;
-
- return (TCGTBCPUState){ .pc = env->pc, .flags = flags };
-}
-
-static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
- const TranslationBlock *tb)
-{
- tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));
- set_pc(cpu_env(cs), tb->pc);
-}
-
-static void loongarch_restore_state_to_opc(CPUState *cs,
- const TranslationBlock *tb,
- const uint64_t *data)
-{
- set_pc(cpu_env(cs), data[0]);
-}
-#endif /* CONFIG_TCG */
-
#ifndef CONFIG_USER_ONLY
-static bool loongarch_cpu_has_work(CPUState *cs)
+bool loongarch_cpu_has_work(CPUState *cs)
{
bool has_work = false;
@@ -386,16 +106,6 @@ static bool loongarch_cpu_has_work(CPUState *cs)
}
#endif /* !CONFIG_USER_ONLY */
-static int loongarch_cpu_mmu_index(CPUState *cs, bool ifetch)
-{
- CPULoongArchState *env = cpu_env(cs);
-
- if (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG)) {
- return FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV);
- }
- return MMU_DA_IDX;
-}
-
static void loongarch_la464_init_csr(Object *obj)
{
#ifndef CONFIG_USER_ONLY
@@ -911,30 +621,6 @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
}
-#ifdef CONFIG_TCG
-static const TCGCPUOps loongarch_tcg_ops = {
- .guest_default_memory_order = 0,
- .mttcg_supported = true,
-
- .initialize = loongarch_translate_init,
- .translate_code = loongarch_translate_code,
- .get_tb_cpu_state = loongarch_get_tb_cpu_state,
- .synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
- .restore_state_to_opc = loongarch_restore_state_to_opc,
- .mmu_index = loongarch_cpu_mmu_index,
-
-#ifndef CONFIG_USER_ONLY
- .tlb_fill = loongarch_cpu_tlb_fill,
- .pointer_wrap = loongarch_pointer_wrap,
- .cpu_exec_interrupt = loongarch_cpu_exec_interrupt,
- .cpu_exec_halt = loongarch_cpu_has_work,
- .cpu_exec_reset = cpu_reset,
- .do_interrupt = loongarch_cpu_do_interrupt,
- .do_transaction_failed = loongarch_cpu_do_transaction_failed,
-#endif
-};
-#endif /* CONFIG_TCG */
-
#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
index e50d109..8793bd9 100644
--- a/target/loongarch/internals.h
+++ b/target/loongarch/internals.h
@@ -24,8 +24,6 @@ void G_NORETURN do_raise_exception(CPULoongArchState *env,
uint32_t exception,
uintptr_t pc);
-const char *loongarch_exception_name(int32_t exception);
-
#ifdef CONFIG_TCG
int ieee_ex_to_loongarch(int xcpt);
void restore_fp_status(CPULoongArchState *env);
@@ -41,6 +39,8 @@ uint64_t cpu_loongarch_get_constant_timer_counter(LoongArchCPU *cpu);
uint64_t cpu_loongarch_get_constant_timer_ticks(LoongArchCPU *cpu);
void cpu_loongarch_store_constant_timer_config(LoongArchCPU *cpu,
uint64_t value);
+bool loongarch_cpu_has_work(CPUState *cs);
+bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env);
#endif /* !CONFIG_USER_ONLY */
uint64_t read_fcc(CPULoongArchState *env);
diff --git a/target/loongarch/tcg/meson.build b/target/loongarch/tcg/meson.build
index bdf34f9..b7adfe4 100644
--- a/target/loongarch/tcg/meson.build
+++ b/target/loongarch/tcg/meson.build
@@ -7,6 +7,7 @@ loongarch_ss.add([zlib, gen])
loongarch_ss.add(files(
'fpu_helper.c',
'op_helper.c',
+ 'tcg_cpu.c',
'translate.c',
'vec_helper.c',
))
diff --git a/target/loongarch/tcg/tcg_cpu.c b/target/loongarch/tcg/tcg_cpu.c
new file mode 100644
index 0000000..82b54e6
--- /dev/null
+++ b/target/loongarch/tcg/tcg_cpu.c
@@ -0,0 +1,322 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch TCG CPU support for QEMU.
+ *
+ * Copyright (c) 2025 Loongson Technology Corporation Limited
+ */
+#include "qemu/osdep.h"
+#include "qemu/accel.h"
+#include "qemu/error-report.h"
+#include "qemu/log.h"
+#include "accel/accel-cpu-target.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/cpu-ops.h"
+#include "exec/translation-block.h"
+#include "exec/target_page.h"
+#include "tcg_loongarch.h"
+#include "internals.h"
+
+struct TypeExcp {
+ int32_t exccode;
+ const char * const name;
+};
+
+static const struct TypeExcp excp_names[] = {
+ {EXCCODE_INT, "Interrupt"},
+ {EXCCODE_PIL, "Page invalid exception for load"},
+ {EXCCODE_PIS, "Page invalid exception for store"},
+ {EXCCODE_PIF, "Page invalid exception for fetch"},
+ {EXCCODE_PME, "Page modified exception"},
+ {EXCCODE_PNR, "Page Not Readable exception"},
+ {EXCCODE_PNX, "Page Not Executable exception"},
+ {EXCCODE_PPI, "Page Privilege error"},
+ {EXCCODE_ADEF, "Address error for instruction fetch"},
+ {EXCCODE_ADEM, "Address error for Memory access"},
+ {EXCCODE_SYS, "Syscall"},
+ {EXCCODE_BRK, "Break"},
+ {EXCCODE_INE, "Instruction Non-Existent"},
+ {EXCCODE_IPE, "Instruction privilege error"},
+ {EXCCODE_FPD, "Floating Point Disabled"},
+ {EXCCODE_FPE, "Floating Point Exception"},
+ {EXCCODE_DBP, "Debug breakpoint"},
+ {EXCCODE_BCE, "Bound Check Exception"},
+ {EXCCODE_SXD, "128 bit vector instructions Disable exception"},
+ {EXCCODE_ASXD, "256 bit vector instructions Disable exception"},
+ {EXCP_HLT, "EXCP_HLT"},
+};
+
+static const char *loongarch_exception_name(int32_t exception)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(excp_names); i++) {
+ if (excp_names[i].exccode == exception) {
+ return excp_names[i].name;
+ }
+ }
+ return "Unknown";
+}
+
+void G_NORETURN do_raise_exception(CPULoongArchState *env,
+ uint32_t exception,
+ uintptr_t pc)
+{
+ CPUState *cs = env_cpu(env);
+
+ qemu_log_mask(CPU_LOG_INT, "%s: exception: %d (%s)\n",
+ __func__,
+ exception,
+ loongarch_exception_name(exception));
+ cs->exception_index = exception;
+
+ cpu_loop_exit_restore(cs, pc);
+}
+
+#ifndef CONFIG_USER_ONLY
+static void loongarch_cpu_do_interrupt(CPUState *cs)
+{
+ CPULoongArchState *env = cpu_env(cs);
+ bool update_badinstr = 1;
+ int cause = -1;
+ bool tlbfill = FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR);
+ uint32_t vec_size = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, VS);
+
+ if (cs->exception_index != EXCCODE_INT) {
+ qemu_log_mask(CPU_LOG_INT,
+ "%s enter: pc " TARGET_FMT_lx " ERA " TARGET_FMT_lx
+ " TLBRERA " TARGET_FMT_lx " exception: %d (%s)\n",
+ __func__, env->pc, env->CSR_ERA, env->CSR_TLBRERA,
+ cs->exception_index,
+ loongarch_exception_name(cs->exception_index));
+ }
+
+ switch (cs->exception_index) {
+ case EXCCODE_DBP:
+ env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DCL, 1);
+ env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, ECODE, 0xC);
+ goto set_DERA;
+ set_DERA:
+ env->CSR_DERA = env->pc;
+ env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DST, 1);
+ set_pc(env, env->CSR_EENTRY + 0x480);
+ break;
+ case EXCCODE_INT:
+ if (FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
+ env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DEI, 1);
+ goto set_DERA;
+ }
+ QEMU_FALLTHROUGH;
+ case EXCCODE_PIF:
+ case EXCCODE_ADEF:
+ cause = cs->exception_index;
+ update_badinstr = 0;
+ break;
+ case EXCCODE_SYS:
+ case EXCCODE_BRK:
+ case EXCCODE_INE:
+ case EXCCODE_IPE:
+ case EXCCODE_FPD:
+ case EXCCODE_FPE:
+ case EXCCODE_SXD:
+ case EXCCODE_ASXD:
+ env->CSR_BADV = env->pc;
+ QEMU_FALLTHROUGH;
+ case EXCCODE_BCE:
+ case EXCCODE_ADEM:
+ case EXCCODE_PIL:
+ case EXCCODE_PIS:
+ case EXCCODE_PME:
+ case EXCCODE_PNR:
+ case EXCCODE_PNX:
+ case EXCCODE_PPI:
+ cause = cs->exception_index;
+ break;
+ default:
+ qemu_log("Error: exception(%d) has not been supported\n",
+ cs->exception_index);
+ abort();
+ }
+
+ if (update_badinstr) {
+ env->CSR_BADI = cpu_ldl_code(env, env->pc);
+ }
+
+ /* Save PLV and IE */
+ if (tlbfill) {
+ env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PPLV,
+ FIELD_EX64(env->CSR_CRMD,
+ CSR_CRMD, PLV));
+ env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PIE,
+ FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
+ /* set the DA mode */
+ env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
+ env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
+ env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA,
+ PC, (env->pc >> 2));
+ } else {
+ env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ECODE,
+ EXCODE_MCODE(cause));
+ env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ESUBCODE,
+ EXCODE_SUBCODE(cause));
+ env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PPLV,
+ FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV));
+ env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PIE,
+ FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
+ env->CSR_ERA = env->pc;
+ }
+
+ env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
+ env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);
+
+ if (vec_size) {
+ vec_size = (1 << vec_size) * 4;
+ }
+
+ if (cs->exception_index == EXCCODE_INT) {
+ /* Interrupt */
+ uint32_t vector = 0;
+ uint32_t pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
+ pending &= FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);
+
+ /* Find the highest-priority interrupt. */
+ vector = 31 - clz32(pending);
+ set_pc(env, env->CSR_EENTRY + \
+ (EXCCODE_EXTERNAL_INT + vector) * vec_size);
+ qemu_log_mask(CPU_LOG_INT,
+ "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
+ " cause %d\n" " A " TARGET_FMT_lx " D "
+ TARGET_FMT_lx " vector = %d ExC " TARGET_FMT_lx "ExS"
+ TARGET_FMT_lx "\n",
+ __func__, env->pc, env->CSR_ERA,
+ cause, env->CSR_BADV, env->CSR_DERA, vector,
+ env->CSR_ECFG, env->CSR_ESTAT);
+ } else {
+ if (tlbfill) {
+ set_pc(env, env->CSR_TLBRENTRY);
+ } else {
+ set_pc(env, env->CSR_EENTRY + EXCODE_MCODE(cause) * vec_size);
+ }
+ qemu_log_mask(CPU_LOG_INT,
+ "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
+ " cause %d%s\n, ESTAT " TARGET_FMT_lx
+ " EXCFG " TARGET_FMT_lx " BADVA " TARGET_FMT_lx
+ "BADI " TARGET_FMT_lx " SYS_NUM " TARGET_FMT_lu
+ " cpu %d asid " TARGET_FMT_lx "\n", __func__, env->pc,
+ tlbfill ? env->CSR_TLBRERA : env->CSR_ERA,
+ cause, tlbfill ? "(refill)" : "", env->CSR_ESTAT,
+ env->CSR_ECFG,
+ tlbfill ? env->CSR_TLBRBADV : env->CSR_BADV,
+ env->CSR_BADI, env->gpr[11], cs->cpu_index,
+ env->CSR_ASID);
+ }
+ cs->exception_index = -1;
+}
+
+static void loongarch_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response,
+ uintptr_t retaddr)
+{
+ CPULoongArchState *env = cpu_env(cs);
+
+ if (access_type == MMU_INST_FETCH) {
+ do_raise_exception(env, EXCCODE_ADEF, retaddr);
+ } else {
+ do_raise_exception(env, EXCCODE_ADEM, retaddr);
+ }
+}
+
+static inline bool cpu_loongarch_hw_interrupts_enabled(CPULoongArchState *env)
+{
+ bool ret = 0;
+
+ ret = (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE) &&
+ !(FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)));
+
+ return ret;
+}
+
+static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ CPULoongArchState *env = cpu_env(cs);
+
+ if (cpu_loongarch_hw_interrupts_enabled(env) &&
+ cpu_loongarch_hw_interrupts_pending(env)) {
+ /* Raise it */
+ cs->exception_index = EXCCODE_INT;
+ loongarch_cpu_do_interrupt(cs);
+ return true;
+ }
+ }
+ return false;
+}
+
+static vaddr loongarch_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ return is_va32(cpu_env(cs)) ? (uint32_t)result : result;
+}
+#endif
+
+static TCGTBCPUState loongarch_get_tb_cpu_state(CPUState *cs)
+{
+ CPULoongArchState *env = cpu_env(cs);
+ uint32_t flags;
+
+ flags = env->CSR_CRMD & (R_CSR_CRMD_PLV_MASK | R_CSR_CRMD_PG_MASK);
+ flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, FPE) * HW_FLAGS_EUEN_FPE;
+ flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE) * HW_FLAGS_EUEN_SXE;
+ flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE) * HW_FLAGS_EUEN_ASXE;
+ flags |= is_va32(env) * HW_FLAGS_VA32;
+
+ return (TCGTBCPUState){ .pc = env->pc, .flags = flags };
+}
+
+static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
+ const TranslationBlock *tb)
+{
+ tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));
+ set_pc(cpu_env(cs), tb->pc);
+}
+
+static void loongarch_restore_state_to_opc(CPUState *cs,
+ const TranslationBlock *tb,
+ const uint64_t *data)
+{
+ set_pc(cpu_env(cs), data[0]);
+}
+
+static int loongarch_cpu_mmu_index(CPUState *cs, bool ifetch)
+{
+ CPULoongArchState *env = cpu_env(cs);
+
+ if (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG)) {
+ return FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV);
+ }
+ return MMU_DA_IDX;
+}
+
+const TCGCPUOps loongarch_tcg_ops = {
+ .guest_default_memory_order = 0,
+ .mttcg_supported = true,
+
+ .initialize = loongarch_translate_init,
+ .translate_code = loongarch_translate_code,
+ .get_tb_cpu_state = loongarch_get_tb_cpu_state,
+ .synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
+ .restore_state_to_opc = loongarch_restore_state_to_opc,
+ .mmu_index = loongarch_cpu_mmu_index,
+
+#ifndef CONFIG_USER_ONLY
+ .tlb_fill = loongarch_cpu_tlb_fill,
+ .pointer_wrap = loongarch_pointer_wrap,
+ .cpu_exec_interrupt = loongarch_cpu_exec_interrupt,
+ .cpu_exec_halt = loongarch_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
+ .do_interrupt = loongarch_cpu_do_interrupt,
+ .do_transaction_failed = loongarch_cpu_do_transaction_failed,
+#endif
+};
diff --git a/target/loongarch/tcg/tcg_loongarch.h b/target/loongarch/tcg/tcg_loongarch.h
index 4770289..7fb627f 100644
--- a/target/loongarch/tcg/tcg_loongarch.h
+++ b/target/loongarch/tcg/tcg_loongarch.h
@@ -9,6 +9,7 @@
#include "cpu.h"
#include "cpu-mmu.h"
+extern const TCGCPUOps loongarch_tcg_ops;
void loongarch_csr_translate_init(void);
bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
diff --git a/target/s390x/helper.c b/target/s390x/helper.c
index 5c127da..184428c 100644
--- a/target/s390x/helper.c
+++ b/target/s390x/helper.c
@@ -24,8 +24,8 @@
#include "gdbstub/helpers.h"
#include "qemu/timer.h"
#include "hw/s390x/ioinst.h"
-#include "target/s390x/kvm/pv.h"
#include "system/hw_accel.h"
+#include "system/memory.h"
#include "system/runstate.h"
#include "exec/target_page.h"
#include "exec/watchpoint.h"
@@ -107,19 +107,23 @@ LowCore *cpu_map_lowcore(CPUS390XState *env)
{
LowCore *lowcore;
hwaddr len = sizeof(LowCore);
+ CPUState *cs = env_cpu(env);
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
- lowcore = cpu_physical_memory_map(env->psa, &len, true);
+ lowcore = address_space_map(cs->as, env->psa, &len, true, attrs);
if (len < sizeof(LowCore)) {
- cpu_abort(env_cpu(env), "Could not map lowcore\n");
+ cpu_abort(cs, "Could not map lowcore\n");
}
return lowcore;
}
-void cpu_unmap_lowcore(LowCore *lowcore)
+void cpu_unmap_lowcore(CPUS390XState *env, LowCore *lowcore)
{
- cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
+ AddressSpace *as = env_cpu(env)->as;
+
+ address_space_unmap(as, lowcore, sizeof(LowCore), true, sizeof(LowCore));
}
void do_restart_interrupt(CPUS390XState *env)
@@ -134,7 +138,7 @@ void do_restart_interrupt(CPUS390XState *env)
mask = be64_to_cpu(lowcore->restart_new_psw.mask);
addr = be64_to_cpu(lowcore->restart_new_psw.addr);
- cpu_unmap_lowcore(lowcore);
+ cpu_unmap_lowcore(env, lowcore);
env->pending_int &= ~INTERRUPT_RESTART;
s390_cpu_set_psw(env, mask, addr);
@@ -177,109 +181,3 @@ void s390_cpu_recompute_watchpoints(CPUState *cs)
wp_flags, NULL);
}
}
-
-typedef struct SigpSaveArea {
- uint64_t fprs[16]; /* 0x0000 */
- uint64_t grs[16]; /* 0x0080 */
- PSW psw; /* 0x0100 */
- uint8_t pad_0x0110[0x0118 - 0x0110]; /* 0x0110 */
- uint32_t prefix; /* 0x0118 */
- uint32_t fpc; /* 0x011c */
- uint8_t pad_0x0120[0x0124 - 0x0120]; /* 0x0120 */
- uint32_t todpr; /* 0x0124 */
- uint64_t cputm; /* 0x0128 */
- uint64_t ckc; /* 0x0130 */
- uint8_t pad_0x0138[0x0140 - 0x0138]; /* 0x0138 */
- uint32_t ars[16]; /* 0x0140 */
- uint64_t crs[16]; /* 0x0384 */
-} SigpSaveArea;
-QEMU_BUILD_BUG_ON(sizeof(SigpSaveArea) != 512);
-
-int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
-{
- static const uint8_t ar_id = 1;
- SigpSaveArea *sa;
- hwaddr len = sizeof(*sa);
- int i;
-
- /* For PVMs storing will occur when this cpu enters SIE again */
- if (s390_is_pv()) {
- return 0;
- }
-
- sa = cpu_physical_memory_map(addr, &len, true);
- if (!sa) {
- return -EFAULT;
- }
- if (len != sizeof(*sa)) {
- cpu_physical_memory_unmap(sa, len, 1, 0);
- return -EFAULT;
- }
-
- if (store_arch) {
- cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
- }
- for (i = 0; i < 16; ++i) {
- sa->fprs[i] = cpu_to_be64(*get_freg(&cpu->env, i));
- }
- for (i = 0; i < 16; ++i) {
- sa->grs[i] = cpu_to_be64(cpu->env.regs[i]);
- }
- sa->psw.addr = cpu_to_be64(cpu->env.psw.addr);
- sa->psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(&cpu->env));
- sa->prefix = cpu_to_be32(cpu->env.psa);
- sa->fpc = cpu_to_be32(cpu->env.fpc);
- sa->todpr = cpu_to_be32(cpu->env.todpr);
- sa->cputm = cpu_to_be64(cpu->env.cputm);
- sa->ckc = cpu_to_be64(cpu->env.ckc >> 8);
- for (i = 0; i < 16; ++i) {
- sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]);
- }
- for (i = 0; i < 16; ++i) {
- sa->crs[i] = cpu_to_be64(cpu->env.cregs[i]);
- }
-
- cpu_physical_memory_unmap(sa, len, 1, len);
-
- return 0;
-}
-
-typedef struct SigpAdtlSaveArea {
- uint64_t vregs[32][2]; /* 0x0000 */
- uint8_t pad_0x0200[0x0400 - 0x0200]; /* 0x0200 */
- uint64_t gscb[4]; /* 0x0400 */
- uint8_t pad_0x0420[0x1000 - 0x0420]; /* 0x0420 */
-} SigpAdtlSaveArea;
-QEMU_BUILD_BUG_ON(sizeof(SigpAdtlSaveArea) != 4096);
-
-#define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */
-int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len)
-{
- SigpAdtlSaveArea *sa;
- hwaddr save = len;
- int i;
-
- sa = cpu_physical_memory_map(addr, &save, true);
- if (!sa) {
- return -EFAULT;
- }
- if (save != len) {
- cpu_physical_memory_unmap(sa, len, 1, 0);
- return -EFAULT;
- }
-
- if (s390_has_feat(S390_FEAT_VECTOR)) {
- for (i = 0; i < 32; i++) {
- sa->vregs[i][0] = cpu_to_be64(cpu->env.vregs[i][0]);
- sa->vregs[i][1] = cpu_to_be64(cpu->env.vregs[i][1]);
- }
- }
- if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) && len >= ADTL_GS_MIN_SIZE) {
- for (i = 0; i < 4; i++) {
- sa->gscb[i] = cpu_to_be64(cpu->env.gscb[i]);
- }
- }
-
- cpu_physical_memory_unmap(sa, len, 1, len);
- return 0;
-}
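The helper.c hunks above replace the legacy cpu_physical_memory_map()/cpu_physical_memory_unmap() pair with the per-CPU address-space API, so the lowcore is always reached through the calling CPU's view of memory. A minimal sketch of the resulting map/modify/unmap pattern follows; the helper name write_guest_be64() is illustrative only, while address_space_map(), address_space_unmap() and MEMTXATTRS_UNSPECIFIED are the real QEMU interfaces the patch uses.

    /* Sketch only: map a guest-physical doubleword through the CPU's
     * address space, store a big-endian value, and unmap it again. */
    #include "qemu/osdep.h"
    #include "qemu/bswap.h"
    #include "hw/core/cpu.h"
    #include "system/memory.h"

    static int write_guest_be64(CPUState *cs, hwaddr addr, uint64_t value)
    {
        const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
        hwaddr len = sizeof(uint64_t);
        uint64_t *p = address_space_map(cs->as, addr, &len, true, attrs);

        if (!p) {
            return -EFAULT;                      /* nothing was mapped */
        }
        if (len < sizeof(uint64_t)) {
            /* Short mapping: release it without marking any bytes dirty. */
            address_space_unmap(cs->as, p, len, true, 0);
            return -EFAULT;
        }
        *p = cpu_to_be64(value);                 /* s390x guest memory is big-endian */
        address_space_unmap(cs->as, p, len, true, len);
        return 0;
    }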
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
index 56cce2e..9691366 100644
--- a/target/s390x/s390x-internal.h
+++ b/target/s390x/s390x-internal.h
@@ -323,11 +323,8 @@ void s390x_cpu_timer(void *opaque);
void s390_handle_wait(S390CPU *cpu);
hwaddr s390_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
hwaddr s390_cpu_get_phys_addr_debug(CPUState *cpu, vaddr addr);
-#define S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area)
-int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch);
-int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len);
LowCore *cpu_map_lowcore(CPUS390XState *env);
-void cpu_unmap_lowcore(LowCore *lowcore);
+void cpu_unmap_lowcore(CPUS390XState *env, LowCore *lowcore);
#endif /* CONFIG_USER_ONLY */
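With the extra CPUS390XState argument, every lowcore access pairs the map and unmap on the same CPU. The do_restart_interrupt() hunk above shows the caller pattern, roughly:

    LowCore *lowcore = cpu_map_lowcore(env);
    uint64_t mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    uint64_t addr = be64_to_cpu(lowcore->restart_new_psw.addr);
    cpu_unmap_lowcore(env, lowcore);   /* env selects the CPU's address space */
    s390_cpu_set_psw(env, mask, addr);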
diff --git a/target/s390x/sigp.c b/target/s390x/sigp.c
index 5e95c497..f5d7bc0 100644
--- a/target/s390x/sigp.c
+++ b/target/s390x/sigp.c
@@ -13,12 +13,14 @@
#include "s390x-internal.h"
#include "hw/boards.h"
#include "system/hw_accel.h"
+#include "system/memory.h"
#include "system/runstate.h"
#include "system/address-spaces.h"
#include "exec/cputlb.h"
#include "system/tcg.h"
#include "trace.h"
#include "qapi/qapi-types-machine.h"
+#include "target/s390x/kvm/pv.h"
QemuMutex qemu_sigp_mutex;
@@ -126,6 +128,78 @@ static void sigp_stop(CPUState *cs, run_on_cpu_data arg)
si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
+typedef struct SigpSaveArea {
+ uint64_t fprs[16]; /* 0x0000 */
+ uint64_t grs[16]; /* 0x0080 */
+ PSW psw; /* 0x0100 */
+ uint8_t pad_0x0110[0x0118 - 0x0110]; /* 0x0110 */
+ uint32_t prefix; /* 0x0118 */
+ uint32_t fpc; /* 0x011c */
+ uint8_t pad_0x0120[0x0124 - 0x0120]; /* 0x0120 */
+ uint32_t todpr; /* 0x0124 */
+ uint64_t cputm; /* 0x0128 */
+ uint64_t ckc; /* 0x0130 */
+ uint8_t pad_0x0138[0x0140 - 0x0138]; /* 0x0138 */
+ uint32_t ars[16]; /* 0x0140 */
+ uint64_t crs[16]; /* 0x0384 */
+} SigpSaveArea;
+QEMU_BUILD_BUG_ON(sizeof(SigpSaveArea) != 512);
+
+#define S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area)
+static int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
+{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
+ AddressSpace *as = CPU(cpu)->as;
+ SigpSaveArea *sa;
+ hwaddr len = sizeof(*sa);
+ int i;
+
+ /* For PVMs storing will occur when this cpu enters SIE again */
+ if (s390_is_pv()) {
+ return 0;
+ }
+
+ sa = address_space_map(as, addr, &len, true, attrs);
+ if (!sa) {
+ return -EFAULT;
+ }
+ if (len != sizeof(*sa)) {
+ address_space_unmap(as, sa, len, true, 0);
+ return -EFAULT;
+ }
+
+ if (store_arch) {
+ static const uint8_t ar_id = 1;
+
+ address_space_stb(as, offsetof(LowCore, ar_access_id),
+ ar_id, attrs, NULL);
+
+ }
+ for (i = 0; i < 16; ++i) {
+ sa->fprs[i] = cpu_to_be64(*get_freg(&cpu->env, i));
+ }
+ for (i = 0; i < 16; ++i) {
+ sa->grs[i] = cpu_to_be64(cpu->env.regs[i]);
+ }
+ sa->psw.addr = cpu_to_be64(cpu->env.psw.addr);
+ sa->psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(&cpu->env));
+ sa->prefix = cpu_to_be32(cpu->env.psa);
+ sa->fpc = cpu_to_be32(cpu->env.fpc);
+ sa->todpr = cpu_to_be32(cpu->env.todpr);
+ sa->cputm = cpu_to_be64(cpu->env.cputm);
+ sa->ckc = cpu_to_be64(cpu->env.ckc >> 8);
+ for (i = 0; i < 16; ++i) {
+ sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]);
+ }
+ for (i = 0; i < 16; ++i) {
+ sa->crs[i] = cpu_to_be64(cpu->env.cregs[i]);
+ }
+
+ address_space_unmap(as, sa, len, true, len);
+
+ return 0;
+}
+
static void sigp_stop_and_store_status(CPUState *cs, run_on_cpu_data arg)
{
S390CPU *cpu = S390_CPU(cs);
@@ -172,6 +246,49 @@ static void sigp_store_status_at_address(CPUState *cs, run_on_cpu_data arg)
si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
+typedef struct SigpAdtlSaveArea {
+ uint64_t vregs[32][2]; /* 0x0000 */
+ uint8_t pad_0x0200[0x0400 - 0x0200]; /* 0x0200 */
+ uint64_t gscb[4]; /* 0x0400 */
+ uint8_t pad_0x0420[0x1000 - 0x0420]; /* 0x0420 */
+} SigpAdtlSaveArea;
+QEMU_BUILD_BUG_ON(sizeof(SigpAdtlSaveArea) != 4096);
+
+#define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */
+static int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len)
+{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
+ AddressSpace *as = CPU(cpu)->as;
+ SigpAdtlSaveArea *sa;
+ hwaddr save = len;
+ int i;
+
+ sa = address_space_map(as, addr, &save, true, attrs);
+ if (!sa) {
+ return -EFAULT;
+ }
+ if (save != len) {
+ address_space_unmap(as, sa, len, true, 0);
+ return -EFAULT;
+ }
+
+ if (s390_has_feat(S390_FEAT_VECTOR)) {
+ for (i = 0; i < 32; i++) {
+ sa->vregs[i][0] = cpu_to_be64(cpu->env.vregs[i][0]);
+ sa->vregs[i][1] = cpu_to_be64(cpu->env.vregs[i][1]);
+ }
+ }
+ if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) && len >= ADTL_GS_MIN_SIZE) {
+ for (i = 0; i < 4; i++) {
+ sa->gscb[i] = cpu_to_be64(cpu->env.gscb[i]);
+ }
+ }
+
+ address_space_unmap(as, sa, len, true, len);
+
+ return 0;
+}
+
#define ADTL_SAVE_LC_MASK 0xfUL
static void sigp_store_adtl_status(CPUState *cs, run_on_cpu_data arg)
{
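sigp.c now owns both save-area layouts and, when store_arch is set, writes the single architecture-ID byte with address_space_stb() instead of cpu_physical_memory_write(). Unlike the old helper, address_space_stb() takes the value directly rather than a pointer; a reduced sketch of that store (real API, same absolute lowcore offset as in the hunk above):

    AddressSpace *as = CPU(cpu)->as;

    /* Store ar_id = 1 into the absolute lowcore's ar_access_id byte. */
    address_space_stb(as, offsetof(LowCore, ar_access_id),
                      1, MEMTXATTRS_UNSPECIFIED, NULL);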
diff --git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c
index 4c7faee..0ae4e26 100644
--- a/target/s390x/tcg/excp_helper.c
+++ b/target/s390x/tcg/excp_helper.c
@@ -30,6 +30,7 @@
#ifndef CONFIG_USER_ONLY
#include "qemu/timer.h"
#include "system/address-spaces.h"
+#include "system/memory.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
@@ -284,7 +285,7 @@ static void do_program_interrupt(CPUS390XState *env)
addr = be64_to_cpu(lowcore->program_new_psw.addr);
lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);
- cpu_unmap_lowcore(lowcore);
+ cpu_unmap_lowcore(env, lowcore);
s390_cpu_set_psw(env, mask, addr);
}
@@ -303,7 +304,7 @@ static void do_svc_interrupt(CPUS390XState *env)
mask = be64_to_cpu(lowcore->svc_new_psw.mask);
addr = be64_to_cpu(lowcore->svc_new_psw.addr);
- cpu_unmap_lowcore(lowcore);
+ cpu_unmap_lowcore(env, lowcore);
s390_cpu_set_psw(env, mask, addr);
@@ -377,7 +378,7 @@ static void do_ext_interrupt(CPUS390XState *env)
lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env));
lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
- cpu_unmap_lowcore(lowcore);
+ cpu_unmap_lowcore(env, lowcore);
s390_cpu_set_psw(env, mask, addr);
}
@@ -404,7 +405,7 @@ static void do_io_interrupt(CPUS390XState *env)
mask = be64_to_cpu(lowcore->io_new_psw.mask);
addr = be64_to_cpu(lowcore->io_new_psw.addr);
- cpu_unmap_lowcore(lowcore);
+ cpu_unmap_lowcore(env, lowcore);
g_free(io);
s390_cpu_set_psw(env, mask, addr);
@@ -418,16 +419,18 @@ QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);
static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
+ const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
+ AddressSpace *as = env_cpu(env)->as;
hwaddr len = sizeof(MchkExtSaveArea);
MchkExtSaveArea *sa;
int i;
- sa = cpu_physical_memory_map(mcesao, &len, true);
+ sa = address_space_map(as, mcesao, &len, true, attrs);
if (!sa) {
return -EFAULT;
}
if (len != sizeof(MchkExtSaveArea)) {
- cpu_physical_memory_unmap(sa, len, 1, 0);
+ address_space_unmap(as, sa, len, true, 0);
return -EFAULT;
}
@@ -436,7 +439,7 @@ static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
}
- cpu_physical_memory_unmap(sa, len, 1, len);
+ address_space_unmap(as, sa, len, true, len);
return 0;
}
@@ -488,7 +491,7 @@ static void do_mchk_interrupt(CPUS390XState *env)
mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
addr = be64_to_cpu(lowcore->mcck_new_psw.addr);
- cpu_unmap_lowcore(lowcore);
+ cpu_unmap_lowcore(env, lowcore);
s390_cpu_set_psw(env, mask, addr);
}
diff --git a/target/s390x/tcg/misc_helper.c b/target/s390x/tcg/misc_helper.c
index f7101be..6d9d601 100644
--- a/target/s390x/tcg/misc_helper.c
+++ b/target/s390x/tcg/misc_helper.c
@@ -570,7 +570,7 @@ uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
lowcore->subchannel_nr = cpu_to_be16(io->nr);
lowcore->io_int_parm = cpu_to_be32(io->parm);
lowcore->io_int_word = cpu_to_be32(io->word);
- cpu_unmap_lowcore(lowcore);
+ cpu_unmap_lowcore(env, lowcore);
}
g_free(io);
@@ -700,7 +700,7 @@ void HELPER(stfl)(CPUS390XState *env)
lowcore = cpu_map_lowcore(env);
prepare_stfl();
memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list));
- cpu_unmap_lowcore(lowcore);
+ cpu_unmap_lowcore(env, lowcore);
}
#endif