Diffstat (limited to 'target/arm/helper.c')
-rw-r--r--  target/arm/helper.c  933
1 file changed, 421 insertions, 512 deletions
diff --git a/target/arm/helper.c b/target/arm/helper.c
index c442947..aa730ad 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -40,26 +40,57 @@
static void switch_mode(CPUARMState *env, int mode);
+int compare_u64(const void *a, const void *b)
+{
+ if (*(uint64_t *)a > *(uint64_t *)b) {
+ return 1;
+ }
+ if (*(uint64_t *)a < *(uint64_t *)b) {
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Macros which are lvalues for the field in CPUARMState for the
+ * ARMCPRegInfo *ri.
+ */
+#define CPREG_FIELD32(env, ri) \
+ (*(uint32_t *)((char *)(env) + (ri)->fieldoffset))
+#define CPREG_FIELD64(env, ri) \
+ (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))
+
uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
assert(ri->fieldoffset);
- if (cpreg_field_is_64bit(ri)) {
+ switch (cpreg_field_type(ri)) {
+ case MO_64:
return CPREG_FIELD64(env, ri);
- } else {
+ case MO_32:
return CPREG_FIELD32(env, ri);
+ default:
+ g_assert_not_reached();
}
}
void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
assert(ri->fieldoffset);
- if (cpreg_field_is_64bit(ri)) {
+ switch (cpreg_field_type(ri)) {
+ case MO_64:
CPREG_FIELD64(env, ri) = value;
- } else {
+ break;
+ case MO_32:
CPREG_FIELD32(env, ri) = value;
+ break;
+ default:
+ g_assert_not_reached();
}
}
+#undef CPREG_FIELD32
+#undef CPREG_FIELD64
+
static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
return (char *)env + ri->fieldoffset;
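The raw accessors above locate a register's backing storage purely from ri->fieldoffset and dispatch on the width reported by cpreg_field_type() (MO_32 or MO_64). The following standalone sketch models that pattern; ToyEnv, RegDesc and reg_raw_read are hypothetical stand-ins for CPUARMState, ARMCPRegInfo and raw_read:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-ins for CPUARMState/ARMCPRegInfo; all names are hypothetical. */
    typedef struct { uint32_t r32; uint64_t r64; } ToyEnv;
    typedef struct { size_t fieldoffset; int width; } RegDesc;   /* 32 or 64 */

    static uint64_t reg_raw_read(ToyEnv *env, const RegDesc *ri)
    {
        char *base = (char *)env + ri->fieldoffset;

        switch (ri->width) {
        case 64:
            return *(uint64_t *)base;
        case 32:
            return *(uint32_t *)base;
        default:
            assert(0);
            return 0;
        }
    }

    int main(void)
    {
        ToyEnv env = { .r32 = 0x1234, .r64 = 0xdeadbeef };
        RegDesc d32 = { offsetof(ToyEnv, r32), 32 };
        RegDesc d64 = { offsetof(ToyEnv, r64), 64 };

        printf("%llx %llx\n",
               (unsigned long long)reg_raw_read(&env, &d32),
               (unsigned long long)reg_raw_read(&env, &d64));
        return 0;
    }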
@@ -198,11 +229,11 @@ bool write_list_to_cpustate(ARMCPU *cpu)
return ok;
}
-static void add_cpreg_to_list(gpointer key, gpointer opaque)
+static void add_cpreg_to_list(gpointer key, gpointer value, gpointer opaque)
{
ARMCPU *cpu = opaque;
uint32_t regidx = (uintptr_t)key;
- const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
+ const ARMCPRegInfo *ri = value;
if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
@@ -211,61 +242,49 @@ static void add_cpreg_to_list(gpointer key, gpointer opaque)
}
}
-static void count_cpreg(gpointer key, gpointer opaque)
+static void count_cpreg(gpointer key, gpointer value, gpointer opaque)
{
ARMCPU *cpu = opaque;
- const ARMCPRegInfo *ri;
-
- ri = g_hash_table_lookup(cpu->cp_regs, key);
+ const ARMCPRegInfo *ri = value;
if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
cpu->cpreg_array_len++;
}
}
-static gint cpreg_key_compare(gconstpointer a, gconstpointer b, gpointer d)
-{
- uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
- uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);
-
- if (aidx > bidx) {
- return 1;
- }
- if (aidx < bidx) {
- return -1;
- }
- return 0;
-}
-
void init_cpreg_list(ARMCPU *cpu)
{
/*
* Initialise the cpreg_tuples[] array based on the cp_regs hash.
* Note that we require cpreg_tuples[] to be sorted by key ID.
*/
- GList *keys;
int arraylen;
- keys = g_hash_table_get_keys(cpu->cp_regs);
- keys = g_list_sort_with_data(keys, cpreg_key_compare, NULL);
-
cpu->cpreg_array_len = 0;
-
- g_list_foreach(keys, count_cpreg, cpu);
+ g_hash_table_foreach(cpu->cp_regs, count_cpreg, cpu);
arraylen = cpu->cpreg_array_len;
- cpu->cpreg_indexes = g_new(uint64_t, arraylen);
- cpu->cpreg_values = g_new(uint64_t, arraylen);
- cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
- cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
- cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
+ if (arraylen) {
+ cpu->cpreg_indexes = g_new(uint64_t, arraylen);
+ cpu->cpreg_values = g_new(uint64_t, arraylen);
+ cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
+ cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
+ } else {
+ cpu->cpreg_indexes = NULL;
+ cpu->cpreg_values = NULL;
+ cpu->cpreg_vmstate_indexes = NULL;
+ cpu->cpreg_vmstate_values = NULL;
+ }
+ cpu->cpreg_vmstate_array_len = arraylen;
cpu->cpreg_array_len = 0;
- g_list_foreach(keys, add_cpreg_to_list, cpu);
+ g_hash_table_foreach(cpu->cp_regs, add_cpreg_to_list, cpu);
assert(cpu->cpreg_array_len == arraylen);
- g_list_free(keys);
+ if (arraylen) {
+ qsort(cpu->cpreg_indexes, arraylen, sizeof(uint64_t), compare_u64);
+ }
}
bool arm_pan_enabled(CPUARMState *env)
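With the switch from GFunc to GHFunc callbacks, init_cpreg_list() no longer sorts a GList of keys up front: it walks the hash table twice with g_hash_table_foreach() (once to count, once to fill) and then sorts the resulting index array with qsort() and compare_u64(). A minimal sketch of that traverse-then-sort flow, assuming for simplicity that each hash key is already the sortable id (in the real code cpreg_to_kvm_id() does that translation):

    /* Build with: gcc sketch.c $(pkg-config --cflags --libs glib-2.0) */
    #include <glib.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int compare_u64(const void *a, const void *b)
    {
        uint64_t x = *(const uint64_t *)a, y = *(const uint64_t *)b;
        return (x > y) - (x < y);
    }

    /* GHFunc: key and value arrive directly, no extra hash lookup needed. */
    static void collect_key(gpointer key, gpointer value, gpointer opaque)
    {
        GArray *out = opaque;
        uint64_t id = (uintptr_t)key;     /* stand-in for cpreg_to_kvm_id() */

        (void)value;
        g_array_append_val(out, id);
    }

    int main(void)
    {
        GHashTable *regs = g_hash_table_new(NULL, NULL);  /* direct hash */
        GArray *ids = g_array_new(FALSE, FALSE, sizeof(uint64_t));

        /* A few fake register keys, inserted in no particular order. */
        g_hash_table_insert(regs, GUINT_TO_POINTER(0x30), "c");
        g_hash_table_insert(regs, GUINT_TO_POINTER(0x10), "a");
        g_hash_table_insert(regs, GUINT_TO_POINTER(0x20), "b");

        /* Unsorted traversal followed by one qsort, as in init_cpreg_list(). */
        g_hash_table_foreach(regs, collect_key, ids);
        qsort(ids->data, ids->len, sizeof(uint64_t), compare_u64);

        for (guint i = 0; i < ids->len; i++) {
            printf("0x%" PRIx64 "\n", g_array_index(ids, uint64_t, i));
        }

        g_array_free(ids, TRUE);
        g_hash_table_destroy(regs);
        return 0;
    }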
@@ -435,6 +454,8 @@ static const ARMCPRegInfo cp_reginfo[] = {
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_CONTEXTIDR_EL1,
.nv2_redirect_offset = 0x108 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 13, 0, 1),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 13, 0, 1),
.secure = ARM_CP_SECSTATE_NS,
.fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
.resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
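The CONTEXTIDR_EL1 entry above is the first of many to gain .vhe_redir_to_el2 and .vhe_redir_to_el01: the encoded system-register keys of its EL2 counterpart and of its FOO_EL12 alias, built with the patch's five-argument ENCODE_AA64_CP_REG(op0, op1, crn, crm, op2). The sketch below re-states that encoding under the assumption that the key uses the Linux KVM sysreg field layout that QEMU's CP_REG_ARM64_SYSREG_* constants mirror (op0 at bit 14, op1 at 11, CRn at 7, CRm at 3, op2 at 0); the bit positions are an assumption here, the argument order is as used in the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed field positions (KVM sysreg layout); ENCODE_SYSREG is a
     * hypothetical re-statement of the new ENCODE_AA64_CP_REG(). */
    #define OP0_SHIFT 14
    #define OP1_SHIFT 11
    #define CRN_SHIFT  7
    #define CRM_SHIFT  3
    #define OP2_SHIFT  0

    #define ENCODE_SYSREG(op0, op1, crn, crm, op2)            \
        (((op0) << OP0_SHIFT) | ((op1) << OP1_SHIFT) |        \
         ((crn) << CRN_SHIFT) | ((crm) << CRM_SHIFT) |        \
         ((op2) << OP2_SHIFT))

    static void decode(uint32_t key)
    {
        printf("op0=%u op1=%u crn=%u crm=%u op2=%u\n",
               (key >> OP0_SHIFT) & 3, (key >> OP1_SHIFT) & 7,
               (key >> CRN_SHIFT) & 15, (key >> CRM_SHIFT) & 15,
               (key >> OP2_SHIFT) & 7);
    }

    int main(void)
    {
        /* CONTEXTIDR_EL2 (op1 == 4) and the CONTEXTIDR_EL12 alias (op1 == 5). */
        decode(ENCODE_SYSREG(3, 4, 13, 0, 1));
        decode(ENCODE_SYSREG(3, 5, 13, 0, 1));
        return 0;
    }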
@@ -652,9 +673,11 @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
*/
{ .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
.access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
- { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
+ { .name = "CPACR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
.crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
.fgt = FGT_CPACR_EL1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 1, 2),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 0, 2),
.nv2_redirect_offset = 0x100 | NV2_REDIR_NV1,
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
.resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
@@ -937,12 +960,16 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_AFSR0_EL1,
.nv2_redirect_offset = 0x128 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 1, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 1, 0),
.type = ARM_CP_CONST, .resetvalue = 0 },
{ .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_AFSR1_EL1,
.nv2_redirect_offset = 0x130 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 1, 1),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 1, 1),
.type = ARM_CP_CONST, .resetvalue = 0 },
/*
* MAIR can just read-as-written because we don't implement caches
@@ -953,6 +980,8 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_MAIR_EL1,
.nv2_redirect_offset = 0x140 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 2, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 0),
.fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
.resetvalue = 0 },
{ .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
@@ -1062,7 +1091,7 @@ static const ARMCPRegInfo v6k_cp_reginfo[] = {
.resetvalue = 0 },
};
-static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
+static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = env_archcpu(env);
@@ -1999,9 +2028,11 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
.resetfn = arm_gt_cntfrq_reset,
},
/* overall control: mostly access permissions */
- { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
+ { .name = "CNTKCTL_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
.access = PL1_RW,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 14, 1, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 14, 1, 0),
.fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
.resetvalue = 0,
},
@@ -2731,7 +2762,7 @@ static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* If the ASID changes (with a 64-bit write), we must flush the TLB. */
- if (cpreg_field_is_64bit(ri) &&
+ if (cpreg_field_type(ri) == MO_64 &&
extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
ARMCPU *cpu = env_archcpu(env);
tlb_flush(CPU(cpu));
@@ -2792,6 +2823,8 @@ static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_FAR_EL1,
.nv2_redirect_offset = 0x220 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 6, 0, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 6, 0, 0),
.fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
.resetvalue = 0, },
};
@@ -2802,12 +2835,16 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_ESR_EL1,
.nv2_redirect_offset = 0x138 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 2, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 2, 0),
.fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
{ .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_TTBR0_EL1,
.nv2_redirect_offset = 0x200 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 0),
.writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
.bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
offsetof(CPUARMState, cp15.ttbr0_ns) } },
@@ -2816,6 +2853,8 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_TTBR1_EL1,
.nv2_redirect_offset = 0x210 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 1),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 1),
.writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
.bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
offsetof(CPUARMState, cp15.ttbr1_ns) } },
@@ -2824,6 +2863,8 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_TCR_EL1,
.nv2_redirect_offset = 0x120 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 2),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 2),
.writefn = vmsa_tcr_el12_write,
.raw_writefn = raw_write,
.resetvalue = 0,
@@ -3029,12 +3070,14 @@ static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
}
static const ARMCPRegInfo lpae_cp_reginfo[] = {
- /* NOP AMAIR0/1 */
- { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
+ /* AMAIR0 is mapped to AMAIR_EL1[31:0] */
+ { .name = "AMAIR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_AMAIR_EL1,
.nv2_redirect_offset = 0x148 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 3, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 3, 0),
.type = ARM_CP_CONST, .resetvalue = 0 },
/* AMAIR1 is mapped to AMAIR_EL1[63:32] */
{ .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
@@ -3550,12 +3593,16 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
.access = PL1_RW, .accessfn = access_nv1,
.nv2_redirect_offset = 0x230 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 4, 0, 1),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 4, 0, 1),
.fieldoffset = offsetof(CPUARMState, elr_el[1]) },
{ .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
.access = PL1_RW, .accessfn = access_nv1,
.nv2_redirect_offset = 0x160 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 4, 0, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 4, 0, 0),
.fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
/*
* We rely on the access checks not allowing the guest to write to the
@@ -4398,234 +4445,6 @@ static CPAccessResult access_el1nvvct(CPUARMState *env, const ARMCPRegInfo *ri,
return e2h_access(env, ri, isread);
}
-/* Test if system register redirection is to occur in the current state. */
-static bool redirect_for_e2h(CPUARMState *env)
-{
- return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
-}
-
-static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- CPReadFn *readfn;
-
- if (redirect_for_e2h(env)) {
- /* Switch to the saved EL2 version of the register. */
- ri = ri->opaque;
- readfn = ri->readfn;
- } else {
- readfn = ri->orig_readfn;
- }
- if (readfn == NULL) {
- readfn = raw_read;
- }
- return readfn(env, ri);
-}
-
-static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPWriteFn *writefn;
-
- if (redirect_for_e2h(env)) {
- /* Switch to the saved EL2 version of the register. */
- ri = ri->opaque;
- writefn = ri->writefn;
- } else {
- writefn = ri->orig_writefn;
- }
- if (writefn == NULL) {
- writefn = raw_write;
- }
- writefn(env, ri, value);
-}
-
-static uint64_t el2_e2h_e12_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- /* Pass the EL1 register accessor its ri, not the EL12 alias ri */
- return ri->orig_readfn(env, ri->opaque);
-}
-
-static void el2_e2h_e12_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /* Pass the EL1 register accessor its ri, not the EL12 alias ri */
- return ri->orig_writefn(env, ri->opaque, value);
-}
-
-static CPAccessResult el2_e2h_e12_access(CPUARMState *env,
- const ARMCPRegInfo *ri,
- bool isread)
-{
- if (arm_current_el(env) == 1) {
- /*
- * This must be a FEAT_NV access (will either trap or redirect
- * to memory). None of the registers with _EL12 aliases want to
- * apply their trap controls for this kind of access, so don't
- * call the orig_accessfn or do the "UNDEF when E2H is 0" check.
- */
- return CP_ACCESS_OK;
- }
- /* FOO_EL12 aliases only exist when E2H is 1; otherwise they UNDEF */
- if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
- return CP_ACCESS_UNDEFINED;
- }
- if (ri->orig_accessfn) {
- return ri->orig_accessfn(env, ri->opaque, isread);
- }
- return CP_ACCESS_OK;
-}
-
-static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
-{
- struct E2HAlias {
- uint32_t src_key, dst_key, new_key;
- const char *src_name, *dst_name, *new_name;
- bool (*feature)(const ARMISARegisters *id);
- };
-
-#define K(op0, op1, crn, crm, op2) \
- ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
-
- static const struct E2HAlias aliases[] = {
- { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0),
- "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
- { K(3, 0, 1, 0, 3), K(3, 4, 1, 0, 3), K(3, 5, 1, 0, 3),
- "SCTLR2_EL1", "SCTLR2_EL2", "SCTLR2_EL12", isar_feature_aa64_sctlr2 },
- { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2),
- "CPACR", "CPTR_EL2", "CPACR_EL12" },
- { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0),
- "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
- { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1),
- "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
- { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2),
- "TCR_EL1", "TCR_EL2", "TCR_EL12" },
- { K(3, 0, 2, 0, 3), K(3, 4, 2, 0, 3), K(3, 5, 2, 0, 3),
- "TCR2_EL1", "TCR2_EL2", "TCR2_EL12", isar_feature_aa64_tcr2 },
- { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0),
- "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
- { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1),
- "ELR_EL1", "ELR_EL2", "ELR_EL12" },
- { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0),
- "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
- { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1),
- "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
- { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0),
- "ESR_EL1", "ESR_EL2", "ESR_EL12" },
- { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0),
- "FAR_EL1", "FAR_EL2", "FAR_EL12" },
- { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
- "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
- { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
- "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
- { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
- "VBAR", "VBAR_EL2", "VBAR_EL12" },
- { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
- "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
- { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
- "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
-
- { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0),
- "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
- { K(3, 0, 1, 2, 6), K(3, 4, 1, 2, 6), K(3, 5, 1, 2, 6),
- "SMCR_EL1", "SMCR_EL2", "SMCR_EL12", isar_feature_aa64_sme },
-
- { K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0),
- "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
-
- { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7),
- "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12",
- isar_feature_aa64_scxtnum },
-
- /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
- /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
- };
-#undef K
-
- size_t i;
-
- for (i = 0; i < ARRAY_SIZE(aliases); i++) {
- const struct E2HAlias *a = &aliases[i];
- ARMCPRegInfo *src_reg, *dst_reg, *new_reg;
- bool ok;
-
- if (a->feature && !a->feature(&cpu->isar)) {
- continue;
- }
-
- src_reg = g_hash_table_lookup(cpu->cp_regs,
- (gpointer)(uintptr_t)a->src_key);
- dst_reg = g_hash_table_lookup(cpu->cp_regs,
- (gpointer)(uintptr_t)a->dst_key);
- g_assert(src_reg != NULL);
- g_assert(dst_reg != NULL);
-
- /* Cross-compare names to detect typos in the keys. */
- g_assert(strcmp(src_reg->name, a->src_name) == 0);
- g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
-
- /* None of the core system registers use opaque; we will. */
- g_assert(src_reg->opaque == NULL);
-
- /* Create alias before redirection so we dup the right data. */
- new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
-
- new_reg->name = a->new_name;
- new_reg->type |= ARM_CP_ALIAS;
- /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
- new_reg->access &= PL2_RW | PL3_RW;
- /* The new_reg op fields are as per new_key, not the target reg */
- new_reg->crn = (a->new_key & CP_REG_ARM64_SYSREG_CRN_MASK)
- >> CP_REG_ARM64_SYSREG_CRN_SHIFT;
- new_reg->crm = (a->new_key & CP_REG_ARM64_SYSREG_CRM_MASK)
- >> CP_REG_ARM64_SYSREG_CRM_SHIFT;
- new_reg->opc0 = (a->new_key & CP_REG_ARM64_SYSREG_OP0_MASK)
- >> CP_REG_ARM64_SYSREG_OP0_SHIFT;
- new_reg->opc1 = (a->new_key & CP_REG_ARM64_SYSREG_OP1_MASK)
- >> CP_REG_ARM64_SYSREG_OP1_SHIFT;
- new_reg->opc2 = (a->new_key & CP_REG_ARM64_SYSREG_OP2_MASK)
- >> CP_REG_ARM64_SYSREG_OP2_SHIFT;
- new_reg->opaque = src_reg;
- new_reg->orig_readfn = src_reg->readfn ?: raw_read;
- new_reg->orig_writefn = src_reg->writefn ?: raw_write;
- new_reg->orig_accessfn = src_reg->accessfn;
- if (!new_reg->raw_readfn) {
- new_reg->raw_readfn = raw_read;
- }
- if (!new_reg->raw_writefn) {
- new_reg->raw_writefn = raw_write;
- }
- new_reg->readfn = el2_e2h_e12_read;
- new_reg->writefn = el2_e2h_e12_write;
- new_reg->accessfn = el2_e2h_e12_access;
-
- /*
- * If the _EL1 register is redirected to memory by FEAT_NV2,
- * then it shares the offset with the _EL12 register,
- * and which one is redirected depends on HCR_EL2.NV1.
- */
- if (new_reg->nv2_redirect_offset) {
- assert(new_reg->nv2_redirect_offset & NV2_REDIR_NV1);
- new_reg->nv2_redirect_offset &= ~NV2_REDIR_NV1;
- new_reg->nv2_redirect_offset |= NV2_REDIR_NO_NV1;
- }
-
- ok = g_hash_table_insert(cpu->cp_regs,
- (gpointer)(uintptr_t)a->new_key, new_reg);
- g_assert(ok);
-
- src_reg->opaque = dst_reg;
- src_reg->orig_readfn = src_reg->readfn ?: raw_read;
- src_reg->orig_writefn = src_reg->writefn ?: raw_write;
- if (!src_reg->raw_readfn) {
- src_reg->raw_readfn = raw_read;
- }
- if (!src_reg->raw_writefn) {
- src_reg->raw_writefn = raw_write;
- }
- src_reg->readfn = el2_e2h_read;
- src_reg->writefn = el2_e2h_write;
- }
-}
#endif
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
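The block removed above implemented VHE redirection by wrapping each register's readfn/writefn: when redirect_for_e2h() held (running at EL2 with HCR_EL2.E2H set), an access to the EL1-named register was diverted to the saved EL2 register stashed in ri->opaque. A toy model of just that decision, with hypothetical names; it illustrates the behaviour being removed, not the replacement mechanism driven by the new vhe_redir fields:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy model of the removed el2_e2h_* wrappers; names are hypothetical. */
    typedef struct {
        uint64_t el1_val;   /* e.g. CONTEXTIDR_EL1 backing storage */
        uint64_t el2_val;   /* e.g. CONTEXTIDR_EL2 backing storage */
        int current_el;
        bool hcr_e2h;
    } ToyCPU;

    /* Mirrors redirect_for_e2h(): redirect only at EL2 with HCR_EL2.E2H set. */
    static bool redirect_for_e2h(const ToyCPU *cpu)
    {
        return cpu->current_el == 2 && cpu->hcr_e2h;
    }

    /* An EL1-named access lands on the EL2 storage when redirection applies. */
    static uint64_t read_el1_named_reg(const ToyCPU *cpu)
    {
        return redirect_for_e2h(cpu) ? cpu->el2_val : cpu->el1_val;
    }

    int main(void)
    {
        ToyCPU cpu = { .el1_val = 0x11, .el2_val = 0x22,
                       .current_el = 2, .hcr_e2h = true };

        printf("EL2 with E2H: %#llx\n",
               (unsigned long long)read_el1_named_reg(&cpu));
        cpu.hcr_e2h = false;
        printf("EL2 without E2H: %#llx\n",
               (unsigned long long)read_el1_named_reg(&cpu));
        return 0;
    }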
@@ -4918,6 +4737,8 @@ static const ARMCPRegInfo zcr_reginfo[] = {
{ .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
.nv2_redirect_offset = 0x1e0 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 2, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 2, 0),
.access = PL1_RW, .type = ARM_CP_SVE,
.fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
.writefn = zcr_write, .raw_writefn = raw_write },
@@ -5063,6 +4884,8 @@ static const ARMCPRegInfo sme_reginfo[] = {
{ .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6,
.nv2_redirect_offset = 0x1f0 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 2, 6),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 2, 6),
.access = PL1_RW, .type = ARM_CP_SME,
.fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]),
.writefn = smcr_write, .raw_writefn = raw_write },
@@ -5184,7 +5007,7 @@ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
uint64_t pfr1 = GET_IDREG(&cpu->isar, ID_PFR1);
if (env->gicv3state) {
- pfr1 |= 1 << 28;
+ pfr1 = FIELD_DP64(pfr1, ID_PFR1, GIC, 1);
}
return pfr1;
}
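FIELD_DP64() deposits a value into a named field instead of open-coding the shift, which also documents which field is being set (ID_PFR1.GIC occupies bits [31:28], so depositing 1 produces the same bit pattern as the old 1 << 28). A small sketch of the equivalent generic deposit operation:

    #include <stdint.h>
    #include <stdio.h>

    /* Generic deposit, as QEMU's FIELD_DP64 does for a named field:
     * clear the field, then OR in the new value at its offset. */
    static uint64_t deposit64_field(uint64_t reg, unsigned shift,
                                    unsigned length, uint64_t val)
    {
        uint64_t mask = ((1ull << length) - 1) << shift;
        return (reg & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
        uint64_t pfr1 = 0;

        /* ID_PFR1.GIC is bits [31:28]; depositing 1 yields 0x10000000. */
        pfr1 = deposit64_field(pfr1, 28, 4, 1);
        printf("%#llx\n", (unsigned long long)pfr1);
        return 0;
    }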
@@ -5195,7 +5018,7 @@ static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
uint64_t pfr0 = GET_IDREG(&cpu->isar, ID_AA64PFR0);
if (env->gicv3state) {
- pfr0 |= 1 << 24;
+ pfr0 = FIELD_DP64(pfr0, ID_AA64PFR0, GIC, 1);
}
return pfr0;
}
@@ -5371,7 +5194,7 @@ static const ARMCPRegInfo rndr_reginfo[] = {
.access = PL0_R, .readfn = rndr_readfn },
};
-static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
+static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
#ifdef CONFIG_TCG
@@ -5508,6 +5331,8 @@ static const ARMCPRegInfo mte_reginfo[] = {
.opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
.access = PL1_RW, .accessfn = access_tfsr_el1,
.nv2_redirect_offset = 0x190 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 6, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 6, 0),
.fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
{ .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_NV2_REDIRECT,
@@ -5683,6 +5508,8 @@ static const ARMCPRegInfo scxtnum_reginfo[] = {
.access = PL1_RW, .accessfn = access_scxtnum_el1,
.fgt = FGT_SCXTNUM_EL1,
.nv2_redirect_offset = 0x188 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 13, 0, 7),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 13, 0, 7),
.fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
{ .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
@@ -6027,6 +5854,8 @@ static const ARMCPRegInfo sctlr2_reginfo[] = {
.opc0 = 3, .opc1 = 0, .opc2 = 3, .crn = 1, .crm = 0,
.access = PL1_RW, .accessfn = sctlr2_el1_access,
.writefn = sctlr2_el1_write, .fgt = FGT_SCTLR_EL1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 0, 3),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 0, 3),
.nv2_redirect_offset = 0x278 | NV2_REDIR_NV1,
.fieldoffset = offsetof(CPUARMState, cp15.sctlr2_el[1]) },
{ .name = "SCTLR2_EL2", .state = ARM_CP_STATE_AA64,
@@ -6087,6 +5916,8 @@ static const ARMCPRegInfo tcr2_reginfo[] = {
.opc0 = 3, .opc1 = 0, .opc2 = 3, .crn = 2, .crm = 0,
.access = PL1_RW, .accessfn = tcr2_el1_access,
.writefn = tcr2_el1_write, .fgt = FGT_TCR_EL1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 3),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 3),
.nv2_redirect_offset = 0x270 | NV2_REDIR_NV1,
.fieldoffset = offsetof(CPUARMState, cp15.tcr2_el[1]) },
{ .name = "TCR2_EL2", .state = ARM_CP_STATE_AA64,
@@ -6278,11 +6109,11 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
.resetvalue = GET_IDREG(isar, ID_AA64PFR1)},
- { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ { .name = "ID_AA64PFR2_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = 0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64PFR2)},
{ .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -6510,6 +6341,8 @@ void register_cp_regs_for_features(ARMCPU *cpu)
R_ID_AA64PFR1_SSBS_MASK |
R_ID_AA64PFR1_MTE_MASK |
R_ID_AA64PFR1_SME_MASK },
+ { .name = "ID_AA64PFR2_EL1",
+ .exported_bits = 0 },
{ .name = "ID_AA64PFR*_EL1_RESERVED",
.is_glob = true },
{ .name = "ID_AA64ZFR0_EL1",
@@ -7177,12 +7010,14 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (arm_feature(env, ARM_FEATURE_VBAR)) {
static const ARMCPRegInfo vbar_cp_reginfo[] = {
- { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
+ { .name = "VBAR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .writefn = vbar_write,
.accessfn = access_nv1,
.fgt = FGT_VBAR_EL1,
.nv2_redirect_offset = 0x250 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 12, 0, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 12, 0, 0),
.bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
offsetof(CPUARMState, cp15.vbar_ns) },
.resetvalue = 0 },
@@ -7193,10 +7028,12 @@ void register_cp_regs_for_features(ARMCPU *cpu)
/* Generic registers whose values depend on the implementation */
{
ARMCPRegInfo sctlr = {
- .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
+ .name = "SCTLR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_SCTLR_EL1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 0, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 0, 0),
.nv2_redirect_offset = 0x110 | NV2_REDIR_NV1,
.bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
offsetof(CPUARMState, cp15.sctlr_ns) },
@@ -7331,61 +7168,40 @@ void register_cp_regs_for_features(ARMCPU *cpu)
}
define_pm_cpregs(cpu);
+}
-#ifndef CONFIG_USER_ONLY
- /*
- * Register redirections and aliases must be done last,
- * after the registers from the other extensions have been defined.
- */
- if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
- define_arm_vh_e2h_redirects_aliases(cpu);
+/*
+ * Copy an ARMCPRegInfo structure, allocating it together with its name
+ * and an optional suffix appended to that name.
+ */
+static ARMCPRegInfo *alloc_cpreg(const ARMCPRegInfo *in, const char *suffix)
+{
+ const char *name = in->name;
+ size_t name_len = strlen(name);
+ size_t suff_len = suffix ? strlen(suffix) : 0;
+ ARMCPRegInfo *out = g_malloc(sizeof(*in) + name_len + suff_len + 1);
+ char *p = (char *)(out + 1);
+
+ *out = *in;
+ out->name = p;
+
+ memcpy(p, name, name_len + 1);
+ if (suffix) {
+ memcpy(p + name_len, suffix, suff_len + 1);
}
-#endif
+ return out;
}
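alloc_cpreg() packs the copied struct and its (optionally suffixed) name into a single allocation, so callers such as add_cpreg_to_hashtable_aa32() can create the secure-bank copy with alloc_cpreg(r, "_S") and the whole object can later be released at once. A generic sketch of that single-allocation copy-and-rename pattern, with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy version of the copy-plus-name-in-one-allocation pattern. */
    typedef struct { const char *name; int flags; } Desc;

    static Desc *desc_dup_with_suffix(const Desc *in, const char *suffix)
    {
        size_t name_len = strlen(in->name);
        size_t suff_len = suffix ? strlen(suffix) : 0;
        Desc *out = malloc(sizeof(*out) + name_len + suff_len + 1);
        char *p = (char *)(out + 1);     /* name storage follows the struct */

        *out = *in;
        out->name = p;
        memcpy(p, in->name, name_len);
        memcpy(p + name_len, suffix ? suffix : "", suff_len + 1);
        return out;                       /* released with a single free() */
    }

    int main(void)
    {
        Desc sctlr = { .name = "SCTLR", .flags = 1 };
        Desc *sctlr_s = desc_dup_with_suffix(&sctlr, "_S");

        printf("%s\n", sctlr_s->name);    /* prints SCTLR_S */
        free(sctlr_s);
        return 0;
    }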
/*
- * Private utility function for define_one_arm_cp_reg_with_opaque():
+ * Private utility function for define_one_arm_cp_reg():
* add a single reginfo struct to the hash table.
*/
-static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
- void *opaque, CPState state,
- CPSecureState secstate,
- int crm, int opc1, int opc2,
- const char *name)
+static void add_cpreg_to_hashtable(ARMCPU *cpu, ARMCPRegInfo *r,
+ CPState state, CPSecureState secstate,
+ uint32_t key)
{
CPUARMState *env = &cpu->env;
- uint32_t key;
- ARMCPRegInfo *r2;
- bool is64 = r->type & ARM_CP_64BIT;
bool ns = secstate & ARM_CP_SECSTATE_NS;
- int cp = r->cp;
- size_t name_len;
- bool make_const;
-
- switch (state) {
- case ARM_CP_STATE_AA32:
- /* We assume it is a cp15 register if the .cp field is left unset. */
- if (cp == 0 && r->state == ARM_CP_STATE_BOTH) {
- cp = 15;
- }
- key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2);
- break;
- case ARM_CP_STATE_AA64:
- /*
- * To allow abbreviation of ARMCPRegInfo definitions, we treat
- * cp == 0 as equivalent to the value for "standard guest-visible
- * sysreg". STATE_BOTH definitions are also always "standard sysreg"
- * in their AArch64 view (the .cp value may be non-zero for the
- * benefit of the AArch32 view).
- */
- if (cp == 0 || r->state == ARM_CP_STATE_BOTH) {
- cp = CP_REG_ARM64_SYSREG_CP;
- }
- key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2);
- break;
- default:
- g_assert_not_reached();
- }
/* Overriding of an existing definition must be explicitly requested. */
if (!(r->type & ARM_CP_OVERRIDE)) {
@@ -7395,84 +7211,7 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
}
}
- /*
- * Eliminate registers that are not present because the EL is missing.
- * Doing this here makes it easier to put all registers for a given
- * feature into the same ARMCPRegInfo array and define them all at once.
- */
- make_const = false;
- if (arm_feature(env, ARM_FEATURE_EL3)) {
- /*
- * An EL2 register without EL2 but with EL3 is (usually) RES0.
- * See rule RJFFP in section D1.1.3 of DDI0487H.a.
- */
- int min_el = ctz32(r->access) / 2;
- if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
- if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
- return;
- }
- make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP);
- }
- } else {
- CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
- ? PL2_RW : PL1_RW);
- if ((r->access & max_el) == 0) {
- return;
- }
- }
-
- /* Combine cpreg and name into one allocation. */
- name_len = strlen(name) + 1;
- r2 = g_malloc(sizeof(*r2) + name_len);
- *r2 = *r;
- r2->name = memcpy(r2 + 1, name, name_len);
-
- /*
- * Update fields to match the instantiation, overwiting wildcards
- * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH.
- */
- r2->cp = cp;
- r2->crm = crm;
- r2->opc1 = opc1;
- r2->opc2 = opc2;
- r2->state = state;
- r2->secure = secstate;
- if (opaque) {
- r2->opaque = opaque;
- }
-
- if (make_const) {
- /* This should not have been a very special register to begin. */
- int old_special = r2->type & ARM_CP_SPECIAL_MASK;
- assert(old_special == 0 || old_special == ARM_CP_NOP);
- /*
- * Set the special function to CONST, retaining the other flags.
- * This is important for e.g. ARM_CP_SVE so that we still
- * take the SVE trap if CPTR_EL3.EZ == 0.
- */
- r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
- /*
- * Usually, these registers become RES0, but there are a few
- * special cases like VPIDR_EL2 which have a constant non-zero
- * value with writes ignored.
- */
- if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
- r2->resetvalue = 0;
- }
- /*
- * ARM_CP_CONST has precedence, so removing the callbacks and
- * offsets are not strictly necessary, but it is potentially
- * less confusing to debug later.
- */
- r2->readfn = NULL;
- r2->writefn = NULL;
- r2->raw_readfn = NULL;
- r2->raw_writefn = NULL;
- r2->resetfn = NULL;
- r2->fieldoffset = 0;
- r2->bank_fieldoffsets[0] = 0;
- r2->bank_fieldoffsets[1] = 0;
- } else {
+ {
bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];
if (isbanked) {
@@ -7481,7 +7220,7 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
* Overwriting fieldoffset as the array is only used to define
* banked registers but later only fieldoffset is used.
*/
- r2->fieldoffset = r->bank_fieldoffsets[ns];
+ r->fieldoffset = r->bank_fieldoffsets[ns];
}
if (state == ARM_CP_STATE_AA32) {
if (isbanked) {
@@ -7498,54 +7237,187 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
*/
if ((r->state == ARM_CP_STATE_BOTH && ns) ||
(arm_feature(env, ARM_FEATURE_V8) && !ns)) {
- r2->type |= ARM_CP_ALIAS;
+ r->type |= ARM_CP_ALIAS;
}
} else if ((secstate != r->secure) && !ns) {
/*
* The register is not banked so we only want to allow
* migration of the non-secure instance.
*/
- r2->type |= ARM_CP_ALIAS;
- }
-
- if (HOST_BIG_ENDIAN &&
- r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
- r2->fieldoffset += sizeof(uint32_t);
+ r->type |= ARM_CP_ALIAS;
}
}
}
/*
- * By convention, for wildcarded registers only the first
- * entry is used for migration; the others are marked as
- * ALIAS so we don't try to transfer the register
- * multiple times. Special registers (ie NOP/WFI) are
- * never migratable and not even raw-accessible.
+ * For 32-bit AArch32 regs shared with 64-bit AArch64 regs,
+ * adjust the field offset for endianness. This had to be
+ * delayed until banked registers were resolved.
*/
- if (r2->type & ARM_CP_SPECIAL_MASK) {
- r2->type |= ARM_CP_NO_RAW;
+ if (HOST_BIG_ENDIAN &&
+ state == ARM_CP_STATE_AA32 &&
+ r->state == ARM_CP_STATE_BOTH &&
+ r->fieldoffset) {
+ r->fieldoffset += sizeof(uint32_t);
}
- if (((r->crm == CP_ANY) && crm != 0) ||
- ((r->opc1 == CP_ANY) && opc1 != 0) ||
- ((r->opc2 == CP_ANY) && opc2 != 0)) {
- r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
+
+ /*
+ * Special registers (ie NOP/WFI) are never migratable and
+ * are not even raw-accessible.
+ */
+ if (r->type & ARM_CP_SPECIAL_MASK) {
+ r->type |= ARM_CP_NO_RAW;
}
/*
+ * Update fields to match the instantiation, overwriting wildcards
+ * such as ARM_CP_STATE_BOTH or ARM_CP_SECSTATE_BOTH.
+ */
+ r->state = state;
+ r->secure = secstate;
+
+ /*
* Check that raw accesses are either forbidden or handled. Note that
* we can't assert this earlier because the setup of fieldoffset for
* banked registers has to be done first.
*/
- if (!(r2->type & ARM_CP_NO_RAW)) {
- assert(!raw_accessors_invalid(r2));
+ if (!(r->type & ARM_CP_NO_RAW)) {
+ assert(!raw_accessors_invalid(r));
}
- g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
+ g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r);
+}
+
+static void add_cpreg_to_hashtable_aa32(ARMCPU *cpu, ARMCPRegInfo *r)
+{
+ /*
+ * Under AArch32 CP registers can be common
+ * (same for secure and non-secure world) or banked.
+ */
+ ARMCPRegInfo *r_s;
+ bool is64 = r->type & ARM_CP_64BIT;
+ uint32_t key = ENCODE_CP_REG(r->cp, is64, 0, r->crn,
+ r->crm, r->opc1, r->opc2);
+
+ assert(!(r->type & ARM_CP_ADD_TLBI_NXS)); /* aa64 only */
+ r->vhe_redir_to_el2 = 0;
+ r->vhe_redir_to_el01 = 0;
+
+ switch (r->secure) {
+ case ARM_CP_SECSTATE_NS:
+ key |= CP_REG_AA32_NS_MASK;
+ /* fall through */
+ case ARM_CP_SECSTATE_S:
+ add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA32, r->secure, key);
+ break;
+ case ARM_CP_SECSTATE_BOTH:
+ r_s = alloc_cpreg(r, "_S");
+ add_cpreg_to_hashtable(cpu, r_s, ARM_CP_STATE_AA32,
+ ARM_CP_SECSTATE_S, key);
+
+ key |= CP_REG_AA32_NS_MASK;
+ add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA32,
+ ARM_CP_SECSTATE_NS, key);
+ break;
+ default:
+ g_assert_not_reached();
+ }
}
+static void add_cpreg_to_hashtable_aa64(ARMCPU *cpu, ARMCPRegInfo *r)
+{
+ uint32_t key = ENCODE_AA64_CP_REG(r->opc0, r->opc1,
+ r->crn, r->crm, r->opc2);
+
+ if ((r->type & ARM_CP_ADD_TLBI_NXS) &&
+ cpu_isar_feature(aa64_xs, cpu)) {
+ /*
+ * This is a TLBI insn which has an NXS variant. The
+ * NXS variant is at the same encoding except that
+ * crn is +1, and has the same behaviour except for
+ * fine-grained trapping. Add the NXS insn here and
+ * then fall through to add the normal register.
+ * alloc_cpreg() duplicates the cpreg struct along with
+ * its name, so the NXS variant gets its own allocation
+ * for add_cpreg_to_hashtable() to insert.
+ */
+ ARMCPRegInfo *nxs_ri = alloc_cpreg(r, "NXS");
+ uint32_t nxs_key;
+
+ assert(nxs_ri->crn < 0xf);
+ nxs_ri->crn++;
+ /* Also increment the CRN field inside the key value */
+ nxs_key = key + (1 << CP_REG_ARM64_SYSREG_CRN_SHIFT);
+ if (nxs_ri->fgt) {
+ nxs_ri->fgt |= R_FGT_NXS_MASK;
+ }
+
+ add_cpreg_to_hashtable(cpu, nxs_ri, ARM_CP_STATE_AA64,
+ ARM_CP_SECSTATE_NS, nxs_key);
+ }
+
+ if (!r->vhe_redir_to_el01) {
+ assert(!r->vhe_redir_to_el2);
+ } else if (!arm_feature(&cpu->env, ARM_FEATURE_EL2) ||
+ !cpu_isar_feature(aa64_vh, cpu)) {
+ r->vhe_redir_to_el2 = 0;
+ r->vhe_redir_to_el01 = 0;
+ } else {
+ /* Create the FOO_EL12 alias. */
+ ARMCPRegInfo *r2 = alloc_cpreg(r, "2");
+ uint32_t key2 = r->vhe_redir_to_el01;
+
+ /*
+ * Clear EL1 redirection on the FOO_EL1 reg;
+ * Clear EL2 redirection on the FOO_EL12 reg;
+ * Install redirection from FOO_EL12 back to FOO_EL1.
+ */
+ r->vhe_redir_to_el01 = 0;
+ r2->vhe_redir_to_el2 = 0;
+ r2->vhe_redir_to_el01 = key;
+
+ r2->type |= ARM_CP_ALIAS | ARM_CP_NO_RAW;
+ /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
+ r2->access &= PL2_RW | PL3_RW;
+ /* The r2 op fields are as per key2, not the target reg */
+ r2->crn = (key2 & CP_REG_ARM64_SYSREG_CRN_MASK)
+ >> CP_REG_ARM64_SYSREG_CRN_SHIFT;
+ r2->crm = (key2 & CP_REG_ARM64_SYSREG_CRM_MASK)
+ >> CP_REG_ARM64_SYSREG_CRM_SHIFT;
+ r2->opc0 = (key2 & CP_REG_ARM64_SYSREG_OP0_MASK)
+ >> CP_REG_ARM64_SYSREG_OP0_SHIFT;
+ r2->opc1 = (key2 & CP_REG_ARM64_SYSREG_OP1_MASK)
+ >> CP_REG_ARM64_SYSREG_OP1_SHIFT;
+ r2->opc2 = (key2 & CP_REG_ARM64_SYSREG_OP2_MASK)
+ >> CP_REG_ARM64_SYSREG_OP2_SHIFT;
+
+ /* Non-redirected access to this register will abort. */
+ r2->readfn = NULL;
+ r2->writefn = NULL;
+ r2->raw_readfn = NULL;
+ r2->raw_writefn = NULL;
+ r2->accessfn = NULL;
+ r2->fieldoffset = 0;
-void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
- const ARMCPRegInfo *r, void *opaque)
+ /*
+ * If the _EL1 register is redirected to memory by FEAT_NV2,
+ * then it shares the offset with the _EL12 register,
+ * and which one is redirected depends on HCR_EL2.NV1.
+ */
+ if (r2->nv2_redirect_offset) {
+ assert(r2->nv2_redirect_offset & NV2_REDIR_NV1);
+ r2->nv2_redirect_offset &= ~NV2_REDIR_NV1;
+ r2->nv2_redirect_offset |= NV2_REDIR_NO_NV1;
+ }
+ add_cpreg_to_hashtable(cpu, r2, ARM_CP_STATE_AA64,
+ ARM_CP_SECSTATE_NS, key2);
+ }
+
+ add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA64,
+ ARM_CP_SECSTATE_NS, key);
+}
+
+void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *r)
{
/*
* Define implementations of coprocessor registers.
@@ -7571,21 +7443,27 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
* bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
* the register, if any.
*/
- int crm, opc1, opc2;
int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
- CPState state;
+ int cp = r->cp;
+ ARMCPRegInfo r_const;
+ CPUARMState *env = &cpu->env;
- /* 64 bit registers have only CRm and Opc1 fields */
- assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
+ /*
+ * AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless.
+ * Moreover, the encoding test just following in general prevents
+ * shared encoding so ARM_CP_STATE_BOTH won't work either.
+ */
+ assert(r->state == ARM_CP_STATE_AA32 || !(r->type & ARM_CP_64BIT));
+ /* AArch32 64-bit registers have only CRm and Opc1 fields. */
+ assert(!(r->type & ARM_CP_64BIT) || !(r->opc2 || r->crn));
/* op0 only exists in the AArch64 encodings */
- assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
- /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
- assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
+ assert(r->state != ARM_CP_STATE_AA32 || r->opc0 == 0);
+
/*
* This API is only for Arm's system coprocessors (14 and 15) or
* (M-profile or v7A-and-earlier only) for implementation defined
@@ -7596,21 +7474,25 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
*/
switch (r->state) {
case ARM_CP_STATE_BOTH:
- /* 0 has a special meaning, but otherwise the same rules as AA32. */
- if (r->cp == 0) {
+ /*
+ * If the cp field is left unset, assume cp15.
+ * Otherwise apply the same rules as AA32.
+ */
+ if (cp == 0) {
+ cp = 15;
break;
}
/* fall through */
case ARM_CP_STATE_AA32:
if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
!arm_feature(&cpu->env, ARM_FEATURE_M)) {
- assert(r->cp >= 14 && r->cp <= 15);
+ assert(cp >= 14 && cp <= 15);
} else {
- assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
+ assert(cp < 8 || (cp >= 14 && cp <= 15));
}
break;
case ARM_CP_STATE_AA64:
- assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
+ assert(cp == 0);
break;
default:
g_assert_not_reached();
@@ -7675,75 +7557,104 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
}
}
- for (crm = crmmin; crm <= crmmax; crm++) {
- for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
- for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
- for (state = ARM_CP_STATE_AA32;
- state <= ARM_CP_STATE_AA64; state++) {
- if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
- continue;
- }
- if ((r->type & ARM_CP_ADD_TLBI_NXS) &&
- cpu_isar_feature(aa64_xs, cpu)) {
- /*
- * This is a TLBI insn which has an NXS variant. The
- * NXS variant is at the same encoding except that
- * crn is +1, and has the same behaviour except for
- * fine-grained trapping. Add the NXS insn here and
- * then fall through to add the normal register.
- * add_cpreg_to_hashtable() copies the cpreg struct
- * and name that it is passed, so it's OK to use
- * a local struct here.
- */
- ARMCPRegInfo nxs_ri = *r;
- g_autofree char *name = g_strdup_printf("%sNXS", r->name);
-
- assert(state == ARM_CP_STATE_AA64);
- assert(nxs_ri.crn < 0xf);
- nxs_ri.crn++;
- if (nxs_ri.fgt) {
- nxs_ri.fgt |= R_FGT_NXS_MASK;
- }
- add_cpreg_to_hashtable(cpu, &nxs_ri, opaque, state,
- ARM_CP_SECSTATE_NS,
- crm, opc1, opc2, name);
- }
- if (state == ARM_CP_STATE_AA32) {
- /*
- * Under AArch32 CP registers can be common
- * (same for secure and non-secure world) or banked.
- */
- char *name;
-
- switch (r->secure) {
- case ARM_CP_SECSTATE_S:
- case ARM_CP_SECSTATE_NS:
- add_cpreg_to_hashtable(cpu, r, opaque, state,
- r->secure, crm, opc1, opc2,
- r->name);
- break;
- case ARM_CP_SECSTATE_BOTH:
- name = g_strdup_printf("%s_S", r->name);
- add_cpreg_to_hashtable(cpu, r, opaque, state,
- ARM_CP_SECSTATE_S,
- crm, opc1, opc2, name);
- g_free(name);
- add_cpreg_to_hashtable(cpu, r, opaque, state,
- ARM_CP_SECSTATE_NS,
- crm, opc1, opc2, r->name);
- break;
- default:
- g_assert_not_reached();
- }
- } else {
- /*
- * AArch64 registers get mapped to non-secure instance
- * of AArch32
- */
- add_cpreg_to_hashtable(cpu, r, opaque, state,
- ARM_CP_SECSTATE_NS,
- crm, opc1, opc2, r->name);
- }
+ /*
+ * Eliminate registers that are not present because the EL is missing.
+ * Doing this here makes it easier to put all registers for a given
+ * feature into the same ARMCPRegInfo array and define them all at once.
+ */
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ /*
+ * An EL2 register without EL2 but with EL3 is (usually) RES0.
+ * See rule RJFFP in section D1.1.3 of DDI0487H.a.
+ */
+ int min_el = ctz32(r->access) / 2;
+ if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
+ if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
+ return;
+ }
+ if (!(r->type & ARM_CP_EL3_NO_EL2_KEEP)) {
+ /* This should not have been a very special register. */
+ int old_special = r->type & ARM_CP_SPECIAL_MASK;
+ assert(old_special == 0 || old_special == ARM_CP_NOP);
+
+ r_const = *r;
+
+ /*
+ * Set the special function to CONST, retaining the other flags.
+ * This is important for e.g. ARM_CP_SVE so that we still
+ * take the SVE trap if CPTR_EL3.EZ == 0.
+ */
+ r_const.type = (r->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
+ /*
+ * Usually, these registers become RES0, but there are a few
+ * special cases like VPIDR_EL2 which have a constant non-zero
+ * value with writes ignored.
+ */
+ if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
+ r_const.resetvalue = 0;
+ }
+ /*
+ * ARM_CP_CONST has precedence, so removing the callbacks and
+ * offsets are not strictly necessary, but it is potentially
+ * less confusing to debug later.
+ */
+ r_const.readfn = NULL;
+ r_const.writefn = NULL;
+ r_const.raw_readfn = NULL;
+ r_const.raw_writefn = NULL;
+ r_const.resetfn = NULL;
+ r_const.fieldoffset = 0;
+ r_const.bank_fieldoffsets[0] = 0;
+ r_const.bank_fieldoffsets[1] = 0;
+
+ r = &r_const;
+ }
+ }
+ } else {
+ CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
+ ? PL2_RW : PL1_RW);
+ if ((r->access & max_el) == 0) {
+ return;
+ }
+ }
+
+ for (int crm = crmmin; crm <= crmmax; crm++) {
+ for (int opc1 = opc1min; opc1 <= opc1max; opc1++) {
+ for (int opc2 = opc2min; opc2 <= opc2max; opc2++) {
+ ARMCPRegInfo *r2 = alloc_cpreg(r, NULL);
+ ARMCPRegInfo *r3;
+
+ /*
+ * By convention, for wildcarded registers only the first
+ * entry is used for migration; the others are marked as
+ * ALIAS so we don't try to transfer the register
+ * multiple times.
+ */
+ if (crm != crmmin || opc1 != opc1min || opc2 != opc2min) {
+ r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
+ }
+
+ /* Overwrite CP_ANY with the instantiation. */
+ r2->crm = crm;
+ r2->opc1 = opc1;
+ r2->opc2 = opc2;
+
+ switch (r->state) {
+ case ARM_CP_STATE_AA32:
+ add_cpreg_to_hashtable_aa32(cpu, r2);
+ break;
+ case ARM_CP_STATE_AA64:
+ add_cpreg_to_hashtable_aa64(cpu, r2);
+ break;
+ case ARM_CP_STATE_BOTH:
+ r3 = alloc_cpreg(r2, NULL);
+ r2->cp = cp;
+ add_cpreg_to_hashtable_aa32(cpu, r2);
+ r3->cp = 0;
+ add_cpreg_to_hashtable_aa64(cpu, r3);
+ break;
+ default:
+ g_assert_not_reached();
}
}
}
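The nested crm/opc1/opc2 loops expand a CP_ANY wildcard spec into one concrete register per encoding, and every instantiation after the first is tagged ARM_CP_ALIAS | ARM_CP_NO_GDB so it is neither migrated nor listed by gdb more than once. A standalone sketch of that expansion-and-marking logic (CP_ANY's numeric value is assumed here):

    #include <stdio.h>

    #define CP_ANY 0xff            /* assumed wildcard value, as in cpregs.h */

    enum { F_ALIAS = 1, F_NO_GDB = 2 };

    /* Expand a possibly-wildcarded (crm, opc1, opc2) spec into concrete regs. */
    static void expand_wildcards(int crm_spec, int opc1_spec, int opc2_spec)
    {
        int crmmin = crm_spec == CP_ANY ? 0 : crm_spec;
        int crmmax = crm_spec == CP_ANY ? 15 : crm_spec;
        int opc1min = opc1_spec == CP_ANY ? 0 : opc1_spec;
        int opc1max = opc1_spec == CP_ANY ? 7 : opc1_spec;
        int opc2min = opc2_spec == CP_ANY ? 0 : opc2_spec;
        int opc2max = opc2_spec == CP_ANY ? 7 : opc2_spec;

        for (int crm = crmmin; crm <= crmmax; crm++) {
            for (int opc1 = opc1min; opc1 <= opc1max; opc1++) {
                for (int opc2 = opc2min; opc2 <= opc2max; opc2++) {
                    int flags = 0;

                    /* Only the first instantiation stays migratable. */
                    if (crm != crmmin || opc1 != opc1min || opc2 != opc2min) {
                        flags |= F_ALIAS | F_NO_GDB;
                    }
                    printf("crm=%2d opc1=%d opc2=%d flags=%d\n",
                           crm, opc1, opc2, flags);
                }
            }
        }
    }

    int main(void)
    {
        expand_wildcards(CP_ANY, 0, 0);
        return 0;
    }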
@@ -7751,12 +7662,10 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
}
/* Define a whole list of registers */
-void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
- void *opaque, size_t len)
+void define_arm_cp_regs_len(ARMCPU *cpu, const ARMCPRegInfo *regs, size_t len)
{
- size_t i;
- for (i = 0; i < len; ++i) {
- define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque);
+ for (size_t i = 0; i < len; ++i) {
+ define_one_arm_cp_reg(cpu, regs + i);
}
}
@@ -7818,7 +7727,7 @@ uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
return 0;
}
-void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
+void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *ri)
{
/* Helper coprocessor reset function for do-nothing-on-reset registers */
}