Diffstat (limited to 'target/arm/hvf/hvf.c')
-rw-r--r--  target/arm/hvf/hvf.c | 117
1 file changed, 69 insertions(+), 48 deletions(-)
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index 34ca36f..47b0cd3 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -19,6 +19,7 @@
#include "system/hw_accel.h"
#include "hvf_arm.h"
#include "cpregs.h"
+#include "cpu-sysregs.h"
#include <mach/mach_time.h>
@@ -185,6 +186,7 @@ void hvf_arm_init_debug(void)
#define SYSREG_OSLAR_EL1 SYSREG(2, 0, 1, 0, 4)
#define SYSREG_OSLSR_EL1 SYSREG(2, 0, 1, 1, 4)
#define SYSREG_OSDLR_EL1 SYSREG(2, 0, 1, 3, 4)
+#define SYSREG_LORC_EL1 SYSREG(3, 0, 10, 4, 3)
#define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 14, 0, 1)
#define SYSREG_CNTP_CTL_EL0 SYSREG(3, 3, 14, 2, 1)
#define SYSREG_PMCR_EL0 SYSREG(3, 3, 9, 12, 0)
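These SYSREG() keys pack the five system-register encoding fields into a single integer so trapped MSR/MRS accesses can be matched with a switch. The packing presumably mirrors the ESR_EL2 ISS layout for such traps (Op0 at bit 20, Op2 at 17, Op1 at 14, CRn at 10, CRm at 1); a sketch:

    /* Sketch of the SYSREG() encoder; shift values assumed from the
     * ESR_EL2 ISS layout for MSR/MRS traps. */
    #define SYSREG(op0, op1, crn, crm, op2)              \
        (((op0) << 20) | ((op2) << 17) | ((op1) << 14) | \
         ((crn) << 10) | ((crm) << 1))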
@@ -812,9 +814,9 @@ int hvf_put_registers(CPUState *cpu)
static void flush_cpu_state(CPUState *cpu)
{
- if (cpu->accel->dirty) {
+ if (cpu->vcpu_dirty) {
hvf_put_registers(cpu);
- cpu->accel->dirty = false;
+ cpu->vcpu_dirty = false;
}
}
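The switch from the accelerator-private cpu->accel->dirty to the common cpu->vcpu_dirty flag follows the usual dirty-state protocol: pulling registers out of the vCPU marks the state dirty, and flush_cpu_state() pushes it back before the next hv_vcpu_run(). A sketch of the read-side counterpart, assuming hvf's synchronize helper is renamed the same way:

    /* Read side (sketch): fetching state marks the vCPU dirty so the
     * next flush_cpu_state() writes it back to the hypervisor. */
    static void do_hvf_cpu_synchronize_state(CPUState *cpu,
                                             run_on_cpu_data arg)
    {
        if (!cpu->vcpu_dirty) {
            hvf_get_registers(cpu);
            cpu->vcpu_dirty = true;
        }
    }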
@@ -845,14 +847,17 @@ static uint64_t hvf_get_reg(CPUState *cpu, int rt)
return val;
}
-static void clamp_id_aa64mmfr0_parange_to_ipa_size(uint64_t *id_aa64mmfr0)
+static void clamp_id_aa64mmfr0_parange_to_ipa_size(ARMISARegisters *isar)
{
uint32_t ipa_size = chosen_ipa_bit_size ?
chosen_ipa_bit_size : hvf_arm_get_max_ipa_bit_size();
+ uint64_t id_aa64mmfr0;
/* Clamp down the PARange to the IPA size the kernel supports. */
uint8_t index = round_down_to_parange_index(ipa_size);
- *id_aa64mmfr0 = (*id_aa64mmfr0 & ~R_ID_AA64MMFR0_PARANGE_MASK) | index;
+ id_aa64mmfr0 = GET_IDREG(isar, ID_AA64MMFR0);
+ id_aa64mmfr0 = (id_aa64mmfr0 & ~R_ID_AA64MMFR0_PARANGE_MASK) | index;
+ SET_IDREG(isar, ID_AA64MMFR0, id_aa64mmfr0);
}
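GET_IDREG()/SET_IDREG() come from the newly included cpu-sysregs.h and replace direct access to the old per-register id_aa64* fields with lookups into a common idregs[] array. Presumably they are thin wrappers along these lines:

    /* Presumed shape of the cpu-sysregs.h accessors (sketch): */
    #define GET_IDREG(isar, ID)     ((isar)->idregs[ID ## _EL1_IDX])
    #define SET_IDREG(isar, ID, v)  ((isar)->idregs[ID ## _EL1_IDX] = (v))

which is why the table below can name slots like idregs[ID_AA64MMFR0_EL1_IDX] directly.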
static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
@@ -862,16 +867,16 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
int reg;
uint64_t *val;
} regs[] = {
- { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 },
- { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 },
- { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 },
- { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 },
- { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 },
- { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 },
+ { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.idregs[ID_AA64PFR0_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.idregs[ID_AA64PFR1_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.idregs[ID_AA64DFR0_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.idregs[ID_AA64DFR1_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.idregs[ID_AA64ISAR0_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.idregs[ID_AA64ISAR1_EL1_IDX] },
/* Add ID_AA64ISAR2_EL1 here when HVF supports it */
- { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 },
- { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 },
- { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 },
+ { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.idregs[ID_AA64MMFR0_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.idregs[ID_AA64MMFR1_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.idregs[ID_AA64MMFR2_EL1_IDX] },
/* Add ID_AA64MMFR3_EL1 here when HVF supports it */
};
hv_vcpu_t fd;
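The regs[] table pairs each Hypervisor.framework register ID with the idregs[] slot it populates; the code elided between this hunk and the next presumably walks the table in the same style as the MIDR_EL1 read visible below:

    /* Assumed table-walking loop (not visible in this diff): */
    r = hv_vcpu_create(&fd, &exit, NULL);
    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val);
    }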
@@ -879,7 +884,7 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
hv_vcpu_exit_t *exit;
int i;
- ahcf->dtb_compatible = "arm,arm-v8";
+ ahcf->dtb_compatible = "arm,armv8";
ahcf->features = (1ULL << ARM_FEATURE_V8) |
(1ULL << ARM_FEATURE_NEON) |
(1ULL << ARM_FEATURE_AARCH64) |
@@ -898,7 +903,7 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
r |= hv_vcpu_destroy(fd);
- clamp_id_aa64mmfr0_parange_to_ipa_size(&host_isar.id_aa64mmfr0);
+ clamp_id_aa64mmfr0_parange_to_ipa_size(&host_isar);
/*
* Disable SME, which is not properly handled by QEMU hvf yet.
@@ -910,7 +915,8 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
* - fix any assumptions we made that SME implies SVE (since
* on the M4 there is SME but not SVE)
*/
- host_isar.id_aa64pfr1 &= ~R_ID_AA64PFR1_SME_MASK;
+ SET_IDREG(&host_isar, ID_AA64PFR1,
+ GET_IDREG(&host_isar, ID_AA64PFR1) & ~R_ID_AA64PFR1_SME_MASK);
ahcf->isar = host_isar;
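The SME knock-out above is a plain read-modify-write through the new accessors; an equivalent spelling with QEMU's registerfields helpers would be:

    /* Equivalent using registerfields.h (sketch): */
    uint64_t pfr1 = GET_IDREG(&host_isar, ID_AA64PFR1);
    pfr1 = FIELD_DP64(pfr1, ID_AA64PFR1, SME, 0); /* clear the SME field */
    SET_IDREG(&host_isar, ID_AA64PFR1, pfr1);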
@@ -927,7 +933,7 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
ahcf->reset_sctlr |= 0x00800000;
/* Make sure we don't advertise AArch32 support for EL0/EL1 */
- if ((host_isar.id_aa64pfr0 & 0xff) != 0x11) {
+ if ((GET_IDREG(&host_isar, ID_AA64PFR0) & 0xff) != 0x11) {
return false;
}
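The magic 0x11 covers the two lowest fields of ID_AA64PFR0: EL0 in bits [3:0] and EL1 in bits [7:4], where the value 1 in each means AArch64-only. A more self-documenting form of the same check, sketched with QEMU's field extractors:

    /* Equivalent check (sketch): both EL0 and EL1 must be AArch64-only. */
    uint64_t pfr0 = GET_IDREG(&host_isar, ID_AA64PFR0);
    if (FIELD_EX64(pfr0, ID_AA64PFR0, EL0) != 1 ||
        FIELD_EX64(pfr0, ID_AA64PFR0, EL1) != 1) {
        return false;
    }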
@@ -1065,12 +1071,12 @@ int hvf_arch_init_vcpu(CPUState *cpu)
/* We're limited to underlying hardware caps, override internal versions */
ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
- &arm_cpu->isar.id_aa64mmfr0);
+ &arm_cpu->isar.idregs[ID_AA64MMFR0_EL1_IDX]);
assert_hvf_ok(ret);
- clamp_id_aa64mmfr0_parange_to_ipa_size(&arm_cpu->isar.id_aa64mmfr0);
+ clamp_id_aa64mmfr0_parange_to_ipa_size(&arm_cpu->isar);
ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
- arm_cpu->isar.id_aa64mmfr0);
+ arm_cpu->isar.idregs[ID_AA64MMFR0_EL1_IDX]);
assert_hvf_ok(ret);
return 0;
@@ -1083,13 +1089,13 @@ void hvf_kick_vcpu_thread(CPUState *cpu)
}
static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
- uint32_t syndrome)
+ uint32_t syndrome, int target_el)
{
ARMCPU *arm_cpu = ARM_CPU(cpu);
CPUARMState *env = &arm_cpu->env;
cpu->exception_index = excp;
- env->exception.target_el = 1;
+ env->exception.target_el = target_el;
env->exception.syndrome = syndrome;
arm_cpu_do_interrupt(cpu);
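Threading target_el through hvf_raise_exception() removes the hard-wired EL1 target. Every call site in this patch still passes 1, so the parameter presumably prepares for injecting exceptions to EL2 (e.g. when running a guest hypervisor). A hypothetical future call site:

    /* Hypothetical: inject a hypervisor trap to EL2 (not in this patch). */
    hvf_raise_exception(cpu, EXCP_HYP_TRAP, syn_uncategorized(), 2);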
@@ -1258,6 +1264,9 @@ static bool hvf_sysreg_read_cp(CPUState *cpu, uint32_t reg, uint64_t *val)
ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));
if (ri) {
+ if (!cp_access_ok(1, ri, true)) {
+ return false;
+ }
if (ri->accessfn) {
if (ri->accessfn(env, ri, true) != CP_ACCESS_OK) {
return false;
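The new cp_access_ok() check rejects registers whose static access rights don't permit an EL1 read before the per-register accessfn ever runs. From memory, cpregs.h defines it as a bit test against ri->access (verify against the tree):

    /* cp_access_ok() as believed defined in cpregs.h: */
    static inline bool cp_access_ok(int current_el,
                                    const ARMCPRegInfo *ri, int isread)
    {
        return (ri->access >> ((current_el * 2) + isread)) & 1;
    }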
@@ -1353,6 +1362,7 @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint64_t *val)
case SYSREG_ICC_IGRPEN0_EL1:
case SYSREG_ICC_IGRPEN1_EL1:
case SYSREG_ICC_PMR_EL1:
+ case SYSREG_ICC_RPR_EL1:
case SYSREG_ICC_SGI0R_EL1:
case SYSREG_ICC_SGI1R_EL1:
case SYSREG_ICC_SRE_EL1:
@@ -1449,7 +1459,7 @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint64_t *val)
SYSREG_CRN(reg),
SYSREG_CRM(reg),
SYSREG_OP2(reg));
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
return 1;
}
@@ -1538,6 +1548,9 @@ static bool hvf_sysreg_write_cp(CPUState *cpu, uint32_t reg, uint64_t val)
ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg));
if (ri) {
+ if (!cp_access_ok(1, ri, false)) {
+ return false;
+ }
if (ri->accessfn) {
if (ri->accessfn(env, ri, false) != CP_ACCESS_OK) {
return false;
@@ -1645,6 +1658,9 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
case SYSREG_OSDLR_EL1:
/* Dummy register */
return 0;
+ case SYSREG_LORC_EL1:
+ /* Dummy register */
+ return 0;
case SYSREG_ICC_AP0R0_EL1:
case SYSREG_ICC_AP0R1_EL1:
case SYSREG_ICC_AP0R2_EL1:
@@ -1667,6 +1683,7 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
case SYSREG_ICC_IGRPEN0_EL1:
case SYSREG_ICC_IGRPEN1_EL1:
case SYSREG_ICC_PMR_EL1:
+ case SYSREG_ICC_RPR_EL1:
case SYSREG_ICC_SGI0R_EL1:
case SYSREG_ICC_SGI1R_EL1:
case SYSREG_ICC_SRE_EL1:
@@ -1759,7 +1776,7 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
SYSREG_CRN(reg),
SYSREG_CRM(reg),
SYSREG_OP2(reg));
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
return 1;
}
@@ -1910,7 +1927,17 @@ int hvf_vcpu_exec(CPUState *cpu)
flush_cpu_state(cpu);
bql_unlock();
- assert_hvf_ok(hv_vcpu_run(cpu->accel->fd));
+ r = hv_vcpu_run(cpu->accel->fd);
+ bql_lock();
+ switch (r) {
+ case HV_SUCCESS:
+ break;
+ case HV_ILLEGAL_GUEST_STATE:
+ trace_hvf_illegal_guest_state();
+ /* fall through */
+ default:
+ g_assert_not_reached();
+ }
/* handle VMEXIT */
uint64_t exit_reason = hvf_exit->reason;
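Unpacking hv_vcpu_run()'s return code instead of feeding it straight to assert_hvf_ok() lets HV_ILLEGAL_GUEST_STATE emit a dedicated trace event before the assertion fires, which makes a malformed-guest-state abort much easier to diagnose. The companion entry in target/arm/hvf/trace-events would be a declaration along these lines (format string assumed):

    hvf_illegal_guest_state(void) "HV_ILLEGAL_GUEST_STATE"

Note that bql_lock() also moves up to immediately after hv_vcpu_run(), so the lock is already held when the exit is decoded.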
@@ -1918,7 +1945,6 @@ int hvf_vcpu_exec(CPUState *cpu)
uint32_t ec = syn_get_ec(syndrome);
ret = 0;
- bql_lock();
switch (exit_reason) {
case HV_EXIT_REASON_EXCEPTION:
/* This is the main one, handle below. */
@@ -1953,7 +1979,7 @@ int hvf_vcpu_exec(CPUState *cpu)
if (!hvf_find_sw_breakpoint(cpu, env->pc)) {
/* Re-inject into the guest */
ret = 0;
- hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0));
+ hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0), 1);
}
break;
}
@@ -1991,7 +2017,7 @@ int hvf_vcpu_exec(CPUState *cpu)
uint32_t cm = (syndrome >> 8) & 0x1;
uint64_t val = 0;
- trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address,
+ trace_hvf_data_abort(hvf_exit->exception.virtual_address,
hvf_exit->exception.physical_address, isv,
iswrite, s1ptw, len, srt);
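Dropping env->pc from trace_hvf_data_abort() means the matching prototype in target/arm/hvf/trace-events loses its first argument as well; presumably something like (argument names and format string illustrative):

    hvf_data_abort(uint64_t va, uint64_t pa, uint32_t isv, uint32_t iswrite, uint32_t s1ptw, uint32_t len, uint32_t srt) "data abort: [va=0x%016"PRIx64" pa=0x%016"PRIx64" isv=%d iswrite=%d s1ptw=%d len=%d srt=%d]"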
@@ -2058,13 +2084,13 @@ int hvf_vcpu_exec(CPUState *cpu)
cpu_synchronize_state(cpu);
if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) {
if (!hvf_handle_psci_call(cpu)) {
- trace_hvf_unknown_hvc(env->xregs[0]);
+ trace_hvf_unknown_hvc(env->pc, env->xregs[0]);
/* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
env->xregs[0] = -1;
}
} else {
- trace_hvf_unknown_hvc(env->xregs[0]);
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ trace_hvf_unknown_hvc(env->pc, env->xregs[0]);
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
}
break;
case EC_AA64_SMC:
@@ -2079,7 +2105,7 @@ int hvf_vcpu_exec(CPUState *cpu)
}
} else {
trace_hvf_unknown_smc(env->xregs[0]);
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
}
break;
default:
@@ -2278,28 +2304,23 @@ static inline bool hvf_arm_hw_debug_active(CPUState *cpu)
return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}
-static void hvf_arch_set_traps(void)
+static void hvf_arch_set_traps(CPUState *cpu)
{
- CPUState *cpu;
bool should_enable_traps = false;
hv_return_t r = HV_SUCCESS;
/* Check whether guest debugging is enabled for at least one vCPU; if it
* is, enable exiting the guest on all vCPUs */
- CPU_FOREACH(cpu) {
- should_enable_traps |= cpu->accel->guest_debug_enabled;
- }
- CPU_FOREACH(cpu) {
- /* Set whether debug exceptions exit the guest */
- r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd,
- should_enable_traps);
- assert_hvf_ok(r);
+ should_enable_traps |= cpu->accel->guest_debug_enabled;
+ /* Set whether debug exceptions exit the guest */
+ r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd,
+ should_enable_traps);
+ assert_hvf_ok(r);
- /* Set whether accesses to debug registers exit the guest */
- r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd,
- should_enable_traps);
- assert_hvf_ok(r);
- }
+ /* Set whether accesses to debug registers exit the guest */
+ r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd,
+ should_enable_traps);
+ assert_hvf_ok(r);
}
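With the CPUState argument, hvf_arch_set_traps() now configures only the vCPU it is handed, and whether traps are enabled depends only on that vCPU's own guest_debug_enabled flag rather than on a scan of all vCPUs. A caller that wanted the old fan-out behaviour could still loop explicitly, sketched here:

    /* Sketch: applying the trap settings to every vCPU from a call site. */
    CPUState *cs;
    CPU_FOREACH(cs) {
        hvf_arch_set_traps(cs);
    }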
void hvf_arch_update_guest_debug(CPUState *cpu)
@@ -2340,7 +2361,7 @@ void hvf_arch_update_guest_debug(CPUState *cpu)
env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 0);
}
- hvf_arch_set_traps();
+ hvf_arch_set_traps(cpu);
}
bool hvf_arch_supports_guest_debug(void)