Diffstat (limited to 'target/arm/hvf/hvf.c')
 target/arm/hvf/hvf.c | 106 ++++++++++++++++++++++----------------
 1 file changed, 58 insertions(+), 48 deletions(-)
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index 2439af6..c9cfcdc 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -19,10 +19,12 @@
#include "system/hw_accel.h"
#include "hvf_arm.h"
#include "cpregs.h"
+#include "cpu-sysregs.h"
#include <mach/mach_time.h>
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
+#include "system/memory.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/main-loop.h"
@@ -811,9 +813,9 @@ int hvf_put_registers(CPUState *cpu)
static void flush_cpu_state(CPUState *cpu)
{
- if (cpu->accel->dirty) {
+ if (cpu->vcpu_dirty) {
hvf_put_registers(cpu);
- cpu->accel->dirty = false;
+ cpu->vcpu_dirty = false;
}
}
@@ -844,14 +846,17 @@ static uint64_t hvf_get_reg(CPUState *cpu, int rt)
return val;
}
-static void clamp_id_aa64mmfr0_parange_to_ipa_size(uint64_t *id_aa64mmfr0)
+static void clamp_id_aa64mmfr0_parange_to_ipa_size(ARMISARegisters *isar)
{
uint32_t ipa_size = chosen_ipa_bit_size ?
chosen_ipa_bit_size : hvf_arm_get_max_ipa_bit_size();
+ uint64_t id_aa64mmfr0;
/* Clamp down the PARange to the IPA size the kernel supports. */
uint8_t index = round_down_to_parange_index(ipa_size);
- *id_aa64mmfr0 = (*id_aa64mmfr0 & ~R_ID_AA64MMFR0_PARANGE_MASK) | index;
+ id_aa64mmfr0 = GET_IDREG(isar, ID_AA64MMFR0);
+ id_aa64mmfr0 = (id_aa64mmfr0 & ~R_ID_AA64MMFR0_PARANGE_MASK) | index;
+ SET_IDREG(isar, ID_AA64MMFR0, id_aa64mmfr0);
}
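
For context, the GET_IDREG/SET_IDREG accessors pulled in via the new "cpu-sysregs.h" include are assumed to be thin wrappers over the idregs[] array that this patch switches to elsewhere; a minimal sketch of that assumption (not taken from the patch itself):

/* Assumed shape of the cpu-sysregs.h accessors (illustrative only):
 * each named ID register maps to a fixed slot in isar->idregs[]. */
#define GET_IDREG(isar, name)       ((isar)->idregs[name ## _EL1_IDX])
#define SET_IDREG(isar, name, val)  ((isar)->idregs[name ## _EL1_IDX] = (val))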
static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
@@ -861,16 +866,16 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
int reg;
uint64_t *val;
} regs[] = {
- { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 },
- { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 },
- { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 },
- { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 },
- { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 },
- { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 },
+ { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.idregs[ID_AA64PFR0_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.idregs[ID_AA64PFR1_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.idregs[ID_AA64DFR0_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.idregs[ID_AA64DFR1_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.idregs[ID_AA64ISAR0_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.idregs[ID_AA64ISAR1_EL1_IDX] },
/* Add ID_AA64ISAR2_EL1 here when HVF supports it */
- { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 },
- { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 },
- { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 },
+ { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.idregs[ID_AA64MMFR0_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.idregs[ID_AA64MMFR1_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.idregs[ID_AA64MMFR2_EL1_IDX] },
/* Add ID_AA64MMFR3_EL1 here when HVF supports it */
};
hv_vcpu_t fd;
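
The loop that consumes this table is not part of the hunk; presumably it walks the array and reads each host ID register into the idregs[] slot the entry points at, along these lines (assuming r is the hv_return_t accumulator also used for the MIDR_EL1 read below):

/* Assumed read loop, elided from this hunk: fetch every listed host ID
 * register and OR the return codes together for a single check. */
for (i = 0; i < ARRAY_SIZE(regs); i++) {
    r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val);
}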
@@ -878,7 +883,7 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
hv_vcpu_exit_t *exit;
int i;
- ahcf->dtb_compatible = "arm,arm-v8";
+ ahcf->dtb_compatible = "arm,armv8";
ahcf->features = (1ULL << ARM_FEATURE_V8) |
(1ULL << ARM_FEATURE_NEON) |
(1ULL << ARM_FEATURE_AARCH64) |
@@ -897,7 +902,7 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
r |= hv_vcpu_destroy(fd);
- clamp_id_aa64mmfr0_parange_to_ipa_size(&host_isar.id_aa64mmfr0);
+ clamp_id_aa64mmfr0_parange_to_ipa_size(&host_isar);
/*
* Disable SME, which is not properly handled by QEMU hvf yet.
@@ -909,7 +914,8 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
* - fix any assumptions we made that SME implies SVE (since
* on the M4 there is SME but not SVE)
*/
- host_isar.id_aa64pfr1 &= ~R_ID_AA64PFR1_SME_MASK;
+ SET_IDREG(&host_isar, ID_AA64PFR1,
+ GET_IDREG(&host_isar, ID_AA64PFR1) & ~R_ID_AA64PFR1_SME_MASK);
ahcf->isar = host_isar;
@@ -926,7 +932,7 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
ahcf->reset_sctlr |= 0x00800000;
/* Make sure we don't advertise AArch32 support for EL0/EL1 */
- if ((host_isar.id_aa64pfr0 & 0xff) != 0x11) {
+ if ((GET_IDREG(&host_isar, ID_AA64PFR0) & 0xff) != 0x11) {
return false;
}
@@ -1064,12 +1070,12 @@ int hvf_arch_init_vcpu(CPUState *cpu)
/* We're limited to underlying hardware caps, override internal versions */
ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
- &arm_cpu->isar.id_aa64mmfr0);
+ &arm_cpu->isar.idregs[ID_AA64MMFR0_EL1_IDX]);
assert_hvf_ok(ret);
- clamp_id_aa64mmfr0_parange_to_ipa_size(&arm_cpu->isar.id_aa64mmfr0);
+ clamp_id_aa64mmfr0_parange_to_ipa_size(&arm_cpu->isar);
ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
- arm_cpu->isar.id_aa64mmfr0);
+ arm_cpu->isar.idregs[ID_AA64MMFR0_EL1_IDX]);
assert_hvf_ok(ret);
return 0;
@@ -1082,13 +1088,13 @@ void hvf_kick_vcpu_thread(CPUState *cpu)
}
static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
- uint32_t syndrome)
+ uint32_t syndrome, int target_el)
{
ARMCPU *arm_cpu = ARM_CPU(cpu);
CPUARMState *env = &arm_cpu->env;
cpu->exception_index = excp;
- env->exception.target_el = 1;
+ env->exception.target_el = target_el;
env->exception.syndrome = syndrome;
arm_cpu_do_interrupt(cpu);
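
With the target EL now an explicit parameter, the callers updated below all pass 1 to keep the previous behaviour; injecting to a different level becomes a one-argument change. A hypothetical example, not used anywhere in this patch:

/* Hypothetical: route an UNDEF to EL2 instead of EL1. The value 2 here is
 * illustrative only. */
hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 2);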
@@ -1448,7 +1454,7 @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint64_t *val)
SYSREG_CRN(reg),
SYSREG_CRM(reg),
SYSREG_OP2(reg));
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
return 1;
}
@@ -1758,7 +1764,7 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
SYSREG_CRN(reg),
SYSREG_CRM(reg),
SYSREG_OP2(reg));
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
return 1;
}
@@ -1909,7 +1915,17 @@ int hvf_vcpu_exec(CPUState *cpu)
flush_cpu_state(cpu);
bql_unlock();
- assert_hvf_ok(hv_vcpu_run(cpu->accel->fd));
+ r = hv_vcpu_run(cpu->accel->fd);
+ bql_lock();
+ switch (r) {
+ case HV_SUCCESS:
+ break;
+ case HV_ILLEGAL_GUEST_STATE:
+ trace_hvf_illegal_guest_state();
+ /* fall through */
+ default:
+ g_assert_not_reached();
+ }
/* handle VMEXIT */
uint64_t exit_reason = hvf_exit->reason;
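
The added error handling uses r without a declaration visible in this hunk; it is presumably declared earlier in hvf_vcpu_exec() as the usual hv_return_t, i.e. something like:

hv_return_t r;  /* assumed to be declared near the top of hvf_vcpu_exec() */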
@@ -1917,7 +1933,6 @@ int hvf_vcpu_exec(CPUState *cpu)
uint32_t ec = syn_get_ec(syndrome);
ret = 0;
- bql_lock();
switch (exit_reason) {
case HV_EXIT_REASON_EXCEPTION:
/* This is the main one, handle below. */
@@ -1952,7 +1967,7 @@ int hvf_vcpu_exec(CPUState *cpu)
if (!hvf_find_sw_breakpoint(cpu, env->pc)) {
/* Re-inject into the guest */
ret = 0;
- hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0));
+ hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0), 1);
}
break;
}
@@ -2057,13 +2072,13 @@ int hvf_vcpu_exec(CPUState *cpu)
cpu_synchronize_state(cpu);
if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) {
if (!hvf_handle_psci_call(cpu)) {
- trace_hvf_unknown_hvc(env->xregs[0]);
+ trace_hvf_unknown_hvc(env->pc, env->xregs[0]);
/* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
env->xregs[0] = -1;
}
} else {
- trace_hvf_unknown_hvc(env->xregs[0]);
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ trace_hvf_unknown_hvc(env->pc, env->xregs[0]);
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
}
break;
case EC_AA64_SMC:
@@ -2078,7 +2093,7 @@ int hvf_vcpu_exec(CPUState *cpu)
}
} else {
trace_hvf_unknown_smc(env->xregs[0]);
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
}
break;
default:
@@ -2277,28 +2292,23 @@ static inline bool hvf_arm_hw_debug_active(CPUState *cpu)
return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}
-static void hvf_arch_set_traps(void)
+static void hvf_arch_set_traps(CPUState *cpu)
{
- CPUState *cpu;
bool should_enable_traps = false;
hv_return_t r = HV_SUCCESS;
/* Check whether guest debugging is enabled for at least one vCPU; if it
* is, enable exiting the guest on all vCPUs */
- CPU_FOREACH(cpu) {
- should_enable_traps |= cpu->accel->guest_debug_enabled;
- }
- CPU_FOREACH(cpu) {
- /* Set whether debug exceptions exit the guest */
- r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd,
- should_enable_traps);
- assert_hvf_ok(r);
+ should_enable_traps |= cpu->accel->guest_debug_enabled;
+ /* Set whether debug exceptions exit the guest */
+ r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd,
+ should_enable_traps);
+ assert_hvf_ok(r);
- /* Set whether accesses to debug registers exit the guest */
- r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd,
- should_enable_traps);
- assert_hvf_ok(r);
- }
+ /* Set whether accesses to debug registers exit the guest */
+ r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd,
+ should_enable_traps);
+ assert_hvf_ok(r);
}
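
With the per-vCPU signature, any "apply to every vCPU" behaviour has to live in the caller; a minimal sketch of such a caller-side loop, assuming the generic CPU_FOREACH iterator and hvf_arch_update_guest_debug() as the per-vCPU entry point (no such loop is added by this patch):

/* Hypothetical caller-side iteration: re-apply the per-vCPU trap
 * configuration after a global debug state change. */
CPUState *cs;
CPU_FOREACH(cs) {
    hvf_arch_update_guest_debug(cs);
}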
void hvf_arch_update_guest_debug(CPUState *cpu)
@@ -2339,7 +2349,7 @@ void hvf_arch_update_guest_debug(CPUState *cpu)
deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 0);
}
- hvf_arch_set_traps();
+ hvf_arch_set_traps(cpu);
}
bool hvf_arch_supports_guest_debug(void)