Diffstat (limited to 'target/hppa/cpu.c')
-rw-r--r--  target/hppa/cpu.c  77
1 file changed, 62 insertions(+), 15 deletions(-)
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index f050787..2477772 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -24,9 +24,12 @@
#include "qemu/timer.h"
#include "cpu.h"
#include "qemu/module.h"
-#include "exec/exec-all.h"
+#include "exec/translation-block.h"
+#include "exec/target_page.h"
#include "fpu/softfloat.h"
#include "tcg/tcg.h"
+#include "hw/hppa/hppa_hardware.h"
+#include "accel/tcg/cpu-ops.h"
static void hppa_cpu_set_pc(CPUState *cs, vaddr value)
{
@@ -43,15 +46,17 @@ static vaddr hppa_cpu_get_pc(CPUState *cs)
{
CPUHPPAState *env = cpu_env(cs);
- return hppa_form_gva_psw(env->psw, (env->psw & PSW_C ? env->iasq_f : 0),
- env->iaoq_f & -4);
+ return hppa_form_gva_mask(env->gva_offset_mask,
+ (env->psw & PSW_C ? env->iasq_f : 0),
+ env->iaoq_f & -4);
}
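For context, the new hppa_form_gva_mask() helper presumably reduces to a mask-and-or on a precomputed mask, rather than re-deriving the width from PSW_W on every call as hppa_form_gva_psw() did. A minimal sketch, not the exact QEMU source, assuming env->gva_offset_mask is recomputed whenever PSW_W changes:

static inline uint64_t form_gva_mask_sketch(uint64_t gva_offset_mask,
                                            uint64_t spc, uint64_t off)
{
    /* Keep only the offset bits valid in the current mode (32-bit or
     * wide), then OR in the space identifier in the high bits. */
    return spc | (off & gva_offset_mask);
}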
-void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
- uint64_t *pcsbase, uint32_t *pflags)
+static TCGTBCPUState hppa_get_tb_cpu_state(CPUState *cs)
{
+ CPUHPPAState *env = cpu_env(cs);
uint32_t flags = 0;
uint64_t cs_base = 0;
+ vaddr pc;
/*
* TB lookup assumes that PC contains the complete virtual address.
@@ -59,7 +64,7 @@ void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
* incomplete virtual address. This also means that we must separate
* out current cpu privilege from the low bits of IAOQ_F.
*/
- *pc = hppa_cpu_get_pc(env_cpu(env));
+ pc = hppa_cpu_get_pc(env_cpu(env));
flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;
/*
@@ -89,10 +94,13 @@ void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
& (env->sr[4] == env->sr[7])) {
flags |= TB_FLAG_SR_SAME;
}
+ if ((env->psw & PSW_W) &&
+ (env->dr[2] & HPPA64_DIAG_SPHASH_ENABLE)) {
+ flags |= TB_FLAG_SPHASH;
+ }
#endif
- *pcsbase = cs_base;
- *pflags = flags;
+ return (TCGTBCPUState){ .pc = pc, .flags = flags, .cs_base = cs_base };
}
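The hook now returns the TB lookup key by value instead of filling out-parameters. The struct it returns has roughly this shape; the cflags member is shown as an assumption, since this diff never sets it and the exact field list lives in the accel/tcg headers:

typedef struct TCGTBCPUState {
    vaddr pc;         /* complete virtual address used as the TB key */
    uint32_t flags;   /* the TB_FLAG_* bits computed above */
    uint32_t cflags;  /* compile flags; left zero by this target (assumed) */
    uint64_t cs_base; /* extra lookup state, also zero here */
} TCGTBCPUState;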
static void hppa_cpu_synchronize_from_tb(CPUState *cs,
@@ -124,10 +132,12 @@ static void hppa_restore_state_to_opc(CPUState *cs,
env->psw_n = 0;
}
+#ifndef CONFIG_USER_ONLY
static bool hppa_cpu_has_work(CPUState *cs)
{
return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
}
+#endif /* !CONFIG_USER_ONLY */
static int hppa_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -143,6 +153,7 @@ static int hppa_cpu_mmu_index(CPUState *cs, bool ifetch)
static void hppa_cpu_disas_set_info(CPUState *cs, disassemble_info *info)
{
info->mach = bfd_mach_hppa20;
+ info->endian = BFD_ENDIAN_BIG;
info->print_insn = print_insn_hppa;
}
@@ -194,13 +205,33 @@ static void hppa_cpu_realizefn(DeviceState *dev, Error **errp)
static void hppa_cpu_initfn(Object *obj)
{
+ CPUHPPAState *env = cpu_env(CPU(obj));
+
+ env->is_pa20 = !!object_dynamic_cast(obj, TYPE_HPPA64_CPU);
+}
+
+static void hppa_cpu_reset_hold(Object *obj, ResetType type)
+{
+ HPPACPUClass *scc = HPPA_CPU_GET_CLASS(obj);
CPUState *cs = CPU(obj);
HPPACPU *cpu = HPPA_CPU(obj);
CPUHPPAState *env = &cpu->env;
+ if (scc->parent_phases.hold) {
+ scc->parent_phases.hold(obj, type);
+ }
cs->exception_index = -1;
+ cs->halted = 0;
+ cpu_set_pc(cs, 0xf0000004);
+
+ memset(env, 0, offsetof(CPUHPPAState, end_reset_fields));
+
cpu_hppa_loaded_fr0(env);
- cpu_hppa_put_psw(env, PSW_W);
+
+ /* 64-bit machines start with space-register hashing enabled in %dr2 */
+ env->dr[2] = hppa_is_pa20(env) ? HPPA64_DIAG_SPHASH_ENABLE : 0;
+
+ cpu_hppa_put_psw(env, PSW_M);
}
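The memset here relies on a common QEMU idiom: an empty end_reset_fields marker inside CPUHPPAState, so that offsetof() clears every field declared before it, while configuration set in initfn, such as is_pa20, survives reset. A schematic layout, assuming the usual convention (the zero-size struct is a GNU C extension that QEMU builds with):

struct SchematicCPUState {
    uint64_t gr[32];             /* architectural state: cleared on reset */
    uint64_t psw;                /* cleared on reset */
    struct {} end_reset_fields;  /* zero-size marker; nothing past here is touched */
    bool is_pa20;                /* configuration: preserved across reset */
};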
static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model)
@@ -214,38 +245,54 @@ static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model)
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps hppa_sysemu_ops = {
+ .has_work = hppa_cpu_has_work,
.get_phys_page_debug = hppa_cpu_get_phys_page_debug,
};
#endif
-#include "hw/core/tcg-cpu-ops.h"
-
static const TCGCPUOps hppa_tcg_ops = {
+ /* PA-RISC 1.x processors have a strong memory model. */
+ /*
+ * ??? While we do not yet implement PA-RISC 2.0, those processors have
+ * a weak memory model, but with TLB bits that force ordering on a per-page
+ * basis. It's probably easier to fall back to a strong memory model.
+ */
+ .guest_default_memory_order = TCG_MO_ALL,
+ .mttcg_supported = true,
+
.initialize = hppa_translate_init,
+ .translate_code = hppa_translate_code,
+ .get_tb_cpu_state = hppa_get_tb_cpu_state,
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
.restore_state_to_opc = hppa_restore_state_to_opc,
+ .mmu_index = hppa_cpu_mmu_index,
#ifndef CONFIG_USER_ONLY
- .tlb_fill = hppa_cpu_tlb_fill,
+ .tlb_fill_align = hppa_cpu_tlb_fill_align,
+ .pointer_wrap = cpu_pointer_wrap_notreached,
.cpu_exec_interrupt = hppa_cpu_exec_interrupt,
+ .cpu_exec_halt = hppa_cpu_has_work,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = hppa_cpu_do_interrupt,
.do_unaligned_access = hppa_cpu_do_unaligned_access,
.do_transaction_failed = hppa_cpu_do_transaction_failed,
#endif /* !CONFIG_USER_ONLY */
};
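TCG_MO_ALL requests the strongest guest memory ordering. Paraphrasing tcg's memory-ordering flags (barrier-strength bits omitted): each TCG_MO_A_B bit keeps an earlier access of kind A ordered before a later access of kind B, and TCG_MO_ALL sets all four:

typedef enum {
    TCG_MO_LD_LD = 0x01,  /* earlier load ordered before later load */
    TCG_MO_ST_LD = 0x02,  /* earlier store ordered before later load */
    TCG_MO_LD_ST = 0x04,  /* earlier load ordered before later store */
    TCG_MO_ST_ST = 0x08,  /* earlier store ordered before later store */
    TCG_MO_ALL   = 0x0F,  /* all of the above: sequentially consistent */
} TCGBar;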
-static void hppa_cpu_class_init(ObjectClass *oc, void *data)
+static void hppa_cpu_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
CPUClass *cc = CPU_CLASS(oc);
HPPACPUClass *acc = HPPA_CPU_CLASS(oc);
+ ResettableClass *rc = RESETTABLE_CLASS(oc);
device_class_set_parent_realize(dc, hppa_cpu_realizefn,
&acc->parent_realize);
+ resettable_class_set_parent_phases(rc, NULL, hppa_cpu_reset_hold, NULL,
+ &acc->parent_phases);
+
cc->class_by_name = hppa_cpu_class_by_name;
- cc->has_work = hppa_cpu_has_work;
- cc->mmu_index = hppa_cpu_mmu_index;
cc->dump_state = hppa_cpu_dump_state;
cc->set_pc = hppa_cpu_set_pc;
cc->get_pc = hppa_cpu_get_pc;