Diffstat (limited to 'target/mips/tcg/system')
-rw-r--r--  target/mips/tcg/system/cp0_helper.c        1633
-rw-r--r--  target/mips/tcg/system/lcsr_helper.c         40
-rw-r--r--  target/mips/tcg/system/meson.build           12
-rw-r--r--  target/mips/tcg/system/mips-semi.c          377
-rw-r--r--  target/mips/tcg/system/semihosting-stub.c    16
-rw-r--r--  target/mips/tcg/system/special_helper.c     173
-rw-r--r--  target/mips/tcg/system/tlb_helper.c        1422
7 files changed, 3673 insertions, 0 deletions
diff --git a/target/mips/tcg/system/cp0_helper.c b/target/mips/tcg/system/cp0_helper.c
new file mode 100644
index 0000000..101b1e6
--- /dev/null
+++ b/target/mips/tcg/system/cp0_helper.c
@@ -0,0 +1,1633 @@
+/*
+ * Helpers for emulation of CP0-related MIPS instructions.
+ *
+ * Copyright (C) 2004-2005 Jocelyn Mayer
+ * Copyright (C) 2020 Wave Computing, Inc.
+ * Copyright (C) 2020 Aleksandar Markovic <amarkovic@wavecomp.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "internal.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "exec/cputlb.h"
+#include "exec/target_page.h"
+
+
+/* SMP helpers. */
+static bool mips_vpe_is_wfi(MIPSCPU *c)
+{
+ CPUState *cpu = CPU(c);
+ CPUMIPSState *env = &c->env;
+
+ /*
+ * If the VPE is halted but otherwise active, it means it's waiting for
+ * an interrupt.
+ */
+ return cpu->halted && mips_vpe_active(env);
+}
+
+static bool mips_vp_is_wfi(MIPSCPU *c)
+{
+ CPUState *cpu = CPU(c);
+ CPUMIPSState *env = &c->env;
+
+ return cpu->halted && mips_vp_active(env);
+}
+
+static inline void mips_vpe_wake(MIPSCPU *c)
+{
+ /*
+ * Don't set ->halted = 0 directly; let it be done via cpu_has_work,
+ * because there might be other conditions that require c to keep
+ * sleeping.
+ */
+ bql_lock();
+ cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
+ bql_unlock();
+}
+
+static inline void mips_vpe_sleep(MIPSCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+
+ /*
+ * The VPE was shut off, really go to bed.
+ * Reset any old _WAKE requests.
+ */
+ cs->halted = 1;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
+}
+
+static inline void mips_tc_wake(MIPSCPU *cpu, int tc)
+{
+ CPUMIPSState *c = &cpu->env;
+
+ /* FIXME: TC reschedule. */
+ if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) {
+ mips_vpe_wake(cpu);
+ }
+}
+
+static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
+{
+ CPUMIPSState *c = &cpu->env;
+
+ /* FIXME: TC reschedule. */
+ if (!mips_vpe_active(c)) {
+ mips_vpe_sleep(cpu);
+ }
+}
+
+/**
+ * mips_cpu_map_tc:
+ * @env: CPU from which mapping is performed.
+ * @tc: Should point to an int with the value of the global TC index.
+ *
+ * This function will transform @tc into a local index within the
+ * returned #CPUMIPSState.
+ */
+
+/*
+ * FIXME: This code assumes that all VPEs have the same number of TCs,
+ * which depends on runtime setup. Can probably be fixed by
+ * walking the list of CPUMIPSStates.
+ */
+static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
+{
+ MIPSCPU *cpu;
+ CPUState *cs;
+ CPUState *other_cs;
+ int vpe_idx;
+ int tc_idx = *tc;
+
+ if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
+ /* Not allowed to address other CPUs. */
+ *tc = env->current_tc;
+ return env;
+ }
+
+ cs = env_cpu(env);
+ vpe_idx = tc_idx / cs->nr_threads;
+ *tc = tc_idx % cs->nr_threads;
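+ /* For example, with 2 TCs per VPE, global TC index 5 maps to VPE 2, TC 1. */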
+ other_cs = qemu_get_cpu(vpe_idx);
+ if (other_cs == NULL) {
+ return env;
+ }
+ cpu = MIPS_CPU(other_cs);
+ return &cpu->env;
+}
+
+/*
+ * The per VPE CP0_Status register shares some fields with the per TC
+ * CP0_TCStatus registers. These fields are wired to the same registers,
+ * so changes to either of them should be reflected on both registers.
+ *
+ * Also, EntryHi shares the bottom 8-bit ASID with TCStatus.
+ *
+ * These helper calls synchronize the registers for a given CPU.
+ */
+
+/*
+ * Called for updates to CP0_Status. Defined in "cpu.h" for gdbstub.c.
+ * static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu,
+ * int tc);
+ */
+
+/* Called for updates to CP0_TCStatus. */
+static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
+ target_ulong v)
+{
+ uint32_t status;
+ uint32_t tcu, tmx, tasid, tksu;
+ uint32_t mask = ((1U << CP0St_CU3)
+ | (1 << CP0St_CU2)
+ | (1 << CP0St_CU1)
+ | (1 << CP0St_CU0)
+ | (1 << CP0St_MX)
+ | (3 << CP0St_KSU));
+
+ tcu = (v >> CP0TCSt_TCU0) & 0xf;
+ tmx = (v >> CP0TCSt_TMX) & 0x1;
+ tasid = v & cpu->CP0_EntryHi_ASID_mask;
+ tksu = (v >> CP0TCSt_TKSU) & 0x3;
+
+ status = tcu << CP0St_CU0;
+ status |= tmx << CP0St_MX;
+ status |= tksu << CP0St_KSU;
+
+ cpu->CP0_Status &= ~mask;
+ cpu->CP0_Status |= status;
+
+ /* Sync the TASID with EntryHi. */
+ cpu->CP0_EntryHi &= ~cpu->CP0_EntryHi_ASID_mask;
+ cpu->CP0_EntryHi |= tasid;
+
+ compute_hflags(cpu);
+}
+
+/* Called for updates to CP0_EntryHi. */
+static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
+{
+ int32_t *tcst;
+ uint32_t asid, v = cpu->CP0_EntryHi;
+
+ asid = v & cpu->CP0_EntryHi_ASID_mask;
+
+ if (tc == cpu->current_tc) {
+ tcst = &cpu->active_tc.CP0_TCStatus;
+ } else {
+ tcst = &cpu->tcs[tc].CP0_TCStatus;
+ }
+
+ *tcst &= ~cpu->CP0_EntryHi_ASID_mask;
+ *tcst |= asid;
+}
+
+/* XXX: do not use a global */
+uint32_t cpu_mips_get_random(CPUMIPSState *env)
+{
+ static uint32_t seed = 1;
+ static uint32_t prev_idx;
+ uint32_t idx;
+ uint32_t nb_rand_tlb = env->tlb->nb_tlb - env->CP0_Wired;
+
+ if (nb_rand_tlb == 1) {
+ return env->tlb->nb_tlb - 1;
+ }
+
+ /* Don't return the same value twice in a row, so get another value. */
+ do {
+ /*
+ * Use a simple algorithm of Linear Congruential Generator
+ * from ISO/IEC 9899 standard.
+ */
+ seed = 1103515245 * seed + 12345;
+ idx = (seed >> 16) % nb_rand_tlb + env->CP0_Wired;
+ } while (idx == prev_idx);
+ prev_idx = idx;
+ return idx;
+}
+
+/* CP0 helpers */
+target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
+{
+ return env->mvp->CP0_MVPControl;
+}
+
+target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
+{
+ return env->mvp->CP0_MVPConf0;
+}
+
+target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
+{
+ return env->mvp->CP0_MVPConf1;
+}
+
+target_ulong helper_mfc0_random(CPUMIPSState *env)
+{
+ return (int32_t)cpu_mips_get_random(env);
+}
+
+target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCStatus;
+}
+
+target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.CP0_TCStatus;
+ } else {
+ return other->tcs[other_tc].CP0_TCStatus;
+ }
+}
+
+target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCBind;
+}
+
+target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.CP0_TCBind;
+ } else {
+ return other->tcs[other_tc].CP0_TCBind;
+ }
+}
+
+target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
+{
+ return env->active_tc.PC;
+}
+
+target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.PC;
+ } else {
+ return other->tcs[other_tc].PC;
+ }
+}
+
+target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCHalt;
+}
+
+target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.CP0_TCHalt;
+ } else {
+ return other->tcs[other_tc].CP0_TCHalt;
+ }
+}
+
+target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCContext;
+}
+
+target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.CP0_TCContext;
+ } else {
+ return other->tcs[other_tc].CP0_TCContext;
+ }
+}
+
+target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCSchedule;
+}
+
+target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.CP0_TCSchedule;
+ } else {
+ return other->tcs[other_tc].CP0_TCSchedule;
+ }
+}
+
+target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCScheFBack;
+}
+
+target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.CP0_TCScheFBack;
+ } else {
+ return other->tcs[other_tc].CP0_TCScheFBack;
+ }
+}
+
+target_ulong helper_mfc0_count(CPUMIPSState *env)
+{
+ return (int32_t)cpu_mips_get_count(env);
+}
+
+target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_EntryHi;
+}
+
+target_ulong helper_mftc0_cause(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_Cause;
+}
+
+target_ulong helper_mftc0_status(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_Status;
+}
+
+target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
+{
+ return (int32_t)(env->CP0_LLAddr >> env->CP0_LLAddr_shift);
+}
+
+target_ulong helper_mfc0_maar(CPUMIPSState *env)
+{
+ return (int32_t) env->CP0_MAAR[env->CP0_MAARI];
+}
+
+target_ulong helper_mfhc0_maar(CPUMIPSState *env)
+{
+ return env->CP0_MAAR[env->CP0_MAARI] >> 32;
+}
+
+target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
+{
+ return (int32_t)env->CP0_WatchLo[sel];
+}
+
+target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
+{
+ return (int32_t) env->CP0_WatchHi[sel];
+}
+
+target_ulong helper_mfhc0_watchhi(CPUMIPSState *env, uint32_t sel)
+{
+ return env->CP0_WatchHi[sel] >> 32;
+}
+
+target_ulong helper_mfc0_debug(CPUMIPSState *env)
+{
+ target_ulong t0 = env->CP0_Debug;
+ if (env->hflags & MIPS_HFLAG_DM) {
+ t0 |= 1 << CP0DB_DM;
+ }
+
+ return t0;
+}
+
+target_ulong helper_mftc0_debug(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ int32_t tcstatus;
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ tcstatus = other->active_tc.CP0_Debug_tcstatus;
+ } else {
+ tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
+ }
+
+ /* XXX: Might be wrong, check with EJTAG spec. */
+ return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
+ (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
+}
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
+{
+ return env->active_tc.PC;
+}
+
+target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCHalt;
+}
+
+target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCContext;
+}
+
+target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCSchedule;
+}
+
+target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCScheFBack;
+}
+
+target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
+{
+ return env->CP0_LLAddr >> env->CP0_LLAddr_shift;
+}
+
+target_ulong helper_dmfc0_maar(CPUMIPSState *env)
+{
+ return env->CP0_MAAR[env->CP0_MAARI];
+}
+
+target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
+{
+ return env->CP0_WatchLo[sel];
+}
+
+target_ulong helper_dmfc0_watchhi(CPUMIPSState *env, uint32_t sel)
+{
+ return env->CP0_WatchHi[sel];
+}
+
+#endif /* TARGET_MIPS64 */
+
+void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t index_p = env->CP0_Index & 0x80000000;
+ uint32_t tlb_index = arg1 & 0x7fffffff;
+ if (tlb_index < env->tlb->nb_tlb) {
+ if (env->insn_flags & ISA_MIPS_R6) {
+ index_p |= arg1 & 0x80000000;
+ }
+ env->CP0_Index = index_p | tlb_index;
+ }
+}
+
+void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
+ mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
+ (1 << CP0MVPCo_EVP);
+ }
+ if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
+ mask |= (1 << CP0MVPCo_STLB);
+ }
+ newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
+
+ /* TODO: Enable/disable shared TLB, enable/disable VPEs. */
+
+ env->mvp->CP0_MVPControl = newval;
+}
+
+void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask;
+ uint32_t newval;
+
+ mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
+ (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
+ newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
+
+ /*
+ * Yield scheduler intercept not implemented.
+ * Gating storage scheduler intercept not implemented.
+ */
+
+ /* TODO: Enable/disable TCs. */
+
+ env->CP0_VPEControl = newval;
+}
+
+void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ uint32_t mask;
+ uint32_t newval;
+
+ mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
+ (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
+ newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);
+
+ /* TODO: Enable/disable TCs. */
+
+ other->CP0_VPEControl = newval;
+}
+
+target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ /* FIXME: Mask away the bits that read as zero. */
+ return other->CP0_VPEControl;
+}
+
+target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_VPEConf0;
+}
+
+void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
+ if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA)) {
+ mask |= (0xff << CP0VPEC0_XTC);
+ }
+ mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
+ }
+ newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
+
+ /* TODO: TC exclusive handling due to ERL/EXL. */
+
+ env->CP0_VPEConf0 = newval;
+}
+
+void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
+ newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
+
+ /* TODO: TC exclusive handling due to ERL/EXL. */
+ other->CP0_VPEConf0 = newval;
+}
+
+void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
+ mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
+ (0xff << CP0VPEC1_NCP1);
+ }
+ newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
+
+ /* UDI not implemented. */
+ /* CP2 not implemented. */
+
+ /* TODO: Handle FPU (CP1) binding. */
+
+ env->CP0_VPEConf1 = newval;
+}
+
+void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
+{
+ /* Yield qualifier inputs not implemented. */
+ env->CP0_YQMask = 0x00000000;
+}
+
+void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_VPEOpt = arg1 & 0x0000ffff;
+}
+
+#define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF)
+
+void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
+{
+ /* 1k pages not implemented */
+ target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
+ env->CP0_EntryLo0 = (arg1 & MTC0_ENTRYLO_MASK(env))
+ | (rxi << (CP0EnLo_XI - 30));
+}
+
+#if defined(TARGET_MIPS64)
+#define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6)
+
+void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1)
+{
+ uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
+ env->CP0_EntryLo0 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
+}
+#endif
+
+void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = env->CP0_TCStatus_rw_bitmask;
+ uint32_t newval;
+
+ newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
+
+ env->active_tc.CP0_TCStatus = newval;
+ sync_c0_tcstatus(env, env->current_tc, newval);
+}
+
+void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.CP0_TCStatus = arg1;
+ } else {
+ other->tcs[other_tc].CP0_TCStatus = arg1;
+ }
+ sync_c0_tcstatus(other, other_tc, arg1);
+}
+
+void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = (1 << CP0TCBd_TBE);
+ uint32_t newval;
+
+ if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
+ mask |= (1 << CP0TCBd_CurVPE);
+ }
+ newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
+ env->active_tc.CP0_TCBind = newval;
+}
+
+void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ uint32_t mask = (1 << CP0TCBd_TBE);
+ uint32_t newval;
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
+ mask |= (1 << CP0TCBd_CurVPE);
+ }
+ if (other_tc == other->current_tc) {
+ newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
+ other->active_tc.CP0_TCBind = newval;
+ } else {
+ newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
+ other->tcs[other_tc].CP0_TCBind = newval;
+ }
+}
+
+void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
+{
+ env->active_tc.PC = arg1;
+ env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
+ env->CP0_LLAddr = 0;
+ env->lladdr = 0;
+ /* MIPS16 not implemented. */
+}
+
+void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.PC = arg1;
+ other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
+ other->CP0_LLAddr = 0;
+ other->lladdr = 0;
+ /* MIPS16 not implemented. */
+ } else {
+ other->tcs[other_tc].PC = arg1;
+ other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
+ other->CP0_LLAddr = 0;
+ other->lladdr = 0;
+ /* MIPS16 not implemented. */
+ }
+}
+
+void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
+{
+ MIPSCPU *cpu = env_archcpu(env);
+
+ env->active_tc.CP0_TCHalt = arg1 & 0x1;
+
+ /* TODO: Halt TC / Restart (if allocated+active) TC. */
+ if (env->active_tc.CP0_TCHalt & 1) {
+ mips_tc_sleep(cpu, env->current_tc);
+ } else {
+ mips_tc_wake(cpu, env->current_tc);
+ }
+}
+
+void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ MIPSCPU *other_cpu = env_archcpu(other);
+
+ /* TODO: Halt TC / Restart (if allocated+active) TC. */
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.CP0_TCHalt = arg1;
+ } else {
+ other->tcs[other_tc].CP0_TCHalt = arg1;
+ }
+
+ if (arg1 & 1) {
+ mips_tc_sleep(other_cpu, other_tc);
+ } else {
+ mips_tc_wake(other_cpu, other_tc);
+ }
+}
+
+void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
+{
+ env->active_tc.CP0_TCContext = arg1;
+}
+
+void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.CP0_TCContext = arg1;
+ } else {
+ other->tcs[other_tc].CP0_TCContext = arg1;
+ }
+}
+
+void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
+{
+ env->active_tc.CP0_TCSchedule = arg1;
+}
+
+void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.CP0_TCSchedule = arg1;
+ } else {
+ other->tcs[other_tc].CP0_TCSchedule = arg1;
+ }
+}
+
+void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
+{
+ env->active_tc.CP0_TCScheFBack = arg1;
+}
+
+void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.CP0_TCScheFBack = arg1;
+ } else {
+ other->tcs[other_tc].CP0_TCScheFBack = arg1;
+ }
+}
+
+void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
+{
+ /* 1k pages not implemented */
+ target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
+ env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env))
+ | (rxi << (CP0EnLo_XI - 30));
+}
+
+#if defined(TARGET_MIPS64)
+void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1)
+{
+ uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
+ env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
+}
+#endif
+
+void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
+}
+
+void helper_mtc0_memorymapid(CPUMIPSState *env, target_ulong arg1)
+{
+ int32_t old;
+ old = env->CP0_MemoryMapID;
+ env->CP0_MemoryMapID = (int32_t) arg1;
+ /* If the MemoryMapID changes, flush qemu's TLB. */
+ if (old != env->CP0_MemoryMapID) {
+ cpu_mips_tlb_flush(env);
+ }
+}
+
+uint32_t compute_pagemask(uint32_t val)
+{
+ /* Ignore MASKX, as 1KB pages are not supported. */
+ uint32_t mask = extract32(val, CP0PM_MASK, 16);
+ int maskbits = cto32(mask);
+
+ /* Ensure there is no set bit after the first zero, and that maskbits is even. */
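+ /*
+ * For example, a Mask field of 0b0011 (cto32 = 2, even, nothing set above
+ * the trailing ones) encodes 16KB pages and is accepted, while 0b0101 has
+ * a set bit after the first zero and falls back to the default page size.
+ */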
+ if ((mask >> maskbits) == 0 && maskbits % 2 == 0) {
+ return mask << CP0PM_MASK;
+ } else {
+ /* When invalid, set to default target page size. */
+ return 0;
+ }
+}
+
+void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_PageMask = compute_pagemask(arg1);
+}
+
+void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
+{
+ /* SmartMIPS not implemented */
+ /* 1k pages not implemented */
+ env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) |
+ (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask);
+ compute_hflags(env);
+ restore_pamask(env);
+}
+
+void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1)
+{
+ CPUState *cs = env_cpu(env);
+
+ env->CP0_SegCtl0 = arg1 & CP0SC0_MASK;
+ tlb_flush(cs);
+}
+
+void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1)
+{
+ CPUState *cs = env_cpu(env);
+
+ env->CP0_SegCtl1 = arg1 & CP0SC1_MASK;
+ tlb_flush(cs);
+}
+
+void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1)
+{
+ CPUState *cs = env_cpu(env);
+
+ env->CP0_SegCtl2 = arg1 & CP0SC2_MASK;
+ tlb_flush(cs);
+}
+
+void helper_mtc0_pwfield(CPUMIPSState *env, target_ulong arg1)
+{
+#if defined(TARGET_MIPS64)
+ uint64_t mask = 0x3F3FFFFFFFULL;
+ uint32_t old_ptei = (env->CP0_PWField >> CP0PF_PTEI) & 0x3FULL;
+ uint32_t new_ptei = (arg1 >> CP0PF_PTEI) & 0x3FULL;
+
+ if ((env->insn_flags & ISA_MIPS_R6)) {
+ if (((arg1 >> CP0PF_BDI) & 0x3FULL) < 12) {
+ mask &= ~(0x3FULL << CP0PF_BDI);
+ }
+ if (((arg1 >> CP0PF_GDI) & 0x3FULL) < 12) {
+ mask &= ~(0x3FULL << CP0PF_GDI);
+ }
+ if (((arg1 >> CP0PF_UDI) & 0x3FULL) < 12) {
+ mask &= ~(0x3FULL << CP0PF_UDI);
+ }
+ if (((arg1 >> CP0PF_MDI) & 0x3FULL) < 12) {
+ mask &= ~(0x3FULL << CP0PF_MDI);
+ }
+ if (((arg1 >> CP0PF_PTI) & 0x3FULL) < 12) {
+ mask &= ~(0x3FULL << CP0PF_PTI);
+ }
+ }
+ env->CP0_PWField = arg1 & mask;
+
+ if ((new_ptei >= 32) ||
+ ((env->insn_flags & ISA_MIPS_R6) &&
+ (new_ptei == 0 || new_ptei == 1))) {
+ env->CP0_PWField = (env->CP0_PWField & ~0x3FULL) |
+ (old_ptei << CP0PF_PTEI);
+ }
+#else
+ uint32_t mask = 0x3FFFFFFF;
+ uint32_t old_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
+ uint32_t new_ptew = (arg1 >> CP0PF_PTEW) & 0x3F;
+
+ if ((env->insn_flags & ISA_MIPS_R6)) {
+ if (((arg1 >> CP0PF_GDW) & 0x3F) < 12) {
+ mask &= ~(0x3F << CP0PF_GDW);
+ }
+ if (((arg1 >> CP0PF_UDW) & 0x3F) < 12) {
+ mask &= ~(0x3F << CP0PF_UDW);
+ }
+ if (((arg1 >> CP0PF_MDW) & 0x3F) < 12) {
+ mask &= ~(0x3F << CP0PF_MDW);
+ }
+ if (((arg1 >> CP0PF_PTW) & 0x3F) < 12) {
+ mask &= ~(0x3F << CP0PF_PTW);
+ }
+ }
+ env->CP0_PWField = arg1 & mask;
+
+ if ((new_ptew >= 32) ||
+ ((env->insn_flags & ISA_MIPS_R6) &&
+ (new_ptew == 0 || new_ptew == 1))) {
+ env->CP0_PWField = (env->CP0_PWField & ~0x3F) |
+ (old_ptew << CP0PF_PTEW);
+ }
+#endif
+}
+
+void helper_mtc0_pwsize(CPUMIPSState *env, target_ulong arg1)
+{
+#if defined(TARGET_MIPS64)
+ env->CP0_PWSize = arg1 & 0x3F7FFFFFFFULL;
+#else
+ env->CP0_PWSize = arg1 & 0x3FFFFFFF;
+#endif
+}
+
+void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
+{
+ if (env->insn_flags & ISA_MIPS_R6) {
+ if (arg1 < env->tlb->nb_tlb) {
+ env->CP0_Wired = arg1;
+ }
+ } else {
+ env->CP0_Wired = arg1 % env->tlb->nb_tlb;
+ }
+}
+
+void helper_mtc0_pwctl(CPUMIPSState *env, target_ulong arg1)
+{
+#if defined(TARGET_MIPS64)
+ /* PWEn = 0. Hardware page table walking is not implemented. */
+ env->CP0_PWCtl = (env->CP0_PWCtl & 0x000000C0) | (arg1 & 0x5C00003F);
+#else
+ env->CP0_PWCtl = (arg1 & 0x800000FF);
+#endif
+}
+
+void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
+}
+
+void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
+}
+
+void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
+}
+
+void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
+}
+
+void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
+}
+
+void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = 0x0000000F;
+
+ if ((env->CP0_Config1 & (1 << CP0C1_PC)) &&
+ (env->insn_flags & ISA_MIPS_R6)) {
+ mask |= (1 << 4);
+ }
+ if (env->insn_flags & ISA_MIPS_R6) {
+ mask |= (1 << 5);
+ }
+ if (env->CP0_Config3 & (1 << CP0C3_ULRI)) {
+ mask |= (1 << 29);
+
+ if (arg1 & (1 << 29)) {
+ env->hflags |= MIPS_HFLAG_HWRENA_ULR;
+ } else {
+ env->hflags &= ~MIPS_HFLAG_HWRENA_ULR;
+ }
+ }
+
+ env->CP0_HWREna = arg1 & mask;
+}
+
+void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
+{
+ cpu_mips_store_count(env, arg1);
+}
+
+void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
+{
+ target_ulong old, val, mask;
+ mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask;
+ if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) {
+ mask |= 1 << CP0EnHi_EHINV;
+ }
+
+ /* 1k pages not implemented */
+#if defined(TARGET_MIPS64)
+ if (env->insn_flags & ISA_MIPS_R6) {
+ int entryhi_r = extract64(arg1, 62, 2);
+ int config0_at = extract32(env->CP0_Config0, 13, 2);
+ bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0;
+ if ((entryhi_r == 2) ||
+ (entryhi_r == 1 && (no_supervisor || config0_at == 1))) {
+ /* skip EntryHi.R field if new value is reserved */
+ mask &= ~(0x3ull << 62);
+ }
+ }
+ mask &= env->SEGMask;
+#endif
+ old = env->CP0_EntryHi;
+ val = (arg1 & mask) | (old & ~mask);
+ env->CP0_EntryHi = val;
+ if (ase_mt_available(env)) {
+ sync_c0_entryhi(env, env->current_tc);
+ }
+ /* If the ASID changes, flush qemu's TLB. */
+ if ((old & env->CP0_EntryHi_ASID_mask) !=
+ (val & env->CP0_EntryHi_ASID_mask)) {
+ tlb_flush(env_cpu(env));
+ }
+}
+
+void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ other->CP0_EntryHi = arg1;
+ sync_c0_entryhi(other, other_tc);
+}
+
+void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
+{
+ cpu_mips_store_compare(env, arg1);
+}
+
+void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t val, old;
+
+ old = env->CP0_Status;
+ cpu_mips_store_status(env, arg1);
+ val = env->CP0_Status;
+
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+ qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
+ old, old & env->CP0_Cause & CP0Ca_IP_mask,
+ val, val & env->CP0_Cause & CP0Ca_IP_mask,
+ env->CP0_Cause);
+ switch (mips_env_mmu_index(env)) {
+ case 3:
+ qemu_log(", ERL\n");
+ break;
+ case MIPS_HFLAG_UM:
+ qemu_log(", UM\n");
+ break;
+ case MIPS_HFLAG_SM:
+ qemu_log(", SM\n");
+ break;
+ case MIPS_HFLAG_KM:
+ qemu_log("\n");
+ break;
+ default:
+ cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
+ break;
+ }
+ }
+}
+
+void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018;
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask);
+ sync_c0_status(env, other, other_tc);
+}
+
+void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
+}
+
+void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
+ env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
+}
+
+void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
+{
+ cpu_mips_store_cause(env, arg1);
+}
+
+void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ cpu_mips_store_cause(other, arg1);
+}
+
+target_ulong helper_mftc0_epc(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_EPC;
+}
+
+target_ulong helper_mftc0_ebase(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_EBase;
+}
+
+void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
+{
+ target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
+ if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
+ mask |= ~0x3FFFFFFF;
+ }
+ env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask);
+}
+
+void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
+ if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
+ mask |= ~0x3FFFFFFF;
+ }
+ other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask);
+}
+
+target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ switch (idx) {
+ case 0: return other->CP0_Config0;
+ case 1: return other->CP0_Config1;
+ case 2: return other->CP0_Config2;
+ case 3: return other->CP0_Config3;
+ /* 4 and 5 are reserved. */
+ case 6: return other->CP0_Config6;
+ case 7: return other->CP0_Config7;
+ default:
+ break;
+ }
+ return 0;
+}
+
+void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
+}
+
+void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
+{
+ /* tertiary/secondary caches not implemented */
+ env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
+}
+
+void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1)
+{
+ if (env->insn_flags & ASE_MICROMIPS) {
+ env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) |
+ (arg1 & (1 << CP0C3_ISA_ON_EXC));
+ }
+}
+
+void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) |
+ (arg1 & env->CP0_Config4_rw_bitmask);
+}
+
+void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) |
+ (arg1 & env->CP0_Config5_rw_bitmask);
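+ /*
+ * With Config5.MI set, MemoryMapID is used instead of the ASID, so the
+ * ASID mask is 0; otherwise Config4.AE selects a 10-bit (0x3ff) or
+ * 8-bit (0xff) ASID.
+ */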
+ env->CP0_EntryHi_ASID_mask = (env->CP0_Config5 & (1 << CP0C5_MI)) ?
+ 0x0 : (env->CP0_Config4 & (1 << CP0C4_AE)) ? 0x3ff : 0xff;
+ compute_hflags(env);
+}
+
+void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
+{
+ target_long mask = env->CP0_LLAddr_rw_bitmask;
+ arg1 = arg1 << env->CP0_LLAddr_shift;
+ env->CP0_LLAddr = (env->CP0_LLAddr & ~mask) | (arg1 & mask);
+}
+
+#define MTC0_MAAR_MASK(env) \
+ ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3)
+
+void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env);
+}
+
+void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_MAAR[env->CP0_MAARI] =
+ (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) |
+ (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL);
+}
+
+void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1)
+{
+ int index = arg1 & 0x3f;
+ if (index == 0x3f) {
+ /*
+ * Software may write all ones to INDEX to determine the
+ * maximum value supported.
+ */
+ env->CP0_MAARI = MIPS_MAAR_MAX - 1;
+ } else if (index < MIPS_MAAR_MAX) {
+ env->CP0_MAARI = index;
+ }
+ /*
+ * Other than the all-ones value, if the value written is not supported,
+ * INDEX is unchanged from its previous value.
+ */
+}
+
+void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ /*
+ * Watch exceptions for instructions, data loads, and data stores
+ * are not implemented.
+ */
+ env->CP0_WatchLo[sel] = (arg1 & ~0x7);
+}
+
+void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ uint64_t mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID);
+ uint64_t m_bit = env->CP0_WatchHi[sel] & (1 << CP0WH_M); /* read-only */
+ if ((env->CP0_Config5 >> CP0C5_MI) & 1) {
+ mask |= 0xFFFFFFFF00000000ULL; /* MMID */
+ }
+ env->CP0_WatchHi[sel] = m_bit | (arg1 & mask);
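+ /* The low three bits (I, R, W) are write-one-to-clear. */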
+ env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
+}
+
+void helper_mthc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ env->CP0_WatchHi[sel] = ((uint64_t) (arg1) << 32) |
+ (env->CP0_WatchHi[sel] & 0x00000000ffffffffULL);
+}
+
+void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
+{
+ target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
+ env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
+}
+
+void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Framemask = arg1; /* XXX */
+}
+
+void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
+ if (arg1 & (1 << CP0DB_DM)) {
+ env->hflags |= MIPS_HFLAG_DM;
+ } else {
+ env->hflags &= ~MIPS_HFLAG_DM;
+ }
+}
+
+void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ /* XXX: Might be wrong, check with EJTAG spec. */
+ if (other_tc == other->current_tc) {
+ other->active_tc.CP0_Debug_tcstatus = val;
+ } else {
+ other->tcs[other_tc].CP0_Debug_tcstatus = val;
+ }
+ other->CP0_Debug = (other->CP0_Debug &
+ ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
+ (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
+}
+
+void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Performance0 = arg1 & 0x000007ff;
+}
+
+void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1)
+{
+ int32_t wst = arg1 & (1 << CP0EC_WST);
+ int32_t spr = arg1 & (1 << CP0EC_SPR);
+ int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0;
+
+ env->CP0_ErrCtl = wst | spr | itc;
+
+ if (itc && !wst && !spr) {
+ env->hflags |= MIPS_HFLAG_ITC_CACHE;
+ } else {
+ env->hflags &= ~MIPS_HFLAG_ITC_CACHE;
+ }
+}
+
+void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
+{
+ if (env->hflags & MIPS_HFLAG_ITC_CACHE) {
+ /*
+ * If the CACHE instruction is configured for ITC tags, make all
+ * CP0.TagLo bits writable. The actual write to the ITC Configuration
+ * Tag will take care of the read-only bits.
+ */
+ env->CP0_TagLo = arg1;
+ } else {
+ env->CP0_TagLo = arg1 & 0xFFFFFCF6;
+ }
+}
+
+void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_DataLo = arg1; /* XXX */
+}
+
+void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_TagHi = arg1; /* XXX */
+}
+
+void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_DataHi = arg1; /* XXX */
+}
+
+/* MIPS MT functions */
+target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.gpr[sel];
+ } else {
+ return other->tcs[other_tc].gpr[sel];
+ }
+}
+
+target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.LO[sel];
+ } else {
+ return other->tcs[other_tc].LO[sel];
+ }
+}
+
+target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.HI[sel];
+ } else {
+ return other->tcs[other_tc].HI[sel];
+ }
+}
+
+target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.ACX[sel];
+ } else {
+ return other->tcs[other_tc].ACX[sel];
+ }
+}
+
+target_ulong helper_mftdsp(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ return other->active_tc.DSPControl;
+ } else {
+ return other->tcs[other_tc].DSPControl;
+ }
+}
+
+void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.gpr[sel] = arg1;
+ } else {
+ other->tcs[other_tc].gpr[sel] = arg1;
+ }
+}
+
+void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.LO[sel] = arg1;
+ } else {
+ other->tcs[other_tc].LO[sel] = arg1;
+ }
+}
+
+void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.HI[sel] = arg1;
+ } else {
+ other->tcs[other_tc].HI[sel] = arg1;
+ }
+}
+
+void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.ACX[sel] = arg1;
+ } else {
+ other->tcs[other_tc].ACX[sel] = arg1;
+ }
+}
+
+void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.DSPControl = arg1;
+ } else {
+ other->tcs[other_tc].DSPControl = arg1;
+ }
+}
+
+/* MIPS MT functions */
+target_ulong helper_dmt(void)
+{
+ /* TODO */
+ return 0;
+}
+
+target_ulong helper_emt(void)
+{
+ /* TODO */
+ return 0;
+}
+
+target_ulong helper_dvpe(CPUMIPSState *env)
+{
+ CPUState *other_cs = first_cpu;
+ target_ulong prev = env->mvp->CP0_MVPControl;
+
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+ /* Turn off all VPEs except the one executing the dvpe. */
+ if (&other_cpu->env != env) {
+ other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
+ mips_vpe_sleep(other_cpu);
+ }
+ }
+ return prev;
+}
+
+target_ulong helper_evpe(CPUMIPSState *env)
+{
+ CPUState *other_cs = first_cpu;
+ target_ulong prev = env->mvp->CP0_MVPControl;
+
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+
+ if (&other_cpu->env != env
+ /* If the VPE is WFI, don't disturb its sleep. */
+ && !mips_vpe_is_wfi(other_cpu)) {
+ /* Enable the VPE. */
+ other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
+ mips_vpe_wake(other_cpu); /* And wake it up. */
+ }
+ }
+ return prev;
+}
+
+/* R6 Multi-threading */
+target_ulong helper_dvp(CPUMIPSState *env)
+{
+ CPUState *other_cs = first_cpu;
+ target_ulong prev = env->CP0_VPControl;
+
+ if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+ /* Turn off all VPs except the one executing the dvp. */
+ if (&other_cpu->env != env) {
+ mips_vpe_sleep(other_cpu);
+ }
+ }
+ env->CP0_VPControl |= (1 << CP0VPCtl_DIS);
+ }
+ return prev;
+}
+
+target_ulong helper_evp(CPUMIPSState *env)
+{
+ CPUState *other_cs = first_cpu;
+ target_ulong prev = env->CP0_VPControl;
+
+ if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+ if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) {
+ /*
+ * If the VP is WFI, don't disturb its sleep.
+ * Otherwise, wake it up.
+ */
+ mips_vpe_wake(other_cpu);
+ }
+ }
+ env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS);
+ }
+ return prev;
+}
diff --git a/target/mips/tcg/system/lcsr_helper.c b/target/mips/tcg/system/lcsr_helper.c
new file mode 100644
index 0000000..25e0357
--- /dev/null
+++ b/target/mips/tcg/system/lcsr_helper.c
@@ -0,0 +1,40 @@
+/*
+ * Loongson CSR instructions translation routines
+ *
+ * Copyright (c) 2023 Jiaxun Yang <jiaxun.yang@flygoat.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+#define GET_MEMTXATTRS(cas) \
+ ((MemTxAttrs){.requester_id = env_cpu(cas)->cpu_index})
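+/* Tag each IOCSR access with the issuing CPU's index as the requester ID. */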
+
+uint64_t helper_lcsr_rdcsr(CPUMIPSState *env, target_ulong r_addr)
+{
+ return address_space_ldl(&env->iocsr.as, r_addr,
+ GET_MEMTXATTRS(env), NULL);
+}
+
+uint64_t helper_lcsr_drdcsr(CPUMIPSState *env, target_ulong r_addr)
+{
+ return address_space_ldq(&env->iocsr.as, r_addr,
+ GET_MEMTXATTRS(env), NULL);
+}
+
+void helper_lcsr_wrcsr(CPUMIPSState *env, target_ulong w_addr,
+ target_ulong val)
+{
+ address_space_stl(&env->iocsr.as, w_addr,
+ val, GET_MEMTXATTRS(env), NULL);
+}
+
+void helper_lcsr_dwrcsr(CPUMIPSState *env, target_ulong w_addr,
+ target_ulong val)
+{
+ address_space_stq(&env->iocsr.as, w_addr,
+ val, GET_MEMTXATTRS(env), NULL);
+}
diff --git a/target/mips/tcg/system/meson.build b/target/mips/tcg/system/meson.build
new file mode 100644
index 0000000..911341a
--- /dev/null
+++ b/target/mips/tcg/system/meson.build
@@ -0,0 +1,12 @@
+mips_system_ss.add(files(
+ 'cp0_helper.c',
+ 'special_helper.c',
+ 'tlb_helper.c',
+))
+mips_system_ss.add(when: ['CONFIG_SEMIHOSTING'],
+ if_true: files('mips-semi.c'),
+ if_false: files('semihosting-stub.c')
+)
+mips_system_ss.add(when: 'TARGET_MIPS64', if_true: files(
+ 'lcsr_helper.c',
+))
diff --git a/target/mips/tcg/system/mips-semi.c b/target/mips/tcg/system/mips-semi.c
new file mode 100644
index 0000000..e822a42
--- /dev/null
+++ b/target/mips/tcg/system/mips-semi.c
@@ -0,0 +1,377 @@
+/*
+ * Unified Hosting Interface syscalls.
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/log.h"
+#include "gdbstub/syscalls.h"
+#include "gdbstub/helpers.h"
+#include "semihosting/uaccess.h"
+#include "semihosting/semihost.h"
+#include "semihosting/console.h"
+#include "semihosting/syscalls.h"
+#include "internal.h"
+
+typedef enum UHIOp {
+ UHI_exit = 1,
+ UHI_open = 2,
+ UHI_close = 3,
+ UHI_read = 4,
+ UHI_write = 5,
+ UHI_lseek = 6,
+ UHI_unlink = 7,
+ UHI_fstat = 8,
+ UHI_argc = 9,
+ UHI_argnlen = 10,
+ UHI_argn = 11,
+ UHI_plog = 13,
+ UHI_assert = 14,
+ UHI_pread = 19,
+ UHI_pwrite = 20,
+ UHI_link = 22
+} UHIOp;
+
+typedef struct UHIStat {
+ int16_t uhi_st_dev;
+ uint16_t uhi_st_ino;
+ uint32_t uhi_st_mode;
+ uint16_t uhi_st_nlink;
+ uint16_t uhi_st_uid;
+ uint16_t uhi_st_gid;
+ int16_t uhi_st_rdev;
+ uint64_t uhi_st_size;
+ uint64_t uhi_st_atime;
+ uint64_t uhi_st_spare1;
+ uint64_t uhi_st_mtime;
+ uint64_t uhi_st_spare2;
+ uint64_t uhi_st_ctime;
+ uint64_t uhi_st_spare3;
+ uint64_t uhi_st_blksize;
+ uint64_t uhi_st_blocks;
+ uint64_t uhi_st_spare4[2];
+} UHIStat;
+
+enum UHIOpenFlags {
+ UHIOpen_RDONLY = 0x0,
+ UHIOpen_WRONLY = 0x1,
+ UHIOpen_RDWR = 0x2,
+ UHIOpen_APPEND = 0x8,
+ UHIOpen_CREAT = 0x200,
+ UHIOpen_TRUNC = 0x400,
+ UHIOpen_EXCL = 0x800
+};
+
+enum UHIErrno {
+ UHI_EACCESS = 13,
+ UHI_EAGAIN = 11,
+ UHI_EBADF = 9,
+ UHI_EBADMSG = 77,
+ UHI_EBUSY = 16,
+ UHI_ECONNRESET = 104,
+ UHI_EEXIST = 17,
+ UHI_EFBIG = 27,
+ UHI_EINTR = 4,
+ UHI_EINVAL = 22,
+ UHI_EIO = 5,
+ UHI_EISDIR = 21,
+ UHI_ELOOP = 92,
+ UHI_EMFILE = 24,
+ UHI_EMLINK = 31,
+ UHI_ENAMETOOLONG = 91,
+ UHI_ENETDOWN = 115,
+ UHI_ENETUNREACH = 114,
+ UHI_ENFILE = 23,
+ UHI_ENOBUFS = 105,
+ UHI_ENOENT = 2,
+ UHI_ENOMEM = 12,
+ UHI_ENOSPC = 28,
+ UHI_ENOSR = 63,
+ UHI_ENOTCONN = 128,
+ UHI_ENOTDIR = 20,
+ UHI_ENXIO = 6,
+ UHI_EOVERFLOW = 139,
+ UHI_EPERM = 1,
+ UHI_EPIPE = 32,
+ UHI_ERANGE = 34,
+ UHI_EROFS = 30,
+ UHI_ESPIPE = 29,
+ UHI_ETIMEDOUT = 116,
+ UHI_ETXTBSY = 26,
+ UHI_EWOULDBLOCK = 11,
+ UHI_EXDEV = 18,
+};
+
+static void report_fault(CPUMIPSState *env)
+{
+ int op = env->active_tc.gpr[25];
+ error_report("Fault during UHI operation %d", op);
+ abort();
+}
+
+static void uhi_cb(CPUState *cs, uint64_t ret, int err)
+{
+ CPUMIPSState *env = cpu_env(cs);
+
+#define E(N) case E##N: err = UHI_E##N; break
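+/*
+ * E(PERM) expands to "case EPERM: err = UHI_EPERM; break", mapping a host
+ * errno value onto the corresponding UHI errno value.
+ */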
+
+ switch (err) {
+ case 0:
+ break;
+ E(PERM);
+ E(NOENT);
+ E(INTR);
+ E(BADF);
+ E(BUSY);
+ E(EXIST);
+ E(NOTDIR);
+ E(ISDIR);
+ E(INVAL);
+ E(NFILE);
+ E(MFILE);
+ E(FBIG);
+ E(NOSPC);
+ E(SPIPE);
+ E(ROFS);
+ E(NAMETOOLONG);
+ default:
+ err = UHI_EINVAL;
+ break;
+ case EFAULT:
+ report_fault(env);
+ }
+
+#undef E
+
+ env->active_tc.gpr[2] = ret;
+ env->active_tc.gpr[3] = err;
+}
+
+static void uhi_fstat_cb(CPUState *cs, uint64_t ret, int err)
+{
+ QEMU_BUILD_BUG_ON(sizeof(UHIStat) < sizeof(struct gdb_stat));
+
+ if (!err) {
+ CPUMIPSState *env = cpu_env(cs);
+ bool swap_needed = HOST_BIG_ENDIAN != mips_env_is_bigendian(env);
+ target_ulong addr = env->active_tc.gpr[5];
+ UHIStat *dst = lock_user(VERIFY_WRITE, addr, sizeof(UHIStat), 1);
+ struct gdb_stat s;
+
+ if (!dst) {
+ report_fault(env);
+ }
+
+ memcpy(&s, dst, sizeof(struct gdb_stat));
+ memset(dst, 0, sizeof(UHIStat));
+
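+ /*
+ * struct gdb_stat fields are big-endian on the wire; convert them to
+ * host order here, then byte-swap again below if the guest endianness
+ * differs from the host's.
+ */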
+ dst->uhi_st_dev = be32_to_cpu(s.gdb_st_dev);
+ dst->uhi_st_ino = be32_to_cpu(s.gdb_st_ino);
+ dst->uhi_st_mode = be32_to_cpu(s.gdb_st_mode);
+ dst->uhi_st_nlink = be32_to_cpu(s.gdb_st_nlink);
+ dst->uhi_st_uid = be32_to_cpu(s.gdb_st_uid);
+ dst->uhi_st_gid = be32_to_cpu(s.gdb_st_gid);
+ dst->uhi_st_rdev = be32_to_cpu(s.gdb_st_rdev);
+ dst->uhi_st_size = be64_to_cpu(s.gdb_st_size);
+ dst->uhi_st_atime = be32_to_cpu(s.gdb_st_atime);
+ dst->uhi_st_mtime = be32_to_cpu(s.gdb_st_mtime);
+ dst->uhi_st_ctime = be32_to_cpu(s.gdb_st_ctime);
+ dst->uhi_st_blksize = be64_to_cpu(s.gdb_st_blksize);
+ dst->uhi_st_blocks = be64_to_cpu(s.gdb_st_blocks);
+
+ if (swap_needed) {
+ dst->uhi_st_dev = bswap16(dst->uhi_st_dev);
+ dst->uhi_st_ino = bswap16(dst->uhi_st_ino);
+ dst->uhi_st_mode = bswap32(dst->uhi_st_mode);
+ dst->uhi_st_nlink = bswap16(dst->uhi_st_nlink);
+ dst->uhi_st_uid = bswap16(dst->uhi_st_uid);
+ dst->uhi_st_gid = bswap16(dst->uhi_st_gid);
+ dst->uhi_st_rdev = bswap16(dst->uhi_st_rdev);
+ dst->uhi_st_size = bswap64(dst->uhi_st_size);
+ dst->uhi_st_atime = bswap64(dst->uhi_st_atime);
+ dst->uhi_st_mtime = bswap64(dst->uhi_st_mtime);
+ dst->uhi_st_ctime = bswap64(dst->uhi_st_ctime);
+ dst->uhi_st_blksize = bswap64(dst->uhi_st_blksize);
+ dst->uhi_st_blocks = bswap64(dst->uhi_st_blocks);
+ }
+
+ unlock_user(dst, addr, sizeof(UHIStat));
+ }
+
+ uhi_cb(cs, ret, err);
+}
+
+void mips_semihosting(CPUMIPSState *env)
+{
+ CPUState *cs = env_cpu(env);
+ target_ulong *gpr = env->active_tc.gpr;
+ const UHIOp op = gpr[25];
+ char *p;
+
+ switch (op) {
+ case UHI_exit:
+ gdb_exit(gpr[4]);
+ exit(gpr[4]);
+
+ case UHI_open:
+ {
+ target_ulong fname = gpr[4];
+ int ret = -1;
+
+ p = lock_user_string(fname);
+ if (!p) {
+ report_fault(env);
+ }
+ if (!strcmp("/dev/stdin", p)) {
+ ret = 0;
+ } else if (!strcmp("/dev/stdout", p)) {
+ ret = 1;
+ } else if (!strcmp("/dev/stderr", p)) {
+ ret = 2;
+ }
+ unlock_user(p, fname, 0);
+
+ /* FIXME: reusing a guest fd doesn't seem correct. */
+ if (ret >= 0) {
+ gpr[2] = ret;
+ break;
+ }
+
+ semihost_sys_open(cs, uhi_cb, fname, 0, gpr[5], gpr[6]);
+ }
+ break;
+
+ case UHI_close:
+ semihost_sys_close(cs, uhi_cb, gpr[4]);
+ break;
+ case UHI_read:
+ semihost_sys_read(cs, uhi_cb, gpr[4], gpr[5], gpr[6]);
+ break;
+ case UHI_write:
+ semihost_sys_write(cs, uhi_cb, gpr[4], gpr[5], gpr[6]);
+ break;
+ case UHI_lseek:
+ semihost_sys_lseek(cs, uhi_cb, gpr[4], gpr[5], gpr[6]);
+ break;
+ case UHI_unlink:
+ semihost_sys_remove(cs, uhi_cb, gpr[4], 0);
+ break;
+ case UHI_fstat:
+ semihost_sys_fstat(cs, uhi_fstat_cb, gpr[4], gpr[5]);
+ break;
+
+ case UHI_argc:
+ gpr[2] = semihosting_get_argc();
+ break;
+ case UHI_argnlen:
+ {
+ const char *s = semihosting_get_arg(gpr[4]);
+ gpr[2] = s ? strlen(s) : -1;
+ }
+ break;
+ case UHI_argn:
+ {
+ const char *s = semihosting_get_arg(gpr[4]);
+ target_ulong addr;
+ size_t len;
+
+ if (!s) {
+ gpr[2] = -1;
+ break;
+ }
+ len = strlen(s) + 1;
+ addr = gpr[5];
+ p = lock_user(VERIFY_WRITE, addr, len, 0);
+ if (!p) {
+ report_fault(env);
+ }
+ memcpy(p, s, len);
+ unlock_user(p, addr, len);
+ gpr[2] = 0;
+ }
+ break;
+
+ case UHI_plog:
+ {
+ target_ulong addr = gpr[4];
+ ssize_t len = target_strlen(addr);
+ GString *str;
+ char *pct_d;
+
+ if (len < 0) {
+ report_fault(env);
+ }
+ p = lock_user(VERIFY_READ, addr, len, 1);
+ if (!p) {
+ report_fault(env);
+ }
+
+ pct_d = strstr(p, "%d");
+ if (!pct_d) {
+ unlock_user(p, addr, 0);
+ semihost_sys_write(cs, uhi_cb, 2, addr, len);
+ break;
+ }
+
+ str = g_string_new_len(p, pct_d - p);
+ g_string_append_printf(str, "%d%s", (int)gpr[5], pct_d + 2);
+ unlock_user(p, addr, 0);
+
+ /*
+ * When we're using gdb, we need a guest address, so
+ * drop the string onto the stack below the stack pointer.
+ */
+ if (use_gdb_syscalls()) {
+ addr = gpr[29] - str->len;
+ p = lock_user(VERIFY_WRITE, addr, str->len, 0);
+ if (!p) {
+ report_fault(env);
+ }
+ memcpy(p, str->str, str->len);
+ unlock_user(p, addr, str->len);
+ semihost_sys_write(cs, uhi_cb, 2, addr, str->len);
+ } else {
+ gpr[2] = qemu_semihosting_console_write(str->str, str->len);
+ }
+ g_string_free(str, true);
+ }
+ break;
+
+ case UHI_assert:
+ {
+ const char *msg, *file;
+
+ msg = lock_user_string(gpr[4]);
+ if (!msg) {
+ msg = "<EFAULT>";
+ }
+ file = lock_user_string(gpr[5]);
+ if (!file) {
+ file = "<EFAULT>";
+ }
+
+ error_report("UHI assertion \"%s\": file \"%s\", line %d",
+ msg, file, (int)gpr[6]);
+ abort();
+ }
+
+ default:
+ error_report("Unknown UHI operation %d", op);
+ abort();
+ }
+}
diff --git a/target/mips/tcg/system/semihosting-stub.c b/target/mips/tcg/system/semihosting-stub.c
new file mode 100644
index 0000000..bb1f7aa
--- /dev/null
+++ b/target/mips/tcg/system/semihosting-stub.c
@@ -0,0 +1,16 @@
+/*
+ * MIPS semihosting stub
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright (c) 2024 Linaro Ltd.
+ * Authors:
+ * Philippe Mathieu-Daudé
+ */
+
+#include "qemu/osdep.h"
+#include "internal.h"
+
+void mips_semihosting(CPUMIPSState *env)
+{
+ g_assert_not_reached();
+}
diff --git a/target/mips/tcg/system/special_helper.c b/target/mips/tcg/system/special_helper.c
new file mode 100644
index 0000000..b54cbe8
--- /dev/null
+++ b/target/mips/tcg/system/special_helper.c
@@ -0,0 +1,173 @@
+/*
+ * QEMU MIPS emulation: Special opcode helpers
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/translation-block.h"
+#include "internal.h"
+
+/* Specials */
+target_ulong helper_di(CPUMIPSState *env)
+{
+ target_ulong t0 = env->CP0_Status;
+
+ env->CP0_Status = t0 & ~(1 << CP0St_IE);
+ return t0;
+}
+
+target_ulong helper_ei(CPUMIPSState *env)
+{
+ target_ulong t0 = env->CP0_Status;
+
+ env->CP0_Status = t0 | (1 << CP0St_IE);
+ return t0;
+}
+
+static void debug_pre_eret(CPUMIPSState *env)
+{
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+ qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
+ env->active_tc.PC, env->CP0_EPC);
+ if (env->CP0_Status & (1 << CP0St_ERL)) {
+ qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
+ }
+ if (env->hflags & MIPS_HFLAG_DM) {
+ qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
+ }
+ qemu_log("\n");
+ }
+}
+
+static void debug_post_eret(CPUMIPSState *env)
+{
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+ qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
+ env->active_tc.PC, env->CP0_EPC);
+ if (env->CP0_Status & (1 << CP0St_ERL)) {
+ qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
+ }
+ if (env->hflags & MIPS_HFLAG_DM) {
+ qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
+ }
+ switch (mips_env_mmu_index(env)) {
+ case 3:
+ qemu_log(", ERL\n");
+ break;
+ case MIPS_HFLAG_UM:
+ qemu_log(", UM\n");
+ break;
+ case MIPS_HFLAG_SM:
+ qemu_log(", SM\n");
+ break;
+ case MIPS_HFLAG_KM:
+ qemu_log("\n");
+ break;
+ default:
+ cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
+ break;
+ }
+ }
+}
+
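+/*
+ * If an instruction in a branch delay slot forces an I/O recompile,
+ * replay the branch: rewind PC to the branch itself and clear the
+ * branch-state flags so that both instructions are re-executed.
+ */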
+bool mips_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb)
+{
+ CPUMIPSState *env = cpu_env(cs);
+
+ if ((env->hflags & MIPS_HFLAG_BMASK) != 0
+ && !tcg_cflags_has(cs, CF_PCREL) && env->active_tc.PC != tb->pc) {
+ env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ return true;
+ }
+ return false;
+}
+
+static inline void exception_return(CPUMIPSState *env)
+{
+ debug_pre_eret(env);
+ if (env->CP0_Status & (1 << CP0St_ERL)) {
+ mips_env_set_pc(env, env->CP0_ErrorEPC);
+ env->CP0_Status &= ~(1 << CP0St_ERL);
+ } else {
+ mips_env_set_pc(env, env->CP0_EPC);
+ env->CP0_Status &= ~(1 << CP0St_EXL);
+ }
+ compute_hflags(env);
+ debug_post_eret(env);
+}
+
+void helper_eret(CPUMIPSState *env)
+{
+ exception_return(env);
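+    /* ERET clears LLbit: make lladdr a value that can never match. */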
+ env->CP0_LLAddr = 1;
+ env->lladdr = 1;
+}
+
+void helper_eretnc(CPUMIPSState *env)
+{
+ exception_return(env);
+}
+
+void helper_deret(CPUMIPSState *env)
+{
+ debug_pre_eret(env);
+
+ env->hflags &= ~MIPS_HFLAG_DM;
+ compute_hflags(env);
+
+ mips_env_set_pc(env, env->CP0_DEPC);
+
+ debug_post_eret(env);
+}
+
+void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op)
+{
+ static const char *const type_name[] = {
+ "Primary Instruction",
+ "Primary Data or Unified Primary",
+ "Tertiary",
+ "Secondary"
+ };
+ uint32_t cache_type = extract32(op, 0, 2);
+ uint32_t cache_operation = extract32(op, 2, 3);
+ target_ulong index = addr & 0x1fffffff;
+
+ switch (cache_operation) {
+ case 0b010: /* Index Store Tag */
+ memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo,
+ MO_64, MEMTXATTRS_UNSPECIFIED);
+ break;
+ case 0b001: /* Index Load Tag */
+ memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo,
+ MO_64, MEMTXATTRS_UNSPECIFIED);
+ break;
+ case 0b000: /* Index Invalidate */
+ case 0b100: /* Hit Invalidate */
+ case 0b110: /* Hit Writeback */
+ /* no-op */
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "cache operation:%u (type: %s cache)\n",
+ cache_operation, type_name[cache_type]);
+ break;
+ }
+}
diff --git a/target/mips/tcg/system/tlb_helper.c b/target/mips/tcg/system/tlb_helper.c
new file mode 100644
index 0000000..eccaf36
--- /dev/null
+++ b/target/mips/tcg/system/tlb_helper.c
@@ -0,0 +1,1422 @@
+/*
+ * MIPS TLB (Translation lookaside buffer) helpers.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu/bitops.h"
+
+#include "cpu.h"
+#include "internal.h"
+#include "exec/cputlb.h"
+#include "exec/page-protection.h"
+#include "exec/target_page.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "exec/log.h"
+#include "exec/helper-proto.h"
+
+/* TLB management */
+static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first)
+{
+ /* Discard entries from env->tlb[first] onwards. */
+ while (env->tlb->tlb_in_use > first) {
+ r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
+ }
+}
+
+static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
+{
+#if defined(TARGET_MIPS64)
+ return extract64(entrylo, 6, 54);
+#else
+ return extract64(entrylo, 6, 24) | /* PFN */
+ (extract64(entrylo, 32, 32) << 24); /* PFNX */
+#endif
+}
+
+static void r4k_fill_tlb(CPUMIPSState *env, int idx)
+{
+ r4k_tlb_t *tlb;
+ uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);
+
+ /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
+ tlb->EHINV = 1;
+ return;
+ }
+ tlb->EHINV = 0;
+ tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
+#if defined(TARGET_MIPS64)
+ tlb->VPN &= env->SEGMask;
+#endif
+ tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ tlb->MMID = env->CP0_MemoryMapID;
+ tlb->PageMask = env->CP0_PageMask;
+ tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
+ tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
+ tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
+ tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
+ tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
+ tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
+ tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
+ tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
+ tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
+ tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
+ tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
+ tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
+ tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
+}
+
+static void r4k_helper_tlbinv(CPUMIPSState *env)
+{
+ bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ uint32_t MMID = env->CP0_MemoryMapID;
+ uint32_t tlb_mmid;
+ r4k_tlb_t *tlb;
+ int idx;
+
+ MMID = mi ? MMID : (uint32_t) ASID;
+ for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ if (!tlb->G && tlb_mmid == MMID) {
+ tlb->EHINV = 1;
+ }
+ }
+ cpu_mips_tlb_flush(env);
+}
+
+static void r4k_helper_tlbinvf(CPUMIPSState *env)
+{
+ int idx;
+
+ for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
+ env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
+ }
+ cpu_mips_tlb_flush(env);
+}
+
+static void r4k_helper_tlbwi(CPUMIPSState *env)
+{
+ bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+ target_ulong VPN;
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ uint32_t MMID = env->CP0_MemoryMapID;
+ uint32_t tlb_mmid;
+ bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;
+ r4k_tlb_t *tlb;
+ int idx;
+
+ MMID = mi ? MMID : (uint32_t) ASID;
+
+ idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
+#if defined(TARGET_MIPS64)
+ VPN &= env->SEGMask;
+#endif
+ EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0;
+ G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
+ V0 = (env->CP0_EntryLo0 & 2) != 0;
+ D0 = (env->CP0_EntryLo0 & 4) != 0;
+    XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
+    RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
+ V1 = (env->CP0_EntryLo1 & 2) != 0;
+ D1 = (env->CP0_EntryLo1 & 4) != 0;
+    XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
+    RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
+
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ /*
+ * Discard cached TLB entries, unless tlbwi is just upgrading access
+ * permissions on the current entry.
+ */
+ if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G ||
+ (!tlb->EHINV && EHINV) ||
+ (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
+ (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
+ (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
+ (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
+ r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
+ }
+
+ r4k_invalidate_tlb(env, idx, 0);
+ r4k_fill_tlb(env, idx);
+}
+
+static void r4k_helper_tlbwr(CPUMIPSState *env)
+{
+ int r = cpu_mips_get_random(env);
+
+ r4k_invalidate_tlb(env, r, 1);
+ r4k_fill_tlb(env, r);
+}
+
+static void r4k_helper_tlbp(CPUMIPSState *env)
+{
+ bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+ r4k_tlb_t *tlb;
+ target_ulong mask;
+ target_ulong tag;
+ target_ulong VPN;
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ uint32_t MMID = env->CP0_MemoryMapID;
+ uint32_t tlb_mmid;
+ int i;
+
+ MMID = mi ? MMID : (uint32_t) ASID;
+ for (i = 0; i < env->tlb->nb_tlb; i++) {
+ tlb = &env->tlb->mmu.r4k.tlb[i];
+ /* 1k pages are not supported. */
+ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ tag = env->CP0_EntryHi & ~mask;
+ VPN = tlb->VPN & ~mask;
+#if defined(TARGET_MIPS64)
+ tag &= env->SEGMask;
+#endif
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ /* Check ASID/MMID, virtual page number & size */
+ if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
+ /* TLB match */
+ env->CP0_Index = i;
+ break;
+ }
+ }
+ if (i == env->tlb->nb_tlb) {
+        /* No match: discard any shadow entries that do match. */
+ for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
+ tlb = &env->tlb->mmu.r4k.tlb[i];
+ /* 1k pages are not supported. */
+ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ tag = env->CP0_EntryHi & ~mask;
+ VPN = tlb->VPN & ~mask;
+#if defined(TARGET_MIPS64)
+ tag &= env->SEGMask;
+#endif
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ /* Check ASID/MMID, virtual page number & size */
+ if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) {
+ r4k_mips_tlb_flush_extra(env, i);
+ break;
+ }
+ }
+
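+        /* No entry matched: report the probe failure in Index.P (bit 31). */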
+ env->CP0_Index |= 0x80000000;
+ }
+}
+
+static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn)
+{
+#if defined(TARGET_MIPS64)
+ return tlb_pfn << 6;
+#else
+ return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */
+ (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */
+#endif
+}
+
+static void r4k_helper_tlbr(CPUMIPSState *env)
+{
+ bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ uint32_t MMID = env->CP0_MemoryMapID;
+ uint32_t tlb_mmid;
+ r4k_tlb_t *tlb;
+ int idx;
+
+ MMID = mi ? MMID : (uint32_t) ASID;
+ idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ /* If this will change the current ASID/MMID, flush qemu's TLB. */
+ if (MMID != tlb_mmid) {
+ cpu_mips_tlb_flush(env);
+ }
+
+ r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
+
+ if (tlb->EHINV) {
+ env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
+ env->CP0_PageMask = 0;
+ env->CP0_EntryLo0 = 0;
+ env->CP0_EntryLo1 = 0;
+ } else {
+ env->CP0_EntryHi = mi ? tlb->VPN : tlb->VPN | tlb->ASID;
+ env->CP0_MemoryMapID = tlb->MMID;
+ env->CP0_PageMask = tlb->PageMask;
+ env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
+ ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
+ ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) |
+ get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
+ env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
+ ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
+ ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) |
+ get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
+ }
+}
+
+void helper_tlbwi(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbwi(env);
+}
+
+void helper_tlbwr(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbwr(env);
+}
+
+void helper_tlbp(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbp(env);
+}
+
+void helper_tlbr(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbr(env);
+}
+
+void helper_tlbinv(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbinv(env);
+}
+
+void helper_tlbinvf(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbinvf(env);
+}
+
+static void global_invalidate_tlb(CPUMIPSState *env,
+ uint32_t invMsgVPN2,
+ uint8_t invMsgR,
+ uint32_t invMsgMMid,
+ bool invAll,
+ bool invVAMMid,
+ bool invMMid,
+ bool invVA)
+{
+ int idx;
+ r4k_tlb_t *tlb;
+ bool VAMatch;
+ bool MMidMatch;
+
+ for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ VAMatch =
+ (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask))
+#ifdef TARGET_MIPS64
+ &&
+ (extract64(env->CP0_EntryHi, 62, 2) == invMsgR)
+#endif
+ );
+ MMidMatch = tlb->MMID == invMsgMMid;
+ if ((invAll && (idx > env->CP0_Wired)) ||
+ (VAMatch && invVAMMid && (tlb->G || MMidMatch)) ||
+ (VAMatch && invVA) ||
+ (MMidMatch && !(tlb->G) && invMMid)) {
+ tlb->EHINV = 1;
+ }
+ }
+ cpu_mips_tlb_flush(env);
+}
+
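+/*
+ * GINVT: globally invalidate TLB entries on all CPUs.  The type operand
+ * selects invalidate-all (above Wired), by VA, by MMID, or by VA+MMID.
+ */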
+void helper_ginvt(CPUMIPSState *env, target_ulong arg, uint32_t type)
+{
+ bool invAll = type == 0;
+ bool invVA = type == 1;
+ bool invMMid = type == 2;
+ bool invVAMMid = type == 3;
+ uint32_t invMsgVPN2 = arg & (TARGET_PAGE_MASK << 1);
+ uint8_t invMsgR = 0;
+ uint32_t invMsgMMid = env->CP0_MemoryMapID;
+ CPUState *other_cs = first_cpu;
+
+#ifdef TARGET_MIPS64
+ invMsgR = extract64(arg, 62, 2);
+#endif
+
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+ global_invalidate_tlb(&other_cpu->env, invMsgVPN2, invMsgR, invMsgMMid,
+ invAll, invVAMMid, invMMid, invVA);
+ }
+}
+
+/* no MMU emulation */
+static int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
+ target_ulong address, MMUAccessType access_type)
+{
+ *physical = address;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return TLBRET_MATCH;
+}
+
+/* fixed mapping MMU emulation */
+static int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical,
+ int *prot, target_ulong address,
+ MMUAccessType access_type)
+{
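+    /*
+     * useg/kuseg maps to a fixed 1 GiB offset unless Status.ERL is set,
+     * kseg0/kseg1 map to the low 512 MiB, everything else is identity.
+     */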
+ if (address <= (int32_t)0x7FFFFFFFUL) {
+ if (!(env->CP0_Status & (1 << CP0St_ERL))) {
+ *physical = address + 0x40000000UL;
+ } else {
+ *physical = address;
+ }
+ } else if (address <= (int32_t)0xBFFFFFFFUL) {
+ *physical = address & 0x1FFFFFFF;
+ } else {
+ *physical = address;
+ }
+
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return TLBRET_MATCH;
+}
+
+/* MIPS32/MIPS64 R4000-style MMU emulation */
+static int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
+ target_ulong address, MMUAccessType access_type)
+{
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ uint32_t MMID = env->CP0_MemoryMapID;
+ bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+ uint32_t tlb_mmid;
+ int i;
+
+ MMID = mi ? MMID : (uint32_t) ASID;
+
+ for (i = 0; i < env->tlb->tlb_in_use; i++) {
+ r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
+ /* 1k pages are not supported. */
+ target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ target_ulong tag = address & ~mask;
+ target_ulong VPN = tlb->VPN & ~mask;
+#if defined(TARGET_MIPS64)
+ tag &= env->SEGMask;
+#endif
+
+ /* Check ASID/MMID, virtual page number & size */
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
+ /* TLB match */
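+            /* n selects the even (0) or odd (1) page of the TLB pair. */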
+ int n = !!(address & mask & ~(mask >> 1));
+ /* Check access rights */
+ if (!(n ? tlb->V1 : tlb->V0)) {
+ return TLBRET_INVALID;
+ }
+ if (access_type == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
+ return TLBRET_XI;
+ }
+ if (access_type == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
+ return TLBRET_RI;
+ }
+ if (access_type != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
+ *physical = tlb->PFN[n] | (address & (mask >> 1));
+ *prot = PAGE_READ;
+ if (n ? tlb->D1 : tlb->D0) {
+ *prot |= PAGE_WRITE;
+ }
+ if (!(n ? tlb->XI1 : tlb->XI0)) {
+ *prot |= PAGE_EXEC;
+ }
+ return TLBRET_MATCH;
+ }
+ return TLBRET_DIRTY;
+ }
+ }
+ return TLBRET_NOMATCH;
+}
+
+static void no_mmu_init(CPUMIPSState *env, const mips_def_t *def)
+{
+ env->tlb->nb_tlb = 1;
+ env->tlb->map_address = &no_mmu_map_address;
+}
+
+static void fixed_mmu_init(CPUMIPSState *env, const mips_def_t *def)
+{
+ env->tlb->nb_tlb = 1;
+ env->tlb->map_address = &fixed_mmu_map_address;
+}
+
+static void r4k_mmu_init(CPUMIPSState *env, const mips_def_t *def)
+{
+ env->tlb->nb_tlb = 1 + ((def->CP0_Config1 >> CP0C1_MMU) & 63);
+ env->tlb->map_address = &r4k_map_address;
+ env->tlb->helper_tlbwi = r4k_helper_tlbwi;
+ env->tlb->helper_tlbwr = r4k_helper_tlbwr;
+ env->tlb->helper_tlbp = r4k_helper_tlbp;
+ env->tlb->helper_tlbr = r4k_helper_tlbr;
+ env->tlb->helper_tlbinv = r4k_helper_tlbinv;
+ env->tlb->helper_tlbinvf = r4k_helper_tlbinvf;
+}
+
+void mmu_init(CPUMIPSState *env, const mips_def_t *def)
+{
+ env->tlb = g_malloc0(sizeof(CPUMIPSTLBContext));
+
+ switch (def->mmu_type) {
+ case MMU_TYPE_NONE:
+ no_mmu_init(env, def);
+ break;
+ case MMU_TYPE_R4000:
+ r4k_mmu_init(env, def);
+ break;
+ case MMU_TYPE_FMT:
+ fixed_mmu_init(env, def);
+ break;
+ case MMU_TYPE_R3000:
+ case MMU_TYPE_R6000:
+ case MMU_TYPE_R8000:
+ default:
+ cpu_abort(env_cpu(env), "MMU type not supported\n");
+ }
+}
+
+void cpu_mips_tlb_flush(CPUMIPSState *env)
+{
+ /* Flush qemu's TLB and discard all shadowed entries. */
+ tlb_flush(env_cpu(env));
+ env->tlb->tlb_in_use = env->tlb->nb_tlb;
+}
+
+static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
+ MMUAccessType access_type, int tlb_error)
+{
+ CPUState *cs = env_cpu(env);
+ int exception = 0, error_code = 0;
+
+ if (access_type == MMU_INST_FETCH) {
+ error_code |= EXCP_INST_NOTAVAIL;
+ }
+
+ switch (tlb_error) {
+ default:
+ case TLBRET_BADADDR:
+ /* Reference to kernel address from user mode or supervisor mode */
+ /* Reference to supervisor address from user mode */
+ if (access_type == MMU_DATA_STORE) {
+ exception = EXCP_AdES;
+ } else {
+ exception = EXCP_AdEL;
+ }
+ break;
+ case TLBRET_NOMATCH:
+ /* No TLB match for a mapped address */
+ if (access_type == MMU_DATA_STORE) {
+ exception = EXCP_TLBS;
+ } else {
+ exception = EXCP_TLBL;
+ }
+ error_code |= EXCP_TLB_NOMATCH;
+ break;
+ case TLBRET_INVALID:
+ /* TLB match with no valid bit */
+ if (access_type == MMU_DATA_STORE) {
+ exception = EXCP_TLBS;
+ } else {
+ exception = EXCP_TLBL;
+ }
+ break;
+ case TLBRET_DIRTY:
+ /* TLB match but 'D' bit is cleared */
+ exception = EXCP_LTLBL;
+ break;
+ case TLBRET_XI:
+ /* Execute-Inhibit Exception */
+ if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
+ exception = EXCP_TLBXI;
+ } else {
+ exception = EXCP_TLBL;
+ }
+ break;
+ case TLBRET_RI:
+ /* Read-Inhibit Exception */
+ if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
+ exception = EXCP_TLBRI;
+ } else {
+ exception = EXCP_TLBL;
+ }
+ break;
+ }
+ /* Raise exception */
+ if (!(env->hflags & MIPS_HFLAG_DM)) {
+ env->CP0_BadVAddr = address;
+ }
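+    /* Update Context.BadVPN2 and EntryHi.VPN2 from the faulting address. */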
+ env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
+ ((address >> 9) & 0x007ffff0);
+ env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) |
+ (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) |
+ (address & (TARGET_PAGE_MASK << 1));
+#if defined(TARGET_MIPS64)
+ env->CP0_EntryHi &= env->SEGMask;
+ env->CP0_XContext =
+ (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) | /* PTEBase */
+ (extract64(address, 62, 2) << (env->SEGBITS - 9)) | /* R */
+ (extract64(address, 13, env->SEGBITS - 13) << 4); /* BadVPN2 */
+#endif
+ cs->exception_index = exception;
+ env->error_code = error_code;
+}
+
+#if !defined(TARGET_MIPS64)
+
+/*
+ * Perform hardware page table walk
+ *
+ * Memory accesses are performed using the KERNEL privilege level.
+ * Synchronous exceptions detected on memory accesses cause a silent exit
+ * from page table walking, resulting in a TLB or XTLB Refill exception.
+ *
+ * Implementations are not required to support page table walk memory
+ * accesses from mapped memory regions. When an unsupported access is
+ * attempted, a silent exit is taken, resulting in a TLB or XTLB Refill
+ * exception.
+ *
+ * Note that if an exception is raised by the AddressTranslation or
+ * LoadMemory functions, that exception is not taken; instead a silent
+ * exit is taken, resulting in a TLB or XTLB Refill exception.
+ */
+
+static bool get_pte(CPUMIPSState *env, uint64_t vaddr, MemOp op,
+ uint64_t *pte, unsigned ptw_mmu_idx)
+{
+ MemOpIdx oi;
+
+ if ((vaddr & (memop_size(op) - 1)) != 0) {
+ return false;
+ }
+
+ oi = make_memop_idx(op | mo_endian_env(env), ptw_mmu_idx);
+ if (op == MO_64) {
+ *pte = cpu_ldq_mmu(env, vaddr, oi, 0);
+ } else {
+ *pte = cpu_ldl_mmu(env, vaddr, oi, 0);
+ }
+
+ return true;
+}
+
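+/*
+ * Convert a memory PTE into EntryLo layout: the RI/XI bits sit at bit
+ * position PTEI in the PTE and are moved up to their EntryLo positions.
+ */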
+static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry,
+ MemOp op, int ptei)
+{
+ unsigned entry_size = memop_size(op) << 3;
+ uint64_t result = entry;
+ uint64_t rixi;
+ if (ptei > entry_size) {
+ ptei -= 32;
+ }
+ result >>= (ptei - 2);
+ rixi = result & 3;
+ result >>= 2;
+ result |= rixi << CP0EnLo_XI;
+ return result;
+}
+
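+/*
+ * Walk one directory level.  Returns 0 on a silent abort, 1 when a huge
+ * page leaf was found (pw_entrylo0/1 are filled in), or 2 when *vaddr
+ * now holds the next-level directory address.
+ */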
+static int walk_directory(CPUMIPSState *env, uint64_t *vaddr,
+ int directory_index, bool *huge_page, bool *hgpg_directory_hit,
+ uint64_t *pw_entrylo0, uint64_t *pw_entrylo1,
+ MemOp directory_mop, MemOp leaf_mop, int ptw_mmu_idx)
+{
+ int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1;
+ int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F;
+ int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
+ int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
+ uint64_t entry;
+ uint64_t paddr;
+ int prot;
+ uint64_t lsb = 0;
+ uint64_t w = 0;
+
+ if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD,
+ ptw_mmu_idx) != TLBRET_MATCH) {
+ /* wrong base address */
+ return 0;
+ }
+ if (!get_pte(env, *vaddr, directory_mop, &entry, ptw_mmu_idx)) {
+ return 0;
+ }
+
+ if ((entry & (1 << psn)) && hugepg) {
+ *huge_page = true;
+ *hgpg_directory_hit = true;
+ entry = get_tlb_entry_layout(env, entry, leaf_mop, pf_ptew);
+ w = directory_index - 1;
+ if (directory_index & 0x1) {
+ /* Generate adjacent page from same PTE for odd TLB page */
+ lsb = BIT_ULL(w) >> 6;
+ *pw_entrylo0 = entry & ~lsb; /* even page */
+ *pw_entrylo1 = entry | lsb; /* odd page */
+ } else if (dph) {
+ int oddpagebit = 1 << leaf_mop;
+ uint64_t vaddr2 = *vaddr ^ oddpagebit;
+ if (*vaddr & oddpagebit) {
+ *pw_entrylo1 = entry;
+ } else {
+ *pw_entrylo0 = entry;
+ }
+ if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD,
+ ptw_mmu_idx) != TLBRET_MATCH) {
+ return 0;
+ }
+ if (!get_pte(env, vaddr2, leaf_mop, &entry, ptw_mmu_idx)) {
+ return 0;
+ }
+ entry = get_tlb_entry_layout(env, entry, leaf_mop, pf_ptew);
+ if (*vaddr & oddpagebit) {
+ *pw_entrylo0 = entry;
+ } else {
+ *pw_entrylo1 = entry;
+ }
+ } else {
+ return 0;
+ }
+ return 1;
+ } else {
+ *vaddr = entry;
+ return 2;
+ }
+}
+
+static bool page_table_walk_refill(CPUMIPSState *env, vaddr address,
+ int ptw_mmu_idx)
+{
+ int gdw = (env->CP0_PWSize >> CP0PS_GDW) & 0x3F;
+ int udw = (env->CP0_PWSize >> CP0PS_UDW) & 0x3F;
+ int mdw = (env->CP0_PWSize >> CP0PS_MDW) & 0x3F;
+ int ptw = (env->CP0_PWSize >> CP0PS_PTW) & 0x3F;
+ int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;
+
+ /* Initial values */
+ bool huge_page = false;
+ bool hgpg_bdhit = false;
+ bool hgpg_gdhit = false;
+ bool hgpg_udhit = false;
+ bool hgpg_mdhit = false;
+
+ int32_t pw_pagemask = 0;
+ target_ulong pw_entryhi = 0;
+ uint64_t pw_entrylo0 = 0;
+ uint64_t pw_entrylo1 = 0;
+
+    /*
+     * Native pointer size; for the 32-bit architectures this bit is
+     * fixed to 0.
+     */
+ MemOp native_op = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? MO_32 : MO_64;
+
+ /* Indices from PWField */
+ int pf_gdw = (env->CP0_PWField >> CP0PF_GDW) & 0x3F;
+ int pf_udw = (env->CP0_PWField >> CP0PF_UDW) & 0x3F;
+ int pf_mdw = (env->CP0_PWField >> CP0PF_MDW) & 0x3F;
+ int pf_ptw = (env->CP0_PWField >> CP0PF_PTW) & 0x3F;
+ int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
+
+ /* Indices computed from faulting address */
+ int gindex = (address >> pf_gdw) & ((1 << gdw) - 1);
+ int uindex = (address >> pf_udw) & ((1 << udw) - 1);
+ int mindex = (address >> pf_mdw) & ((1 << mdw) - 1);
+ int ptindex = (address >> pf_ptw) & ((1 << ptw) - 1);
+
+ /* Other HTW configs */
+ int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
+ MemOp directory_mop, leaf_mop;
+
+ /* Offsets into tables */
+ unsigned goffset, uoffset, moffset, ptoffset0, ptoffset1;
+
+ /* Starting address - Page Table Base */
+ uint64_t vaddr = env->CP0_PWBase;
+
+ uint64_t dir_entry;
+ uint64_t paddr;
+ int prot;
+ int m;
+
+ if (!(env->CP0_Config3 & (1 << CP0C3_PW))) {
+ /* walker is unimplemented */
+ return false;
+ }
+ if (!(env->CP0_PWCtl & (1 << CP0PC_PWEN))) {
+ /* walker is disabled */
+ return false;
+ }
+ if (!(gdw > 0 || udw > 0 || mdw > 0)) {
+ /* no structure to walk */
+ return false;
+ }
+ if (ptew > 1) {
+ return false;
+ }
+
+ /* HTW Shift values (depend on entry size) */
+ directory_mop = (hugepg && (ptew == 1)) ? native_op + 1 : native_op;
+ leaf_mop = (ptew == 1) ? native_op + 1 : native_op;
+
+ goffset = gindex << directory_mop;
+ uoffset = uindex << directory_mop;
+ moffset = mindex << directory_mop;
+ ptoffset0 = (ptindex >> 1) << (leaf_mop + 1);
+ ptoffset1 = ptoffset0 | (1 << (leaf_mop));
+
+ /* Global Directory */
+ if (gdw > 0) {
+ vaddr |= goffset;
+ switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit,
+ &pw_entrylo0, &pw_entrylo1,
+ directory_mop, leaf_mop, ptw_mmu_idx))
+ {
+ case 0:
+ return false;
+ case 1:
+ goto refill;
+ case 2:
+ default:
+ break;
+ }
+ }
+
+ /* Upper directory */
+ if (udw > 0) {
+ vaddr |= uoffset;
+ switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit,
+ &pw_entrylo0, &pw_entrylo1,
+ directory_mop, leaf_mop, ptw_mmu_idx))
+ {
+ case 0:
+ return false;
+ case 1:
+ goto refill;
+ case 2:
+ default:
+ break;
+ }
+ }
+
+ /* Middle directory */
+ if (mdw > 0) {
+ vaddr |= moffset;
+ switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit,
+ &pw_entrylo0, &pw_entrylo1,
+ directory_mop, leaf_mop, ptw_mmu_idx))
+ {
+ case 0:
+ return false;
+ case 1:
+ goto refill;
+ case 2:
+ default:
+ break;
+ }
+ }
+
+ /* Leaf Level Page Table - First half of PTE pair */
+ vaddr |= ptoffset0;
+ if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
+ ptw_mmu_idx) != TLBRET_MATCH) {
+ return false;
+ }
+ if (!get_pte(env, vaddr, leaf_mop, &dir_entry, ptw_mmu_idx)) {
+ return false;
+ }
+ dir_entry = get_tlb_entry_layout(env, dir_entry, leaf_mop, pf_ptew);
+ pw_entrylo0 = dir_entry;
+
+ /* Leaf Level Page Table - Second half of PTE pair */
+ vaddr |= ptoffset1;
+ if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
+ ptw_mmu_idx) != TLBRET_MATCH) {
+ return false;
+ }
+ if (!get_pte(env, vaddr, leaf_mop, &dir_entry, ptw_mmu_idx)) {
+ return false;
+ }
+ dir_entry = get_tlb_entry_layout(env, dir_entry, leaf_mop, pf_ptew);
+ pw_entrylo1 = dir_entry;
+
+refill:
+
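+    /*
+     * Compute the PageMask: by default it spans one PTE pair; if a huge
+     * page was hit, it spans the directory level where it was found.
+     */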
+ m = (1 << pf_ptw) - 1;
+
+ if (huge_page) {
+ switch (hgpg_bdhit << 3 | hgpg_gdhit << 2 | hgpg_udhit << 1 |
+ hgpg_mdhit)
+ {
+ case 4:
+ m = (1 << pf_gdw) - 1;
+ if (pf_gdw & 1) {
+ m >>= 1;
+ }
+ break;
+ case 2:
+ m = (1 << pf_udw) - 1;
+ if (pf_udw & 1) {
+ m >>= 1;
+ }
+ break;
+ case 1:
+ m = (1 << pf_mdw) - 1;
+ if (pf_mdw & 1) {
+ m >>= 1;
+ }
+ break;
+ }
+ }
+ pw_pagemask = m >> TARGET_PAGE_BITS;
+ pw_pagemask = compute_pagemask(pw_pagemask << CP0PM_MASK);
+ pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF);
+ {
+ target_ulong tmp_entryhi = env->CP0_EntryHi;
+ int32_t tmp_pagemask = env->CP0_PageMask;
+ uint64_t tmp_entrylo0 = env->CP0_EntryLo0;
+ uint64_t tmp_entrylo1 = env->CP0_EntryLo1;
+
+ env->CP0_EntryHi = pw_entryhi;
+ env->CP0_PageMask = pw_pagemask;
+ env->CP0_EntryLo0 = pw_entrylo0;
+ env->CP0_EntryLo1 = pw_entrylo1;
+
+ /*
+ * The hardware page walker inserts a page into the TLB in a manner
+ * identical to a TLBWR instruction as executed by the software refill
+ * handler.
+ */
+ r4k_helper_tlbwr(env);
+
+ env->CP0_EntryHi = tmp_entryhi;
+ env->CP0_PageMask = tmp_pagemask;
+ env->CP0_EntryLo0 = tmp_entrylo0;
+ env->CP0_EntryLo1 = tmp_entrylo1;
+ }
+ return true;
+}
+#endif
+
+bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ CPUMIPSState *env = cpu_env(cs);
+ hwaddr physical;
+ int prot;
+ int ret = TLBRET_BADADDR;
+
+ /* data access */
+    /* XXX: determine the correct access type via cpu_restore_state() */
+ ret = get_physical_address(env, &physical, &prot, address,
+ access_type, mmu_idx);
+ switch (ret) {
+ case TLBRET_MATCH:
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
+ " prot %d\n", __func__, address, physical, prot);
+ break;
+ default:
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
+ ret);
+ break;
+ }
+ if (ret == TLBRET_MATCH) {
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
+ physical & TARGET_PAGE_MASK, prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+#if !defined(TARGET_MIPS64)
+ if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
+ /*
+ * Memory reads during hardware page table walking are performed
+ * as if they were kernel-mode load instructions.
+ */
+ int ptw_mmu_idx = (env->hflags & MIPS_HFLAG_ERL ?
+ MMU_ERL_IDX : MMU_KERNEL_IDX);
+
+ if (page_table_walk_refill(env, address, ptw_mmu_idx)) {
+ ret = get_physical_address(env, &physical, &prot, address,
+ access_type, mmu_idx);
+ if (ret == TLBRET_MATCH) {
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
+ physical & TARGET_PAGE_MASK, prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return true;
+ }
+ }
+ }
+#endif
+ if (probe) {
+ return false;
+ }
+
+ raise_mmu_exception(env, address, access_type, ret);
+ do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
+}
+
+hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address,
+ MMUAccessType access_type, uintptr_t retaddr)
+{
+ hwaddr physical;
+ int prot;
+ int ret = 0;
+ CPUState *cs = env_cpu(env);
+
+ /* data access */
+ ret = get_physical_address(env, &physical, &prot, address, access_type,
+ mips_env_mmu_index(env));
+ if (ret == TLBRET_MATCH) {
+ return physical;
+ }
+
+ raise_mmu_exception(env, address, access_type, ret);
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
+static void set_hflags_for_handler(CPUMIPSState *env)
+{
+ /* Exception handlers are entered in 32-bit mode. */
+ env->hflags &= ~(MIPS_HFLAG_M16);
+ /* ...except that microMIPS lets you choose. */
+ if (env->insn_flags & ASE_MICROMIPS) {
+ env->hflags |= (!!(env->CP0_Config3 &
+ (1 << CP0C3_ISA_ON_EXC))
+ << MIPS_HFLAG_M16_SHIFT);
+ }
+}
+
+static inline void set_badinstr_registers(CPUMIPSState *env)
+{
+ if (env->insn_flags & ISA_NANOMIPS32) {
+ if (env->CP0_Config3 & (1 << CP0C3_BI)) {
+ uint32_t instr = (cpu_lduw_code(env, env->active_tc.PC)) << 16;
+ if ((instr & 0x10000000) == 0) {
+ instr |= cpu_lduw_code(env, env->active_tc.PC + 2);
+ }
+ env->CP0_BadInstr = instr;
+
+ if ((instr & 0xFC000000) == 0x60000000) {
+ instr = cpu_lduw_code(env, env->active_tc.PC + 4) << 16;
+ env->CP0_BadInstrX = instr;
+ }
+ }
+ return;
+ }
+
+ if (env->hflags & MIPS_HFLAG_M16) {
+ /* TODO: add BadInstr support for microMIPS */
+ return;
+ }
+ if (env->CP0_Config3 & (1 << CP0C3_BI)) {
+ env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC);
+ }
+ if ((env->CP0_Config3 & (1 << CP0C3_BP)) &&
+ (env->hflags & MIPS_HFLAG_BMASK)) {
+ env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4);
+ }
+}
+
+void mips_cpu_do_interrupt(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ bool update_badinstr = 0;
+ target_ulong offset;
+ int cause = -1;
+
+ if (qemu_loglevel_mask(CPU_LOG_INT)
+ && cs->exception_index != EXCP_EXT_INTERRUPT) {
+ qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx
+ " %s exception\n",
+ __func__, env->active_tc.PC, env->CP0_EPC,
+ mips_exception_name(cs->exception_index));
+ }
+ if (cs->exception_index == EXCP_EXT_INTERRUPT &&
+ (env->hflags & MIPS_HFLAG_DM)) {
+ cs->exception_index = EXCP_DINT;
+ }
+ offset = 0x180;
+ switch (cs->exception_index) {
+ case EXCP_SEMIHOST:
+ cs->exception_index = EXCP_NONE;
+ mips_semihosting(env);
+ env->active_tc.PC += env->error_code;
+ return;
+ case EXCP_DSS:
+ env->CP0_Debug |= 1 << CP0DB_DSS;
+ /*
+ * Debug single step cannot be raised inside a delay slot and
+ * resume will always occur on the next instruction
+ * (but we assume the pc has always been updated during
+ * code translation).
+ */
+ env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16);
+ goto enter_debug_mode;
+ case EXCP_DINT:
+ env->CP0_Debug |= 1 << CP0DB_DINT;
+ goto set_DEPC;
+ case EXCP_DIB:
+ env->CP0_Debug |= 1 << CP0DB_DIB;
+ goto set_DEPC;
+ case EXCP_DBp:
+ env->CP0_Debug |= 1 << CP0DB_DBp;
+ /* Setup DExcCode - SDBBP instruction */
+ env->CP0_Debug = (env->CP0_Debug & ~(0x1fULL << CP0DB_DEC)) |
+ (9 << CP0DB_DEC);
+ goto set_DEPC;
+ case EXCP_DDBS:
+ env->CP0_Debug |= 1 << CP0DB_DDBS;
+ goto set_DEPC;
+ case EXCP_DDBL:
+ env->CP0_Debug |= 1 << CP0DB_DDBL;
+ set_DEPC:
+ env->CP0_DEPC = exception_resume_pc(env);
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ enter_debug_mode:
+ if (env->insn_flags & ISA_MIPS3) {
+ env->hflags |= MIPS_HFLAG_64;
+ if (!(env->insn_flags & ISA_MIPS_R6) ||
+ env->CP0_Status & (1 << CP0St_KX)) {
+ env->hflags &= ~MIPS_HFLAG_AWRAP;
+ }
+ }
+ env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0;
+ env->hflags &= ~(MIPS_HFLAG_KSU);
+ /* EJTAG probe trap enable is not implemented... */
+ if (!(env->CP0_Status & (1 << CP0St_EXL))) {
+ env->CP0_Cause &= ~(1U << CP0Ca_BD);
+ }
+ env->active_tc.PC = env->exception_base + 0x480;
+ set_hflags_for_handler(env);
+ break;
+ case EXCP_RESET:
+ cpu_reset(CPU(cpu));
+ break;
+ case EXCP_SRESET:
+ env->CP0_Status |= (1 << CP0St_SR);
+ memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo));
+ goto set_error_EPC;
+ case EXCP_NMI:
+ env->CP0_Status |= (1 << CP0St_NMI);
+ set_error_EPC:
+ env->CP0_ErrorEPC = exception_resume_pc(env);
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
+ if (env->insn_flags & ISA_MIPS3) {
+ env->hflags |= MIPS_HFLAG_64;
+ if (!(env->insn_flags & ISA_MIPS_R6) ||
+ env->CP0_Status & (1 << CP0St_KX)) {
+ env->hflags &= ~MIPS_HFLAG_AWRAP;
+ }
+ }
+ env->hflags |= MIPS_HFLAG_CP0;
+ env->hflags &= ~(MIPS_HFLAG_KSU);
+ if (!(env->CP0_Status & (1 << CP0St_EXL))) {
+ env->CP0_Cause &= ~(1U << CP0Ca_BD);
+ }
+ env->active_tc.PC = env->exception_base;
+ set_hflags_for_handler(env);
+ break;
+ case EXCP_EXT_INTERRUPT:
+ cause = 0;
+ if (env->CP0_Cause & (1 << CP0Ca_IV)) {
+ uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f;
+
+ if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) {
+ offset = 0x200;
+ } else {
+ uint32_t vector = 0;
+ uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP;
+
+ if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
+ /*
+ * For VEIC mode, the external interrupt controller feeds
+ * the vector through the CP0Cause IP lines.
+ */
+ vector = pending;
+ } else {
+ /*
+ * Vectored Interrupts
+ * Mask with Status.IM7-IM0 to get enabled interrupts.
+ */
+ pending &= (env->CP0_Status >> CP0St_IM) & 0xff;
+ /* Find the highest-priority interrupt. */
+ while (pending >>= 1) {
+ vector++;
+ }
+ }
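+                /* IntCtl.VS gives the vector spacing in units of 32 bytes. */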
+ offset = 0x200 + (vector * (spacing << 5));
+ }
+ }
+ goto set_EPC;
+ case EXCP_LTLBL:
+ cause = 1;
+ update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
+ goto set_EPC;
+ case EXCP_TLBL:
+ cause = 2;
+ update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
+ if ((env->error_code & EXCP_TLB_NOMATCH) &&
+ !(env->CP0_Status & (1 << CP0St_EXL))) {
+#if defined(TARGET_MIPS64)
+ int R = env->CP0_BadVAddr >> 62;
+ int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
+ int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
+
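+            /* Use the XTLB refill vector for 64-bit address regions. */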
+ if ((R != 0 || UX) && (R != 3 || KX) &&
+ (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
+ offset = 0x080;
+ } else {
+#endif
+ offset = 0x000;
+#if defined(TARGET_MIPS64)
+ }
+#endif
+ }
+ goto set_EPC;
+ case EXCP_TLBS:
+ cause = 3;
+ update_badinstr = 1;
+ if ((env->error_code & EXCP_TLB_NOMATCH) &&
+ !(env->CP0_Status & (1 << CP0St_EXL))) {
+#if defined(TARGET_MIPS64)
+ int R = env->CP0_BadVAddr >> 62;
+ int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
+ int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
+
+ if ((R != 0 || UX) && (R != 3 || KX) &&
+ (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
+ offset = 0x080;
+ } else {
+#endif
+ offset = 0x000;
+#if defined(TARGET_MIPS64)
+ }
+#endif
+ }
+ goto set_EPC;
+ case EXCP_AdEL:
+ cause = 4;
+ update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
+ goto set_EPC;
+ case EXCP_AdES:
+ cause = 5;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_IBE:
+ cause = 6;
+ goto set_EPC;
+ case EXCP_DBE:
+ cause = 7;
+ goto set_EPC;
+ case EXCP_SYSCALL:
+ cause = 8;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_BREAK:
+ cause = 9;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_RI:
+ cause = 10;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_CpU:
+ cause = 11;
+ update_badinstr = 1;
+ env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
+ (env->error_code << CP0Ca_CE);
+ goto set_EPC;
+ case EXCP_OVERFLOW:
+ cause = 12;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_TRAP:
+ cause = 13;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_MSAFPE:
+ cause = 14;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_FPE:
+ cause = 15;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_C2E:
+ cause = 18;
+ goto set_EPC;
+ case EXCP_TLBRI:
+ cause = 19;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_TLBXI:
+ cause = 20;
+ goto set_EPC;
+ case EXCP_MSADIS:
+ cause = 21;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_MDMX:
+ cause = 22;
+ goto set_EPC;
+ case EXCP_DWATCH:
+ cause = 23;
+ /* XXX: TODO: manage deferred watch exceptions */
+ goto set_EPC;
+ case EXCP_MCHECK:
+ cause = 24;
+ goto set_EPC;
+ case EXCP_THREAD:
+ cause = 25;
+ goto set_EPC;
+ case EXCP_DSPDIS:
+ cause = 26;
+ goto set_EPC;
+ case EXCP_CACHE:
+ cause = 30;
+ offset = 0x100;
+ set_EPC:
+ if (!(env->CP0_Status & (1 << CP0St_EXL))) {
+ env->CP0_EPC = exception_resume_pc(env);
+ if (update_badinstr) {
+ set_badinstr_registers(env);
+ }
+ if (env->hflags & MIPS_HFLAG_BMASK) {
+ env->CP0_Cause |= (1U << CP0Ca_BD);
+ } else {
+ env->CP0_Cause &= ~(1U << CP0Ca_BD);
+ }
+ env->CP0_Status |= (1 << CP0St_EXL);
+ if (env->insn_flags & ISA_MIPS3) {
+ env->hflags |= MIPS_HFLAG_64;
+ if (!(env->insn_flags & ISA_MIPS_R6) ||
+ env->CP0_Status & (1 << CP0St_KX)) {
+ env->hflags &= ~MIPS_HFLAG_AWRAP;
+ }
+ }
+ env->hflags |= MIPS_HFLAG_CP0;
+ env->hflags &= ~(MIPS_HFLAG_KSU);
+ }
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ if (env->CP0_Status & (1 << CP0St_BEV)) {
+ env->active_tc.PC = env->exception_base + 0x200;
+ } else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) &&
+ env->CP0_Config5 & (1 << CP0C5_CV))) {
+ /* Force KSeg1 for cache errors */
+ env->active_tc.PC = KSEG1_BASE | (env->CP0_EBase & 0x1FFFF000);
+ } else {
+ env->active_tc.PC = env->CP0_EBase & ~0xfff;
+ }
+
+ env->active_tc.PC += offset;
+ set_hflags_for_handler(env);
+ env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) |
+ (cause << CP0Ca_EC);
+ break;
+ default:
+ abort();
+ }
+ if (qemu_loglevel_mask(CPU_LOG_INT)
+ && cs->exception_index != EXCP_EXT_INTERRUPT) {
+ qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
+ " S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
+ __func__, env->active_tc.PC, env->CP0_EPC, cause,
+ env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
+ env->CP0_DEPC);
+ }
+ cs->exception_index = EXCP_NONE;
+}
+
+bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ CPUMIPSState *env = cpu_env(cs);
+
+ if (cpu_mips_hw_interrupts_enabled(env) &&
+ cpu_mips_hw_interrupts_pending(env)) {
+ /* Raise it */
+ cs->exception_index = EXCP_EXT_INTERRUPT;
+ env->error_code = 0;
+ mips_cpu_do_interrupt(cs);
+ return true;
+ }
+ }
+ return false;
+}
+
+void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra)
+{
+ CPUState *cs = env_cpu(env);
+ r4k_tlb_t *tlb;
+ target_ulong addr;
+ target_ulong end;
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ uint32_t MMID = env->CP0_MemoryMapID;
+ bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
+ uint32_t tlb_mmid;
+ target_ulong mask;
+
+ MMID = mi ? MMID : (uint32_t) ASID;
+
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ /*
+ * The qemu TLB is flushed when the ASID/MMID changes, so no need to
+ * flush these entries again.
+ */
+ tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
+ if (tlb->G == 0 && tlb_mmid != MMID) {
+ return;
+ }
+
+ if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
+ /*
+ * For tlbwr, we can shadow the discarded entry into
+ * a new (fake) TLB entry, as long as the guest can not
+ * tell that it's there.
+ */
+ env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
+ env->tlb->tlb_in_use++;
+ return;
+ }
+
+ /* 1k pages are not supported. */
+ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ if (tlb->V0) {
+ addr = tlb->VPN & ~mask;
+#if defined(TARGET_MIPS64)
+ if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
+ addr |= 0x3FFFFF0000000000ULL;
+ }
+#endif
+ end = addr | (mask >> 1);
+ while (addr < end) {
+ tlb_flush_page(cs, addr);
+ addr += TARGET_PAGE_SIZE;
+ }
+ }
+ if (tlb->V1) {
+ addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
+#if defined(TARGET_MIPS64)
+ if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
+ addr |= 0x3FFFFF0000000000ULL;
+ }
+#endif
+ end = addr | mask;
+ while (addr - 1 < end) {
+ tlb_flush_page(cs, addr);
+ addr += TARGET_PAGE_SIZE;
+ }
+ }
+}