Diffstat (limited to 'target')
-rw-r--r--  target/alpha/cpu.c | 7
-rw-r--r--  target/alpha/cpu.h | 21
-rw-r--r--  target/alpha/helper.c | 39
-rw-r--r--  target/alpha/mem_helper.c | 30
-rw-r--r--  target/arm/cpu.c | 7
-rw-r--r--  target/arm/cpu_tcg.c | 7
-rw-r--r--  target/arm/gdbstub.c | 25
-rw-r--r--  target/arm/hvf/hvf.c | 7
-rw-r--r--  target/arm/internals.h | 8
-rw-r--r--  target/arm/mte_helper.c | 6
-rw-r--r--  target/arm/sve_helper.c | 2
-rw-r--r--  target/arm/tlb_helper.c | 42
-rw-r--r--  target/arm/translate-a32.h | 3
-rw-r--r--  target/arm/translate-sve.c | 17
-rw-r--r--  target/arm/translate.c | 27
-rw-r--r--  target/cris/cpu.c | 4
-rw-r--r--  target/cris/cpu.h | 8
-rw-r--r--  target/cris/helper.c | 18
-rw-r--r--  target/cris/meson.build | 7
-rw-r--r--  target/hexagon/cpu.c | 23
-rw-r--r--  target/hppa/cpu.c | 2
-rw-r--r--  target/hppa/cpu.h | 2
-rw-r--r--  target/hppa/mem_helper.c | 15
-rw-r--r--  target/hppa/meson.build | 6
-rw-r--r--  target/i386/monitor.c | 6
-rw-r--r--  target/i386/tcg/helper-tcg.h | 6
-rw-r--r--  target/i386/tcg/tcg-cpu.c | 3
-rw-r--r--  target/i386/tcg/user/excp_helper.c | 23
-rw-r--r--  target/m68k/cpu.c | 2
-rw-r--r--  target/m68k/helper.c | 6
-rw-r--r--  target/microblaze/cpu.c | 2
-rw-r--r--  target/microblaze/cpu.h | 8
-rw-r--r--  target/microblaze/helper.c | 13
-rw-r--r--  target/microblaze/translate.c | 16
-rw-r--r--  target/mips/cpu-defs.c.inc | 2
-rw-r--r--  target/mips/cpu.c | 2
-rw-r--r--  target/mips/tcg/meson.build | 3
-rw-r--r--  target/mips/tcg/msa.decode | 243
-rw-r--r--  target/mips/tcg/msa_helper.c | 64
-rw-r--r--  target/mips/tcg/msa_translate.c | 2623
-rw-r--r--  target/mips/tcg/tcg-internal.h | 7
-rw-r--r--  target/mips/tcg/user/meson.build | 3
-rw-r--r--  target/mips/tcg/user/tlb_helper.c | 59
-rw-r--r--  target/nios2/cpu.c | 6
-rw-r--r--  target/nios2/cpu.h | 6
-rw-r--r--  target/nios2/helper.c | 7
-rw-r--r--  target/openrisc/cpu.c | 2
-rw-r--r--  target/openrisc/cpu.h | 7
-rw-r--r--  target/openrisc/meson.build | 2
-rw-r--r--  target/openrisc/mmu.c | 9
-rw-r--r--  target/ppc/cpu.h | 3
-rw-r--r--  target/ppc/cpu_init.c | 6
-rw-r--r--  target/ppc/excp_helper.c | 41
-rw-r--r--  target/ppc/internal.h | 17
-rw-r--r--  target/ppc/user_only_helper.c | 15
-rw-r--r--  target/riscv/cpu.c | 2
-rw-r--r--  target/riscv/cpu_helper.c | 21
-rw-r--r--  target/s390x/cpu.c | 7
-rw-r--r--  target/s390x/s390x-internal.h | 9
-rw-r--r--  target/s390x/tcg/excp_helper.c | 45
-rw-r--r--  target/s390x/tcg/mem_helper.c | 18
-rw-r--r--  target/sh4/cpu.c | 2
-rw-r--r--  target/sh4/cpu.h | 6
-rw-r--r--  target/sh4/helper.c | 9
-rw-r--r--  target/sh4/op_helper.c | 5
-rw-r--r--  target/sparc/cpu.c | 2
-rw-r--r--  target/sparc/ldst_helper.c | 22
-rw-r--r--  target/sparc/meson.build | 2
-rw-r--r--  target/sparc/mmu_helper.c | 115
-rw-r--r--  target/xtensa/cpu.c | 2
-rw-r--r--  target/xtensa/cpu.h | 2
-rw-r--r--  target/xtensa/helper.c | 22
72 files changed, 1292 insertions, 2544 deletions
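
This series moves the user-only handling of faulting memory accesses out of the
tlb_fill hook: each target registers dedicated record_sigsegv (and, where
supported, record_sigbus) callbacks in its TCGCPUOps, and tlb_fill becomes a
sysemu-only hook. As a reference, here is a minimal sketch of the hook shapes
as they are used throughout this diff (the authoritative definitions live in
hw/core/tcg-cpu-ops.h and may differ in detail):

    struct TCGCPUOps {
        /* ... */
    #ifdef CONFIG_USER_ONLY
        /* Record a fault on an unmapped or protection-violating access. */
        void (*record_sigsegv)(CPUState *cpu, vaddr addr,
                               MMUAccessType access_type,
                               bool maperr, uintptr_t ra);
        /* Record a fault on a misaligned access. */
        void (*record_sigbus)(CPUState *cpu, vaddr addr,
                              MMUAccessType access_type, uintptr_t ra);
    #else
        /* Fill the softmmu TLB, or raise an exception; sysemu only. */
        bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
                         MMUAccessType access_type, int mmu_idx,
                         bool probe, uintptr_t retaddr);
    #endif
        /* ... */
    };
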
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index 93e16a2..a8990d4 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -218,9 +218,12 @@ static const struct SysemuCPUOps alpha_sysemu_ops = {
static const struct TCGCPUOps alpha_tcg_ops = {
.initialize = alpha_translate_init,
- .tlb_fill = alpha_cpu_tlb_fill,
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ .record_sigsegv = alpha_cpu_record_sigsegv,
+ .record_sigbus = alpha_cpu_record_sigbus,
+#else
+ .tlb_fill = alpha_cpu_tlb_fill,
.cpu_exec_interrupt = alpha_cpu_exec_interrupt,
.do_interrupt = alpha_cpu_do_interrupt,
.do_transaction_failed = alpha_cpu_do_transaction_failed,
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
index 772828c..afd975c 100644
--- a/target/alpha/cpu.h
+++ b/target/alpha/cpu.h
@@ -282,9 +282,6 @@ void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags);
hwaddr alpha_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int alpha_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int alpha_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
-void alpha_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
- MMUAccessType access_type, int mmu_idx,
- uintptr_t retaddr) QEMU_NORETURN;
#define cpu_list alpha_cpu_list
@@ -439,9 +436,6 @@ void alpha_translate_init(void);
#define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU
void alpha_cpu_list(void);
-bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr);
void QEMU_NORETURN dynamic_excp(CPUAlphaState *, uintptr_t, int, int);
void QEMU_NORETURN arith_excp(CPUAlphaState *, uintptr_t, int, uint64_t);
@@ -449,7 +443,20 @@ uint64_t cpu_alpha_load_fpcr (CPUAlphaState *env);
void cpu_alpha_store_fpcr (CPUAlphaState *env, uint64_t val);
uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg);
void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val);
-#ifndef CONFIG_USER_ONLY
+
+#ifdef CONFIG_USER_ONLY
+void alpha_cpu_record_sigsegv(CPUState *cs, vaddr address,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t retaddr);
+void alpha_cpu_record_sigbus(CPUState *cs, vaddr address,
+ MMUAccessType access_type, uintptr_t retaddr);
+#else
+bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+void alpha_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ uintptr_t retaddr) QEMU_NORETURN;
void alpha_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
vaddr addr, unsigned size,
MMUAccessType access_type,
diff --git a/target/alpha/helper.c b/target/alpha/helper.c
index 81550d9..b7e7f73 100644
--- a/target/alpha/helper.c
+++ b/target/alpha/helper.c
@@ -120,15 +120,44 @@ void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val)
}
#if defined(CONFIG_USER_ONLY)
-bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
+void alpha_cpu_record_sigsegv(CPUState *cs, vaddr address,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t retaddr)
{
AlphaCPU *cpu = ALPHA_CPU(cs);
+ target_ulong mmcsr, cause;
+
+ /* Assuming !maperr, infer the missing protection. */
+ switch (access_type) {
+ case MMU_DATA_LOAD:
+ mmcsr = MM_K_FOR;
+ cause = 0;
+ break;
+ case MMU_DATA_STORE:
+ mmcsr = MM_K_FOW;
+ cause = 1;
+ break;
+ case MMU_INST_FETCH:
+ mmcsr = MM_K_FOE;
+ cause = -1;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ if (maperr) {
+ if (address < BIT_ULL(TARGET_VIRT_ADDR_SPACE_BITS - 1)) {
+ /* Userspace address, therefore page not mapped. */
+ mmcsr = MM_K_TNV;
+ } else {
+ /* Kernel or invalid address. */
+ mmcsr = MM_K_ACV;
+ }
+ }
- cs->exception_index = EXCP_MMFAULT;
+ /* Record the arguments that PALcode would give to the kernel. */
cpu->env.trap_arg0 = address;
- cpu_loop_exit_restore(cs, retaddr);
+ cpu->env.trap_arg1 = mmcsr;
+ cpu->env.trap_arg2 = cause;
}
#else
/* Returns the OSF/1 entMM failure indication, or -1 on success. */
diff --git a/target/alpha/mem_helper.c b/target/alpha/mem_helper.c
index 75e72bc..47283a0 100644
--- a/target/alpha/mem_helper.c
+++ b/target/alpha/mem_helper.c
@@ -23,18 +23,12 @@
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
-/* Softmmu support */
-#ifndef CONFIG_USER_ONLY
-void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
- MMUAccessType access_type,
- int mmu_idx, uintptr_t retaddr)
+static void do_unaligned_access(CPUAlphaState *env, vaddr addr, uintptr_t retaddr)
{
- AlphaCPU *cpu = ALPHA_CPU(cs);
- CPUAlphaState *env = &cpu->env;
uint64_t pc;
uint32_t insn;
- cpu_restore_state(cs, retaddr, true);
+ cpu_restore_state(env_cpu(env), retaddr, true);
pc = env->pc;
insn = cpu_ldl_code(env, pc);
@@ -42,6 +36,26 @@ void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
env->trap_arg0 = addr;
env->trap_arg1 = insn >> 26; /* opcode */
env->trap_arg2 = (insn >> 21) & 31; /* dest regno */
+}
+
+#ifdef CONFIG_USER_ONLY
+void alpha_cpu_record_sigbus(CPUState *cs, vaddr addr,
+ MMUAccessType access_type, uintptr_t retaddr)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+
+ do_unaligned_access(env, addr, retaddr);
+}
+#else
+void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+
+ do_unaligned_access(env, addr, retaddr);
cs->exception_index = EXCP_UNALIGN;
env->error_code = 0;
cpu_loop_exit(cs);
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 641a8c2..a211804 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -2031,10 +2031,13 @@ static const struct SysemuCPUOps arm_sysemu_ops = {
static const struct TCGCPUOps arm_tcg_ops = {
.initialize = arm_translate_init,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
- .tlb_fill = arm_cpu_tlb_fill,
.debug_excp_handler = arm_debug_excp_handler,
-#if !defined(CONFIG_USER_ONLY)
+#ifdef CONFIG_USER_ONLY
+ .record_sigsegv = arm_cpu_record_sigsegv,
+ .record_sigbus = arm_cpu_record_sigbus,
+#else
+ .tlb_fill = arm_cpu_tlb_fill,
.cpu_exec_interrupt = arm_cpu_exec_interrupt,
.do_interrupt = arm_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
index 0d5adcc..13d0e9b 100644
--- a/target/arm/cpu_tcg.c
+++ b/target/arm/cpu_tcg.c
@@ -898,10 +898,13 @@ static void pxa270c5_initfn(Object *obj)
static const struct TCGCPUOps arm_v7m_tcg_ops = {
.initialize = arm_translate_init,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
- .tlb_fill = arm_cpu_tlb_fill,
.debug_excp_handler = arm_debug_excp_handler,
-#if !defined(CONFIG_USER_ONLY)
+#ifdef CONFIG_USER_ONLY
+ .record_sigsegv = arm_cpu_record_sigsegv,
+ .record_sigbus = arm_cpu_record_sigbus,
+#else
+ .tlb_fill = arm_cpu_tlb_fill,
.cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
.do_interrupt = arm_v7m_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
index e0dcb33..134da0d 100644
--- a/target/arm/gdbstub.c
+++ b/target/arm/gdbstub.c
@@ -199,6 +199,27 @@ static int vfp_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
return 0;
}
+static int mve_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
+{
+ switch (reg) {
+ case 0:
+ return gdb_get_reg32(buf, env->v7m.vpr);
+ default:
+ return 0;
+ }
+}
+
+static int mve_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
+{
+ switch (reg) {
+ case 0:
+ env->v7m.vpr = ldl_p(buf);
+ return 4;
+ default:
+ return 0;
+ }
+}
+
/**
* arm_get/set_gdb_*: get/set a gdb register
* @env: the CPU state
@@ -468,6 +489,10 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
2, "arm-vfp-sysregs.xml", 0);
}
}
+ if (cpu_isar_feature(aa32_mve, cpu)) {
+ gdb_register_coprocessor(cs, mve_gdb_get_reg, mve_gdb_set_reg,
+ 1, "arm-m-profile-mve.xml", 0);
+ }
gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs),
"system-registers.xml", 0);
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index bff3e0c..0dc9656 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -1150,12 +1150,19 @@ int hvf_vcpu_exec(CPUState *cpu)
uint32_t sas = (syndrome >> 22) & 3;
uint32_t len = 1 << sas;
uint32_t srt = (syndrome >> 16) & 0x1f;
+ uint32_t cm = (syndrome >> 8) & 0x1;
uint64_t val = 0;
trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address,
hvf_exit->exception.physical_address, isv,
iswrite, s1ptw, len, srt);
+ if (cm) {
+ /* We don't cache MMIO regions */
+ advance_pc = true;
+ break;
+ }
+
assert(isv);
if (iswrite) {
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 3612107..89f7610 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -544,9 +544,17 @@ static inline bool arm_extabort_type(MemTxResult result)
return result != MEMTX_DECODE_ERROR;
}
+#ifdef CONFIG_USER_ONLY
+void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
+void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type, uintptr_t ra);
+#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
+#endif
static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index 7241752..e09b7e4 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -84,10 +84,8 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
uintptr_t index;
if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
- /* SIGSEGV */
- arm_cpu_tlb_fill(env_cpu(env), ptr, ptr_size, ptr_access,
- ptr_mmu_idx, false, ra);
- g_assert_not_reached();
+ cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
+ !(flags & PAGE_VALID), ra);
}
/* Require both MAP_ANON and PROT_MTE for the page. */
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index dab5f1d..07be55b 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -6118,7 +6118,7 @@ DO_LDN_2(4, dd, MO_64)
* linux-user/ in its get_user/put_user macros.
*
* TODO: Construct some helpers, written in assembly, that interact with
- * handle_cpu_signal to produce memory ops which can properly report errors
+ * host_signal_handler to produce memory ops which can properly report errors
* without racing.
*/
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
index 3107f98..12a934e 100644
--- a/target/arm/tlb_helper.c
+++ b/target/arm/tlb_helper.c
@@ -147,28 +147,12 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
-#endif /* !defined(CONFIG_USER_ONLY) */
-
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
ARMCPU *cpu = ARM_CPU(cs);
ARMMMUFaultInfo fi = {};
-
-#ifdef CONFIG_USER_ONLY
- int flags = page_get_flags(useronly_clean_ptr(address));
- if (flags & PAGE_VALID) {
- fi.type = ARMFault_Permission;
- } else {
- fi.type = ARMFault_Translation;
- }
- fi.level = 3;
-
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr, true);
- arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
-#else
hwaddr phys_addr;
target_ulong page_size;
int prot, ret;
@@ -210,5 +194,29 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
cpu_restore_state(cs, retaddr, true);
arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
}
-#endif
}
+#else
+void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra)
+{
+ ARMMMUFaultInfo fi = {
+ .type = maperr ? ARMFault_Translation : ARMFault_Permission,
+ .level = 3,
+ };
+ ARMCPU *cpu = ARM_CPU(cs);
+
+ /*
+ * We report both ESR and FAR to signal handlers.
+ * For now, it's easiest to deliver the fault normally.
+ */
+ cpu_restore_state(cs, ra, true);
+ arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
+}
+
+void arm_cpu_record_sigbus(CPUState *cs, vaddr addr,
+ MMUAccessType access_type, uintptr_t ra)
+{
+ arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra);
+}
+#endif /* !defined(CONFIG_USER_ONLY) */
diff --git a/target/arm/translate-a32.h b/target/arm/translate-a32.h
index 88f15df..17af8dc 100644
--- a/target/arm/translate-a32.h
+++ b/target/arm/translate-a32.h
@@ -70,6 +70,9 @@ static inline void store_cpu_offset(TCGv_i32 var, int offset)
#define store_cpu_field(var, name) \
store_cpu_offset(var, offsetof(CPUARMState, name))
+#define store_cpu_field_constant(val, name) \
+ tcg_gen_st_i32(tcg_constant_i32(val), cpu_env, offsetof(CPUARMState, name))
+
/* Create a new temporary and set it to the value of a CPU register. */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
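
The new store_cpu_field_constant() macro relies on tcg_constant_i32() returning
a cached, read-only constant, which is why the callers converted later in this
patch no longer allocate or free a temporary. A minimal usage sketch mirroring
the translate.c conversions:

    /* Before: build the value in a temporary and store it. */
    TCGv_i32 tmp = tcg_const_i32(0);
    store_cpu_field(tmp, condexec_bits);

    /* After: the constant is interned by TCG, so there is no temp to manage. */
    store_cpu_field_constant(0, condexec_bits);
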
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index bc91a64..76b5fe9 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -1943,20 +1943,20 @@ static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
{
TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2;
if (u) {
if (d) {
tcg_gen_sub_i64(t0, reg, val);
- tcg_gen_movi_i64(t1, 0);
- tcg_gen_movcond_i64(TCG_COND_LTU, reg, reg, val, t1, t0);
+ t2 = tcg_constant_i64(0);
+ tcg_gen_movcond_i64(TCG_COND_LTU, reg, reg, val, t2, t0);
} else {
tcg_gen_add_i64(t0, reg, val);
- tcg_gen_movi_i64(t1, -1);
- tcg_gen_movcond_i64(TCG_COND_LTU, reg, t0, reg, t1, t0);
+ t2 = tcg_constant_i64(-1);
+ tcg_gen_movcond_i64(TCG_COND_LTU, reg, t0, reg, t2, t0);
}
} else {
+ TCGv_i64 t1 = tcg_temp_new_i64();
if (d) {
/* Detect signed overflow for subtraction. */
tcg_gen_xor_i64(t0, reg, val);
@@ -1966,7 +1966,7 @@ static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
/* Bound the result. */
tcg_gen_movi_i64(reg, INT64_MIN);
- t2 = tcg_const_i64(0);
+ t2 = tcg_constant_i64(0);
tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, reg, t1);
} else {
/* Detect signed overflow for addition. */
@@ -1977,13 +1977,12 @@ static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
/* Bound the result. */
tcg_gen_movi_i64(t1, INT64_MAX);
- t2 = tcg_const_i64(0);
+ t2 = tcg_constant_i64(0);
tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, t1, reg);
}
- tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t1);
}
tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
}
/* Similarly with a vector and a scalar operand. */
diff --git a/target/arm/translate.c b/target/arm/translate.c
index d6af5b1..98f5925 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -364,8 +364,7 @@ void clear_eci_state(DisasContext *s)
* multiple insn executes.
*/
if (s->eci) {
- TCGv_i32 tmp = tcg_const_i32(0);
- store_cpu_field(tmp, condexec_bits);
+ store_cpu_field_constant(0, condexec_bits);
s->eci = 0;
}
}
@@ -389,13 +388,12 @@ static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
void gen_rev16(TCGv_i32 dest, TCGv_i32 var)
{
TCGv_i32 tmp = tcg_temp_new_i32();
- TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
+ TCGv_i32 mask = tcg_constant_i32(0x00ff00ff);
tcg_gen_shri_i32(tmp, var, 8);
tcg_gen_and_i32(tmp, tmp, mask);
tcg_gen_and_i32(var, var, mask);
tcg_gen_shli_i32(var, var, 8);
tcg_gen_or_i32(dest, var, tmp);
- tcg_temp_free_i32(mask);
tcg_temp_free_i32(tmp);
}
@@ -740,9 +738,8 @@ void gen_set_condexec(DisasContext *s)
{
if (s->condexec_mask) {
uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, val);
- store_cpu_field(tmp, condexec_bits);
+
+ store_cpu_field_constant(val, condexec_bits);
}
}
@@ -7849,10 +7846,9 @@ static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
t3 = tcg_temp_new_i32();
tcg_gen_sari_i32(t3, t1, 31);
qf = load_cpu_field(QF);
- one = tcg_const_i32(1);
+ one = tcg_constant_i32(1);
tcg_gen_movcond_i32(TCG_COND_NE, qf, t2, t3, one, qf);
store_cpu_field(qf, QF);
- tcg_temp_free_i32(one);
tcg_temp_free_i32(t3);
tcg_temp_free_i32(t2);
}
@@ -8363,8 +8359,6 @@ static bool trans_BL(DisasContext *s, arg_i *a)
static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
{
- TCGv_i32 tmp;
-
/*
* BLX <imm> would be useless on M-profile; the encoding space
* is used for other insns from v8.1M onward, and UNDEFs before that.
@@ -8378,8 +8372,7 @@ static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
return false;
}
tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
- tmp = tcg_const_i32(!s->thumb);
- store_cpu_field(tmp, thumb);
+ store_cpu_field_constant(!s->thumb, thumb);
gen_jmp(s, (read_pc(s) & ~3) + a->imm);
return true;
}
@@ -8678,7 +8671,6 @@ static bool trans_LCTP(DisasContext *s, arg_LCTP *a)
* doesn't cache branch information, all we need to do is reset
* FPSCR.LTPSIZE to 4.
*/
- TCGv_i32 ltpsize;
if (!dc_isar_feature(aa32_lob, s) ||
!dc_isar_feature(aa32_mve, s)) {
@@ -8689,8 +8681,7 @@ static bool trans_LCTP(DisasContext *s, arg_LCTP *a)
return true;
}
- ltpsize = tcg_const_i32(4);
- store_cpu_field(ltpsize, v7m.ltpsize);
+ store_cpu_field_constant(4, v7m.ltpsize);
return true;
}
@@ -9488,9 +9479,7 @@ static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
/* Reset the conditional execution bits immediately. This avoids
complications trying to do it at the end of the block. */
if (dc->condexec_mask || dc->condexec_cond) {
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, 0);
- store_cpu_field(tmp, condexec_bits);
+ store_cpu_field_constant(0, condexec_bits);
}
}
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
index c2e7483..ed6c781 100644
--- a/target/cris/cpu.c
+++ b/target/cris/cpu.c
@@ -205,9 +205,9 @@ static const struct SysemuCPUOps cris_sysemu_ops = {
static const struct TCGCPUOps crisv10_tcg_ops = {
.initialize = cris_initialize_crisv10_tcg,
- .tlb_fill = cris_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .tlb_fill = cris_cpu_tlb_fill,
.cpu_exec_interrupt = cris_cpu_exec_interrupt,
.do_interrupt = crisv10_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
@@ -215,9 +215,9 @@ static const struct TCGCPUOps crisv10_tcg_ops = {
static const struct TCGCPUOps crisv32_tcg_ops = {
.initialize = cris_initialize_tcg,
- .tlb_fill = cris_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .tlb_fill = cris_cpu_tlb_fill,
.cpu_exec_interrupt = cris_cpu_exec_interrupt,
.do_interrupt = cris_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
diff --git a/target/cris/cpu.h b/target/cris/cpu.h
index 6603565..b445b19 100644
--- a/target/cris/cpu.h
+++ b/target/cris/cpu.h
@@ -189,6 +189,10 @@ extern const VMStateDescription vmstate_cris_cpu;
void cris_cpu_do_interrupt(CPUState *cpu);
void crisv10_cpu_do_interrupt(CPUState *cpu);
bool cris_cpu_exec_interrupt(CPUState *cpu, int int_req);
+
+bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
#endif
void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags);
@@ -251,10 +255,6 @@ static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch)
return !!(env->pregs[PR_CCS] & U_FLAG);
}
-bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr);
-
/* Support function regs. */
#define SFR_RW_GC_CFG 0][0
#define SFR_RW_MM_CFG env->pregs[PR_SRS]][0
diff --git a/target/cris/helper.c b/target/cris/helper.c
index 36926fa..a0d6ecd 100644
--- a/target/cris/helper.c
+++ b/target/cris/helper.c
@@ -39,22 +39,6 @@
#define D_LOG(...) do { } while (0)
#endif
-#if defined(CONFIG_USER_ONLY)
-
-bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
-{
- CRISCPU *cpu = CRIS_CPU(cs);
-
- cs->exception_index = 0xaa;
- cpu->env.pregs[PR_EDA] = address;
- cpu_loop_exit_restore(cs, retaddr);
-}
-
-#else /* !CONFIG_USER_ONLY */
-
-
static void cris_shift_ccs(CPUCRISState *env)
{
uint32_t ccs;
@@ -304,5 +288,3 @@ bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return ret;
}
-
-#endif /* !CONFIG_USER_ONLY */
diff --git a/target/cris/meson.build b/target/cris/meson.build
index 67c3793..c1e326d 100644
--- a/target/cris/meson.build
+++ b/target/cris/meson.build
@@ -2,13 +2,16 @@ cris_ss = ss.source_set()
cris_ss.add(files(
'cpu.c',
'gdbstub.c',
- 'helper.c',
'op_helper.c',
'translate.c',
))
cris_softmmu_ss = ss.source_set()
-cris_softmmu_ss.add(files('mmu.c', 'machine.c'))
+cris_softmmu_ss.add(files(
+ 'helper.c',
+ 'machine.c',
+ 'mmu.c',
+))
target_arch += {'cris': cris_ss}
target_softmmu_arch += {'cris': cris_softmmu_ss}
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
index 3338365..160a46a 100644
--- a/target/hexagon/cpu.c
+++ b/target/hexagon/cpu.c
@@ -245,34 +245,11 @@ static void hexagon_cpu_init(Object *obj)
qdev_property_add_static(DEVICE(obj), &hexagon_lldb_stack_adjust_property);
}
-static bool hexagon_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
-{
-#ifdef CONFIG_USER_ONLY
- switch (access_type) {
- case MMU_INST_FETCH:
- cs->exception_index = HEX_EXCP_FETCH_NO_UPAGE;
- break;
- case MMU_DATA_LOAD:
- cs->exception_index = HEX_EXCP_PRIV_NO_UREAD;
- break;
- case MMU_DATA_STORE:
- cs->exception_index = HEX_EXCP_PRIV_NO_UWRITE;
- break;
- }
- cpu_loop_exit_restore(cs, retaddr);
-#else
-#error System mode not implemented for Hexagon
-#endif
-}
-
#include "hw/core/tcg-cpu-ops.h"
static const struct TCGCPUOps hexagon_tcg_ops = {
.initialize = hexagon_translate_init,
.synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
- .tlb_fill = hexagon_tlb_fill,
};
static void hexagon_cpu_class_init(ObjectClass *c, void *data)
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index 89cba9d..23eb254 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -145,9 +145,9 @@ static const struct SysemuCPUOps hppa_sysemu_ops = {
static const struct TCGCPUOps hppa_tcg_ops = {
.initialize = hppa_translate_init,
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
- .tlb_fill = hppa_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .tlb_fill = hppa_cpu_tlb_fill,
.cpu_exec_interrupt = hppa_cpu_exec_interrupt,
.do_interrupt = hppa_cpu_do_interrupt,
.do_unaligned_access = hppa_cpu_do_unaligned_access,
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index d3cb7a2..294fd72 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -323,10 +323,10 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
int hppa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
+#ifndef CONFIG_USER_ONLY
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
-#ifndef CONFIG_USER_ONLY
void hppa_cpu_do_interrupt(CPUState *cpu);
bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index afc5b56..bf07445 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -24,20 +24,6 @@
#include "hw/core/cpu.h"
#include "trace.h"
-#ifdef CONFIG_USER_ONLY
-bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
-{
- HPPACPU *cpu = HPPA_CPU(cs);
-
- /* ??? Test between data page fault and data memory protection trap,
- which would affect si_code. */
- cs->exception_index = EXCP_DMP;
- cpu->env.cr[CR_IOR] = address;
- cpu_loop_exit_restore(cs, retaddr);
-}
-#else
static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
int i;
@@ -392,4 +378,3 @@ int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
return ent ? ent->ar_type : -1;
}
-#endif /* CONFIG_USER_ONLY */
diff --git a/target/hppa/meson.build b/target/hppa/meson.build
index 8a7ff82..021e42a 100644
--- a/target/hppa/meson.build
+++ b/target/hppa/meson.build
@@ -7,13 +7,15 @@ hppa_ss.add(files(
'gdbstub.c',
'helper.c',
'int_helper.c',
- 'mem_helper.c',
'op_helper.c',
'translate.c',
))
hppa_softmmu_ss = ss.source_set()
-hppa_softmmu_ss.add(files('machine.c'))
+hppa_softmmu_ss.add(files(
+ 'machine.c',
+ 'mem_helper.c',
+))
target_arch += {'hppa': hppa_ss}
target_softmmu_arch += {'hppa': hppa_softmmu_ss}
diff --git a/target/i386/monitor.c b/target/i386/monitor.c
index 8166e17..8e4b4d6 100644
--- a/target/i386/monitor.c
+++ b/target/i386/monitor.c
@@ -667,9 +667,3 @@ void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
}
x86_cpu_dump_local_apic_state(cs, CPU_DUMP_FPU);
}
-
-void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
-{
- monitor_printf(mon, "This command is obsolete and will be "
- "removed soon. Please use 'info pic' instead.\n");
-}
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
index 60ca09e..0a4401e 100644
--- a/target/i386/tcg/helper-tcg.h
+++ b/target/i386/tcg/helper-tcg.h
@@ -43,9 +43,15 @@ bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
#endif
/* helper.c */
+#ifdef CONFIG_USER_ONLY
+void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
+#else
bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
+#endif
void breakpoint_handler(CPUState *cs);
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index 3ecfae3..6fdfdf9 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -72,10 +72,11 @@ static const struct TCGCPUOps x86_tcg_ops = {
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
.cpu_exec_enter = x86_cpu_exec_enter,
.cpu_exec_exit = x86_cpu_exec_exit,
- .tlb_fill = x86_cpu_tlb_fill,
#ifdef CONFIG_USER_ONLY
.fake_user_interrupt = x86_cpu_do_interrupt,
+ .record_sigsegv = x86_cpu_record_sigsegv,
#else
+ .tlb_fill = x86_cpu_tlb_fill,
.do_interrupt = x86_cpu_do_interrupt,
.cpu_exec_interrupt = x86_cpu_exec_interrupt,
.debug_excp_handler = breakpoint_handler,
diff --git a/target/i386/tcg/user/excp_helper.c b/target/i386/tcg/user/excp_helper.c
index a89b522..cd507e2 100644
--- a/target/i386/tcg/user/excp_helper.c
+++ b/target/i386/tcg/user/excp_helper.c
@@ -22,18 +22,29 @@
#include "exec/exec-all.h"
#include "tcg/helper-tcg.h"
-bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
+void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ /*
+ * The error_code that hw reports as part of the exception frame
+ * is copied to linux sigcontext.err. The exception_index is
+ * copied to linux sigcontext.trapno. Short of inventing a new
+ * place to store the trapno, we cannot let our caller raise the
+ * signal and set exception_index to EXCP_INTERRUPT.
+ */
env->cr[2] = addr;
- env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT;
- env->error_code |= PG_ERROR_U_MASK;
+ env->error_code = ((access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT)
+ | (maperr ? 0 : PG_ERROR_P_MASK)
+ | PG_ERROR_U_MASK;
cs->exception_index = EXCP0E_PAGE;
+
+ /* Disable do_interrupt_user. */
env->exception_is_int = 0;
env->exception_next_eip = -1;
- cpu_loop_exit_restore(cs, retaddr);
+
+ cpu_loop_exit_restore(cs, ra);
}
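
For context on the comment above: on Linux/x86 the values recorded here surface
in the guest's signal frame. A rough sketch of how a guest handler would observe
them (assumes glibc's REG_ERR/REG_TRAPNO indices from <sys/ucontext.h>; not part
of this patch):

    #define _GNU_SOURCE
    #include <signal.h>
    #include <ucontext.h>

    static void segv_handler(int sig, siginfo_t *si, void *ctx)
    {
        ucontext_t *uc = ctx;
        /* env->error_code above is what the guest sees as sigcontext.err ... */
        unsigned long err = uc->uc_mcontext.gregs[REG_ERR];
        /* ... and cs->exception_index (EXCP0E_PAGE) as sigcontext.trapno. */
        unsigned long trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
        (void)err;
        (void)trapno;
    }
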
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index 66d22d1..c7aeb7d 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -515,9 +515,9 @@ static const struct SysemuCPUOps m68k_sysemu_ops = {
static const struct TCGCPUOps m68k_tcg_ops = {
.initialize = m68k_tcg_init,
- .tlb_fill = m68k_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .tlb_fill = m68k_cpu_tlb_fill,
.cpu_exec_interrupt = m68k_cpu_exec_interrupt,
.do_interrupt = m68k_cpu_do_interrupt,
.do_transaction_failed = m68k_cpu_transaction_failed,
diff --git a/target/m68k/helper.c b/target/m68k/helper.c
index 137a3e1..5728e48 100644
--- a/target/m68k/helper.c
+++ b/target/m68k/helper.c
@@ -978,16 +978,12 @@ void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector)
}
}
-#endif
-
bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType qemu_access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
M68kCPU *cpu = M68K_CPU(cs);
CPUM68KState *env = &cpu->env;
-
-#ifndef CONFIG_USER_ONLY
hwaddr physical;
int prot;
int access_type;
@@ -1051,12 +1047,12 @@ bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (!(access_type & ACCESS_STORE)) {
env->mmu.ssw |= M68K_RW_040;
}
-#endif
cs->exception_index = EXCP_ACCESS;
env->mmu.ar = address;
cpu_loop_exit_restore(cs, retaddr);
}
+#endif /* !CONFIG_USER_ONLY */
uint32_t HELPER(bitrev)(uint32_t x)
{
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index 15db277..b9c888b 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -365,9 +365,9 @@ static const struct SysemuCPUOps mb_sysemu_ops = {
static const struct TCGCPUOps mb_tcg_ops = {
.initialize = mb_tcg_init,
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
- .tlb_fill = mb_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .tlb_fill = mb_cpu_tlb_fill,
.cpu_exec_interrupt = mb_cpu_exec_interrupt,
.do_interrupt = mb_cpu_do_interrupt,
.do_transaction_failed = mb_cpu_transaction_failed,
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index b7a848b..e9cd0b8 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -394,10 +394,6 @@ void mb_tcg_init(void);
#define MMU_USER_IDX 2
/* See NB_MMU_MODES further up the file. */
-bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr);
-
typedef CPUMBState CPUArchState;
typedef MicroBlazeCPU ArchCPU;
@@ -415,6 +411,10 @@ static inline void cpu_get_tb_cpu_state(CPUMBState *env, target_ulong *pc,
}
#if !defined(CONFIG_USER_ONLY)
+bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+
void mb_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
unsigned size, MMUAccessType access_type,
int mmu_idx, MemTxAttrs attrs,
diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
index dd2aecd..a607fe6 100644
--- a/target/microblaze/helper.c
+++ b/target/microblaze/helper.c
@@ -24,18 +24,7 @@
#include "qemu/host-utils.h"
#include "exec/log.h"
-#if defined(CONFIG_USER_ONLY)
-
-bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
-{
- cs->exception_index = 0xaa;
- cpu_loop_exit_restore(cs, retaddr);
-}
-
-#else /* !CONFIG_USER_ONLY */
-
+#ifndef CONFIG_USER_ONLY
static bool mb_cpu_access_is_secure(MicroBlazeCPU *cpu,
MMUAccessType access_type)
{
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 437bbed..2561b90 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -722,6 +722,7 @@ static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
}
#endif
+#ifndef CONFIG_USER_ONLY
static void record_unaligned_ess(DisasContext *dc, int rd,
MemOp size, bool store)
{
@@ -734,6 +735,7 @@ static void record_unaligned_ess(DisasContext *dc, int rd,
tcg_set_insn_start_param(dc->insn_start, 1, iflags);
}
+#endif
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
int mem_index, bool rev)
@@ -755,12 +757,19 @@ static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
}
}
+ /*
+ * For system mode, enforce alignment if the cpu configuration
+ * requires it. For user-mode, the Linux kernel will have fixed up
+ * any unaligned access, so emulate that by *not* setting MO_ALIGN.
+ */
+#ifndef CONFIG_USER_ONLY
if (size > MO_8 &&
(dc->tb_flags & MSR_EE) &&
dc->cfg->unaligned_exceptions) {
record_unaligned_ess(dc, rd, size, false);
mop |= MO_ALIGN;
}
+#endif
tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
@@ -901,12 +910,19 @@ static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
}
}
+ /*
+ * For system mode, enforce alignment if the cpu configuration
+ * requires it. For user-mode, the Linux kernel will have fixed up
+ * any unaligned access, so emulate that by *not* setting MO_ALIGN.
+ */
+#ifndef CONFIG_USER_ONLY
if (size > MO_8 &&
(dc->tb_flags & MSR_EE) &&
dc->cfg->unaligned_exceptions) {
record_unaligned_ess(dc, rd, size, true);
mop |= MO_ALIGN;
}
+#endif
tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
diff --git a/target/mips/cpu-defs.c.inc b/target/mips/cpu-defs.c.inc
index cbc45fc..582f940 100644
--- a/target/mips/cpu-defs.c.inc
+++ b/target/mips/cpu-defs.c.inc
@@ -369,7 +369,6 @@ const mips_def_t mips_defs[] =
* Config3: VZ, CTXTC, CDMM, TL
* Config4: MMUExtDef
* Config5: MRP
- * FIR(FCR0): Has2008
* */
.name = "P5600",
.CP0_PRid = 0x0001A800,
@@ -886,6 +885,7 @@ const mips_def_t mips_defs[] =
(0x1 << FCR0_D) | (0x1 << FCR0_S),
.CP1_fcr31 = 0,
.CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .MSAIR = (0x01 << MSAIR_ProcID) | (0x40 << MSAIR_Rev),
.SEGBITS = 48,
.PABITS = 48,
.insn_flags = CPU_MIPS64R2 | INSN_LOONGSON3A |
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index 00e0c55..4aae239 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -539,9 +539,9 @@ static const struct SysemuCPUOps mips_sysemu_ops = {
static const struct TCGCPUOps mips_tcg_ops = {
.initialize = mips_tcg_init,
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
- .tlb_fill = mips_cpu_tlb_fill,
#if !defined(CONFIG_USER_ONLY)
+ .tlb_fill = mips_cpu_tlb_fill,
.cpu_exec_interrupt = mips_cpu_exec_interrupt,
.do_interrupt = mips_cpu_do_interrupt,
.do_transaction_failed = mips_cpu_do_transaction_failed,
diff --git a/target/mips/tcg/meson.build b/target/mips/tcg/meson.build
index 8f6f750..9800377 100644
--- a/target/mips/tcg/meson.build
+++ b/target/mips/tcg/meson.build
@@ -28,9 +28,6 @@ mips_ss.add(when: 'TARGET_MIPS64', if_true: files(
'mxu_translate.c',
))
-if have_user
- subdir('user')
-endif
if have_system
subdir('sysemu')
endif
diff --git a/target/mips/tcg/msa.decode b/target/mips/tcg/msa.decode
index 74d99f6..9575289 100644
--- a/target/mips/tcg/msa.decode
+++ b/target/mips/tcg/msa.decode
@@ -13,19 +13,246 @@
&r rs rt rd sa
-&msa_bz df wt s16
+&msa_r df wd ws wt
+&msa_bz df wt sa
+&msa_ldi df wd sa
+&msa_i df wd ws sa
+&msa_bit df wd ws m
+&msa_elm_df df wd ws n
+&msa_elm wd ws
+
+%elm_df 16:6 !function=elm_df
+%elm_n 16:6 !function=elm_n
+%bit_df 16:7 !function=bit_df
+%bit_m 16:7 !function=bit_m
+%2r_df_w 16:1 !function=plus_2
+%3r_df_h 21:1 !function=plus_1
+%3r_df_w 21:1 !function=plus_2
@lsa ...... rs:5 rt:5 rd:5 ... sa:2 ...... &r
-@bz ...... ... .. wt:5 s16:16 &msa_bz df=3
-@bz_df ...... ... df:2 wt:5 s16:16 &msa_bz
+@ldst ...... sa:s10 ws:5 wd:5 .... df:2 &msa_i
+@bz_v ...... ... .. wt:5 sa:16 &msa_bz df=3
+@bz ...... ... df:2 wt:5 sa:16 &msa_bz
+@elm_df ...... .... ...... ws:5 wd:5 ...... &msa_elm_df df=%elm_df n=%elm_n
+@elm ...... .......... ws:5 wd:5 ...... &msa_elm
+@vec ...... ..... wt:5 ws:5 wd:5 ...... &msa_r df=0
+@2r ...... ........ df:2 ws:5 wd:5 ...... &msa_r wt=0
+@2rf ...... ......... . ws:5 wd:5 ...... &msa_r wt=0 df=%2r_df_w
+@3r ...... ... df:2 wt:5 ws:5 wd:5 ...... &msa_r
+@3rf_h ...... .... . wt:5 ws:5 wd:5 ...... &msa_r df=%3r_df_h
+@3rf_w ...... .... . wt:5 ws:5 wd:5 ...... &msa_r df=%3r_df_w
+@u5 ...... ... df:2 sa:5 ws:5 wd:5 ...... &msa_i
+@s5 ...... ... df:2 sa:s5 ws:5 wd:5 ...... &msa_i
+@i8_df ...... df:2 sa:s8 ws:5 wd:5 ...... &msa_i
+@i8 ...... .. sa:s8 ws:5 wd:5 ...... &msa_i df=0
+@ldi ...... ... df:2 sa:s10 wd:5 ...... &msa_ldi
+@bit ...... ... ....... ws:5 wd:5 ...... &msa_bit df=%bit_df m=%bit_m
LSA 000000 ..... ..... ..... 000 .. 000101 @lsa
DLSA 000000 ..... ..... ..... 000 .. 010101 @lsa
-BZ_V 010001 01011 ..... ................ @bz
-BNZ_V 010001 01111 ..... ................ @bz
+BZ_V 010001 01011 ..... ................ @bz_v
+BNZ_V 010001 01111 ..... ................ @bz_v
+BZ 010001 110 .. ..... ................ @bz
+BNZ 010001 111 .. ..... ................ @bz
+
+ANDI 011110 00 ........ ..... ..... 000000 @i8
+ORI 011110 01 ........ ..... ..... 000000 @i8
+NORI 011110 10 ........ ..... ..... 000000 @i8
+XORI 011110 11 ........ ..... ..... 000000 @i8
+BMNZI 011110 00 ........ ..... ..... 000001 @i8
+BMZI 011110 01 ........ ..... ..... 000001 @i8
+BSELI 011110 10 ........ ..... ..... 000001 @i8
+SHF 011110 .. ........ ..... ..... 000010 @i8_df
+
+ADDVI 011110 000 .. ..... ..... ..... 000110 @u5
+SUBVI 011110 001 .. ..... ..... ..... 000110 @u5
+MAXI_S 011110 010 .. ..... ..... ..... 000110 @s5
+MAXI_U 011110 011 .. ..... ..... ..... 000110 @u5
+MINI_S 011110 100 .. ..... ..... ..... 000110 @s5
+MINI_U 011110 101 .. ..... ..... ..... 000110 @u5
+
+CEQI 011110 000 .. ..... ..... ..... 000111 @s5
+CLTI_S 011110 010 .. ..... ..... ..... 000111 @s5
+CLTI_U 011110 011 .. ..... ..... ..... 000111 @u5
+CLEI_S 011110 100 .. ..... ..... ..... 000111 @s5
+CLEI_U 011110 101 .. ..... ..... ..... 000111 @u5
+
+LDI 011110 110 .. .......... ..... 000111 @ldi
+
+SLLI 011110 000 ....... ..... ..... 001001 @bit
+SRAI 011110 001 ....... ..... ..... 001001 @bit
+SRLI 011110 010 ....... ..... ..... 001001 @bit
+BCLRI 011110 011 ....... ..... ..... 001001 @bit
+BSETI 011110 100 ....... ..... ..... 001001 @bit
+BNEGI 011110 101 ....... ..... ..... 001001 @bit
+BINSLI 011110 110 ....... ..... ..... 001001 @bit
+BINSRI 011110 111 ....... ..... ..... 001001 @bit
+
+SAT_S 011110 000 ....... ..... ..... 001010 @bit
+SAT_U 011110 001 ....... ..... ..... 001010 @bit
+SRARI 011110 010 ....... ..... ..... 001010 @bit
+SRLRI 011110 011 ....... ..... ..... 001010 @bit
+
+SLL 011110 000.. ..... ..... ..... 001101 @3r
+SRA 011110 001.. ..... ..... ..... 001101 @3r
+SRL 011110 010.. ..... ..... ..... 001101 @3r
+BCLR 011110 011.. ..... ..... ..... 001101 @3r
+BSET 011110 100.. ..... ..... ..... 001101 @3r
+BNEG 011110 101.. ..... ..... ..... 001101 @3r
+BINSL 011110 110.. ..... ..... ..... 001101 @3r
+BINSR 011110 111.. ..... ..... ..... 001101 @3r
+
+ADDV 011110 000.. ..... ..... ..... 001110 @3r
+SUBV 011110 001.. ..... ..... ..... 001110 @3r
+MAX_S 011110 010.. ..... ..... ..... 001110 @3r
+MAX_U 011110 011.. ..... ..... ..... 001110 @3r
+MIN_S 011110 100.. ..... ..... ..... 001110 @3r
+MIN_U 011110 101.. ..... ..... ..... 001110 @3r
+MAX_A 011110 110.. ..... ..... ..... 001110 @3r
+MIN_A 011110 111.. ..... ..... ..... 001110 @3r
+
+CEQ 011110 000.. ..... ..... ..... 001111 @3r
+CLT_S 011110 010.. ..... ..... ..... 001111 @3r
+CLT_U 011110 011.. ..... ..... ..... 001111 @3r
+CLE_S 011110 100.. ..... ..... ..... 001111 @3r
+CLE_U 011110 101.. ..... ..... ..... 001111 @3r
+
+ADD_A 011110 000.. ..... ..... ..... 010000 @3r
+ADDS_A 011110 001.. ..... ..... ..... 010000 @3r
+ADDS_S 011110 010.. ..... ..... ..... 010000 @3r
+ADDS_U 011110 011.. ..... ..... ..... 010000 @3r
+AVE_S 011110 100.. ..... ..... ..... 010000 @3r
+AVE_U 011110 101.. ..... ..... ..... 010000 @3r
+AVER_S 011110 110.. ..... ..... ..... 010000 @3r
+AVER_U 011110 111.. ..... ..... ..... 010000 @3r
+
+SUBS_S 011110 000.. ..... ..... ..... 010001 @3r
+SUBS_U 011110 001.. ..... ..... ..... 010001 @3r
+SUBSUS_U 011110 010.. ..... ..... ..... 010001 @3r
+SUBSUU_S 011110 011.. ..... ..... ..... 010001 @3r
+ASUB_S 011110 100.. ..... ..... ..... 010001 @3r
+ASUB_U 011110 101.. ..... ..... ..... 010001 @3r
+
+MULV 011110 000.. ..... ..... ..... 010010 @3r
+MADDV 011110 001.. ..... ..... ..... 010010 @3r
+MSUBV 011110 010.. ..... ..... ..... 010010 @3r
+DIV_S 011110 100.. ..... ..... ..... 010010 @3r
+DIV_U 011110 101.. ..... ..... ..... 010010 @3r
+MOD_S 011110 110.. ..... ..... ..... 010010 @3r
+MOD_U 011110 111.. ..... ..... ..... 010010 @3r
+
+DOTP_S 011110 000.. ..... ..... ..... 010011 @3r
+DOTP_U 011110 001.. ..... ..... ..... 010011 @3r
+DPADD_S 011110 010.. ..... ..... ..... 010011 @3r
+DPADD_U 011110 011.. ..... ..... ..... 010011 @3r
+DPSUB_S 011110 100.. ..... ..... ..... 010011 @3r
+DPSUB_U 011110 101.. ..... ..... ..... 010011 @3r
+
+SLD 011110 000 .. ..... ..... ..... 010100 @3r
+SPLAT 011110 001 .. ..... ..... ..... 010100 @3r
+PCKEV 011110 010 .. ..... ..... ..... 010100 @3r
+PCKOD 011110 011 .. ..... ..... ..... 010100 @3r
+ILVL 011110 100 .. ..... ..... ..... 010100 @3r
+ILVR 011110 101 .. ..... ..... ..... 010100 @3r
+ILVEV 011110 110 .. ..... ..... ..... 010100 @3r
+ILVOD 011110 111 .. ..... ..... ..... 010100 @3r
+
+VSHF 011110 000 .. ..... ..... ..... 010101 @3r
+SRAR 011110 001 .. ..... ..... ..... 010101 @3r
+SRLR 011110 010 .. ..... ..... ..... 010101 @3r
+HADD_S 011110 100.. ..... ..... ..... 010101 @3r
+HADD_U 011110 101.. ..... ..... ..... 010101 @3r
+HSUB_S 011110 110.. ..... ..... ..... 010101 @3r
+HSUB_U 011110 111.. ..... ..... ..... 010101 @3r
+
+{
+ CTCMSA 011110 0000111110 ..... ..... 011001 @elm
+ SLDI 011110 0000 ...... ..... ..... 011001 @elm_df
+}
+{
+ CFCMSA 011110 0001111110 ..... ..... 011001 @elm
+ SPLATI 011110 0001 ...... ..... ..... 011001 @elm_df
+}
+{
+ MOVE_V 011110 0010111110 ..... ..... 011001 @elm
+ COPY_S 011110 0010 ...... ..... ..... 011001 @elm_df
+}
+COPY_U 011110 0011 ...... ..... ..... 011001 @elm_df
+INSERT 011110 0100 ...... ..... ..... 011001 @elm_df
+INSVE 011110 0101 ...... ..... ..... 011001 @elm_df
+
+FCAF 011110 0000 . ..... ..... ..... 011010 @3rf_w
+FCUN 011110 0001 . ..... ..... ..... 011010 @3rf_w
+FCEQ 011110 0010 . ..... ..... ..... 011010 @3rf_w
+FCUEQ 011110 0011 . ..... ..... ..... 011010 @3rf_w
+FCLT 011110 0100 . ..... ..... ..... 011010 @3rf_w
+FCULT 011110 0101 . ..... ..... ..... 011010 @3rf_w
+FCLE 011110 0110 . ..... ..... ..... 011010 @3rf_w
+FCULE 011110 0111 . ..... ..... ..... 011010 @3rf_w
+FSAF 011110 1000 . ..... ..... ..... 011010 @3rf_w
+FSUN 011110 1001 . ..... ..... ..... 011010 @3rf_w
+FSEQ 011110 1010 . ..... ..... ..... 011010 @3rf_w
+FSUEQ 011110 1011 . ..... ..... ..... 011010 @3rf_w
+FSLT 011110 1100 . ..... ..... ..... 011010 @3rf_w
+FSULT 011110 1101 . ..... ..... ..... 011010 @3rf_w
+FSLE 011110 1110 . ..... ..... ..... 011010 @3rf_w
+FSULE 011110 1111 . ..... ..... ..... 011010 @3rf_w
+
+FADD 011110 0000 . ..... ..... ..... 011011 @3rf_w
+FSUB 011110 0001 . ..... ..... ..... 011011 @3rf_w
+FMUL 011110 0010 . ..... ..... ..... 011011 @3rf_w
+FDIV 011110 0011 . ..... ..... ..... 011011 @3rf_w
+FMADD 011110 0100 . ..... ..... ..... 011011 @3rf_w
+FMSUB 011110 0101 . ..... ..... ..... 011011 @3rf_w
+FEXP2 011110 0111 . ..... ..... ..... 011011 @3rf_w
+FEXDO 011110 1000 . ..... ..... ..... 011011 @3rf_w
+FTQ 011110 1010 . ..... ..... ..... 011011 @3rf_w
+FMIN 011110 1100 . ..... ..... ..... 011011 @3rf_w
+FMIN_A 011110 1101 . ..... ..... ..... 011011 @3rf_w
+FMAX 011110 1110 . ..... ..... ..... 011011 @3rf_w
+FMAX_A 011110 1111 . ..... ..... ..... 011011 @3rf_w
+
+FCOR 011110 0001 . ..... ..... ..... 011100 @3rf_w
+FCUNE 011110 0010 . ..... ..... ..... 011100 @3rf_w
+FCNE 011110 0011 . ..... ..... ..... 011100 @3rf_w
+MUL_Q 011110 0100 . ..... ..... ..... 011100 @3rf_h
+MADD_Q 011110 0101 . ..... ..... ..... 011100 @3rf_h
+MSUB_Q 011110 0110 . ..... ..... ..... 011100 @3rf_h
+FSOR 011110 1001 . ..... ..... ..... 011100 @3rf_w
+FSUNE 011110 1010 . ..... ..... ..... 011100 @3rf_w
+FSNE 011110 1011 . ..... ..... ..... 011100 @3rf_w
+MULR_Q 011110 1100 . ..... ..... ..... 011100 @3rf_h
+MADDR_Q 011110 1101 . ..... ..... ..... 011100 @3rf_h
+MSUBR_Q 011110 1110 . ..... ..... ..... 011100 @3rf_h
-BZ_x 010001 110 .. ..... ................ @bz_df
-BNZ_x 010001 111 .. ..... ................ @bz_df
+AND_V 011110 00000 ..... ..... ..... 011110 @vec
+OR_V 011110 00001 ..... ..... ..... 011110 @vec
+NOR_V 011110 00010 ..... ..... ..... 011110 @vec
+XOR_V 011110 00011 ..... ..... ..... 011110 @vec
+BMNZ_V 011110 00100 ..... ..... ..... 011110 @vec
+BMZ_V 011110 00101 ..... ..... ..... 011110 @vec
+BSEL_V 011110 00110 ..... ..... ..... 011110 @vec
+FILL 011110 11000000 .. ..... ..... 011110 @2r
+PCNT 011110 11000001 .. ..... ..... 011110 @2r
+NLOC 011110 11000010 .. ..... ..... 011110 @2r
+NLZC 011110 11000011 .. ..... ..... 011110 @2r
+FCLASS 011110 110010000 . ..... ..... 011110 @2rf
+FTRUNC_S 011110 110010001 . ..... ..... 011110 @2rf
+FTRUNC_U 011110 110010010 . ..... ..... 011110 @2rf
+FSQRT 011110 110010011 . ..... ..... 011110 @2rf
+FRSQRT 011110 110010100 . ..... ..... 011110 @2rf
+FRCP 011110 110010101 . ..... ..... 011110 @2rf
+FRINT 011110 110010110 . ..... ..... 011110 @2rf
+FLOG2 011110 110010111 . ..... ..... 011110 @2rf
+FEXUPL 011110 110011000 . ..... ..... 011110 @2rf
+FEXUPR 011110 110011001 . ..... ..... 011110 @2rf
+FFQL 011110 110011010 . ..... ..... 011110 @2rf
+FFQR 011110 110011011 . ..... ..... 011110 @2rf
+FTINT_S 011110 110011100 . ..... ..... 011110 @2rf
+FTINT_U 011110 110011101 . ..... ..... 011110 @2rf
+FFINT_S 011110 110011110 . ..... ..... 011110 @2rf
+FFINT_U 011110 110011111 . ..... ..... 011110 @2rf
-MSA 011110 --------------------------
+LD 011110 .......... ..... ..... 1000 .. @ldst
+ST 011110 .......... ..... ..... 1001 .. @ldst
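
The patterns above are consumed by QEMU's decodetree generator: each format
(&msa_r, &msa_i, ...) becomes an arg_* argument struct, and each named pattern
a trans_* callback implemented in msa_translate.c. A rough sketch of the
generated interface for one 3R pattern (the callback body is illustrative only,
not the code in this patch):

    /* Generated from "&msa_r df wd ws wt": all fields are ints. */
    typedef struct arg_msa_r {
        int df, wd, ws, wt;
    } arg_msa_r;

    /* One callback per pattern; the decoder calls it with the extracted fields. */
    static bool trans_ADDV(DisasContext *ctx, arg_msa_r *a)
    {
        /* Illustrative body: the real handler checks that MSA is enabled
         * and emits the matching gen_helper_msa_* call. */
        return true;
    }
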
diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c
index e40c1b7..5667b1f 100644
--- a/target/mips/tcg/msa_helper.c
+++ b/target/mips/tcg/msa_helper.c
@@ -3231,22 +3231,22 @@ void helper_msa_maddv_b(CPUMIPSState *env,
wr_t *pws = &(env->active_fpu.fpr[ws].wr);
wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
- pwd->b[0] = msa_maddv_df(DF_BYTE, pwt->b[0], pws->b[0], pwt->b[0]);
- pwd->b[1] = msa_maddv_df(DF_BYTE, pwt->b[1], pws->b[1], pwt->b[1]);
- pwd->b[2] = msa_maddv_df(DF_BYTE, pwt->b[2], pws->b[2], pwt->b[2]);
- pwd->b[3] = msa_maddv_df(DF_BYTE, pwt->b[3], pws->b[3], pwt->b[3]);
- pwd->b[4] = msa_maddv_df(DF_BYTE, pwt->b[4], pws->b[4], pwt->b[4]);
- pwd->b[5] = msa_maddv_df(DF_BYTE, pwt->b[5], pws->b[5], pwt->b[5]);
- pwd->b[6] = msa_maddv_df(DF_BYTE, pwt->b[6], pws->b[6], pwt->b[6]);
- pwd->b[7] = msa_maddv_df(DF_BYTE, pwt->b[7], pws->b[7], pwt->b[7]);
- pwd->b[8] = msa_maddv_df(DF_BYTE, pwt->b[8], pws->b[8], pwt->b[8]);
- pwd->b[9] = msa_maddv_df(DF_BYTE, pwt->b[9], pws->b[9], pwt->b[9]);
- pwd->b[10] = msa_maddv_df(DF_BYTE, pwt->b[10], pws->b[10], pwt->b[10]);
- pwd->b[11] = msa_maddv_df(DF_BYTE, pwt->b[11], pws->b[11], pwt->b[11]);
- pwd->b[12] = msa_maddv_df(DF_BYTE, pwt->b[12], pws->b[12], pwt->b[12]);
- pwd->b[13] = msa_maddv_df(DF_BYTE, pwt->b[13], pws->b[13], pwt->b[13]);
- pwd->b[14] = msa_maddv_df(DF_BYTE, pwt->b[14], pws->b[14], pwt->b[14]);
- pwd->b[15] = msa_maddv_df(DF_BYTE, pwt->b[15], pws->b[15], pwt->b[15]);
+ pwd->b[0] = msa_maddv_df(DF_BYTE, pwd->b[0], pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_maddv_df(DF_BYTE, pwd->b[1], pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_maddv_df(DF_BYTE, pwd->b[2], pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_maddv_df(DF_BYTE, pwd->b[3], pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_maddv_df(DF_BYTE, pwd->b[4], pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_maddv_df(DF_BYTE, pwd->b[5], pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_maddv_df(DF_BYTE, pwd->b[6], pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_maddv_df(DF_BYTE, pwd->b[7], pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_maddv_df(DF_BYTE, pwd->b[8], pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_maddv_df(DF_BYTE, pwd->b[9], pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_maddv_df(DF_BYTE, pwd->b[10], pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_maddv_df(DF_BYTE, pwd->b[11], pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_maddv_df(DF_BYTE, pwd->b[12], pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_maddv_df(DF_BYTE, pwd->b[13], pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_maddv_df(DF_BYTE, pwd->b[14], pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_maddv_df(DF_BYTE, pwd->b[15], pws->b[15], pwt->b[15]);
}
void helper_msa_maddv_h(CPUMIPSState *env,
@@ -3303,22 +3303,22 @@ void helper_msa_msubv_b(CPUMIPSState *env,
wr_t *pws = &(env->active_fpu.fpr[ws].wr);
wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
- pwd->b[0] = msa_msubv_df(DF_BYTE, pwt->b[0], pws->b[0], pwt->b[0]);
- pwd->b[1] = msa_msubv_df(DF_BYTE, pwt->b[1], pws->b[1], pwt->b[1]);
- pwd->b[2] = msa_msubv_df(DF_BYTE, pwt->b[2], pws->b[2], pwt->b[2]);
- pwd->b[3] = msa_msubv_df(DF_BYTE, pwt->b[3], pws->b[3], pwt->b[3]);
- pwd->b[4] = msa_msubv_df(DF_BYTE, pwt->b[4], pws->b[4], pwt->b[4]);
- pwd->b[5] = msa_msubv_df(DF_BYTE, pwt->b[5], pws->b[5], pwt->b[5]);
- pwd->b[6] = msa_msubv_df(DF_BYTE, pwt->b[6], pws->b[6], pwt->b[6]);
- pwd->b[7] = msa_msubv_df(DF_BYTE, pwt->b[7], pws->b[7], pwt->b[7]);
- pwd->b[8] = msa_msubv_df(DF_BYTE, pwt->b[8], pws->b[8], pwt->b[8]);
- pwd->b[9] = msa_msubv_df(DF_BYTE, pwt->b[9], pws->b[9], pwt->b[9]);
- pwd->b[10] = msa_msubv_df(DF_BYTE, pwt->b[10], pws->b[10], pwt->b[10]);
- pwd->b[11] = msa_msubv_df(DF_BYTE, pwt->b[11], pws->b[11], pwt->b[11]);
- pwd->b[12] = msa_msubv_df(DF_BYTE, pwt->b[12], pws->b[12], pwt->b[12]);
- pwd->b[13] = msa_msubv_df(DF_BYTE, pwt->b[13], pws->b[13], pwt->b[13]);
- pwd->b[14] = msa_msubv_df(DF_BYTE, pwt->b[14], pws->b[14], pwt->b[14]);
- pwd->b[15] = msa_msubv_df(DF_BYTE, pwt->b[15], pws->b[15], pwt->b[15]);
+ pwd->b[0] = msa_msubv_df(DF_BYTE, pwd->b[0], pws->b[0], pwt->b[0]);
+ pwd->b[1] = msa_msubv_df(DF_BYTE, pwd->b[1], pws->b[1], pwt->b[1]);
+ pwd->b[2] = msa_msubv_df(DF_BYTE, pwd->b[2], pws->b[2], pwt->b[2]);
+ pwd->b[3] = msa_msubv_df(DF_BYTE, pwd->b[3], pws->b[3], pwt->b[3]);
+ pwd->b[4] = msa_msubv_df(DF_BYTE, pwd->b[4], pws->b[4], pwt->b[4]);
+ pwd->b[5] = msa_msubv_df(DF_BYTE, pwd->b[5], pws->b[5], pwt->b[5]);
+ pwd->b[6] = msa_msubv_df(DF_BYTE, pwd->b[6], pws->b[6], pwt->b[6]);
+ pwd->b[7] = msa_msubv_df(DF_BYTE, pwd->b[7], pws->b[7], pwt->b[7]);
+ pwd->b[8] = msa_msubv_df(DF_BYTE, pwd->b[8], pws->b[8], pwt->b[8]);
+ pwd->b[9] = msa_msubv_df(DF_BYTE, pwd->b[9], pws->b[9], pwt->b[9]);
+ pwd->b[10] = msa_msubv_df(DF_BYTE, pwd->b[10], pws->b[10], pwt->b[10]);
+ pwd->b[11] = msa_msubv_df(DF_BYTE, pwd->b[11], pws->b[11], pwt->b[11]);
+ pwd->b[12] = msa_msubv_df(DF_BYTE, pwd->b[12], pws->b[12], pwt->b[12]);
+ pwd->b[13] = msa_msubv_df(DF_BYTE, pwd->b[13], pws->b[13], pwt->b[13]);
+ pwd->b[14] = msa_msubv_df(DF_BYTE, pwd->b[14], pws->b[14], pwt->b[14]);
+ pwd->b[15] = msa_msubv_df(DF_BYTE, pwd->b[15], pws->b[15], pwt->b[15]);
}
void helper_msa_msubv_h(CPUMIPSState *env,
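
The helper changes above fix the accumulator operand for the byte variants:
MADDV.B and MSUBV.B accumulate into the destination register, but the old code
read pwt (the wt operand) where pwd was intended. Assuming msa_maddv_df(df,
dest, arg1, arg2) computes dest + arg1 * arg2, and msa_msubv_df the subtractive
form, the per-element operation restored by this patch is:

    /* MADDV: wd[i] = wd[i] + ws[i] * wt[i] */
    pwd->b[i] = msa_maddv_df(DF_BYTE, pwd->b[i], pws->b[i], pwt->b[i]);

    /* MSUBV: wd[i] = wd[i] - ws[i] * wt[i] */
    pwd->b[i] = msa_msubv_df(DF_BYTE, pwd->b[i], pws->b[i], pwt->b[i]);
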
diff --git a/target/mips/tcg/msa_translate.c b/target/mips/tcg/msa_translate.c
index 3ef912d..7576b3e 100644
--- a/target/mips/tcg/msa_translate.c
+++ b/target/mips/tcg/msa_translate.c
@@ -17,242 +17,23 @@
#include "fpu_helper.h"
#include "internal.h"
-/* Include the auto-generated decoder. */
-#include "decode-msa.c.inc"
+static int elm_n(DisasContext *ctx, int x);
+static int elm_df(DisasContext *ctx, int x);
+static int bit_m(DisasContext *ctx, int x);
+static int bit_df(DisasContext *ctx, int x);
-#define OPC_MSA (0x1E << 26)
-
-#define MASK_MSA_MINOR(op) (MASK_OP_MAJOR(op) | (op & 0x3F))
-enum {
- OPC_MSA_I8_00 = 0x00 | OPC_MSA,
- OPC_MSA_I8_01 = 0x01 | OPC_MSA,
- OPC_MSA_I8_02 = 0x02 | OPC_MSA,
- OPC_MSA_I5_06 = 0x06 | OPC_MSA,
- OPC_MSA_I5_07 = 0x07 | OPC_MSA,
- OPC_MSA_BIT_09 = 0x09 | OPC_MSA,
- OPC_MSA_BIT_0A = 0x0A | OPC_MSA,
- OPC_MSA_3R_0D = 0x0D | OPC_MSA,
- OPC_MSA_3R_0E = 0x0E | OPC_MSA,
- OPC_MSA_3R_0F = 0x0F | OPC_MSA,
- OPC_MSA_3R_10 = 0x10 | OPC_MSA,
- OPC_MSA_3R_11 = 0x11 | OPC_MSA,
- OPC_MSA_3R_12 = 0x12 | OPC_MSA,
- OPC_MSA_3R_13 = 0x13 | OPC_MSA,
- OPC_MSA_3R_14 = 0x14 | OPC_MSA,
- OPC_MSA_3R_15 = 0x15 | OPC_MSA,
- OPC_MSA_ELM = 0x19 | OPC_MSA,
- OPC_MSA_3RF_1A = 0x1A | OPC_MSA,
- OPC_MSA_3RF_1B = 0x1B | OPC_MSA,
- OPC_MSA_3RF_1C = 0x1C | OPC_MSA,
- OPC_MSA_VEC = 0x1E | OPC_MSA,
-
- /* MI10 instruction */
- OPC_LD_B = (0x20) | OPC_MSA,
- OPC_LD_H = (0x21) | OPC_MSA,
- OPC_LD_W = (0x22) | OPC_MSA,
- OPC_LD_D = (0x23) | OPC_MSA,
- OPC_ST_B = (0x24) | OPC_MSA,
- OPC_ST_H = (0x25) | OPC_MSA,
- OPC_ST_W = (0x26) | OPC_MSA,
- OPC_ST_D = (0x27) | OPC_MSA,
-};
+static inline int plus_1(DisasContext *s, int x)
+{
+ return x + 1;
+}
-enum {
- /* I5 instruction df(bits 22..21) = _b, _h, _w, _d */
- OPC_ADDVI_df = (0x0 << 23) | OPC_MSA_I5_06,
- OPC_CEQI_df = (0x0 << 23) | OPC_MSA_I5_07,
- OPC_SUBVI_df = (0x1 << 23) | OPC_MSA_I5_06,
- OPC_MAXI_S_df = (0x2 << 23) | OPC_MSA_I5_06,
- OPC_CLTI_S_df = (0x2 << 23) | OPC_MSA_I5_07,
- OPC_MAXI_U_df = (0x3 << 23) | OPC_MSA_I5_06,
- OPC_CLTI_U_df = (0x3 << 23) | OPC_MSA_I5_07,
- OPC_MINI_S_df = (0x4 << 23) | OPC_MSA_I5_06,
- OPC_CLEI_S_df = (0x4 << 23) | OPC_MSA_I5_07,
- OPC_MINI_U_df = (0x5 << 23) | OPC_MSA_I5_06,
- OPC_CLEI_U_df = (0x5 << 23) | OPC_MSA_I5_07,
- OPC_LDI_df = (0x6 << 23) | OPC_MSA_I5_07,
-
- /* I8 instruction */
- OPC_ANDI_B = (0x0 << 24) | OPC_MSA_I8_00,
- OPC_BMNZI_B = (0x0 << 24) | OPC_MSA_I8_01,
- OPC_SHF_B = (0x0 << 24) | OPC_MSA_I8_02,
- OPC_ORI_B = (0x1 << 24) | OPC_MSA_I8_00,
- OPC_BMZI_B = (0x1 << 24) | OPC_MSA_I8_01,
- OPC_SHF_H = (0x1 << 24) | OPC_MSA_I8_02,
- OPC_NORI_B = (0x2 << 24) | OPC_MSA_I8_00,
- OPC_BSELI_B = (0x2 << 24) | OPC_MSA_I8_01,
- OPC_SHF_W = (0x2 << 24) | OPC_MSA_I8_02,
- OPC_XORI_B = (0x3 << 24) | OPC_MSA_I8_00,
-
- /* VEC/2R/2RF instruction */
- OPC_AND_V = (0x00 << 21) | OPC_MSA_VEC,
- OPC_OR_V = (0x01 << 21) | OPC_MSA_VEC,
- OPC_NOR_V = (0x02 << 21) | OPC_MSA_VEC,
- OPC_XOR_V = (0x03 << 21) | OPC_MSA_VEC,
- OPC_BMNZ_V = (0x04 << 21) | OPC_MSA_VEC,
- OPC_BMZ_V = (0x05 << 21) | OPC_MSA_VEC,
- OPC_BSEL_V = (0x06 << 21) | OPC_MSA_VEC,
-
- OPC_MSA_2R = (0x18 << 21) | OPC_MSA_VEC,
- OPC_MSA_2RF = (0x19 << 21) | OPC_MSA_VEC,
-
- /* 2R instruction df(bits 17..16) = _b, _h, _w, _d */
- OPC_FILL_df = (0x00 << 18) | OPC_MSA_2R,
- OPC_PCNT_df = (0x01 << 18) | OPC_MSA_2R,
- OPC_NLOC_df = (0x02 << 18) | OPC_MSA_2R,
- OPC_NLZC_df = (0x03 << 18) | OPC_MSA_2R,
-
- /* 2RF instruction df(bit 16) = _w, _d */
- OPC_FCLASS_df = (0x00 << 17) | OPC_MSA_2RF,
- OPC_FTRUNC_S_df = (0x01 << 17) | OPC_MSA_2RF,
- OPC_FTRUNC_U_df = (0x02 << 17) | OPC_MSA_2RF,
- OPC_FSQRT_df = (0x03 << 17) | OPC_MSA_2RF,
- OPC_FRSQRT_df = (0x04 << 17) | OPC_MSA_2RF,
- OPC_FRCP_df = (0x05 << 17) | OPC_MSA_2RF,
- OPC_FRINT_df = (0x06 << 17) | OPC_MSA_2RF,
- OPC_FLOG2_df = (0x07 << 17) | OPC_MSA_2RF,
- OPC_FEXUPL_df = (0x08 << 17) | OPC_MSA_2RF,
- OPC_FEXUPR_df = (0x09 << 17) | OPC_MSA_2RF,
- OPC_FFQL_df = (0x0A << 17) | OPC_MSA_2RF,
- OPC_FFQR_df = (0x0B << 17) | OPC_MSA_2RF,
- OPC_FTINT_S_df = (0x0C << 17) | OPC_MSA_2RF,
- OPC_FTINT_U_df = (0x0D << 17) | OPC_MSA_2RF,
- OPC_FFINT_S_df = (0x0E << 17) | OPC_MSA_2RF,
- OPC_FFINT_U_df = (0x0F << 17) | OPC_MSA_2RF,
-
- /* 3R instruction df(bits 22..21) = _b, _h, _w, d */
- OPC_SLL_df = (0x0 << 23) | OPC_MSA_3R_0D,
- OPC_ADDV_df = (0x0 << 23) | OPC_MSA_3R_0E,
- OPC_CEQ_df = (0x0 << 23) | OPC_MSA_3R_0F,
- OPC_ADD_A_df = (0x0 << 23) | OPC_MSA_3R_10,
- OPC_SUBS_S_df = (0x0 << 23) | OPC_MSA_3R_11,
- OPC_MULV_df = (0x0 << 23) | OPC_MSA_3R_12,
- OPC_DOTP_S_df = (0x0 << 23) | OPC_MSA_3R_13,
- OPC_SLD_df = (0x0 << 23) | OPC_MSA_3R_14,
- OPC_VSHF_df = (0x0 << 23) | OPC_MSA_3R_15,
- OPC_SRA_df = (0x1 << 23) | OPC_MSA_3R_0D,
- OPC_SUBV_df = (0x1 << 23) | OPC_MSA_3R_0E,
- OPC_ADDS_A_df = (0x1 << 23) | OPC_MSA_3R_10,
- OPC_SUBS_U_df = (0x1 << 23) | OPC_MSA_3R_11,
- OPC_MADDV_df = (0x1 << 23) | OPC_MSA_3R_12,
- OPC_DOTP_U_df = (0x1 << 23) | OPC_MSA_3R_13,
- OPC_SPLAT_df = (0x1 << 23) | OPC_MSA_3R_14,
- OPC_SRAR_df = (0x1 << 23) | OPC_MSA_3R_15,
- OPC_SRL_df = (0x2 << 23) | OPC_MSA_3R_0D,
- OPC_MAX_S_df = (0x2 << 23) | OPC_MSA_3R_0E,
- OPC_CLT_S_df = (0x2 << 23) | OPC_MSA_3R_0F,
- OPC_ADDS_S_df = (0x2 << 23) | OPC_MSA_3R_10,
- OPC_SUBSUS_U_df = (0x2 << 23) | OPC_MSA_3R_11,
- OPC_MSUBV_df = (0x2 << 23) | OPC_MSA_3R_12,
- OPC_DPADD_S_df = (0x2 << 23) | OPC_MSA_3R_13,
- OPC_PCKEV_df = (0x2 << 23) | OPC_MSA_3R_14,
- OPC_SRLR_df = (0x2 << 23) | OPC_MSA_3R_15,
- OPC_BCLR_df = (0x3 << 23) | OPC_MSA_3R_0D,
- OPC_MAX_U_df = (0x3 << 23) | OPC_MSA_3R_0E,
- OPC_CLT_U_df = (0x3 << 23) | OPC_MSA_3R_0F,
- OPC_ADDS_U_df = (0x3 << 23) | OPC_MSA_3R_10,
- OPC_SUBSUU_S_df = (0x3 << 23) | OPC_MSA_3R_11,
- OPC_DPADD_U_df = (0x3 << 23) | OPC_MSA_3R_13,
- OPC_PCKOD_df = (0x3 << 23) | OPC_MSA_3R_14,
- OPC_BSET_df = (0x4 << 23) | OPC_MSA_3R_0D,
- OPC_MIN_S_df = (0x4 << 23) | OPC_MSA_3R_0E,
- OPC_CLE_S_df = (0x4 << 23) | OPC_MSA_3R_0F,
- OPC_AVE_S_df = (0x4 << 23) | OPC_MSA_3R_10,
- OPC_ASUB_S_df = (0x4 << 23) | OPC_MSA_3R_11,
- OPC_DIV_S_df = (0x4 << 23) | OPC_MSA_3R_12,
- OPC_DPSUB_S_df = (0x4 << 23) | OPC_MSA_3R_13,
- OPC_ILVL_df = (0x4 << 23) | OPC_MSA_3R_14,
- OPC_HADD_S_df = (0x4 << 23) | OPC_MSA_3R_15,
- OPC_BNEG_df = (0x5 << 23) | OPC_MSA_3R_0D,
- OPC_MIN_U_df = (0x5 << 23) | OPC_MSA_3R_0E,
- OPC_CLE_U_df = (0x5 << 23) | OPC_MSA_3R_0F,
- OPC_AVE_U_df = (0x5 << 23) | OPC_MSA_3R_10,
- OPC_ASUB_U_df = (0x5 << 23) | OPC_MSA_3R_11,
- OPC_DIV_U_df = (0x5 << 23) | OPC_MSA_3R_12,
- OPC_DPSUB_U_df = (0x5 << 23) | OPC_MSA_3R_13,
- OPC_ILVR_df = (0x5 << 23) | OPC_MSA_3R_14,
- OPC_HADD_U_df = (0x5 << 23) | OPC_MSA_3R_15,
- OPC_BINSL_df = (0x6 << 23) | OPC_MSA_3R_0D,
- OPC_MAX_A_df = (0x6 << 23) | OPC_MSA_3R_0E,
- OPC_AVER_S_df = (0x6 << 23) | OPC_MSA_3R_10,
- OPC_MOD_S_df = (0x6 << 23) | OPC_MSA_3R_12,
- OPC_ILVEV_df = (0x6 << 23) | OPC_MSA_3R_14,
- OPC_HSUB_S_df = (0x6 << 23) | OPC_MSA_3R_15,
- OPC_BINSR_df = (0x7 << 23) | OPC_MSA_3R_0D,
- OPC_MIN_A_df = (0x7 << 23) | OPC_MSA_3R_0E,
- OPC_AVER_U_df = (0x7 << 23) | OPC_MSA_3R_10,
- OPC_MOD_U_df = (0x7 << 23) | OPC_MSA_3R_12,
- OPC_ILVOD_df = (0x7 << 23) | OPC_MSA_3R_14,
- OPC_HSUB_U_df = (0x7 << 23) | OPC_MSA_3R_15,
-
- /* ELM instructions df(bits 21..16) = _b, _h, _w, _d */
- OPC_SLDI_df = (0x0 << 22) | (0x00 << 16) | OPC_MSA_ELM,
- OPC_CTCMSA = (0x0 << 22) | (0x3E << 16) | OPC_MSA_ELM,
- OPC_SPLATI_df = (0x1 << 22) | (0x00 << 16) | OPC_MSA_ELM,
- OPC_CFCMSA = (0x1 << 22) | (0x3E << 16) | OPC_MSA_ELM,
- OPC_COPY_S_df = (0x2 << 22) | (0x00 << 16) | OPC_MSA_ELM,
- OPC_MOVE_V = (0x2 << 22) | (0x3E << 16) | OPC_MSA_ELM,
- OPC_COPY_U_df = (0x3 << 22) | (0x00 << 16) | OPC_MSA_ELM,
- OPC_INSERT_df = (0x4 << 22) | (0x00 << 16) | OPC_MSA_ELM,
- OPC_INSVE_df = (0x5 << 22) | (0x00 << 16) | OPC_MSA_ELM,
-
- /* 3RF instruction _df(bit 21) = _w, _d */
- OPC_FCAF_df = (0x0 << 22) | OPC_MSA_3RF_1A,
- OPC_FADD_df = (0x0 << 22) | OPC_MSA_3RF_1B,
- OPC_FCUN_df = (0x1 << 22) | OPC_MSA_3RF_1A,
- OPC_FSUB_df = (0x1 << 22) | OPC_MSA_3RF_1B,
- OPC_FCOR_df = (0x1 << 22) | OPC_MSA_3RF_1C,
- OPC_FCEQ_df = (0x2 << 22) | OPC_MSA_3RF_1A,
- OPC_FMUL_df = (0x2 << 22) | OPC_MSA_3RF_1B,
- OPC_FCUNE_df = (0x2 << 22) | OPC_MSA_3RF_1C,
- OPC_FCUEQ_df = (0x3 << 22) | OPC_MSA_3RF_1A,
- OPC_FDIV_df = (0x3 << 22) | OPC_MSA_3RF_1B,
- OPC_FCNE_df = (0x3 << 22) | OPC_MSA_3RF_1C,
- OPC_FCLT_df = (0x4 << 22) | OPC_MSA_3RF_1A,
- OPC_FMADD_df = (0x4 << 22) | OPC_MSA_3RF_1B,
- OPC_MUL_Q_df = (0x4 << 22) | OPC_MSA_3RF_1C,
- OPC_FCULT_df = (0x5 << 22) | OPC_MSA_3RF_1A,
- OPC_FMSUB_df = (0x5 << 22) | OPC_MSA_3RF_1B,
- OPC_MADD_Q_df = (0x5 << 22) | OPC_MSA_3RF_1C,
- OPC_FCLE_df = (0x6 << 22) | OPC_MSA_3RF_1A,
- OPC_MSUB_Q_df = (0x6 << 22) | OPC_MSA_3RF_1C,
- OPC_FCULE_df = (0x7 << 22) | OPC_MSA_3RF_1A,
- OPC_FEXP2_df = (0x7 << 22) | OPC_MSA_3RF_1B,
- OPC_FSAF_df = (0x8 << 22) | OPC_MSA_3RF_1A,
- OPC_FEXDO_df = (0x8 << 22) | OPC_MSA_3RF_1B,
- OPC_FSUN_df = (0x9 << 22) | OPC_MSA_3RF_1A,
- OPC_FSOR_df = (0x9 << 22) | OPC_MSA_3RF_1C,
- OPC_FSEQ_df = (0xA << 22) | OPC_MSA_3RF_1A,
- OPC_FTQ_df = (0xA << 22) | OPC_MSA_3RF_1B,
- OPC_FSUNE_df = (0xA << 22) | OPC_MSA_3RF_1C,
- OPC_FSUEQ_df = (0xB << 22) | OPC_MSA_3RF_1A,
- OPC_FSNE_df = (0xB << 22) | OPC_MSA_3RF_1C,
- OPC_FSLT_df = (0xC << 22) | OPC_MSA_3RF_1A,
- OPC_FMIN_df = (0xC << 22) | OPC_MSA_3RF_1B,
- OPC_MULR_Q_df = (0xC << 22) | OPC_MSA_3RF_1C,
- OPC_FSULT_df = (0xD << 22) | OPC_MSA_3RF_1A,
- OPC_FMIN_A_df = (0xD << 22) | OPC_MSA_3RF_1B,
- OPC_MADDR_Q_df = (0xD << 22) | OPC_MSA_3RF_1C,
- OPC_FSLE_df = (0xE << 22) | OPC_MSA_3RF_1A,
- OPC_FMAX_df = (0xE << 22) | OPC_MSA_3RF_1B,
- OPC_MSUBR_Q_df = (0xE << 22) | OPC_MSA_3RF_1C,
- OPC_FSULE_df = (0xF << 22) | OPC_MSA_3RF_1A,
- OPC_FMAX_A_df = (0xF << 22) | OPC_MSA_3RF_1B,
-
- /* BIT instruction df(bits 22..16) = _B _H _W _D */
- OPC_SLLI_df = (0x0 << 23) | OPC_MSA_BIT_09,
- OPC_SAT_S_df = (0x0 << 23) | OPC_MSA_BIT_0A,
- OPC_SRAI_df = (0x1 << 23) | OPC_MSA_BIT_09,
- OPC_SAT_U_df = (0x1 << 23) | OPC_MSA_BIT_0A,
- OPC_SRLI_df = (0x2 << 23) | OPC_MSA_BIT_09,
- OPC_SRARI_df = (0x2 << 23) | OPC_MSA_BIT_0A,
- OPC_BCLRI_df = (0x3 << 23) | OPC_MSA_BIT_09,
- OPC_SRLRI_df = (0x3 << 23) | OPC_MSA_BIT_0A,
- OPC_BSETI_df = (0x4 << 23) | OPC_MSA_BIT_09,
- OPC_BNEGI_df = (0x5 << 23) | OPC_MSA_BIT_09,
- OPC_BINSLI_df = (0x6 << 23) | OPC_MSA_BIT_09,
- OPC_BINSRI_df = (0x7 << 23) | OPC_MSA_BIT_09,
-};
+static inline int plus_2(DisasContext *s, int x)
+{
+ return x + 2;
+}
+
+/* Include the auto-generated decoder. */
+#include "decode-msa.c.inc"
static const char msaregnames[][6] = {
"w0.d0", "w0.d1", "w1.d0", "w1.d1",
@@ -273,6 +54,77 @@ static const char msaregnames[][6] = {
"w30.d0", "w30.d1", "w31.d0", "w31.d1",
};
+/* Encoding of the df/{m,n} field (tables must be indexed by CPUMIPSMSADataFormat) */
+struct dfe {
+ int start;
+ int length;
+ uint32_t mask;
+};
+
+/*
+ * Extract immediate from df/{m,n} format (used by ELM & BIT instructions).
+ * Returns the immediate value, or -1 if the format does not match.
+ */
+static int df_extract_val(DisasContext *ctx, int x, const struct dfe *s)
+{
+ for (unsigned i = 0; i < 4; i++) {
+ if (extract32(x, s[i].start, s[i].length) == s[i].mask) {
+ return extract32(x, 0, s[i].start);
+ }
+ }
+ return -1;
+}
+
+/*
+ * Extract DataField from df/{m,n} format (used by ELM & BIT instructions).
+ * Returns the DataField, or -1 if the format does not match.
+ */
+static int df_extract_df(DisasContext *ctx, int x, const struct dfe *s)
+{
+ for (unsigned i = 0; i < 4; i++) {
+ if (extract32(x, s[i].start, s[i].length) == s[i].mask) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+static const struct dfe df_elm[] = {
+ /* Table 3.26 ELM Instruction Format */
+ [DF_BYTE] = {4, 2, 0b00},
+ [DF_HALF] = {3, 3, 0b100},
+ [DF_WORD] = {2, 4, 0b1100},
+ [DF_DOUBLE] = {1, 5, 0b11100}
+};
+
+static int elm_n(DisasContext *ctx, int x)
+{
+ return df_extract_val(ctx, x, df_elm);
+}
+
+static int elm_df(DisasContext *ctx, int x)
+{
+ return df_extract_df(ctx, x, df_elm);
+}
+
+static const struct dfe df_bit[] = {
+ /* Table 3.28 BIT Instruction Format */
+ [DF_BYTE] = {3, 4, 0b1110},
+ [DF_HALF] = {4, 3, 0b110},
+ [DF_WORD] = {5, 2, 0b10},
+ [DF_DOUBLE] = {6, 1, 0b0}
+};
+
+static int bit_m(DisasContext *ctx, int x)
+{
+ return df_extract_val(ctx, x, df_bit);
+}
+
+static int bit_df(DisasContext *ctx, int x)
+{
+ return df_extract_df(ctx, x, df_bit);
+}
+
static TCGv_i64 msa_wr_d[64];
void msa_translate_init(void)
@@ -280,61 +132,80 @@ void msa_translate_init(void)
int i;
for (i = 0; i < 32; i++) {
- int off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[0]);
+ int off;
/*
* The MSA vector registers are mapped on the
* scalar floating-point unit (FPU) registers.
*/
+ off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[0]);
msa_wr_d[i * 2] = fpu_f64[i];
+
off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[1]);
msa_wr_d[i * 2 + 1] =
tcg_global_mem_new_i64(cpu_env, off, msaregnames[i * 2 + 1]);
}
}
-static inline int check_msa_access(DisasContext *ctx)
+/*
+ * Check if MSA is enabled.
+ * This function is always called with MSA available.
+ * If MSA is disabled, raise an exception.
+ */
+static inline bool check_msa_enabled(DisasContext *ctx)
{
if (unlikely((ctx->hflags & MIPS_HFLAG_FPU) &&
!(ctx->hflags & MIPS_HFLAG_F64))) {
gen_reserved_instruction(ctx);
- return 0;
+ return false;
}
if (unlikely(!(ctx->hflags & MIPS_HFLAG_MSA))) {
generate_exception_end(ctx, EXCP_MSADIS);
- return 0;
+ return false;
}
- return 1;
+ return true;
}
+typedef void gen_helper_piv(TCGv_ptr, TCGv_i32, TCGv);
+typedef void gen_helper_pii(TCGv_ptr, TCGv_i32, TCGv_i32);
+typedef void gen_helper_piii(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
+typedef void gen_helper_piiii(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
+
+#define TRANS_DF_x(TYPE, NAME, trans_func, gen_func) \
+ static gen_helper_p##TYPE * const NAME##_tab[4] = { \
+ gen_func##_b, gen_func##_h, gen_func##_w, gen_func##_d \
+ }; \
+ TRANS(NAME, trans_func, NAME##_tab[a->df])
+
+#define TRANS_DF_iv(NAME, trans_func, gen_func) \
+ TRANS_DF_x(iv, NAME, trans_func, gen_func)
+
+#define TRANS_DF_ii(NAME, trans_func, gen_func) \
+ TRANS_DF_x(ii, NAME, trans_func, gen_func)
+
+#define TRANS_DF_iii(NAME, trans_func, gen_func) \
+ TRANS_DF_x(iii, NAME, trans_func, gen_func)
+
+#define TRANS_DF_iii_b(NAME, trans_func, gen_func) \
+ static gen_helper_piii * const NAME##_tab[4] = { \
+ NULL, gen_func##_h, gen_func##_w, gen_func##_d \
+ }; \
+ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+ { \
+ return trans_func(ctx, a, NAME##_tab[a->df]); \
+ }
+
static void gen_check_zero_element(TCGv tresult, uint8_t df, uint8_t wt,
TCGCond cond)
{
/* generates tcg ops to check if any element is 0 */
/* Note this function only works with MSA_WRLEN = 128 */
- uint64_t eval_zero_or_big = 0;
- uint64_t eval_big = 0;
+ uint64_t eval_zero_or_big = dup_const(df, 1);
+ uint64_t eval_big = eval_zero_or_big << ((8 << df) - 1);
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
- switch (df) {
- case DF_BYTE:
- eval_zero_or_big = 0x0101010101010101ULL;
- eval_big = 0x8080808080808080ULL;
- break;
- case DF_HALF:
- eval_zero_or_big = 0x0001000100010001ULL;
- eval_big = 0x8000800080008000ULL;
- break;
- case DF_WORD:
- eval_zero_or_big = 0x0000000100000001ULL;
- eval_big = 0x8000000080000000ULL;
- break;
- case DF_DOUBLE:
- eval_zero_or_big = 0x0000000000000001ULL;
- eval_big = 0x8000000000000000ULL;
- break;
- }
+
tcg_gen_subi_i64(t0, msa_wr_d[wt << 1], eval_zero_or_big);
tcg_gen_andc_i64(t0, t0, msa_wr_d[wt << 1]);
tcg_gen_andi_i64(t0, t0, eval_big);
@@ -350,11 +221,13 @@ static void gen_check_zero_element(TCGv tresult, uint8_t df, uint8_t wt,
tcg_temp_free_i64(t1);
}
-static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int s16, TCGCond cond)
+static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int sa, TCGCond cond)
{
TCGv_i64 t0;
- check_msa_access(ctx);
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
if (ctx->hflags & MIPS_HFLAG_BMASK) {
gen_reserved_instruction(ctx);
@@ -366,7 +239,7 @@ static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int s16, TCGCond cond)
tcg_gen_trunc_i64_tl(bcond, t0);
tcg_temp_free_i64(t0);
- ctx->btarget = ctx->base.pc_next + (s16 << 2) + 4;
+ ctx->btarget = ctx->base.pc_next + (sa << 2) + 4;
ctx->hflags |= MIPS_HFLAG_BC;
ctx->hflags |= MIPS_HFLAG_BDS32;
@@ -376,17 +249,19 @@ static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int s16, TCGCond cond)
static bool trans_BZ_V(DisasContext *ctx, arg_msa_bz *a)
{
- return gen_msa_BxZ_V(ctx, a->wt, a->s16, TCG_COND_EQ);
+ return gen_msa_BxZ_V(ctx, a->wt, a->sa, TCG_COND_EQ);
}
static bool trans_BNZ_V(DisasContext *ctx, arg_msa_bz *a)
{
- return gen_msa_BxZ_V(ctx, a->wt, a->s16, TCG_COND_NE);
+ return gen_msa_BxZ_V(ctx, a->wt, a->sa, TCG_COND_NE);
}
-static bool gen_msa_BxZ(DisasContext *ctx, int df, int wt, int s16, bool if_not)
+static bool gen_msa_BxZ(DisasContext *ctx, int df, int wt, int sa, bool if_not)
{
- check_msa_access(ctx);
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
if (ctx->hflags & MIPS_HFLAG_BMASK) {
gen_reserved_instruction(ctx);
@@ -395,1865 +270,521 @@ static bool gen_msa_BxZ(DisasContext *ctx, int df, int wt, int s16, bool if_not)
gen_check_zero_element(bcond, df, wt, if_not ? TCG_COND_EQ : TCG_COND_NE);
- ctx->btarget = ctx->base.pc_next + (s16 << 2) + 4;
+ ctx->btarget = ctx->base.pc_next + (sa << 2) + 4;
ctx->hflags |= MIPS_HFLAG_BC;
ctx->hflags |= MIPS_HFLAG_BDS32;
return true;
}
-static bool trans_BZ_x(DisasContext *ctx, arg_msa_bz *a)
+static bool trans_BZ(DisasContext *ctx, arg_msa_bz *a)
{
- return gen_msa_BxZ(ctx, a->df, a->wt, a->s16, false);
+ return gen_msa_BxZ(ctx, a->df, a->wt, a->sa, false);
}
-static bool trans_BNZ_x(DisasContext *ctx, arg_msa_bz *a)
+static bool trans_BNZ(DisasContext *ctx, arg_msa_bz *a)
{
- return gen_msa_BxZ(ctx, a->df, a->wt, a->s16, true);
+ return gen_msa_BxZ(ctx, a->df, a->wt, a->sa, true);
}
-static void gen_msa_i8(DisasContext *ctx)
+static bool trans_msa_i8(DisasContext *ctx, arg_msa_i *a,
+ gen_helper_piii *gen_msa_i8)
{
-#define MASK_MSA_I8(op) (MASK_MSA_MINOR(op) | (op & (0x03 << 24)))
- uint8_t i8 = (ctx->opcode >> 16) & 0xff;
- uint8_t ws = (ctx->opcode >> 11) & 0x1f;
- uint8_t wd = (ctx->opcode >> 6) & 0x1f;
-
- TCGv_i32 twd = tcg_const_i32(wd);
- TCGv_i32 tws = tcg_const_i32(ws);
- TCGv_i32 ti8 = tcg_const_i32(i8);
-
- switch (MASK_MSA_I8(ctx->opcode)) {
- case OPC_ANDI_B:
- gen_helper_msa_andi_b(cpu_env, twd, tws, ti8);
- break;
- case OPC_ORI_B:
- gen_helper_msa_ori_b(cpu_env, twd, tws, ti8);
- break;
- case OPC_NORI_B:
- gen_helper_msa_nori_b(cpu_env, twd, tws, ti8);
- break;
- case OPC_XORI_B:
- gen_helper_msa_xori_b(cpu_env, twd, tws, ti8);
- break;
- case OPC_BMNZI_B:
- gen_helper_msa_bmnzi_b(cpu_env, twd, tws, ti8);
- break;
- case OPC_BMZI_B:
- gen_helper_msa_bmzi_b(cpu_env, twd, tws, ti8);
- break;
- case OPC_BSELI_B:
- gen_helper_msa_bseli_b(cpu_env, twd, tws, ti8);
- break;
- case OPC_SHF_B:
- case OPC_SHF_H:
- case OPC_SHF_W:
- {
- uint8_t df = (ctx->opcode >> 24) & 0x3;
- if (df == DF_DOUBLE) {
- gen_reserved_instruction(ctx);
- } else {
- TCGv_i32 tdf = tcg_const_i32(df);
- gen_helper_msa_shf_df(cpu_env, tdf, twd, tws, ti8);
- tcg_temp_free_i32(tdf);
- }
- }
- break;
- default:
- MIPS_INVAL("MSA instruction");
- gen_reserved_instruction(ctx);
- break;
+ if (!check_msa_enabled(ctx)) {
+ return true;
}
- tcg_temp_free_i32(twd);
- tcg_temp_free_i32(tws);
- tcg_temp_free_i32(ti8);
+ gen_msa_i8(cpu_env,
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws),
+ tcg_constant_i32(a->sa));
+
+ return true;
}
-static void gen_msa_i5(DisasContext *ctx)
+TRANS(ANDI, trans_msa_i8, gen_helper_msa_andi_b);
+TRANS(ORI, trans_msa_i8, gen_helper_msa_ori_b);
+TRANS(NORI, trans_msa_i8, gen_helper_msa_nori_b);
+TRANS(XORI, trans_msa_i8, gen_helper_msa_xori_b);
+TRANS(BMNZI, trans_msa_i8, gen_helper_msa_bmnzi_b);
+TRANS(BMZI, trans_msa_i8, gen_helper_msa_bmzi_b);
+TRANS(BSELI, trans_msa_i8, gen_helper_msa_bseli_b);
+
+static bool trans_SHF(DisasContext *ctx, arg_msa_i *a)
{
-#define MASK_MSA_I5(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23)))
- int8_t s5 = (int8_t) sextract32(ctx->opcode, 16, 5);
- uint8_t u5 = extract32(ctx->opcode, 16, 5);
-
- TCGv_i32 tdf = tcg_const_i32(extract32(ctx->opcode, 21, 2));
- TCGv_i32 twd = tcg_const_i32(extract32(ctx->opcode, 11, 5));
- TCGv_i32 tws = tcg_const_i32(extract32(ctx->opcode, 6, 5));
- TCGv_i32 timm = tcg_temp_new_i32();
- tcg_gen_movi_i32(timm, u5);
-
- switch (MASK_MSA_I5(ctx->opcode)) {
- case OPC_ADDVI_df:
- gen_helper_msa_addvi_df(cpu_env, tdf, twd, tws, timm);
- break;
- case OPC_SUBVI_df:
- gen_helper_msa_subvi_df(cpu_env, tdf, twd, tws, timm);
- break;
- case OPC_MAXI_S_df:
- tcg_gen_movi_i32(timm, s5);
- gen_helper_msa_maxi_s_df(cpu_env, tdf, twd, tws, timm);
- break;
- case OPC_MAXI_U_df:
- gen_helper_msa_maxi_u_df(cpu_env, tdf, twd, tws, timm);
- break;
- case OPC_MINI_S_df:
- tcg_gen_movi_i32(timm, s5);
- gen_helper_msa_mini_s_df(cpu_env, tdf, twd, tws, timm);
- break;
- case OPC_MINI_U_df:
- gen_helper_msa_mini_u_df(cpu_env, tdf, twd, tws, timm);
- break;
- case OPC_CEQI_df:
- tcg_gen_movi_i32(timm, s5);
- gen_helper_msa_ceqi_df(cpu_env, tdf, twd, tws, timm);
- break;
- case OPC_CLTI_S_df:
- tcg_gen_movi_i32(timm, s5);
- gen_helper_msa_clti_s_df(cpu_env, tdf, twd, tws, timm);
- break;
- case OPC_CLTI_U_df:
- gen_helper_msa_clti_u_df(cpu_env, tdf, twd, tws, timm);
- break;
- case OPC_CLEI_S_df:
- tcg_gen_movi_i32(timm, s5);
- gen_helper_msa_clei_s_df(cpu_env, tdf, twd, tws, timm);
- break;
- case OPC_CLEI_U_df:
- gen_helper_msa_clei_u_df(cpu_env, tdf, twd, tws, timm);
- break;
- case OPC_LDI_df:
- {
- int32_t s10 = sextract32(ctx->opcode, 11, 10);
- tcg_gen_movi_i32(timm, s10);
- gen_helper_msa_ldi_df(cpu_env, tdf, twd, timm);
- }
- break;
- default:
- MIPS_INVAL("MSA instruction");
- gen_reserved_instruction(ctx);
- break;
+ if (a->df == DF_DOUBLE) {
+ return false;
+ }
+
+ if (!check_msa_enabled(ctx)) {
+ return true;
}
- tcg_temp_free_i32(tdf);
- tcg_temp_free_i32(twd);
- tcg_temp_free_i32(tws);
- tcg_temp_free_i32(timm);
+ gen_helper_msa_shf_df(cpu_env,
+ tcg_constant_i32(a->df),
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws),
+ tcg_constant_i32(a->sa));
+
+ return true;
}
-static void gen_msa_bit(DisasContext *ctx)
+static bool trans_msa_i5(DisasContext *ctx, arg_msa_i *a,
+ gen_helper_piiii *gen_msa_i5)
{
-#define MASK_MSA_BIT(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23)))
- uint8_t dfm = (ctx->opcode >> 16) & 0x7f;
- uint32_t df = 0, m = 0;
- uint8_t ws = (ctx->opcode >> 11) & 0x1f;
- uint8_t wd = (ctx->opcode >> 6) & 0x1f;
-
- TCGv_i32 tdf;
- TCGv_i32 tm;
- TCGv_i32 twd;
- TCGv_i32 tws;
-
- if ((dfm & 0x40) == 0x00) {
- m = dfm & 0x3f;
- df = DF_DOUBLE;
- } else if ((dfm & 0x60) == 0x40) {
- m = dfm & 0x1f;
- df = DF_WORD;
- } else if ((dfm & 0x70) == 0x60) {
- m = dfm & 0x0f;
- df = DF_HALF;
- } else if ((dfm & 0x78) == 0x70) {
- m = dfm & 0x7;
- df = DF_BYTE;
- } else {
- gen_reserved_instruction(ctx);
- return;
+ if (!check_msa_enabled(ctx)) {
+ return true;
}
- tdf = tcg_const_i32(df);
- tm = tcg_const_i32(m);
- twd = tcg_const_i32(wd);
- tws = tcg_const_i32(ws);
-
- switch (MASK_MSA_BIT(ctx->opcode)) {
- case OPC_SLLI_df:
- gen_helper_msa_slli_df(cpu_env, tdf, twd, tws, tm);
- break;
- case OPC_SRAI_df:
- gen_helper_msa_srai_df(cpu_env, tdf, twd, tws, tm);
- break;
- case OPC_SRLI_df:
- gen_helper_msa_srli_df(cpu_env, tdf, twd, tws, tm);
- break;
- case OPC_BCLRI_df:
- gen_helper_msa_bclri_df(cpu_env, tdf, twd, tws, tm);
- break;
- case OPC_BSETI_df:
- gen_helper_msa_bseti_df(cpu_env, tdf, twd, tws, tm);
- break;
- case OPC_BNEGI_df:
- gen_helper_msa_bnegi_df(cpu_env, tdf, twd, tws, tm);
- break;
- case OPC_BINSLI_df:
- gen_helper_msa_binsli_df(cpu_env, tdf, twd, tws, tm);
- break;
- case OPC_BINSRI_df:
- gen_helper_msa_binsri_df(cpu_env, tdf, twd, tws, tm);
- break;
- case OPC_SAT_S_df:
- gen_helper_msa_sat_s_df(cpu_env, tdf, twd, tws, tm);
- break;
- case OPC_SAT_U_df:
- gen_helper_msa_sat_u_df(cpu_env, tdf, twd, tws, tm);
- break;
- case OPC_SRARI_df:
- gen_helper_msa_srari_df(cpu_env, tdf, twd, tws, tm);
- break;
- case OPC_SRLRI_df:
- gen_helper_msa_srlri_df(cpu_env, tdf, twd, tws, tm);
- break;
- default:
- MIPS_INVAL("MSA instruction");
- gen_reserved_instruction(ctx);
- break;
+ gen_msa_i5(cpu_env,
+ tcg_constant_i32(a->df),
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws),
+ tcg_constant_i32(a->sa));
+
+ return true;
+}
+
+TRANS(ADDVI, trans_msa_i5, gen_helper_msa_addvi_df);
+TRANS(SUBVI, trans_msa_i5, gen_helper_msa_subvi_df);
+TRANS(MAXI_S, trans_msa_i5, gen_helper_msa_maxi_s_df);
+TRANS(MAXI_U, trans_msa_i5, gen_helper_msa_maxi_u_df);
+TRANS(MINI_S, trans_msa_i5, gen_helper_msa_mini_s_df);
+TRANS(MINI_U, trans_msa_i5, gen_helper_msa_mini_u_df);
+TRANS(CLTI_S, trans_msa_i5, gen_helper_msa_clti_s_df);
+TRANS(CLTI_U, trans_msa_i5, gen_helper_msa_clti_u_df);
+TRANS(CLEI_S, trans_msa_i5, gen_helper_msa_clei_s_df);
+TRANS(CLEI_U, trans_msa_i5, gen_helper_msa_clei_u_df);
+TRANS(CEQI, trans_msa_i5, gen_helper_msa_ceqi_df);
+
+static bool trans_LDI(DisasContext *ctx, arg_msa_ldi *a)
+{
+ if (!check_msa_enabled(ctx)) {
+ return true;
}
- tcg_temp_free_i32(tdf);
- tcg_temp_free_i32(tm);
- tcg_temp_free_i32(twd);
- tcg_temp_free_i32(tws);
+ gen_helper_msa_ldi_df(cpu_env,
+ tcg_constant_i32(a->df),
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->sa));
+
+ return true;
}
-static void gen_msa_3r(DisasContext *ctx)
+static bool trans_msa_bit(DisasContext *ctx, arg_msa_bit *a,
+ gen_helper_piiii *gen_msa_bit)
{
-#define MASK_MSA_3R(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23)))
- uint8_t df = (ctx->opcode >> 21) & 0x3;
- uint8_t wt = (ctx->opcode >> 16) & 0x1f;
- uint8_t ws = (ctx->opcode >> 11) & 0x1f;
- uint8_t wd = (ctx->opcode >> 6) & 0x1f;
-
- TCGv_i32 tdf = tcg_const_i32(df);
- TCGv_i32 twd = tcg_const_i32(wd);
- TCGv_i32 tws = tcg_const_i32(ws);
- TCGv_i32 twt = tcg_const_i32(wt);
-
- switch (MASK_MSA_3R(ctx->opcode)) {
- case OPC_BINSL_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_binsl_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_binsl_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_binsl_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_binsl_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_BINSR_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_binsr_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_binsr_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_binsr_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_binsr_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_BCLR_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_bclr_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_bclr_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_bclr_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_bclr_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_BNEG_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_bneg_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_bneg_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_bneg_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_bneg_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_BSET_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_bset_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_bset_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_bset_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_bset_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_ADD_A_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_add_a_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_add_a_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_add_a_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_add_a_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_ADDS_A_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_adds_a_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_adds_a_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_adds_a_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_adds_a_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_ADDS_S_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_adds_s_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_adds_s_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_adds_s_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_adds_s_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_ADDS_U_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_adds_u_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_adds_u_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_adds_u_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_adds_u_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_ADDV_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_addv_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_addv_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_addv_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_addv_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_AVE_S_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_ave_s_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_ave_s_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_ave_s_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_ave_s_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_AVE_U_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_ave_u_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_ave_u_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_ave_u_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_ave_u_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_AVER_S_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_aver_s_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_aver_s_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_aver_s_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_aver_s_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_AVER_U_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_aver_u_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_aver_u_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_aver_u_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_aver_u_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_CEQ_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_ceq_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_ceq_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_ceq_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_ceq_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_CLE_S_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_cle_s_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_cle_s_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_cle_s_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_cle_s_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_CLE_U_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_cle_u_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_cle_u_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_cle_u_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_cle_u_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_CLT_S_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_clt_s_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_clt_s_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_clt_s_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_clt_s_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_CLT_U_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_clt_u_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_clt_u_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_clt_u_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_clt_u_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_DIV_S_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_div_s_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_div_s_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_div_s_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_div_s_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_DIV_U_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_div_u_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_div_u_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_div_u_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_div_u_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_MAX_A_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_max_a_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_max_a_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_max_a_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_max_a_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_MAX_S_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_max_s_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_max_s_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_max_s_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_max_s_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_MAX_U_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_max_u_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_max_u_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_max_u_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_max_u_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_MIN_A_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_min_a_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_min_a_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_min_a_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_min_a_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_MIN_S_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_min_s_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_min_s_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_min_s_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_min_s_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_MIN_U_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_min_u_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_min_u_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_min_u_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_min_u_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_MOD_S_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_mod_s_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_mod_s_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_mod_s_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_mod_s_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_MOD_U_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_mod_u_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_mod_u_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_mod_u_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_mod_u_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_MADDV_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_maddv_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_maddv_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_maddv_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_maddv_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_MSUBV_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_msubv_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_msubv_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_msubv_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_msubv_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_ASUB_S_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_asub_s_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_asub_s_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_asub_s_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_asub_s_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_ASUB_U_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_asub_u_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_asub_u_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_asub_u_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_asub_u_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_ILVEV_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_ilvev_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_ilvev_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_ilvev_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_ilvev_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_ILVOD_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_ilvod_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_ilvod_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_ilvod_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_ilvod_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_ILVL_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_ilvl_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_ilvl_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_ilvl_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_ilvl_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_ILVR_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_ilvr_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_ilvr_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_ilvr_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_ilvr_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_PCKEV_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_pckev_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_pckev_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_pckev_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_pckev_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_PCKOD_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_pckod_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_pckod_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_pckod_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_pckod_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_SLL_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_sll_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_sll_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_sll_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_sll_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_SRA_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_sra_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_sra_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_sra_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_sra_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_SRAR_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_srar_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_srar_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_srar_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_srar_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_SRL_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_srl_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_srl_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_srl_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_srl_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_SRLR_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_srlr_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_srlr_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_srlr_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_srlr_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_SUBS_S_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_subs_s_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_subs_s_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_subs_s_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_subs_s_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_MULV_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_mulv_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_mulv_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_mulv_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_mulv_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_SLD_df:
- gen_helper_msa_sld_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_VSHF_df:
- gen_helper_msa_vshf_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_SUBV_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_subv_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_subv_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_subv_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_subv_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_SUBS_U_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_subs_u_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_subs_u_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_subs_u_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_subs_u_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_SPLAT_df:
- gen_helper_msa_splat_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_SUBSUS_U_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_subsus_u_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_subsus_u_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_subsus_u_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_subsus_u_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_SUBSUU_S_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_subsuu_s_b(cpu_env, twd, tws, twt);
- break;
- case DF_HALF:
- gen_helper_msa_subsuu_s_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_subsuu_s_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_subsuu_s_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
-
- case OPC_DOTP_S_df:
- case OPC_DOTP_U_df:
- case OPC_DPADD_S_df:
- case OPC_DPADD_U_df:
- case OPC_DPSUB_S_df:
- case OPC_HADD_S_df:
- case OPC_DPSUB_U_df:
- case OPC_HADD_U_df:
- case OPC_HSUB_S_df:
- case OPC_HSUB_U_df:
- if (df == DF_BYTE) {
- gen_reserved_instruction(ctx);
- break;
- }
- switch (MASK_MSA_3R(ctx->opcode)) {
- case OPC_HADD_S_df:
- switch (df) {
- case DF_HALF:
- gen_helper_msa_hadd_s_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_hadd_s_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_hadd_s_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_HADD_U_df:
- switch (df) {
- case DF_HALF:
- gen_helper_msa_hadd_u_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_hadd_u_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_hadd_u_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_HSUB_S_df:
- switch (df) {
- case DF_HALF:
- gen_helper_msa_hsub_s_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_hsub_s_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_hsub_s_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_HSUB_U_df:
- switch (df) {
- case DF_HALF:
- gen_helper_msa_hsub_u_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_hsub_u_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_hsub_u_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_DOTP_S_df:
- switch (df) {
- case DF_HALF:
- gen_helper_msa_dotp_s_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_dotp_s_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_dotp_s_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_DOTP_U_df:
- switch (df) {
- case DF_HALF:
- gen_helper_msa_dotp_u_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_dotp_u_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_dotp_u_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_DPADD_S_df:
- switch (df) {
- case DF_HALF:
- gen_helper_msa_dpadd_s_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_dpadd_s_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_dpadd_s_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_DPADD_U_df:
- switch (df) {
- case DF_HALF:
- gen_helper_msa_dpadd_u_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_dpadd_u_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_dpadd_u_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_DPSUB_S_df:
- switch (df) {
- case DF_HALF:
- gen_helper_msa_dpsub_s_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_dpsub_s_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_dpsub_s_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- case OPC_DPSUB_U_df:
- switch (df) {
- case DF_HALF:
- gen_helper_msa_dpsub_u_h(cpu_env, twd, tws, twt);
- break;
- case DF_WORD:
- gen_helper_msa_dpsub_u_w(cpu_env, twd, tws, twt);
- break;
- case DF_DOUBLE:
- gen_helper_msa_dpsub_u_d(cpu_env, twd, tws, twt);
- break;
- }
- break;
- }
- break;
- default:
- MIPS_INVAL("MSA instruction");
- gen_reserved_instruction(ctx);
- break;
+ if (a->df < 0) {
+ return false;
}
- tcg_temp_free_i32(twd);
- tcg_temp_free_i32(tws);
- tcg_temp_free_i32(twt);
- tcg_temp_free_i32(tdf);
+
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ gen_msa_bit(cpu_env,
+ tcg_constant_i32(a->df),
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws),
+ tcg_constant_i32(a->m));
+
+ return true;
}
-static void gen_msa_elm_3e(DisasContext *ctx)
+TRANS(SLLI, trans_msa_bit, gen_helper_msa_slli_df);
+TRANS(SRAI, trans_msa_bit, gen_helper_msa_srai_df);
+TRANS(SRLI, trans_msa_bit, gen_helper_msa_srli_df);
+TRANS(BCLRI, trans_msa_bit, gen_helper_msa_bclri_df);
+TRANS(BSETI, trans_msa_bit, gen_helper_msa_bseti_df);
+TRANS(BNEGI, trans_msa_bit, gen_helper_msa_bnegi_df);
+TRANS(BINSLI, trans_msa_bit, gen_helper_msa_binsli_df);
+TRANS(BINSRI, trans_msa_bit, gen_helper_msa_binsri_df);
+TRANS(SAT_S, trans_msa_bit, gen_helper_msa_sat_s_df);
+TRANS(SAT_U, trans_msa_bit, gen_helper_msa_sat_u_df);
+TRANS(SRARI, trans_msa_bit, gen_helper_msa_srari_df);
+TRANS(SRLRI, trans_msa_bit, gen_helper_msa_srlri_df);
+
+static bool trans_msa_3rf(DisasContext *ctx, arg_msa_r *a,
+ gen_helper_piiii *gen_msa_3rf)
{
-#define MASK_MSA_ELM_DF3E(op) (MASK_MSA_MINOR(op) | (op & (0x3FF << 16)))
- uint8_t source = (ctx->opcode >> 11) & 0x1f;
- uint8_t dest = (ctx->opcode >> 6) & 0x1f;
- TCGv telm = tcg_temp_new();
- TCGv_i32 tsr = tcg_const_i32(source);
- TCGv_i32 tdt = tcg_const_i32(dest);
-
- switch (MASK_MSA_ELM_DF3E(ctx->opcode)) {
- case OPC_CTCMSA:
- gen_load_gpr(telm, source);
- gen_helper_msa_ctcmsa(cpu_env, telm, tdt);
- break;
- case OPC_CFCMSA:
- gen_helper_msa_cfcmsa(telm, cpu_env, tsr);
- gen_store_gpr(telm, dest);
- break;
- case OPC_MOVE_V:
- gen_helper_msa_move_v(cpu_env, tdt, tsr);
- break;
- default:
- MIPS_INVAL("MSA instruction");
- gen_reserved_instruction(ctx);
- break;
+ if (!check_msa_enabled(ctx)) {
+ return true;
}
- tcg_temp_free(telm);
- tcg_temp_free_i32(tdt);
- tcg_temp_free_i32(tsr);
+ gen_msa_3rf(cpu_env,
+ tcg_constant_i32(a->df),
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws),
+ tcg_constant_i32(a->wt));
+
+ return true;
}
-static void gen_msa_elm_df(DisasContext *ctx, uint32_t df, uint32_t n)
+static bool trans_msa_3r(DisasContext *ctx, arg_msa_r *a,
+ gen_helper_piii *gen_msa_3r)
{
-#define MASK_MSA_ELM(op) (MASK_MSA_MINOR(op) | (op & (0xf << 22)))
- uint8_t ws = (ctx->opcode >> 11) & 0x1f;
- uint8_t wd = (ctx->opcode >> 6) & 0x1f;
-
- TCGv_i32 tws = tcg_const_i32(ws);
- TCGv_i32 twd = tcg_const_i32(wd);
- TCGv_i32 tn = tcg_const_i32(n);
- TCGv_i32 tdf = tcg_constant_i32(df);
-
- switch (MASK_MSA_ELM(ctx->opcode)) {
- case OPC_SLDI_df:
- gen_helper_msa_sldi_df(cpu_env, tdf, twd, tws, tn);
- break;
- case OPC_SPLATI_df:
- gen_helper_msa_splati_df(cpu_env, tdf, twd, tws, tn);
- break;
- case OPC_INSVE_df:
- gen_helper_msa_insve_df(cpu_env, tdf, twd, tws, tn);
- break;
- case OPC_COPY_S_df:
- case OPC_COPY_U_df:
- case OPC_INSERT_df:
-#if !defined(TARGET_MIPS64)
- /* Double format valid only for MIPS64 */
- if (df == DF_DOUBLE) {
- gen_reserved_instruction(ctx);
- break;
- }
- if ((MASK_MSA_ELM(ctx->opcode) == OPC_COPY_U_df) &&
- (df == DF_WORD)) {
- gen_reserved_instruction(ctx);
- break;
- }
-#endif
- switch (MASK_MSA_ELM(ctx->opcode)) {
- case OPC_COPY_S_df:
- if (likely(wd != 0)) {
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_copy_s_b(cpu_env, twd, tws, tn);
- break;
- case DF_HALF:
- gen_helper_msa_copy_s_h(cpu_env, twd, tws, tn);
- break;
- case DF_WORD:
- gen_helper_msa_copy_s_w(cpu_env, twd, tws, tn);
- break;
-#if defined(TARGET_MIPS64)
- case DF_DOUBLE:
- gen_helper_msa_copy_s_d(cpu_env, twd, tws, tn);
- break;
-#endif
- default:
- assert(0);
- }
- }
- break;
- case OPC_COPY_U_df:
- if (likely(wd != 0)) {
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_copy_u_b(cpu_env, twd, tws, tn);
- break;
- case DF_HALF:
- gen_helper_msa_copy_u_h(cpu_env, twd, tws, tn);
- break;
-#if defined(TARGET_MIPS64)
- case DF_WORD:
- gen_helper_msa_copy_u_w(cpu_env, twd, tws, tn);
- break;
-#endif
- default:
- assert(0);
- }
- }
- break;
- case OPC_INSERT_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_insert_b(cpu_env, twd, tws, tn);
- break;
- case DF_HALF:
- gen_helper_msa_insert_h(cpu_env, twd, tws, tn);
- break;
- case DF_WORD:
- gen_helper_msa_insert_w(cpu_env, twd, tws, tn);
- break;
-#if defined(TARGET_MIPS64)
- case DF_DOUBLE:
- gen_helper_msa_insert_d(cpu_env, twd, tws, tn);
- break;
-#endif
- default:
- assert(0);
- }
- break;
- }
- break;
- default:
- MIPS_INVAL("MSA instruction");
- gen_reserved_instruction(ctx);
+ if (!gen_msa_3r) {
+ return false;
}
- tcg_temp_free_i32(twd);
- tcg_temp_free_i32(tws);
- tcg_temp_free_i32(tn);
+
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ gen_msa_3r(cpu_env,
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws),
+ tcg_constant_i32(a->wt));
+
+ return true;
}
-static void gen_msa_elm(DisasContext *ctx)
+TRANS(AND_V, trans_msa_3r, gen_helper_msa_and_v);
+TRANS(OR_V, trans_msa_3r, gen_helper_msa_or_v);
+TRANS(NOR_V, trans_msa_3r, gen_helper_msa_nor_v);
+TRANS(XOR_V, trans_msa_3r, gen_helper_msa_xor_v);
+TRANS(BMNZ_V, trans_msa_3r, gen_helper_msa_bmnz_v);
+TRANS(BMZ_V, trans_msa_3r, gen_helper_msa_bmz_v);
+TRANS(BSEL_V, trans_msa_3r, gen_helper_msa_bsel_v);
+
+TRANS_DF_iii(SLL, trans_msa_3r, gen_helper_msa_sll);
+TRANS_DF_iii(SRA, trans_msa_3r, gen_helper_msa_sra);
+TRANS_DF_iii(SRL, trans_msa_3r, gen_helper_msa_srl);
+TRANS_DF_iii(BCLR, trans_msa_3r, gen_helper_msa_bclr);
+TRANS_DF_iii(BSET, trans_msa_3r, gen_helper_msa_bset);
+TRANS_DF_iii(BNEG, trans_msa_3r, gen_helper_msa_bneg);
+TRANS_DF_iii(BINSL, trans_msa_3r, gen_helper_msa_binsl);
+TRANS_DF_iii(BINSR, trans_msa_3r, gen_helper_msa_binsr);
+
+TRANS_DF_iii(ADDV, trans_msa_3r, gen_helper_msa_addv);
+TRANS_DF_iii(SUBV, trans_msa_3r, gen_helper_msa_subv);
+TRANS_DF_iii(MAX_S, trans_msa_3r, gen_helper_msa_max_s);
+TRANS_DF_iii(MAX_U, trans_msa_3r, gen_helper_msa_max_u);
+TRANS_DF_iii(MIN_S, trans_msa_3r, gen_helper_msa_min_s);
+TRANS_DF_iii(MIN_U, trans_msa_3r, gen_helper_msa_min_u);
+TRANS_DF_iii(MAX_A, trans_msa_3r, gen_helper_msa_max_a);
+TRANS_DF_iii(MIN_A, trans_msa_3r, gen_helper_msa_min_a);
+
+TRANS_DF_iii(CEQ, trans_msa_3r, gen_helper_msa_ceq);
+TRANS_DF_iii(CLT_S, trans_msa_3r, gen_helper_msa_clt_s);
+TRANS_DF_iii(CLT_U, trans_msa_3r, gen_helper_msa_clt_u);
+TRANS_DF_iii(CLE_S, trans_msa_3r, gen_helper_msa_cle_s);
+TRANS_DF_iii(CLE_U, trans_msa_3r, gen_helper_msa_cle_u);
+
+TRANS_DF_iii(ADD_A, trans_msa_3r, gen_helper_msa_add_a);
+TRANS_DF_iii(ADDS_A, trans_msa_3r, gen_helper_msa_adds_a);
+TRANS_DF_iii(ADDS_S, trans_msa_3r, gen_helper_msa_adds_s);
+TRANS_DF_iii(ADDS_U, trans_msa_3r, gen_helper_msa_adds_u);
+TRANS_DF_iii(AVE_S, trans_msa_3r, gen_helper_msa_ave_s);
+TRANS_DF_iii(AVE_U, trans_msa_3r, gen_helper_msa_ave_u);
+TRANS_DF_iii(AVER_S, trans_msa_3r, gen_helper_msa_aver_s);
+TRANS_DF_iii(AVER_U, trans_msa_3r, gen_helper_msa_aver_u);
+
+TRANS_DF_iii(SUBS_S, trans_msa_3r, gen_helper_msa_subs_s);
+TRANS_DF_iii(SUBS_U, trans_msa_3r, gen_helper_msa_subs_u);
+TRANS_DF_iii(SUBSUS_U, trans_msa_3r, gen_helper_msa_subsus_u);
+TRANS_DF_iii(SUBSUU_S, trans_msa_3r, gen_helper_msa_subsuu_s);
+TRANS_DF_iii(ASUB_S, trans_msa_3r, gen_helper_msa_asub_s);
+TRANS_DF_iii(ASUB_U, trans_msa_3r, gen_helper_msa_asub_u);
+
+TRANS_DF_iii(MULV, trans_msa_3r, gen_helper_msa_mulv);
+TRANS_DF_iii(MADDV, trans_msa_3r, gen_helper_msa_maddv);
+TRANS_DF_iii(MSUBV, trans_msa_3r, gen_helper_msa_msubv);
+TRANS_DF_iii(DIV_S, trans_msa_3r, gen_helper_msa_div_s);
+TRANS_DF_iii(DIV_U, trans_msa_3r, gen_helper_msa_div_u);
+TRANS_DF_iii(MOD_S, trans_msa_3r, gen_helper_msa_mod_s);
+TRANS_DF_iii(MOD_U, trans_msa_3r, gen_helper_msa_mod_u);
+
+TRANS_DF_iii_b(DOTP_S, trans_msa_3r, gen_helper_msa_dotp_s);
+TRANS_DF_iii_b(DOTP_U, trans_msa_3r, gen_helper_msa_dotp_u);
+TRANS_DF_iii_b(DPADD_S, trans_msa_3r, gen_helper_msa_dpadd_s);
+TRANS_DF_iii_b(DPADD_U, trans_msa_3r, gen_helper_msa_dpadd_u);
+TRANS_DF_iii_b(DPSUB_S, trans_msa_3r, gen_helper_msa_dpsub_s);
+TRANS_DF_iii_b(DPSUB_U, trans_msa_3r, gen_helper_msa_dpsub_u);
+
+TRANS(SLD, trans_msa_3rf, gen_helper_msa_sld_df);
+TRANS(SPLAT, trans_msa_3rf, gen_helper_msa_splat_df);
+TRANS_DF_iii(PCKEV, trans_msa_3r, gen_helper_msa_pckev);
+TRANS_DF_iii(PCKOD, trans_msa_3r, gen_helper_msa_pckod);
+TRANS_DF_iii(ILVL, trans_msa_3r, gen_helper_msa_ilvl);
+TRANS_DF_iii(ILVR, trans_msa_3r, gen_helper_msa_ilvr);
+TRANS_DF_iii(ILVEV, trans_msa_3r, gen_helper_msa_ilvev);
+TRANS_DF_iii(ILVOD, trans_msa_3r, gen_helper_msa_ilvod);
+
+TRANS(VSHF, trans_msa_3rf, gen_helper_msa_vshf_df);
+TRANS_DF_iii(SRAR, trans_msa_3r, gen_helper_msa_srar);
+TRANS_DF_iii(SRLR, trans_msa_3r, gen_helper_msa_srlr);
+TRANS_DF_iii_b(HADD_S, trans_msa_3r, gen_helper_msa_hadd_s);
+TRANS_DF_iii_b(HADD_U, trans_msa_3r, gen_helper_msa_hadd_u);
+TRANS_DF_iii_b(HSUB_S, trans_msa_3r, gen_helper_msa_hsub_s);
+TRANS_DF_iii_b(HSUB_U, trans_msa_3r, gen_helper_msa_hsub_u);
+
+static bool trans_MOVE_V(DisasContext *ctx, arg_msa_elm *a)
{
- uint8_t dfn = (ctx->opcode >> 16) & 0x3f;
- uint32_t df = 0, n = 0;
-
- if ((dfn & 0x30) == 0x00) {
- n = dfn & 0x0f;
- df = DF_BYTE;
- } else if ((dfn & 0x38) == 0x20) {
- n = dfn & 0x07;
- df = DF_HALF;
- } else if ((dfn & 0x3c) == 0x30) {
- n = dfn & 0x03;
- df = DF_WORD;
- } else if ((dfn & 0x3e) == 0x38) {
- n = dfn & 0x01;
- df = DF_DOUBLE;
- } else if (dfn == 0x3E) {
- /* CTCMSA, CFCMSA, MOVE.V */
- gen_msa_elm_3e(ctx);
- return;
- } else {
- gen_reserved_instruction(ctx);
- return;
+ if (!check_msa_enabled(ctx)) {
+ return true;
}
- gen_msa_elm_df(ctx, df, n);
+ gen_helper_msa_move_v(cpu_env,
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws));
+
+ return true;
}
-static void gen_msa_3rf(DisasContext *ctx)
+static bool trans_CTCMSA(DisasContext *ctx, arg_msa_elm *a)
{
-#define MASK_MSA_3RF(op) (MASK_MSA_MINOR(op) | (op & (0xf << 22)))
- uint8_t df = (ctx->opcode >> 21) & 0x1;
- uint8_t wt = (ctx->opcode >> 16) & 0x1f;
- uint8_t ws = (ctx->opcode >> 11) & 0x1f;
- uint8_t wd = (ctx->opcode >> 6) & 0x1f;
-
- TCGv_i32 twd = tcg_const_i32(wd);
- TCGv_i32 tws = tcg_const_i32(ws);
- TCGv_i32 twt = tcg_const_i32(wt);
- TCGv_i32 tdf;
-
- /* adjust df value for floating-point instruction */
- switch (MASK_MSA_3RF(ctx->opcode)) {
- case OPC_MUL_Q_df:
- case OPC_MADD_Q_df:
- case OPC_MSUB_Q_df:
- case OPC_MULR_Q_df:
- case OPC_MADDR_Q_df:
- case OPC_MSUBR_Q_df:
- tdf = tcg_constant_i32(df + 1);
- break;
- default:
- tdf = tcg_constant_i32(df + 2);
- break;
+ TCGv telm;
+
+ if (!check_msa_enabled(ctx)) {
+ return true;
}
- switch (MASK_MSA_3RF(ctx->opcode)) {
- case OPC_FCAF_df:
- gen_helper_msa_fcaf_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FADD_df:
- gen_helper_msa_fadd_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FCUN_df:
- gen_helper_msa_fcun_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FSUB_df:
- gen_helper_msa_fsub_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FCOR_df:
- gen_helper_msa_fcor_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FCEQ_df:
- gen_helper_msa_fceq_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FMUL_df:
- gen_helper_msa_fmul_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FCUNE_df:
- gen_helper_msa_fcune_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FCUEQ_df:
- gen_helper_msa_fcueq_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FDIV_df:
- gen_helper_msa_fdiv_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FCNE_df:
- gen_helper_msa_fcne_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FCLT_df:
- gen_helper_msa_fclt_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FMADD_df:
- gen_helper_msa_fmadd_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_MUL_Q_df:
- gen_helper_msa_mul_q_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FCULT_df:
- gen_helper_msa_fcult_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FMSUB_df:
- gen_helper_msa_fmsub_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_MADD_Q_df:
- gen_helper_msa_madd_q_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FCLE_df:
- gen_helper_msa_fcle_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_MSUB_Q_df:
- gen_helper_msa_msub_q_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FCULE_df:
- gen_helper_msa_fcule_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FEXP2_df:
- gen_helper_msa_fexp2_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FSAF_df:
- gen_helper_msa_fsaf_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FEXDO_df:
- gen_helper_msa_fexdo_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FSUN_df:
- gen_helper_msa_fsun_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FSOR_df:
- gen_helper_msa_fsor_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FSEQ_df:
- gen_helper_msa_fseq_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FTQ_df:
- gen_helper_msa_ftq_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FSUNE_df:
- gen_helper_msa_fsune_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FSUEQ_df:
- gen_helper_msa_fsueq_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FSNE_df:
- gen_helper_msa_fsne_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FSLT_df:
- gen_helper_msa_fslt_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FMIN_df:
- gen_helper_msa_fmin_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_MULR_Q_df:
- gen_helper_msa_mulr_q_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FSULT_df:
- gen_helper_msa_fsult_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FMIN_A_df:
- gen_helper_msa_fmin_a_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_MADDR_Q_df:
- gen_helper_msa_maddr_q_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FSLE_df:
- gen_helper_msa_fsle_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FMAX_df:
- gen_helper_msa_fmax_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_MSUBR_Q_df:
- gen_helper_msa_msubr_q_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FSULE_df:
- gen_helper_msa_fsule_df(cpu_env, tdf, twd, tws, twt);
- break;
- case OPC_FMAX_A_df:
- gen_helper_msa_fmax_a_df(cpu_env, tdf, twd, tws, twt);
- break;
- default:
- MIPS_INVAL("MSA instruction");
- gen_reserved_instruction(ctx);
- break;
+ telm = tcg_temp_new();
+
+ gen_load_gpr(telm, a->ws);
+ gen_helper_msa_ctcmsa(cpu_env, telm, tcg_constant_i32(a->wd));
+
+ tcg_temp_free(telm);
+
+ return true;
+}
+
+static bool trans_CFCMSA(DisasContext *ctx, arg_msa_elm *a)
+{
+ TCGv telm;
+
+ if (!check_msa_enabled(ctx)) {
+ return true;
}
- tcg_temp_free_i32(twd);
- tcg_temp_free_i32(tws);
- tcg_temp_free_i32(twt);
+ telm = tcg_temp_new();
+
+ gen_helper_msa_cfcmsa(telm, cpu_env, tcg_constant_i32(a->ws));
+ gen_store_gpr(telm, a->wd);
+
+ tcg_temp_free(telm);
+
+ return true;
}
-static void gen_msa_2r(DisasContext *ctx)
+static bool trans_msa_elm(DisasContext *ctx, arg_msa_elm_df *a,
+ gen_helper_piiii *gen_msa_elm_df)
{
-#define MASK_MSA_2R(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21)) | \
- (op & (0x7 << 18)))
- uint8_t ws = (ctx->opcode >> 11) & 0x1f;
- uint8_t wd = (ctx->opcode >> 6) & 0x1f;
- uint8_t df = (ctx->opcode >> 16) & 0x3;
- TCGv_i32 twd = tcg_const_i32(wd);
- TCGv_i32 tws = tcg_const_i32(ws);
-
- switch (MASK_MSA_2R(ctx->opcode)) {
- case OPC_FILL_df:
-#if !defined(TARGET_MIPS64)
- /* Double format valid only for MIPS64 */
- if (df == DF_DOUBLE) {
- gen_reserved_instruction(ctx);
- break;
- }
-#endif
- gen_helper_msa_fill_df(cpu_env, tcg_constant_i32(df),
- twd, tws); /* trs */
- break;
- case OPC_NLOC_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_nloc_b(cpu_env, twd, tws);
- break;
- case DF_HALF:
- gen_helper_msa_nloc_h(cpu_env, twd, tws);
- break;
- case DF_WORD:
- gen_helper_msa_nloc_w(cpu_env, twd, tws);
- break;
- case DF_DOUBLE:
- gen_helper_msa_nloc_d(cpu_env, twd, tws);
- break;
- }
- break;
- case OPC_NLZC_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_nlzc_b(cpu_env, twd, tws);
- break;
- case DF_HALF:
- gen_helper_msa_nlzc_h(cpu_env, twd, tws);
- break;
- case DF_WORD:
- gen_helper_msa_nlzc_w(cpu_env, twd, tws);
- break;
- case DF_DOUBLE:
- gen_helper_msa_nlzc_d(cpu_env, twd, tws);
- break;
- }
- break;
- case OPC_PCNT_df:
- switch (df) {
- case DF_BYTE:
- gen_helper_msa_pcnt_b(cpu_env, twd, tws);
- break;
- case DF_HALF:
- gen_helper_msa_pcnt_h(cpu_env, twd, tws);
- break;
- case DF_WORD:
- gen_helper_msa_pcnt_w(cpu_env, twd, tws);
- break;
- case DF_DOUBLE:
- gen_helper_msa_pcnt_d(cpu_env, twd, tws);
- break;
- }
- break;
- default:
- MIPS_INVAL("MSA instruction");
- gen_reserved_instruction(ctx);
- break;
+ if (a->df < 0) {
+ return false;
}
- tcg_temp_free_i32(twd);
- tcg_temp_free_i32(tws);
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ gen_msa_elm_df(cpu_env,
+ tcg_constant_i32(a->df),
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws),
+ tcg_constant_i32(a->n));
+
+ return true;
}
-static void gen_msa_2rf(DisasContext *ctx)
+TRANS(SLDI, trans_msa_elm, gen_helper_msa_sldi_df);
+TRANS(SPLATI, trans_msa_elm, gen_helper_msa_splati_df);
+TRANS(INSVE, trans_msa_elm, gen_helper_msa_insve_df);
+
+static bool trans_msa_elm_fn(DisasContext *ctx, arg_msa_elm_df *a,
+ gen_helper_piii * const gen_msa_elm[4])
{
-#define MASK_MSA_2RF(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21)) | \
- (op & (0xf << 17)))
- uint8_t ws = (ctx->opcode >> 11) & 0x1f;
- uint8_t wd = (ctx->opcode >> 6) & 0x1f;
- uint8_t df = (ctx->opcode >> 16) & 0x1;
- TCGv_i32 twd = tcg_const_i32(wd);
- TCGv_i32 tws = tcg_const_i32(ws);
- /* adjust df value for floating-point instruction */
- TCGv_i32 tdf = tcg_constant_i32(df + 2);
-
- switch (MASK_MSA_2RF(ctx->opcode)) {
- case OPC_FCLASS_df:
- gen_helper_msa_fclass_df(cpu_env, tdf, twd, tws);
- break;
- case OPC_FTRUNC_S_df:
- gen_helper_msa_ftrunc_s_df(cpu_env, tdf, twd, tws);
- break;
- case OPC_FTRUNC_U_df:
- gen_helper_msa_ftrunc_u_df(cpu_env, tdf, twd, tws);
- break;
- case OPC_FSQRT_df:
- gen_helper_msa_fsqrt_df(cpu_env, tdf, twd, tws);
- break;
- case OPC_FRSQRT_df:
- gen_helper_msa_frsqrt_df(cpu_env, tdf, twd, tws);
- break;
- case OPC_FRCP_df:
- gen_helper_msa_frcp_df(cpu_env, tdf, twd, tws);
- break;
- case OPC_FRINT_df:
- gen_helper_msa_frint_df(cpu_env, tdf, twd, tws);
- break;
- case OPC_FLOG2_df:
- gen_helper_msa_flog2_df(cpu_env, tdf, twd, tws);
- break;
- case OPC_FEXUPL_df:
- gen_helper_msa_fexupl_df(cpu_env, tdf, twd, tws);
- break;
- case OPC_FEXUPR_df:
- gen_helper_msa_fexupr_df(cpu_env, tdf, twd, tws);
- break;
- case OPC_FFQL_df:
- gen_helper_msa_ffql_df(cpu_env, tdf, twd, tws);
- break;
- case OPC_FFQR_df:
- gen_helper_msa_ffqr_df(cpu_env, tdf, twd, tws);
- break;
- case OPC_FTINT_S_df:
- gen_helper_msa_ftint_s_df(cpu_env, tdf, twd, tws);
- break;
- case OPC_FTINT_U_df:
- gen_helper_msa_ftint_u_df(cpu_env, tdf, twd, tws);
- break;
- case OPC_FFINT_S_df:
- gen_helper_msa_ffint_s_df(cpu_env, tdf, twd, tws);
- break;
- case OPC_FFINT_U_df:
- gen_helper_msa_ffint_u_df(cpu_env, tdf, twd, tws);
- break;
+ if (a->df < 0 || !gen_msa_elm[a->df]) {
+ return false;
+ }
+
+    if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ if (a->wd == 0) {
+ /* Treat as NOP. */
+ return true;
}
- tcg_temp_free_i32(twd);
- tcg_temp_free_i32(tws);
+ gen_msa_elm[a->df](cpu_env,
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws),
+ tcg_constant_i32(a->n));
+
+ return true;
}
-static void gen_msa_vec_v(DisasContext *ctx)
+#if defined(TARGET_MIPS64)
+#define NULL_IF_MIPS32(function) function
+#else
+#define NULL_IF_MIPS32(function) NULL
+#endif
+
+static bool trans_COPY_U(DisasContext *ctx, arg_msa_elm_df *a)
{
-#define MASK_MSA_VEC(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21)))
- uint8_t wt = (ctx->opcode >> 16) & 0x1f;
- uint8_t ws = (ctx->opcode >> 11) & 0x1f;
- uint8_t wd = (ctx->opcode >> 6) & 0x1f;
- TCGv_i32 twd = tcg_const_i32(wd);
- TCGv_i32 tws = tcg_const_i32(ws);
- TCGv_i32 twt = tcg_const_i32(wt);
-
- switch (MASK_MSA_VEC(ctx->opcode)) {
- case OPC_AND_V:
- gen_helper_msa_and_v(cpu_env, twd, tws, twt);
- break;
- case OPC_OR_V:
- gen_helper_msa_or_v(cpu_env, twd, tws, twt);
- break;
- case OPC_NOR_V:
- gen_helper_msa_nor_v(cpu_env, twd, tws, twt);
- break;
- case OPC_XOR_V:
- gen_helper_msa_xor_v(cpu_env, twd, tws, twt);
- break;
- case OPC_BMNZ_V:
- gen_helper_msa_bmnz_v(cpu_env, twd, tws, twt);
- break;
- case OPC_BMZ_V:
- gen_helper_msa_bmz_v(cpu_env, twd, tws, twt);
- break;
- case OPC_BSEL_V:
- gen_helper_msa_bsel_v(cpu_env, twd, tws, twt);
- break;
- default:
- MIPS_INVAL("MSA instruction");
- gen_reserved_instruction(ctx);
- break;
+ static gen_helper_piii * const gen_msa_copy_u[4] = {
+ gen_helper_msa_copy_u_b, gen_helper_msa_copy_u_h,
+ NULL_IF_MIPS32(gen_helper_msa_copy_u_w), NULL
+ };
+
+ return trans_msa_elm_fn(ctx, a, gen_msa_copy_u);
+}
+
+static bool trans_COPY_S(DisasContext *ctx, arg_msa_elm_df *a)
+{
+ static gen_helper_piii * const gen_msa_copy_s[4] = {
+ gen_helper_msa_copy_s_b, gen_helper_msa_copy_s_h,
+ gen_helper_msa_copy_s_w, NULL_IF_MIPS32(gen_helper_msa_copy_s_d)
+ };
+
+ return trans_msa_elm_fn(ctx, a, gen_msa_copy_s);
+}
+
+static bool trans_INSERT(DisasContext *ctx, arg_msa_elm_df *a)
+{
+ static gen_helper_piii * const gen_msa_insert[4] = {
+ gen_helper_msa_insert_b, gen_helper_msa_insert_h,
+ gen_helper_msa_insert_w, NULL_IF_MIPS32(gen_helper_msa_insert_d)
+ };
+
+ return trans_msa_elm_fn(ctx, a, gen_msa_insert);
+}
+
+TRANS(FCAF, trans_msa_3rf, gen_helper_msa_fcaf_df);
+TRANS(FCUN, trans_msa_3rf, gen_helper_msa_fcun_df);
+TRANS(FCEQ, trans_msa_3rf, gen_helper_msa_fceq_df);
+TRANS(FCUEQ, trans_msa_3rf, gen_helper_msa_fcueq_df);
+TRANS(FCLT, trans_msa_3rf, gen_helper_msa_fclt_df);
+TRANS(FCULT, trans_msa_3rf, gen_helper_msa_fcult_df);
+TRANS(FCLE, trans_msa_3rf, gen_helper_msa_fcle_df);
+TRANS(FCULE, trans_msa_3rf, gen_helper_msa_fcule_df);
+TRANS(FSAF, trans_msa_3rf, gen_helper_msa_fsaf_df);
+TRANS(FSUN, trans_msa_3rf, gen_helper_msa_fsun_df);
+TRANS(FSEQ, trans_msa_3rf, gen_helper_msa_fseq_df);
+TRANS(FSUEQ, trans_msa_3rf, gen_helper_msa_fsueq_df);
+TRANS(FSLT, trans_msa_3rf, gen_helper_msa_fslt_df);
+TRANS(FSULT, trans_msa_3rf, gen_helper_msa_fsult_df);
+TRANS(FSLE, trans_msa_3rf, gen_helper_msa_fsle_df);
+TRANS(FSULE, trans_msa_3rf, gen_helper_msa_fsule_df);
+
+TRANS(FADD, trans_msa_3rf, gen_helper_msa_fadd_df);
+TRANS(FSUB, trans_msa_3rf, gen_helper_msa_fsub_df);
+TRANS(FMUL, trans_msa_3rf, gen_helper_msa_fmul_df);
+TRANS(FDIV, trans_msa_3rf, gen_helper_msa_fdiv_df);
+TRANS(FMADD, trans_msa_3rf, gen_helper_msa_fmadd_df);
+TRANS(FMSUB, trans_msa_3rf, gen_helper_msa_fmsub_df);
+TRANS(FEXP2, trans_msa_3rf, gen_helper_msa_fexp2_df);
+TRANS(FEXDO, trans_msa_3rf, gen_helper_msa_fexdo_df);
+TRANS(FTQ, trans_msa_3rf, gen_helper_msa_ftq_df);
+TRANS(FMIN, trans_msa_3rf, gen_helper_msa_fmin_df);
+TRANS(FMIN_A, trans_msa_3rf, gen_helper_msa_fmin_a_df);
+TRANS(FMAX, trans_msa_3rf, gen_helper_msa_fmax_df);
+TRANS(FMAX_A, trans_msa_3rf, gen_helper_msa_fmax_a_df);
+
+TRANS(FCOR, trans_msa_3rf, gen_helper_msa_fcor_df);
+TRANS(FCUNE, trans_msa_3rf, gen_helper_msa_fcune_df);
+TRANS(FCNE, trans_msa_3rf, gen_helper_msa_fcne_df);
+TRANS(MUL_Q, trans_msa_3rf, gen_helper_msa_mul_q_df);
+TRANS(MADD_Q, trans_msa_3rf, gen_helper_msa_madd_q_df);
+TRANS(MSUB_Q, trans_msa_3rf, gen_helper_msa_msub_q_df);
+TRANS(FSOR, trans_msa_3rf, gen_helper_msa_fsor_df);
+TRANS(FSUNE, trans_msa_3rf, gen_helper_msa_fsune_df);
+TRANS(FSNE, trans_msa_3rf, gen_helper_msa_fsne_df);
+TRANS(MULR_Q, trans_msa_3rf, gen_helper_msa_mulr_q_df);
+TRANS(MADDR_Q, trans_msa_3rf, gen_helper_msa_maddr_q_df);
+TRANS(MSUBR_Q, trans_msa_3rf, gen_helper_msa_msubr_q_df);
+
+static bool trans_msa_2r(DisasContext *ctx, arg_msa_r *a,
+ gen_helper_pii *gen_msa_2r)
+{
+ if (!check_msa_enabled(ctx)) {
+ return true;
}
- tcg_temp_free_i32(twd);
- tcg_temp_free_i32(tws);
- tcg_temp_free_i32(twt);
+ gen_msa_2r(cpu_env, tcg_constant_i32(a->wd), tcg_constant_i32(a->ws));
+
+ return true;
}
-static void gen_msa_vec(DisasContext *ctx)
+TRANS_DF_ii(PCNT, trans_msa_2r, gen_helper_msa_pcnt);
+TRANS_DF_ii(NLOC, trans_msa_2r, gen_helper_msa_nloc);
+TRANS_DF_ii(NLZC, trans_msa_2r, gen_helper_msa_nlzc);
+
+static bool trans_FILL(DisasContext *ctx, arg_msa_r *a)
{
- switch (MASK_MSA_VEC(ctx->opcode)) {
- case OPC_AND_V:
- case OPC_OR_V:
- case OPC_NOR_V:
- case OPC_XOR_V:
- case OPC_BMNZ_V:
- case OPC_BMZ_V:
- case OPC_BSEL_V:
- gen_msa_vec_v(ctx);
- break;
- case OPC_MSA_2R:
- gen_msa_2r(ctx);
- break;
- case OPC_MSA_2RF:
- gen_msa_2rf(ctx);
- break;
- default:
- MIPS_INVAL("MSA instruction");
- gen_reserved_instruction(ctx);
- break;
+ if (TARGET_LONG_BITS != 64 && a->df == DF_DOUBLE) {
+ /* Double format valid only for MIPS64 */
+ return false;
+ }
+
+ if (!check_msa_enabled(ctx)) {
+ return true;
+ }
+
+ gen_helper_msa_fill_df(cpu_env,
+ tcg_constant_i32(a->df),
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws));
+
+ return true;
+}
+
+static bool trans_msa_2rf(DisasContext *ctx, arg_msa_r *a,
+ gen_helper_piii *gen_msa_2rf)
+{
+ if (!check_msa_enabled(ctx)) {
+ return true;
}
+
+ gen_msa_2rf(cpu_env,
+ tcg_constant_i32(a->df),
+ tcg_constant_i32(a->wd),
+ tcg_constant_i32(a->ws));
+
+ return true;
}
-static bool trans_MSA(DisasContext *ctx, arg_MSA *a)
+TRANS(FCLASS, trans_msa_2rf, gen_helper_msa_fclass_df);
+TRANS(FTRUNC_S, trans_msa_2rf, gen_helper_msa_ftrunc_s_df);
+TRANS(FTRUNC_U, trans_msa_2rf, gen_helper_msa_ftrunc_u_df);
+TRANS(FSQRT, trans_msa_2rf, gen_helper_msa_fsqrt_df);
+TRANS(FRSQRT, trans_msa_2rf, gen_helper_msa_frsqrt_df);
+TRANS(FRCP, trans_msa_2rf, gen_helper_msa_frcp_df);
+TRANS(FRINT, trans_msa_2rf, gen_helper_msa_frint_df);
+TRANS(FLOG2, trans_msa_2rf, gen_helper_msa_flog2_df);
+TRANS(FEXUPL, trans_msa_2rf, gen_helper_msa_fexupl_df);
+TRANS(FEXUPR, trans_msa_2rf, gen_helper_msa_fexupr_df);
+TRANS(FFQL, trans_msa_2rf, gen_helper_msa_ffql_df);
+TRANS(FFQR, trans_msa_2rf, gen_helper_msa_ffqr_df);
+TRANS(FTINT_S, trans_msa_2rf, gen_helper_msa_ftint_s_df);
+TRANS(FTINT_U, trans_msa_2rf, gen_helper_msa_ftint_u_df);
+TRANS(FFINT_S, trans_msa_2rf, gen_helper_msa_ffint_s_df);
+TRANS(FFINT_U, trans_msa_2rf, gen_helper_msa_ffint_u_df);
+
+static bool trans_msa_ldst(DisasContext *ctx, arg_msa_i *a,
+ gen_helper_piv *gen_msa_ldst)
{
- uint32_t opcode = ctx->opcode;
-
- check_msa_access(ctx);
-
- switch (MASK_MSA_MINOR(opcode)) {
- case OPC_MSA_I8_00:
- case OPC_MSA_I8_01:
- case OPC_MSA_I8_02:
- gen_msa_i8(ctx);
- break;
- case OPC_MSA_I5_06:
- case OPC_MSA_I5_07:
- gen_msa_i5(ctx);
- break;
- case OPC_MSA_BIT_09:
- case OPC_MSA_BIT_0A:
- gen_msa_bit(ctx);
- break;
- case OPC_MSA_3R_0D:
- case OPC_MSA_3R_0E:
- case OPC_MSA_3R_0F:
- case OPC_MSA_3R_10:
- case OPC_MSA_3R_11:
- case OPC_MSA_3R_12:
- case OPC_MSA_3R_13:
- case OPC_MSA_3R_14:
- case OPC_MSA_3R_15:
- gen_msa_3r(ctx);
- break;
- case OPC_MSA_ELM:
- gen_msa_elm(ctx);
- break;
- case OPC_MSA_3RF_1A:
- case OPC_MSA_3RF_1B:
- case OPC_MSA_3RF_1C:
- gen_msa_3rf(ctx);
- break;
- case OPC_MSA_VEC:
- gen_msa_vec(ctx);
- break;
- case OPC_LD_B:
- case OPC_LD_H:
- case OPC_LD_W:
- case OPC_LD_D:
- case OPC_ST_B:
- case OPC_ST_H:
- case OPC_ST_W:
- case OPC_ST_D:
- {
- int32_t s10 = sextract32(ctx->opcode, 16, 10);
- uint8_t rs = (ctx->opcode >> 11) & 0x1f;
- uint8_t wd = (ctx->opcode >> 6) & 0x1f;
- uint8_t df = (ctx->opcode >> 0) & 0x3;
-
- TCGv_i32 twd = tcg_const_i32(wd);
- TCGv taddr = tcg_temp_new();
- gen_base_offset_addr(ctx, taddr, rs, s10 << df);
-
- switch (MASK_MSA_MINOR(opcode)) {
- case OPC_LD_B:
- gen_helper_msa_ld_b(cpu_env, twd, taddr);
- break;
- case OPC_LD_H:
- gen_helper_msa_ld_h(cpu_env, twd, taddr);
- break;
- case OPC_LD_W:
- gen_helper_msa_ld_w(cpu_env, twd, taddr);
- break;
- case OPC_LD_D:
- gen_helper_msa_ld_d(cpu_env, twd, taddr);
- break;
- case OPC_ST_B:
- gen_helper_msa_st_b(cpu_env, twd, taddr);
- break;
- case OPC_ST_H:
- gen_helper_msa_st_h(cpu_env, twd, taddr);
- break;
- case OPC_ST_W:
- gen_helper_msa_st_w(cpu_env, twd, taddr);
- break;
- case OPC_ST_D:
- gen_helper_msa_st_d(cpu_env, twd, taddr);
- break;
- }
-
- tcg_temp_free_i32(twd);
- tcg_temp_free(taddr);
- }
- break;
- default:
- MIPS_INVAL("MSA instruction");
- gen_reserved_instruction(ctx);
- break;
+ TCGv taddr;
+
+ if (!check_msa_enabled(ctx)) {
+ return true;
}
+ taddr = tcg_temp_new();
+
+ gen_base_offset_addr(ctx, taddr, a->ws, a->sa << a->df);
+ gen_msa_ldst(cpu_env, tcg_constant_i32(a->wd), taddr);
+
+ tcg_temp_free(taddr);
+
return true;
}
+TRANS_DF_iv(LD, trans_msa_ldst, gen_helper_msa_ld);
+TRANS_DF_iv(ST, trans_msa_ldst, gen_helper_msa_st);
+
static bool trans_LSA(DisasContext *ctx, arg_r *a)
{
return gen_lsa(ctx, a->rd, a->rt, a->rs, a->sa);
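The msa_translate.c changes above replace the hand-decoded switch statements with decodetree-generated trans_*() hooks: most opcodes forward through the TRANS() convenience macro into a shared trans_msa_*() helper, and the element-format cases pick a helper from a small per-data-format table in which a NULL slot means the encoding is reserved on the current build (see NULL_IF_MIPS32). Below is a minimal standalone sketch of that table-dispatch shape; all names are made up for illustration and none of this code is part of the patch.

/*
 * Editor's sketch, not QEMU code: dispatch by data format through a
 * table, where a NULL slot marks a reserved encoding (cf. the
 * NULL_IF_MIPS32 slots in gen_msa_copy_u[] above).
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef void (*df_handler)(int wd, int ws);

static void copy_b(int wd, int ws) { printf("copy.b w%d <- w%d\n", wd, ws); }
static void copy_h(int wd, int ws) { printf("copy.h w%d <- w%d\n", wd, ws); }

static bool dispatch_df(const df_handler table[4], int df, int wd, int ws)
{
    if (df < 0 || df > 3 || !table[df]) {
        return false;               /* reserved: caller raises RI */
    }
    table[df](wd, ws);
    return true;
}

int main(void)
{
    /* .w and .d slots left NULL, as a 32-bit target would do */
    static const df_handler copy_table[4] = { copy_b, copy_h, NULL, NULL };

    dispatch_df(copy_table, 0, 1, 2);            /* handled */
    if (!dispatch_df(copy_table, 3, 1, 2)) {     /* reserved encoding */
        puts("reserved instruction");
    }
    return 0;
}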
diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
index bad3deb..466768a 100644
--- a/target/mips/tcg/tcg-internal.h
+++ b/target/mips/tcg/tcg-internal.h
@@ -18,9 +18,6 @@
void mips_tcg_init(void);
void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
-bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr);
void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
MMUAccessType access_type, int mmu_idx,
uintptr_t retaddr) QEMU_NORETURN;
@@ -60,6 +57,10 @@ void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
MemTxResult response, uintptr_t retaddr);
void cpu_mips_tlb_flush(CPUMIPSState *env);
+bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+
#endif /* !CONFIG_USER_ONLY */
#endif
diff --git a/target/mips/tcg/user/meson.build b/target/mips/tcg/user/meson.build
deleted file mode 100644
index 79badcd..0000000
--- a/target/mips/tcg/user/meson.build
+++ /dev/null
@@ -1,3 +0,0 @@
-mips_user_ss.add(files(
- 'tlb_helper.c',
-))
diff --git a/target/mips/tcg/user/tlb_helper.c b/target/mips/tcg/user/tlb_helper.c
deleted file mode 100644
index 210c6d5..0000000
--- a/target/mips/tcg/user/tlb_helper.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * MIPS TLB (Translation lookaside buffer) helpers.
- *
- * Copyright (c) 2004-2005 Jocelyn Mayer
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-#include "qemu/osdep.h"
-
-#include "cpu.h"
-#include "exec/exec-all.h"
-#include "internal.h"
-
-static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
- MMUAccessType access_type)
-{
- CPUState *cs = env_cpu(env);
-
- env->error_code = 0;
- if (access_type == MMU_INST_FETCH) {
- env->error_code |= EXCP_INST_NOTAVAIL;
- }
-
- /* Reference to kernel address from user mode or supervisor mode */
- /* Reference to supervisor address from user mode */
- if (access_type == MMU_DATA_STORE) {
- cs->exception_index = EXCP_AdES;
- } else {
- cs->exception_index = EXCP_AdEL;
- }
-
- /* Raise exception */
- if (!(env->hflags & MIPS_HFLAG_DM)) {
- env->CP0_BadVAddr = address;
- }
-}
-
-bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
-{
- MIPSCPU *cpu = MIPS_CPU(cs);
- CPUMIPSState *env = &cpu->env;
-
- /* data access */
- raise_mmu_exception(env, address, access_type);
- do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
-}
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
index 58ecd27..4cade61 100644
--- a/target/nios2/cpu.c
+++ b/target/nios2/cpu.c
@@ -216,9 +216,11 @@ static const struct SysemuCPUOps nios2_sysemu_ops = {
static const struct TCGCPUOps nios2_tcg_ops = {
.initialize = nios2_tcg_init,
- .tlb_fill = nios2_cpu_tlb_fill,
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ .record_sigsegv = nios2_cpu_record_sigsegv,
+#else
+ .tlb_fill = nios2_cpu_tlb_fill,
.cpu_exec_interrupt = nios2_cpu_exec_interrupt,
.do_interrupt = nios2_cpu_do_interrupt,
.do_unaligned_access = nios2_cpu_do_unaligned_access,
diff --git a/target/nios2/cpu.h b/target/nios2/cpu.h
index a805873..1a69ed7 100644
--- a/target/nios2/cpu.h
+++ b/target/nios2/cpu.h
@@ -218,9 +218,15 @@ static inline int cpu_mmu_index(CPUNios2State *env, bool ifetch)
MMU_SUPERVISOR_IDX;
}
+#ifdef CONFIG_USER_ONLY
+void nios2_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
+#else
bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
+#endif
static inline int cpu_interrupts_enabled(CPUNios2State *env)
{
diff --git a/target/nios2/helper.c b/target/nios2/helper.c
index 53be839..e5c9865 100644
--- a/target/nios2/helper.c
+++ b/target/nios2/helper.c
@@ -38,10 +38,11 @@ void nios2_cpu_do_interrupt(CPUState *cs)
env->regs[R_EA] = env->regs[R_PC] + 4;
}
-bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
+void nios2_cpu_record_sigsegv(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t retaddr)
{
+ /* FIXME: Disentangle kuser page from linux-user sigsegv handling. */
cs->exception_index = 0xaa;
cpu_loop_exit_restore(cs, retaddr);
}
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index 27cb041..dfbafc5 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -186,9 +186,9 @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {
static const struct TCGCPUOps openrisc_tcg_ops = {
.initialize = openrisc_translate_init,
- .tlb_fill = openrisc_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .tlb_fill = openrisc_cpu_tlb_fill,
.cpu_exec_interrupt = openrisc_cpu_exec_interrupt,
.do_interrupt = openrisc_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
index 187a4a1..ee069b0 100644
--- a/target/openrisc/cpu.h
+++ b/target/openrisc/cpu.h
@@ -317,14 +317,15 @@ hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void openrisc_translate_init(void);
-bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr);
int print_insn_or1k(bfd_vma addr, disassemble_info *info);
#define cpu_list cpu_openrisc_list
#ifndef CONFIG_USER_ONLY
+bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+
extern const VMStateDescription vmstate_openrisc_cpu;
void openrisc_cpu_do_interrupt(CPUState *cpu);
diff --git a/target/openrisc/meson.build b/target/openrisc/meson.build
index e445dec..8432208 100644
--- a/target/openrisc/meson.build
+++ b/target/openrisc/meson.build
@@ -10,7 +10,6 @@ openrisc_ss.add(files(
'fpu_helper.c',
'gdbstub.c',
'interrupt_helper.c',
- 'mmu.c',
'sys_helper.c',
'translate.c',
))
@@ -19,6 +18,7 @@ openrisc_softmmu_ss = ss.source_set()
openrisc_softmmu_ss.add(files(
'interrupt.c',
'machine.c',
+ 'mmu.c',
))
target_arch += {'openrisc': openrisc_ss}
diff --git a/target/openrisc/mmu.c b/target/openrisc/mmu.c
index 94df8c7..e561ef2 100644
--- a/target/openrisc/mmu.c
+++ b/target/openrisc/mmu.c
@@ -23,11 +23,8 @@
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
-#ifndef CONFIG_USER_ONLY
#include "hw/loader.h"
-#endif
-#ifndef CONFIG_USER_ONLY
static inline void get_phys_nommu(hwaddr *phys_addr, int *prot,
target_ulong address)
{
@@ -94,7 +91,6 @@ static int get_phys_mmu(OpenRISCCPU *cpu, hwaddr *phys_addr, int *prot,
return need & PAGE_EXEC ? EXCP_ITLBMISS : EXCP_DTLBMISS;
}
}
-#endif
static void raise_mmu_exception(OpenRISCCPU *cpu, target_ulong address,
int exception)
@@ -112,8 +108,6 @@ bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
{
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
int excp = EXCP_DPF;
-
-#ifndef CONFIG_USER_ONLY
int prot;
hwaddr phys_addr;
@@ -138,13 +132,11 @@ bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
if (probe) {
return false;
}
-#endif
raise_mmu_exception(cpu, addr, excp);
cpu_loop_exit_restore(cs, retaddr);
}
-#ifndef CONFIG_USER_ONLY
hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
@@ -177,4 +169,3 @@ hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return phys_addr;
}
}
-#endif
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 0472ec9..e946da5 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -1302,9 +1302,6 @@ extern const VMStateDescription vmstate_ppc_cpu;
/*****************************************************************************/
void ppc_translate_init(void);
-bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr);
#if !defined(CONFIG_USER_ONLY)
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index ba384a5..6695985 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -9014,9 +9014,11 @@ static const struct SysemuCPUOps ppc_sysemu_ops = {
static const struct TCGCPUOps ppc_tcg_ops = {
.initialize = ppc_translate_init,
- .tlb_fill = ppc_cpu_tlb_fill,
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ .record_sigsegv = ppc_cpu_record_sigsegv,
+#else
+ .tlb_fill = ppc_cpu_tlb_fill,
.cpu_exec_interrupt = ppc_cpu_exec_interrupt,
.do_interrupt = ppc_cpu_do_interrupt,
.cpu_exec_enter = ppc_cpu_exec_enter,
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index b7d1767..17607ad 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -454,13 +454,15 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
break;
}
case POWERPC_EXCP_ALIGN: /* Alignment exception */
- /* Get rS/rD and rA from faulting opcode */
/*
- * Note: the opcode fields will not be set properly for a
- * direct store load/store, but nobody cares as nobody
- * actually uses direct store segments.
+ * Get rS/rD and rA from faulting opcode.
+ * Note: We will only invoke ALIGN for atomic operations,
+ * so all instructions are X-form.
*/
- env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
+ {
+ uint32_t insn = cpu_ldl_code(env, env->nip);
+ env->spr[SPR_DSISR] |= (insn & 0x03FF0000) >> 16;
+ }
break;
case POWERPC_EXCP_PROGRAM: /* Program exception */
switch (env->error_code & ~0xF) {
@@ -1452,24 +1454,31 @@ void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
}
-#endif
-#endif /* CONFIG_TCG */
-#endif
+#endif /* TARGET_PPC64 */
-#ifdef CONFIG_TCG
void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
{
CPUPPCState *env = cs->env_ptr;
- uint32_t insn;
- /* Restore state and reload the insn we executed, for filling in DSISR. */
- cpu_restore_state(cs, retaddr, true);
- insn = cpu_ldl_code(env, env->nip);
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_4xx:
+ case POWERPC_MMU_SOFT_4xx_Z:
+ env->spr[SPR_40x_DEAR] = vaddr;
+ break;
+ case POWERPC_MMU_BOOKE:
+ case POWERPC_MMU_BOOKE206:
+ env->spr[SPR_BOOKE_DEAR] = vaddr;
+ break;
+ default:
+ env->spr[SPR_DAR] = vaddr;
+ break;
+ }
cs->exception_index = POWERPC_EXCP_ALIGN;
- env->error_code = insn & 0x03FF0000;
- cpu_loop_exit(cs);
+ env->error_code = 0;
+ cpu_loop_exit_restore(cs, retaddr);
}
-#endif
+#endif /* CONFIG_TCG */
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/ppc/internal.h b/target/ppc/internal.h
index 5528436..6aa9484 100644
--- a/target/ppc/internal.h
+++ b/target/ppc/internal.h
@@ -211,11 +211,6 @@ void helper_compute_fprf_float16(CPUPPCState *env, float16 arg);
void helper_compute_fprf_float32(CPUPPCState *env, float32 arg);
void helper_compute_fprf_float128(CPUPPCState *env, float128 arg);
-/* Raise a data fault alignment exception for the specified virtual address */
-void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
- MMUAccessType access_type, int mmu_idx,
- uintptr_t retaddr) QEMU_NORETURN;
-
/* translate.c */
int ppc_fixup_cpu(PowerPCCPU *cpu);
@@ -283,5 +278,17 @@ static inline void pte_invalidate(target_ulong *pte0)
#define PTE_PTEM_MASK 0x7FFFFFBF
#define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
+#ifdef CONFIG_USER_ONLY
+void ppc_cpu_record_sigsegv(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
+#else
+bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ uintptr_t retaddr) QEMU_NORETURN;
+#endif
#endif /* PPC_INTERNAL_H */
diff --git a/target/ppc/user_only_helper.c b/target/ppc/user_only_helper.c
index aa3f867..7ff76f7 100644
--- a/target/ppc/user_only_helper.c
+++ b/target/ppc/user_only_helper.c
@@ -21,16 +21,23 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
+#include "internal.h"
-
-bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
+void ppc_cpu_record_sigsegv(CPUState *cs, vaddr address,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t retaddr)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
int exception, error_code;
+ /*
+ * Both DSISR and the "trap number" (exception vector offset,
+ * looked up from exception_index) are present in the linux-user
+ * signal frame.
+ * FIXME: we don't actually populate the trap number properly.
+ * It would be easiest to fill in an env->trap value now.
+ */
if (access_type == MMU_INST_FETCH) {
exception = POWERPC_EXCP_ISI;
error_code = 0x40000000;
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 7d53125..f812998 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -694,9 +694,9 @@ static const struct SysemuCPUOps riscv_sysemu_ops = {
static const struct TCGCPUOps riscv_tcg_ops = {
.initialize = riscv_translate_init,
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
- .tlb_fill = riscv_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .tlb_fill = riscv_cpu_tlb_fill,
.cpu_exec_interrupt = riscv_cpu_exec_interrupt,
.do_interrupt = riscv_cpu_do_interrupt,
.do_transaction_failed = riscv_cpu_do_transaction_failed,
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index f30ff67..9eeed38 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -814,7 +814,6 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
riscv_cpu_two_stage_lookup(mmu_idx);
riscv_raise_exception(env, cs->exception_index, retaddr);
}
-#endif /* !CONFIG_USER_ONLY */
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
@@ -822,7 +821,6 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
{
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
-#ifndef CONFIG_USER_ONLY
vaddr im_address;
hwaddr pa = 0;
int prot, prot2, prot_pmp;
@@ -954,25 +952,8 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
}
return true;
-
-#else
- switch (access_type) {
- case MMU_INST_FETCH:
- cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
- break;
- case MMU_DATA_LOAD:
- cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
- break;
- case MMU_DATA_STORE:
- cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
- break;
- default:
- g_assert_not_reached();
- }
- env->badaddr = address;
- cpu_loop_exit_restore(cs, retaddr);
-#endif
}
+#endif /* !CONFIG_USER_ONLY */
/*
* Handle Traps
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index 7b7b05f..ccdbaf8 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -266,9 +266,12 @@ static void s390_cpu_reset_full(DeviceState *dev)
static const struct TCGCPUOps s390_tcg_ops = {
.initialize = s390x_translate_init,
- .tlb_fill = s390_cpu_tlb_fill,
-#if !defined(CONFIG_USER_ONLY)
+#ifdef CONFIG_USER_ONLY
+ .record_sigsegv = s390_cpu_record_sigsegv,
+ .record_sigbus = s390_cpu_record_sigbus,
+#else
+ .tlb_fill = s390_cpu_tlb_fill,
.cpu_exec_interrupt = s390_cpu_exec_interrupt,
.do_interrupt = s390_cpu_do_interrupt,
.debug_excp_handler = s390x_cpu_debug_excp_handler,
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
index 27d4a03..1a178ae 100644
--- a/target/s390x/s390x-internal.h
+++ b/target/s390x/s390x-internal.h
@@ -270,12 +270,21 @@ ObjectClass *s390_cpu_class_by_name(const char *name);
void s390x_cpu_debug_excp_handler(CPUState *cs);
void s390_cpu_do_interrupt(CPUState *cpu);
bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req);
+
+#ifdef CONFIG_USER_ONLY
+void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t retaddr);
+void s390_cpu_record_sigbus(CPUState *cs, vaddr address,
+ MMUAccessType access_type, uintptr_t retaddr);
+#else
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type, int mmu_idx,
uintptr_t retaddr) QEMU_NORETURN;
+#endif
/* fpu_helper.c */
diff --git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c
index 3d6662a..4e7648f 100644
--- a/target/s390x/tcg/excp_helper.c
+++ b/target/s390x/tcg/excp_helper.c
@@ -82,6 +82,19 @@ void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
tcg_s390_data_exception(env, dxc, GETPC());
}
+/*
+ * Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
+ * this is only for the atomic operations, for which we want to raise a
+ * specification exception.
+ */
+static void QEMU_NORETURN do_unaligned_access(CPUState *cs, uintptr_t retaddr)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
+}
+
#if defined(CONFIG_USER_ONLY)
void s390_cpu_do_interrupt(CPUState *cs)
@@ -89,19 +102,29 @@ void s390_cpu_do_interrupt(CPUState *cs)
cs->exception_index = -1;
}
-bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
+void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t retaddr)
{
S390CPU *cpu = S390_CPU(cs);
- trigger_pgm_exception(&cpu->env, PGM_ADDRESSING);
- /* On real machines this value is dropped into LowMem. Since this
- is userland, simply put this someplace that cpu_loop can find it. */
- cpu->env.__excp_addr = address;
+ trigger_pgm_exception(&cpu->env, maperr ? PGM_ADDRESSING : PGM_PROTECTION);
+ /*
+ * On real machines this value is dropped into LowMem. Since this
+ * is userland, simply put this someplace that cpu_loop can find it.
+ * S390 only gives the page of the fault, not the exact address.
+     * Cf. the construction of TEC in mmu_translate().
+ */
+ cpu->env.__excp_addr = address & TARGET_PAGE_MASK;
cpu_loop_exit_restore(cs, retaddr);
}
+void s390_cpu_record_sigbus(CPUState *cs, vaddr address,
+ MMUAccessType access_type, uintptr_t retaddr)
+{
+ do_unaligned_access(cs, retaddr);
+}
+
#else /* !CONFIG_USER_ONLY */
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
@@ -589,17 +612,11 @@ void s390x_cpu_debug_excp_handler(CPUState *cs)
}
}
-/* Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
- this is only for the atomic operations, for which we want to raise a
- specification exception. */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
{
- S390CPU *cpu = S390_CPU(cs);
- CPUS390XState *env = &cpu->env;
-
- tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
+ do_unaligned_access(cs, retaddr);
}
static void QEMU_NORETURN monitor_event(CPUS390XState *env,
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
index 17e3f83..362a30d 100644
--- a/target/s390x/tcg/mem_helper.c
+++ b/target/s390x/tcg/mem_helper.c
@@ -141,20 +141,12 @@ static int s390_probe_access(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t ra)
{
- int flags;
-
#if defined(CONFIG_USER_ONLY)
- flags = page_get_flags(addr);
- if (!(flags & (access_type == MMU_DATA_LOAD ? PAGE_READ : PAGE_WRITE_ORG))) {
- env->__excp_addr = addr;
- flags = (flags & PAGE_VALID) ? PGM_PROTECTION : PGM_ADDRESSING;
- if (nonfault) {
- return flags;
- }
- tcg_s390_program_interrupt(env, flags, ra);
- }
- *phost = g2h(env_cpu(env), addr);
+ return probe_access_flags(env, addr, access_type, mmu_idx,
+ nonfault, phost, ra);
#else
+ int flags;
+
/*
* For !CONFIG_USER_ONLY, we cannot rely on TLB_INVALID_MASK or haddr==NULL
* to detect if there was an exception during tlb_fill().
@@ -173,8 +165,8 @@ static int s390_probe_access(CPUArchState *env, target_ulong addr, int size,
(access_type == MMU_DATA_STORE
? BP_MEM_WRITE : BP_MEM_READ), ra);
}
-#endif
return 0;
+#endif
}
static int access_prepare_nf(S390Access *access, CPUS390XState *env,
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index 2047742..06b2691 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -236,9 +236,9 @@ static const struct SysemuCPUOps sh4_sysemu_ops = {
static const struct TCGCPUOps superh_tcg_ops = {
.initialize = sh4_translate_init,
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
- .tlb_fill = superh_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .tlb_fill = superh_cpu_tlb_fill,
.cpu_exec_interrupt = superh_cpu_exec_interrupt,
.do_interrupt = superh_cpu_do_interrupt,
.do_unaligned_access = superh_cpu_do_unaligned_access,
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index dc81406..4cfb109 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -213,12 +213,12 @@ void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
uintptr_t retaddr) QEMU_NORETURN;
void sh4_translate_init(void);
+void sh4_cpu_list(void);
+
+#if !defined(CONFIG_USER_ONLY)
bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
-
-void sh4_cpu_list(void);
-#if !defined(CONFIG_USER_ONLY)
void superh_cpu_do_interrupt(CPUState *cpu);
bool superh_cpu_exec_interrupt(CPUState *cpu, int int_req);
void cpu_sh4_invalidate_tlb(CPUSH4State *s);
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index 53cb9c3..6a620e3 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -796,8 +796,6 @@ bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}
-#endif /* !CONFIG_USER_ONLY */
-
bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
@@ -806,11 +804,6 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
CPUSH4State *env = &cpu->env;
int ret;
-#ifdef CONFIG_USER_ONLY
- ret = (access_type == MMU_DATA_STORE ? MMU_DTLB_VIOLATION_WRITE :
- access_type == MMU_INST_FETCH ? MMU_ITLB_VIOLATION :
- MMU_DTLB_VIOLATION_READ);
-#else
target_ulong physical;
int prot;
@@ -829,7 +822,6 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (ret != MMU_DTLB_MULTIPLE && ret != MMU_ITLB_MULTIPLE) {
env->pteh = (env->pteh & PTEH_ASID_MASK) | (address & PTEH_VPN_MASK);
}
-#endif
env->tea = address;
switch (ret) {
@@ -868,3 +860,4 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
}
cpu_loop_exit_restore(cs, retaddr);
}
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/sh4/op_helper.c b/target/sh4/op_helper.c
index c996dce..7526698 100644
--- a/target/sh4/op_helper.c
+++ b/target/sh4/op_helper.c
@@ -29,6 +29,9 @@ void superh_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
{
+ CPUSH4State *env = cs->env_ptr;
+
+ env->tea = addr;
switch (access_type) {
case MMU_INST_FETCH:
case MMU_DATA_LOAD:
@@ -37,6 +40,8 @@ void superh_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
case MMU_DATA_STORE:
cs->exception_index = 0x100;
break;
+ default:
+ g_assert_not_reached();
}
cpu_loop_exit_restore(cs, retaddr);
}
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index 21dd277..55268ed 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -865,9 +865,9 @@ static const struct SysemuCPUOps sparc_sysemu_ops = {
static const struct TCGCPUOps sparc_tcg_ops = {
.initialize = sparc_tcg_init,
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
- .tlb_fill = sparc_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
+ .tlb_fill = sparc_cpu_tlb_fill,
.cpu_exec_interrupt = sparc_cpu_exec_interrupt,
.do_interrupt = sparc_cpu_do_interrupt,
.do_transaction_failed = sparc_cpu_do_transaction_failed,
diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
index bbf3601..a3e1cf9 100644
--- a/target/sparc/ldst_helper.c
+++ b/target/sparc/ldst_helper.c
@@ -27,7 +27,6 @@
//#define DEBUG_MMU
//#define DEBUG_MXCC
-//#define DEBUG_UNALIGNED
//#define DEBUG_UNASSIGNED
//#define DEBUG_ASI
//#define DEBUG_CACHE_CONTROL
@@ -364,10 +363,6 @@ static void do_check_align(CPUSPARCState *env, target_ulong addr,
uint32_t align, uintptr_t ra)
{
if (addr & align) {
-#ifdef DEBUG_UNALIGNED
- printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
- "\n", addr, env->pc);
-#endif
cpu_raise_exception_ra(env, TT_UNALIGNED, ra);
}
}
@@ -1958,20 +1953,3 @@ void sparc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
is_asi, size, retaddr);
}
#endif
-
-#if !defined(CONFIG_USER_ONLY)
-void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
- MMUAccessType access_type,
- int mmu_idx,
- uintptr_t retaddr)
-{
- SPARCCPU *cpu = SPARC_CPU(cs);
- CPUSPARCState *env = &cpu->env;
-
-#ifdef DEBUG_UNALIGNED
- printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
- "\n", addr, env->pc);
-#endif
- cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
-}
-#endif
diff --git a/target/sparc/meson.build b/target/sparc/meson.build
index a3638b9..a801802 100644
--- a/target/sparc/meson.build
+++ b/target/sparc/meson.build
@@ -6,7 +6,6 @@ sparc_ss.add(files(
'gdbstub.c',
'helper.c',
'ldst_helper.c',
- 'mmu_helper.c',
'translate.c',
'win_helper.c',
))
@@ -16,6 +15,7 @@ sparc_ss.add(when: 'TARGET_SPARC64', if_true: files('int64_helper.c', 'vis_helpe
sparc_softmmu_ss = ss.source_set()
sparc_softmmu_ss.add(files(
'machine.c',
+ 'mmu_helper.c',
'monitor.c',
))
diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c
index a44473a..f266838 100644
--- a/target/sparc/mmu_helper.c
+++ b/target/sparc/mmu_helper.c
@@ -25,30 +25,6 @@
/* Sparc MMU emulation */
-#if defined(CONFIG_USER_ONLY)
-
-bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
-{
- SPARCCPU *cpu = SPARC_CPU(cs);
- CPUSPARCState *env = &cpu->env;
-
- if (access_type == MMU_INST_FETCH) {
- cs->exception_index = TT_TFAULT;
- } else {
- cs->exception_index = TT_DFAULT;
-#ifdef TARGET_SPARC64
- env->dmmu.mmuregs[4] = address;
-#else
- env->mmuregs[4] = address;
-#endif
- }
- cpu_loop_exit_restore(cs, retaddr);
-}
-
-#else
-
#ifndef TARGET_SPARC64
/*
* Sparc V8 Reference MMU (SRMMU)
@@ -526,16 +502,60 @@ static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
return 0;
}
+static uint64_t build_sfsr(CPUSPARCState *env, int mmu_idx, int rw)
+{
+ uint64_t sfsr = SFSR_VALID_BIT;
+
+ switch (mmu_idx) {
+ case MMU_PHYS_IDX:
+ sfsr |= SFSR_CT_NOTRANS;
+ break;
+ case MMU_USER_IDX:
+ case MMU_KERNEL_IDX:
+ sfsr |= SFSR_CT_PRIMARY;
+ break;
+ case MMU_USER_SECONDARY_IDX:
+ case MMU_KERNEL_SECONDARY_IDX:
+ sfsr |= SFSR_CT_SECONDARY;
+ break;
+ case MMU_NUCLEUS_IDX:
+ sfsr |= SFSR_CT_NUCLEUS;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (rw == 1) {
+ sfsr |= SFSR_WRITE_BIT;
+ } else if (rw == 4) {
+ sfsr |= SFSR_NF_BIT;
+ }
+
+ if (env->pstate & PS_PRIV) {
+ sfsr |= SFSR_PR_BIT;
+ }
+
+ if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
+ sfsr |= SFSR_OW_BIT; /* overflow (not read before another fault) */
+ }
+
+ /* FIXME: ASI field in SFSR must be set */
+
+ return sfsr;
+}
+
static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical,
int *prot, MemTxAttrs *attrs,
target_ulong address, int rw, int mmu_idx)
{
CPUState *cs = env_cpu(env);
unsigned int i;
+ uint64_t sfsr;
uint64_t context;
- uint64_t sfsr = 0;
bool is_user = false;
+ sfsr = build_sfsr(env, mmu_idx, rw);
+
switch (mmu_idx) {
case MMU_PHYS_IDX:
g_assert_not_reached();
@@ -544,29 +564,18 @@ static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical,
/* fallthru */
case MMU_KERNEL_IDX:
context = env->dmmu.mmu_primary_context & 0x1fff;
- sfsr |= SFSR_CT_PRIMARY;
break;
case MMU_USER_SECONDARY_IDX:
is_user = true;
/* fallthru */
case MMU_KERNEL_SECONDARY_IDX:
context = env->dmmu.mmu_secondary_context & 0x1fff;
- sfsr |= SFSR_CT_SECONDARY;
break;
- case MMU_NUCLEUS_IDX:
- sfsr |= SFSR_CT_NUCLEUS;
- /* FALLTHRU */
default:
context = 0;
break;
}
- if (rw == 1) {
- sfsr |= SFSR_WRITE_BIT;
- } else if (rw == 4) {
- sfsr |= SFSR_NF_BIT;
- }
-
for (i = 0; i < 64; i++) {
/* ctx match, vaddr match, valid? */
if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) {
@@ -616,22 +625,9 @@ static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical,
return 0;
}
- if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
- sfsr |= SFSR_OW_BIT; /* overflow (not read before
- another fault) */
- }
-
- if (env->pstate & PS_PRIV) {
- sfsr |= SFSR_PR_BIT;
- }
-
- /* FIXME: ASI field in SFSR must be set */
- env->dmmu.sfsr = sfsr | SFSR_VALID_BIT;
-
+ env->dmmu.sfsr = sfsr;
env->dmmu.sfar = address; /* Fault address register */
-
env->dmmu.tag_access = (address & ~0x1fffULL) | context;
-
return 1;
}
}
@@ -926,4 +922,23 @@ hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
}
return phys_addr;
}
+
+#ifndef CONFIG_USER_ONLY
+void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx,
+ uintptr_t retaddr)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+
+#ifdef TARGET_SPARC64
+ env->dmmu.sfsr = build_sfsr(env, mmu_idx, access_type);
+ env->dmmu.sfar = addr;
+#else
+ env->mmuregs[4] = addr;
#endif
+
+ cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
+}
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index c1cbd03..224f723 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -192,10 +192,10 @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {
static const struct TCGCPUOps xtensa_tcg_ops = {
.initialize = xtensa_translate_init,
- .tlb_fill = xtensa_cpu_tlb_fill,
.debug_excp_handler = xtensa_breakpoint_handler,
#ifndef CONFIG_USER_ONLY
+ .tlb_fill = xtensa_cpu_tlb_fill,
.cpu_exec_interrupt = xtensa_cpu_exec_interrupt,
.do_interrupt = xtensa_cpu_do_interrupt,
.do_transaction_failed = xtensa_cpu_do_transaction_failed,
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
index f9a510c..02143f2 100644
--- a/target/xtensa/cpu.h
+++ b/target/xtensa/cpu.h
@@ -563,10 +563,10 @@ struct XtensaCPU {
};
+#ifndef CONFIG_USER_ONLY
bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
-#ifndef CONFIG_USER_ONLY
void xtensa_cpu_do_interrupt(CPUState *cpu);
bool xtensa_cpu_exec_interrupt(CPUState *cpu, int interrupt_request);
void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c
index f18ab38..29d216e 100644
--- a/target/xtensa/helper.c
+++ b/target/xtensa/helper.c
@@ -242,27 +242,7 @@ void xtensa_cpu_list(void)
}
}
-#ifdef CONFIG_USER_ONLY
-
-bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
-{
- XtensaCPU *cpu = XTENSA_CPU(cs);
- CPUXtensaState *env = &cpu->env;
-
- qemu_log_mask(CPU_LOG_INT,
- "%s: rw = %d, address = 0x%08" VADDR_PRIx ", size = %d\n",
- __func__, access_type, address, size);
- env->sregs[EXCVADDR] = address;
- env->sregs[EXCCAUSE] = (access_type == MMU_DATA_STORE ?
- STORE_PROHIBITED_CAUSE : LOAD_PROHIBITED_CAUSE);
- cs->exception_index = EXC_USER;
- cpu_loop_exit_restore(cs, retaddr);
-}
-
-#else /* !CONFIG_USER_ONLY */
-
+#ifndef CONFIG_USER_ONLY
void xtensa_cpu_do_unaligned_access(CPUState *cs,
vaddr addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)