Diffstat (limited to 'target')
-rw-r--r-- | target/alpha/gdbstub.c                    |   2
-rw-r--r-- | target/avr/gdbstub.c                      |   4
-rw-r--r-- | target/hexagon/gdbstub.c                  |  10
-rw-r--r-- | target/loongarch/arch_dump.c              |   6
-rw-r--r-- | target/loongarch/gdbstub.c                |   8
-rw-r--r-- | target/mips/cpu.c                         |  17
-rw-r--r-- | target/mips/cpu.h                         |   7
-rw-r--r-- | target/mips/internal.h                    |  10
-rw-r--r-- | target/mips/tcg/ldst_helper.c             |  15
-rw-r--r-- | target/mips/tcg/micromips_translate.c.inc |  34
-rw-r--r-- | target/mips/tcg/mips16e_translate.c.inc   | 118
-rw-r--r-- | target/mips/tcg/msa_helper.c              |   8
-rw-r--r-- | target/mips/tcg/mxu_translate.c           |  18
-rw-r--r-- | target/mips/tcg/nanomips_translate.c.inc  | 162
-rw-r--r-- | target/mips/tcg/sysemu/tlb_helper.c       |   2
-rw-r--r-- | target/mips/tcg/translate.c               | 193
-rw-r--r-- | target/mips/tcg/translate.h               |  13
-rw-r--r-- | target/mips/tcg/tx79_translate.c          |   8
-rw-r--r-- | target/ppc/translate.c                    |  13
-rw-r--r-- | target/tricore/gdbstub.c                  |   2
-rw-r--r-- | target/tricore/translate.c                |   3
21 files changed, 308 insertions(+), 345 deletions(-)
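
The change that recurs across the hunks below is the replacement of compile-time endian memops (MO_TESL, MO_TEUQ, ...) and TARGET_BIG_ENDIAN checks with a lookup of the CPU's runtime endianness. As a reading aid, here is a minimal sketch of the helpers the MIPS hunks rely on, condensed from the target/mips/internal.h and target/mips/tcg/translate.h hunks further down (only names that actually appear in this diff are used):

    /* internal.h: runtime endianness of a CPU, read from CP0.Config0.BE,
     * which mips_cpu_reset_hold() now seeds from the "big-endian" property. */
    static inline bool mips_env_is_bigendian(CPUMIPSState *env)
    {
        return extract32(env->CP0_Config0, CP0C0_BE, 1);
    }

    static inline MemOp mo_endian_env(CPUMIPSState *env)
    {
        return mips_env_is_bigendian(env) ? MO_BE : MO_LE;
    }

    /* translate.h: the same decision at translation time, from the DisasContext. */
    static inline MemOp mo_endian(DisasContext *dc)
    {
        return disas_is_bigendian(dc) ? MO_BE : MO_LE;
    }

    /* Call sites then build the memop explicitly, e.g.: */
    tcg_gen_qemu_ld_tl(t0, t0, mem_idx,
                       mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask);

The last call replaces what was previously written as MO_TESL; the same pattern with MO_UL, MO_UW, or MO_UQ covers the other former MO_TE* uses throughout the diff.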
diff --git a/target/alpha/gdbstub.c b/target/alpha/gdbstub.c index 13694fd..1a7e2dd 100644 --- a/target/alpha/gdbstub.c +++ b/target/alpha/gdbstub.c @@ -59,7 +59,7 @@ int alpha_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int alpha_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { CPUAlphaState *env = cpu_env(cs); - target_ulong tmp = ldtul_p(mem_buf); + target_ulong tmp = ldq_le_p(mem_buf); CPU_DoubleU d; switch (n) { diff --git a/target/avr/gdbstub.c b/target/avr/gdbstub.c index d6d3c14..aea7128 100644 --- a/target/avr/gdbstub.c +++ b/target/avr/gdbstub.c @@ -69,13 +69,13 @@ int avr_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) /* SP */ if (n == 33) { - env->sp = lduw_p(mem_buf); + env->sp = lduw_le_p(mem_buf); return 2; } /* PC */ if (n == 34) { - env->pc_w = ldl_p(mem_buf) / 2; + env->pc_w = ldl_le_p(mem_buf) / 2; return 4; } diff --git a/target/hexagon/gdbstub.c b/target/hexagon/gdbstub.c index 94e1db8..12d6b3b 100644 --- a/target/hexagon/gdbstub.c +++ b/target/hexagon/gdbstub.c @@ -52,7 +52,7 @@ int hexagon_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) CPUHexagonState *env = cpu_env(cs); if (n == HEX_REG_P3_0_ALIASED) { - uint32_t p3_0 = ldtul_p(mem_buf); + uint32_t p3_0 = ldl_le_p(mem_buf); for (int i = 0; i < NUM_PREGS; i++) { env->pred[i] = extract32(p3_0, i * 8, 8); } @@ -60,14 +60,14 @@ int hexagon_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) } if (n < TOTAL_PER_THREAD_REGS) { - env->gpr[n] = ldtul_p(mem_buf); + env->gpr[n] = ldl_le_p(mem_buf); return sizeof(target_ulong); } n -= TOTAL_PER_THREAD_REGS; if (n < NUM_PREGS) { - env->pred[n] = ldtul_p(mem_buf) & 0xff; + env->pred[n] = ldl_le_p(mem_buf) & 0xff; return sizeof(uint8_t); } @@ -117,7 +117,7 @@ static int gdb_put_vreg(CPUHexagonState *env, uint8_t *mem_buf, int n) { int i; for (i = 0; i < ARRAY_SIZE(env->VRegs[n].uw); i++) { - env->VRegs[n].uw[i] = ldtul_p(mem_buf); + env->VRegs[n].uw[i] = ldl_le_p(mem_buf); mem_buf += 4; } return MAX_VEC_SIZE_BYTES; @@ -127,7 +127,7 @@ static int gdb_put_qreg(CPUHexagonState *env, uint8_t *mem_buf, int n) { int i; for (i = 0; i < ARRAY_SIZE(env->QRegs[n].uw); i++) { - env->QRegs[n].uw[i] = ldtul_p(mem_buf); + env->QRegs[n].uw[i] = ldl_le_p(mem_buf); mem_buf += 4; } return MAX_VEC_SIZE_BYTES / 8; diff --git a/target/loongarch/arch_dump.c b/target/loongarch/arch_dump.c index 4986db9..d9e1120 100644 --- a/target/loongarch/arch_dump.c +++ b/target/loongarch/arch_dump.c @@ -97,11 +97,7 @@ static int loongarch_write_elf64_fprpreg(WriteCoreDumpFunction f, loongarch_note_init(¬e, s, "CORE", 5, NT_PRFPREG, sizeof(note.fpu)); note.fpu.fcsr = cpu_to_dump64(s, env->fcsr0); - - for (i = 0; i < 8; i++) { - note.fpu.fcc |= env->cf[i] << (8 * i); - } - note.fpu.fcc = cpu_to_dump64(s, note.fpu.fcc); + note.fpu.fcc = cpu_to_dump64(s, read_fcc(env)); for (i = 0; i < 32; ++i) { note.fpu.fpr[i] = cpu_to_dump64(s, env->fpr[i].vreg.UD[0]); diff --git a/target/loongarch/gdbstub.c b/target/loongarch/gdbstub.c index 3a03cf9..dafa4fe 100644 --- a/target/loongarch/gdbstub.c +++ b/target/loongarch/gdbstub.c @@ -67,10 +67,10 @@ int loongarch_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) int length = 0; if (is_la64(env)) { - tmp = ldq_p(mem_buf); + tmp = ldq_le_p(mem_buf); read_length = 8; } else { - tmp = ldl_p(mem_buf); + tmp = ldl_le_p(mem_buf); read_length = 4; } @@ -106,13 +106,13 @@ static int loongarch_gdb_set_fpu(CPUState *cs, uint8_t *mem_buf, int n) int length = 0; if (0 <= n && n < 32) { - env->fpr[n].vreg.D(0) = 
ldq_p(mem_buf); + env->fpr[n].vreg.D(0) = ldq_le_p(mem_buf); length = 8; } else if (32 <= n && n < 40) { env->cf[n - 32] = ldub_p(mem_buf); length = 1; } else if (n == 40) { - env->fcsr0 = ldl_p(mem_buf); + env->fcsr0 = ldl_le_p(mem_buf); length = 4; } return length; diff --git a/target/mips/cpu.c b/target/mips/cpu.c index 89655b1..9724e71 100644 --- a/target/mips/cpu.c +++ b/target/mips/cpu.c @@ -200,10 +200,8 @@ static void mips_cpu_reset_hold(Object *obj, ResetType type) /* Reset registers to their default values */ env->CP0_PRid = env->cpu_model->CP0_PRid; - env->CP0_Config0 = env->cpu_model->CP0_Config0; -#if TARGET_BIG_ENDIAN - env->CP0_Config0 |= (1 << CP0C0_BE); -#endif + env->CP0_Config0 = deposit32(env->cpu_model->CP0_Config0, + CP0C0_BE, 1, cpu->is_big_endian); env->CP0_Config1 = env->cpu_model->CP0_Config1; env->CP0_Config2 = env->cpu_model->CP0_Config2; env->CP0_Config3 = env->cpu_model->CP0_Config3; @@ -541,6 +539,11 @@ static const struct SysemuCPUOps mips_sysemu_ops = { }; #endif +static Property mips_cpu_properties[] = { + DEFINE_PROP_BOOL("big-endian", MIPSCPU, is_big_endian, TARGET_BIG_ENDIAN), + DEFINE_PROP_END_OF_LIST(), +}; + #ifdef CONFIG_TCG #include "hw/core/tcg-cpu-ops.h" /* @@ -571,6 +574,7 @@ static void mips_cpu_class_init(ObjectClass *c, void *data) DeviceClass *dc = DEVICE_CLASS(c); ResettableClass *rc = RESETTABLE_CLASS(c); + device_class_set_props(dc, mips_cpu_properties); device_class_set_parent_realize(dc, mips_cpu_realizefn, &mcc->parent_realize); resettable_class_set_parent_phases(rc, NULL, mips_cpu_reset_hold, NULL, @@ -639,12 +643,15 @@ static void mips_cpu_register_types(void) type_init(mips_cpu_register_types) /* Could be used by generic CPU object */ -MIPSCPU *mips_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk) +MIPSCPU *mips_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk, + bool is_big_endian) { DeviceState *cpu; cpu = DEVICE(object_new(cpu_type)); qdev_connect_clock_in(cpu, "clk-in", cpu_refclk); + object_property_set_bool(OBJECT(cpu), "big-endian", is_big_endian, + &error_abort); qdev_realize(cpu, NULL, &error_abort); return MIPS_CPU(cpu); diff --git a/target/mips/cpu.h b/target/mips/cpu.h index 3e906a1..a4a46eb 100644 --- a/target/mips/cpu.h +++ b/target/mips/cpu.h @@ -1209,6 +1209,9 @@ struct ArchCPU { Clock *clock; Clock *count_div; /* Divider for CP0_Count clock */ + + /* Properties */ + bool is_big_endian; }; /** @@ -1373,12 +1376,14 @@ static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, vaddr *pc, * mips_cpu_create_with_clock: * @typename: a MIPS CPU type. * @cpu_refclk: this cpu input clock (an output clock of another device) + * @is_big_endian: whether this CPU is configured in big endianness * * Instantiates a MIPS CPU, set the input clock of the CPU to @cpu_refclk, * then realizes the CPU. * * Returns: A #CPUState or %NULL if an error occurred. 
*/ -MIPSCPU *mips_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk); +MIPSCPU *mips_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk, + bool is_big_endian); #endif /* MIPS_CPU_H */ diff --git a/target/mips/internal.h b/target/mips/internal.h index a9a22ea..91c786c 100644 --- a/target/mips/internal.h +++ b/target/mips/internal.h @@ -225,6 +225,16 @@ static inline void mips_env_set_pc(CPUMIPSState *env, target_ulong value) } } +static inline bool mips_env_is_bigendian(CPUMIPSState *env) +{ + return extract32(env->CP0_Config0, CP0C0_BE, 1); +} + +static inline MemOp mo_endian_env(CPUMIPSState *env) +{ + return mips_env_is_bigendian(env) ? MO_BE : MO_LE; +} + static inline void restore_pamask(CPUMIPSState *env) { if (env->hflags & MIPS_HFLAG_ELPA) { diff --git a/target/mips/tcg/ldst_helper.c b/target/mips/tcg/ldst_helper.c index 97056d0..f92a923 100644 --- a/target/mips/tcg/ldst_helper.c +++ b/target/mips/tcg/ldst_helper.c @@ -53,11 +53,6 @@ HELPER_LD_ATOMIC(lld, ldq, 0x7, (target_ulong)) #endif /* !CONFIG_USER_ONLY */ -static inline bool cpu_is_bigendian(CPUMIPSState *env) -{ - return extract32(env->CP0_Config0, CP0C0_BE, 1); -} - static inline target_ulong get_lmask(CPUMIPSState *env, target_ulong value, unsigned bits) { @@ -65,7 +60,7 @@ static inline target_ulong get_lmask(CPUMIPSState *env, value &= mask; - if (!cpu_is_bigendian(env)) { + if (!mips_env_is_bigendian(env)) { value ^= mask; } @@ -76,7 +71,7 @@ void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, int mem_idx) { target_ulong lmask = get_lmask(env, arg2, 32); - int dir = cpu_is_bigendian(env) ? 1 : -1; + int dir = mips_env_is_bigendian(env) ? 1 : -1; cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC()); @@ -100,7 +95,7 @@ void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, int mem_idx) { target_ulong lmask = get_lmask(env, arg2, 32); - int dir = cpu_is_bigendian(env) ? 1 : -1; + int dir = mips_env_is_bigendian(env) ? 1 : -1; cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC()); @@ -130,7 +125,7 @@ void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, int mem_idx) { target_ulong lmask = get_lmask(env, arg2, 64); - int dir = cpu_is_bigendian(env) ? 1 : -1; + int dir = mips_env_is_bigendian(env) ? 1 : -1; cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC()); @@ -174,7 +169,7 @@ void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, int mem_idx) { target_ulong lmask = get_lmask(env, arg2, 64); - int dir = cpu_is_bigendian(env) ? 1 : -1; + int dir = mips_env_is_bigendian(env) ? 
1 : -1; cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC()); diff --git a/target/mips/tcg/micromips_translate.c.inc b/target/mips/tcg/micromips_translate.c.inc index 7510831..3cbf53b 100644 --- a/target/mips/tcg/micromips_translate.c.inc +++ b/target/mips/tcg/micromips_translate.c.inc @@ -977,23 +977,21 @@ static void gen_ldst_pair(DisasContext *ctx, uint32_t opc, int rd, gen_reserved_instruction(ctx); return; } - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL | + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask); gen_store_gpr(t1, rd); - tcg_gen_movi_tl(t1, 4); - gen_op_addr_add(ctx, t0, t0, t1); - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL | + gen_op_addr_addi(ctx, t0, t0, 4); + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask); gen_store_gpr(t1, rd + 1); break; case SWP: gen_load_gpr(t1, rd); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); - tcg_gen_movi_tl(t1, 4); - gen_op_addr_add(ctx, t0, t0, t1); + gen_op_addr_addi(ctx, t0, t0, 4); gen_load_gpr(t1, rd + 1); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); break; #ifdef TARGET_MIPS64 @@ -1002,23 +1000,21 @@ static void gen_ldst_pair(DisasContext *ctx, uint32_t opc, int rd, gen_reserved_instruction(ctx); return; } - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_store_gpr(t1, rd); - tcg_gen_movi_tl(t1, 8); - gen_op_addr_add(ctx, t0, t0, t1); - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + gen_op_addr_addi(ctx, t0, t0, 8); + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_store_gpr(t1, rd + 1); break; case SDP: gen_load_gpr(t1, rd); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); - tcg_gen_movi_tl(t1, 8); - gen_op_addr_add(ctx, t0, t0, t1); + gen_op_addr_addi(ctx, t0, t0, 8); gen_load_gpr(t1, rd + 1); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); break; #endif @@ -2572,13 +2568,13 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx) gen_st(ctx, mips32_op, rt, rs, offset); break; case SC: - gen_st_cond(ctx, rt, rs, offset, MO_TESL, false); + gen_st_cond(ctx, rt, rs, offset, mo_endian(ctx) | MO_SL, false); break; #if defined(TARGET_MIPS64) case SCD: check_insn(ctx, ISA_MIPS3); check_mips_64(ctx); - gen_st_cond(ctx, rt, rs, offset, MO_TEUQ, false); + gen_st_cond(ctx, rt, rs, offset, mo_endian(ctx) | MO_UQ, false); break; #endif case LD_EVA: @@ -2659,7 +2655,7 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx) mips32_op = OPC_SHE; goto do_st_lr; case SCE: - gen_st_cond(ctx, rt, rs, offset, MO_TESL, true); + gen_st_cond(ctx, rt, rs, offset, mo_endian(ctx) | MO_SL, true); break; case SWE: mips32_op = OPC_SWE; diff --git a/target/mips/tcg/mips16e_translate.c.inc b/target/mips/tcg/mips16e_translate.c.inc index 5cffe0e..a9af8f1 100644 --- a/target/mips/tcg/mips16e_translate.c.inc +++ b/target/mips/tcg/mips16e_translate.c.inc @@ -122,11 +122,21 @@ enum { static int xlat(int r) { - static int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 
}; + static const int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; return map[r]; } +static void decr_and_store(DisasContext *ctx, unsigned regidx, TCGv t0) +{ + TCGv t1 = tcg_temp_new(); + + gen_op_addr_addi(ctx, t0, t0, -4); + gen_load_gpr(t1, regidx); + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL | + ctx->default_tcg_memop_mask); +} + static void gen_mips16_save(DisasContext *ctx, int xsregs, int aregs, int do_ra, int do_s0, int do_s1, @@ -134,7 +144,6 @@ static void gen_mips16_save(DisasContext *ctx, { TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_temp_new(); int args, astatic; switch (aregs) { @@ -172,70 +181,62 @@ static void gen_mips16_save(DisasContext *ctx, case 4: gen_base_offset_addr(ctx, t0, 29, 12); gen_load_gpr(t1, 7); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); /* Fall through */ case 3: gen_base_offset_addr(ctx, t0, 29, 8); gen_load_gpr(t1, 6); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); /* Fall through */ case 2: gen_base_offset_addr(ctx, t0, 29, 4); gen_load_gpr(t1, 5); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); /* Fall through */ case 1: gen_base_offset_addr(ctx, t0, 29, 0); gen_load_gpr(t1, 4); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); } gen_load_gpr(t0, 29); -#define DECR_AND_STORE(reg) do { \ - tcg_gen_movi_tl(t2, -4); \ - gen_op_addr_add(ctx, t0, t0, t2); \ - gen_load_gpr(t1, reg); \ - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL | \ - ctx->default_tcg_memop_mask); \ - } while (0) - if (do_ra) { - DECR_AND_STORE(31); + decr_and_store(ctx, 31, t0); } switch (xsregs) { case 7: - DECR_AND_STORE(30); + decr_and_store(ctx, 30, t0); /* Fall through */ case 6: - DECR_AND_STORE(23); + decr_and_store(ctx, 23, t0); /* Fall through */ case 5: - DECR_AND_STORE(22); + decr_and_store(ctx, 22, t0); /* Fall through */ case 4: - DECR_AND_STORE(21); + decr_and_store(ctx, 21, t0); /* Fall through */ case 3: - DECR_AND_STORE(20); + decr_and_store(ctx, 20, t0); /* Fall through */ case 2: - DECR_AND_STORE(19); + decr_and_store(ctx, 19, t0); /* Fall through */ case 1: - DECR_AND_STORE(18); + decr_and_store(ctx, 18, t0); } if (do_s1) { - DECR_AND_STORE(17); + decr_and_store(ctx, 17, t0); } if (do_s0) { - DECR_AND_STORE(16); + decr_and_store(ctx, 16, t0); } switch (aregs) { @@ -270,21 +271,31 @@ static void gen_mips16_save(DisasContext *ctx, } if (astatic > 0) { - DECR_AND_STORE(7); + decr_and_store(ctx, 7, t0); if (astatic > 1) { - DECR_AND_STORE(6); + decr_and_store(ctx, 6, t0); if (astatic > 2) { - DECR_AND_STORE(5); + decr_and_store(ctx, 5, t0); if (astatic > 3) { - DECR_AND_STORE(4); + decr_and_store(ctx, 4, t0); } } } } -#undef DECR_AND_STORE - tcg_gen_movi_tl(t2, -framesize); - gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2); + gen_op_addr_addi(ctx, cpu_gpr[29], cpu_gpr[29], -framesize); +} + +static void decr_and_load(DisasContext *ctx, unsigned regidx, TCGv t0) +{ + TCGv t1 = tcg_temp_new(); + TCGv t2 = tcg_temp_new(); + + tcg_gen_movi_tl(t2, -4); + gen_op_addr_add(ctx, t0, t0, t2); + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TE | MO_SL | + ctx->default_tcg_memop_mask); + gen_store_gpr(t1, regidx); } static void 
gen_mips16_restore(DisasContext *ctx, @@ -294,52 +305,41 @@ static void gen_mips16_restore(DisasContext *ctx, { int astatic; TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_temp_new(); - - tcg_gen_movi_tl(t2, framesize); - gen_op_addr_add(ctx, t0, cpu_gpr[29], t2); -#define DECR_AND_LOAD(reg) do { \ - tcg_gen_movi_tl(t2, -4); \ - gen_op_addr_add(ctx, t0, t0, t2); \ - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL | \ - ctx->default_tcg_memop_mask); \ - gen_store_gpr(t1, reg); \ - } while (0) + gen_op_addr_addi(ctx, t0, cpu_gpr[29], -framesize); if (do_ra) { - DECR_AND_LOAD(31); + decr_and_load(ctx, 31, t0); } switch (xsregs) { case 7: - DECR_AND_LOAD(30); + decr_and_load(ctx, 30, t0); /* Fall through */ case 6: - DECR_AND_LOAD(23); + decr_and_load(ctx, 23, t0); /* Fall through */ case 5: - DECR_AND_LOAD(22); + decr_and_load(ctx, 22, t0); /* Fall through */ case 4: - DECR_AND_LOAD(21); + decr_and_load(ctx, 21, t0); /* Fall through */ case 3: - DECR_AND_LOAD(20); + decr_and_load(ctx, 20, t0); /* Fall through */ case 2: - DECR_AND_LOAD(19); + decr_and_load(ctx, 19, t0); /* Fall through */ case 1: - DECR_AND_LOAD(18); + decr_and_load(ctx, 18, t0); } if (do_s1) { - DECR_AND_LOAD(17); + decr_and_load(ctx, 17, t0); } if (do_s0) { - DECR_AND_LOAD(16); + decr_and_load(ctx, 16, t0); } switch (aregs) { @@ -374,21 +374,19 @@ static void gen_mips16_restore(DisasContext *ctx, } if (astatic > 0) { - DECR_AND_LOAD(7); + decr_and_load(ctx, 7, t0); if (astatic > 1) { - DECR_AND_LOAD(6); + decr_and_load(ctx, 6, t0); if (astatic > 2) { - DECR_AND_LOAD(5); + decr_and_load(ctx, 5, t0); if (astatic > 3) { - DECR_AND_LOAD(4); + decr_and_load(ctx, 4, t0); } } } } -#undef DECR_AND_LOAD - tcg_gen_movi_tl(t2, framesize); - gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2); + gen_op_addr_addi(ctx, cpu_gpr[29], cpu_gpr[29], -framesize); } #if defined(TARGET_MIPS64) diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c index d218176..1d40383 100644 --- a/target/mips/tcg/msa_helper.c +++ b/target/mips/tcg/msa_helper.c @@ -8211,14 +8211,6 @@ void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, /* Element-by-element access macros */ #define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df)) -#if !defined(CONFIG_USER_ONLY) -#define MEMOP_IDX(DF) \ - MemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \ - mips_env_mmu_index(env)); -#else -#define MEMOP_IDX(DF) -#endif - #if TARGET_BIG_ENDIAN static inline uint64_t bswap16x4(uint64_t x) { diff --git a/target/mips/tcg/mxu_translate.c b/target/mips/tcg/mxu_translate.c index c517258..35ebb03 100644 --- a/target/mips/tcg/mxu_translate.c +++ b/target/mips/tcg/mxu_translate.c @@ -1533,7 +1533,7 @@ static void gen_mxu_s32ldxx(DisasContext *ctx, bool reversed, bool postinc) tcg_gen_add_tl(t0, t0, t1); tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, - (MO_TESL ^ (reversed ? MO_BSWAP : 0)) | + MO_SL | mo_endian_rev(ctx, reversed) | ctx->default_tcg_memop_mask); gen_store_mxu_gpr(t1, XRa); @@ -1569,7 +1569,7 @@ static void gen_mxu_s32stxx(DisasContext *ctx, bool reversed, bool postinc) gen_load_mxu_gpr(t1, XRa); tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, - (MO_TESL ^ (reversed ? MO_BSWAP : 0)) | + MO_SL | mo_endian_rev(ctx, reversed) | ctx->default_tcg_memop_mask); if (postinc) { @@ -1605,7 +1605,7 @@ static void gen_mxu_s32ldxvx(DisasContext *ctx, bool reversed, tcg_gen_add_tl(t0, t0, t1); tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, - (MO_TESL ^ (reversed ? 
MO_BSWAP : 0)) | + MO_SL | mo_endian_rev(ctx, reversed) | ctx->default_tcg_memop_mask); gen_store_mxu_gpr(t1, XRa); @@ -1675,7 +1675,7 @@ static void gen_mxu_s32stxvx(DisasContext *ctx, bool reversed, gen_load_mxu_gpr(t1, XRa); tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, - (MO_TESL ^ (reversed ? MO_BSWAP : 0)) | + MO_SL | mo_endian_rev(ctx, reversed) | ctx->default_tcg_memop_mask); if (postinc) { @@ -4803,19 +4803,19 @@ static void decode_opc_mxu__pool17(DisasContext *ctx) switch (opcode) { case OPC_MXU_LXW: - gen_mxu_lxx(ctx, strd2, MO_TE | MO_UL); + gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_UL); break; case OPC_MXU_LXB: - gen_mxu_lxx(ctx, strd2, MO_TE | MO_SB); + gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_SB); break; case OPC_MXU_LXH: - gen_mxu_lxx(ctx, strd2, MO_TE | MO_SW); + gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_SW); break; case OPC_MXU_LXBU: - gen_mxu_lxx(ctx, strd2, MO_TE | MO_UB); + gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_UB); break; case OPC_MXU_LXHU: - gen_mxu_lxx(ctx, strd2, MO_TE | MO_UW); + gen_mxu_lxx(ctx, strd2, mo_endian(ctx) | MO_UW); break; default: MIPS_INVAL("decode_opc_mxu"); diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc index b4b746d..1e27414 100644 --- a/target/mips/tcg/nanomips_translate.c.inc +++ b/target/mips/tcg/nanomips_translate.c.inc @@ -998,8 +998,9 @@ static void gen_llwp(DisasContext *ctx, uint32_t base, int16_t offset, TCGv tmp2 = tcg_temp_new(); gen_base_offset_addr(ctx, taddr, base, offset); - tcg_gen_qemu_ld_i64(tval, taddr, ctx->mem_idx, MO_TEUQ | MO_ALIGN); - if (cpu_is_bigendian(ctx)) { + tcg_gen_qemu_ld_i64(tval, taddr, ctx->mem_idx, + mo_endian(ctx) | MO_UQ | MO_ALIGN); + if (disas_is_bigendian(ctx)) { tcg_gen_extr_i64_tl(tmp2, tmp1, tval); } else { tcg_gen_extr_i64_tl(tmp1, tmp2, tval); @@ -1031,7 +1032,7 @@ static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset, gen_load_gpr(tmp1, reg1); gen_load_gpr(tmp2, reg2); - if (cpu_is_bigendian(ctx)) { + if (disas_is_bigendian(ctx)) { tcg_gen_concat_tl_i64(tval, tmp2, tmp1); } else { tcg_gen_concat_tl_i64(tval, tmp1, tmp2); @@ -1052,8 +1053,7 @@ static void gen_scwp(DisasContext *ctx, uint32_t base, int16_t offset, tcg_gen_movi_tl(cpu_gpr[reg1], 0); } gen_set_label(lab_done); - tcg_gen_movi_tl(lladdr, -1); - tcg_gen_st_tl(lladdr, tcg_env, offsetof(CPUMIPSState, lladdr)); + tcg_gen_st_tl(tcg_constant_tl(-1), tcg_env, offsetof(CPUMIPSState, lladdr)); } static void gen_adjust_sp(DisasContext *ctx, int u) @@ -1075,7 +1075,7 @@ static void gen_save(DisasContext *ctx, uint8_t rt, uint8_t count, gen_base_offset_addr(ctx, va, 29, this_offset); gen_load_gpr(t0, this_rt); tcg_gen_qemu_st_tl(t0, va, ctx->mem_idx, - (MO_TEUL | ctx->default_tcg_memop_mask)); + mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); counter++; } @@ -1095,8 +1095,8 @@ static void gen_restore(DisasContext *ctx, uint8_t rt, uint8_t count, int this_rt = use_gp ? 
28 : (rt & 0x10) | ((rt + counter) & 0x1f); int this_offset = u - ((counter + 1) << 2); gen_base_offset_addr(ctx, va, 29, this_offset); - tcg_gen_qemu_ld_tl(t0, va, ctx->mem_idx, MO_TESL | - ctx->default_tcg_memop_mask); + tcg_gen_qemu_ld_tl(t0, va, ctx->mem_idx, + mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask); tcg_gen_ext32s_tl(t0, t0); gen_store_gpr(t0, this_rt); counter++; @@ -1543,7 +1543,6 @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc, { int16_t imm; TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); TCGv v0_t = tcg_temp_new(); gen_load_gpr(v0_t, v1); @@ -1570,12 +1569,10 @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc, check_dsp(ctx); switch (extract32(ctx->opcode, 12, 2)) { case NM_MTHLIP: - tcg_gen_movi_tl(t0, v2 >> 3); - gen_helper_mthlip(t0, v0_t, tcg_env); + gen_helper_mthlip(tcg_constant_tl(v2 >> 3), v0_t, tcg_env); break; case NM_SHILOV: - tcg_gen_movi_tl(t0, v2 >> 3); - gen_helper_shilo(t0, v0_t, tcg_env); + gen_helper_shilo(tcg_constant_tl(v2 >> 3), v0_t, tcg_env); break; default: gen_reserved_instruction(ctx); @@ -1587,39 +1584,34 @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc, imm = extract32(ctx->opcode, 14, 7); switch (extract32(ctx->opcode, 12, 2)) { case NM_RDDSP: - tcg_gen_movi_tl(t0, imm); - gen_helper_rddsp(t0, t0, tcg_env); + gen_helper_rddsp(t0, tcg_constant_tl(imm), tcg_env); gen_store_gpr(t0, ret); break; case NM_WRDSP: gen_load_gpr(t0, ret); - tcg_gen_movi_tl(t1, imm); - gen_helper_wrdsp(t0, t1, tcg_env); + gen_helper_wrdsp(t0, tcg_constant_tl(imm), tcg_env); break; case NM_EXTP: - tcg_gen_movi_tl(t0, v2 >> 3); - tcg_gen_movi_tl(t1, v1); - gen_helper_extp(t0, t0, t1, tcg_env); + gen_helper_extp(t0, tcg_constant_tl(v2 >> 3), + tcg_constant_tl(v1), tcg_env); gen_store_gpr(t0, ret); break; case NM_EXTPDP: - tcg_gen_movi_tl(t0, v2 >> 3); - tcg_gen_movi_tl(t1, v1); - gen_helper_extpdp(t0, t0, t1, tcg_env); + gen_helper_extpdp(t0, tcg_constant_tl(v2 >> 3), + tcg_constant_tl(v1), tcg_env); gen_store_gpr(t0, ret); break; } break; case NM_POOL32AXF_1_4: check_dsp(ctx); - tcg_gen_movi_tl(t0, v2 >> 2); switch (extract32(ctx->opcode, 12, 1)) { case NM_SHLL_QB: - gen_helper_shll_qb(t0, t0, v0_t, tcg_env); + gen_helper_shll_qb(t0, tcg_constant_tl(v2 >> 2), v0_t, tcg_env); gen_store_gpr(t0, ret); break; case NM_SHRL_QB: - gen_helper_shrl_qb(t0, t0, v0_t); + gen_helper_shrl_qb(t0, tcg_constant_tl(v2 >> 2), v0_t); gen_store_gpr(t0, ret); break; } @@ -1630,23 +1622,25 @@ static void gen_pool32axf_1_nanomips_insn(DisasContext *ctx, uint32_t opc, break; case NM_POOL32AXF_1_7: check_dsp(ctx); - tcg_gen_movi_tl(t0, v2 >> 3); - tcg_gen_movi_tl(t1, v1); switch (extract32(ctx->opcode, 12, 2)) { case NM_EXTR_W: - gen_helper_extr_w(t0, t0, t1, tcg_env); + gen_helper_extr_w(t0, tcg_constant_tl(v2 >> 3), + tcg_constant_tl(v1), tcg_env); gen_store_gpr(t0, ret); break; case NM_EXTR_R_W: - gen_helper_extr_r_w(t0, t0, t1, tcg_env); + gen_helper_extr_r_w(t0, tcg_constant_tl(v2 >> 3), + tcg_constant_tl(v1), tcg_env); gen_store_gpr(t0, ret); break; case NM_EXTR_RS_W: - gen_helper_extr_rs_w(t0, t0, t1, tcg_env); + gen_helper_extr_rs_w(t0, tcg_constant_tl(v2 >> 3), + tcg_constant_tl(v1), tcg_env); gen_store_gpr(t0, ret); break; case NM_EXTR_S_H: - gen_helper_extr_s_h(t0, t0, t1, tcg_env); + gen_helper_extr_s_h(t0, tcg_constant_tl(v2 >> 3), + tcg_constant_tl(v1), tcg_env); gen_store_gpr(t0, ret); break; } @@ -1848,8 +1842,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, 
case NM_EXTRV_W: check_dsp(ctx); gen_load_gpr(v1_t, rs); - tcg_gen_movi_tl(t0, rd >> 3); - gen_helper_extr_w(t0, t0, v1_t, tcg_env); + gen_helper_extr_w(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env); gen_store_gpr(t0, ret); break; } @@ -1903,8 +1896,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, break; case NM_EXTRV_R_W: check_dsp(ctx); - tcg_gen_movi_tl(t0, rd >> 3); - gen_helper_extr_r_w(t0, t0, v1_t, tcg_env); + gen_helper_extr_r_w(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env); gen_store_gpr(t0, ret); break; default: @@ -1923,8 +1915,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, break; case NM_EXTPV: check_dsp(ctx); - tcg_gen_movi_tl(t0, rd >> 3); - gen_helper_extp(t0, t0, v1_t, tcg_env); + gen_helper_extp(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env); gen_store_gpr(t0, ret); break; case NM_MSUB: @@ -1947,8 +1938,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, break; case NM_EXTRV_RS_W: check_dsp(ctx); - tcg_gen_movi_tl(t0, rd >> 3); - gen_helper_extr_rs_w(t0, t0, v1_t, tcg_env); + gen_helper_extr_rs_w(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env); gen_store_gpr(t0, ret); break; } @@ -1964,8 +1954,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, break; case NM_EXTPDPV: check_dsp(ctx); - tcg_gen_movi_tl(t0, rd >> 3); - gen_helper_extpdp(t0, t0, v1_t, tcg_env); + gen_helper_extpdp(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env); gen_store_gpr(t0, ret); break; case NM_MSUBU: @@ -1990,8 +1979,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, break; case NM_EXTRV_S_H: check_dsp(ctx); - tcg_gen_movi_tl(t0, rd >> 3); - gen_helper_extr_s_h(t0, t0, v1_t, tcg_env); + gen_helper_extr_s_h(t0, tcg_constant_tl(rd >> 3), v1_t, tcg_env); gen_store_gpr(t0, ret); break; } @@ -2149,24 +2137,22 @@ static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc, switch (opc) { case NM_SHRA_R_QB: check_dsp_r2(ctx); - tcg_gen_movi_tl(t0, rd >> 2); switch (extract32(ctx->opcode, 12, 1)) { case 0: /* NM_SHRA_QB */ - gen_helper_shra_qb(t0, t0, rs_t); + gen_helper_shra_qb(t0, tcg_constant_tl(rd >> 2), rs_t); gen_store_gpr(t0, rt); break; case 1: /* NM_SHRA_R_QB */ - gen_helper_shra_r_qb(t0, t0, rs_t); + gen_helper_shra_r_qb(t0, tcg_constant_tl(rd >> 2), rs_t); gen_store_gpr(t0, rt); break; } break; case NM_SHRL_PH: check_dsp_r2(ctx); - tcg_gen_movi_tl(t0, rd >> 1); - gen_helper_shrl_ph(t0, t0, rs_t); + gen_helper_shrl_ph(t0, tcg_constant_tl(rd >> 1), rs_t); gen_store_gpr(t0, rt); break; case NM_REPL_QB: @@ -2180,8 +2166,7 @@ static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc, (uint32_t)imm << 8 | (uint32_t)imm; result = (int32_t)result; - tcg_gen_movi_tl(t0, result); - gen_store_gpr(t0, rt); + gen_store_gpr(tcg_constant_tl(result), rt); } break; default: @@ -2302,10 +2287,9 @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc, { TCGCond cond = TCG_COND_ALWAYS; TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); + TCGv timm = tcg_constant_tl(imm); gen_load_gpr(t0, rt); - tcg_gen_movi_tl(t1, imm); ctx->btarget = addr_add(ctx, ctx->base.pc_next + 4, offset); /* Load needed operands and calculate btarget */ @@ -2334,7 +2318,7 @@ static void gen_compute_imm_branch(DisasContext *ctx, uint32_t opc, } else { tcg_gen_shri_tl(t0, t0, imm); tcg_gen_andi_tl(t0, t0, 1); - tcg_gen_movi_tl(t1, 0); + timm = tcg_constant_tl(0); if (opc == NM_BBEQZC) { cond = TCG_COND_EQ; } else { @@ -2389,7 +2373,7 @@ static void 
gen_compute_imm_branch(DisasContext *ctx, uint32_t opc, /* Conditional compact branch */ TCGLabel *fs = gen_new_label(); - tcg_gen_brcond_tl(tcg_invert_cond(cond), t0, t1, fs); + tcg_gen_brcond_tl(tcg_invert_cond(cond), t0, timm, fs); gen_goto_tb(ctx, 1, ctx->btarget); gen_set_label(fs); @@ -2403,7 +2387,6 @@ static void gen_compute_nanomips_pbalrsc_branch(DisasContext *ctx, int rs, int rt) { TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); /* load rs */ gen_load_gpr(t0, rs); @@ -2415,8 +2398,7 @@ static void gen_compute_nanomips_pbalrsc_branch(DisasContext *ctx, int rs, /* calculate btarget */ tcg_gen_shli_tl(t0, t0, 1); - tcg_gen_movi_tl(t1, ctx->base.pc_next + 4); - gen_op_addr_add(ctx, btarget, t1, t0); + gen_op_addr_add(ctx, btarget, tcg_constant_tl(ctx->base.pc_next + 4), t0); /* branch completion */ clear_branch_hflags(ctx); @@ -2469,11 +2451,9 @@ static void gen_compute_compact_branch_nm(DisasContext *ctx, uint32_t opc, } else { /* OPC_JIC, OPC_JIALC */ TCGv tbase = tcg_temp_new(); - TCGv toffset = tcg_temp_new(); gen_load_gpr(tbase, rt); - tcg_gen_movi_tl(toffset, offset); - gen_op_addr_add(ctx, btarget, tbase, toffset); + gen_op_addr_addi(ctx, btarget, tbase, offset); } break; default: @@ -2647,13 +2627,13 @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt) case NM_LHX: /*case NM_LHXS:*/ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, - MO_TESW | ctx->default_tcg_memop_mask); + mo_endian(ctx) | MO_SW | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rd); break; case NM_LWX: /*case NM_LWXS:*/ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, - MO_TESL | ctx->default_tcg_memop_mask); + mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rd); break; case NM_LBUX: @@ -2663,7 +2643,7 @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt) case NM_LHUX: /*case NM_LHUXS:*/ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, - MO_TEUW | ctx->default_tcg_memop_mask); + mo_endian(ctx) | MO_UW | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rd); break; case NM_SBX: @@ -2676,14 +2656,14 @@ static void gen_p_lsx(DisasContext *ctx, int rd, int rs, int rt) check_nms(ctx); gen_load_gpr(t1, rd); tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, - MO_TEUW | ctx->default_tcg_memop_mask); + mo_endian(ctx) | MO_UW | ctx->default_tcg_memop_mask); break; case NM_SWX: /*case NM_SWXS:*/ check_nms(ctx); gen_load_gpr(t1, rd); tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, - MO_TEUL | ctx->default_tcg_memop_mask); + mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); break; case NM_LWC1X: /*case NM_LWC1XS:*/ @@ -3445,13 +3425,10 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, case NM_SHILO: check_dsp(ctx); { - TCGv tv0 = tcg_temp_new(); - TCGv tv1 = tcg_temp_new(); int16_t imm = extract32(ctx->opcode, 16, 7); - tcg_gen_movi_tl(tv0, rd >> 3); - tcg_gen_movi_tl(tv1, imm); - gen_helper_shilo(tv0, tv1, tcg_env); + gen_helper_shilo(tcg_constant_tl(rd >> 3), + tcg_constant_tl(imm), tcg_env); } break; case NM_MULEQ_S_W_PHL: @@ -3506,8 +3483,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, break; case NM_SHRA_R_W: check_dsp(ctx); - tcg_gen_movi_tl(t0, rd); - gen_helper_shra_r_w(v1_t, t0, v1_t); + gen_helper_shra_r_w(v1_t, tcg_constant_tl(rd), v1_t); gen_store_gpr(v1_t, rt); break; case NM_SHRA_R_PH: @@ -3547,8 +3523,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, break; case NM_SHLL_S_W: check_dsp(ctx); - tcg_gen_movi_tl(t0, rd); - gen_helper_shll_s_w(v1_t, t0, v1_t, tcg_env); + gen_helper_shll_s_w(v1_t, tcg_constant_tl(rd), v1_t, 
tcg_env); gen_store_gpr(v1_t, rt); break; case NM_REPL_PH: @@ -3729,32 +3704,29 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) case NM_LWPC48: check_nms(ctx); if (rt != 0) { - TCGv t0; - t0 = tcg_temp_new(); - target_long addr = addr_add(ctx, ctx->base.pc_next + 6, addr_off); - tcg_gen_movi_tl(t0, addr); - tcg_gen_qemu_ld_tl(cpu_gpr[rt], t0, ctx->mem_idx, - MO_TESL | ctx->default_tcg_memop_mask); + tcg_gen_qemu_ld_tl(cpu_gpr[rt], tcg_constant_tl(addr), + ctx->mem_idx, + mo_endian(ctx) | MO_SL + | ctx->default_tcg_memop_mask); } break; case NM_SWPC48: check_nms(ctx); { - TCGv t0, t1; - t0 = tcg_temp_new(); + TCGv t1; t1 = tcg_temp_new(); target_long addr = addr_add(ctx, ctx->base.pc_next + 6, addr_off); - tcg_gen_movi_tl(t0, addr); gen_load_gpr(t1, rt); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, - MO_TEUL | ctx->default_tcg_memop_mask); + tcg_gen_qemu_st_tl(t1, tcg_constant_tl(addr), ctx->mem_idx, + mo_endian(ctx) | MO_UL + | ctx->default_tcg_memop_mask); } break; default: @@ -4132,14 +4104,14 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) switch (extract32(ctx->opcode, 11, 4)) { case NM_UALH: - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW | - MO_UNALN); + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, + mo_endian(ctx) | MO_SW | MO_UNALN); gen_store_gpr(t0, rt); break; case NM_UASH: gen_load_gpr(t1, rt); - tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUW | - MO_UNALN); + tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, + mo_endian(ctx) | MO_UW | MO_UNALN); break; } } @@ -4161,7 +4133,8 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) case NM_P_SC: switch (ctx->opcode & 0x03) { case NM_SC: - gen_st_cond(ctx, rt, rs, s, MO_TESL, false); + gen_st_cond(ctx, rt, rs, s, mo_endian(ctx) | MO_SL, + false); break; case NM_SCWP: check_xnp(ctx); @@ -4274,7 +4247,8 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) check_xnp(ctx); check_eva(ctx); check_cp0_enabled(ctx); - gen_st_cond(ctx, rt, rs, s, MO_TESL, true); + gen_st_cond(ctx, rt, rs, s, mo_endian(ctx) | MO_SL, + true); break; case NM_SCWPE: check_xnp(ctx); @@ -4317,7 +4291,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) switch (extract32(ctx->opcode, 11, 1)) { case NM_LWM: tcg_gen_qemu_ld_tl(t1, va, ctx->mem_idx, - memop | MO_TESL); + memop | mo_endian(ctx) | MO_SL); gen_store_gpr(t1, this_rt); if ((this_rt == rs) && (counter != (count - 1))) { @@ -4328,7 +4302,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) this_rt = (rt == 0) ? 
0 : this_rt; gen_load_gpr(t1, this_rt); tcg_gen_qemu_st_tl(t1, va, ctx->mem_idx, - memop | MO_TEUL); + memop | mo_endian(ctx) | MO_UL); break; } counter++; diff --git a/target/mips/tcg/sysemu/tlb_helper.c b/target/mips/tcg/sysemu/tlb_helper.c index 3836137..e98bb95 100644 --- a/target/mips/tcg/sysemu/tlb_helper.c +++ b/target/mips/tcg/sysemu/tlb_helper.c @@ -601,7 +601,7 @@ static bool get_pte(CPUMIPSState *env, uint64_t vaddr, MemOp op, return false; } - oi = make_memop_idx(op | MO_TE, ptw_mmu_idx); + oi = make_memop_idx(op | mo_endian_env(env), ptw_mmu_idx); if (op == MO_64) { *pte = cpu_ldq_mmu(env, vaddr, oi, 0); } else { diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c index 50d8537..d92fc41 100644 --- a/target/mips/tcg/translate.c +++ b/target/mips/tcg/translate.c @@ -1456,8 +1456,7 @@ void gen_op_addr_add(DisasContext *ctx, TCGv ret, TCGv arg0, TCGv arg1) #endif } -static inline void gen_op_addr_addi(DisasContext *ctx, TCGv ret, TCGv base, - target_long ofs) +void gen_op_addr_addi(DisasContext *ctx, TCGv ret, TCGv base, target_long ofs) { tcg_gen_addi_tl(ret, base, ofs); @@ -1957,16 +1956,16 @@ static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \ tcg_gen_st_tl(ret, tcg_env, offsetof(CPUMIPSState, llval)); \ } #else -#define OP_LD_ATOMIC(insn, fname) \ +#define OP_LD_ATOMIC(insn, ignored_memop) \ static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \ DisasContext *ctx) \ { \ gen_helper_##insn(ret, tcg_env, arg1, tcg_constant_i32(mem_idx)); \ } #endif -OP_LD_ATOMIC(ll, MO_TESL); +OP_LD_ATOMIC(ll, mo_endian(ctx) | MO_SL); #if defined(TARGET_MIPS64) -OP_LD_ATOMIC(lld, MO_TEUQ); +OP_LD_ATOMIC(lld, mo_endian(ctx) | MO_UQ); #endif #undef OP_LD_ATOMIC @@ -2010,7 +2009,7 @@ static void gen_lxl(DisasContext *ctx, TCGv reg, TCGv addr, */ tcg_gen_qemu_ld_tl(t1, addr, mem_idx, MO_UB); tcg_gen_andi_tl(t1, addr, sizem1); - if (!cpu_is_bigendian(ctx)) { + if (!disas_is_bigendian(ctx)) { tcg_gen_xori_tl(t1, t1, sizem1); } tcg_gen_shli_tl(t1, t1, 3); @@ -2037,7 +2036,7 @@ static void gen_lxr(DisasContext *ctx, TCGv reg, TCGv addr, */ tcg_gen_qemu_ld_tl(t1, addr, mem_idx, MO_UB); tcg_gen_andi_tl(t1, addr, sizem1); - if (cpu_is_bigendian(ctx)) { + if (disas_is_bigendian(ctx)) { tcg_gen_xori_tl(t1, t1, sizem1); } tcg_gen_shli_tl(t1, t1, 3); @@ -2073,12 +2072,12 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, switch (opc) { #if defined(TARGET_MIPS64) case OPC_LWU: - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL | + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rt); break; case OPC_LD: - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ | + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rt); break; @@ -2090,33 +2089,33 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, case OPC_LDL: t1 = tcg_temp_new(); gen_load_gpr(t1, rt); - gen_lxl(ctx, t1, t0, mem_idx, MO_TEUQ); + gen_lxl(ctx, t1, t0, mem_idx, mo_endian(ctx) | MO_UQ); gen_store_gpr(t1, rt); break; case OPC_LDR: t1 = tcg_temp_new(); gen_load_gpr(t1, rt); - gen_lxr(ctx, t1, t0, mem_idx, MO_TEUQ); + gen_lxr(ctx, t1, t0, mem_idx, mo_endian(ctx) | MO_UQ); gen_store_gpr(t1, rt); break; case OPC_LDPC: t1 = tcg_constant_tl(pc_relative_pc(ctx)); gen_op_addr_add(ctx, t0, t0, t1); - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ); + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_UQ); gen_store_gpr(t0, rt); break; #endif case OPC_LWPC: t1 = tcg_constant_tl(pc_relative_pc(ctx)); 
gen_op_addr_add(ctx, t0, t0, t1); - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESL); + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_SL); gen_store_gpr(t0, rt); break; case OPC_LWE: mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_LW: - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESL | + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rt); break; @@ -2124,7 +2123,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_LH: - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESW | + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_SW | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rt); break; @@ -2132,7 +2131,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_LHU: - tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUW | + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, mo_endian(ctx) | MO_UW | ctx->default_tcg_memop_mask); gen_store_gpr(t0, rt); break; @@ -2156,7 +2155,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, case OPC_LWL: t1 = tcg_temp_new(); gen_load_gpr(t1, rt); - gen_lxl(ctx, t1, t0, mem_idx, MO_TEUL); + gen_lxl(ctx, t1, t0, mem_idx, mo_endian(ctx) | MO_UL); tcg_gen_ext32s_tl(t1, t1); gen_store_gpr(t1, rt); break; @@ -2166,7 +2165,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc, case OPC_LWR: t1 = tcg_temp_new(); gen_load_gpr(t1, rt); - gen_lxr(ctx, t1, t0, mem_idx, MO_TEUL); + gen_lxr(ctx, t1, t0, mem_idx, mo_endian(ctx) | MO_UL); tcg_gen_ext32s_tl(t1, t1); gen_store_gpr(t1, rt); break; @@ -2194,7 +2193,7 @@ static void gen_st(DisasContext *ctx, uint32_t opc, int rt, switch (opc) { #if defined(TARGET_MIPS64) case OPC_SD: - tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUQ | + tcg_gen_qemu_st_tl(t1, t0, mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); break; case OPC_SDL: @@ -2208,14 +2207,14 @@ static void gen_st(DisasContext *ctx, uint32_t opc, int rt, mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_SW: - tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUL | + tcg_gen_qemu_st_tl(t1, t0, mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); break; case OPC_SHE: mem_idx = MIPS_HFLAG_UM; /* fall through */ case OPC_SH: - tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUW | + tcg_gen_qemu_st_tl(t1, t0, mem_idx, mo_endian(ctx) | MO_UW | ctx->default_tcg_memop_mask); break; case OPC_SBE: @@ -2253,8 +2252,7 @@ static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset, /* compare the address against that of the preceding LL */ gen_base_offset_addr(ctx, addr, base, offset); tcg_gen_brcond_tl(TCG_COND_EQ, addr, cpu_lladdr, l1); - tcg_gen_movi_tl(t0, 0); - gen_store_gpr(t0, rt); + gen_store_gpr(tcg_constant_tl(0), rt); tcg_gen_br(done); gen_set_label(l1); @@ -2281,7 +2279,7 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, case OPC_LWC1: { TCGv_i32 fp0 = tcg_temp_new_i32(); - tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL | + tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL | ctx->default_tcg_memop_mask); gen_store_fpr32(ctx, fp0, ft); } @@ -2290,14 +2288,14 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, { TCGv_i32 fp0 = tcg_temp_new_i32(); gen_load_fpr32(ctx, fp0, ft); - tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL | + tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL | ctx->default_tcg_memop_mask); } break; case OPC_LDC1: { TCGv_i64 fp0 = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ 
| + tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_store_fpr64(ctx, fp0, ft); } @@ -2306,7 +2304,7 @@ static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, { TCGv_i64 fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, ft); - tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); } break; @@ -2987,14 +2985,14 @@ static inline void gen_pcrel(DisasContext *ctx, int opc, target_ulong pc, case R6_OPC_LWPC: offset = sextract32(ctx->opcode << 2, 0, 21); addr = addr_add(ctx, pc, offset); - gen_r6_ld(addr, rs, ctx->mem_idx, MO_TESL); + gen_r6_ld(addr, rs, ctx->mem_idx, mo_endian(ctx) | MO_SL); break; #if defined(TARGET_MIPS64) case OPC_LWUPC: check_mips_64(ctx); offset = sextract32(ctx->opcode << 2, 0, 21); addr = addr_add(ctx, pc, offset); - gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEUL); + gen_r6_ld(addr, rs, ctx->mem_idx, mo_endian(ctx) | MO_UL); break; #endif default: @@ -3021,7 +3019,7 @@ static inline void gen_pcrel(DisasContext *ctx, int opc, target_ulong pc, check_mips_64(ctx); offset = sextract32(ctx->opcode << 3, 0, 21); addr = addr_add(ctx, (pc & ~0x7), offset); - gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEUQ); + gen_r6_ld(addr, rs, ctx->mem_idx, mo_endian(ctx) | MO_UQ); break; #endif default: @@ -3060,8 +3058,7 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) tcg_gen_and_tl(t2, t2, t3); tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(t2, t2, t3); - tcg_gen_movi_tl(t3, 0); - tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1); tcg_gen_div_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); } @@ -3077,30 +3074,27 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) tcg_gen_and_tl(t2, t2, t3); tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(t2, t2, t3); - tcg_gen_movi_tl(t3, 0); - tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1); tcg_gen_rem_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); } break; case R6_OPC_DIVU: { - TCGv t2 = tcg_constant_tl(0); - TCGv t3 = tcg_constant_tl(1); tcg_gen_ext32u_tl(t0, t0); tcg_gen_ext32u_tl(t1, t1); - tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, + tcg_constant_tl(0), tcg_constant_tl(1), t1); tcg_gen_divu_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); } break; case R6_OPC_MODU: { - TCGv t2 = tcg_constant_tl(0); - TCGv t3 = tcg_constant_tl(1); tcg_gen_ext32u_tl(t0, t0); tcg_gen_ext32u_tl(t1, t1); - tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, + tcg_constant_tl(0), tcg_constant_tl(1), t1); tcg_gen_remu_tl(cpu_gpr[rd], t0, t1); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); } @@ -3155,8 +3149,7 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) tcg_gen_and_tl(t2, t2, t3); tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(t2, t2, t3); - tcg_gen_movi_tl(t3, 0); - tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1); tcg_gen_div_tl(cpu_gpr[rd], t0, t1); } break; @@ -3169,24 +3162,21 @@ static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) tcg_gen_and_tl(t2, t2, t3); tcg_gen_setcondi_tl(TCG_COND_EQ, 
t3, t1, 0); tcg_gen_or_tl(t2, t2, t3); - tcg_gen_movi_tl(t3, 0); - tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1); tcg_gen_rem_tl(cpu_gpr[rd], t0, t1); } break; case R6_OPC_DDIVU: { - TCGv t2 = tcg_constant_tl(0); - TCGv t3 = tcg_constant_tl(1); - tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, + tcg_constant_tl(0), tcg_constant_tl(1), t1); tcg_gen_divu_i64(cpu_gpr[rd], t0, t1); } break; case R6_OPC_DMODU: { - TCGv t2 = tcg_constant_tl(0); - TCGv t3 = tcg_constant_tl(1); - tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, + tcg_constant_tl(0), tcg_constant_tl(1), t1); tcg_gen_remu_i64(cpu_gpr[rd], t0, t1); } break; @@ -3239,8 +3229,7 @@ static void gen_div1_tx79(DisasContext *ctx, uint32_t opc, int rs, int rt) tcg_gen_and_tl(t2, t2, t3); tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(t2, t2, t3); - tcg_gen_movi_tl(t3, 0); - tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1); tcg_gen_div_tl(cpu_LO[1], t0, t1); tcg_gen_rem_tl(cpu_HI[1], t0, t1); tcg_gen_ext32s_tl(cpu_LO[1], cpu_LO[1]); @@ -3295,8 +3284,7 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, tcg_gen_and_tl(t2, t2, t3); tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(t2, t2, t3); - tcg_gen_movi_tl(t3, 0); - tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1); tcg_gen_div_tl(cpu_LO[acc], t0, t1); tcg_gen_rem_tl(cpu_HI[acc], t0, t1); tcg_gen_ext32s_tl(cpu_LO[acc], cpu_LO[acc]); @@ -3348,17 +3336,15 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc, tcg_gen_and_tl(t2, t2, t3); tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); tcg_gen_or_tl(t2, t2, t3); - tcg_gen_movi_tl(t3, 0); - tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, tcg_constant_tl(0), t2, t1); tcg_gen_div_tl(cpu_LO[acc], t0, t1); tcg_gen_rem_tl(cpu_HI[acc], t0, t1); } break; case OPC_DDIVU: { - TCGv t2 = tcg_constant_tl(0); - TCGv t3 = tcg_constant_tl(1); - tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, + tcg_constant_tl(0), tcg_constant_tl(1), t1); tcg_gen_divu_i64(cpu_LO[acc], t0, t1); tcg_gen_remu_i64(cpu_HI[acc], t0, t1); } @@ -4160,10 +4146,10 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, case OPC_GSLQ: t1 = tcg_temp_new(); gen_base_offset_addr(ctx, t0, rs, lsq_offset); - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8); - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_store_gpr(t1, rt); gen_store_gpr(t0, lsq_rt1); @@ -4172,10 +4158,10 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt, check_cp1_enabled(ctx); t1 = tcg_temp_new(); gen_base_offset_addr(ctx, t0, rs, lsq_offset); - tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8); - tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ | + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ | ctx->default_tcg_memop_mask); gen_store_fpr64(ctx, t1, rt); 
         gen_store_fpr64(ctx, t0, lsq_rt1);
@@ -4184,11 +4170,11 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
         t1 = tcg_temp_new();
         gen_base_offset_addr(ctx, t0, rs, lsq_offset);
         gen_load_gpr(t1, rt);
-        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
                            ctx->default_tcg_memop_mask);
         gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
         gen_load_gpr(t1, lsq_rt1);
-        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
                            ctx->default_tcg_memop_mask);
         break;
     case OPC_GSSQC1:
@@ -4196,11 +4182,11 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
         t1 = tcg_temp_new();
         gen_base_offset_addr(ctx, t0, rs, lsq_offset);
         gen_load_fpr64(ctx, t1, rt);
-        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
                            ctx->default_tcg_memop_mask);
         gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
         gen_load_fpr64(ctx, t1, lsq_rt1);
-        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
                            ctx->default_tcg_memop_mask);
         break;
 #endif
@@ -4213,7 +4199,7 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
         gen_load_fpr32(ctx, fp0, rt);
         t1 = tcg_temp_new();
         tcg_gen_ext_i32_tl(t1, fp0);
-        gen_lxl(ctx, t1, t0, ctx->mem_idx, MO_TEUL);
+        gen_lxl(ctx, t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL);
         tcg_gen_trunc_tl_i32(fp0, t1);
         gen_store_fpr32(ctx, fp0, rt);
         break;
@@ -4224,7 +4210,7 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
         gen_load_fpr32(ctx, fp0, rt);
         t1 = tcg_temp_new();
         tcg_gen_ext_i32_tl(t1, fp0);
-        gen_lxr(ctx, t1, t0, ctx->mem_idx, MO_TEUL);
+        gen_lxr(ctx, t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL);
         tcg_gen_trunc_tl_i32(fp0, t1);
         gen_store_fpr32(ctx, fp0, rt);
         break;
@@ -4234,7 +4220,7 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
         gen_base_offset_addr(ctx, t0, rs, shf_offset);
         t1 = tcg_temp_new();
         gen_load_fpr64(ctx, t1, rt);
-        gen_lxl(ctx, t1, t0, ctx->mem_idx, MO_TEUQ);
+        gen_lxl(ctx, t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
         gen_store_fpr64(ctx, t1, rt);
         break;
     case OPC_GSLDRC1:
@@ -4242,7 +4228,7 @@ static void gen_loongson_lswc2(DisasContext *ctx, int rt,
         gen_base_offset_addr(ctx, t0, rs, shf_offset);
         t1 = tcg_temp_new();
         gen_load_fpr64(ctx, t1, rt);
-        gen_lxr(ctx, t1, t0, ctx->mem_idx, MO_TEUQ);
+        gen_lxr(ctx, t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
         gen_store_fpr64(ctx, t1, rt);
         break;
 #endif
@@ -4360,7 +4346,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
         gen_store_gpr(t0, rt);
         break;
     case OPC_GSLHX:
-        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW |
+        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SW |
                            ctx->default_tcg_memop_mask);
         gen_store_gpr(t0, rt);
         break;
@@ -4369,7 +4355,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
         if (rd) {
             gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
         }
-        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL |
+        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL |
                            ctx->default_tcg_memop_mask);
         gen_store_gpr(t0, rt);
         break;
@@ -4379,7 +4365,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
         if (rd) {
             gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
         }
-        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ |
+        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
                            ctx->default_tcg_memop_mask);
         gen_store_gpr(t0, rt);
         break;
@@ -4390,7 +4376,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
             gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
         }
         fp0 = tcg_temp_new_i32();
-        tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL |
+        tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL |
                             ctx->default_tcg_memop_mask);
         gen_store_fpr32(ctx, fp0, rt);
         break;
@@ -4400,7 +4386,7 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
         if (rd) {
             gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
         }
-        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ |
+        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
                            ctx->default_tcg_memop_mask);
         gen_store_fpr64(ctx, t0, rt);
         break;
@@ -4413,34 +4399,34 @@ static void gen_loongson_lsdc2(DisasContext *ctx, int rt,
     case OPC_GSSHX:
         t1 = tcg_temp_new();
         gen_load_gpr(t1, rt);
-        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUW |
+        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UW |
                            ctx->default_tcg_memop_mask);
         break;
     case OPC_GSSWX:
         t1 = tcg_temp_new();
         gen_load_gpr(t1, rt);
-        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
+        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
                            ctx->default_tcg_memop_mask);
         break;
 #if defined(TARGET_MIPS64)
     case OPC_GSSDX:
         t1 = tcg_temp_new();
         gen_load_gpr(t1, rt);
-        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
+        tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
                            ctx->default_tcg_memop_mask);
         break;
 #endif
     case OPC_GSSWXC1:
         fp0 = tcg_temp_new_i32();
         gen_load_fpr32(ctx, fp0, rt);
-        tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL |
+        tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL |
                             ctx->default_tcg_memop_mask);
         break;
 #if defined(TARGET_MIPS64)
     case OPC_GSSDXC1:
         t1 = tcg_temp_new();
         gen_load_fpr64(ctx, t1, rt);
-        tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, MO_TEUQ |
+        tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ |
                             ctx->default_tcg_memop_mask);
         break;
 #endif
@@ -10779,7 +10765,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
         {
             TCGv_i32 fp0 = tcg_temp_new_i32();
 
-            tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
+            tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL);
             tcg_gen_trunc_tl_i32(fp0, t0);
             gen_store_fpr32(ctx, fp0, fd);
         }
@@ -10789,7 +10775,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
         check_cp1_registers(ctx, fd);
         {
             TCGv_i64 fp0 = tcg_temp_new_i64();
-            tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
+            tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
             gen_store_fpr64(ctx, fp0, fd);
         }
         break;
@@ -10799,7 +10785,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
         {
             TCGv_i64 fp0 = tcg_temp_new_i64();
 
-            tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
+            tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
             gen_store_fpr64(ctx, fp0, fd);
         }
         break;
@@ -10808,7 +10794,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
         {
             TCGv_i32 fp0 = tcg_temp_new_i32();
             gen_load_fpr32(ctx, fp0, fs);
-            tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL);
+            tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UL);
         }
         break;
     case OPC_SDXC1:
@@ -10817,7 +10803,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
         {
             TCGv_i64 fp0 = tcg_temp_new_i64();
             gen_load_fpr64(ctx, fp0, fs);
-            tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
+            tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
         }
         break;
     case OPC_SUXC1:
@@ -10826,7 +10812,7 @@ static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
         {
             TCGv_i64 fp0 = tcg_temp_new_i64();
             gen_load_fpr64(ctx, fp0, fs);
-            tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
+            tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
         }
         break;
     }
@@ -10856,7 +10842,7 @@ static void gen_flt3_arith(DisasContext *ctx, uint32_t opc,
             tcg_gen_br(l2);
             gen_set_label(l1);
             tcg_gen_brcondi_tl(TCG_COND_NE, t0, 4, l2);
-            if (cpu_is_bigendian(ctx)) {
+            if (disas_is_bigendian(ctx)) {
                 gen_load_fpr32(ctx, fp, fs);
                 gen_load_fpr32h(ctx, fph, ft);
                 gen_store_fpr32h(ctx, fp, fd);
@@ -11265,10 +11251,9 @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc,
         } else {
             /* OPC_JIC, OPC_JIALC */
             TCGv tbase = tcg_temp_new();
-            TCGv toffset = tcg_constant_tl(offset);
 
             gen_load_gpr(tbase, rt);
-            gen_op_addr_add(ctx, btarget, tbase, toffset);
+            gen_op_addr_addi(ctx, btarget, tbase, offset);
         }
         break;
     default:
@@ -11428,20 +11413,18 @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc,
 void gen_addiupc(DisasContext *ctx, int rx, int imm,
                  int is_64_bit, int extended)
 {
-    TCGv t0;
+    target_ulong npc;
 
     if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) {
         gen_reserved_instruction(ctx);
         return;
     }
 
-    t0 = tcg_temp_new();
-
-    tcg_gen_movi_tl(t0, pc_relative_pc(ctx));
-    tcg_gen_addi_tl(cpu_gpr[rx], t0, imm);
+    npc = pc_relative_pc(ctx) + imm;
     if (!is_64_bit) {
-        tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]);
+        npc = (int32_t)npc;
     }
+    tcg_gen_movi_tl(cpu_gpr[rx], npc);
 }
 
 static void gen_cache_operation(DisasContext *ctx, uint32_t op, int base,
@@ -11476,7 +11459,7 @@ void gen_ldxs(DisasContext *ctx, int base, int index, int rd)
         gen_op_addr_add(ctx, t0, t1, t0);
     }
 
-    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL);
+    tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL);
     gen_store_gpr(t1, rd);
 }
 
@@ -11567,16 +11550,16 @@ static void gen_mips_lx(DisasContext *ctx, uint32_t opc,
         gen_store_gpr(t0, rd);
         break;
     case OPC_LHX:
-        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW);
+        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SW);
         gen_store_gpr(t0, rd);
         break;
     case OPC_LWX:
-        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
+        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_SL);
         gen_store_gpr(t0, rd);
         break;
 #if defined(TARGET_MIPS64)
     case OPC_LDX:
-        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ);
+        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
         gen_store_gpr(t0, rd);
         break;
 #endif
@@ -13719,7 +13702,7 @@ static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx)
         }
         break;
     case R6_OPC_SC:
-        gen_st_cond(ctx, rt, rs, imm, MO_TESL, false);
+        gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_SL, false);
         break;
     case R6_OPC_LL:
         gen_ld(ctx, op1, rt, rs, imm);
@@ -13765,7 +13748,7 @@ static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx)
 #endif
 #if defined(TARGET_MIPS64)
     case R6_OPC_SCD:
-        gen_st_cond(ctx, rt, rs, imm, MO_TEUQ, false);
+        gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_UQ, false);
         break;
     case R6_OPC_LLD:
         gen_ld(ctx, op1, rt, rs, imm);
@@ -14448,7 +14431,7 @@ static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx)
         return;
     case OPC_SCE:
         check_cp0_enabled(ctx);
-        gen_st_cond(ctx, rt, rs, imm, MO_TESL, true);
+        gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_SL, true);
         return;
     case OPC_CACHEE:
         check_eva(ctx);
@@ -14912,7 +14895,7 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx)
         if (ctx->insn_flags & INSN_R5900) {
             check_insn_opc_user_only(ctx, INSN_R5900);
         }
-        gen_st_cond(ctx, rt, rs, imm, MO_TESL, false);
+        gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_SL, false);
         break;
     case OPC_CACHE:
         check_cp0_enabled(ctx);
@@ -15191,7 +15174,7 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx)
             check_insn_opc_user_only(ctx, INSN_R5900);
         }
         check_mips_64(ctx);
-        gen_st_cond(ctx, rt, rs, imm, MO_TEUQ, false);
+        gen_st_cond(ctx, rt, rs, imm, mo_endian(ctx) | MO_UQ, false);
         break;
     case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC, OPC_DADDI */
         if (ctx->insn_flags & ISA_MIPS_R6) {
diff --git a/target/mips/tcg/translate.h b/target/mips/tcg/translate.h
index 2b6646b..5d196e6 100644
--- a/target/mips/tcg/translate.h
+++ b/target/mips/tcg/translate.h
@@ -176,6 +176,7 @@ void gen_addiupc(DisasContext *ctx, int rx, int imm,
  * Address Computation and Large Constant Instructions
  */
 void gen_op_addr_add(DisasContext *ctx, TCGv ret, TCGv arg0, TCGv arg1);
+void gen_op_addr_addi(DisasContext *ctx, TCGv ret, TCGv base, target_long ofs);
 bool gen_lsa(DisasContext *ctx, int rd, int rt, int rs, int sa);
 bool gen_dlsa(DisasContext *ctx, int rd, int rt, int rs, int sa);
 
@@ -235,9 +236,19 @@ bool decode_ext_vr54xx(DisasContext *ctx, uint32_t insn);
     static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
     { return FUNC(ctx, a, __VA_ARGS__); }
 
-static inline bool cpu_is_bigendian(DisasContext *ctx)
+static inline bool disas_is_bigendian(DisasContext *ctx)
 {
     return extract32(ctx->CP0_Config0, CP0C0_BE, 1);
 }
 
+static inline MemOp mo_endian(DisasContext *dc)
+{
+    return disas_is_bigendian(dc) ? MO_BE : MO_LE;
+}
+
+static inline MemOp mo_endian_rev(DisasContext *dc, bool reversed)
+{
+    return disas_is_bigendian(dc) ^ reversed ? MO_BE : MO_LE;
+}
+
 #endif
diff --git a/target/mips/tcg/tx79_translate.c b/target/mips/tcg/tx79_translate.c
index dd6fb8a..ae3f5e1 100644
--- a/target/mips/tcg/tx79_translate.c
+++ b/target/mips/tcg/tx79_translate.c
@@ -340,12 +340,12 @@ static bool trans_LQ(DisasContext *ctx, arg_i *a)
     tcg_gen_andi_tl(addr, addr, ~0xf);
 
     /* Lower half */
-    tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
+    tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
     gen_store_gpr(t0, a->rt);
 
     /* Upper half */
     tcg_gen_addi_i64(addr, addr, 8);
-    tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
+    tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
     gen_store_gpr_hi(t0, a->rt);
 
     return true;
 }
@@ -364,12 +364,12 @@ static bool trans_SQ(DisasContext *ctx, arg_i *a)
 
     /* Lower half */
     gen_load_gpr(t0, a->rt);
-    tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
+    tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
 
     /* Upper half */
     tcg_gen_addi_i64(addr, addr, 8);
     gen_load_gpr_hi(t0, a->rt);
-    tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
+    tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, mo_endian(ctx) | MO_UQ);
 
     return true;
 }
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 71513ba..7689b2a 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -1588,16 +1588,13 @@ static opc_handler_t invalid_handler = {
 static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
 {
     TCGv t0 = tcg_temp_new();
-    TCGv t1 = tcg_temp_new();
     TCGv_i32 t = tcg_temp_new_i32();
 
-    tcg_gen_movi_tl(t0, CRF_EQ);
-    tcg_gen_movi_tl(t1, CRF_LT);
     tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU),
-                       t0, arg0, arg1, t1, t0);
-    tcg_gen_movi_tl(t1, CRF_GT);
+                       t0, arg0, arg1,
+                       tcg_constant_tl(CRF_LT), tcg_constant_tl(CRF_EQ));
     tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU),
-                       t0, arg0, arg1, t1, t0);
+                       t0, arg0, arg1, tcg_constant_tl(CRF_GT), t0);
 
     tcg_gen_trunc_tl_i32(t, t0);
     tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
@@ -2974,8 +2971,8 @@ static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop,
     tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop);
 
     /* RT = (t != t2 ? t : u = 1<<(s*8-1)) */
-    tcg_gen_movi_tl(u, 1 << (memop_size(memop) * 8 - 1));
-    tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u);
+    tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t,
+                       tcg_constant_tl(1 << (memop_size(memop) * 8 - 1)));
 }
 
 static void gen_ld_atomic(DisasContext *ctx, MemOp memop)
diff --git a/target/tricore/gdbstub.c b/target/tricore/gdbstub.c
index 29a7005..0b73b12 100644
--- a/target/tricore/gdbstub.c
+++ b/target/tricore/gdbstub.c
@@ -124,7 +124,7 @@ int tricore_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
     CPUTriCoreState *env = cpu_env(cs);
     uint32_t tmp;
 
-    tmp = ldl_p(mem_buf);
+    tmp = ldl_le_p(mem_buf);
 
     if (n < 16) { /* data registers */
         env->gpr_d[n] = tmp;
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index a46a03e..4a12d2c 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -2732,8 +2732,7 @@ static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
     TCGv temp = tcg_temp_new();
     TCGv temp2 = tcg_temp_new();
 
-    tcg_gen_movi_tl(mask, 1);
-    tcg_gen_shl_tl(mask, mask, width);
+    tcg_gen_shl_tl(mask, tcg_constant_tl(1), width);
     tcg_gen_subi_tl(mask, mask, 1);
     tcg_gen_shl_tl(mask, mask, pos);