Diffstat (limited to 'target')
-rw-r--r--  target/i386/ops_sse.h                        12
-rw-r--r--  target/i386/ops_sse_header.h                  2
-rw-r--r--  target/i386/tcg/excp_helper.c                13
-rw-r--r--  target/i386/tcg/helper-tcg.h                 28
-rw-r--r--  target/i386/tcg/sysemu/excp_helper.c          8
-rw-r--r--  target/i386/tcg/tcg-cpu.c                     2
-rw-r--r--  target/i386/tcg/translate.c                  95
-rw-r--r--  target/i386/tcg/user/excp_helper.c            7
-rw-r--r--  target/m68k/cpu.c                             2
-rw-r--r--  target/m68k/cpu.h                             5
-rw-r--r--  target/m68k/helper.c                          2
-rw-r--r--  target/m68k/op_helper.c                       2
-rw-r--r--  target/m68k/translate.c                     196
-rw-r--r--  target/ppc/cpu.h                              8
-rw-r--r--  target/ppc/cpu_init.c                        28
-rw-r--r--  target/ppc/dfp_helper.c                      31
-rw-r--r--  target/ppc/excp_helper.c                     83
-rw-r--r--  target/ppc/fpu_helper.c                      74
-rw-r--r--  target/ppc/helper.h                           8
-rw-r--r--  target/ppc/insn32.decode                     18
-rw-r--r--  target/ppc/int_helper.c                       4
-rw-r--r--  target/ppc/translate.c                       13
-rw-r--r--  target/ppc/translate/fixedpoint-impl.c.inc   34
-rw-r--r--  target/ppc/translate/fp-impl.c.inc           50
-rw-r--r--  target/ppc/translate/fp-ops.c.inc             2
25 files changed, 509 insertions, 218 deletions
diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h
index c0766de..7bf8bb9 100644
--- a/target/i386/ops_sse.h
+++ b/target/i386/ops_sse.h
@@ -926,7 +926,7 @@ static inline uint64_t helper_extrq(uint64_t src, int shift, int len)
void helper_extrq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), s->ZMM_B(1), s->ZMM_B(0));
+ d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), s->ZMM_B(1) & 63, s->ZMM_B(0) & 63);
}
void helper_extrq_i(CPUX86State *env, ZMMReg *d, int index, int length)
@@ -934,7 +934,7 @@ void helper_extrq_i(CPUX86State *env, ZMMReg *d, int index, int length)
d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), index, length);
}
-static inline uint64_t helper_insertq(uint64_t src, int shift, int len)
+static inline uint64_t helper_insertq(uint64_t dest, uint64_t src, int shift, int len)
{
uint64_t mask;
@@ -943,17 +943,17 @@ static inline uint64_t helper_insertq(uint64_t src, int shift, int len)
} else {
mask = (1ULL << len) - 1;
}
- return (src & ~(mask << shift)) | ((src & mask) << shift);
+ return (dest & ~(mask << shift)) | ((src & mask) << shift);
}
void helper_insertq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
- d->ZMM_Q(0) = helper_insertq(s->ZMM_Q(0), s->ZMM_B(9), s->ZMM_B(8));
+ d->ZMM_Q(0) = helper_insertq(d->ZMM_Q(0), s->ZMM_Q(0), s->ZMM_B(9) & 63, s->ZMM_B(8) & 63);
}
-void helper_insertq_i(CPUX86State *env, ZMMReg *d, int index, int length)
+void helper_insertq_i(CPUX86State *env, ZMMReg *d, ZMMReg *s, int index, int length)
{
- d->ZMM_Q(0) = helper_insertq(d->ZMM_Q(0), index, length);
+ d->ZMM_Q(0) = helper_insertq(d->ZMM_Q(0), s->ZMM_Q(0), index, length);
}
#endif
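
For reference, a minimal standalone sketch (plain C, not part of the diff) of the bit-field insertion the corrected helper_insertq now performs: the low len bits of the source are written into the destination at bit position shift, with both fields reduced modulo 64 as in the hunk above and len == 0 meaning a full 64-bit field. The function name and test values below are illustrative only.

#include <assert.h>
#include <stdint.h>

static uint64_t insertq(uint64_t dest, uint64_t src, int shift, int len)
{
    uint64_t mask;

    shift &= 63;
    len &= 63;
    mask = len ? (1ULL << len) - 1 : ~0ULL;   /* len == 0 selects all 64 bits */
    return (dest & ~(mask << shift)) | ((src & mask) << shift);
}

int main(void)
{
    /* insert the low byte of src at bit 8 of dest */
    assert(insertq(0x1111111111111111ULL, 0xab, 8, 8) == 0x111111111111ab11ULL);
    return 0;
}
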
diff --git a/target/i386/ops_sse_header.h b/target/i386/ops_sse_header.h
index d99464a..400b24c0 100644
--- a/target/i386/ops_sse_header.h
+++ b/target/i386/ops_sse_header.h
@@ -193,7 +193,7 @@ DEF_HELPER_3(rcpss, void, env, ZMMReg, ZMMReg)
DEF_HELPER_3(extrq_r, void, env, ZMMReg, ZMMReg)
DEF_HELPER_4(extrq_i, void, env, ZMMReg, int, int)
DEF_HELPER_3(insertq_r, void, env, ZMMReg, ZMMReg)
-DEF_HELPER_4(insertq_i, void, env, ZMMReg, int, int)
+DEF_HELPER_5(insertq_i, void, env, ZMMReg, ZMMReg, int, int)
DEF_HELPER_3(glue(haddps, SUFFIX), void, env, ZMMReg, ZMMReg)
DEF_HELPER_3(glue(haddpd, SUFFIX), void, env, ZMMReg, ZMMReg)
DEF_HELPER_3(glue(hsubps, SUFFIX), void, env, ZMMReg, ZMMReg)
diff --git a/target/i386/tcg/excp_helper.c b/target/i386/tcg/excp_helper.c
index c1ffa1c..7c3c8dc 100644
--- a/target/i386/tcg/excp_helper.c
+++ b/target/i386/tcg/excp_helper.c
@@ -140,3 +140,16 @@ G_NORETURN void raise_exception_ra(CPUX86State *env, int exception_index,
{
raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}
+
+G_NORETURN void handle_unaligned_access(CPUX86State *env, vaddr vaddr,
+ MMUAccessType access_type,
+ uintptr_t retaddr)
+{
+ /*
+ * Unaligned accesses are currently only triggered by SSE/AVX
+ * instructions that impose alignment requirements on memory
+ * operands. These instructions raise #GP(0) upon accessing an
+ * unaligned address.
+ */
+ raise_exception_ra(env, EXCP0D_GPF, retaddr);
+}
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
index 34167e2..cd17233 100644
--- a/target/i386/tcg/helper-tcg.h
+++ b/target/i386/tcg/helper-tcg.h
@@ -42,17 +42,6 @@ void x86_cpu_do_interrupt(CPUState *cpu);
bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
#endif
-/* helper.c */
-#ifdef CONFIG_USER_ONLY
-void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr,
- MMUAccessType access_type,
- bool maperr, uintptr_t ra);
-#else
-bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr);
-#endif
-
void breakpoint_handler(CPUState *cs);
/* n must be a constant to be efficient */
@@ -78,6 +67,23 @@ G_NORETURN void raise_exception_err_ra(CPUX86State *env, int exception_index,
int error_code, uintptr_t retaddr);
G_NORETURN void raise_interrupt(CPUX86State *nenv, int intno, int is_int,
int error_code, int next_eip_addend);
+G_NORETURN void handle_unaligned_access(CPUX86State *env, vaddr vaddr,
+ MMUAccessType access_type,
+ uintptr_t retaddr);
+#ifdef CONFIG_USER_ONLY
+void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
+void x86_cpu_record_sigbus(CPUState *cs, vaddr addr,
+ MMUAccessType access_type, uintptr_t ra);
+#else
+bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+G_NORETURN void x86_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr);
+#endif
/* cc_helper.c */
extern const uint8_t parity_table[256];
diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c
index 48feba7..796dc2a 100644
--- a/target/i386/tcg/sysemu/excp_helper.c
+++ b/target/i386/tcg/sysemu/excp_helper.c
@@ -439,3 +439,11 @@ bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
}
return true;
}
+
+G_NORETURN void x86_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ handle_unaligned_access(&cpu->env, vaddr, access_type, retaddr);
+}
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index 6fdfdf9..d3c2b8f 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -75,10 +75,12 @@ static const struct TCGCPUOps x86_tcg_ops = {
#ifdef CONFIG_USER_ONLY
.fake_user_interrupt = x86_cpu_do_interrupt,
.record_sigsegv = x86_cpu_record_sigsegv,
+ .record_sigbus = x86_cpu_record_sigbus,
#else
.tlb_fill = x86_cpu_tlb_fill,
.do_interrupt = x86_cpu_do_interrupt,
.cpu_exec_interrupt = x86_cpu_exec_interrupt,
+ .do_unaligned_access = x86_cpu_do_unaligned_access,
.debug_excp_handler = breakpoint_handler,
.debug_check_breakpoint = x86_debug_check_breakpoint,
#endif /* !CONFIG_USER_ONLY */
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index d6420df..44af8c1 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -2289,6 +2289,31 @@ static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
}
}
+static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
+{
+ target_ulong ret;
+
+ switch (ot) {
+ case MO_8:
+ ret = x86_ldub_code(env, s);
+ break;
+ case MO_16:
+ ret = x86_lduw_code(env, s);
+ break;
+ case MO_32:
+ ret = x86_ldl_code(env, s);
+ break;
+#ifdef TARGET_X86_64
+ case MO_64:
+ ret = x86_ldq_code(env, s);
+ break;
+#endif
+ default:
+ g_assert_not_reached();
+ }
+ return ret;
+}
+
static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
{
uint32_t ret;
@@ -2738,21 +2763,23 @@ static inline void gen_stq_env_A0(DisasContext *s, int offset)
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
}
-static inline void gen_ldo_env_A0(DisasContext *s, int offset)
+static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
{
int mem_index = s->mem_index;
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
+ MO_LEUQ | (align ? MO_ALIGN_16 : 0));
tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
tcg_gen_addi_tl(s->tmp0, s->A0, 8);
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
}
-static inline void gen_sto_env_A0(DisasContext *s, int offset)
+static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
{
int mem_index = s->mem_index;
tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, MO_LEUQ);
+ tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
+ MO_LEUQ | (align ? MO_ALIGN_16 : 0));
tcg_gen_addi_tl(s->tmp0, s->A0, 8);
tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
@@ -3131,7 +3158,7 @@ static const struct SSEOpHelper_table6 sse_op_table6[256] = {
[0x25] = UNARY_OP(pmovsxdq, SSE41, SSE_OPF_MMX),
[0x28] = BINARY_OP(pmuldq, SSE41, SSE_OPF_MMX),
[0x29] = BINARY_OP(pcmpeqq, SSE41, SSE_OPF_MMX),
- [0x2a] = SPECIAL_OP(SSE41), /* movntqda */
+ [0x2a] = SPECIAL_OP(SSE41), /* movntdqa */
[0x2b] = BINARY_OP(packusdw, SSE41, SSE_OPF_MMX),
[0x30] = UNARY_OP(pmovzxbw, SSE41, SSE_OPF_MMX),
[0x31] = UNARY_OP(pmovzxbd, SSE41, SSE_OPF_MMX),
@@ -3294,17 +3321,17 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
break;
case 0x1e7: /* movntdq */
case 0x02b: /* movntps */
- case 0x12b: /* movntps */
+ case 0x12b: /* movntpd */
if (mod == 3)
goto illegal_op;
gen_lea_modrm(env, s, modrm);
- gen_sto_env_A0(s, ZMM_OFFSET(reg));
+ gen_sto_env_A0(s, ZMM_OFFSET(reg), true);
break;
case 0x3f0: /* lddqu */
if (mod == 3)
goto illegal_op;
gen_lea_modrm(env, s, modrm);
- gen_ldo_env_A0(s, ZMM_OFFSET(reg));
+ gen_ldo_env_A0(s, ZMM_OFFSET(reg), false);
break;
case 0x22b: /* movntss */
case 0x32b: /* movntsd */
@@ -3373,7 +3400,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
case 0x26f: /* movdqu xmm, ea */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
- gen_ldo_env_A0(s, ZMM_OFFSET(reg));
+ gen_ldo_env_A0(s, ZMM_OFFSET(reg),
+ /* movaps, movapd, movdqa */
+ b == 0x028 || b == 0x128 || b == 0x16f);
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movo(s, ZMM_OFFSET(reg), ZMM_OFFSET(rm));
@@ -3432,7 +3461,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
case 0x212: /* movsldup */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
- gen_ldo_env_A0(s, ZMM_OFFSET(reg));
+ gen_ldo_env_A0(s, ZMM_OFFSET(reg), true);
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)),
@@ -3474,7 +3503,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
case 0x216: /* movshdup */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
- gen_ldo_env_A0(s, ZMM_OFFSET(reg));
+ gen_ldo_env_A0(s, ZMM_OFFSET(reg), true);
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1)),
@@ -3502,10 +3531,20 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
gen_helper_extrq_i(cpu_env, s->ptr0,
tcg_const_i32(bit_index),
tcg_const_i32(field_length));
- else
- gen_helper_insertq_i(cpu_env, s->ptr0,
+ else {
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ op2_offset = offsetof(CPUX86State, xmm_t0);
+ gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0)));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ op2_offset = ZMM_OFFSET(rm);
+ }
+ tcg_gen_addi_ptr(s->ptr1, cpu_env, op2_offset);
+ gen_helper_insertq_i(cpu_env, s->ptr0, s->ptr1,
tcg_const_i32(bit_index),
tcg_const_i32(field_length));
+ }
}
break;
case 0x7e: /* movd ea, mm */
@@ -3568,7 +3607,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
case 0x27f: /* movdqu ea, xmm */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
- gen_sto_env_A0(s, ZMM_OFFSET(reg));
+ gen_sto_env_A0(s, ZMM_OFFSET(reg),
+ /* movaps, movapd, movdqa */
+ b == 0x029 || b == 0x129 || b == 0x17f);
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movo(s, ZMM_OFFSET(rm), ZMM_OFFSET(reg));
@@ -3724,7 +3765,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
op2_offset = offsetof(CPUX86State,xmm_t0);
- gen_ldo_env_A0(s, op2_offset);
+ /* FIXME: should be 64-bit access if b1 == 0. */
+ gen_ldo_env_A0(s, op2_offset, !!b1);
} else {
rm = (modrm & 7) | REX_B(s);
op2_offset = ZMM_OFFSET(rm);
@@ -3913,11 +3955,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
tcg_gen_st16_tl(s->tmp0, cpu_env, op2_offset +
offsetof(ZMMReg, ZMM_W(0)));
break;
- case 0x2a: /* movntqda */
- gen_ldo_env_A0(s, op1_offset);
+ case 0x2a: /* movntdqa */
+ gen_ldo_env_A0(s, op1_offset, true);
return;
default:
- gen_ldo_env_A0(s, op2_offset);
+ gen_ldo_env_A0(s, op2_offset, true);
}
}
if (!op6->fn[b1].op1) {
@@ -4499,7 +4541,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
} else {
op2_offset = offsetof(CPUX86State, xmm_t0);
gen_lea_modrm(env, s, modrm);
- gen_ldo_env_A0(s, op2_offset);
+ gen_ldo_env_A0(s, op2_offset, true);
}
val = x86_ldub_code(env, s);
@@ -4606,7 +4648,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
break;
default:
/* 128 bit access */
- gen_ldo_env_A0(s, op2_offset);
+ gen_ldo_env_A0(s, op2_offset, true);
break;
}
} else {
@@ -4716,9 +4758,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
switch (b) {
case 0xf3:
prefixes |= PREFIX_REPZ;
+ prefixes &= ~PREFIX_REPNZ;
goto next_byte;
case 0xf2:
prefixes |= PREFIX_REPNZ;
+ prefixes &= ~PREFIX_REPZ;
goto next_byte;
case 0xf0:
prefixes |= PREFIX_LOCK;
@@ -5832,16 +5876,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
target_ulong offset_addr;
ot = mo_b_d(b, dflag);
- switch (s->aflag) {
-#ifdef TARGET_X86_64
- case MO_64:
- offset_addr = x86_ldq_code(env, s);
- break;
-#endif
- default:
- offset_addr = insn_get(env, s, s->aflag);
- break;
- }
+ offset_addr = insn_get_addr(env, s, s->aflag);
tcg_gen_movi_tl(s->A0, offset_addr);
gen_add_A0_ds_seg(s);
if ((b & 2) == 0) {
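
The gen_ldo_env_A0()/gen_sto_env_A0() changes above add an align flag so that only the architecturally aligned 128-bit forms request MO_ALIGN_16 and reach the new unaligned-access path, which raises #GP(0). A small restatement of that opcode rule in plain C, using the same byte values as the hunks; the helper names and the main() harness are illustrative only.

#include <stdbool.h>
#include <stdio.h>

/* movaps/movapd/movdqa are the aligned load forms;
   movups/movupd/movdqu/lddqu never request alignment. */
static bool xmm_load_needs_align16(int b)
{
    return b == 0x028 || b == 0x128 || b == 0x16f;
}

/* movaps/movapd/movdqa stores; the movnt* stores above also pass align=true. */
static bool xmm_store_needs_align16(int b)
{
    return b == 0x029 || b == 0x129 || b == 0x17f;
}

int main(void)
{
    printf("movaps load aligned?  %d\n", xmm_load_needs_align16(0x028));  /* 1 */
    printf("movdqu load aligned?  %d\n", xmm_load_needs_align16(0x26f));  /* 0 */
    printf("movdqa store aligned? %d\n", xmm_store_needs_align16(0x17f)); /* 1 */
    return 0;
}
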
diff --git a/target/i386/tcg/user/excp_helper.c b/target/i386/tcg/user/excp_helper.c
index cd507e2..b3bdb78 100644
--- a/target/i386/tcg/user/excp_helper.c
+++ b/target/i386/tcg/user/excp_helper.c
@@ -48,3 +48,10 @@ void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr,
cpu_loop_exit_restore(cs, ra);
}
+
+void x86_cpu_record_sigbus(CPUState *cs, vaddr addr,
+ MMUAccessType access_type, uintptr_t ra)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ handle_unaligned_access(&cpu->env, addr, access_type, ra);
+}
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index 5bbefda..f681be3 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -110,7 +110,7 @@ static void m68000_cpu_initfn(Object *obj)
M68kCPU *cpu = M68K_CPU(obj);
CPUM68KState *env = &cpu->env;
- m68k_set_feature(env, M68K_FEATURE_M68000);
+ m68k_set_feature(env, M68K_FEATURE_M68K);
m68k_set_feature(env, M68K_FEATURE_USP);
m68k_set_feature(env, M68K_FEATURE_WORD_INDEX);
m68k_set_feature(env, M68K_FEATURE_MOVEP);
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
index 4d8f48e..67b6c12 100644
--- a/target/m68k/cpu.h
+++ b/target/m68k/cpu.h
@@ -480,8 +480,9 @@ void do_m68k_semihosting(CPUM68KState *env, int nr);
*/
enum m68k_features {
- /* Base m68k instruction set */
- M68K_FEATURE_M68000,
+ /* Base Motorola CPU set (not set for Coldfire CPUs) */
+ M68K_FEATURE_M68K,
+ /* Motorola CPU feature sets */
M68K_FEATURE_M68010,
M68K_FEATURE_M68020,
M68K_FEATURE_M68030,
diff --git a/target/m68k/helper.c b/target/m68k/helper.c
index 5728e48..4621cf2 100644
--- a/target/m68k/helper.c
+++ b/target/m68k/helper.c
@@ -460,7 +460,7 @@ void m68k_switch_sp(CPUM68KState *env)
int new_sp;
env->sp[env->current_sp] = env->aregs[7];
- if (m68k_feature(env, M68K_FEATURE_M68000)) {
+ if (m68k_feature(env, M68K_FEATURE_M68K)) {
if (env->sr & SR_S) {
/* SR:Master-Mode bit unimplemented then ISP is not available */
if (!m68k_feature(env, M68K_FEATURE_MSP) || env->sr & SR_M) {
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
index a96a034..5da176d 100644
--- a/target/m68k/op_helper.c
+++ b/target/m68k/op_helper.c
@@ -432,7 +432,7 @@ static void m68k_interrupt_all(CPUM68KState *env, int is_hw)
static void do_interrupt_all(CPUM68KState *env, int is_hw)
{
- if (m68k_feature(env, M68K_FEATURE_M68000)) {
+ if (m68k_feature(env, M68K_FEATURE_M68K)) {
m68k_interrupt_all(env, is_hw);
return;
}
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index 5098f7e..233b9d8 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -471,7 +471,7 @@ static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
return NULL_QREG;
- if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
+ if (m68k_feature(s->env, M68K_FEATURE_M68K) &&
!m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
ext &= ~(3 << 9);
}
@@ -804,7 +804,7 @@ static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
reg = get_areg(s, reg0);
tmp = mark_to_release(s, tcg_temp_new());
if (reg0 == 7 && opsize == OS_BYTE &&
- m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ m68k_feature(s->env, M68K_FEATURE_M68K)) {
tcg_gen_subi_i32(tmp, reg, 2);
} else {
tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
@@ -888,7 +888,7 @@ static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
if (what == EA_STORE || !addrp) {
TCGv tmp = tcg_temp_new();
if (reg0 == 7 && opsize == OS_BYTE &&
- m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ m68k_feature(s->env, M68K_FEATURE_M68K)) {
tcg_gen_addi_i32(tmp, reg, 2);
} else {
tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
@@ -2210,7 +2210,7 @@ DISAS_INSN(bitop_im)
op = (insn >> 6) & 3;
bitnum = read_im16(env, s);
- if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
if (bitnum & 0xfe00) {
disas_undef(env, s, insn);
return;
@@ -2285,9 +2285,9 @@ static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
} else {
- TCGv sr = tcg_const_i32(val);
- gen_helper_set_sr(cpu_env, sr);
- tcg_temp_free(sr);
+ /* Must write back before changing security state. */
+ do_writebacks(s);
+ gen_helper_set_sr(cpu_env, tcg_constant_i32(val));
}
set_cc_op(s, CC_OP_FLAGS);
}
@@ -2297,6 +2297,8 @@ static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
if (ccr_only) {
gen_helper_set_ccr(cpu_env, val);
} else {
+ /* Must write back before changing security state. */
+ do_writebacks(s);
gen_helper_set_sr(cpu_env, val);
}
set_cc_op(s, CC_OP_FLAGS);
@@ -2373,6 +2375,7 @@ DISAS_INSN(arith_im)
tcg_gen_or_i32(dest, src1, im);
if (with_SR) {
gen_set_sr(s, dest, opsize == OS_BYTE);
+ gen_exit_tb(s);
} else {
DEST_EA(env, insn, opsize, dest, &addr);
gen_logic_cc(s, dest, opsize);
@@ -2382,6 +2385,7 @@ DISAS_INSN(arith_im)
tcg_gen_and_i32(dest, src1, im);
if (with_SR) {
gen_set_sr(s, dest, opsize == OS_BYTE);
+ gen_exit_tb(s);
} else {
DEST_EA(env, insn, opsize, dest, &addr);
gen_logic_cc(s, dest, opsize);
@@ -2405,6 +2409,7 @@ DISAS_INSN(arith_im)
tcg_gen_xor_i32(dest, src1, im);
if (with_SR) {
gen_set_sr(s, dest, opsize == OS_BYTE);
+ gen_exit_tb(s);
} else {
DEST_EA(env, insn, opsize, dest, &addr);
gen_logic_cc(s, dest, opsize);
@@ -2825,19 +2830,39 @@ DISAS_INSN(illegal)
gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
-/* ??? This should be atomic. */
DISAS_INSN(tas)
{
- TCGv dest;
- TCGv src1;
- TCGv addr;
+ int mode = extract32(insn, 3, 3);
+ int reg0 = REG(insn, 0);
- dest = tcg_temp_new();
- SRC_EA(env, src1, OS_BYTE, 1, &addr);
- gen_logic_cc(s, src1, OS_BYTE);
- tcg_gen_ori_i32(dest, src1, 0x80);
- DEST_EA(env, insn, OS_BYTE, dest, &addr);
- tcg_temp_free(dest);
+ if (mode == 0) {
+ /* data register direct */
+ TCGv dest = cpu_dregs[reg0];
+ gen_logic_cc(s, dest, OS_BYTE);
+ tcg_gen_ori_tl(dest, dest, 0x80);
+ } else {
+ TCGv src1, addr;
+
+ addr = gen_lea_mode(env, s, mode, reg0, OS_BYTE);
+ if (IS_NULL_QREG(addr)) {
+ gen_addr_fault(s);
+ return;
+ }
+ src1 = tcg_temp_new();
+ tcg_gen_atomic_fetch_or_tl(src1, addr, tcg_constant_tl(0x80),
+ IS_USER(s), MO_SB);
+ gen_logic_cc(s, src1, OS_BYTE);
+ tcg_temp_free(src1);
+
+ switch (mode) {
+ case 3: /* Indirect postincrement. */
+ tcg_gen_addi_i32(AREG(insn, 0), addr, 1);
+ break;
+ case 4: /* Indirect predecrement. */
+ tcg_gen_mov_i32(AREG(insn, 0), addr);
+ break;
+ }
+ }
}
DISAS_INSN(mull)
@@ -2875,7 +2900,7 @@ DISAS_INSN(mull)
return;
}
SRC_EA(env, src1, OS_LONG, 0, NULL);
- if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
tcg_gen_movi_i32(QREG_CC_C, 0);
if (sign) {
tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
@@ -3470,7 +3495,7 @@ static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
* while M68000 sets if the most significant bit is changed at
* any time during the shift operation.
*/
- if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
/* if shift count >= bits, V is (reg != 0) */
if (count >= bits) {
tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
@@ -3554,7 +3579,7 @@ static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
* int64_t t = (int64_t)(intN_t)reg << count;
* V = ((s ^ t) & (-1 << (bits - 1))) != 0
*/
- if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
TCGv_i64 tt = tcg_const_i64(32);
/* if shift is greater than 32, use 32 */
tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
@@ -3647,7 +3672,7 @@ DISAS_INSN(shift_mem)
* while M68000 sets if the most significant bit is changed at
* any time during the shift operation
*/
- if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
src = gen_extend(s, src, OS_WORD, 1);
tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
}
@@ -4592,13 +4617,14 @@ DISAS_INSN(strldsr)
}
gen_push(s, gen_get_sr(s));
gen_set_sr_im(s, ext, 0);
+ gen_exit_tb(s);
}
DISAS_INSN(move_from_sr)
{
TCGv sr;
- if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
+ if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68K)) {
gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
return;
}
@@ -5892,8 +5918,10 @@ DISAS_INSN(from_mext)
DISAS_INSN(macsr_to_ccr)
{
TCGv tmp = tcg_temp_new();
- tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
- gen_helper_set_sr(cpu_env, tmp);
+
+ /* Note that X and C are always cleared. */
+ tcg_gen_andi_i32(tmp, QREG_MACSR, CCF_N | CCF_Z | CCF_V);
+ gen_helper_set_ccr(cpu_env, tmp);
tcg_temp_free(tmp);
set_cc_op(s, CC_OP_FLAGS);
}
@@ -6011,7 +6039,7 @@ void register_m68k_insns (CPUM68KState *env)
} while(0)
BASE(undef, 0000, 0000);
INSN(arith_im, 0080, fff8, CF_ISA_A);
- INSN(arith_im, 0000, ff00, M68000);
+ INSN(arith_im, 0000, ff00, M68K);
INSN(chk2, 00c0, f9c0, CHK2);
INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
BASE(bitop_reg, 0100, f1c0);
@@ -6020,26 +6048,26 @@ void register_m68k_insns (CPUM68KState *env)
BASE(bitop_reg, 01c0, f1c0);
INSN(movep, 0108, f138, MOVEP);
INSN(arith_im, 0280, fff8, CF_ISA_A);
- INSN(arith_im, 0200, ff00, M68000);
- INSN(undef, 02c0, ffc0, M68000);
+ INSN(arith_im, 0200, ff00, M68K);
+ INSN(undef, 02c0, ffc0, M68K);
INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
INSN(arith_im, 0480, fff8, CF_ISA_A);
- INSN(arith_im, 0400, ff00, M68000);
- INSN(undef, 04c0, ffc0, M68000);
- INSN(arith_im, 0600, ff00, M68000);
- INSN(undef, 06c0, ffc0, M68000);
+ INSN(arith_im, 0400, ff00, M68K);
+ INSN(undef, 04c0, ffc0, M68K);
+ INSN(arith_im, 0600, ff00, M68K);
+ INSN(undef, 06c0, ffc0, M68K);
INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
INSN(arith_im, 0680, fff8, CF_ISA_A);
INSN(arith_im, 0c00, ff38, CF_ISA_A);
- INSN(arith_im, 0c00, ff00, M68000);
+ INSN(arith_im, 0c00, ff00, M68K);
BASE(bitop_im, 0800, ffc0);
BASE(bitop_im, 0840, ffc0);
BASE(bitop_im, 0880, ffc0);
BASE(bitop_im, 08c0, ffc0);
INSN(arith_im, 0a80, fff8, CF_ISA_A);
- INSN(arith_im, 0a00, ff00, M68000);
+ INSN(arith_im, 0a00, ff00, M68K);
#if defined(CONFIG_SOFTMMU)
- INSN(moves, 0e00, ff00, M68000);
+ INSN(moves, 0e00, ff00, M68K);
#endif
INSN(cas, 0ac0, ffc0, CAS);
INSN(cas, 0cc0, ffc0, CAS);
@@ -6049,44 +6077,44 @@ void register_m68k_insns (CPUM68KState *env)
BASE(move, 1000, f000);
BASE(move, 2000, f000);
BASE(move, 3000, f000);
- INSN(chk, 4000, f040, M68000);
+ INSN(chk, 4000, f040, M68K);
INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
INSN(negx, 4080, fff8, CF_ISA_A);
- INSN(negx, 4000, ff00, M68000);
- INSN(undef, 40c0, ffc0, M68000);
+ INSN(negx, 4000, ff00, M68K);
+ INSN(undef, 40c0, ffc0, M68K);
INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
- INSN(move_from_sr, 40c0, ffc0, M68000);
+ INSN(move_from_sr, 40c0, ffc0, M68K);
BASE(lea, 41c0, f1c0);
BASE(clr, 4200, ff00);
BASE(undef, 42c0, ffc0);
INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
- INSN(move_from_ccr, 42c0, ffc0, M68000);
+ INSN(move_from_ccr, 42c0, ffc0, M68K);
INSN(neg, 4480, fff8, CF_ISA_A);
- INSN(neg, 4400, ff00, M68000);
- INSN(undef, 44c0, ffc0, M68000);
+ INSN(neg, 4400, ff00, M68K);
+ INSN(undef, 44c0, ffc0, M68K);
BASE(move_to_ccr, 44c0, ffc0);
INSN(not, 4680, fff8, CF_ISA_A);
- INSN(not, 4600, ff00, M68000);
+ INSN(not, 4600, ff00, M68K);
#if defined(CONFIG_SOFTMMU)
BASE(move_to_sr, 46c0, ffc0);
#endif
- INSN(nbcd, 4800, ffc0, M68000);
- INSN(linkl, 4808, fff8, M68000);
+ INSN(nbcd, 4800, ffc0, M68K);
+ INSN(linkl, 4808, fff8, M68K);
BASE(pea, 4840, ffc0);
BASE(swap, 4840, fff8);
INSN(bkpt, 4848, fff8, BKPT);
INSN(movem, 48d0, fbf8, CF_ISA_A);
INSN(movem, 48e8, fbf8, CF_ISA_A);
- INSN(movem, 4880, fb80, M68000);
+ INSN(movem, 4880, fb80, M68K);
BASE(ext, 4880, fff8);
BASE(ext, 48c0, fff8);
BASE(ext, 49c0, fff8);
BASE(tst, 4a00, ff00);
INSN(tas, 4ac0, ffc0, CF_ISA_B);
- INSN(tas, 4ac0, ffc0, M68000);
+ INSN(tas, 4ac0, ffc0, M68K);
#if defined(CONFIG_SOFTMMU)
INSN(halt, 4ac8, ffff, CF_ISA_A);
- INSN(halt, 4ac8, ffff, M68060);
+ INSN(halt, 4ac8, ffff, M68K);
#endif
INSN(pulse, 4acc, ffff, CF_ISA_A);
BASE(illegal, 4afc, ffff);
@@ -6101,7 +6129,7 @@ void register_m68k_insns (CPUM68KState *env)
#if defined(CONFIG_SOFTMMU)
INSN(move_to_usp, 4e60, fff8, USP);
INSN(move_from_usp, 4e68, fff8, USP);
- INSN(reset, 4e70, ffff, M68000);
+ INSN(reset, 4e70, ffff, M68K);
BASE(stop, 4e72, ffff);
BASE(rte, 4e73, ffff);
INSN(cf_movec, 4e7b, ffff, CF_ISA_A);
@@ -6110,15 +6138,15 @@ void register_m68k_insns (CPUM68KState *env)
BASE(nop, 4e71, ffff);
INSN(rtd, 4e74, ffff, RTD);
BASE(rts, 4e75, ffff);
- INSN(trapv, 4e76, ffff, M68000);
- INSN(rtr, 4e77, ffff, M68000);
+ INSN(trapv, 4e76, ffff, M68K);
+ INSN(rtr, 4e77, ffff, M68K);
BASE(jump, 4e80, ffc0);
BASE(jump, 4ec0, ffc0);
- INSN(addsubq, 5000, f080, M68000);
+ INSN(addsubq, 5000, f080, M68K);
BASE(addsubq, 5080, f0c0);
INSN(scc, 50c0, f0f8, CF_ISA_A); /* Scc.B Dx */
- INSN(scc, 50c0, f0c0, M68000); /* Scc.B <EA> */
- INSN(dbcc, 50c8, f0f8, M68000);
+ INSN(scc, 50c0, f0c0, M68K); /* Scc.B <EA> */
+ INSN(dbcc, 50c8, f0f8, M68K);
INSN(trapcc, 50fa, f0fe, TRAPCC); /* opmode 010, 011 */
INSN(trapcc, 50fc, f0ff, TRAPCC); /* opmode 100 */
INSN(trapcc, 51fa, fffe, CF_ISA_A); /* TPF (trapf) opmode 010, 011 */
@@ -6137,15 +6165,15 @@ void register_m68k_insns (CPUM68KState *env)
INSN(mvzs, 7100, f100, CF_ISA_B);
BASE(or, 8000, f000);
BASE(divw, 80c0, f0c0);
- INSN(sbcd_reg, 8100, f1f8, M68000);
- INSN(sbcd_mem, 8108, f1f8, M68000);
+ INSN(sbcd_reg, 8100, f1f8, M68K);
+ INSN(sbcd_mem, 8108, f1f8, M68K);
BASE(addsub, 9000, f000);
INSN(undef, 90c0, f0c0, CF_ISA_A);
INSN(subx_reg, 9180, f1f8, CF_ISA_A);
- INSN(subx_reg, 9100, f138, M68000);
- INSN(subx_mem, 9108, f138, M68000);
+ INSN(subx_reg, 9100, f138, M68K);
+ INSN(subx_mem, 9108, f138, M68K);
INSN(suba, 91c0, f1c0, CF_ISA_A);
- INSN(suba, 90c0, f0c0, M68000);
+ INSN(suba, 90c0, f0c0, M68K);
BASE(undef_mac, a000, f000);
INSN(mac, a000, f100, CF_EMAC);
@@ -6166,41 +6194,41 @@ void register_m68k_insns (CPUM68KState *env)
INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */
INSN(cmp, b080, f1c0, CF_ISA_A);
INSN(cmpa, b1c0, f1c0, CF_ISA_A);
- INSN(cmp, b000, f100, M68000);
- INSN(eor, b100, f100, M68000);
- INSN(cmpm, b108, f138, M68000);
- INSN(cmpa, b0c0, f0c0, M68000);
+ INSN(cmp, b000, f100, M68K);
+ INSN(eor, b100, f100, M68K);
+ INSN(cmpm, b108, f138, M68K);
+ INSN(cmpa, b0c0, f0c0, M68K);
INSN(eor, b180, f1c0, CF_ISA_A);
BASE(and, c000, f000);
- INSN(exg_dd, c140, f1f8, M68000);
- INSN(exg_aa, c148, f1f8, M68000);
- INSN(exg_da, c188, f1f8, M68000);
+ INSN(exg_dd, c140, f1f8, M68K);
+ INSN(exg_aa, c148, f1f8, M68K);
+ INSN(exg_da, c188, f1f8, M68K);
BASE(mulw, c0c0, f0c0);
- INSN(abcd_reg, c100, f1f8, M68000);
- INSN(abcd_mem, c108, f1f8, M68000);
+ INSN(abcd_reg, c100, f1f8, M68K);
+ INSN(abcd_mem, c108, f1f8, M68K);
BASE(addsub, d000, f000);
INSN(undef, d0c0, f0c0, CF_ISA_A);
INSN(addx_reg, d180, f1f8, CF_ISA_A);
- INSN(addx_reg, d100, f138, M68000);
- INSN(addx_mem, d108, f138, M68000);
+ INSN(addx_reg, d100, f138, M68K);
+ INSN(addx_mem, d108, f138, M68K);
INSN(adda, d1c0, f1c0, CF_ISA_A);
- INSN(adda, d0c0, f0c0, M68000);
+ INSN(adda, d0c0, f0c0, M68K);
INSN(shift_im, e080, f0f0, CF_ISA_A);
INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
- INSN(shift8_im, e000, f0f0, M68000);
- INSN(shift16_im, e040, f0f0, M68000);
- INSN(shift_im, e080, f0f0, M68000);
- INSN(shift8_reg, e020, f0f0, M68000);
- INSN(shift16_reg, e060, f0f0, M68000);
- INSN(shift_reg, e0a0, f0f0, M68000);
- INSN(shift_mem, e0c0, fcc0, M68000);
- INSN(rotate_im, e090, f0f0, M68000);
- INSN(rotate8_im, e010, f0f0, M68000);
- INSN(rotate16_im, e050, f0f0, M68000);
- INSN(rotate_reg, e0b0, f0f0, M68000);
- INSN(rotate8_reg, e030, f0f0, M68000);
- INSN(rotate16_reg, e070, f0f0, M68000);
- INSN(rotate_mem, e4c0, fcc0, M68000);
+ INSN(shift8_im, e000, f0f0, M68K);
+ INSN(shift16_im, e040, f0f0, M68K);
+ INSN(shift_im, e080, f0f0, M68K);
+ INSN(shift8_reg, e020, f0f0, M68K);
+ INSN(shift16_reg, e060, f0f0, M68K);
+ INSN(shift_reg, e0a0, f0f0, M68K);
+ INSN(shift_mem, e0c0, fcc0, M68K);
+ INSN(rotate_im, e090, f0f0, M68K);
+ INSN(rotate8_im, e010, f0f0, M68K);
+ INSN(rotate16_im, e050, f0f0, M68K);
+ INSN(rotate_reg, e0b0, f0f0, M68K);
+ INSN(rotate8_reg, e030, f0f0, M68K);
+ INSN(rotate16_reg, e070, f0f0, M68K);
+ INSN(rotate_mem, e4c0, fcc0, M68K);
INSN(bfext_mem, e9c0, fdc0, BITFIELD); /* bfextu & bfexts */
INSN(bfext_reg, e9c0, fdf8, BITFIELD);
INSN(bfins_mem, efc0, ffc0, BITFIELD);
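
The TAS rewrite above replaces the old non-atomic read/modify/write with a single tcg_gen_atomic_fetch_or_tl of 0x80 on the byte operand. A host-level sketch of the same idea in plain C11; the function and variable names are illustrative only.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* TAS-style test-and-set: atomically OR in bit 7 and return the old byte,
   from which the N and Z condition codes would then be computed. */
static uint8_t tas_byte(_Atomic uint8_t *p)
{
    return atomic_fetch_or(p, 0x80);
}

int main(void)
{
    _Atomic uint8_t lock = 0;

    uint8_t old = tas_byte(&lock);   /* old == 0x00: lock acquired */
    printf("first:  old=%#04x now=%#04x\n", (unsigned)old, (unsigned)atomic_load(&lock));

    old = tas_byte(&lock);           /* bit 7 already set: lock was taken */
    printf("second: old=%#04x now=%#04x\n", (unsigned)old, (unsigned)atomic_load(&lock));
    return 0;
}
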
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index a4c893c..7f73e2a 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -246,7 +246,7 @@ typedef union _ppc_vsr_t {
#ifdef CONFIG_INT128
__uint128_t u128;
#endif
- Int128 s128;
+ Int128 s128;
} ppc_vsr_t;
typedef ppc_vsr_t ppc_avr_t;
@@ -1506,10 +1506,6 @@ void ppc_compat_add_property(Object *obj, const char *name,
#define XER_CMP 8
#define XER_BC 0
#define xer_so (env->so)
-#define xer_ov (env->ov)
-#define xer_ca (env->ca)
-#define xer_ov32 (env->ov)
-#define xer_ca32 (env->ca)
#define xer_cmp ((env->xer >> XER_CMP) & 0xFF)
#define xer_bc ((env->xer >> XER_BC) & 0x7F)
@@ -1676,6 +1672,8 @@ void ppc_compat_add_property(Object *obj, const char *name,
#define SPR_BOOKE_GIVOR14 (0x1BD)
#define SPR_TIR (0x1BE)
#define SPR_PTCR (0x1D0)
+#define SPR_HASHKEYR (0x1D4)
+#define SPR_HASHPKEYR (0x1D5)
#define SPR_BOOKE_SPEFSCR (0x200)
#define SPR_Exxx_BBEAR (0x201)
#define SPR_Exxx_BBTAR (0x202)
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index 899c4a5..6e080eb 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -5700,6 +5700,33 @@ static void register_power9_mmu_sprs(CPUPPCState *env)
#endif
}
+static void register_power10_hash_sprs(CPUPPCState *env)
+{
+ /*
+ * It is the OS's responsibility to generate a random value for these
+ * registers in each process's context, so initialize them to 0 here.
+ */
+ */
+ uint64_t hashkeyr_initial_value = 0, hashpkeyr_initial_value = 0;
+#if defined(CONFIG_USER_ONLY)
+ /* in linux-user, set up the hash registers with random values */
+ GRand *rand = g_rand_new();
+ hashkeyr_initial_value =
+ ((uint64_t)g_rand_int(rand) << 32) | (uint64_t)g_rand_int(rand);
+ hashpkeyr_initial_value =
+ ((uint64_t)g_rand_int(rand) << 32) | (uint64_t)g_rand_int(rand);
+ g_rand_free(rand);
+#endif
+ spr_register(env, SPR_HASHKEYR, "HASHKEYR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ hashkeyr_initial_value);
+ spr_register_hv(env, SPR_HASHPKEYR, "HASHPKEYR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ hashpkeyr_initial_value);
+}
+
/*
* Initialize PMU counter overflow timers for Power8 and
* newer Power chips when using TCG.
@@ -6518,6 +6545,7 @@ static void init_proc_POWER10(CPUPPCState *env)
register_power8_book4_sprs(env);
register_power8_rpr_sprs(env);
register_power9_mmu_sprs(env);
+ register_power10_hash_sprs(env);
/* FIXME: Filter fields properly based on privilege level */
spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL,
diff --git a/target/ppc/dfp_helper.c b/target/ppc/dfp_helper.c
index 5ba74b2..cc02431 100644
--- a/target/ppc/dfp_helper.c
+++ b/target/ppc/dfp_helper.c
@@ -42,13 +42,16 @@ static void get_dfp128(ppc_vsr_t *dst, ppc_fprp_t *dfp)
static void set_dfp64(ppc_fprp_t *dfp, ppc_vsr_t *src)
{
- dfp->VsrD(0) = src->VsrD(1);
+ dfp[0].VsrD(0) = src->VsrD(1);
+ dfp[0].VsrD(1) = 0ULL;
}
static void set_dfp128(ppc_fprp_t *dfp, ppc_vsr_t *src)
{
dfp[0].VsrD(0) = src->VsrD(0);
dfp[1].VsrD(0) = src->VsrD(1);
+ dfp[0].VsrD(1) = 0ULL;
+ dfp[1].VsrD(1) = 0ULL;
}
static void set_dfp128_to_avr(ppc_avr_t *dst, ppc_vsr_t *src)
@@ -1144,6 +1147,26 @@ static inline uint8_t dfp_get_bcd_digit_128(ppc_vsr_t *t, unsigned n)
return t->VsrD((n & 0x10) ? 0 : 1) >> ((n << 2) & 63) & 15;
}
+static inline void dfp_invalid_op_vxcvi_64(struct PPC_DFP *dfp)
+{
+ /* TODO: fpscr is incorrectly not being saved to env */
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FPSCR_VE);
+ if ((dfp->env->fpscr & FP_VE) == 0) {
+ dfp->vt.VsrD(1) = 0x7c00000000000000; /* QNaN */
+ }
+}
+
+
+static inline void dfp_invalid_op_vxcvi_128(struct PPC_DFP *dfp)
+{
+ /* TODO: fpscr is incorrectly not being saved to env */
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FPSCR_VE);
+ if ((dfp->env->fpscr & FP_VE) == 0) {
+ dfp->vt.VsrD(0) = 0x7c00000000000000; /* QNaN */
+ dfp->vt.VsrD(1) = 0x0;
+ }
+}
+
#define DFP_HELPER_ENBCD(op, size) \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \
uint32_t s) \
@@ -1170,7 +1193,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \
sgn = 0; \
break; \
default: \
- dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \
+ dfp_invalid_op_vxcvi_##size(&dfp); \
+ set_dfp##size(t, &dfp.vt); \
return; \
} \
} \
@@ -1180,7 +1204,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \
digits[(size) / 4 - n] = dfp_get_bcd_digit_##size(&dfp.vb, \
offset++); \
if (digits[(size) / 4 - n] > 10) { \
- dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \
+ dfp_invalid_op_vxcvi_##size(&dfp); \
+ set_dfp##size(t, &dfp.vt); \
return; \
} else { \
nonzero |= (digits[(size) / 4 - n] > 0); \
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index 7550aaf..214acf5 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -2173,6 +2173,89 @@ void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
#endif
#endif
+static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane)
+{
+ const uint16_t c = 0xfffc;
+ const uint64_t z0 = 0xfa2561cdf44ac398ULL;
+ uint16_t z = 0, temp;
+ uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32];
+
+ for (int i = 3; i >= 0; i--) {
+ k[i] = key & 0xffff;
+ key >>= 16;
+ }
+ xleft[0] = x & 0xffff;
+ xright[0] = (x >> 16) & 0xffff;
+
+ for (int i = 0; i < 28; i++) {
+ z = (z0 >> (63 - i)) & 1;
+ temp = ror16(k[i + 3], 3) ^ k[i + 1];
+ k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1);
+ }
+
+ for (int i = 0; i < 8; i++) {
+ eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)];
+ eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)];
+ eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)];
+ eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)];
+ }
+
+ for (int i = 0; i < 32; i++) {
+ fxleft[i] = (rol16(xleft[i], 1) &
+ rol16(xleft[i], 8)) ^ rol16(xleft[i], 2);
+ xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i];
+ xright[i + 1] = xleft[i];
+ }
+
+ return (((uint32_t)xright[32]) << 16) | xleft[32];
+}
+
+static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key)
+{
+ uint64_t stage0_h = 0ULL, stage0_l = 0ULL;
+ uint64_t stage1_h, stage1_l;
+
+ for (int i = 0; i < 4; i++) {
+ stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1));
+ stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i);
+ stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1));
+ stage0_l |= (ra & 0xff) << (8 * 2 * i);
+ rb >>= 8;
+ ra >>= 8;
+ }
+
+ stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32;
+ stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1);
+ stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32;
+ stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3);
+
+ return stage1_h ^ stage1_l;
+}
+
+#include "qemu/guest-random.h"
+
+#define HELPER_HASH(op, key, store) \
+void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \
+ target_ulong rb) \
+{ \
+ uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash; \
+ \
+ if (store) { \
+ cpu_stq_data_ra(env, ea, calculated_hash, GETPC()); \
+ } else { \
+ loaded_hash = cpu_ldq_data_ra(env, ea, GETPC()); \
+ if (loaded_hash != calculated_hash) { \
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, \
+ POWERPC_EXCP_TRAP, GETPC()); \
+ } \
+ } \
+}
+
+HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true)
+HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false)
+HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true)
+HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false)
+
#if !defined(CONFIG_USER_ONLY)
#ifdef CONFIG_TCG
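
HELPER_HASH above instantiates four helpers: HASHST/HASHCHK keyed by SPR_HASHKEYR, and the privileged HASHSTP/HASHCHKP keyed by SPR_HASHPKEYR. The store form writes hash_digest(ra, rb, key) to EA; the check form reloads that slot and traps on mismatch, which is what protects a saved link register against being overwritten. A minimal sketch of that pairing follows; the digest below is a stand-in mixer, not the SIMON-like construction from the hunk, and all names and values are illustrative.

#include <assert.h>
#include <stdint.h>

/* Placeholder keyed digest, only to illustrate the store/check pairing. */
static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key)
{
    return (ra ^ key) * 0x9e3779b97f4a7c15ULL + rb;
}

int main(void)
{
    uint64_t key = 0x1122334455667788ULL;  /* per-process HASHKEYR value */
    uint64_t lr  = 0x10001234;             /* saved link register        */
    uint64_t sp  = 0x7fffe000;             /* stack pointer              */

    /* hashst: store the digest in a stack slot next to the saved LR */
    uint64_t slot = hash_digest(lr, sp, key);

    /* hashchk before returning: the value matches, so no trap */
    assert(slot == hash_digest(lr, sp, key));

    /* a ROP-style overwrite of the saved LR makes the check fail */
    lr = 0x41414140;
    assert(slot != hash_digest(lr, sp, key));
    return 0;
}
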
diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
index 0f045b7..ae25f32 100644
--- a/target/ppc/fpu_helper.c
+++ b/target/ppc/fpu_helper.c
@@ -830,30 +830,21 @@ static void float_invalid_op_sqrt(CPUPPCState *env, int flags,
}
}
-/* fsqrt - fsqrt. */
-float64 helper_fsqrt(CPUPPCState *env, float64 arg)
-{
- float64 ret = float64_sqrt(arg, &env->fp_status);
- int flags = get_float_exception_flags(&env->fp_status);
-
- if (unlikely(flags & float_flag_invalid)) {
- float_invalid_op_sqrt(env, flags, 1, GETPC());
- }
-
- return ret;
+#define FPU_FSQRT(name, op) \
+float64 helper_##name(CPUPPCState *env, float64 arg) \
+{ \
+ float64 ret = op(arg, &env->fp_status); \
+ int flags = get_float_exception_flags(&env->fp_status); \
+ \
+ if (unlikely(flags & float_flag_invalid)) { \
+ float_invalid_op_sqrt(env, flags, 1, GETPC()); \
+ } \
+ \
+ return ret; \
}
-/* fsqrts - fsqrts. */
-float64 helper_fsqrts(CPUPPCState *env, float64 arg)
-{
- float64 ret = float64r32_sqrt(arg, &env->fp_status);
- int flags = get_float_exception_flags(&env->fp_status);
-
- if (unlikely(flags & float_flag_invalid)) {
- float_invalid_op_sqrt(env, flags, 1, GETPC());
- }
- return ret;
-}
+FPU_FSQRT(FSQRT, float64_sqrt)
+FPU_FSQRT(FSQRTS, float64r32_sqrt)
/* fre - fre. */
float64 helper_fre(CPUPPCState *env, float64 arg)
@@ -2176,7 +2167,7 @@ VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
ppc_vsr_t *s1, ppc_vsr_t *s2, ppc_vsr_t *s3) \
{ \
- ppc_vsr_t t = *xt; \
+ ppc_vsr_t t = { }; \
int i; \
\
helper_reset_fpstatus(env); \
@@ -2637,6 +2628,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
int all_true = 1; \
int all_false = 1; \
\
+ helper_reset_fpstatus(env); \
+ \
for (i = 0; i < nels; i++) { \
if (unlikely(tp##_is_any_nan(xa->fld) || \
tp##_is_any_nan(xb->fld))) { \
@@ -2690,6 +2683,8 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
ppc_vsr_t t = { }; \
int i; \
\
+ helper_reset_fpstatus(env); \
+ \
for (i = 0; i < nels; i++) { \
t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
if (unlikely(stp##_is_signaling_nan(xb->sfld, \
@@ -2715,6 +2710,8 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
ppc_vsr_t t = { }; \
int i; \
\
+ helper_reset_fpstatus(env); \
+ \
for (i = 0; i < nels; i++) { \
t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \
if (unlikely(stp##_is_signaling_nan(xb->VsrD(i), \
@@ -2752,6 +2749,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode, \
ppc_vsr_t t = *xt; \
int i; \
\
+ helper_reset_fpstatus(env); \
+ \
for (i = 0; i < nels; i++) { \
t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
if (unlikely(stp##_is_signaling_nan(xb->sfld, \
@@ -2787,6 +2786,8 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
ppc_vsr_t t = { }; \
int i; \
\
+ helper_reset_fpstatus(env); \
+ \
for (i = 0; i < nels; i++) { \
t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status); \
if (unlikely(stp##_is_signaling_nan(xb->sfld, \
@@ -2834,6 +2835,8 @@ void helper_XSCVQPDP(CPUPPCState *env, uint32_t ro, ppc_vsr_t *xt,
ppc_vsr_t t = { };
float_status tstat;
+ helper_reset_fpstatus(env);
+
tstat = env->fp_status;
if (ro != 0) {
tstat.float_rounding_mode = float_round_to_odd;
@@ -2855,6 +2858,7 @@ uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
uint64_t result, sign, exp, frac;
+ helper_reset_fpstatus(env);
float_status tstat = env->fp_status;
set_float_exception_flags(0, &tstat);
@@ -2910,22 +2914,20 @@ uint64_t helper_XSCVSPDPN(uint64_t xb)
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, sfi, rnan) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- int all_flags = env->fp_status.float_exception_flags, flags; \
ppc_vsr_t t = { }; \
- int i; \
+ int i, flags; \
+ \
+ helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
- env->fp_status.float_exception_flags = 0; \
t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \
flags = env->fp_status.float_exception_flags; \
if (unlikely(flags & float_flag_invalid)) { \
t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC());\
} \
- all_flags |= flags; \
} \
\
*xt = t; \
- env->fp_status.float_exception_flags = all_flags; \
do_float_check_status(env, sfi, GETPC()); \
}
@@ -2977,12 +2979,12 @@ VSX_CVT_FP_TO_INT128(XSCVQPSQZ, int128, 0x8000000000000000ULL);
#define VSX_CVT_FP_TO_INT2(op, nels, stp, ttp, sfi, rnan) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- int all_flags = env->fp_status.float_exception_flags, flags; \
ppc_vsr_t t = { }; \
- int i; \
+ int i, flags; \
+ \
+ helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
- env->fp_status.float_exception_flags = 0; \
t.VsrW(2 * i) = stp##_to_##ttp##_round_to_zero(xb->VsrD(i), \
&env->fp_status); \
flags = env->fp_status.float_exception_flags; \
@@ -2991,11 +2993,9 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
rnan, 0, GETPC()); \
} \
t.VsrW(2 * i + 1) = t.VsrW(2 * i); \
- all_flags |= flags; \
} \
\
*xt = t; \
- env->fp_status.float_exception_flags = all_flags; \
do_float_check_status(env, sfi, GETPC()); \
}
@@ -3020,6 +3020,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode, \
ppc_vsr_t t = { }; \
int flags; \
\
+ helper_reset_fpstatus(env); \
+ \
t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \
flags = get_float_exception_flags(&env->fp_status); \
if (flags & float_flag_invalid) { \
@@ -3032,7 +3034,6 @@ void helper_##op(CPUPPCState *env, uint32_t opcode, \
VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
0x8000000000000000ULL)
-
VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
0xffffffff80000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
@@ -3055,6 +3056,8 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
ppc_vsr_t t = { }; \
int i; \
\
+ helper_reset_fpstatus(env); \
+ \
for (i = 0; i < nels; i++) { \
t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
if (r2sp) { \
@@ -3124,6 +3127,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode, \
{ \
ppc_vsr_t t = *xt; \
\
+ helper_reset_fpstatus(env); \
t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
helper_compute_fprf_##ttp(env, t.tfld); \
\
@@ -3157,6 +3161,8 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
int i; \
FloatRoundMode curr_rounding_mode; \
\
+ helper_reset_fpstatus(env); \
+ \
if (rmode != FLOAT_ROUND_CURRENT) { \
curr_rounding_mode = get_float_rounding_mode(&env->fp_status); \
set_float_rounding_mode(rmode, &env->fp_status); \
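
For readability, FPU_FSQRT(FSQRT, float64_sqrt) in the hunk above expands to the same body the removed open-coded helper had; FSQRTS is identical except that it calls float64r32_sqrt. Shown here only as the macro expansion, not as new code:

float64 helper_FSQRT(CPUPPCState *env, float64 arg)
{
    float64 ret = float64_sqrt(arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_sqrt(env, flags, 1, GETPC());
    }

    return ret;
}
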
diff --git a/target/ppc/helper.h b/target/ppc/helper.h
index 159b352..57eee07 100644
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -4,6 +4,10 @@ DEF_HELPER_FLAGS_4(tw, TCG_CALL_NO_WG, void, env, tl, tl, i32)
#if defined(TARGET_PPC64)
DEF_HELPER_FLAGS_4(td, TCG_CALL_NO_WG, void, env, tl, tl, i32)
#endif
+DEF_HELPER_4(HASHST, void, env, tl, tl, tl)
+DEF_HELPER_4(HASHCHK, void, env, tl, tl, tl)
+DEF_HELPER_4(HASHSTP, void, env, tl, tl, tl)
+DEF_HELPER_4(HASHCHKP, void, env, tl, tl, tl)
#if !defined(CONFIG_USER_ONLY)
DEF_HELPER_2(store_msr, void, env, tl)
DEF_HELPER_1(rfi, void, env)
@@ -116,8 +120,8 @@ DEF_HELPER_4(fmadds, i64, env, i64, i64, i64)
DEF_HELPER_4(fmsubs, i64, env, i64, i64, i64)
DEF_HELPER_4(fnmadds, i64, env, i64, i64, i64)
DEF_HELPER_4(fnmsubs, i64, env, i64, i64, i64)
-DEF_HELPER_2(fsqrt, f64, env, f64)
-DEF_HELPER_2(fsqrts, f64, env, f64)
+DEF_HELPER_2(FSQRT, f64, env, f64)
+DEF_HELPER_2(FSQRTS, f64, env, f64)
DEF_HELPER_2(fre, i64, env, i64)
DEF_HELPER_2(fres, i64, env, i64)
DEF_HELPER_2(frsqrte, i64, env, i64)
diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode
index eb41efc..a5249ee 100644
--- a/target/ppc/insn32.decode
+++ b/target/ppc/insn32.decode
@@ -20,6 +20,9 @@
&A frt fra frb frc rc:bool
@A ...... frt:5 fra:5 frb:5 frc:5 ..... rc:1 &A
+&A_tb frt frb rc:bool
+@A_tb ...... frt:5 ..... frb:5 ..... ..... rc:1 &A_tb
+
&D rt ra si:int64_t
@D ...... rt:5 ra:5 si:s16 &D
@@ -172,6 +175,9 @@
@X_TSX ...... ..... ra:5 rb:5 .......... . &X rt=%x_rt_tsx
@X_TSXP ...... ..... ra:5 rb:5 .......... . &X rt=%rt_tsxp
+%x_dw 0:1 21:5 !function=dw_compose_ea
+@X_DW ...... ..... ra:5 rb:5 .......... . &X rt=%x_dw
+
&X_frtp_vrb frtp vrb
@X_frtp_vrb ...... ....0 ..... vrb:5 .......... . &X_frtp_vrb frtp=%x_frtp
@@ -323,6 +329,13 @@ CNTTZDM 011111 ..... ..... ..... 1000111011 - @X
PDEPD 011111 ..... ..... ..... 0010011100 - @X
PEXTD 011111 ..... ..... ..... 0010111100 - @X
+# Fixed-Point Hash Instructions
+
+HASHST 011111 ..... ..... ..... 1011010010 . @X_DW
+HASHCHK 011111 ..... ..... ..... 1011110010 . @X_DW
+HASHSTP 011111 ..... ..... ..... 1010010010 . @X_DW
+HASHCHKP 011111 ..... ..... ..... 1010110010 . @X_DW
+
## BCD Assist
ADDG6S 011111 ..... ..... ..... - 001001010 - @X
@@ -353,6 +366,11 @@ STFDU 110111 ..... ..... ................ @D
STFDX 011111 ..... ..... ..... 1011010111 - @X
STFDUX 011111 ..... ..... ..... 1011110111 - @X
+### Floating-Point Arithmetic Instructions
+
+FSQRT 111111 ..... ----- ..... ----- 10110 . @A_tb
+FSQRTS 111011 ..... ----- ..... ----- 10110 . @A_tb
+
### Floating-Point Select Instruction
FSEL 111111 ..... ..... ..... ..... 10111 . @A
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index d905f07..6960961 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -37,9 +37,9 @@
static inline void helper_update_ov_legacy(CPUPPCState *env, int ov)
{
if (unlikely(ov)) {
- env->so = env->ov = 1;
+ env->so = env->ov = env->ov32 = 1;
} else {
- env->ov = 0;
+ env->ov = env->ov32 = 0;
}
}
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 000b1e5..e810842 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -6443,6 +6443,14 @@ static inline void get_fpr(TCGv_i64 dst, int regno)
static inline void set_fpr(int regno, TCGv_i64 src)
{
tcg_gen_st_i64(src, cpu_env, fpr_offset(regno));
+ /*
+ * Before PowerISA v3.1 the result of doubleword 1 of the VSR
+ * corresponding to the target FPR was undefined. However,
+ * most (if not all) real hardware was setting the result to 0.
+ * Starting at ISA v3.1, the result for doubleword 1 is now defined
+ * to be 0.
+ */
+ tcg_gen_st_i64(tcg_constant_i64(0), cpu_env, vsr64_offset(regno, false));
}
static inline void get_avr64(TCGv_i64 dst, int regno, bool high)
@@ -6473,6 +6481,11 @@ static int times_16(DisasContext *ctx, int x)
return x * 16;
}
+static int64_t dw_compose_ea(DisasContext *ctx, int x)
+{
+ return deposit64(0xfffffffffffffe00, 3, 6, x);
+}
+
/*
* Helpers for trans_* functions to check for specific insns flags.
* Use token pasting to ensure that we use the proper flag with the
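
dw_compose_ea() above builds the negative displacement used by the new hash instructions: the 6-bit DW field is deposited into bits 3..8 of -512, giving byte offsets -512, -504, ..., -8 in steps of 8. A standalone check of that range follows; deposit64() is re-implemented locally as a simplified stand-in for QEMU's bitops helper so the snippet compiles on its own.

#include <assert.h>
#include <stdint.h>

/* Simplified stand-in for deposit64(): insert len bits of val into base
   starting at bit start. */
static uint64_t deposit64(uint64_t base, int start, int len, uint64_t val)
{
    uint64_t mask = (~0ULL >> (64 - len)) << start;
    return (base & ~mask) | ((val << start) & mask);
}

static int64_t dw_compose_ea(int x)
{
    return (int64_t)deposit64(0xfffffffffffffe00ULL, 3, 6, x);
}

int main(void)
{
    assert(dw_compose_ea(0)  == -512);   /* DW = 0  -> lowest slot      */
    assert(dw_compose_ea(1)  == -504);   /* each step is one doubleword */
    assert(dw_compose_ea(63) == -8);     /* DW = 63 -> highest slot     */
    return 0;
}
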
diff --git a/target/ppc/translate/fixedpoint-impl.c.inc b/target/ppc/translate/fixedpoint-impl.c.inc
index db14d3b..1ba56cb 100644
--- a/target/ppc/translate/fixedpoint-impl.c.inc
+++ b/target/ppc/translate/fixedpoint-impl.c.inc
@@ -540,3 +540,37 @@ static bool trans_CBCDTD(DisasContext *ctx, arg_X_sa *a)
gen_helper_CBCDTD(cpu_gpr[a->ra], cpu_gpr[a->rs]);
return true;
}
+
+static bool do_hash(DisasContext *ctx, arg_X *a, bool priv,
+ void (*helper)(TCGv_ptr, TCGv, TCGv, TCGv))
+{
+ TCGv ea;
+
+ if (!(ctx->insns_flags2 & PPC2_ISA310)) {
+ /* if version is before v3.1, this operation is a nop */
+ return true;
+ }
+
+ if (priv) {
+ /* if instruction is privileged but the context is in user space */
+ REQUIRE_SV(ctx);
+ }
+
+ if (unlikely(a->ra == 0)) {
+ /* if RA=0, the instruction form is invalid */
+ gen_invalid(ctx);
+ return true;
+ }
+
+ ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->rt));
+ helper(cpu_env, ea, cpu_gpr[a->ra], cpu_gpr[a->rb]);
+
+ tcg_temp_free(ea);
+
+ return true;
+}
+
+TRANS(HASHST, do_hash, false, gen_helper_HASHST)
+TRANS(HASHCHK, do_hash, false, gen_helper_HASHCHK)
+TRANS(HASHSTP, do_hash, true, gen_helper_HASHSTP)
+TRANS(HASHCHKP, do_hash, true, gen_helper_HASHCHKP)
diff --git a/target/ppc/translate/fp-impl.c.inc b/target/ppc/translate/fp-impl.c.inc
index 0e893ea..8d5cf0f 100644
--- a/target/ppc/translate/fp-impl.c.inc
+++ b/target/ppc/translate/fp-impl.c.inc
@@ -254,51 +254,35 @@ static bool trans_FSEL(DisasContext *ctx, arg_A *a)
GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
/* Optional: */
-/* fsqrt */
-static void gen_fsqrt(DisasContext *ctx)
+static bool do_helper_fsqrt(DisasContext *ctx, arg_A_tb *a,
+ void (*helper)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
- TCGv_i64 t0;
- TCGv_i64 t1;
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- t0 = tcg_temp_new_i64();
- t1 = tcg_temp_new_i64();
- gen_reset_fpstatus();
- get_fpr(t0, rB(ctx->opcode));
- gen_helper_fsqrt(t1, cpu_env, t0);
- set_fpr(rD(ctx->opcode), t1);
- gen_compute_fprf_float64(t1);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_cr1_from_fpscr(ctx);
- }
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
-}
+ TCGv_i64 t0, t1;
+
+ REQUIRE_INSNS_FLAGS(ctx, FLOAT_FSQRT);
+ REQUIRE_FPU(ctx);
-static void gen_fsqrts(DisasContext *ctx)
-{
- TCGv_i64 t0;
- TCGv_i64 t1;
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
+
gen_reset_fpstatus();
- get_fpr(t0, rB(ctx->opcode));
- gen_helper_fsqrts(t1, cpu_env, t0);
- set_fpr(rD(ctx->opcode), t1);
+ get_fpr(t0, a->frb);
+ helper(t1, cpu_env, t0);
+ set_fpr(a->frt, t1);
gen_compute_fprf_float64(t1);
- if (unlikely(Rc(ctx->opcode) != 0)) {
+ if (unlikely(a->rc != 0)) {
gen_set_cr1_from_fpscr(ctx);
}
+
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
+
+ return true;
}
+TRANS(FSQRT, do_helper_fsqrt, gen_helper_FSQRT);
+TRANS(FSQRTS, do_helper_fsqrt, gen_helper_FSQRTS);
+
/*** Floating-Point multiply-and-add ***/
/* fmadd - fmadds */
GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
diff --git a/target/ppc/translate/fp-ops.c.inc b/target/ppc/translate/fp-ops.c.inc
index 1b65f5a..d4c6c4b 100644
--- a/target/ppc/translate/fp-ops.c.inc
+++ b/target/ppc/translate/fp-ops.c.inc
@@ -62,8 +62,6 @@ GEN_HANDLER_E(stfdepx, 0x1F, 0x1F, 0x16, 0x00000001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER_E(stfdpx, 0x1F, 0x17, 0x1C, 0x00200001, PPC_NONE, PPC2_ISA205),
GEN_HANDLER(frsqrtes, 0x3B, 0x1A, 0xFF, 0x001F07C0, PPC_FLOAT_FRSQRTES),
-GEN_HANDLER(fsqrt, 0x3F, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT),
-GEN_HANDLER(fsqrts, 0x3B, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT),
GEN_HANDLER(fcmpo, 0x3F, 0x00, 0x01, 0x00600001, PPC_FLOAT),
GEN_HANDLER(fcmpu, 0x3F, 0x00, 0x00, 0x00600001, PPC_FLOAT),
GEN_HANDLER(fabs, 0x3F, 0x08, 0x08, 0x001F0000, PPC_FLOAT),