author     Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>  2019-01-02 09:14:19 +0000
committer  David Gibson <david@gibson.dropbear.id.au>  2019-01-09 09:28:14 +1100
commit     8b3b2d75c7c0481544e277dad226223245e058eb
tree       45c0ffdb5fdba19f46c195b617c503c96c90f60e
parent     c4a18dbf52261ec505214d55d8681deeb2e33524
target/ppc: introduce get_cpu_vsr{l,h}() and set_cpu_vsr{l,h}() helpers for VSR register access
These helpers allow us to move VSR register values to/from the specified
TCGv_i64 argument. To prevent VSX helpers accessing the cpu_vsr array
directly, add extra TCG temporaries as required.

Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Acked-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
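For illustration only (this snippet is not part of the patch), a typical
translator function now follows this pattern, with gen_example() standing
in for any real VSX op:

    static void gen_example(DisasContext *ctx)
    {
        TCGv_i64 t0;

        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
        t0 = tcg_temp_new_i64();
        /* fetch the high doubleword of VSR[xB] into a temporary */
        get_cpu_vsrh(t0, xB(ctx->opcode));
        /* ... operate on t0 ... */
        /* write the result back to the high doubleword of VSR[xT] */
        set_cpu_vsrh(xT(ctx->opcode), t0);
        tcg_temp_free_i64(t0);
    }

The diff below applies exactly this transformation across the VSX
load/store, move and logical ops.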
-rw-r--r--  target/ppc/translate/vsx-impl.inc.c  |  862
1 file changed, 638 insertions(+), 224 deletions(-)
diff --git a/target/ppc/translate/vsx-impl.inc.c b/target/ppc/translate/vsx-impl.inc.c
index 85ed135..f0665df 100644
--- a/target/ppc/translate/vsx-impl.inc.c
+++ b/target/ppc/translate/vsx-impl.inc.c
@@ -1,20 +1,48 @@
/*** VSX extension ***/
-static inline TCGv_i64 cpu_vsrh(int n)
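+/* Copy the lower doubleword of VSR n (the cpu_vsr backing array) into dst. */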
+static inline void get_vsr(TCGv_i64 dst, int n)
+{
+ tcg_gen_mov_i64(dst, cpu_vsr[n]);
+}
+
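+/* Store src into the lower doubleword of VSR n in the cpu_vsr backing array. */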
+static inline void set_vsr(int n, TCGv_i64 src)
+{
+ tcg_gen_mov_i64(cpu_vsr[n], src);
+}
+
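+/*
+ * Read the upper doubleword of VSR n into dst. VSRs 0-31 share their
+ * upper doubleword with the FPRs; VSRs 32-63 overlay the Altivec registers.
+ */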
+static inline void get_cpu_vsrh(TCGv_i64 dst, int n)
+{
+ if (n < 32) {
+ get_fpr(dst, n);
+ } else {
+ get_avr64(dst, n - 32, true);
+ }
+}
+
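+/* Read the lower doubleword of VSR n into dst, using the same layout. */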
+static inline void get_cpu_vsrl(TCGv_i64 dst, int n)
+{
+ if (n < 32) {
+ get_vsr(dst, n);
+ } else {
+ get_avr64(dst, n - 32, false);
+ }
+}
+
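+/* Write src to the upper doubleword of VSR n. */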
+static inline void set_cpu_vsrh(int n, TCGv_i64 src)
{
if (n < 32) {
- return cpu_fpr[n];
+ set_fpr(n, src);
} else {
- return cpu_avrh[n-32];
+ set_avr64(n - 32, src, true);
}
}
-static inline TCGv_i64 cpu_vsrl(int n)
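+/* Write src to the lower doubleword of VSR n. */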
+static inline void set_cpu_vsrl(int n, TCGv_i64 src)
{
if (n < 32) {
- return cpu_vsr[n];
+ set_vsr(n, src);
} else {
- return cpu_avrl[n-32];
+ set_avr64(n - 32, src, false);
}
}
@@ -22,16 +50,20 @@ static inline TCGv_i64 cpu_vsrl(int n)
static void gen_##name(DisasContext *ctx) \
{ \
TCGv EA; \
+ TCGv_i64 t0; \
if (unlikely(!ctx->vsx_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
+ t0 = tcg_temp_new_i64(); \
gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
- gen_qemu_##operation(ctx, cpu_vsrh(xT(ctx->opcode)), EA); \
+ gen_qemu_##operation(ctx, t0, EA); \
+ set_cpu_vsrh(xT(ctx->opcode), t0); \
/* NOTE: cpu_vsrl is undefined */ \
tcg_temp_free(EA); \
+ tcg_temp_free_i64(t0); \
}
VSX_LOAD_SCALAR(lxsdx, ld64_i64)
@@ -44,43 +76,60 @@ VSX_LOAD_SCALAR(lxsspx, ld32fs)
static void gen_lxvd2x(DisasContext *ctx)
{
TCGv EA;
+ TCGv_i64 t0;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ t0 = tcg_temp_new_i64();
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
+ gen_qemu_ld64_i64(ctx, t0, EA);
+ set_cpu_vsrh(xT(ctx->opcode), t0);
tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_ld64_i64(ctx, cpu_vsrl(xT(ctx->opcode)), EA);
+ gen_qemu_ld64_i64(ctx, t0, EA);
+ set_cpu_vsrl(xT(ctx->opcode), t0);
tcg_temp_free(EA);
+ tcg_temp_free_i64(t0);
}
static void gen_lxvdsx(DisasContext *ctx)
{
TCGv EA;
+ TCGv_i64 t0;
+ TCGv_i64 t1;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
- tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));
+ gen_qemu_ld64_i64(ctx, t0, EA);
+ set_cpu_vsrh(xT(ctx->opcode), t0);
+ tcg_gen_mov_i64(t1, t0);
+ set_cpu_vsrl(xT(ctx->opcode), t1);
tcg_temp_free(EA);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
}
static void gen_lxvw4x(DisasContext *ctx)
{
TCGv EA;
- TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
- TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ get_cpu_vsrh(xth, xT(ctx->opcode));
+ get_cpu_vsrl(xtl, xT(ctx->opcode));
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
@@ -104,6 +153,8 @@ static void gen_lxvw4x(DisasContext *ctx)
tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
}
+ set_cpu_vsrh(xT(ctx->opcode), xth);
+ set_cpu_vsrl(xT(ctx->opcode), xtl);
tcg_temp_free(EA);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
}
static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl,
@@ -151,13 +202,17 @@ static void gen_bswap32x4(TCGv_i64 outh, TCGv_i64 outl,
static void gen_lxvh8x(DisasContext *ctx)
{
TCGv EA;
- TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
- TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ get_cpu_vsrh(xth, xT(ctx->opcode));
+ get_cpu_vsrl(xtl, xT(ctx->opcode));
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
@@ -169,18 +224,24 @@ static void gen_lxvh8x(DisasContext *ctx)
gen_bswap16x8(xth, xtl, xth, xtl);
}
+ set_cpu_vsrh(xT(ctx->opcode), xth);
+ set_cpu_vsrl(xT(ctx->opcode), xtl);
tcg_temp_free(EA);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
}
static void gen_lxvb16x(DisasContext *ctx)
{
TCGv EA;
- TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
- TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ get_cpu_vsrh(xth, xT(ctx->opcode));
+ get_cpu_vsrl(xtl, xT(ctx->opcode));
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
@@ -188,6 +249,8 @@ static void gen_lxvb16x(DisasContext *ctx)
tcg_gen_addi_tl(EA, EA, 8);
tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+ set_cpu_vsrh(xT(ctx->opcode), xth);
+ set_cpu_vsrl(xT(ctx->opcode), xtl);
tcg_temp_free(EA);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
}
#define VSX_VECTOR_LOAD_STORE(name, op, indexed) \
@@ -195,15 +258,14 @@ static void gen_##name(DisasContext *ctx) \
{ \
int xt; \
TCGv EA; \
- TCGv_i64 xth, xtl; \
+ TCGv_i64 xth; \
+ TCGv_i64 xtl; \
\
if (indexed) { \
xt = xT(ctx->opcode); \
} else { \
xt = DQxT(ctx->opcode); \
} \
- xth = cpu_vsrh(xt); \
- xtl = cpu_vsrl(xt); \
\
if (xt < 32) { \
if (unlikely(!ctx->vsx_enabled)) { \
@@ -216,6 +278,10 @@ static void gen_##name(DisasContext *ctx) \
return; \
} \
} \
+ xth = tcg_temp_new_i64(); \
+ xtl = tcg_temp_new_i64(); \
+ get_cpu_vsrh(xth, xt); \
+ get_cpu_vsrl(xtl, xt); \
gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
if (indexed) { \
@@ -225,14 +291,20 @@ static void gen_##name(DisasContext *ctx) \
} \
if (ctx->le_mode) { \
tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_LEQ); \
+ set_cpu_vsrl(xt, xtl); \
tcg_gen_addi_tl(EA, EA, 8); \
tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_LEQ); \
+ set_cpu_vsrh(xt, xth); \
} else { \
tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_BEQ); \
+ set_cpu_vsrh(xt, xth); \
tcg_gen_addi_tl(EA, EA, 8); \
tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_BEQ); \
+ set_cpu_vsrl(xt, xtl); \
} \
tcg_temp_free(EA); \
+ tcg_temp_free_i64(xth); \
+ tcg_temp_free_i64(xtl); \
}
VSX_VECTOR_LOAD_STORE(lxv, ld_i64, 0)
@@ -276,18 +348,22 @@ VSX_VECTOR_LOAD_STORE_LENGTH(stxvll)
static void gen_##name(DisasContext *ctx) \
{ \
TCGv EA; \
- TCGv_i64 xth = cpu_vsrh(rD(ctx->opcode) + 32); \
+ TCGv_i64 xth; \
\
if (unlikely(!ctx->altivec_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_VPU); \
return; \
} \
+ xth = tcg_temp_new_i64(); \
+ get_cpu_vsrh(xth, rD(ctx->opcode) + 32); \
gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
gen_addr_imm_index(ctx, EA, 0x03); \
gen_qemu_##operation(ctx, xth, EA); \
+ set_cpu_vsrh(rD(ctx->opcode) + 32, xth); \
/* NOTE: cpu_vsrl is undefined */ \
tcg_temp_free(EA); \
+ tcg_temp_free_i64(xth); \
}
VSX_LOAD_SCALAR_DS(lxsd, ld64_i64)
@@ -297,15 +373,19 @@ VSX_LOAD_SCALAR_DS(lxssp, ld32fs)
static void gen_##name(DisasContext *ctx) \
{ \
TCGv EA; \
+ TCGv_i64 t0; \
if (unlikely(!ctx->vsx_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
+ t0 = tcg_temp_new_i64(); \
gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
- gen_qemu_##operation(ctx, cpu_vsrh(xS(ctx->opcode)), EA); \
+ get_cpu_vsrh(t0, xS(ctx->opcode)); \
+ gen_qemu_##operation(ctx, t0, EA); \
tcg_temp_free(EA); \
+ tcg_temp_free_i64(t0); \
}
VSX_STORE_SCALAR(stxsdx, st64_i64)
@@ -318,28 +398,38 @@ VSX_STORE_SCALAR(stxsspx, st32fs)
static void gen_stxvd2x(DisasContext *ctx)
{
TCGv EA;
+ TCGv_i64 t0;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ t0 = tcg_temp_new_i64();
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- gen_qemu_st64_i64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
+ get_cpu_vsrh(t0, xS(ctx->opcode));
+ gen_qemu_st64_i64(ctx, t0, EA);
tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_st64_i64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
+ get_cpu_vsrl(t0, xS(ctx->opcode));
+ gen_qemu_st64_i64(ctx, t0, EA);
tcg_temp_free(EA);
+ tcg_temp_free_i64(t0);
}
static void gen_stxvw4x(DisasContext *ctx)
{
- TCGv_i64 xsh = cpu_vsrh(xS(ctx->opcode));
- TCGv_i64 xsl = cpu_vsrl(xS(ctx->opcode));
TCGv EA;
+ TCGv_i64 xsh;
+ TCGv_i64 xsl;
+
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xsh = tcg_temp_new_i64();
+ xsl = tcg_temp_new_i64();
+ get_cpu_vsrh(xsh, xS(ctx->opcode));
+ get_cpu_vsrl(xsl, xS(ctx->opcode));
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
@@ -362,18 +452,24 @@ static void gen_stxvw4x(DisasContext *ctx)
tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
}
tcg_temp_free(EA);
+ tcg_temp_free_i64(xsh);
+ tcg_temp_free_i64(xsl);
}
static void gen_stxvh8x(DisasContext *ctx)
{
- TCGv_i64 xsh = cpu_vsrh(xS(ctx->opcode));
- TCGv_i64 xsl = cpu_vsrl(xS(ctx->opcode));
TCGv EA;
+ TCGv_i64 xsh;
+ TCGv_i64 xsl;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xsh = tcg_temp_new_i64();
+ xsl = tcg_temp_new_i64();
+ get_cpu_vsrh(xsh, xS(ctx->opcode));
+ get_cpu_vsrl(xsl, xS(ctx->opcode));
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
@@ -393,18 +489,24 @@ static void gen_stxvh8x(DisasContext *ctx)
tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
}
tcg_temp_free(EA);
+ tcg_temp_free_i64(xsh);
+ tcg_temp_free_i64(xsl);
}
static void gen_stxvb16x(DisasContext *ctx)
{
- TCGv_i64 xsh = cpu_vsrh(xS(ctx->opcode));
- TCGv_i64 xsl = cpu_vsrl(xS(ctx->opcode));
TCGv EA;
+ TCGv_i64 xsh;
+ TCGv_i64 xsl;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xsh = tcg_temp_new_i64();
+ xsl = tcg_temp_new_i64();
+ get_cpu_vsrh(xsh, xS(ctx->opcode));
+ get_cpu_vsrl(xsl, xS(ctx->opcode));
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
@@ -412,80 +514,144 @@ static void gen_stxvb16x(DisasContext *ctx)
tcg_gen_addi_tl(EA, EA, 8);
tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
tcg_temp_free(EA);
+ tcg_temp_free_i64(xsh);
+ tcg_temp_free_i64(xsl);
}
#define VSX_STORE_SCALAR_DS(name, operation) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv EA; \
- TCGv_i64 xth = cpu_vsrh(rD(ctx->opcode) + 32); \
+ TCGv_i64 xth; \
\
if (unlikely(!ctx->altivec_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_VPU); \
return; \
} \
+ xth = tcg_temp_new_i64(); \
+ get_cpu_vsrh(xth, rD(ctx->opcode) + 32); \
gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
gen_addr_imm_index(ctx, EA, 0x03); \
gen_qemu_##operation(ctx, xth, EA); \
/* NOTE: cpu_vsrl is undefined */ \
tcg_temp_free(EA); \
+ tcg_temp_free_i64(xth); \
}
VSX_STORE_SCALAR_DS(stxsd, st64_i64)
VSX_STORE_SCALAR_DS(stxssp, st32fs)
-#define MV_VSRW(name, tcgop1, tcgop2, target, source) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- if (xS(ctx->opcode) < 32) { \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- } else { \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- } \
- TCGv_i64 tmp = tcg_temp_new_i64(); \
- tcg_gen_##tcgop1(tmp, source); \
- tcg_gen_##tcgop2(target, tmp); \
- tcg_temp_free_i64(tmp); \
+static void gen_mfvsrwz(DisasContext *ctx)
+{
+ if (xS(ctx->opcode) < 32) {
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ TCGv_i64 xsh = tcg_temp_new_i64();
+ get_cpu_vsrh(xsh, xS(ctx->opcode));
+ tcg_gen_ext32u_i64(tmp, xsh);
+ tcg_gen_trunc_i64_tl(cpu_gpr[rA(ctx->opcode)], tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(xsh);
}
+static void gen_mtvsrwa(DisasContext *ctx)
+{
+ if (xS(ctx->opcode) < 32) {
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ TCGv_i64 xsh = tcg_temp_new_i64();
+ tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ext32s_i64(xsh, tmp);
+ set_cpu_vsrh(xT(ctx->opcode), xsh);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(xsh);
+}
-MV_VSRW(mfvsrwz, ext32u_i64, trunc_i64_tl, cpu_gpr[rA(ctx->opcode)], \
- cpu_vsrh(xS(ctx->opcode)))
-MV_VSRW(mtvsrwa, extu_tl_i64, ext32s_i64, cpu_vsrh(xT(ctx->opcode)), \
- cpu_gpr[rA(ctx->opcode)])
-MV_VSRW(mtvsrwz, extu_tl_i64, ext32u_i64, cpu_vsrh(xT(ctx->opcode)), \
- cpu_gpr[rA(ctx->opcode)])
+static void gen_mtvsrwz(DisasContext *ctx)
+{
+ if (xS(ctx->opcode) < 32) {
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ TCGv_i64 xsh = tcg_temp_new_i64();
+ tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ext32u_i64(xsh, tmp);
+ set_cpu_vsrh(xT(ctx->opcode), xsh);
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(xsh);
+}
#if defined(TARGET_PPC64)
-#define MV_VSRD(name, target, source) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- if (xS(ctx->opcode) < 32) { \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- } else { \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- } \
- tcg_gen_mov_i64(target, source); \
+static void gen_mfvsrd(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ if (xS(ctx->opcode) < 32) {
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+ t0 = tcg_temp_new_i64();
+ get_cpu_vsrh(t0, xS(ctx->opcode));
+ tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_temp_free_i64(t0);
}
-MV_VSRD(mfvsrd, cpu_gpr[rA(ctx->opcode)], cpu_vsrh(xS(ctx->opcode)))
-MV_VSRD(mtvsrd, cpu_vsrh(xT(ctx->opcode)), cpu_gpr[rA(ctx->opcode)])
+static void gen_mtvsrd(DisasContext *ctx)
+{
+ TCGv_i64 t0;
+ if (xS(ctx->opcode) < 32) {
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+ t0 = tcg_temp_new_i64();
+ tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ set_cpu_vsrh(xT(ctx->opcode), t0);
+ tcg_temp_free_i64(t0);
+}
static void gen_mfvsrld(DisasContext *ctx)
{
+ TCGv_i64 t0;
if (xS(ctx->opcode) < 32) {
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
@@ -497,12 +663,15 @@ static void gen_mfvsrld(DisasContext *ctx)
return;
}
}
-
- tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], cpu_vsrl(xS(ctx->opcode)));
+ t0 = tcg_temp_new_i64();
+ get_cpu_vsrl(t0, xS(ctx->opcode));
+ tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_temp_free_i64(t0);
}
static void gen_mtvsrdd(DisasContext *ctx)
{
+ TCGv_i64 t0;
if (xT(ctx->opcode) < 32) {
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
@@ -515,17 +684,22 @@ static void gen_mtvsrdd(DisasContext *ctx)
}
}
+ t0 = tcg_temp_new_i64();
if (!rA(ctx->opcode)) {
- tcg_gen_movi_i64(cpu_vsrh(xT(ctx->opcode)), 0);
+ tcg_gen_movi_i64(t0, 0);
} else {
- tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]);
}
+ set_cpu_vsrh(xT(ctx->opcode), t0);
- tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mov_i64(t0, cpu_gpr[rB(ctx->opcode)]);
+ set_cpu_vsrl(xT(ctx->opcode), t0);
+ tcg_temp_free_i64(t0);
}
static void gen_mtvsrws(DisasContext *ctx)
{
+ TCGv_i64 t0;
if (xT(ctx->opcode) < 32) {
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
@@ -538,55 +712,61 @@ static void gen_mtvsrws(DisasContext *ctx)
}
}
- tcg_gen_deposit_i64(cpu_vsrl(xT(ctx->opcode)), cpu_gpr[rA(ctx->opcode)],
+ t0 = tcg_temp_new_i64();
+ tcg_gen_deposit_i64(t0, cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rA(ctx->opcode)], 32, 32);
- tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrl(xT(ctx->opcode)));
+ set_cpu_vsrl(xT(ctx->opcode), t0);
+ set_cpu_vsrh(xT(ctx->opcode), t0);
+ tcg_temp_free_i64(t0);
}
#endif
static void gen_xxpermdi(DisasContext *ctx)
{
+ TCGv_i64 xh, xl;
+
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xh = tcg_temp_new_i64();
+ xl = tcg_temp_new_i64();
+
if (unlikely((xT(ctx->opcode) == xA(ctx->opcode)) ||
(xT(ctx->opcode) == xB(ctx->opcode)))) {
- TCGv_i64 xh, xl;
-
- xh = tcg_temp_new_i64();
- xl = tcg_temp_new_i64();
-
if ((DM(ctx->opcode) & 2) == 0) {
- tcg_gen_mov_i64(xh, cpu_vsrh(xA(ctx->opcode)));
+ get_cpu_vsrh(xh, xA(ctx->opcode));
} else {
- tcg_gen_mov_i64(xh, cpu_vsrl(xA(ctx->opcode)));
+ get_cpu_vsrl(xh, xA(ctx->opcode));
}
if ((DM(ctx->opcode) & 1) == 0) {
- tcg_gen_mov_i64(xl, cpu_vsrh(xB(ctx->opcode)));
+ get_cpu_vsrh(xl, xB(ctx->opcode));
} else {
- tcg_gen_mov_i64(xl, cpu_vsrl(xB(ctx->opcode)));
+ get_cpu_vsrl(xl, xB(ctx->opcode));
}
- tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xh);
- tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xl);
-
- tcg_temp_free_i64(xh);
- tcg_temp_free_i64(xl);
+ set_cpu_vsrh(xT(ctx->opcode), xh);
+ set_cpu_vsrl(xT(ctx->opcode), xl);
} else {
if ((DM(ctx->opcode) & 2) == 0) {
- tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)));
+ get_cpu_vsrh(xh, xA(ctx->opcode));
+ set_cpu_vsrh(xT(ctx->opcode), xh);
} else {
- tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)));
+ get_cpu_vsrl(xh, xA(ctx->opcode));
+ set_cpu_vsrh(xT(ctx->opcode), xh);
}
if ((DM(ctx->opcode) & 1) == 0) {
- tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xB(ctx->opcode)));
+ get_cpu_vsrh(xl, xB(ctx->opcode));
+ set_cpu_vsrl(xT(ctx->opcode), xl);
} else {
- tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xB(ctx->opcode)));
+ get_cpu_vsrl(xl, xB(ctx->opcode));
+ set_cpu_vsrl(xT(ctx->opcode), xl);
}
}
+ tcg_temp_free_i64(xh);
+ tcg_temp_free_i64(xl);
}
#define OP_ABS 1
@@ -606,7 +786,7 @@ static void glue(gen_, name)(DisasContext * ctx) \
} \
xb = tcg_temp_new_i64(); \
sgm = tcg_temp_new_i64(); \
- tcg_gen_mov_i64(xb, cpu_vsrh(xB(ctx->opcode))); \
+ get_cpu_vsrh(xb, xB(ctx->opcode)); \
tcg_gen_movi_i64(sgm, sgn_mask); \
switch (op) { \
case OP_ABS: { \
@@ -623,7 +803,7 @@ static void glue(gen_, name)(DisasContext * ctx) \
} \
case OP_CPSGN: { \
TCGv_i64 xa = tcg_temp_new_i64(); \
- tcg_gen_mov_i64(xa, cpu_vsrh(xA(ctx->opcode))); \
+ get_cpu_vsrh(xa, xA(ctx->opcode)); \
tcg_gen_and_i64(xa, xa, sgm); \
tcg_gen_andc_i64(xb, xb, sgm); \
tcg_gen_or_i64(xb, xb, xa); \
@@ -631,7 +811,7 @@ static void glue(gen_, name)(DisasContext * ctx) \
break; \
} \
} \
- tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xb); \
+ set_cpu_vsrh(xT(ctx->opcode), xb); \
tcg_temp_free_i64(xb); \
tcg_temp_free_i64(sgm); \
}
@@ -647,7 +827,7 @@ static void glue(gen_, name)(DisasContext *ctx) \
int xa; \
int xt = rD(ctx->opcode) + 32; \
int xb = rB(ctx->opcode) + 32; \
- TCGv_i64 xah, xbh, xbl, sgm; \
+ TCGv_i64 xah, xbh, xbl, sgm, tmp; \
\
if (unlikely(!ctx->vsx_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_VSXU); \
@@ -656,8 +836,9 @@ static void glue(gen_, name)(DisasContext *ctx) \
xbh = tcg_temp_new_i64(); \
xbl = tcg_temp_new_i64(); \
sgm = tcg_temp_new_i64(); \
- tcg_gen_mov_i64(xbh, cpu_vsrh(xb)); \
- tcg_gen_mov_i64(xbl, cpu_vsrl(xb)); \
+ tmp = tcg_temp_new_i64(); \
+ get_cpu_vsrh(xbh, xb); \
+ get_cpu_vsrl(xbl, xb); \
tcg_gen_movi_i64(sgm, sgn_mask); \
switch (op) { \
case OP_ABS: \
@@ -672,17 +853,19 @@ static void glue(gen_, name)(DisasContext *ctx) \
case OP_CPSGN: \
xah = tcg_temp_new_i64(); \
xa = rA(ctx->opcode) + 32; \
- tcg_gen_and_i64(xah, cpu_vsrh(xa), sgm); \
+ get_cpu_vsrh(tmp, xa); \
+ tcg_gen_and_i64(xah, tmp, sgm); \
tcg_gen_andc_i64(xbh, xbh, sgm); \
tcg_gen_or_i64(xbh, xbh, xah); \
tcg_temp_free_i64(xah); \
break; \
} \
- tcg_gen_mov_i64(cpu_vsrh(xt), xbh); \
- tcg_gen_mov_i64(cpu_vsrl(xt), xbl); \
+ set_cpu_vsrh(xt, xbh); \
+ set_cpu_vsrl(xt, xbl); \
tcg_temp_free_i64(xbl); \
tcg_temp_free_i64(xbh); \
tcg_temp_free_i64(sgm); \
+ tcg_temp_free_i64(tmp); \
}
VSX_SCALAR_MOVE_QP(xsabsqp, OP_ABS, SGN_MASK_DP)
@@ -701,8 +884,8 @@ static void glue(gen_, name)(DisasContext * ctx) \
xbh = tcg_temp_new_i64(); \
xbl = tcg_temp_new_i64(); \
sgm = tcg_temp_new_i64(); \
- tcg_gen_mov_i64(xbh, cpu_vsrh(xB(ctx->opcode))); \
- tcg_gen_mov_i64(xbl, cpu_vsrl(xB(ctx->opcode))); \
+ get_cpu_vsrh(xbh, xB(ctx->opcode)); \
+ get_cpu_vsrl(xbl, xB(ctx->opcode)); \
tcg_gen_movi_i64(sgm, sgn_mask); \
switch (op) { \
case OP_ABS: { \
@@ -723,8 +906,8 @@ static void glue(gen_, name)(DisasContext * ctx) \
case OP_CPSGN: { \
TCGv_i64 xah = tcg_temp_new_i64(); \
TCGv_i64 xal = tcg_temp_new_i64(); \
- tcg_gen_mov_i64(xah, cpu_vsrh(xA(ctx->opcode))); \
- tcg_gen_mov_i64(xal, cpu_vsrl(xA(ctx->opcode))); \
+ get_cpu_vsrh(xah, xA(ctx->opcode)); \
+ get_cpu_vsrl(xal, xA(ctx->opcode)); \
tcg_gen_and_i64(xah, xah, sgm); \
tcg_gen_and_i64(xal, xal, sgm); \
tcg_gen_andc_i64(xbh, xbh, sgm); \
@@ -736,8 +919,8 @@ static void glue(gen_, name)(DisasContext * ctx) \
break; \
} \
} \
- tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xbh); \
- tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xbl); \
+ set_cpu_vsrh(xT(ctx->opcode), xbh); \
+ set_cpu_vsrl(xT(ctx->opcode), xbl); \
tcg_temp_free_i64(xbh); \
tcg_temp_free_i64(xbl); \
tcg_temp_free_i64(sgm); \
@@ -768,12 +951,19 @@ static void gen_##name(DisasContext * ctx) \
#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
static void gen_##name(DisasContext * ctx) \
{ \
+ TCGv_i64 t0; \
+ TCGv_i64 t1; \
if (unlikely(!ctx->vsx_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- gen_helper_##name(cpu_vsrh(xT(ctx->opcode)), cpu_env, \
- cpu_vsrh(xB(ctx->opcode))); \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ get_cpu_vsrh(t0, xB(ctx->opcode)); \
+ gen_helper_##name(t1, cpu_env, t0); \
+ set_cpu_vsrh(xT(ctx->opcode), t1); \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
}
GEN_VSX_HELPER_2(xsadddp, 0x00, 0x04, 0, PPC2_VSX)
@@ -949,76 +1139,146 @@ GEN_VSX_HELPER_2(xxpermr, 0x08, 0x07, 0, PPC2_ISA300)
static void gen_xxbrd(DisasContext *ctx)
{
- TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
- TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
- TCGv_i64 xbh = cpu_vsrh(xB(ctx->opcode));
- TCGv_i64 xbl = cpu_vsrl(xB(ctx->opcode));
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsrh(xbh, xB(ctx->opcode));
+ get_cpu_vsrl(xbl, xB(ctx->opcode));
+
tcg_gen_bswap64_i64(xth, xbh);
tcg_gen_bswap64_i64(xtl, xbl);
+ set_cpu_vsrh(xT(ctx->opcode), xth);
+ set_cpu_vsrl(xT(ctx->opcode), xtl);
+
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
}
static void gen_xxbrh(DisasContext *ctx)
{
- TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
- TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
- TCGv_i64 xbh = cpu_vsrh(xB(ctx->opcode));
- TCGv_i64 xbl = cpu_vsrl(xB(ctx->opcode));
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsrh(xbh, xB(ctx->opcode));
+ get_cpu_vsrl(xbl, xB(ctx->opcode));
+
gen_bswap16x8(xth, xtl, xbh, xbl);
+ set_cpu_vsrh(xT(ctx->opcode), xth);
+ set_cpu_vsrl(xT(ctx->opcode), xtl);
+
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
}
static void gen_xxbrq(DisasContext *ctx)
{
- TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
- TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
- TCGv_i64 xbh = cpu_vsrh(xB(ctx->opcode));
- TCGv_i64 xbl = cpu_vsrl(xB(ctx->opcode));
- TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
+ TCGv_i64 t0;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsrh(xbh, xB(ctx->opcode));
+ get_cpu_vsrl(xbl, xB(ctx->opcode));
+ t0 = tcg_temp_new_i64();
+
tcg_gen_bswap64_i64(t0, xbl);
tcg_gen_bswap64_i64(xtl, xbh);
+ set_cpu_vsrl(xT(ctx->opcode), xtl);
tcg_gen_mov_i64(xth, t0);
+ set_cpu_vsrh(xT(ctx->opcode), xth);
+
tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
}
static void gen_xxbrw(DisasContext *ctx)
{
- TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
- TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
- TCGv_i64 xbh = cpu_vsrh(xB(ctx->opcode));
- TCGv_i64 xbl = cpu_vsrl(xB(ctx->opcode));
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsrh(xbh, xB(ctx->opcode));
+ get_cpu_vsrl(xbl, xB(ctx->opcode));
+
gen_bswap32x4(xth, xtl, xbh, xbl);
+ set_cpu_vsrh(xT(ctx->opcode), xth);
+ set_cpu_vsrl(xT(ctx->opcode), xtl);
+
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
}
#define VSX_LOGICAL(name, tcg_op) \
static void glue(gen_, name)(DisasContext * ctx) \
{ \
+ TCGv_i64 t0; \
+ TCGv_i64 t1; \
+ TCGv_i64 t2; \
if (unlikely(!ctx->vsx_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- tcg_op(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)), \
- cpu_vsrh(xB(ctx->opcode))); \
- tcg_op(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)), \
- cpu_vsrl(xB(ctx->opcode))); \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ t2 = tcg_temp_new_i64(); \
+ get_cpu_vsrh(t0, xA(ctx->opcode)); \
+ get_cpu_vsrh(t1, xB(ctx->opcode)); \
+ tcg_op(t2, t0, t1); \
+ set_cpu_vsrh(xT(ctx->opcode), t2); \
+ get_cpu_vsrl(t0, xA(ctx->opcode)); \
+ get_cpu_vsrl(t1, xB(ctx->opcode)); \
+ tcg_op(t2, t0, t1); \
+ set_cpu_vsrl(xT(ctx->opcode), t2); \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
+ tcg_temp_free_i64(t2); \
}
VSX_LOGICAL(xxland, tcg_gen_and_i64)
@@ -1033,7 +1293,7 @@ VSX_LOGICAL(xxlorc, tcg_gen_orc_i64)
#define VSX_XXMRG(name, high) \
static void glue(gen_, name)(DisasContext * ctx) \
{ \
- TCGv_i64 a0, a1, b0, b1; \
+ TCGv_i64 a0, a1, b0, b1, tmp; \
if (unlikely(!ctx->vsx_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
@@ -1042,27 +1302,29 @@ static void glue(gen_, name)(DisasContext * ctx) \
a1 = tcg_temp_new_i64(); \
b0 = tcg_temp_new_i64(); \
b1 = tcg_temp_new_i64(); \
+ tmp = tcg_temp_new_i64(); \
if (high) { \
- tcg_gen_mov_i64(a0, cpu_vsrh(xA(ctx->opcode))); \
- tcg_gen_mov_i64(a1, cpu_vsrh(xA(ctx->opcode))); \
- tcg_gen_mov_i64(b0, cpu_vsrh(xB(ctx->opcode))); \
- tcg_gen_mov_i64(b1, cpu_vsrh(xB(ctx->opcode))); \
+ get_cpu_vsrh(a0, xA(ctx->opcode)); \
+ get_cpu_vsrh(a1, xA(ctx->opcode)); \
+ get_cpu_vsrh(b0, xB(ctx->opcode)); \
+ get_cpu_vsrh(b1, xB(ctx->opcode)); \
} else { \
- tcg_gen_mov_i64(a0, cpu_vsrl(xA(ctx->opcode))); \
- tcg_gen_mov_i64(a1, cpu_vsrl(xA(ctx->opcode))); \
- tcg_gen_mov_i64(b0, cpu_vsrl(xB(ctx->opcode))); \
- tcg_gen_mov_i64(b1, cpu_vsrl(xB(ctx->opcode))); \
+ get_cpu_vsrl(a0, xA(ctx->opcode)); \
+ get_cpu_vsrl(a1, xA(ctx->opcode)); \
+ get_cpu_vsrl(b0, xB(ctx->opcode)); \
+ get_cpu_vsrl(b1, xB(ctx->opcode)); \
} \
tcg_gen_shri_i64(a0, a0, 32); \
tcg_gen_shri_i64(b0, b0, 32); \
- tcg_gen_deposit_i64(cpu_vsrh(xT(ctx->opcode)), \
- b0, a0, 32, 32); \
- tcg_gen_deposit_i64(cpu_vsrl(xT(ctx->opcode)), \
- b1, a1, 32, 32); \
+ tcg_gen_deposit_i64(tmp, b0, a0, 32, 32); \
+ set_cpu_vsrh(xT(ctx->opcode), tmp); \
+ tcg_gen_deposit_i64(tmp, b1, a1, 32, 32); \
+ set_cpu_vsrl(xT(ctx->opcode), tmp); \
tcg_temp_free_i64(a0); \
tcg_temp_free_i64(a1); \
tcg_temp_free_i64(b0); \
tcg_temp_free_i64(b1); \
+ tcg_temp_free_i64(tmp); \
}
VSX_XXMRG(xxmrghw, 1)
@@ -1070,7 +1332,7 @@ VSX_XXMRG(xxmrglw, 0)
static void gen_xxsel(DisasContext * ctx)
{
- TCGv_i64 a, b, c;
+ TCGv_i64 a, b, c, tmp;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
@@ -1078,40 +1340,49 @@ static void gen_xxsel(DisasContext * ctx)
a = tcg_temp_new_i64();
b = tcg_temp_new_i64();
c = tcg_temp_new_i64();
+ tmp = tcg_temp_new_i64();
- tcg_gen_mov_i64(a, cpu_vsrh(xA(ctx->opcode)));
- tcg_gen_mov_i64(b, cpu_vsrh(xB(ctx->opcode)));
- tcg_gen_mov_i64(c, cpu_vsrh(xC(ctx->opcode)));
+ get_cpu_vsrh(a, xA(ctx->opcode));
+ get_cpu_vsrh(b, xB(ctx->opcode));
+ get_cpu_vsrh(c, xC(ctx->opcode));
tcg_gen_and_i64(b, b, c);
tcg_gen_andc_i64(a, a, c);
- tcg_gen_or_i64(cpu_vsrh(xT(ctx->opcode)), a, b);
+ tcg_gen_or_i64(tmp, a, b);
+ set_cpu_vsrh(xT(ctx->opcode), tmp);
- tcg_gen_mov_i64(a, cpu_vsrl(xA(ctx->opcode)));
- tcg_gen_mov_i64(b, cpu_vsrl(xB(ctx->opcode)));
- tcg_gen_mov_i64(c, cpu_vsrl(xC(ctx->opcode)));
+ get_cpu_vsrl(a, xA(ctx->opcode));
+ get_cpu_vsrl(b, xB(ctx->opcode));
+ get_cpu_vsrl(c, xC(ctx->opcode));
tcg_gen_and_i64(b, b, c);
tcg_gen_andc_i64(a, a, c);
- tcg_gen_or_i64(cpu_vsrl(xT(ctx->opcode)), a, b);
+ tcg_gen_or_i64(tmp, a, b);
+ set_cpu_vsrl(xT(ctx->opcode), tmp);
tcg_temp_free_i64(a);
tcg_temp_free_i64(b);
tcg_temp_free_i64(c);
+ tcg_temp_free_i64(tmp);
}
static void gen_xxspltw(DisasContext *ctx)
{
TCGv_i64 b, b2;
- TCGv_i64 vsr = (UIM(ctx->opcode) & 2) ?
- cpu_vsrl(xB(ctx->opcode)) :
- cpu_vsrh(xB(ctx->opcode));
+ TCGv_i64 vsr;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ vsr = tcg_temp_new_i64();
+ if (UIM(ctx->opcode) & 2) {
+ get_cpu_vsrl(vsr, xB(ctx->opcode));
+ } else {
+ get_cpu_vsrh(vsr, xB(ctx->opcode));
+ }
+
b = tcg_temp_new_i64();
b2 = tcg_temp_new_i64();
@@ -1122,9 +1393,11 @@ static void gen_xxspltw(DisasContext *ctx)
}
tcg_gen_shli_i64(b2, b, 32);
- tcg_gen_or_i64(cpu_vsrh(xT(ctx->opcode)), b, b2);
- tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));
+ tcg_gen_or_i64(vsr, b, b2);
+ set_cpu_vsrh(xT(ctx->opcode), vsr);
+ set_cpu_vsrl(xT(ctx->opcode), vsr);
+ tcg_temp_free_i64(vsr);
tcg_temp_free_i64(b);
tcg_temp_free_i64(b2);
}
@@ -1134,6 +1407,7 @@ static void gen_xxspltw(DisasContext *ctx)
static void gen_xxspltib(DisasContext *ctx)
{
unsigned char uim8 = IMM8(ctx->opcode);
+ TCGv_i64 vsr;
if (xS(ctx->opcode) < 32) {
if (unlikely(!ctx->altivec_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VPU);
@@ -1145,8 +1419,11 @@ static void gen_xxspltib(DisasContext *ctx)
return;
}
}
- tcg_gen_movi_i64(cpu_vsrh(xT(ctx->opcode)), pattern(uim8));
- tcg_gen_movi_i64(cpu_vsrl(xT(ctx->opcode)), pattern(uim8));
+ vsr = tcg_temp_new_i64();
+ tcg_gen_movi_i64(vsr, pattern(uim8));
+ set_cpu_vsrh(xT(ctx->opcode), vsr);
+ set_cpu_vsrl(xT(ctx->opcode), vsr);
+ tcg_temp_free_i64(vsr);
}
static void gen_xxsldwi(DisasContext *ctx)
@@ -1161,40 +1438,40 @@ static void gen_xxsldwi(DisasContext *ctx)
switch (SHW(ctx->opcode)) {
case 0: {
- tcg_gen_mov_i64(xth, cpu_vsrh(xA(ctx->opcode)));
- tcg_gen_mov_i64(xtl, cpu_vsrl(xA(ctx->opcode)));
+ get_cpu_vsrh(xth, xA(ctx->opcode));
+ get_cpu_vsrl(xtl, xA(ctx->opcode));
break;
}
case 1: {
TCGv_i64 t0 = tcg_temp_new_i64();
- tcg_gen_mov_i64(xth, cpu_vsrh(xA(ctx->opcode)));
+ get_cpu_vsrh(xth, xA(ctx->opcode));
tcg_gen_shli_i64(xth, xth, 32);
- tcg_gen_mov_i64(t0, cpu_vsrl(xA(ctx->opcode)));
+ get_cpu_vsrl(t0, xA(ctx->opcode));
tcg_gen_shri_i64(t0, t0, 32);
tcg_gen_or_i64(xth, xth, t0);
- tcg_gen_mov_i64(xtl, cpu_vsrl(xA(ctx->opcode)));
+ get_cpu_vsrl(xtl, xA(ctx->opcode));
tcg_gen_shli_i64(xtl, xtl, 32);
- tcg_gen_mov_i64(t0, cpu_vsrh(xB(ctx->opcode)));
+ get_cpu_vsrh(t0, xB(ctx->opcode));
tcg_gen_shri_i64(t0, t0, 32);
tcg_gen_or_i64(xtl, xtl, t0);
tcg_temp_free_i64(t0);
break;
}
case 2: {
- tcg_gen_mov_i64(xth, cpu_vsrl(xA(ctx->opcode)));
- tcg_gen_mov_i64(xtl, cpu_vsrh(xB(ctx->opcode)));
+ get_cpu_vsrl(xth, xA(ctx->opcode));
+ get_cpu_vsrh(xtl, xB(ctx->opcode));
break;
}
case 3: {
TCGv_i64 t0 = tcg_temp_new_i64();
- tcg_gen_mov_i64(xth, cpu_vsrl(xA(ctx->opcode)));
+ get_cpu_vsrl(xth, xA(ctx->opcode));
tcg_gen_shli_i64(xth, xth, 32);
- tcg_gen_mov_i64(t0, cpu_vsrh(xB(ctx->opcode)));
+ get_cpu_vsrh(t0, xB(ctx->opcode));
tcg_gen_shri_i64(t0, t0, 32);
tcg_gen_or_i64(xth, xth, t0);
- tcg_gen_mov_i64(xtl, cpu_vsrh(xB(ctx->opcode)));
+ get_cpu_vsrh(xtl, xB(ctx->opcode));
tcg_gen_shli_i64(xtl, xtl, 32);
- tcg_gen_mov_i64(t0, cpu_vsrl(xB(ctx->opcode)));
+ get_cpu_vsrl(t0, xB(ctx->opcode));
tcg_gen_shri_i64(t0, t0, 32);
tcg_gen_or_i64(xtl, xtl, t0);
tcg_temp_free_i64(t0);
@@ -1202,8 +1479,8 @@ static void gen_xxsldwi(DisasContext *ctx)
}
}
- tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xth);
- tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xtl);
+ set_cpu_vsrh(xT(ctx->opcode), xth);
+ set_cpu_vsrl(xT(ctx->opcode), xtl);
tcg_temp_free_i64(xth);
tcg_temp_free_i64(xtl);
@@ -1213,7 +1490,8 @@ static void gen_xxsldwi(DisasContext *ctx)
static void gen_##name(DisasContext *ctx) \
{ \
TCGv xt, xb; \
- TCGv_i32 t0 = tcg_temp_new_i32(); \
+ TCGv_i32 t0; \
+ TCGv_i64 t1; \
uint8_t uimm = UIMM4(ctx->opcode); \
\
if (unlikely(!ctx->vsx_enabled)) { \
@@ -1222,12 +1500,15 @@ static void gen_##name(DisasContext *ctx) \
} \
xt = tcg_const_tl(xT(ctx->opcode)); \
xb = tcg_const_tl(xB(ctx->opcode)); \
+ t0 = tcg_temp_new_i32(); \
+ t1 = tcg_temp_new_i64(); \
/* uimm > 15 out of bound and for \
* uimm > 12 handle as per hardware in helper \
*/ \
if (uimm > 15) { \
- tcg_gen_movi_i64(cpu_vsrh(xT(ctx->opcode)), 0); \
- tcg_gen_movi_i64(cpu_vsrl(xT(ctx->opcode)), 0); \
+ tcg_gen_movi_i64(t1, 0); \
+ set_cpu_vsrh(xT(ctx->opcode), t1); \
+ set_cpu_vsrl(xT(ctx->opcode), t1); \
+ tcg_temp_free_i64(t1); \
return; \
} \
tcg_gen_movi_i32(t0, uimm); \
@@ -1235,6 +1516,7 @@ static void gen_##name(DisasContext *ctx) \
tcg_temp_free(xb); \
tcg_temp_free(xt); \
tcg_temp_free_i32(t0); \
+ tcg_temp_free_i64(t1); \
}
VSX_EXTRACT_INSERT(xxextractuw)
@@ -1244,30 +1526,45 @@ VSX_EXTRACT_INSERT(xxinsertw)
static void gen_xsxexpdp(DisasContext *ctx)
{
TCGv rt = cpu_gpr[rD(ctx->opcode)];
+ TCGv_i64 t0;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
- tcg_gen_extract_i64(rt, cpu_vsrh(xB(ctx->opcode)), 52, 11);
+ t0 = tcg_temp_new_i64();
+ get_cpu_vsrh(t0, xB(ctx->opcode));
+ tcg_gen_extract_i64(rt, t0, 52, 11);
+ tcg_temp_free_i64(t0);
}
static void gen_xsxexpqp(DisasContext *ctx)
{
- TCGv_i64 xth = cpu_vsrh(rD(ctx->opcode) + 32);
- TCGv_i64 xtl = cpu_vsrl(rD(ctx->opcode) + 32);
- TCGv_i64 xbh = cpu_vsrh(rB(ctx->opcode) + 32);
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xbh;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ get_cpu_vsrh(xbh, rB(ctx->opcode) + 32);
+
tcg_gen_extract_i64(xth, xbh, 48, 15);
+ set_cpu_vsrh(rD(ctx->opcode) + 32, xth);
tcg_gen_movi_i64(xtl, 0);
+ set_cpu_vsrl(rD(ctx->opcode) + 32, xtl);
+
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
}
static void gen_xsiexpdp(DisasContext *ctx)
{
- TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
+ TCGv_i64 xth;
TCGv ra = cpu_gpr[rA(ctx->opcode)];
TCGv rb = cpu_gpr[rB(ctx->opcode)];
TCGv_i64 t0;
@@ -1277,40 +1574,60 @@ static void gen_xsiexpdp(DisasContext *ctx)
return;
}
t0 = tcg_temp_new_i64();
+ xth = tcg_temp_new_i64();
tcg_gen_andi_i64(xth, ra, 0x800FFFFFFFFFFFFF);
tcg_gen_andi_i64(t0, rb, 0x7FF);
tcg_gen_shli_i64(t0, t0, 52);
tcg_gen_or_i64(xth, xth, t0);
+ set_cpu_vsrh(xT(ctx->opcode), xth);
/* dword[1] is undefined */
tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(xth);
}
static void gen_xsiexpqp(DisasContext *ctx)
{
- TCGv_i64 xth = cpu_vsrh(rD(ctx->opcode) + 32);
- TCGv_i64 xtl = cpu_vsrl(rD(ctx->opcode) + 32);
- TCGv_i64 xah = cpu_vsrh(rA(ctx->opcode) + 32);
- TCGv_i64 xal = cpu_vsrl(rA(ctx->opcode) + 32);
- TCGv_i64 xbh = cpu_vsrh(rB(ctx->opcode) + 32);
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xah;
+ TCGv_i64 xal;
+ TCGv_i64 xbh;
TCGv_i64 t0;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xah = tcg_temp_new_i64();
+ xal = tcg_temp_new_i64();
+ get_cpu_vsrh(xah, rA(ctx->opcode) + 32);
+ get_cpu_vsrl(xal, rA(ctx->opcode) + 32);
+ xbh = tcg_temp_new_i64();
+ get_cpu_vsrh(xbh, rB(ctx->opcode) + 32);
t0 = tcg_temp_new_i64();
+
tcg_gen_andi_i64(xth, xah, 0x8000FFFFFFFFFFFF);
tcg_gen_andi_i64(t0, xbh, 0x7FFF);
tcg_gen_shli_i64(t0, t0, 48);
tcg_gen_or_i64(xth, xth, t0);
+ set_cpu_vsrh(rD(ctx->opcode) + 32, xth);
tcg_gen_mov_i64(xtl, xal);
+ set_cpu_vsrl(rD(ctx->opcode) + 32, xtl);
+
tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xah);
+ tcg_temp_free_i64(xal);
+ tcg_temp_free_i64(xbh);
}
static void gen_xsxsigdp(DisasContext *ctx)
{
TCGv rt = cpu_gpr[rD(ctx->opcode)];
- TCGv_i64 t0, zr, nan, exp;
+ TCGv_i64 t0, t1, zr, nan, exp;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
@@ -1318,17 +1635,21 @@ static void gen_xsxsigdp(DisasContext *ctx)
}
exp = tcg_temp_new_i64();
t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
zr = tcg_const_i64(0);
nan = tcg_const_i64(2047);
- tcg_gen_extract_i64(exp, cpu_vsrh(xB(ctx->opcode)), 52, 11);
+ get_cpu_vsrh(t1, xB(ctx->opcode));
+ tcg_gen_extract_i64(exp, t1, 52, 11);
tcg_gen_movi_i64(t0, 0x0010000000000000);
tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
- tcg_gen_andi_i64(rt, cpu_vsrh(xB(ctx->opcode)), 0x000FFFFFFFFFFFFF);
+ get_cpu_vsrh(t1, xB(ctx->opcode));
+ tcg_gen_andi_i64(rt, t1, 0x000FFFFFFFFFFFFF);
tcg_gen_or_i64(rt, rt, t0);
tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
tcg_temp_free_i64(exp);
tcg_temp_free_i64(zr);
tcg_temp_free_i64(nan);
@@ -1337,132 +1658,219 @@ static void gen_xsxsigdp(DisasContext *ctx)
static void gen_xsxsigqp(DisasContext *ctx)
{
TCGv_i64 t0, zr, nan, exp;
- TCGv_i64 xth = cpu_vsrh(rD(ctx->opcode) + 32);
- TCGv_i64 xtl = cpu_vsrl(rD(ctx->opcode) + 32);
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsrh(xbh, rB(ctx->opcode) + 32);
+ get_cpu_vsrl(xbl, rB(ctx->opcode) + 32);
exp = tcg_temp_new_i64();
t0 = tcg_temp_new_i64();
zr = tcg_const_i64(0);
nan = tcg_const_i64(32767);
- tcg_gen_extract_i64(exp, cpu_vsrh(rB(ctx->opcode) + 32), 48, 15);
+ tcg_gen_extract_i64(exp, xbh, 48, 15);
tcg_gen_movi_i64(t0, 0x0001000000000000);
tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
- tcg_gen_andi_i64(xth, cpu_vsrh(rB(ctx->opcode) + 32), 0x0000FFFFFFFFFFFF);
+ tcg_gen_andi_i64(xth, xbh, 0x0000FFFFFFFFFFFF);
tcg_gen_or_i64(xth, xth, t0);
- tcg_gen_mov_i64(xtl, cpu_vsrl(rB(ctx->opcode) + 32));
+ set_cpu_vsrh(rD(ctx->opcode) + 32, xth);
+ tcg_gen_mov_i64(xtl, xbl);
+ set_cpu_vsrl(rD(ctx->opcode) + 32, xtl);
tcg_temp_free_i64(t0);
tcg_temp_free_i64(exp);
tcg_temp_free_i64(zr);
tcg_temp_free_i64(nan);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
}
#endif
static void gen_xviexpsp(DisasContext *ctx)
{
- TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
- TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
- TCGv_i64 xah = cpu_vsrh(xA(ctx->opcode));
- TCGv_i64 xal = cpu_vsrl(xA(ctx->opcode));
- TCGv_i64 xbh = cpu_vsrh(xB(ctx->opcode));
- TCGv_i64 xbl = cpu_vsrl(xB(ctx->opcode));
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xah;
+ TCGv_i64 xal;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
TCGv_i64 t0;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xah = tcg_temp_new_i64();
+ xal = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsrh(xah, xA(ctx->opcode));
+ get_cpu_vsrl(xal, xA(ctx->opcode));
+ get_cpu_vsrh(xbh, xB(ctx->opcode));
+ get_cpu_vsrl(xbl, xB(ctx->opcode));
t0 = tcg_temp_new_i64();
+
tcg_gen_andi_i64(xth, xah, 0x807FFFFF807FFFFF);
tcg_gen_andi_i64(t0, xbh, 0xFF000000FF);
tcg_gen_shli_i64(t0, t0, 23);
tcg_gen_or_i64(xth, xth, t0);
+ set_cpu_vsrh(xT(ctx->opcode), xth);
tcg_gen_andi_i64(xtl, xal, 0x807FFFFF807FFFFF);
tcg_gen_andi_i64(t0, xbl, 0xFF000000FF);
tcg_gen_shli_i64(t0, t0, 23);
tcg_gen_or_i64(xtl, xtl, t0);
+ set_cpu_vsrl(xT(ctx->opcode), xtl);
+
tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xah);
+ tcg_temp_free_i64(xal);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
}
static void gen_xviexpdp(DisasContext *ctx)
{
- TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
- TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
- TCGv_i64 xah = cpu_vsrh(xA(ctx->opcode));
- TCGv_i64 xal = cpu_vsrl(xA(ctx->opcode));
- TCGv_i64 xbh = cpu_vsrh(xB(ctx->opcode));
- TCGv_i64 xbl = cpu_vsrl(xB(ctx->opcode));
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xah;
+ TCGv_i64 xal;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
TCGv_i64 t0;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xah = tcg_temp_new_i64();
+ xal = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsrh(xah, xA(ctx->opcode));
+ get_cpu_vsrl(xal, xA(ctx->opcode));
+ get_cpu_vsrh(xbh, xB(ctx->opcode));
+ get_cpu_vsrl(xbl, xB(ctx->opcode));
t0 = tcg_temp_new_i64();
+
tcg_gen_andi_i64(xth, xah, 0x800FFFFFFFFFFFFF);
tcg_gen_andi_i64(t0, xbh, 0x7FF);
tcg_gen_shli_i64(t0, t0, 52);
tcg_gen_or_i64(xth, xth, t0);
+ set_cpu_vsrh(xT(ctx->opcode), xth);
tcg_gen_andi_i64(xtl, xal, 0x800FFFFFFFFFFFFF);
tcg_gen_andi_i64(t0, xbl, 0x7FF);
tcg_gen_shli_i64(t0, t0, 52);
tcg_gen_or_i64(xtl, xtl, t0);
+ set_cpu_vsrl(xT(ctx->opcode), xtl);
+
tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xah);
+ tcg_temp_free_i64(xal);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
}
static void gen_xvxexpsp(DisasContext *ctx)
{
- TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
- TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
- TCGv_i64 xbh = cpu_vsrh(xB(ctx->opcode));
- TCGv_i64 xbl = cpu_vsrl(xB(ctx->opcode));
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsrh(xbh, xB(ctx->opcode));
+ get_cpu_vsrl(xbl, xB(ctx->opcode));
+
tcg_gen_shri_i64(xth, xbh, 23);
tcg_gen_andi_i64(xth, xth, 0xFF000000FF);
+ set_cpu_vsrh(xT(ctx->opcode), xth);
tcg_gen_shri_i64(xtl, xbl, 23);
tcg_gen_andi_i64(xtl, xtl, 0xFF000000FF);
+ set_cpu_vsrl(xT(ctx->opcode), xtl);
+
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
}
static void gen_xvxexpdp(DisasContext *ctx)
{
- TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
- TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
- TCGv_i64 xbh = cpu_vsrh(xB(ctx->opcode));
- TCGv_i64 xbl = cpu_vsrl(xB(ctx->opcode));
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsrh(xbh, xB(ctx->opcode));
+ get_cpu_vsrl(xbl, xB(ctx->opcode));
+
tcg_gen_extract_i64(xth, xbh, 52, 11);
+ set_cpu_vsrh(xT(ctx->opcode), xth);
tcg_gen_extract_i64(xtl, xbl, 52, 11);
+ set_cpu_vsrl(xT(ctx->opcode), xtl);
+
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
}
GEN_VSX_HELPER_2(xvxsigsp, 0x00, 0x04, 0, PPC2_ISA300)
static void gen_xvxsigdp(DisasContext *ctx)
{
- TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
- TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
- TCGv_i64 xbh = cpu_vsrh(xB(ctx->opcode));
- TCGv_i64 xbl = cpu_vsrl(xB(ctx->opcode));
-
+ TCGv_i64 xth;
+ TCGv_i64 xtl;
+ TCGv_i64 xbh;
+ TCGv_i64 xbl;
TCGv_i64 t0, zr, nan, exp;
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+ xbh = tcg_temp_new_i64();
+ xbl = tcg_temp_new_i64();
+ get_cpu_vsrh(xbh, xB(ctx->opcode));
+ get_cpu_vsrl(xbl, xB(ctx->opcode));
exp = tcg_temp_new_i64();
t0 = tcg_temp_new_i64();
zr = tcg_const_i64(0);
@@ -1474,6 +1882,7 @@ static void gen_xvxsigdp(DisasContext *ctx)
tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
tcg_gen_andi_i64(xth, xbh, 0x000FFFFFFFFFFFFF);
tcg_gen_or_i64(xth, xth, t0);
+ set_cpu_vsrh(xT(ctx->opcode), xth);
tcg_gen_extract_i64(exp, xbl, 52, 11);
tcg_gen_movi_i64(t0, 0x0010000000000000);
@@ -1481,11 +1890,16 @@ static void gen_xvxsigdp(DisasContext *ctx)
tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
tcg_gen_andi_i64(xtl, xbl, 0x000FFFFFFFFFFFFF);
tcg_gen_or_i64(xtl, xtl, t0);
+ set_cpu_vsrl(xT(ctx->opcode), xtl);
tcg_temp_free_i64(t0);
tcg_temp_free_i64(exp);
tcg_temp_free_i64(zr);
tcg_temp_free_i64(nan);
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+ tcg_temp_free_i64(xbh);
+ tcg_temp_free_i64(xbl);
}
#undef GEN_XX2FORM