author    Peter Maydell <peter.maydell@linaro.org>  2019-07-02 18:56:44 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2019-07-02 18:56:44 +0100
commit    506179e42112be77bfd071f050b15762d3b2cd43 (patch)
tree      4f7c5c8c71a5969fb04fbd6266ff958824833462 /target
parent    efa85a4d1ab13e962c0a93d09b7e935571d669fe (diff)
parent    1c3d4a8f4b4f24baa9dae31db0599925abc7d2a2 (diff)
Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-4.1-20190702' into staging

ppc patch queue 2019-07-02

Here's my next pull request for qemu-4.1. I'm not sure if this will
squeak in just before the soft freeze, or just after. I don't think it
really matters - most of this is bugfixes anyway. There are some
cleanups which aren't strictly bugfixes, but which I think are safe
enough improvements to go in during the soft freeze. There's no true
feature work.

Unfortunately, I wasn't able to complete a few of my standard battery
of pre-pull tests, due to some failures that appear to also be in
master. I'm hoping that hasn't let anything important slip through
here.

Highlights are:
 * A number of fixes and cleanups for the XIVE implementation
 * Cleanups to the XICS interrupt controller to fit better with the
   new XIVE code
 * Numerous fixes and improvements to TCG handling of ppc vector
   instructions
 * Removal of a number of unnecessary #ifdef CONFIG_KVM guards
 * Fixes for some errors in the PCI hotplug paths
 * Assorted other fixes

# gpg: Signature made Tue 02 Jul 2019 07:07:15 BST
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-4.1-20190702: (49 commits)
  spapr/xive: Add proper rollback to kvmppc_xive_connect()
  ppc/xive: Fix TM_PULL_POOL_CTX special operation
  ppc/pnv: Rework cache watch model of PnvXIVE
  ppc/xive: Make the PIPR register readonly
  ppc/xive: Force the Physical CAM line value to group mode
  spapr/xive: simplify spapr_irq_init_device() to remove the emulated init
  spapr/xive: rework the mapping the KVM memory regions
  spapr_pci: Unregister listeners before destroying the IOMMU address space
  target/ppc: improve VSX_FMADD with new GEN_VSX_HELPER_VSX_MADD macro
  target/ppc: decode target register in VSX_EXTRACT_INSERT at translation time
  target/ppc: decode target register in VSX_VECTOR_LOAD_STORE_LENGTH at translation time
  target/ppc: introduce GEN_VSX_HELPER_R2_AB macro to fpu_helper.c
  target/ppc: introduce GEN_VSX_HELPER_R2 macro to fpu_helper.c
  target/ppc: introduce GEN_VSX_HELPER_R3 macro to fpu_helper.c
  target/ppc: introduce GEN_VSX_HELPER_X1 macro to fpu_helper.c
  target/ppc: introduce GEN_VSX_HELPER_X2_AB macro to fpu_helper.c
  target/ppc: introduce GEN_VSX_HELPER_X2 macro to fpu_helper.c
  target/ppc: introduce separate generator and helper for xscvqpdp
  target/ppc: introduce GEN_VSX_HELPER_X3 macro to fpu_helper.c
  target/ppc: introduce separate VSX_CMP macro for xvcmp* instructions
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
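Most of the fpu_helper.c churn below applies one mechanical pattern: helpers
stop receiving the raw opcode and doing their own getVSR()/putVSR() copies of
each VSX register, and instead take ppc_vsr_t pointers that the translator
resolves at translation time. A minimal sketch of the before/after shape,
using stand-in types and hypothetical field extractors rather than QEMU's
real definitions:

#include <stdint.h>

/* Stand-ins so the sketch is self-contained; QEMU's real types differ. */
typedef union {
    uint64_t u64[2];
    double   f64[2];
} ppc_vsr_t;

typedef struct {
    ppc_vsr_t vsr[64];
} CPUPPCState;

/* Hypothetical register-field extractors, for illustration only. */
#define xT(op) (((op) >> 21) & 0x1f)
#define xA(op) (((op) >> 16) & 0x1f)
#define xB(op) (((op) >> 11) & 0x1f)

/* Before: the helper decodes the registers itself and copies every
 * VSR in and out of env. */
static void helper_add_old_style(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xa = env->vsr[xA(opcode)];
    ppc_vsr_t xb = env->vsr[xB(opcode)];
    ppc_vsr_t xt = env->vsr[xT(opcode)];

    xt.f64[0] = xa.f64[0] + xb.f64[0];
    env->vsr[xT(opcode)] = xt;          /* explicit write-back */
}

/* After: the translator passes pointers straight into env->vsr[], so
 * the helper works on a local copy and does a single write-back. */
static void helper_add_new_style(CPUPPCState *env, ppc_vsr_t *xt,
                                 ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;                  /* local working copy */

    (void)env;                          /* real helpers still use env->fp_status */
    t.f64[0] = xa->f64[0] + xb->f64[0];
    *xt = t;
}

Per the shortlog, the GEN_VSX_HELPER_X2/X3/R2/R3 family of macros introduced
by this series generates the translation-time side of this calling convention.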
Diffstat (limited to 'target')
-rw-r--r--  target/ppc/fpu_helper.c               841
-rw-r--r--  target/ppc/helper.h                   320
-rw-r--r--  target/ppc/int_helper.c                26
-rw-r--r--  target/ppc/internal.h                  12
-rw-r--r--  target/ppc/kvm.c                       11
-rw-r--r--  target/ppc/kvm_ppc.h                   10
-rw-r--r--  target/ppc/machine.c                    2
-rw-r--r--  target/ppc/mem_helper.c                25
-rw-r--r--  target/ppc/translate/vsx-impl.inc.c   567
-rw-r--r--  target/ppc/translate/vsx-ops.inc.c     70
10 files changed, 972 insertions, 912 deletions
diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
index ffbd19a..f437c88 100644
--- a/target/ppc/fpu_helper.c
+++ b/target/ppc/fpu_helper.c
@@ -1801,37 +1801,35 @@ uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
* sfprf - set FPRF
*/
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp) \
-void helper_##name(CPUPPCState *env, uint32_t opcode) \
+void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xa, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld = tp##_##op(xa.fld, xb.fld, &tstat); \
+ t.fld = tp##_##op(xa->fld, xb->fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
float_invalid_op_addsub(env, sfprf, GETPC(), \
- tp##_classify(xa.fld) | \
- tp##_classify(xb.fld)); \
+ tp##_classify(xa->fld) | \
+ tp##_classify(xb->fld)); \
} \
\
if (r2sp) { \
- xt.fld = helper_frsp(env, xt.fld); \
+ t.fld = helper_frsp(env, t.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf_float64(env, xt.fld); \
+ helper_compute_fprf_float64(env, t.fld); \
} \
} \
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -1844,14 +1842,12 @@ VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
-void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
+void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
- ppc_vsr_t xt, xa, xb;
+ ppc_vsr_t t = *xt;
float_status tstat;
- getVSR(rA(opcode) + 32, &xa, env);
- getVSR(rB(opcode) + 32, &xb, env);
- getVSR(rD(opcode) + 32, &xt, env);
helper_reset_fpstatus(env);
tstat = env->fp_status;
@@ -1860,18 +1856,18 @@ void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
}
set_float_exception_flags(0, &tstat);
- xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
+ t.f128 = float128_add(xa->f128, xb->f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
float_invalid_op_addsub(env, 1, GETPC(),
- float128_classify(xa.f128) |
- float128_classify(xb.f128));
+ float128_classify(xa->f128) |
+ float128_classify(xb->f128));
}
- helper_compute_fprf_float128(env, xt.f128);
+ helper_compute_fprf_float128(env, t.f128);
- putVSR(rD(opcode) + 32, &xt, env);
+ *xt = t;
do_float_check_status(env, GETPC());
}
@@ -1884,38 +1880,36 @@ void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
* sfprf - set FPRF
*/
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xa, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld = tp##_mul(xa.fld, xb.fld, &tstat); \
+ t.fld = tp##_mul(xa->fld, xb->fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
float_invalid_op_mul(env, sfprf, GETPC(), \
- tp##_classify(xa.fld) | \
- tp##_classify(xb.fld)); \
+ tp##_classify(xa->fld) | \
+ tp##_classify(xb->fld)); \
} \
\
if (r2sp) { \
- xt.fld = helper_frsp(env, xt.fld); \
+ t.fld = helper_frsp(env, t.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf_float64(env, xt.fld); \
+ helper_compute_fprf_float64(env, t.fld); \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -1924,15 +1918,12 @@ VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
-void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
+void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
- ppc_vsr_t xt, xa, xb;
+ ppc_vsr_t t = *xt;
float_status tstat;
- getVSR(rA(opcode) + 32, &xa, env);
- getVSR(rB(opcode) + 32, &xb, env);
- getVSR(rD(opcode) + 32, &xt, env);
-
helper_reset_fpstatus(env);
tstat = env->fp_status;
if (unlikely(Rc(opcode) != 0)) {
@@ -1940,17 +1931,17 @@ void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
}
set_float_exception_flags(0, &tstat);
- xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
+ t.f128 = float128_mul(xa->f128, xb->f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
float_invalid_op_mul(env, 1, GETPC(),
- float128_classify(xa.f128) |
- float128_classify(xb.f128));
+ float128_classify(xa->f128) |
+ float128_classify(xb->f128));
}
- helper_compute_fprf_float128(env, xt.f128);
+ helper_compute_fprf_float128(env, t.f128);
- putVSR(rD(opcode) + 32, &xt, env);
+ *xt = t;
do_float_check_status(env, GETPC());
}
@@ -1963,41 +1954,39 @@ void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
* sfprf - set FPRF
*/
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xa, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld = tp##_div(xa.fld, xb.fld, &tstat); \
+ t.fld = tp##_div(xa->fld, xb->fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
float_invalid_op_div(env, sfprf, GETPC(), \
- tp##_classify(xa.fld) | \
- tp##_classify(xb.fld)); \
+ tp##_classify(xa->fld) | \
+ tp##_classify(xb->fld)); \
} \
if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) { \
float_zero_divide_excp(env, GETPC()); \
} \
\
if (r2sp) { \
- xt.fld = helper_frsp(env, xt.fld); \
+ t.fld = helper_frsp(env, t.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf_float64(env, xt.fld); \
+ helper_compute_fprf_float64(env, t.fld); \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -2006,15 +1995,12 @@ VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
-void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
+void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
- ppc_vsr_t xt, xa, xb;
+ ppc_vsr_t t = *xt;
float_status tstat;
- getVSR(rA(opcode) + 32, &xa, env);
- getVSR(rB(opcode) + 32, &xb, env);
- getVSR(rD(opcode) + 32, &xt, env);
-
helper_reset_fpstatus(env);
tstat = env->fp_status;
if (unlikely(Rc(opcode) != 0)) {
@@ -2022,20 +2008,20 @@ void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
}
set_float_exception_flags(0, &tstat);
- xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
+ t.f128 = float128_div(xa->f128, xb->f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
float_invalid_op_div(env, 1, GETPC(),
- float128_classify(xa.f128) |
- float128_classify(xb.f128));
+ float128_classify(xa->f128) |
+ float128_classify(xb->f128));
}
if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
float_zero_divide_excp(env, GETPC());
}
- helper_compute_fprf_float128(env, xt.f128);
- putVSR(rD(opcode) + 32, &xt, env);
+ helper_compute_fprf_float128(env, t.f128);
+ *xt = t;
do_float_check_status(env, GETPC());
}
@@ -2048,31 +2034,29 @@ void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
* sfprf - set FPRF
*/
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
+ if (unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) { \
float_invalid_op_vxsnan(env, GETPC()); \
} \
- xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status); \
+ t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status); \
\
if (r2sp) { \
- xt.fld = helper_frsp(env, xt.fld); \
+ t.fld = helper_frsp(env, t.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf_float64(env, xt.fld); \
+ helper_compute_fprf_float64(env, t.fld); \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -2090,39 +2074,37 @@ VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
* sfprf - set FPRF
*/
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld = tp##_sqrt(xb.fld, &tstat); \
+ t.fld = tp##_sqrt(xb->fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
+ if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) { \
float_invalid_op_vxsqrt(env, sfprf, GETPC()); \
- } else if (tp##_is_signaling_nan(xb.fld, &tstat)) { \
+ } else if (tp##_is_signaling_nan(xb->fld, &tstat)) { \
float_invalid_op_vxsnan(env, GETPC()); \
} \
} \
\
if (r2sp) { \
- xt.fld = helper_frsp(env, xt.fld); \
+ t.fld = helper_frsp(env, t.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf_float64(env, xt.fld); \
+ helper_compute_fprf_float64(env, t.fld); \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -2140,40 +2122,38 @@ VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
* sfprf - set FPRF
*/
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld = tp##_sqrt(xb.fld, &tstat); \
- xt.fld = tp##_div(tp##_one, xt.fld, &tstat); \
+ t.fld = tp##_sqrt(xb->fld, &tstat); \
+ t.fld = tp##_div(tp##_one, t.fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
+ if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) { \
float_invalid_op_vxsqrt(env, sfprf, GETPC()); \
- } else if (tp##_is_signaling_nan(xb.fld, &tstat)) { \
+ } else if (tp##_is_signaling_nan(xb->fld, &tstat)) { \
float_invalid_op_vxsnan(env, GETPC()); \
} \
} \
\
if (r2sp) { \
- xt.fld = helper_frsp(env, xt.fld); \
+ t.fld = helper_frsp(env, t.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf_float64(env, xt.fld); \
+ helper_compute_fprf_float64(env, t.fld); \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -2193,39 +2173,36 @@ VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
* nbits - number of fraction bits
*/
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xa, xb; \
int i; \
int fe_flag = 0; \
int fg_flag = 0; \
\
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
- \
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_infinity(xa.fld) || \
- tp##_is_infinity(xb.fld) || \
- tp##_is_zero(xb.fld))) { \
+ if (unlikely(tp##_is_infinity(xa->fld) || \
+ tp##_is_infinity(xb->fld) || \
+ tp##_is_zero(xb->fld))) { \
fe_flag = 1; \
fg_flag = 1; \
} else { \
- int e_a = ppc_##tp##_get_unbiased_exp(xa.fld); \
- int e_b = ppc_##tp##_get_unbiased_exp(xb.fld); \
+ int e_a = ppc_##tp##_get_unbiased_exp(xa->fld); \
+ int e_b = ppc_##tp##_get_unbiased_exp(xb->fld); \
\
- if (unlikely(tp##_is_any_nan(xa.fld) || \
- tp##_is_any_nan(xb.fld))) { \
+ if (unlikely(tp##_is_any_nan(xa->fld) || \
+ tp##_is_any_nan(xb->fld))) { \
fe_flag = 1; \
} else if ((e_b <= emin) || (e_b >= (emax - 2))) { \
fe_flag = 1; \
- } else if (!tp##_is_zero(xa.fld) && \
+ } else if (!tp##_is_zero(xa->fld) && \
(((e_a - e_b) >= emax) || \
((e_a - e_b) <= (emin + 1)) || \
(e_a <= (emin + nbits)))) { \
fe_flag = 1; \
} \
\
- if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
+ if (unlikely(tp##_is_zero_or_denormal(xb->fld))) { \
/* \
* XB is not zero because of the above check and so \
* must be denormalized. \
@@ -2253,36 +2230,32 @@ VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
* nbits - number of fraction bits
*/
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xa, xb; \
int i; \
int fe_flag = 0; \
int fg_flag = 0; \
\
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
- \
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_infinity(xb.fld) || \
- tp##_is_zero(xb.fld))) { \
+ if (unlikely(tp##_is_infinity(xb->fld) || \
+ tp##_is_zero(xb->fld))) { \
fe_flag = 1; \
fg_flag = 1; \
} else { \
- int e_b = ppc_##tp##_get_unbiased_exp(xb.fld); \
+ int e_b = ppc_##tp##_get_unbiased_exp(xb->fld); \
\
- if (unlikely(tp##_is_any_nan(xb.fld))) { \
+ if (unlikely(tp##_is_any_nan(xb->fld))) { \
fe_flag = 1; \
- } else if (unlikely(tp##_is_zero(xb.fld))) { \
+ } else if (unlikely(tp##_is_zero(xb->fld))) { \
fe_flag = 1; \
- } else if (unlikely(tp##_is_neg(xb.fld))) { \
+ } else if (unlikely(tp##_is_neg(xb->fld))) { \
fe_flag = 1; \
- } else if (!tp##_is_zero(xb.fld) && \
+ } else if (!tp##_is_zero(xb->fld) && \
(e_b <= (emin + nbits))) { \
fe_flag = 1; \
} \
\
- if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
+ if (unlikely(tp##_is_zero_or_denormal(xb->fld))) { \
/* \
* XB is not zero because of the above check and \
* therefore must be denormalized. \
@@ -2307,30 +2280,15 @@ VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
* fld - vsr_t field (VsrD(*) or VsrW(*))
* maddflgs - flags for the float*muladd routine that control the
* various forms (madd, msub, nmadd, nmsub)
- * afrm - A form (1=A, 0=M)
* sfprf - set FPRF
*/
-#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+#define VSX_MADD(op, nels, tp, fld, maddflgs, sfprf, r2sp) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c) \
{ \
- ppc_vsr_t xt_in, xa, xb, xt_out; \
- ppc_vsr_t *b, *c; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- if (afrm) { /* AxB + T */ \
- b = &xb; \
- c = &xt_in; \
- } else { /* AxT + B */ \
- b = &xt_in; \
- c = &xb; \
- } \
- \
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt_in, env); \
- \
- xt_out = xt_in; \
- \
helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
@@ -2342,68 +2300,51 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
* result to odd. \
*/ \
set_float_rounding_mode(float_round_to_zero, &tstat); \
- xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld, \
- maddflgs, &tstat); \
- xt_out.fld |= (get_float_exception_flags(&tstat) & \
- float_flag_inexact) != 0; \
+ t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \
+ maddflgs, &tstat); \
+ t.fld |= (get_float_exception_flags(&tstat) & \
+ float_flag_inexact) != 0; \
} else { \
- xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld, \
- maddflgs, &tstat); \
+ t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \
+ maddflgs, &tstat); \
} \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- tp##_maddsub_update_excp(env, xa.fld, b->fld, \
+ tp##_maddsub_update_excp(env, xa->fld, b->fld, \
c->fld, maddflgs, GETPC()); \
} \
\
if (r2sp) { \
- xt_out.fld = helper_frsp(env, xt_out.fld); \
+ t.fld = helper_frsp(env, t.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf_float64(env, xt_out.fld); \
+ helper_compute_fprf_float64(env, t.fld); \
} \
} \
- putVSR(xT(opcode), &xt_out, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
-VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
-VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
-VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
-VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
-VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
-VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
-VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
-VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)
-
-VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
-VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
-VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
-VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
-VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
-VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
-VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
-VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)
-
-VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
-VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
-VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
-VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
-VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
-VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
-VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
-VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)
-
-VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
-VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
-VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
-VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
-VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
-VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
-VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
-VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
+VSX_MADD(xsmadddp, 1, float64, VsrD(0), MADD_FLGS, 1, 0)
+VSX_MADD(xsmsubdp, 1, float64, VsrD(0), MSUB_FLGS, 1, 0)
+VSX_MADD(xsnmadddp, 1, float64, VsrD(0), NMADD_FLGS, 1, 0)
+VSX_MADD(xsnmsubdp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0)
+VSX_MADD(xsmaddsp, 1, float64, VsrD(0), MADD_FLGS, 1, 1)
+VSX_MADD(xsmsubsp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1)
+VSX_MADD(xsnmaddsp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1)
+VSX_MADD(xsnmsubsp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1)
+
+VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0, 0)
+VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0)
+VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0)
+VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0)
+
+VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0)
+VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0)
+VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0)
+VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0)
/*
* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
@@ -2413,24 +2354,21 @@ VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
* svxvc - set VXVC bit
*/
#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xa, xb; \
+ ppc_vsr_t t = *xt; \
bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false; \
\
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
- \
- if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
- float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
+ if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \
+ float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
vxsnan_flag = true; \
if (fpscr_ve == 0 && svxvc) { \
vxvc_flag = true; \
} \
} else if (svxvc) { \
- vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) || \
- float64_is_quiet_nan(xb.VsrD(0), &env->fp_status); \
+ vxvc_flag = float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || \
+ float64_is_quiet_nan(xb->VsrD(0), &env->fp_status); \
} \
if (vxsnan_flag) { \
float_invalid_op_vxsnan(env, GETPC()); \
@@ -2441,15 +2379,16 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag); \
\
if (!vex_flag) { \
- if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) { \
- xt.VsrD(0) = -1; \
- xt.VsrD(1) = 0; \
+ if (float64_##cmp(xb->VsrD(0), xa->VsrD(0), \
+ &env->fp_status) == exp) { \
+ t.VsrD(0) = -1; \
+ t.VsrD(1) = 0; \
} else { \
- xt.VsrD(0) = 0; \
- xt.VsrD(1) = 0; \
+ t.VsrD(0) = 0; \
+ t.VsrD(1) = 0; \
} \
} \
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -2458,20 +2397,17 @@ VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
-void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode)
+void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xa, ppc_vsr_t *xb)
{
- ppc_vsr_t xa, xb;
int64_t exp_a, exp_b;
uint32_t cc;
- getVSR(xA(opcode), &xa, env);
- getVSR(xB(opcode), &xb, env);
-
- exp_a = extract64(xa.VsrD(0), 52, 11);
- exp_b = extract64(xb.VsrD(0), 52, 11);
+ exp_a = extract64(xa->VsrD(0), 52, 11);
+ exp_b = extract64(xb->VsrD(0), 52, 11);
- if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||
- float64_is_any_nan(xb.VsrD(0)))) {
+ if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||
+ float64_is_any_nan(xb->VsrD(0)))) {
cc = CRF_SO;
} else {
if (exp_a < exp_b) {
@@ -2490,20 +2426,17 @@ void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode)
do_float_check_status(env, GETPC());
}
-void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode)
+void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xa, ppc_vsr_t *xb)
{
- ppc_vsr_t xa, xb;
int64_t exp_a, exp_b;
uint32_t cc;
- getVSR(rA(opcode) + 32, &xa, env);
- getVSR(rB(opcode) + 32, &xb, env);
+ exp_a = extract64(xa->VsrD(0), 48, 15);
+ exp_b = extract64(xb->VsrD(0), 48, 15);
- exp_a = extract64(xa.VsrD(0), 48, 15);
- exp_b = extract64(xb.VsrD(0), 48, 15);
-
- if (unlikely(float128_is_any_nan(xa.f128) ||
- float128_is_any_nan(xb.f128))) {
+ if (unlikely(float128_is_any_nan(xa->f128) ||
+ float128_is_any_nan(xb->f128))) {
cc = CRF_SO;
} else {
if (exp_a < exp_b) {
@@ -2523,25 +2456,23 @@ void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode)
}
#define VSX_SCALAR_CMP(op, ordered) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xa, xb; \
uint32_t cc = 0; \
bool vxsnan_flag = false, vxvc_flag = false; \
\
helper_reset_fpstatus(env); \
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
\
- if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
- float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
+ if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \
+ float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
vxsnan_flag = true; \
cc = CRF_SO; \
if (fpscr_ve == 0 && ordered) { \
vxvc_flag = true; \
} \
- } else if (float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) || \
- float64_is_quiet_nan(xb.VsrD(0), &env->fp_status)) { \
+ } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || \
+ float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) { \
cc = CRF_SO; \
if (ordered) { \
vxvc_flag = true; \
@@ -2554,9 +2485,9 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
float_invalid_op_vxvc(env, 0, GETPC()); \
} \
\
- if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) { \
+ if (float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) { \
cc |= CRF_LT; \
- } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) { \
+ } else if (!float64_le(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) { \
cc |= CRF_GT; \
} else { \
cc |= CRF_EQ; \
@@ -2573,25 +2504,23 @@ VSX_SCALAR_CMP(xscmpodp, 1)
VSX_SCALAR_CMP(xscmpudp, 0)
#define VSX_SCALAR_CMPQ(op, ordered) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xa, xb; \
uint32_t cc = 0; \
bool vxsnan_flag = false, vxvc_flag = false; \
\
helper_reset_fpstatus(env); \
- getVSR(rA(opcode) + 32, &xa, env); \
- getVSR(rB(opcode) + 32, &xb, env); \
\
- if (float128_is_signaling_nan(xa.f128, &env->fp_status) || \
- float128_is_signaling_nan(xb.f128, &env->fp_status)) { \
+ if (float128_is_signaling_nan(xa->f128, &env->fp_status) || \
+ float128_is_signaling_nan(xb->f128, &env->fp_status)) { \
vxsnan_flag = true; \
cc = CRF_SO; \
if (fpscr_ve == 0 && ordered) { \
vxvc_flag = true; \
} \
- } else if (float128_is_quiet_nan(xa.f128, &env->fp_status) || \
- float128_is_quiet_nan(xb.f128, &env->fp_status)) { \
+ } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) || \
+ float128_is_quiet_nan(xb->f128, &env->fp_status)) { \
cc = CRF_SO; \
if (ordered) { \
vxvc_flag = true; \
@@ -2604,9 +2533,9 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
float_invalid_op_vxvc(env, 0, GETPC()); \
} \
\
- if (float128_lt(xa.f128, xb.f128, &env->fp_status)) { \
+ if (float128_lt(xa->f128, xb->f128, &env->fp_status)) { \
cc |= CRF_LT; \
- } else if (!float128_le(xa.f128, xb.f128, &env->fp_status)) { \
+ } else if (!float128_le(xa->f128, xb->f128, &env->fp_status)) { \
cc |= CRF_GT; \
} else { \
cc |= CRF_EQ; \
@@ -2631,24 +2560,21 @@ VSX_SCALAR_CMPQ(xscmpuqp, 0)
* fld - vsr_t field (VsrD(*) or VsrW(*))
*/
#define VSX_MAX_MIN(name, op, nels, tp, fld) \
-void helper_##name(CPUPPCState *env, uint32_t opcode) \
+void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xa, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
- \
for (i = 0; i < nels; i++) { \
- xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status); \
- if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) || \
- tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
+ t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status); \
+ if (unlikely(tp##_is_signaling_nan(xa->fld, &env->fp_status) || \
+ tp##_is_signaling_nan(xb->fld, &env->fp_status))) { \
float_invalid_op_vxsnan(env, GETPC()); \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -2660,29 +2586,26 @@ VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
#define VSX_MAX_MINC(name, max) \
-void helper_##name(CPUPPCState *env, uint32_t opcode) \
+void helper_##name(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xa, xb; \
+ ppc_vsr_t t = *xt; \
bool vxsnan_flag = false, vex_flag = false; \
\
- getVSR(rA(opcode) + 32, &xa, env); \
- getVSR(rB(opcode) + 32, &xb, env); \
- getVSR(rD(opcode) + 32, &xt, env); \
- \
- if (unlikely(float64_is_any_nan(xa.VsrD(0)) || \
- float64_is_any_nan(xb.VsrD(0)))) { \
- if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
- float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
+ if (unlikely(float64_is_any_nan(xa->VsrD(0)) || \
+ float64_is_any_nan(xb->VsrD(0)))) { \
+ if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \
+ float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
vxsnan_flag = true; \
} \
- xt.VsrD(0) = xb.VsrD(0); \
+ t.VsrD(0) = xb->VsrD(0); \
} else if ((max && \
- !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) || \
+ !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \
(!max && \
- float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) { \
- xt.VsrD(0) = xa.VsrD(0); \
+ float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \
+ t.VsrD(0) = xa->VsrD(0); \
} else { \
- xt.VsrD(0) = xb.VsrD(0); \
+ t.VsrD(0) = xb->VsrD(0); \
} \
\
vex_flag = fpscr_ve & vxsnan_flag; \
@@ -2690,7 +2613,7 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
float_invalid_op_vxsnan(env, GETPC()); \
} \
if (!vex_flag) { \
- putVSR(rD(opcode) + 32, &xt, env); \
+ *xt = t; \
} \
} \
@@ -2698,46 +2621,46 @@ VSX_MAX_MINC(xsmaxcdp, 1);
VSX_MAX_MINC(xsmincdp, 0);
#define VSX_MAX_MINJ(name, max) \
-void helper_##name(CPUPPCState *env, uint32_t opcode) \
+void helper_##name(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xa, xb; \
+ ppc_vsr_t t = *xt; \
bool vxsnan_flag = false, vex_flag = false; \
\
- getVSR(rA(opcode) + 32, &xa, env); \
- getVSR(rB(opcode) + 32, &xb, env); \
- getVSR(rD(opcode) + 32, &xt, env); \
- \
- if (unlikely(float64_is_any_nan(xa.VsrD(0)))) { \
- if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status)) { \
+ if (unlikely(float64_is_any_nan(xa->VsrD(0)))) { \
+ if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) { \
vxsnan_flag = true; \
} \
- xt.VsrD(0) = xa.VsrD(0); \
- } else if (unlikely(float64_is_any_nan(xb.VsrD(0)))) { \
- if (float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
+ t.VsrD(0) = xa->VsrD(0); \
+ } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) { \
+ if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
vxsnan_flag = true; \
} \
- xt.VsrD(0) = xb.VsrD(0); \
- } else if (float64_is_zero(xa.VsrD(0)) && float64_is_zero(xb.VsrD(0))) { \
+ t.VsrD(0) = xb->VsrD(0); \
+ } else if (float64_is_zero(xa->VsrD(0)) && \
+ float64_is_zero(xb->VsrD(0))) { \
if (max) { \
- if (!float64_is_neg(xa.VsrD(0)) || !float64_is_neg(xb.VsrD(0))) { \
- xt.VsrD(0) = 0ULL; \
+ if (!float64_is_neg(xa->VsrD(0)) || \
+ !float64_is_neg(xb->VsrD(0))) { \
+ t.VsrD(0) = 0ULL; \
} else { \
- xt.VsrD(0) = 0x8000000000000000ULL; \
+ t.VsrD(0) = 0x8000000000000000ULL; \
} \
} else { \
- if (float64_is_neg(xa.VsrD(0)) || float64_is_neg(xb.VsrD(0))) { \
- xt.VsrD(0) = 0x8000000000000000ULL; \
+ if (float64_is_neg(xa->VsrD(0)) || \
+ float64_is_neg(xb->VsrD(0))) { \
+ t.VsrD(0) = 0x8000000000000000ULL; \
} else { \
- xt.VsrD(0) = 0ULL; \
+ t.VsrD(0) = 0ULL; \
} \
} \
} else if ((max && \
- !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) || \
+ !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \
(!max && \
- float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) { \
- xt.VsrD(0) = xa.VsrD(0); \
+ float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \
+ t.VsrD(0) = xa->VsrD(0); \
} else { \
- xt.VsrD(0) = xb.VsrD(0); \
+ t.VsrD(0) = xb->VsrD(0); \
} \
\
vex_flag = fpscr_ve & vxsnan_flag; \
@@ -2745,7 +2668,7 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
float_invalid_op_vxsnan(env, GETPC()); \
} \
if (!vex_flag) { \
- putVSR(rD(opcode) + 32, &xt, env); \
+ *xt = t; \
} \
} \
@@ -2763,46 +2686,42 @@ VSX_MAX_MINJ(xsminjdp, 0);
* exp - expected result of comparison
*/
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xa, xb; \
+ ppc_vsr_t t = *xt; \
+ uint32_t crf6 = 0; \
int i; \
int all_true = 1; \
int all_false = 1; \
\
- getVSR(xA(opcode), &xa, env); \
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
- \
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_any_nan(xa.fld) || \
- tp##_is_any_nan(xb.fld))) { \
- if (tp##_is_signaling_nan(xa.fld, &env->fp_status) || \
- tp##_is_signaling_nan(xb.fld, &env->fp_status)) { \
+ if (unlikely(tp##_is_any_nan(xa->fld) || \
+ tp##_is_any_nan(xb->fld))) { \
+ if (tp##_is_signaling_nan(xa->fld, &env->fp_status) || \
+ tp##_is_signaling_nan(xb->fld, &env->fp_status)) { \
float_invalid_op_vxsnan(env, GETPC()); \
} \
if (svxvc) { \
float_invalid_op_vxvc(env, 0, GETPC()); \
} \
- xt.fld = 0; \
+ t.fld = 0; \
all_true = 0; \
} else { \
- if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) { \
- xt.fld = -1; \
+ if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) { \
+ t.fld = -1; \
all_false = 0; \
} else { \
- xt.fld = 0; \
+ t.fld = 0; \
all_true = 0; \
} \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
- if ((opcode >> (31 - 21)) & 1) { \
- env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0); \
- } \
- do_float_check_status(env, GETPC()); \
- }
+ *xt = t; \
+ crf6 = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0); \
+ return crf6; \
+}
VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
@@ -2824,27 +2743,24 @@ VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
* sfprf - set FPRF
*/
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
- \
for (i = 0; i < nels; i++) { \
- xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
- if (unlikely(stp##_is_signaling_nan(xb.sfld, \
+ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
+ if (unlikely(stp##_is_signaling_nan(xb->sfld, \
&env->fp_status))) { \
float_invalid_op_vxsnan(env, GETPC()); \
- xt.tfld = ttp##_snan_to_qnan(xt.tfld); \
+ t.tfld = ttp##_snan_to_qnan(t.tfld); \
} \
if (sfprf) { \
- helper_compute_fprf_##ttp(env, xt.tfld); \
+ helper_compute_fprf_##ttp(env, t.tfld); \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -2864,27 +2780,25 @@ VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
* sfprf - set FPRF
*/
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(rB(opcode) + 32, &xb, env); \
- getVSR(rD(opcode) + 32, &xt, env); \
- \
for (i = 0; i < nels; i++) { \
- xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
- if (unlikely(stp##_is_signaling_nan(xb.sfld, \
+ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
+ if (unlikely(stp##_is_signaling_nan(xb->sfld, \
&env->fp_status))) { \
float_invalid_op_vxsnan(env, GETPC()); \
- xt.tfld = ttp##_snan_to_qnan(xt.tfld); \
+ t.tfld = ttp##_snan_to_qnan(t.tfld); \
} \
if (sfprf) { \
- helper_compute_fprf_##ttp(env, xt.tfld); \
+ helper_compute_fprf_##ttp(env, t.tfld); \
} \
} \
\
- putVSR(rD(opcode) + 32, &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -2902,27 +2816,24 @@ VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
* sfprf - set FPRF
*/
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = { }; \
int i; \
\
- getVSR(xB(opcode), &xb, env); \
- memset(&xt, 0, sizeof(xt)); \
- \
for (i = 0; i < nels; i++) { \
- xt.tfld = stp##_to_##ttp(xb.sfld, 1, &env->fp_status); \
- if (unlikely(stp##_is_signaling_nan(xb.sfld, \
+ t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status); \
+ if (unlikely(stp##_is_signaling_nan(xb->sfld, \
&env->fp_status))) { \
float_invalid_op_vxsnan(env, GETPC()); \
- xt.tfld = ttp##_snan_to_qnan(xt.tfld); \
+ t.tfld = ttp##_snan_to_qnan(t.tfld); \
} \
if (sfprf) { \
- helper_compute_fprf_##ttp(env, xt.tfld); \
+ helper_compute_fprf_##ttp(env, t.tfld); \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -2935,28 +2846,26 @@ VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
* xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
* added to this later.
*/
-void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
+void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xb)
{
- ppc_vsr_t xt, xb;
+ ppc_vsr_t t = { };
float_status tstat;
- getVSR(rB(opcode) + 32, &xb, env);
- memset(&xt, 0, sizeof(xt));
-
tstat = env->fp_status;
if (unlikely(Rc(opcode) != 0)) {
tstat.float_rounding_mode = float_round_to_odd;
}
- xt.VsrD(0) = float128_to_float64(xb.f128, &tstat);
+ t.VsrD(0) = float128_to_float64(xb->f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
- if (unlikely(float128_is_signaling_nan(xb.f128, &tstat))) {
+ if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) {
float_invalid_op_vxsnan(env, GETPC());
- xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
+ t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0));
}
- helper_compute_fprf_float64(env, xt.VsrD(0));
+ helper_compute_fprf_float64(env, t.VsrD(0));
- putVSR(rD(opcode) + 32, &xt, env);
+ *xt = t;
do_float_check_status(env, GETPC());
}
@@ -2987,27 +2896,24 @@ uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
* rnan - resulting NaN
*/
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
int all_flags = env->fp_status.float_exception_flags, flags; \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
- \
for (i = 0; i < nels; i++) { \
env->fp_status.float_exception_flags = 0; \
- xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld, &env->fp_status); \
+ t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \
flags = env->fp_status.float_exception_flags; \
if (unlikely(flags & float_flag_invalid)) { \
- float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb.sfld)); \
- xt.tfld = rnan; \
+ float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld)); \
+ t.tfld = rnan; \
} \
all_flags |= flags; \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
env->fp_status.float_exception_flags = all_flags; \
do_float_check_status(env, GETPC()); \
}
@@ -3040,20 +2946,18 @@ VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
* rnan - resulting NaN
*/
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = { }; \
\
- getVSR(rB(opcode) + 32, &xb, env); \
- memset(&xt, 0, sizeof(xt)); \
- \
- xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld, &env->fp_status); \
+ t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \
if (env->fp_status.float_exception_flags & float_flag_invalid) { \
- float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb.sfld)); \
- xt.tfld = rnan; \
+ float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld)); \
+ t.tfld = rnan; \
} \
\
- putVSR(rD(opcode) + 32, &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -3077,25 +2981,22 @@ VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
* sfprf - set FPRF
*/
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
\
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
- \
for (i = 0; i < nels; i++) { \
- xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
+ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
if (r2sp) { \
- xt.tfld = helper_frsp(env, xt.tfld); \
+ t.tfld = helper_frsp(env, t.tfld); \
} \
if (sfprf) { \
- helper_compute_fprf_float64(env, xt.tfld); \
+ helper_compute_fprf_float64(env, t.tfld); \
} \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -3121,17 +3022,15 @@ VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
* tfld - target vsr_t field
*/
#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = *xt; \
\
- getVSR(rB(opcode) + 32, &xb, env); \
- getVSR(rD(opcode) + 32, &xt, env); \
+ t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
+ helper_compute_fprf_##ttp(env, t.tfld); \
\
- xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
- helper_compute_fprf_##ttp(env, xt.tfld); \
- \
- putVSR(xT(opcode) + 32, &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -3155,27 +3054,25 @@ VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
* sfprf - set FPRF
*/
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t t = *xt; \
int i; \
- getVSR(xB(opcode), &xb, env); \
- getVSR(xT(opcode), &xt, env); \
\
if (rmode != FLOAT_ROUND_CURRENT) { \
set_float_rounding_mode(rmode, &env->fp_status); \
} \
\
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_signaling_nan(xb.fld, \
+ if (unlikely(tp##_is_signaling_nan(xb->fld, \
&env->fp_status))) { \
float_invalid_op_vxsnan(env, GETPC()); \
- xt.fld = tp##_snan_to_qnan(xb.fld); \
+ t.fld = tp##_snan_to_qnan(xb->fld); \
} else { \
- xt.fld = tp##_round_to_int(xb.fld, &env->fp_status); \
+ t.fld = tp##_round_to_int(xb->fld, &env->fp_status); \
} \
if (sfprf) { \
- helper_compute_fprf_float64(env, xt.fld); \
+ helper_compute_fprf_float64(env, t.fld); \
} \
} \
\
@@ -3189,7 +3086,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
env->fp_status.float_exception_flags &= ~float_flag_inexact; \
} \
\
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
do_float_check_status(env, GETPC()); \
}
@@ -3223,46 +3120,41 @@ uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
}
#define VSX_XXPERM(op, indexed) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *pcv) \
{ \
- ppc_vsr_t xt, xa, pcv, xto; \
+ ppc_vsr_t t = *xt; \
int i, idx; \
\
- getVSR(xA(opcode), &xa, env); \
- getVSR(xT(opcode), &xt, env); \
- getVSR(xB(opcode), &pcv, env); \
- \
for (i = 0; i < 16; i++) { \
- idx = pcv.VsrB(i) & 0x1F; \
+ idx = pcv->VsrB(i) & 0x1F; \
if (indexed) { \
idx = 31 - idx; \
} \
- xto.VsrB(i) = (idx <= 15) ? xa.VsrB(idx) : xt.VsrB(idx - 16); \
+ t.VsrB(i) = (idx <= 15) ? xa->VsrB(idx) \
+ : xt->VsrB(idx - 16); \
} \
- putVSR(xT(opcode), &xto, env); \
+ *xt = t; \
}
VSX_XXPERM(xxperm, 0)
VSX_XXPERM(xxpermr, 1)
-void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
+void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
{
- ppc_vsr_t xt, xb;
+ ppc_vsr_t t = { };
uint32_t exp, i, fraction;
- getVSR(xB(opcode), &xb, env);
- memset(&xt, 0, sizeof(xt));
-
for (i = 0; i < 4; i++) {
- exp = (xb.VsrW(i) >> 23) & 0xFF;
- fraction = xb.VsrW(i) & 0x7FFFFF;
+ exp = (xb->VsrW(i) >> 23) & 0xFF;
+ fraction = xb->VsrW(i) & 0x7FFFFF;
if (exp != 0 && exp != 255) {
- xt.VsrW(i) = fraction | 0x00800000;
+ t.VsrW(i) = fraction | 0x00800000;
} else {
- xt.VsrW(i) = fraction;
+ t.VsrW(i) = fraction;
}
}
- putVSR(xT(opcode), &xt, env);
+ *xt = t;
}
/*
@@ -3279,27 +3171,28 @@ void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
- ppc_vsr_t xt, xb; \
+ ppc_vsr_t *xt = &env->vsr[xT(opcode)]; \
+ ppc_vsr_t *xb = &env->vsr[xbn]; \
+ ppc_vsr_t t = { }; \
uint32_t i, sign, dcmx; \
uint32_t cc, match = 0; \
\
- getVSR(xbn, &xb, env); \
if (!scrf) { \
- memset(&xt, 0, sizeof(xt)); \
dcmx = DCMX_XV(opcode); \
} else { \
+ t = *xt; \
dcmx = DCMX(opcode); \
} \
\
for (i = 0; i < nels; i++) { \
- sign = tp##_is_neg(xb.fld); \
- if (tp##_is_any_nan(xb.fld)) { \
+ sign = tp##_is_neg(xb->fld); \
+ if (tp##_is_any_nan(xb->fld)) { \
match = extract32(dcmx, 6, 1); \
- } else if (tp##_is_infinity(xb.fld)) { \
+ } else if (tp##_is_infinity(xb->fld)) { \
match = extract32(dcmx, 4 + !sign, 1); \
- } else if (tp##_is_zero(xb.fld)) { \
+ } else if (tp##_is_zero(xb->fld)) { \
match = extract32(dcmx, 2 + !sign, 1); \
- } else if (tp##_is_zero_or_denormal(xb.fld)) { \
+ } else if (tp##_is_zero_or_denormal(xb->fld)) { \
match = extract32(dcmx, 0 + !sign, 1); \
} \
\
@@ -3309,12 +3202,12 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
env->fpscr |= cc << FPSCR_FPRF; \
env->crf[BF(opcode)] = cc; \
} else { \
- xt.tfld = match ? fld_max : 0; \
+ t.tfld = match ? fld_max : 0; \
} \
match = 0; \
} \
if (!scrf) { \
- putVSR(xT(opcode), &xt, env); \
+ *xt = t; \
} \
}
@@ -3323,31 +3216,29 @@ VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
-void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
+void helper_xststdcsp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)
{
- ppc_vsr_t xb;
uint32_t dcmx, sign, exp;
uint32_t cc, match = 0, not_sp = 0;
- getVSR(xB(opcode), &xb, env);
dcmx = DCMX(opcode);
- exp = (xb.VsrD(0) >> 52) & 0x7FF;
+ exp = (xb->VsrD(0) >> 52) & 0x7FF;
- sign = float64_is_neg(xb.VsrD(0));
- if (float64_is_any_nan(xb.VsrD(0))) {
+ sign = float64_is_neg(xb->VsrD(0));
+ if (float64_is_any_nan(xb->VsrD(0))) {
match = extract32(dcmx, 6, 1);
- } else if (float64_is_infinity(xb.VsrD(0))) {
+ } else if (float64_is_infinity(xb->VsrD(0))) {
match = extract32(dcmx, 4 + !sign, 1);
- } else if (float64_is_zero(xb.VsrD(0))) {
+ } else if (float64_is_zero(xb->VsrD(0))) {
match = extract32(dcmx, 2 + !sign, 1);
- } else if (float64_is_zero_or_denormal(xb.VsrD(0)) ||
+ } else if (float64_is_zero_or_denormal(xb->VsrD(0)) ||
(exp > 0 && exp < 0x381)) {
match = extract32(dcmx, 0 + !sign, 1);
}
- not_sp = !float64_eq(xb.VsrD(0),
+ not_sp = !float64_eq(xb->VsrD(0),
float32_to_float64(
- float64_to_float32(xb.VsrD(0), &env->fp_status),
+ float64_to_float32(xb->VsrD(0), &env->fp_status),
&env->fp_status), &env->fp_status);
cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
@@ -3356,18 +3247,16 @@ void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
env->crf[BF(opcode)] = cc;
}
-void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
+void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xb)
{
- ppc_vsr_t xb;
- ppc_vsr_t xt;
+ ppc_vsr_t t = { };
uint8_t r = Rrm(opcode);
uint8_t ex = Rc(opcode);
uint8_t rmc = RMC(opcode);
uint8_t rmode = 0;
float_status tstat;
- getVSR(rB(opcode) + 32, &xb, env);
- memset(&xt, 0, sizeof(xt));
helper_reset_fpstatus(env);
if (r == 0 && rmc == 0) {
@@ -3396,13 +3285,13 @@ void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
tstat = env->fp_status;
set_float_exception_flags(0, &tstat);
set_float_rounding_mode(rmode, &tstat);
- xt.f128 = float128_round_to_int(xb.f128, &tstat);
+ t.f128 = float128_round_to_int(xb->f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
- if (float128_is_signaling_nan(xb.f128, &tstat)) {
+ if (float128_is_signaling_nan(xb->f128, &tstat)) {
float_invalid_op_vxsnan(env, GETPC());
- xt.f128 = float128_snan_to_qnan(xt.f128);
+ t.f128 = float128_snan_to_qnan(t.f128);
}
}
@@ -3410,23 +3299,21 @@ void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
env->fp_status.float_exception_flags &= ~float_flag_inexact;
}
- helper_compute_fprf_float128(env, xt.f128);
+ helper_compute_fprf_float128(env, t.f128);
do_float_check_status(env, GETPC());
- putVSR(rD(opcode) + 32, &xt, env);
+ *xt = t;
}
-void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
+void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xb)
{
- ppc_vsr_t xb;
- ppc_vsr_t xt;
+ ppc_vsr_t t = { };
uint8_t r = Rrm(opcode);
uint8_t rmc = RMC(opcode);
uint8_t rmode = 0;
floatx80 round_res;
float_status tstat;
- getVSR(rB(opcode) + 32, &xb, env);
- memset(&xt, 0, sizeof(xt));
helper_reset_fpstatus(env);
if (r == 0 && rmc == 0) {
@@ -3455,30 +3342,28 @@ void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
tstat = env->fp_status;
set_float_exception_flags(0, &tstat);
set_float_rounding_mode(rmode, &tstat);
- round_res = float128_to_floatx80(xb.f128, &tstat);
- xt.f128 = floatx80_to_float128(round_res, &tstat);
+ round_res = float128_to_floatx80(xb->f128, &tstat);
+ t.f128 = floatx80_to_float128(round_res, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
- if (float128_is_signaling_nan(xb.f128, &tstat)) {
+ if (float128_is_signaling_nan(xb->f128, &tstat)) {
float_invalid_op_vxsnan(env, GETPC());
- xt.f128 = float128_snan_to_qnan(xt.f128);
+ t.f128 = float128_snan_to_qnan(t.f128);
}
}
- helper_compute_fprf_float128(env, xt.f128);
- putVSR(rD(opcode) + 32, &xt, env);
+ helper_compute_fprf_float128(env, t.f128);
+ *xt = t;
do_float_check_status(env, GETPC());
}
-void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
+void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xb)
{
- ppc_vsr_t xb;
- ppc_vsr_t xt;
+ ppc_vsr_t t = { };
float_status tstat;
- getVSR(rB(opcode) + 32, &xb, env);
- memset(&xt, 0, sizeof(xt));
helper_reset_fpstatus(env);
tstat = env->fp_status;
@@ -3487,34 +3372,32 @@ void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
}
set_float_exception_flags(0, &tstat);
- xt.f128 = float128_sqrt(xb.f128, &tstat);
+ t.f128 = float128_sqrt(xb->f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
- if (float128_is_signaling_nan(xb.f128, &tstat)) {
+ if (float128_is_signaling_nan(xb->f128, &tstat)) {
float_invalid_op_vxsnan(env, GETPC());
- xt.f128 = float128_snan_to_qnan(xb.f128);
- } else if (float128_is_quiet_nan(xb.f128, &tstat)) {
- xt.f128 = xb.f128;
- } else if (float128_is_neg(xb.f128) && !float128_is_zero(xb.f128)) {
+ t.f128 = float128_snan_to_qnan(xb->f128);
+ } else if (float128_is_quiet_nan(xb->f128, &tstat)) {
+ t.f128 = xb->f128;
+ } else if (float128_is_neg(xb->f128) && !float128_is_zero(xb->f128)) {
float_invalid_op_vxsqrt(env, 1, GETPC());
- xt.f128 = float128_default_nan(&env->fp_status);
+ t.f128 = float128_default_nan(&env->fp_status);
}
}
- helper_compute_fprf_float128(env, xt.f128);
- putVSR(rD(opcode) + 32, &xt, env);
+ helper_compute_fprf_float128(env, t.f128);
+ *xt = t;
do_float_check_status(env, GETPC());
}
-void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
+void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
+ ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
- ppc_vsr_t xt, xa, xb;
+ ppc_vsr_t t = *xt;
float_status tstat;
- getVSR(rA(opcode) + 32, &xa, env);
- getVSR(rB(opcode) + 32, &xb, env);
- getVSR(rD(opcode) + 32, &xt, env);
helper_reset_fpstatus(env);
tstat = env->fp_status;
@@ -3523,16 +3406,16 @@ void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
}
set_float_exception_flags(0, &tstat);
- xt.f128 = float128_sub(xa.f128, xb.f128, &tstat);
+ t.f128 = float128_sub(xa->f128, xb->f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
float_invalid_op_addsub(env, 1, GETPC(),
- float128_classify(xa.f128) |
- float128_classify(xb.f128));
+ float128_classify(xa->f128) |
+ float128_classify(xb->f128));
}
- helper_compute_fprf_float128(env, xt.f128);
- putVSR(rD(opcode) + 32, &xt, env);
+ helper_compute_fprf_float128(env, t.f128);
+ *xt = t;
do_float_check_status(env, GETPC());
}
diff --git a/target/ppc/helper.h b/target/ppc/helper.h
index 02b67a3..380c9b1 100644
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -108,6 +108,10 @@ DEF_HELPER_FLAGS_1(ftsqrt, TCG_CALL_NO_RWG_SE, i32, i64)
#define dh_ctype_avr ppc_avr_t *
#define dh_is_signed_avr dh_is_signed_ptr
+#define dh_alias_vsr ptr
+#define dh_ctype_vsr ppc_vsr_t *
+#define dh_is_signed_vsr dh_is_signed_ptr
+
DEF_HELPER_3(vavgub, void, avr, avr, avr)
DEF_HELPER_3(vavguh, void, avr, avr, avr)
DEF_HELPER_3(vavguw, void, avr, avr, avr)
@@ -275,10 +279,10 @@ DEF_HELPER_3(stvebx, void, env, avr, tl)
DEF_HELPER_3(stvehx, void, env, avr, tl)
DEF_HELPER_3(stvewx, void, env, avr, tl)
#if defined(TARGET_PPC64)
-DEF_HELPER_4(lxvl, void, env, tl, tl, tl)
-DEF_HELPER_4(lxvll, void, env, tl, tl, tl)
-DEF_HELPER_4(stxvl, void, env, tl, tl, tl)
-DEF_HELPER_4(stxvll, void, env, tl, tl, tl)
+DEF_HELPER_4(lxvl, void, env, tl, vsr, tl)
+DEF_HELPER_4(lxvll, void, env, tl, vsr, tl)
+DEF_HELPER_4(stxvl, void, env, tl, vsr, tl)
+DEF_HELPER_4(stxvll, void, env, tl, vsr, tl)
#endif
DEF_HELPER_4(vsumsws, void, env, avr, avr, avr)
DEF_HELPER_4(vsum2sws, void, env, avr, avr, avr)
@@ -361,178 +365,162 @@ DEF_HELPER_4(bcdsr, i32, avr, avr, avr, i32)
DEF_HELPER_4(bcdtrunc, i32, avr, avr, avr, i32)
DEF_HELPER_4(bcdutrunc, i32, avr, avr, avr, i32)
-DEF_HELPER_2(xsadddp, void, env, i32)
-DEF_HELPER_2(xsaddqp, void, env, i32)
-DEF_HELPER_2(xssubdp, void, env, i32)
-DEF_HELPER_2(xsmuldp, void, env, i32)
-DEF_HELPER_2(xsmulqp, void, env, i32)
-DEF_HELPER_2(xsdivdp, void, env, i32)
-DEF_HELPER_2(xsdivqp, void, env, i32)
-DEF_HELPER_2(xsredp, void, env, i32)
-DEF_HELPER_2(xssqrtdp, void, env, i32)
-DEF_HELPER_2(xsrsqrtedp, void, env, i32)
-DEF_HELPER_2(xstdivdp, void, env, i32)
-DEF_HELPER_2(xstsqrtdp, void, env, i32)
-DEF_HELPER_2(xsmaddadp, void, env, i32)
-DEF_HELPER_2(xsmaddmdp, void, env, i32)
-DEF_HELPER_2(xsmsubadp, void, env, i32)
-DEF_HELPER_2(xsmsubmdp, void, env, i32)
-DEF_HELPER_2(xsnmaddadp, void, env, i32)
-DEF_HELPER_2(xsnmaddmdp, void, env, i32)
-DEF_HELPER_2(xsnmsubadp, void, env, i32)
-DEF_HELPER_2(xsnmsubmdp, void, env, i32)
-DEF_HELPER_2(xscmpeqdp, void, env, i32)
-DEF_HELPER_2(xscmpgtdp, void, env, i32)
-DEF_HELPER_2(xscmpgedp, void, env, i32)
-DEF_HELPER_2(xscmpnedp, void, env, i32)
-DEF_HELPER_2(xscmpexpdp, void, env, i32)
-DEF_HELPER_2(xscmpexpqp, void, env, i32)
-DEF_HELPER_2(xscmpodp, void, env, i32)
-DEF_HELPER_2(xscmpudp, void, env, i32)
-DEF_HELPER_2(xscmpoqp, void, env, i32)
-DEF_HELPER_2(xscmpuqp, void, env, i32)
-DEF_HELPER_2(xsmaxdp, void, env, i32)
-DEF_HELPER_2(xsmindp, void, env, i32)
-DEF_HELPER_2(xsmaxcdp, void, env, i32)
-DEF_HELPER_2(xsmincdp, void, env, i32)
-DEF_HELPER_2(xsmaxjdp, void, env, i32)
-DEF_HELPER_2(xsminjdp, void, env, i32)
-DEF_HELPER_2(xscvdphp, void, env, i32)
-DEF_HELPER_2(xscvdpqp, void, env, i32)
-DEF_HELPER_2(xscvdpsp, void, env, i32)
+DEF_HELPER_4(xsadddp, void, env, vsr, vsr, vsr)
+DEF_HELPER_5(xsaddqp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_4(xssubdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xsmuldp, void, env, vsr, vsr, vsr)
+DEF_HELPER_5(xsmulqp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_4(xsdivdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_5(xsdivqp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_3(xsredp, void, env, vsr, vsr)
+DEF_HELPER_3(xssqrtdp, void, env, vsr, vsr)
+DEF_HELPER_3(xsrsqrtedp, void, env, vsr, vsr)
+DEF_HELPER_4(xstdivdp, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xstsqrtdp, void, env, i32, vsr)
+DEF_HELPER_5(xsmadddp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xsmsubdp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xsnmadddp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xsnmsubdp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_4(xscmpeqdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xscmpgtdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xscmpgedp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xscmpnedp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xscmpexpdp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscmpexpqp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscmpodp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscmpudp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscmpoqp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscmpuqp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xsmaxdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xsmindp, void, env, vsr, vsr, vsr)
+DEF_HELPER_5(xsmaxcdp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_5(xsmincdp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_5(xsmaxjdp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_5(xsminjdp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_3(xscvdphp, void, env, vsr, vsr)
+DEF_HELPER_4(xscvdpqp, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xscvdpsp, void, env, vsr, vsr)
DEF_HELPER_2(xscvdpspn, i64, env, i64)
-DEF_HELPER_2(xscvqpdp, void, env, i32)
-DEF_HELPER_2(xscvqpsdz, void, env, i32)
-DEF_HELPER_2(xscvqpswz, void, env, i32)
-DEF_HELPER_2(xscvqpudz, void, env, i32)
-DEF_HELPER_2(xscvqpuwz, void, env, i32)
-DEF_HELPER_2(xscvhpdp, void, env, i32)
-DEF_HELPER_2(xscvsdqp, void, env, i32)
-DEF_HELPER_2(xscvspdp, void, env, i32)
+DEF_HELPER_4(xscvqpdp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscvqpsdz, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscvqpswz, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscvqpudz, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xscvqpuwz, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xscvhpdp, void, env, vsr, vsr)
+DEF_HELPER_4(xscvsdqp, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xscvspdp, void, env, vsr, vsr)
DEF_HELPER_2(xscvspdpn, i64, env, i64)
-DEF_HELPER_2(xscvdpsxds, void, env, i32)
-DEF_HELPER_2(xscvdpsxws, void, env, i32)
-DEF_HELPER_2(xscvdpuxds, void, env, i32)
-DEF_HELPER_2(xscvdpuxws, void, env, i32)
-DEF_HELPER_2(xscvsxddp, void, env, i32)
-DEF_HELPER_2(xscvuxdsp, void, env, i32)
-DEF_HELPER_2(xscvsxdsp, void, env, i32)
-DEF_HELPER_2(xscvudqp, void, env, i32)
-DEF_HELPER_2(xscvuxddp, void, env, i32)
-DEF_HELPER_2(xststdcsp, void, env, i32)
+DEF_HELPER_3(xscvdpsxds, void, env, vsr, vsr)
+DEF_HELPER_3(xscvdpsxws, void, env, vsr, vsr)
+DEF_HELPER_3(xscvdpuxds, void, env, vsr, vsr)
+DEF_HELPER_3(xscvdpuxws, void, env, vsr, vsr)
+DEF_HELPER_3(xscvsxddp, void, env, vsr, vsr)
+DEF_HELPER_3(xscvuxdsp, void, env, vsr, vsr)
+DEF_HELPER_3(xscvsxdsp, void, env, vsr, vsr)
+DEF_HELPER_4(xscvudqp, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xscvuxddp, void, env, vsr, vsr)
+DEF_HELPER_3(xststdcsp, void, env, i32, vsr)
DEF_HELPER_2(xststdcdp, void, env, i32)
DEF_HELPER_2(xststdcqp, void, env, i32)
-DEF_HELPER_2(xsrdpi, void, env, i32)
-DEF_HELPER_2(xsrdpic, void, env, i32)
-DEF_HELPER_2(xsrdpim, void, env, i32)
-DEF_HELPER_2(xsrdpip, void, env, i32)
-DEF_HELPER_2(xsrdpiz, void, env, i32)
-DEF_HELPER_2(xsrqpi, void, env, i32)
-DEF_HELPER_2(xsrqpxp, void, env, i32)
-DEF_HELPER_2(xssqrtqp, void, env, i32)
-DEF_HELPER_2(xssubqp, void, env, i32)
+DEF_HELPER_3(xsrdpi, void, env, vsr, vsr)
+DEF_HELPER_3(xsrdpic, void, env, vsr, vsr)
+DEF_HELPER_3(xsrdpim, void, env, vsr, vsr)
+DEF_HELPER_3(xsrdpip, void, env, vsr, vsr)
+DEF_HELPER_3(xsrdpiz, void, env, vsr, vsr)
+DEF_HELPER_4(xsrqpi, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xsrqpxp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(xssqrtqp, void, env, i32, vsr, vsr)
+DEF_HELPER_5(xssubqp, void, env, i32, vsr, vsr, vsr)
-DEF_HELPER_2(xsaddsp, void, env, i32)
-DEF_HELPER_2(xssubsp, void, env, i32)
-DEF_HELPER_2(xsmulsp, void, env, i32)
-DEF_HELPER_2(xsdivsp, void, env, i32)
-DEF_HELPER_2(xsresp, void, env, i32)
+DEF_HELPER_4(xsaddsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xssubsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xsmulsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xsdivsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_3(xsresp, void, env, vsr, vsr)
DEF_HELPER_2(xsrsp, i64, env, i64)
-DEF_HELPER_2(xssqrtsp, void, env, i32)
-DEF_HELPER_2(xsrsqrtesp, void, env, i32)
-DEF_HELPER_2(xsmaddasp, void, env, i32)
-DEF_HELPER_2(xsmaddmsp, void, env, i32)
-DEF_HELPER_2(xsmsubasp, void, env, i32)
-DEF_HELPER_2(xsmsubmsp, void, env, i32)
-DEF_HELPER_2(xsnmaddasp, void, env, i32)
-DEF_HELPER_2(xsnmaddmsp, void, env, i32)
-DEF_HELPER_2(xsnmsubasp, void, env, i32)
-DEF_HELPER_2(xsnmsubmsp, void, env, i32)
+DEF_HELPER_3(xssqrtsp, void, env, vsr, vsr)
+DEF_HELPER_3(xsrsqrtesp, void, env, vsr, vsr)
+DEF_HELPER_5(xsmaddsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xsmsubsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xsnmaddsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xsnmsubsp, void, env, vsr, vsr, vsr, vsr)
-DEF_HELPER_2(xvadddp, void, env, i32)
-DEF_HELPER_2(xvsubdp, void, env, i32)
-DEF_HELPER_2(xvmuldp, void, env, i32)
-DEF_HELPER_2(xvdivdp, void, env, i32)
-DEF_HELPER_2(xvredp, void, env, i32)
-DEF_HELPER_2(xvsqrtdp, void, env, i32)
-DEF_HELPER_2(xvrsqrtedp, void, env, i32)
-DEF_HELPER_2(xvtdivdp, void, env, i32)
-DEF_HELPER_2(xvtsqrtdp, void, env, i32)
-DEF_HELPER_2(xvmaddadp, void, env, i32)
-DEF_HELPER_2(xvmaddmdp, void, env, i32)
-DEF_HELPER_2(xvmsubadp, void, env, i32)
-DEF_HELPER_2(xvmsubmdp, void, env, i32)
-DEF_HELPER_2(xvnmaddadp, void, env, i32)
-DEF_HELPER_2(xvnmaddmdp, void, env, i32)
-DEF_HELPER_2(xvnmsubadp, void, env, i32)
-DEF_HELPER_2(xvnmsubmdp, void, env, i32)
-DEF_HELPER_2(xvmaxdp, void, env, i32)
-DEF_HELPER_2(xvmindp, void, env, i32)
-DEF_HELPER_2(xvcmpeqdp, void, env, i32)
-DEF_HELPER_2(xvcmpgedp, void, env, i32)
-DEF_HELPER_2(xvcmpgtdp, void, env, i32)
-DEF_HELPER_2(xvcmpnedp, void, env, i32)
-DEF_HELPER_2(xvcvdpsp, void, env, i32)
-DEF_HELPER_2(xvcvdpsxds, void, env, i32)
-DEF_HELPER_2(xvcvdpsxws, void, env, i32)
-DEF_HELPER_2(xvcvdpuxds, void, env, i32)
-DEF_HELPER_2(xvcvdpuxws, void, env, i32)
-DEF_HELPER_2(xvcvsxddp, void, env, i32)
-DEF_HELPER_2(xvcvuxddp, void, env, i32)
-DEF_HELPER_2(xvcvsxwdp, void, env, i32)
-DEF_HELPER_2(xvcvuxwdp, void, env, i32)
-DEF_HELPER_2(xvrdpi, void, env, i32)
-DEF_HELPER_2(xvrdpic, void, env, i32)
-DEF_HELPER_2(xvrdpim, void, env, i32)
-DEF_HELPER_2(xvrdpip, void, env, i32)
-DEF_HELPER_2(xvrdpiz, void, env, i32)
+DEF_HELPER_4(xvadddp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvsubdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvmuldp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvdivdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_3(xvredp, void, env, vsr, vsr)
+DEF_HELPER_3(xvsqrtdp, void, env, vsr, vsr)
+DEF_HELPER_3(xvrsqrtedp, void, env, vsr, vsr)
+DEF_HELPER_4(xvtdivdp, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xvtsqrtdp, void, env, i32, vsr)
+DEF_HELPER_5(xvmadddp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xvmsubdp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xvnmadddp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xvnmsubdp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_4(xvmaxdp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvmindp, void, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpeqdp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpgedp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpgtdp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpnedp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_3(xvcvdpsp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvdpsxds, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvdpsxws, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvdpuxds, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvdpuxws, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvsxddp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvuxddp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvsxwdp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvuxwdp, void, env, vsr, vsr)
+DEF_HELPER_3(xvrdpi, void, env, vsr, vsr)
+DEF_HELPER_3(xvrdpic, void, env, vsr, vsr)
+DEF_HELPER_3(xvrdpim, void, env, vsr, vsr)
+DEF_HELPER_3(xvrdpip, void, env, vsr, vsr)
+DEF_HELPER_3(xvrdpiz, void, env, vsr, vsr)
-DEF_HELPER_2(xvaddsp, void, env, i32)
-DEF_HELPER_2(xvsubsp, void, env, i32)
-DEF_HELPER_2(xvmulsp, void, env, i32)
-DEF_HELPER_2(xvdivsp, void, env, i32)
-DEF_HELPER_2(xvresp, void, env, i32)
-DEF_HELPER_2(xvsqrtsp, void, env, i32)
-DEF_HELPER_2(xvrsqrtesp, void, env, i32)
-DEF_HELPER_2(xvtdivsp, void, env, i32)
-DEF_HELPER_2(xvtsqrtsp, void, env, i32)
-DEF_HELPER_2(xvmaddasp, void, env, i32)
-DEF_HELPER_2(xvmaddmsp, void, env, i32)
-DEF_HELPER_2(xvmsubasp, void, env, i32)
-DEF_HELPER_2(xvmsubmsp, void, env, i32)
-DEF_HELPER_2(xvnmaddasp, void, env, i32)
-DEF_HELPER_2(xvnmaddmsp, void, env, i32)
-DEF_HELPER_2(xvnmsubasp, void, env, i32)
-DEF_HELPER_2(xvnmsubmsp, void, env, i32)
-DEF_HELPER_2(xvmaxsp, void, env, i32)
-DEF_HELPER_2(xvminsp, void, env, i32)
-DEF_HELPER_2(xvcmpeqsp, void, env, i32)
-DEF_HELPER_2(xvcmpgesp, void, env, i32)
-DEF_HELPER_2(xvcmpgtsp, void, env, i32)
-DEF_HELPER_2(xvcmpnesp, void, env, i32)
-DEF_HELPER_2(xvcvspdp, void, env, i32)
-DEF_HELPER_2(xvcvsphp, void, env, i32)
-DEF_HELPER_2(xvcvhpsp, void, env, i32)
-DEF_HELPER_2(xvcvspsxds, void, env, i32)
-DEF_HELPER_2(xvcvspsxws, void, env, i32)
-DEF_HELPER_2(xvcvspuxds, void, env, i32)
-DEF_HELPER_2(xvcvspuxws, void, env, i32)
-DEF_HELPER_2(xvcvsxdsp, void, env, i32)
-DEF_HELPER_2(xvcvuxdsp, void, env, i32)
-DEF_HELPER_2(xvcvsxwsp, void, env, i32)
-DEF_HELPER_2(xvcvuxwsp, void, env, i32)
+DEF_HELPER_4(xvaddsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvsubsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvmulsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvdivsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_3(xvresp, void, env, vsr, vsr)
+DEF_HELPER_3(xvsqrtsp, void, env, vsr, vsr)
+DEF_HELPER_3(xvrsqrtesp, void, env, vsr, vsr)
+DEF_HELPER_4(xvtdivsp, void, env, i32, vsr, vsr)
+DEF_HELPER_3(xvtsqrtsp, void, env, i32, vsr)
+DEF_HELPER_5(xvmaddsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xvmsubsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xvnmaddsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(xvnmsubsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_4(xvmaxsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xvminsp, void, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpeqsp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpgesp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpgtsp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_FLAGS_4(xvcmpnesp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
+DEF_HELPER_3(xvcvspdp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvsphp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvhpsp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvspsxds, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvspsxws, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvspuxds, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvspuxws, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvsxdsp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvuxdsp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvsxwsp, void, env, vsr, vsr)
+DEF_HELPER_3(xvcvuxwsp, void, env, vsr, vsr)
DEF_HELPER_2(xvtstdcsp, void, env, i32)
DEF_HELPER_2(xvtstdcdp, void, env, i32)
-DEF_HELPER_2(xvrspi, void, env, i32)
-DEF_HELPER_2(xvrspic, void, env, i32)
-DEF_HELPER_2(xvrspim, void, env, i32)
-DEF_HELPER_2(xvrspip, void, env, i32)
-DEF_HELPER_2(xvrspiz, void, env, i32)
-DEF_HELPER_2(xxperm, void, env, i32)
-DEF_HELPER_2(xxpermr, void, env, i32)
-DEF_HELPER_4(xxextractuw, void, env, tl, tl, i32)
-DEF_HELPER_4(xxinsertw, void, env, tl, tl, i32)
-DEF_HELPER_2(xvxsigsp, void, env, i32)
+DEF_HELPER_3(xvrspi, void, env, vsr, vsr)
+DEF_HELPER_3(xvrspic, void, env, vsr, vsr)
+DEF_HELPER_3(xvrspim, void, env, vsr, vsr)
+DEF_HELPER_3(xvrspip, void, env, vsr, vsr)
+DEF_HELPER_3(xvrspiz, void, env, vsr, vsr)
+DEF_HELPER_4(xxperm, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xxpermr, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(xxextractuw, void, env, vsr, vsr, i32)
+DEF_HELPER_4(xxinsertw, void, env, vsr, vsr, i32)
+DEF_HELPER_3(xvxsigsp, void, env, vsr, vsr)
DEF_HELPER_2(efscfsi, i32, env, i32)
DEF_HELPER_2(efscfui, i32, env, i32)
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index 8ce89f2..5c07ef3 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -1899,41 +1899,35 @@ VEXTRACT(uw, u32)
VEXTRACT(d, u64)
#undef VEXTRACT
-void helper_xxextractuw(CPUPPCState *env, target_ulong xtn,
- target_ulong xbn, uint32_t index)
+void helper_xxextractuw(CPUPPCState *env, ppc_vsr_t *xt,
+ ppc_vsr_t *xb, uint32_t index)
{
- ppc_vsr_t xt, xb;
+ ppc_vsr_t t = { };
size_t es = sizeof(uint32_t);
uint32_t ext_index;
int i;
- getVSR(xbn, &xb, env);
- memset(&xt, 0, sizeof(xt));
-
ext_index = index;
for (i = 0; i < es; i++, ext_index++) {
- xt.VsrB(8 - es + i) = xb.VsrB(ext_index % 16);
+ t.VsrB(8 - es + i) = xb->VsrB(ext_index % 16);
}
- putVSR(xtn, &xt, env);
+ *xt = t;
}
-void helper_xxinsertw(CPUPPCState *env, target_ulong xtn,
- target_ulong xbn, uint32_t index)
+void helper_xxinsertw(CPUPPCState *env, ppc_vsr_t *xt,
+ ppc_vsr_t *xb, uint32_t index)
{
- ppc_vsr_t xt, xb;
+ ppc_vsr_t t = *xt;
size_t es = sizeof(uint32_t);
int ins_index, i = 0;
- getVSR(xbn, &xb, env);
- getVSR(xtn, &xt, env);
-
ins_index = index;
for (i = 0; i < es && ins_index < 16; i++, ins_index++) {
- xt.VsrB(ins_index) = xb.VsrB(8 - es + i);
+ t.VsrB(ins_index) = xb->VsrB(8 - es + i);
}
- putVSR(xtn, &xt, env);
+ *xt = t;
}
#define VEXT_SIGNED(name, element, cast) \
diff --git a/target/ppc/internal.h b/target/ppc/internal.h
index fb6f64e..d3d327e 100644
--- a/target/ppc/internal.h
+++ b/target/ppc/internal.h
@@ -204,18 +204,6 @@ EXTRACT_HELPER(IMM8, 11, 8);
EXTRACT_HELPER(DCMX, 16, 7);
EXTRACT_HELPER_SPLIT_3(DCMX_XV, 5, 16, 0, 1, 2, 5, 1, 6, 6);
-static inline void getVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
-{
- vsr->VsrD(0) = env->vsr[n].VsrD(0);
- vsr->VsrD(1) = env->vsr[n].VsrD(1);
-}
-
-static inline void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
-{
- env->vsr[n].VsrD(0) = vsr->VsrD(0);
- env->vsr[n].VsrD(1) = vsr->VsrD(1);
-}
-
void helper_compute_fprf_float16(CPUPPCState *env, float16 arg);
void helper_compute_fprf_float32(CPUPPCState *env, float32 arg);
void helper_compute_fprf_float128(CPUPPCState *env, float128 arg);
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 4b4989c..8a06d31 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -2650,7 +2650,7 @@ int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
return -ENOENT;
}
- strncpy(args.name, function, sizeof(args.name));
+ strncpy(args.name, function, sizeof(args.name) - 1);
return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
}
@@ -2944,3 +2944,12 @@ void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online)
kvm_set_one_reg(cs, KVM_REG_PPC_ONLINE, &online);
}
}
+
+void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
+{
+ CPUState *cs = CPU(cpu);
+
+ if (kvm_enabled()) {
+ kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &tb_offset);
+ }
+}
diff --git a/target/ppc/kvm_ppc.h b/target/ppc/kvm_ppc.h
index 45776ca..98bd7d5 100644
--- a/target/ppc/kvm_ppc.h
+++ b/target/ppc/kvm_ppc.h
@@ -80,6 +80,7 @@ bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu);
bool kvmppc_hpt_needs_host_contiguous_pages(void);
void kvm_check_mmu(PowerPCCPU *cpu, Error **errp);
void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online);
+void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset);
#else
@@ -206,6 +207,10 @@ static inline void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu,
return;
}
+static inline void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
+{
+}
+
#ifndef CONFIG_USER_ONLY
static inline bool kvmppc_spapr_use_multitce(void)
{
@@ -394,6 +399,11 @@ static inline int kvmppc_resize_hpt_commit(PowerPCCPU *cpu,
return -ENOSYS;
}
+static inline bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
+{
+ return false;
+}
+
#endif
#ifndef CONFIG_KVM
diff --git a/target/ppc/machine.c b/target/ppc/machine.c
index 5ad7b40..e82f5de 100644
--- a/target/ppc/machine.c
+++ b/target/ppc/machine.c
@@ -378,11 +378,9 @@ static int cpu_post_load(void *opaque, int version_id)
* receive the PVR it expects as a workaround.
*
*/
-#if defined(CONFIG_KVM)
if (kvmppc_pvr_workaround_required(cpu)) {
env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
}
-#endif
env->lr = env->spr[SPR_LR];
env->ctr = env->spr[SPR_CTR];
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
index 5b0f9ee..6f4ffa3 100644
--- a/target/ppc/mem_helper.c
+++ b/target/ppc/mem_helper.c
@@ -415,28 +415,28 @@ STVE(stvewx, cpu_stl_data_ra, bswap32, u32)
#define VSX_LXVL(name, lj) \
void helper_##name(CPUPPCState *env, target_ulong addr, \
- target_ulong xt_num, target_ulong rb) \
+ ppc_vsr_t *xt, target_ulong rb) \
{ \
- int i; \
- ppc_vsr_t xt; \
+ ppc_vsr_t t; \
uint64_t nb = GET_NB(rb); \
+ int i; \
\
- xt.s128 = int128_zero(); \
+ t.s128 = int128_zero(); \
if (nb) { \
nb = (nb >= 16) ? 16 : nb; \
if (msr_le && !lj) { \
for (i = 16; i > 16 - nb; i--) { \
- xt.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC()); \
+ t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC()); \
addr = addr_add(env, addr, 1); \
} \
} else { \
for (i = 0; i < nb; i++) { \
- xt.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC()); \
+ t.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC()); \
addr = addr_add(env, addr, 1); \
} \
} \
} \
- putVSR(xt_num, &xt, env); \
+ *xt = t; \
}
VSX_LXVL(lxvl, 0)
@@ -445,25 +445,24 @@ VSX_LXVL(lxvll, 1)
#define VSX_STXVL(name, lj) \
void helper_##name(CPUPPCState *env, target_ulong addr, \
- target_ulong xt_num, target_ulong rb) \
+ ppc_vsr_t *xt, target_ulong rb) \
{ \
- int i; \
- ppc_vsr_t xt; \
target_ulong nb = GET_NB(rb); \
+ int i; \
\
if (!nb) { \
return; \
} \
- getVSR(xt_num, &xt, env); \
+ \
nb = (nb >= 16) ? 16 : nb; \
if (msr_le && !lj) { \
for (i = 16; i > 16 - nb; i--) { \
- cpu_stb_data_ra(env, addr, xt.VsrB(i - 1), GETPC()); \
+ cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC()); \
addr = addr_add(env, addr, 1); \
} \
} else { \
for (i = 0; i < nb; i++) { \
- cpu_stb_data_ra(env, addr, xt.VsrB(i), GETPC()); \
+ cpu_stb_data_ra(env, addr, xt->VsrB(i), GETPC()); \
addr = addr_add(env, addr, 1); \
} \
} \
diff --git a/target/ppc/translate/vsx-impl.inc.c b/target/ppc/translate/vsx-impl.inc.c
index e9b7562..3922686 100644
--- a/target/ppc/translate/vsx-impl.inc.c
+++ b/target/ppc/translate/vsx-impl.inc.c
@@ -20,6 +20,13 @@ static inline void set_cpu_vsrl(int n, TCGv_i64 src)
tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, false));
}
+static inline TCGv_ptr gen_vsr_ptr(int reg)
+{
+ TCGv_ptr r = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(r, cpu_env, vsr_full_offset(reg));
+ return r;
+}
+
#define VSX_LOAD_SCALAR(name, operation) \
static void gen_##name(DisasContext *ctx) \
{ \
@@ -337,29 +344,30 @@ VSX_VECTOR_STORE(stxv, st_i64, 0)
VSX_VECTOR_STORE(stxvx, st_i64, 1)
#ifdef TARGET_PPC64
-#define VSX_VECTOR_LOAD_STORE_LENGTH(name) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv EA, xt; \
- \
- if (xT(ctx->opcode) < 32) { \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- } else { \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- } \
- EA = tcg_temp_new(); \
- xt = tcg_const_tl(xT(ctx->opcode)); \
- gen_set_access_type(ctx, ACCESS_INT); \
- gen_addr_register(ctx, EA); \
- gen_helper_##name(cpu_env, EA, xt, cpu_gpr[rB(ctx->opcode)]); \
- tcg_temp_free(EA); \
- tcg_temp_free(xt); \
+#define VSX_VECTOR_LOAD_STORE_LENGTH(name) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ TCGv_ptr xt; \
+ \
+ if (xT(ctx->opcode) < 32) { \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ } else { \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ } \
+ EA = tcg_temp_new(); \
+ xt = gen_vsr_ptr(xT(ctx->opcode)); \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ gen_addr_register(ctx, EA); \
+ gen_helper_##name(cpu_env, EA, xt, cpu_gpr[rB(ctx->opcode)]); \
+ tcg_temp_free(EA); \
+ tcg_temp_free_ptr(xt); \
}
VSX_VECTOR_LOAD_STORE_LENGTH(lxvl)
@@ -958,6 +966,57 @@ VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP)
VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP)
VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP)
+#define VSX_CMP(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 ignored; \
+ TCGv_ptr xt, xa, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xt = gen_vsr_ptr(xT(ctx->opcode)); \
+ xa = gen_vsr_ptr(xA(ctx->opcode)); \
+ xb = gen_vsr_ptr(xB(ctx->opcode)); \
+ if ((ctx->opcode >> (31 - 21)) & 1) { \
+ gen_helper_##name(cpu_crf[6], cpu_env, xt, xa, xb); \
+ } else { \
+ ignored = tcg_temp_new_i32(); \
+ gen_helper_##name(ignored, cpu_env, xt, xa, xb); \
+ tcg_temp_free_i32(ignored); \
+ } \
+ gen_helper_float_check_status(cpu_env); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_ptr(xa); \
+ tcg_temp_free_ptr(xb); \
+}
+
+VSX_CMP(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX)
+VSX_CMP(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX)
+VSX_CMP(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX)
+VSX_CMP(xvcmpnedp, 0x0C, 0x0F, 0, PPC2_ISA300)
+VSX_CMP(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX)
+VSX_CMP(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX)
+VSX_CMP(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX)
+VSX_CMP(xvcmpnesp, 0x0C, 0x0B, 0, PPC2_VSX)
+
+static void gen_xscvqpdp(DisasContext *ctx)
+{
+ TCGv_i32 opc;
+ TCGv_ptr xt, xb;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ opc = tcg_const_i32(ctx->opcode);
+ xt = gen_vsr_ptr(xT(ctx->opcode));
+ xb = gen_vsr_ptr(xB(ctx->opcode));
+ gen_helper_xscvqpdp(cpu_env, opc, xt, xb);
+ tcg_temp_free_i32(opc);
+ tcg_temp_free_ptr(xt);
+ tcg_temp_free_ptr(xb);
+}
+
#define GEN_VSX_HELPER_2(name, op1, op2, inval, type) \
static void gen_##name(DisasContext *ctx) \
{ \
@@ -971,6 +1030,128 @@ static void gen_##name(DisasContext *ctx) \
tcg_temp_free_i32(opc); \
}
+#define GEN_VSX_HELPER_X3(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr xt, xa, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xt = gen_vsr_ptr(xT(ctx->opcode)); \
+ xa = gen_vsr_ptr(xA(ctx->opcode)); \
+ xb = gen_vsr_ptr(xB(ctx->opcode)); \
+ gen_helper_##name(cpu_env, xt, xa, xb); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_ptr(xa); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_X2(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr xt, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xt = gen_vsr_ptr(xT(ctx->opcode)); \
+ xb = gen_vsr_ptr(xB(ctx->opcode)); \
+ gen_helper_##name(cpu_env, xt, xb); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_X2_AB(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 opc; \
+ TCGv_ptr xa, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ opc = tcg_const_i32(ctx->opcode); \
+ xa = gen_vsr_ptr(xA(ctx->opcode)); \
+ xb = gen_vsr_ptr(xB(ctx->opcode)); \
+ gen_helper_##name(cpu_env, opc, xa, xb); \
+ tcg_temp_free_i32(opc); \
+ tcg_temp_free_ptr(xa); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_X1(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 opc; \
+ TCGv_ptr xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ opc = tcg_const_i32(ctx->opcode); \
+ xb = gen_vsr_ptr(xB(ctx->opcode)); \
+ gen_helper_##name(cpu_env, opc, xb); \
+ tcg_temp_free_i32(opc); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_R3(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 opc; \
+ TCGv_ptr xt, xa, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ opc = tcg_const_i32(ctx->opcode); \
+ xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \
+ xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \
+ xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
+ gen_helper_##name(cpu_env, opc, xt, xa, xb); \
+ tcg_temp_free_i32(opc); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_ptr(xa); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_R2(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 opc; \
+ TCGv_ptr xt, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ opc = tcg_const_i32(ctx->opcode); \
+ xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \
+ xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
+ gen_helper_##name(cpu_env, opc, xt, xb); \
+ tcg_temp_free_i32(opc); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_ptr(xb); \
+}
+
+#define GEN_VSX_HELPER_R2_AB(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 opc; \
+ TCGv_ptr xa, xb; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ opc = tcg_const_i32(ctx->opcode); \
+ xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \
+ xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
+ gen_helper_##name(cpu_env, opc, xa, xb); \
+ tcg_temp_free_i32(opc); \
+ tcg_temp_free_ptr(xa); \
+ tcg_temp_free_ptr(xb); \
+}
+
#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
static void gen_##name(DisasContext *ctx) \
{ \
@@ -989,176 +1170,180 @@ static void gen_##name(DisasContext *ctx) \
tcg_temp_free_i64(t1); \
}
-GEN_VSX_HELPER_2(xsadddp, 0x00, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsaddqp, 0x04, 0x00, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xssubdp, 0x00, 0x05, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmuldp, 0x00, 0x06, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmulqp, 0x04, 0x01, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xsdivdp, 0x00, 0x07, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsdivqp, 0x04, 0x11, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xsredp, 0x14, 0x05, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmaddadp, 0x04, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmaddmdp, 0x04, 0x05, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmsubadp, 0x04, 0x06, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmsubmdp, 0x04, 0x07, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsnmaddadp, 0x04, 0x14, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsnmaddmdp, 0x04, 0x15, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsnmsubadp, 0x04, 0x16, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsnmsubmdp, 0x04, 0x17, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscmpeqdp, 0x0C, 0x00, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscmpgtdp, 0x0C, 0x01, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscmpgedp, 0x0C, 0x02, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscmpnedp, 0x0C, 0x03, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscmpexpdp, 0x0C, 0x07, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscmpexpqp, 0x04, 0x05, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsmaxcdp, 0x00, 0x10, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xsmincdp, 0x00, 0x11, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xsmaxjdp, 0x00, 0x12, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xsminjdp, 0x00, 0x12, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xsadddp, 0x00, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_R3(xsaddqp, 0x04, 0x00, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xssubdp, 0x00, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xsmuldp, 0x00, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_R3(xsmulqp, 0x04, 0x01, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xsdivdp, 0x00, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_R3(xsdivqp, 0x04, 0x11, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xsredp, 0x14, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2_AB(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_X1(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xscmpeqdp, 0x0C, 0x00, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xscmpgtdp, 0x0C, 0x01, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xscmpgedp, 0x0C, 0x02, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xscmpnedp, 0x0C, 0x03, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X2_AB(xscmpexpdp, 0x0C, 0x07, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R2_AB(xscmpexpqp, 0x04, 0x05, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X2_AB(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2_AB(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_R2_AB(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_R2_AB(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
+GEN_VSX_HELPER_R3(xsmaxcdp, 0x00, 0x10, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R3(xsmincdp, 0x00, 0x11, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R3(xsmaxjdp, 0x00, 0x12, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R3(xsminjdp, 0x00, 0x12, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
+GEN_VSX_HELPER_R2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300)
GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xscvqpdp, 0x04, 0x1A, 0x14, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvqpsdz, 0x04, 0x1A, 0x19, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvqpswz, 0x04, 0x1A, 0x09, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvqpudz, 0x04, 0x1A, 0x11, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvqpuwz, 0x04, 0x1A, 0x01, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvsdqp, 0x04, 0x1A, 0x0A, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_R2(xscvqpsdz, 0x04, 0x1A, 0x19, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xscvqpswz, 0x04, 0x1A, 0x09, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xscvqpudz, 0x04, 0x1A, 0x11, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xscvqpuwz, 0x04, 0x1A, 0x01, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xscvsdqp, 0x04, 0x1A, 0x0A, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xscvspdpn, 0x16, 0x14, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xscvudqp, 0x04, 0x1A, 0x02, PPC2_ISA300)
-GEN_VSX_HELPER_2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX)
+GEN_VSX_HELPER_R2(xscvudqp, 0x04, 0x1A, 0x02, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207)
-
-GEN_VSX_HELPER_2(xsrqpi, 0x05, 0x00, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xsrqpxp, 0x05, 0x01, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xssqrtqp, 0x04, 0x19, 0x1B, PPC2_ISA300)
-GEN_VSX_HELPER_2(xssubqp, 0x04, 0x10, 0, PPC2_ISA300)
-
-GEN_VSX_HELPER_2(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xssubsp, 0x00, 0x01, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsresp, 0x14, 0x01, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsmaddasp, 0x04, 0x00, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsmaddmsp, 0x04, 0x01, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsmsubasp, 0x04, 0x02, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsmsubmsp, 0x04, 0x03, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsnmaddasp, 0x04, 0x10, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsnmaddmsp, 0x04, 0x11, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsnmsubasp, 0x04, 0x12, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xsnmsubmsp, 0x04, 0x13, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)
-GEN_VSX_HELPER_2(xststdcsp, 0x14, 0x12, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xsrqpi, 0x05, 0x00, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xsrqpxp, 0x05, 0x01, 0, PPC2_ISA300)
+GEN_VSX_HELPER_R2(xssqrtqp, 0x04, 0x19, 0x1B, PPC2_ISA300)
+GEN_VSX_HELPER_R3(xssubqp, 0x04, 0x10, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X3(xssubsp, 0x00, 0x01, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X3(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X3(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X2(xsresp, 0x14, 0x01, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)
+GEN_VSX_HELPER_X1(xststdcsp, 0x14, 0x12, 0, PPC2_ISA300)
GEN_VSX_HELPER_2(xststdcdp, 0x14, 0x16, 0, PPC2_ISA300)
GEN_VSX_HELPER_2(xststdcqp, 0x04, 0x16, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xvadddp, 0x00, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvredp, 0x14, 0x0D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmaddadp, 0x04, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmaddmdp, 0x04, 0x0D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmsubadp, 0x04, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmsubmdp, 0x04, 0x0F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmaddadp, 0x04, 0x1C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmaddmdp, 0x04, 0x1D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmsubadp, 0x04, 0x1E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmsubmdp, 0x04, 0x1F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmindp, 0x00, 0x1D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpnedp, 0x0C, 0x0F, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX)
-
-GEN_VSX_HELPER_2(xvaddsp, 0x00, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvsubsp, 0x00, 0x09, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvresp, 0x14, 0x09, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmaddasp, 0x04, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmaddmsp, 0x04, 0x09, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmsubasp, 0x04, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmsubmsp, 0x04, 0x0B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmaddasp, 0x04, 0x18, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmaddmsp, 0x04, 0x19, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmsubasp, 0x04, 0x1A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvnmsubmsp, 0x04, 0x1B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvminsp, 0x00, 0x19, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcmpnesp, 0x0C, 0x0B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvhpsp, 0x16, 0x1D, 0x18, PPC2_ISA300)
-GEN_VSX_HELPER_2(xvcvsphp, 0x16, 0x1D, 0x19, PPC2_ISA300)
-GEN_VSX_HELPER_2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrspi, 0x12, 0x08, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvadddp, 0x00, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvredp, 0x14, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2_AB(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_X1(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvmindp, 0x00, 0x1D, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX)
+
+GEN_VSX_HELPER_X3(xvaddsp, 0x00, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvsubsp, 0x00, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvresp, 0x14, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2_AB(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_X1(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX)
+GEN_VSX_HELPER_X3(xvminsp, 0x00, 0x19, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvhpsp, 0x16, 0x1D, 0x18, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xvcvsphp, 0x16, 0x1D, 0x19, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrspi, 0x12, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_X2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtstdcsp, 0x14, 0x1A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtstdcdp, 0x14, 0x1E, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xxperm, 0x08, 0x03, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xxpermr, 0x08, 0x07, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xxperm, 0x08, 0x03, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X3(xxpermr, 0x08, 0x07, 0, PPC2_ISA300)
+
+#define GEN_VSX_HELPER_VSX_MADD(name, op1, aop, mop, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr xt, xa, b, c; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xt = gen_vsr_ptr(xT(ctx->opcode)); \
+ xa = gen_vsr_ptr(xA(ctx->opcode)); \
+ if (ctx->opcode & PPC_BIT(25)) { \
+ /* \
+ * AxT + B \
+ */ \
+ b = gen_vsr_ptr(xT(ctx->opcode)); \
+ c = gen_vsr_ptr(xB(ctx->opcode)); \
+ } else { \
+ /* \
+ * AxB + T \
+ */ \
+ b = gen_vsr_ptr(xB(ctx->opcode)); \
+ c = gen_vsr_ptr(xT(ctx->opcode)); \
+ } \
+ gen_helper_##name(cpu_env, xt, xa, b, c); \
+ tcg_temp_free_ptr(xt); \
+ tcg_temp_free_ptr(xa); \
+ tcg_temp_free_ptr(b); \
+ tcg_temp_free_ptr(c); \
+}
+
+GEN_VSX_HELPER_VSX_MADD(xsmadddp, 0x04, 0x04, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xsmsubdp, 0x04, 0x06, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xsnmadddp, 0x04, 0x14, 0x15, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xsnmsubdp, 0x04, 0x16, 0x17, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xsmaddsp, 0x04, 0x00, 0x01, 0, PPC2_VSX207)
+GEN_VSX_HELPER_VSX_MADD(xsmsubsp, 0x04, 0x02, 0x03, 0, PPC2_VSX207)
+GEN_VSX_HELPER_VSX_MADD(xsnmaddsp, 0x04, 0x10, 0x11, 0, PPC2_VSX207)
+GEN_VSX_HELPER_VSX_MADD(xsnmsubsp, 0x04, 0x12, 0x13, 0, PPC2_VSX207)
+GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvmsubdp, 0x04, 0x0E, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvnmadddp, 0x04, 0x1C, 0x1D, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvnmsubdp, 0x04, 0x1E, 0x1F, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvmaddsp, 0x04, 0x08, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvmsubsp, 0x04, 0x0A, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvnmaddsp, 0x04, 0x18, 0x19, 0, PPC2_VSX)
+GEN_VSX_HELPER_VSX_MADD(xvnmsubsp, 0x04, 0x1A, 0x1B, 0, PPC2_VSX)
static void gen_xxbrd(DisasContext *ctx)
{
@@ -1460,7 +1645,7 @@ static void gen_xxsldwi(DisasContext *ctx)
#define VSX_EXTRACT_INSERT(name) \
static void gen_##name(DisasContext *ctx) \
{ \
- TCGv xt, xb; \
+ TCGv_ptr xt, xb; \
TCGv_i32 t0; \
TCGv_i64 t1; \
uint8_t uimm = UIMM4(ctx->opcode); \
@@ -1469,8 +1654,8 @@ static void gen_##name(DisasContext *ctx) \
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- xt = tcg_const_tl(xT(ctx->opcode)); \
- xb = tcg_const_tl(xB(ctx->opcode)); \
+ xt = gen_vsr_ptr(xT(ctx->opcode)); \
+ xb = gen_vsr_ptr(xB(ctx->opcode)); \
t0 = tcg_temp_new_i32(); \
t1 = tcg_temp_new_i64(); \
/* \
@@ -1485,8 +1670,8 @@ static void gen_##name(DisasContext *ctx) \
} \
tcg_gen_movi_i32(t0, uimm); \
gen_helper_##name(cpu_env, xt, xb, t0); \
- tcg_temp_free(xb); \
- tcg_temp_free(xt); \
+ tcg_temp_free_ptr(xb); \
+ tcg_temp_free_ptr(xt); \
tcg_temp_free_i32(t0); \
tcg_temp_free_i64(t1); \
}
@@ -1813,7 +1998,7 @@ static void gen_xvxexpdp(DisasContext *ctx)
tcg_temp_free_i64(xbl);
}
-GEN_VSX_HELPER_2(xvxsigsp, 0x00, 0x04, 0, PPC2_ISA300)
+GEN_VSX_HELPER_X2(xvxsigsp, 0x00, 0x04, 0, PPC2_ISA300)
static void gen_xvxsigdp(DisasContext *ctx)
{
diff --git a/target/ppc/translate/vsx-ops.inc.c b/target/ppc/translate/vsx-ops.inc.c
index 5030c4a..7fd3942 100644
--- a/target/ppc/translate/vsx-ops.inc.c
+++ b/target/ppc/translate/vsx-ops.inc.c
@@ -63,6 +63,12 @@ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2), \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 0, PPC_NONE, fl2), \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 0, PPC_NONE, fl2)
+#define GEN_XX3FORM_NAME(name, opcname, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 2, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, opcname, 0x3C, opc2 | 3, opc3, 0, PPC_NONE, fl2)
+
#define GEN_XX2IFORM(name, opc2, opc3, fl2) \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 1, PPC_NONE, fl2), \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 1, PPC_NONE, fl2), \
@@ -182,14 +188,14 @@ GEN_XX2FORM(xssqrtdp, 0x16, 0x04, PPC2_VSX),
GEN_XX2FORM(xsrsqrtedp, 0x14, 0x04, PPC2_VSX),
GEN_XX3FORM(xstdivdp, 0x14, 0x07, PPC2_VSX),
GEN_XX2FORM(xstsqrtdp, 0x14, 0x06, PPC2_VSX),
-GEN_XX3FORM(xsmaddadp, 0x04, 0x04, PPC2_VSX),
-GEN_XX3FORM(xsmaddmdp, 0x04, 0x05, PPC2_VSX),
-GEN_XX3FORM(xsmsubadp, 0x04, 0x06, PPC2_VSX),
-GEN_XX3FORM(xsmsubmdp, 0x04, 0x07, PPC2_VSX),
-GEN_XX3FORM(xsnmaddadp, 0x04, 0x14, PPC2_VSX),
-GEN_XX3FORM(xsnmaddmdp, 0x04, 0x15, PPC2_VSX),
-GEN_XX3FORM(xsnmsubadp, 0x04, 0x16, PPC2_VSX),
-GEN_XX3FORM(xsnmsubmdp, 0x04, 0x17, PPC2_VSX),
+GEN_XX3FORM_NAME(xsmadddp, "xsmaddadp", 0x04, 0x04, PPC2_VSX),
+GEN_XX3FORM_NAME(xsmadddp, "xsmaddmdp", 0x04, 0x05, PPC2_VSX),
+GEN_XX3FORM_NAME(xsmsubdp, "xsmsubadp", 0x04, 0x06, PPC2_VSX),
+GEN_XX3FORM_NAME(xsmsubdp, "xsmsubmdp", 0x04, 0x07, PPC2_VSX),
+GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddadp", 0x04, 0x14, PPC2_VSX),
+GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddmdp", 0x04, 0x15, PPC2_VSX),
+GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubadp", 0x04, 0x16, PPC2_VSX),
+GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubmdp", 0x04, 0x17, PPC2_VSX),
GEN_XX3FORM(xscmpeqdp, 0x0C, 0x00, PPC2_ISA300),
GEN_XX3FORM(xscmpgtdp, 0x0C, 0x01, PPC2_ISA300),
GEN_XX3FORM(xscmpgedp, 0x0C, 0x02, PPC2_ISA300),
@@ -235,14 +241,14 @@ GEN_XX2FORM(xsresp, 0x14, 0x01, PPC2_VSX207),
GEN_XX2FORM(xsrsp, 0x12, 0x11, PPC2_VSX207),
GEN_XX2FORM(xssqrtsp, 0x16, 0x00, PPC2_VSX207),
GEN_XX2FORM(xsrsqrtesp, 0x14, 0x00, PPC2_VSX207),
-GEN_XX3FORM(xsmaddasp, 0x04, 0x00, PPC2_VSX207),
-GEN_XX3FORM(xsmaddmsp, 0x04, 0x01, PPC2_VSX207),
-GEN_XX3FORM(xsmsubasp, 0x04, 0x02, PPC2_VSX207),
-GEN_XX3FORM(xsmsubmsp, 0x04, 0x03, PPC2_VSX207),
-GEN_XX3FORM(xsnmaddasp, 0x04, 0x10, PPC2_VSX207),
-GEN_XX3FORM(xsnmaddmsp, 0x04, 0x11, PPC2_VSX207),
-GEN_XX3FORM(xsnmsubasp, 0x04, 0x12, PPC2_VSX207),
-GEN_XX3FORM(xsnmsubmsp, 0x04, 0x13, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsmaddsp, "xsmaddasp", 0x04, 0x00, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsmaddsp, "xsmaddmsp", 0x04, 0x01, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsmsubsp, "xsmsubasp", 0x04, 0x02, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsmsubsp, "xsmsubmsp", 0x04, 0x03, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddasp", 0x04, 0x10, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddmsp", 0x04, 0x11, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubasp", 0x04, 0x12, PPC2_VSX207),
+GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubmsp", 0x04, 0x13, PPC2_VSX207),
GEN_XX2FORM(xscvsxdsp, 0x10, 0x13, PPC2_VSX207),
GEN_XX2FORM(xscvuxdsp, 0x10, 0x12, PPC2_VSX207),
@@ -255,14 +261,14 @@ GEN_XX2FORM(xvsqrtdp, 0x16, 0x0C, PPC2_VSX),
GEN_XX2FORM(xvrsqrtedp, 0x14, 0x0C, PPC2_VSX),
GEN_XX3FORM(xvtdivdp, 0x14, 0x0F, PPC2_VSX),
GEN_XX2FORM(xvtsqrtdp, 0x14, 0x0E, PPC2_VSX),
-GEN_XX3FORM(xvmaddadp, 0x04, 0x0C, PPC2_VSX),
-GEN_XX3FORM(xvmaddmdp, 0x04, 0x0D, PPC2_VSX),
-GEN_XX3FORM(xvmsubadp, 0x04, 0x0E, PPC2_VSX),
-GEN_XX3FORM(xvmsubmdp, 0x04, 0x0F, PPC2_VSX),
-GEN_XX3FORM(xvnmaddadp, 0x04, 0x1C, PPC2_VSX),
-GEN_XX3FORM(xvnmaddmdp, 0x04, 0x1D, PPC2_VSX),
-GEN_XX3FORM(xvnmsubadp, 0x04, 0x1E, PPC2_VSX),
-GEN_XX3FORM(xvnmsubmdp, 0x04, 0x1F, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmadddp, "xvmaddadp", 0x04, 0x0C, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmadddp, "xvmaddmdp", 0x04, 0x0D, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmsubdp, "xvmsubadp", 0x04, 0x0E, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmsubdp, "xvmsubmdp", 0x04, 0x0F, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmadddp, "xvnmaddadp", 0x04, 0x1C, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmadddp, "xvnmaddmdp", 0x04, 0x1D, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmsubdp, "xvnmsubadp", 0x04, 0x1E, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmsubdp, "xvnmsubmdp", 0x04, 0x1F, PPC2_VSX),
GEN_XX3FORM(xvmaxdp, 0x00, 0x1C, PPC2_VSX),
GEN_XX3FORM(xvmindp, 0x00, 0x1D, PPC2_VSX),
GEN_XX3_RC_FORM(xvcmpeqdp, 0x0C, 0x0C, PPC2_VSX),
@@ -293,14 +299,14 @@ GEN_XX2FORM(xvsqrtsp, 0x16, 0x08, PPC2_VSX),
GEN_XX2FORM(xvrsqrtesp, 0x14, 0x08, PPC2_VSX),
GEN_XX3FORM(xvtdivsp, 0x14, 0x0B, PPC2_VSX),
GEN_XX2FORM(xvtsqrtsp, 0x14, 0x0A, PPC2_VSX),
-GEN_XX3FORM(xvmaddasp, 0x04, 0x08, PPC2_VSX),
-GEN_XX3FORM(xvmaddmsp, 0x04, 0x09, PPC2_VSX),
-GEN_XX3FORM(xvmsubasp, 0x04, 0x0A, PPC2_VSX),
-GEN_XX3FORM(xvmsubmsp, 0x04, 0x0B, PPC2_VSX),
-GEN_XX3FORM(xvnmaddasp, 0x04, 0x18, PPC2_VSX),
-GEN_XX3FORM(xvnmaddmsp, 0x04, 0x19, PPC2_VSX),
-GEN_XX3FORM(xvnmsubasp, 0x04, 0x1A, PPC2_VSX),
-GEN_XX3FORM(xvnmsubmsp, 0x04, 0x1B, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmaddsp, "xvmaddasp", 0x04, 0x08, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmaddsp, "xvmaddmsp", 0x04, 0x09, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmsubsp, "xvmsubasp", 0x04, 0x0A, PPC2_VSX),
+GEN_XX3FORM_NAME(xvmsubsp, "xvmsubmsp", 0x04, 0x0B, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmaddsp, "xvnmaddasp", 0x04, 0x18, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmaddsp, "xvnmaddmsp", 0x04, 0x19, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmsubsp, "xvnmsubasp", 0x04, 0x1A, PPC2_VSX),
+GEN_XX3FORM_NAME(xvnmsubsp, "xvnmsubmsp", 0x04, 0x1B, PPC2_VSX),
GEN_XX3FORM(xvmaxsp, 0x00, 0x18, PPC2_VSX),
GEN_XX3FORM(xvminsp, 0x00, 0x19, PPC2_VSX),
GEN_XX3_RC_FORM(xvcmpeqsp, 0x0C, 0x08, PPC2_VSX),