author     Tom Musta <tommusta@gmail.com>              2014-03-31 16:04:03 -0500
committer  Alexander Graf <agraf@suse.de>              2014-04-08 11:20:04 +0200
commit     6cd7db3d92d44344d75feb432e3ece8587e1afd4 (patch)
tree       aaad5bd5ad86a9fc6304c34623f8aa5fbc4f0861
parent     d1dec5ef550802f76ffb8cdec50e6d50467e877e (diff)
target-ppc: Correct VSX Integer to FP Conversion
This patch corrects the VSX integer to floating point conversion instructions
by using the endian-correct accessors. The auxiliary "j" index used by the
existing macros is now obsolete and is removed. The JOFFSET preprocessor macro
is also obsolete and removed.

Signed-off-by: Tom Musta <tommusta@gmail.com>
Tested-by: Tom Musta <tommusta@gmail.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
-rw-r--r--  target-ppc/fpu_helper.c | 37
1 file changed, 13 insertions(+), 24 deletions(-)
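
For context on the accessors the patch switches to: VsrD() and VsrW() address a
VSR by its architectural doubleword/word index and expand to the correct slot of
the underlying union for the host's byte order, which is what makes the
hand-maintained "j" index and the JOFFSET macro unnecessary. Below is a minimal,
self-contained sketch of how such accessors can be defined; the union layout and
the HOST_WORDS_BIGENDIAN index mapping shown here are assumptions modeled on
QEMU's conventions, not code copied from its headers.

/*
 * Minimal sketch, not QEMU's actual headers: a 128-bit VSR viewed through
 * overlapping arrays, plus accessor macros that hide host byte order.  The
 * names VsrW()/VsrD() and the u32[]/u64[] fields match what the diff uses;
 * the exact union layout and the HOST_WORDS_BIGENDIAN index mapping are
 * assumptions modeled on QEMU's conventions.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef union {
    uint8_t  u8[16];
    uint32_t u32[4];
    uint64_t u64[2];
} ppc_vsr_t;

#if defined(HOST_WORDS_BIGENDIAN)
/* On a big-endian host the architectural element index maps directly. */
#define VsrW(i) u32[(i)]
#define VsrD(i) u64[(i)]
#else
/* On a little-endian host the elements sit in reverse storage order, so the
 * index is mirrored; callers never carry a separate j/JOFFSET correction. */
#define VsrW(i) u32[3 - (i)]
#define VsrD(i) u64[1 - (i)]
#endif

int main(void)
{
    ppc_vsr_t xt = { .u8 = { 0 } };

    /* Store into architectural word 0; the macro selects the correct
     * storage slot whichever way the host lays the union out. */
    xt.VsrW(0) = 0x11223344;

    printf("VSR word 0 = 0x%08" PRIx32 "\n", xt.VsrW(0));
    return 0;
}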
diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c
index abba703..c6f484f 100644
--- a/target-ppc/fpu_helper.c
+++ b/target-ppc/fpu_helper.c
@@ -2487,12 +2487,6 @@ VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1)
-#if defined(HOST_WORDS_BIGENDIAN)
-#define JOFFSET 0
-#else
-#define JOFFSET 1
-#endif
-
/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
@@ -2614,7 +2608,7 @@ VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
* jdef - definition of the j index (i or 2*i)
* sfprf - set FPRF
*/
-#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, jdef, sfprf, r2sp) \
+#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
ppc_vsr_t xt, xb; \
@@ -2624,7 +2618,6 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
getVSR(xT(opcode), &xt, env); \
\
for (i = 0; i < nels; i++) { \
- int j = jdef; \
xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
if (r2sp) { \
xt.tfld = helper_frsp(env, xt.tfld); \
@@ -2638,22 +2631,18 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
helper_float_check_status(env); \
}
-VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, u64[j], f64[i], i, 1, 0)
-VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, u64[j], f64[i], i, 1, 0)
-VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, u64[j], f64[i], i, 1, 1)
-VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, u64[j], f64[i], i, 1, 1)
-VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, u64[j], f64[i], i, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, u64[j], f64[i], i, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, u32[j], f64[i], \
- 2*i + JOFFSET, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, u32[j], f64[i], \
- 2*i + JOFFSET, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, u64[i], f32[j], \
- 2*i + JOFFSET, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, u64[i], f32[j], \
- 2*i + JOFFSET, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, u32[j], f32[i], i, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, u32[j], f32[i], i, 0, 0)
+VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
+VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
+VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
+VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
+VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
/* For "use current rounding mode", define a value that will not be one of
* the existing rounding model enums.