author    Tom Musta <tommusta@gmail.com>  2014-01-15 08:10:33 -0600
committer Alexander Graf <agraf@suse.de>  2014-03-05 03:06:34 +0100
commit    3fd0aadfc19ba146ed6ced27b4e6ff6afc08473a (patch)
tree      a94707d2019eb699948c641601bc0fac5adc76c4 /target-ppc/fpu_helper.c
parent    e16a626b8280cfcf3de385c59ddd1fde2840c2c3 (diff)
target-ppc: VSX Stage 4: Add xsaddsp and xssubsp
This patch adds the VSX Scalar Add Single-Precision (xsaddsp) and VSX
Scalar Subtract Single-Precision (xssubsp) instructions. The existing
VSX_ADD_SUB macro is modified to support rounding of the intermediate
result to single precision.

Signed-off-by: Tom Musta <tommusta@gmail.com>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Alexander Graf <agraf@suse.de>
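For context, here is a minimal standalone sketch (plain C99, not QEMU code;
the round_to_sp helper is hypothetical) of the behaviour the new r2sp
argument selects: the operation is carried out in double precision, and the
intermediate result is then rounded to single precision, analogous to what
helper_frsp does with softfloat in QEMU:

#include <stdio.h>

/* Hypothetical stand-in for helper_frsp: round a double-precision
 * value to the nearest single-precision value, kept in double format.
 * QEMU does this with softfloat rather than host arithmetic. */
static double round_to_sp(double x)
{
    return (double)(float)x;
}

int main(void)
{
    double a = 1.0;
    double b = 0x1p-40;              /* fits in the double sum, but is
                                        below single precision */
    double dp = a + b;               /* like xsadddp (r2sp = 0) */
    double sp = round_to_sp(a + b);  /* like xsaddsp (r2sp = 1) */
    printf("dp = %.17g\n", dp);      /* prints 1.0000000000009095 */
    printf("sp = %.17g\n", sp);      /* prints 1 */
    return 0;
}

This is also why xsaddsp and xssubsp are instantiated below with float64/f64
and r2sp=1 rather than with float32: the PowerPC FPRs hold single-precision
values in double format, so only the final rounding step differs from
xsadddp/xssubdp.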
Diffstat (limited to 'target-ppc/fpu_helper.c')
-rw-r--r--  target-ppc/fpu_helper.c  20
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c
index 3165ef0..f047640 100644
--- a/target-ppc/fpu_helper.c
+++ b/target-ppc/fpu_helper.c
@@ -1768,7 +1768,7 @@ static void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
* fld - vsr_t field (f32 or f64)
* sfprf - set FPRF
*/
-#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf) \
+#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp) \
void helper_##name(CPUPPCState *env, uint32_t opcode) \
{ \
ppc_vsr_t xt, xa, xb; \
@@ -1794,6 +1794,10 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
} \
} \
\
+ if (r2sp) { \
+ xt.fld[i] = helper_frsp(env, xt.fld[i]); \
+ } \
+ \
if (sfprf) { \
helper_compute_fprf(env, xt.fld[i], sfprf); \
} \
@@ -1802,12 +1806,14 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
helper_float_check_status(env); \
}
-VSX_ADD_SUB(xsadddp, add, 1, float64, f64, 1)
-VSX_ADD_SUB(xvadddp, add, 2, float64, f64, 0)
-VSX_ADD_SUB(xvaddsp, add, 4, float32, f32, 0)
-VSX_ADD_SUB(xssubdp, sub, 1, float64, f64, 1)
-VSX_ADD_SUB(xvsubdp, sub, 2, float64, f64, 0)
-VSX_ADD_SUB(xvsubsp, sub, 4, float32, f32, 0)
+VSX_ADD_SUB(xsadddp, add, 1, float64, f64, 1, 0)
+VSX_ADD_SUB(xsaddsp, add, 1, float64, f64, 1, 1)
+VSX_ADD_SUB(xvadddp, add, 2, float64, f64, 0, 0)
+VSX_ADD_SUB(xvaddsp, add, 4, float32, f32, 0, 0)
+VSX_ADD_SUB(xssubdp, sub, 1, float64, f64, 1, 0)
+VSX_ADD_SUB(xssubsp, sub, 1, float64, f64, 1, 1)
+VSX_ADD_SUB(xvsubdp, sub, 2, float64, f64, 0, 0)
+VSX_ADD_SUB(xvsubsp, sub, 4, float32, f32, 0, 0)
/* VSX_MUL - VSX floating point multiply
* op - instruction mnemonic