author     Tom Musta <tommusta@gmail.com>            2014-01-15 08:10:34 -0600
committer  Alexander Graf <agraf@suse.de>            2014-03-05 03:06:34 +0100
commit     ab9408a2d11670d15b2692ca60646d8da8158d6f
tree       bbddc035411746113111eca4e62203a03f0dbf24  /target-ppc/fpu_helper.c
parent     3fd0aadfc19ba146ed6ced27b4e6ff6afc08473a
target-ppc: VSX Stage 4: Add xsmulsp
This patch adds the VSX Scalar Multiply Single-Precision (xsmulsp)
instruction.
The existing VSX_MUL macro is modified to support rounding of the
intermediate result to single precision.
Signed-off-by: Tom Musta <tommusta@gmail.com>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'target-ppc/fpu_helper.c')
-rw-r--r--   target-ppc/fpu_helper.c | 13
 1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c
index f047640..dc9849f 100644
--- a/target-ppc/fpu_helper.c
+++ b/target-ppc/fpu_helper.c
@@ -1822,7 +1822,7 @@ VSX_ADD_SUB(xvsubsp, sub, 4, float32, f32, 0, 0)
  *   fld   - vsr_t field (f32 or f64)
  *   sfprf - set FPRF
  */
-#define VSX_MUL(op, nels, tp, fld, sfprf) \
+#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp) \
 void helper_##op(CPUPPCState *env, uint32_t opcode) \
 { \
     ppc_vsr_t xt, xa, xb; \
@@ -1849,6 +1849,10 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
             } \
         } \
  \
+        if (r2sp) { \
+            xt.fld[i] = helper_frsp(env, xt.fld[i]); \
+        } \
+ \
         if (sfprf) { \
             helper_compute_fprf(env, xt.fld[i], sfprf); \
         } \
@@ -1858,9 +1862,10 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
     helper_float_check_status(env); \
 }
 
-VSX_MUL(xsmuldp, 1, float64, f64, 1)
-VSX_MUL(xvmuldp, 2, float64, f64, 0)
-VSX_MUL(xvmulsp, 4, float32, f32, 0)
+VSX_MUL(xsmuldp, 1, float64, f64, 1, 0)
+VSX_MUL(xsmulsp, 1, float64, f64, 1, 1)
+VSX_MUL(xvmuldp, 2, float64, f64, 0, 0)
+VSX_MUL(xvmulsp, 4, float32, f32, 0, 0)
 
 /* VSX_DIV - VSX floating point divide
  *   op    - instruction mnemonic
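
As a rough illustration of what the new r2sp path adds for xsmulsp: the multiply is still carried out in double precision, and the intermediate result is then rounded to single precision before FPRF is computed. The standalone C sketch below only demonstrates the numeric effect of that extra rounding step; round_to_single and the sample values are made up for the example, and QEMU itself performs the rounding through helper_frsp and softfloat so that rounding modes and exception flags are handled correctly.

    #include <stdio.h>

    /* Hypothetical stand-in for the r2sp step: narrow the double-precision
     * intermediate to single precision and widen it back.  The plain casts
     * only illustrate the loss of precision, not QEMU's actual mechanism. */
    static double round_to_single(double x)
    {
        return (double)(float)x;
    }

    int main(void)
    {
        double a = 1.0000001;   /* arbitrary sample values for the demo */
        double b = 3.0;

        double dp = a * b;                 /* xsmuldp-style double-precision product */
        double sp = round_to_single(dp);   /* xsmulsp-style: product rounded to SP   */

        printf("double-precision product: %.17g\n", dp);
        printf("rounded to single:        %.17g\n", sp);
        return 0;
    }

With these inputs the two printed values differ in the low-order digits, which is exactly the difference between xsmuldp and xsmulsp results that the r2sp parameter introduces into the shared VSX_MUL macro.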