Diffstat (limited to 'target-ppc/fpu_helper.c')
 target-ppc/fpu_helper.c | 1294 ++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 1152 insertions(+), 142 deletions(-)
diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c index 4f60218..e7f3295 100644 --- a/target-ppc/fpu_helper.c +++ b/target-ppc/fpu_helper.c @@ -50,6 +50,16 @@ static inline int isden(float64 d) return ((u.ll >> 52) & 0x7FF) == 0; } +static inline int ppc_float32_get_unbiased_exp(float32 f) +{ + return ((f >> 23) & 0xFF) - 127; +} + +static inline int ppc_float64_get_unbiased_exp(float64 f) +{ + return ((f >> 52) & 0x7FF) - 1023; +} + uint32_t helper_compute_fprf(CPUPPCState *env, uint64_t arg, uint32_t set_fprf) { CPU_DoubleU farg; @@ -106,7 +116,8 @@ uint32_t helper_compute_fprf(CPUPPCState *env, uint64_t arg, uint32_t set_fprf) } /* Floating-point invalid operations exception */ -static inline uint64_t fload_invalid_op_excp(CPUPPCState *env, int op) +static inline uint64_t fload_invalid_op_excp(CPUPPCState *env, int op, + int set_fpcc) { uint64_t ret = 0; int ve; @@ -138,8 +149,10 @@ static inline uint64_t fload_invalid_op_excp(CPUPPCState *env, int op) case POWERPC_EXCP_FP_VXVC: /* Ordered comparison of NaN */ env->fpscr |= 1 << FPSCR_VXVC; - env->fpscr &= ~(0xF << FPSCR_FPCC); - env->fpscr |= 0x11 << FPSCR_FPCC; + if (set_fpcc) { + env->fpscr &= ~(0xF << FPSCR_FPCC); + env->fpscr |= 0x11 << FPSCR_FPCC; + } /* We must update the target FPR before raising the exception */ if (ve != 0) { env->exception_index = POWERPC_EXCP_PROGRAM; @@ -158,8 +171,10 @@ static inline uint64_t fload_invalid_op_excp(CPUPPCState *env, int op) if (ve == 0) { /* Set the result to quiet NaN */ ret = 0x7FF8000000000000ULL; - env->fpscr &= ~(0xF << FPSCR_FPCC); - env->fpscr |= 0x11 << FPSCR_FPCC; + if (set_fpcc) { + env->fpscr &= ~(0xF << FPSCR_FPCC); + env->fpscr |= 0x11 << FPSCR_FPCC; + } } break; case POWERPC_EXCP_FP_VXCVI: @@ -169,8 +184,10 @@ static inline uint64_t fload_invalid_op_excp(CPUPPCState *env, int op) if (ve == 0) { /* Set the result to quiet NaN */ ret = 0x7FF8000000000000ULL; - env->fpscr &= ~(0xF << FPSCR_FPCC); - env->fpscr |= 0x11 << FPSCR_FPCC; + if (set_fpcc) { + env->fpscr &= ~(0xF << FPSCR_FPCC); + env->fpscr |= 0x11 << FPSCR_FPCC; + } } break; } @@ -505,12 +522,12 @@ uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2) if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) && float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) { /* Magnitude subtraction of infinities */ - farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI); + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1); } else { if (unlikely(float64_is_signaling_nan(farg1.d) || float64_is_signaling_nan(farg2.d))) { /* sNaN addition */ - fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status); } @@ -529,12 +546,12 @@ uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2) if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) && float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) { /* Magnitude subtraction of infinities */ - farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI); + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1); } else { if (unlikely(float64_is_signaling_nan(farg1.d) || float64_is_signaling_nan(farg2.d))) { /* sNaN subtraction */ - fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status); } @@ -553,12 +570,12 @@ uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, 
uint64_t arg2) if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { /* Multiplication of zero by infinity */ - farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ); + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1); } else { if (unlikely(float64_is_signaling_nan(farg1.d) || float64_is_signaling_nan(farg2.d))) { /* sNaN multiplication */ - fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status); } @@ -577,15 +594,15 @@ uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2) if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) { /* Division of infinity by infinity */ - farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI); + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1); } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) { /* Division of zero by zero */ - farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ); + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1); } else { if (unlikely(float64_is_signaling_nan(farg1.d) || float64_is_signaling_nan(farg2.d))) { /* sNaN division */ - fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status); } @@ -593,107 +610,63 @@ uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2) return farg1.ll; } -/* fctiw - fctiw. */ -uint64_t helper_fctiw(CPUPPCState *env, uint64_t arg) -{ - CPU_DoubleU farg; - - farg.ll = arg; - - if (unlikely(float64_is_signaling_nan(farg.d))) { - /* sNaN conversion */ - farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN | - POWERPC_EXCP_FP_VXCVI); - } else if (unlikely(float64_is_quiet_nan(farg.d) || - float64_is_infinity(farg.d))) { - /* qNan / infinity conversion */ - farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI); - } else { - farg.ll = float64_to_int32(farg.d, &env->fp_status); - /* XXX: higher bits are not supposed to be significant. - * to make tests easier, return the same as a real PowerPC 750 - */ - farg.ll |= 0xFFF80000ULL << 32; - } - return farg.ll; -} - -/* fctiwz - fctiwz. */ -uint64_t helper_fctiwz(CPUPPCState *env, uint64_t arg) -{ - CPU_DoubleU farg; - - farg.ll = arg; - - if (unlikely(float64_is_signaling_nan(farg.d))) { - /* sNaN conversion */ - farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN | - POWERPC_EXCP_FP_VXCVI); - } else if (unlikely(float64_is_quiet_nan(farg.d) || - float64_is_infinity(farg.d))) { - /* qNan / infinity conversion */ - farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI); - } else { - farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status); - /* XXX: higher bits are not supposed to be significant. 
- * to make tests easier, return the same as a real PowerPC 750 - */ - farg.ll |= 0xFFF80000ULL << 32; - } - return farg.ll; -} +#define FPU_FCTI(op, cvt, nanval) \ +uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \ +{ \ + CPU_DoubleU farg; \ + \ + farg.ll = arg; \ + farg.ll = float64_to_##cvt(farg.d, &env->fp_status); \ + \ + if (unlikely(env->fp_status.float_exception_flags)) { \ + if (float64_is_any_nan(arg)) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1); \ + if (float64_is_signaling_nan(arg)) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \ + } \ + farg.ll = nanval; \ + } else if (env->fp_status.float_exception_flags & \ + float_flag_invalid) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1); \ + } \ + helper_float_check_status(env); \ + } \ + return farg.ll; \ + } + +FPU_FCTI(fctiw, int32, 0x80000000U) +FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U) +FPU_FCTI(fctiwu, uint32, 0x00000000U) +FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U) #if defined(TARGET_PPC64) -/* fcfid - fcfid. */ -uint64_t helper_fcfid(CPUPPCState *env, uint64_t arg) -{ - CPU_DoubleU farg; - - farg.d = int64_to_float64(arg, &env->fp_status); - return farg.ll; -} - -/* fctid - fctid. */ -uint64_t helper_fctid(CPUPPCState *env, uint64_t arg) -{ - CPU_DoubleU farg; - - farg.ll = arg; - - if (unlikely(float64_is_signaling_nan(farg.d))) { - /* sNaN conversion */ - farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN | - POWERPC_EXCP_FP_VXCVI); - } else if (unlikely(float64_is_quiet_nan(farg.d) || - float64_is_infinity(farg.d))) { - /* qNan / infinity conversion */ - farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI); - } else { - farg.ll = float64_to_int64(farg.d, &env->fp_status); - } - return farg.ll; -} - -/* fctidz - fctidz. 
*/ -uint64_t helper_fctidz(CPUPPCState *env, uint64_t arg) -{ - CPU_DoubleU farg; +FPU_FCTI(fctid, int64, 0x8000000000000000ULL) +FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL) +FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL) +FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL) +#endif - farg.ll = arg; +#if defined(TARGET_PPC64) - if (unlikely(float64_is_signaling_nan(farg.d))) { - /* sNaN conversion */ - farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN | - POWERPC_EXCP_FP_VXCVI); - } else if (unlikely(float64_is_quiet_nan(farg.d) || - float64_is_infinity(farg.d))) { - /* qNan / infinity conversion */ - farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI); - } else { - farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status); - } - return farg.ll; -} +#define FPU_FCFI(op, cvtr, is_single) \ +uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \ +{ \ + CPU_DoubleU farg; \ + \ + if (is_single) { \ + float32 tmp = cvtr(arg, &env->fp_status); \ + farg.d = float32_to_float64(tmp, &env->fp_status); \ + } else { \ + farg.d = cvtr(arg, &env->fp_status); \ + } \ + helper_float_check_status(env); \ + return farg.ll; \ +} + +FPU_FCFI(fcfid, int64_to_float64, 0) +FPU_FCFI(fcfids, int64_to_float32, 1) +FPU_FCFI(fcfidu, uint64_to_float64, 0) +FPU_FCFI(fcfidus, uint64_to_float32, 1) #endif @@ -706,24 +679,28 @@ static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg, if (unlikely(float64_is_signaling_nan(farg.d))) { /* sNaN round */ - farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN | - POWERPC_EXCP_FP_VXCVI); - } else if (unlikely(float64_is_quiet_nan(farg.d) || - float64_is_infinity(farg.d))) { - /* qNan / infinity round */ - farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI); + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); + farg.ll = arg | 0x0008000000000000ULL; } else { + int inexact = get_float_exception_flags(&env->fp_status) & + float_flag_inexact; set_float_rounding_mode(rounding_mode, &env->fp_status); farg.ll = float64_round_to_int(farg.d, &env->fp_status); /* Restore rounding mode from FPSCR */ fpscr_set_rounding_mode(env); + + /* fri* does not set FPSCR[XX] */ + if (!inexact) { + env->fp_status.float_exception_flags &= ~float_flag_inexact; + } } + helper_float_check_status(env); return farg.ll; } uint64_t helper_frin(CPUPPCState *env, uint64_t arg) { - return do_fri(env, arg, float_round_nearest_even); + return do_fri(env, arg, float_round_ties_away); } uint64_t helper_friz(CPUPPCState *env, uint64_t arg) @@ -754,13 +731,13 @@ uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2, if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { /* Multiplication of zero by infinity */ - farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ); + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1); } else { if (unlikely(float64_is_signaling_nan(farg1.d) || float64_is_signaling_nan(farg2.d) || float64_is_signaling_nan(farg3.d))) { /* sNaN operation */ - fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } /* This is the way the PowerPC specification defines it */ float128 ft0_128, ft1_128; @@ -772,7 +749,7 @@ uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2, float64_is_infinity(farg3.d) && float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) { /* Magnitude subtraction of infinities */ - farg1.ll = fload_invalid_op_excp(env, 
POWERPC_EXCP_FP_VXISI); + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1); } else { ft1_128 = float64_to_float128(farg3.d, &env->fp_status); ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status); @@ -797,13 +774,13 @@ uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2, (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { /* Multiplication of zero by infinity */ - farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ); + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1); } else { if (unlikely(float64_is_signaling_nan(farg1.d) || float64_is_signaling_nan(farg2.d) || float64_is_signaling_nan(farg3.d))) { /* sNaN operation */ - fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } /* This is the way the PowerPC specification defines it */ float128 ft0_128, ft1_128; @@ -815,7 +792,7 @@ uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2, float64_is_infinity(farg3.d) && float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) { /* Magnitude subtraction of infinities */ - farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI); + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1); } else { ft1_128 = float64_to_float128(farg3.d, &env->fp_status); ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status); @@ -838,13 +815,13 @@ uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2, if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { /* Multiplication of zero by infinity */ - farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ); + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1); } else { if (unlikely(float64_is_signaling_nan(farg1.d) || float64_is_signaling_nan(farg2.d) || float64_is_signaling_nan(farg3.d))) { /* sNaN operation */ - fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } /* This is the way the PowerPC specification defines it */ float128 ft0_128, ft1_128; @@ -856,7 +833,7 @@ uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2, float64_is_infinity(farg3.d) && float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) { /* Magnitude subtraction of infinities */ - farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI); + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1); } else { ft1_128 = float64_to_float128(farg3.d, &env->fp_status); ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status); @@ -883,13 +860,13 @@ uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2, (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { /* Multiplication of zero by infinity */ - farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ); + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1); } else { if (unlikely(float64_is_signaling_nan(farg1.d) || float64_is_signaling_nan(farg2.d) || float64_is_signaling_nan(farg3.d))) { /* sNaN operation */ - fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } /* This is the way the PowerPC specification defines it */ float128 ft0_128, ft1_128; @@ -901,7 +878,7 @@ uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2, float64_is_infinity(farg3.d) && float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) { /* Magnitude subtraction of infinities */ - farg1.ll = fload_invalid_op_excp(env, 
POWERPC_EXCP_FP_VXISI); + farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1); } else { ft1_128 = float64_to_float128(farg3.d, &env->fp_status); ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status); @@ -924,7 +901,7 @@ uint64_t helper_frsp(CPUPPCState *env, uint64_t arg) if (unlikely(float64_is_signaling_nan(farg.d))) { /* sNaN square root */ - fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } f32 = float64_to_float32(farg.d, &env->fp_status); farg.d = float32_to_float64(f32, &env->fp_status); @@ -941,11 +918,11 @@ uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg) if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) { /* Square root of a negative nonzero number */ - farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT); + farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1); } else { if (unlikely(float64_is_signaling_nan(farg.d))) { /* sNaN square root */ - fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } farg.d = float64_sqrt(farg.d, &env->fp_status); } @@ -961,7 +938,7 @@ uint64_t helper_fre(CPUPPCState *env, uint64_t arg) if (unlikely(float64_is_signaling_nan(farg.d))) { /* sNaN reciprocal */ - fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } farg.d = float64_div(float64_one, farg.d, &env->fp_status); return farg.d; @@ -977,7 +954,7 @@ uint64_t helper_fres(CPUPPCState *env, uint64_t arg) if (unlikely(float64_is_signaling_nan(farg.d))) { /* sNaN reciprocal */ - fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } farg.d = float64_div(float64_one, farg.d, &env->fp_status); f32 = float64_to_float32(farg.d, &env->fp_status); @@ -996,11 +973,11 @@ uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg) if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) { /* Reciprocal square root of a negative nonzero number */ - farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT); + farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1); } else { if (unlikely(float64_is_signaling_nan(farg.d))) { /* sNaN reciprocal square root */ - fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } farg.d = float64_sqrt(farg.d, &env->fp_status); farg.d = float64_div(float64_one, farg.d, &env->fp_status); @@ -1026,6 +1003,73 @@ uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2, } } +uint32_t helper_ftdiv(uint64_t fra, uint64_t frb) +{ + int fe_flag = 0; + int fg_flag = 0; + + if (unlikely(float64_is_infinity(fra) || + float64_is_infinity(frb) || + float64_is_zero(frb))) { + fe_flag = 1; + fg_flag = 1; + } else { + int e_a = ppc_float64_get_unbiased_exp(fra); + int e_b = ppc_float64_get_unbiased_exp(frb); + + if (unlikely(float64_is_any_nan(fra) || + float64_is_any_nan(frb))) { + fe_flag = 1; + } else if ((e_b <= -1022) || (e_b >= 1021)) { + fe_flag = 1; + } else if (!float64_is_zero(fra) && + (((e_a - e_b) >= 1023) || + ((e_a - e_b) <= -1021) || + (e_a <= -970))) { + fe_flag = 1; + } + + if (unlikely(float64_is_zero_or_denormal(frb))) { + /* XB is not zero because of the above check and */ + /* so must be denormalized. */ + fg_flag = 1; + } + } + + return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 
2 : 0); +} + +uint32_t helper_ftsqrt(uint64_t frb) +{ + int fe_flag = 0; + int fg_flag = 0; + + if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) { + fe_flag = 1; + fg_flag = 1; + } else { + int e_b = ppc_float64_get_unbiased_exp(frb); + + if (unlikely(float64_is_any_nan(frb))) { + fe_flag = 1; + } else if (unlikely(float64_is_zero(frb))) { + fe_flag = 1; + } else if (unlikely(float64_is_neg(frb))) { + fe_flag = 1; + } else if (!float64_is_zero(frb) && (e_b <= (-1022+52))) { + fe_flag = 1; + } + + if (unlikely(float64_is_zero_or_denormal(frb))) { + /* XB is not zero because of the above check and */ + /* therefore must be denormalized. */ + fg_flag = 1; + } + } + + return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); +} + void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2, uint32_t crfD) { @@ -1053,7 +1097,7 @@ void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2, && (float64_is_signaling_nan(farg1.d) || float64_is_signaling_nan(farg2.d)))) { /* sNaN comparison */ - fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } } @@ -1085,10 +1129,10 @@ void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2, float64_is_signaling_nan(farg2.d)) { /* sNaN comparison */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN | - POWERPC_EXCP_FP_VXVC); + POWERPC_EXCP_FP_VXVC, 1); } else { /* qNaN comparison */ - fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC); + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1); } } } @@ -1710,3 +1754,969 @@ uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2) /* XXX: TODO: test special values (NaN, infinites, ...) */ return helper_efdtsteq(env, op1, op2); } + +#define DECODE_SPLIT(opcode, shift1, nb1, shift2, nb2) \ + (((((opcode) >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) | \ + (((opcode) >> (shift2)) & ((1 << (nb2)) - 1))) + +#define xT(opcode) DECODE_SPLIT(opcode, 0, 1, 21, 5) +#define xA(opcode) DECODE_SPLIT(opcode, 2, 1, 16, 5) +#define xB(opcode) DECODE_SPLIT(opcode, 1, 1, 11, 5) +#define xC(opcode) DECODE_SPLIT(opcode, 3, 1, 6, 5) +#define BF(opcode) (((opcode) >> (31-8)) & 7) + +typedef union _ppc_vsr_t { + uint64_t u64[2]; + uint32_t u32[4]; + float32 f32[4]; + float64 f64[2]; +} ppc_vsr_t; + +static void getVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env) +{ + if (n < 32) { + vsr->f64[0] = env->fpr[n]; + vsr->u64[1] = env->vsr[n]; + } else { + vsr->u64[0] = env->avr[n-32].u64[0]; + vsr->u64[1] = env->avr[n-32].u64[1]; + } +} + +static void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env) +{ + if (n < 32) { + env->fpr[n] = vsr->f64[0]; + env->vsr[n] = vsr->u64[1]; + } else { + env->avr[n-32].u64[0] = vsr->u64[0]; + env->avr[n-32].u64[1] = vsr->u64[1]; + } +} + +#define float64_to_float64(x, env) x + + +/* VSX_ADD_SUB - VSX floating point add/subract + * name - instruction mnemonic + * op - operation (add or sub) + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (f32 or f64) + * sfprf - set FPRF + */ +#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp) \ +void helper_##name(CPUPPCState *env, uint32_t opcode) \ +{ \ + ppc_vsr_t xt, xa, xb; \ + int i; \ + \ + getVSR(xA(opcode), &xa, env); \ + getVSR(xB(opcode), &xb, env); \ + getVSR(xT(opcode), &xt, env); \ + helper_reset_fpstatus(env); \ + \ + for (i = 0; i < nels; i++) { \ + float_status tstat = env->fp_status; \ + set_float_exception_flags(0, &tstat); \ + xt.fld[i] = tp##_##op(xa.fld[i], xb.fld[i], &tstat); \ + 
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ + \ + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ + if (tp##_is_infinity(xa.fld[i]) && tp##_is_infinity(xb.fld[i])) {\ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf); \ + } else if (tp##_is_signaling_nan(xa.fld[i]) || \ + tp##_is_signaling_nan(xb.fld[i])) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \ + } \ + } \ + \ + if (r2sp) { \ + xt.fld[i] = helper_frsp(env, xt.fld[i]); \ + } \ + \ + if (sfprf) { \ + helper_compute_fprf(env, xt.fld[i], sfprf); \ + } \ + } \ + putVSR(xT(opcode), &xt, env); \ + helper_float_check_status(env); \ +} + +VSX_ADD_SUB(xsadddp, add, 1, float64, f64, 1, 0) +VSX_ADD_SUB(xsaddsp, add, 1, float64, f64, 1, 1) +VSX_ADD_SUB(xvadddp, add, 2, float64, f64, 0, 0) +VSX_ADD_SUB(xvaddsp, add, 4, float32, f32, 0, 0) +VSX_ADD_SUB(xssubdp, sub, 1, float64, f64, 1, 0) +VSX_ADD_SUB(xssubsp, sub, 1, float64, f64, 1, 1) +VSX_ADD_SUB(xvsubdp, sub, 2, float64, f64, 0, 0) +VSX_ADD_SUB(xvsubsp, sub, 4, float32, f32, 0, 0) + +/* VSX_MUL - VSX floating point multiply + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (f32 or f64) + * sfprf - set FPRF + */ +#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp) \ +void helper_##op(CPUPPCState *env, uint32_t opcode) \ +{ \ + ppc_vsr_t xt, xa, xb; \ + int i; \ + \ + getVSR(xA(opcode), &xa, env); \ + getVSR(xB(opcode), &xb, env); \ + getVSR(xT(opcode), &xt, env); \ + helper_reset_fpstatus(env); \ + \ + for (i = 0; i < nels; i++) { \ + float_status tstat = env->fp_status; \ + set_float_exception_flags(0, &tstat); \ + xt.fld[i] = tp##_mul(xa.fld[i], xb.fld[i], &tstat); \ + env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ + \ + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ + if ((tp##_is_infinity(xa.fld[i]) && tp##_is_zero(xb.fld[i])) || \ + (tp##_is_infinity(xb.fld[i]) && tp##_is_zero(xa.fld[i]))) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf); \ + } else if (tp##_is_signaling_nan(xa.fld[i]) || \ + tp##_is_signaling_nan(xb.fld[i])) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \ + } \ + } \ + \ + if (r2sp) { \ + xt.fld[i] = helper_frsp(env, xt.fld[i]); \ + } \ + \ + if (sfprf) { \ + helper_compute_fprf(env, xt.fld[i], sfprf); \ + } \ + } \ + \ + putVSR(xT(opcode), &xt, env); \ + helper_float_check_status(env); \ +} + +VSX_MUL(xsmuldp, 1, float64, f64, 1, 0) +VSX_MUL(xsmulsp, 1, float64, f64, 1, 1) +VSX_MUL(xvmuldp, 2, float64, f64, 0, 0) +VSX_MUL(xvmulsp, 4, float32, f32, 0, 0) + +/* VSX_DIV - VSX floating point divide + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (f32 or f64) + * sfprf - set FPRF + */ +#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp) \ +void helper_##op(CPUPPCState *env, uint32_t opcode) \ +{ \ + ppc_vsr_t xt, xa, xb; \ + int i; \ + \ + getVSR(xA(opcode), &xa, env); \ + getVSR(xB(opcode), &xb, env); \ + getVSR(xT(opcode), &xt, env); \ + helper_reset_fpstatus(env); \ + \ + for (i = 0; i < nels; i++) { \ + float_status tstat = env->fp_status; \ + set_float_exception_flags(0, &tstat); \ + xt.fld[i] = tp##_div(xa.fld[i], xb.fld[i], &tstat); \ + env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ + \ + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ + if (tp##_is_infinity(xa.fld[i]) && tp##_is_infinity(xb.fld[i])) { \ + 
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf); \ + } else if (tp##_is_zero(xa.fld[i]) && \ + tp##_is_zero(xb.fld[i])) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf); \ + } else if (tp##_is_signaling_nan(xa.fld[i]) || \ + tp##_is_signaling_nan(xb.fld[i])) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \ + } \ + } \ + \ + if (r2sp) { \ + xt.fld[i] = helper_frsp(env, xt.fld[i]); \ + } \ + \ + if (sfprf) { \ + helper_compute_fprf(env, xt.fld[i], sfprf); \ + } \ + } \ + \ + putVSR(xT(opcode), &xt, env); \ + helper_float_check_status(env); \ +} + +VSX_DIV(xsdivdp, 1, float64, f64, 1, 0) +VSX_DIV(xsdivsp, 1, float64, f64, 1, 1) +VSX_DIV(xvdivdp, 2, float64, f64, 0, 0) +VSX_DIV(xvdivsp, 4, float32, f32, 0, 0) + +/* VSX_RE - VSX floating point reciprocal estimate + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (f32 or f64) + * sfprf - set FPRF + */ +#define VSX_RE(op, nels, tp, fld, sfprf, r2sp) \ +void helper_##op(CPUPPCState *env, uint32_t opcode) \ +{ \ + ppc_vsr_t xt, xb; \ + int i; \ + \ + getVSR(xB(opcode), &xb, env); \ + getVSR(xT(opcode), &xt, env); \ + helper_reset_fpstatus(env); \ + \ + for (i = 0; i < nels; i++) { \ + if (unlikely(tp##_is_signaling_nan(xb.fld[i]))) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \ + } \ + xt.fld[i] = tp##_div(tp##_one, xb.fld[i], &env->fp_status); \ + \ + if (r2sp) { \ + xt.fld[i] = helper_frsp(env, xt.fld[i]); \ + } \ + \ + if (sfprf) { \ + helper_compute_fprf(env, xt.fld[0], sfprf); \ + } \ + } \ + \ + putVSR(xT(opcode), &xt, env); \ + helper_float_check_status(env); \ +} + +VSX_RE(xsredp, 1, float64, f64, 1, 0) +VSX_RE(xsresp, 1, float64, f64, 1, 1) +VSX_RE(xvredp, 2, float64, f64, 0, 0) +VSX_RE(xvresp, 4, float32, f32, 0, 0) + +/* VSX_SQRT - VSX floating point square root + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (f32 or f64) + * sfprf - set FPRF + */ +#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp) \ +void helper_##op(CPUPPCState *env, uint32_t opcode) \ +{ \ + ppc_vsr_t xt, xb; \ + int i; \ + \ + getVSR(xB(opcode), &xb, env); \ + getVSR(xT(opcode), &xt, env); \ + helper_reset_fpstatus(env); \ + \ + for (i = 0; i < nels; i++) { \ + float_status tstat = env->fp_status; \ + set_float_exception_flags(0, &tstat); \ + xt.fld[i] = tp##_sqrt(xb.fld[i], &tstat); \ + env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ + \ + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ + if (tp##_is_neg(xb.fld[i]) && !tp##_is_zero(xb.fld[i])) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \ + } else if (tp##_is_signaling_nan(xb.fld[i])) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \ + } \ + } \ + \ + if (r2sp) { \ + xt.fld[i] = helper_frsp(env, xt.fld[i]); \ + } \ + \ + if (sfprf) { \ + helper_compute_fprf(env, xt.fld[i], sfprf); \ + } \ + } \ + \ + putVSR(xT(opcode), &xt, env); \ + helper_float_check_status(env); \ +} + +VSX_SQRT(xssqrtdp, 1, float64, f64, 1, 0) +VSX_SQRT(xssqrtsp, 1, float64, f64, 1, 1) +VSX_SQRT(xvsqrtdp, 2, float64, f64, 0, 0) +VSX_SQRT(xvsqrtsp, 4, float32, f32, 0, 0) + +/* VSX_RSQRTE - VSX floating point reciprocal square root estimate + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (f32 or f64) + * sfprf - set FPRF + */ +#define VSX_RSQRTE(op, nels, tp, fld, sfprf, 
r2sp) \ +void helper_##op(CPUPPCState *env, uint32_t opcode) \ +{ \ + ppc_vsr_t xt, xb; \ + int i; \ + \ + getVSR(xB(opcode), &xb, env); \ + getVSR(xT(opcode), &xt, env); \ + helper_reset_fpstatus(env); \ + \ + for (i = 0; i < nels; i++) { \ + float_status tstat = env->fp_status; \ + set_float_exception_flags(0, &tstat); \ + xt.fld[i] = tp##_sqrt(xb.fld[i], &tstat); \ + xt.fld[i] = tp##_div(tp##_one, xt.fld[i], &tstat); \ + env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ + \ + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ + if (tp##_is_neg(xb.fld[i]) && !tp##_is_zero(xb.fld[i])) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \ + } else if (tp##_is_signaling_nan(xb.fld[i])) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \ + } \ + } \ + \ + if (r2sp) { \ + xt.fld[i] = helper_frsp(env, xt.fld[i]); \ + } \ + \ + if (sfprf) { \ + helper_compute_fprf(env, xt.fld[i], sfprf); \ + } \ + } \ + \ + putVSR(xT(opcode), &xt, env); \ + helper_float_check_status(env); \ +} + +VSX_RSQRTE(xsrsqrtedp, 1, float64, f64, 1, 0) +VSX_RSQRTE(xsrsqrtesp, 1, float64, f64, 1, 1) +VSX_RSQRTE(xvrsqrtedp, 2, float64, f64, 0, 0) +VSX_RSQRTE(xvrsqrtesp, 4, float32, f32, 0, 0) + +/* VSX_TDIV - VSX floating point test for divide + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (f32 or f64) + * emin - minimum unbiased exponent + * emax - maximum unbiased exponent + * nbits - number of fraction bits + */ +#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits) \ +void helper_##op(CPUPPCState *env, uint32_t opcode) \ +{ \ + ppc_vsr_t xa, xb; \ + int i; \ + int fe_flag = 0; \ + int fg_flag = 0; \ + \ + getVSR(xA(opcode), &xa, env); \ + getVSR(xB(opcode), &xb, env); \ + \ + for (i = 0; i < nels; i++) { \ + if (unlikely(tp##_is_infinity(xa.fld[i]) || \ + tp##_is_infinity(xb.fld[i]) || \ + tp##_is_zero(xb.fld[i]))) { \ + fe_flag = 1; \ + fg_flag = 1; \ + } else { \ + int e_a = ppc_##tp##_get_unbiased_exp(xa.fld[i]); \ + int e_b = ppc_##tp##_get_unbiased_exp(xb.fld[i]); \ + \ + if (unlikely(tp##_is_any_nan(xa.fld[i]) || \ + tp##_is_any_nan(xb.fld[i]))) { \ + fe_flag = 1; \ + } else if ((e_b <= emin) || (e_b >= (emax-2))) { \ + fe_flag = 1; \ + } else if (!tp##_is_zero(xa.fld[i]) && \ + (((e_a - e_b) >= emax) || \ + ((e_a - e_b) <= (emin+1)) || \ + (e_a <= (emin+nbits)))) { \ + fe_flag = 1; \ + } \ + \ + if (unlikely(tp##_is_zero_or_denormal(xb.fld[i]))) { \ + /* XB is not zero because of the above check and */ \ + /* so must be denormalized. */ \ + fg_flag = 1; \ + } \ + } \ + } \ + \ + env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 
2 : 0); \ +} + +VSX_TDIV(xstdivdp, 1, float64, f64, -1022, 1023, 52) +VSX_TDIV(xvtdivdp, 2, float64, f64, -1022, 1023, 52) +VSX_TDIV(xvtdivsp, 4, float32, f32, -126, 127, 23) + +/* VSX_TSQRT - VSX floating point test for square root + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (f32 or f64) + * emin - minimum unbiased exponent + * emax - maximum unbiased exponent + * nbits - number of fraction bits + */ +#define VSX_TSQRT(op, nels, tp, fld, emin, nbits) \ +void helper_##op(CPUPPCState *env, uint32_t opcode) \ +{ \ + ppc_vsr_t xa, xb; \ + int i; \ + int fe_flag = 0; \ + int fg_flag = 0; \ + \ + getVSR(xA(opcode), &xa, env); \ + getVSR(xB(opcode), &xb, env); \ + \ + for (i = 0; i < nels; i++) { \ + if (unlikely(tp##_is_infinity(xb.fld[i]) || \ + tp##_is_zero(xb.fld[i]))) { \ + fe_flag = 1; \ + fg_flag = 1; \ + } else { \ + int e_b = ppc_##tp##_get_unbiased_exp(xb.fld[i]); \ + \ + if (unlikely(tp##_is_any_nan(xb.fld[i]))) { \ + fe_flag = 1; \ + } else if (unlikely(tp##_is_zero(xb.fld[i]))) { \ + fe_flag = 1; \ + } else if (unlikely(tp##_is_neg(xb.fld[i]))) { \ + fe_flag = 1; \ + } else if (!tp##_is_zero(xb.fld[i]) && \ + (e_b <= (emin+nbits))) { \ + fe_flag = 1; \ + } \ + \ + if (unlikely(tp##_is_zero_or_denormal(xb.fld[i]))) { \ + /* XB is not zero because of the above check and */ \ + /* therefore must be denormalized. */ \ + fg_flag = 1; \ + } \ + } \ + } \ + \ + env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \ +} + +VSX_TSQRT(xstsqrtdp, 1, float64, f64, -1022, 52) +VSX_TSQRT(xvtsqrtdp, 2, float64, f64, -1022, 52) +VSX_TSQRT(xvtsqrtsp, 4, float32, f32, -126, 23) + +/* VSX_MADD - VSX floating point muliply/add variations + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (f32 or f64) + * maddflgs - flags for the float*muladd routine that control the + * various forms (madd, msub, nmadd, nmsub) + * afrm - A form (1=A, 0=M) + * sfprf - set FPRF + */ +#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp) \ +void helper_##op(CPUPPCState *env, uint32_t opcode) \ +{ \ + ppc_vsr_t xt_in, xa, xb, xt_out; \ + ppc_vsr_t *b, *c; \ + int i; \ + \ + if (afrm) { /* AxB + T */ \ + b = &xb; \ + c = &xt_in; \ + } else { /* AxT + B */ \ + b = &xt_in; \ + c = &xb; \ + } \ + \ + getVSR(xA(opcode), &xa, env); \ + getVSR(xB(opcode), &xb, env); \ + getVSR(xT(opcode), &xt_in, env); \ + \ + xt_out = xt_in; \ + \ + helper_reset_fpstatus(env); \ + \ + for (i = 0; i < nels; i++) { \ + float_status tstat = env->fp_status; \ + set_float_exception_flags(0, &tstat); \ + if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\ + /* Avoid double rounding errors by rounding the intermediate */ \ + /* result to odd. 
*/ \ + set_float_rounding_mode(float_round_to_zero, &tstat); \ + xt_out.fld[i] = tp##_muladd(xa.fld[i], b->fld[i], c->fld[i], \ + maddflgs, &tstat); \ + xt_out.fld[i] |= (get_float_exception_flags(&tstat) & \ + float_flag_inexact) != 0; \ + } else { \ + xt_out.fld[i] = tp##_muladd(xa.fld[i], b->fld[i], c->fld[i], \ + maddflgs, &tstat); \ + } \ + env->fp_status.float_exception_flags |= tstat.float_exception_flags; \ + \ + if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \ + if (tp##_is_signaling_nan(xa.fld[i]) || \ + tp##_is_signaling_nan(b->fld[i]) || \ + tp##_is_signaling_nan(c->fld[i])) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \ + tstat.float_exception_flags &= ~float_flag_invalid; \ + } \ + if ((tp##_is_infinity(xa.fld[i]) && tp##_is_zero(b->fld[i])) || \ + (tp##_is_zero(xa.fld[i]) && tp##_is_infinity(b->fld[i]))) { \ + xt_out.fld[i] = float64_to_##tp(fload_invalid_op_excp(env, \ + POWERPC_EXCP_FP_VXIMZ, sfprf), &env->fp_status); \ + tstat.float_exception_flags &= ~float_flag_invalid; \ + } \ + if ((tstat.float_exception_flags & float_flag_invalid) && \ + ((tp##_is_infinity(xa.fld[i]) || \ + tp##_is_infinity(b->fld[i])) && \ + tp##_is_infinity(c->fld[i]))) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf); \ + } \ + } \ + \ + if (r2sp) { \ + xt_out.fld[i] = helper_frsp(env, xt_out.fld[i]); \ + } \ + \ + if (sfprf) { \ + helper_compute_fprf(env, xt_out.fld[i], sfprf); \ + } \ + } \ + putVSR(xT(opcode), &xt_out, env); \ + helper_float_check_status(env); \ +} + +#define MADD_FLGS 0 +#define MSUB_FLGS float_muladd_negate_c +#define NMADD_FLGS float_muladd_negate_result +#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result) + +VSX_MADD(xsmaddadp, 1, float64, f64, MADD_FLGS, 1, 1, 0) +VSX_MADD(xsmaddmdp, 1, float64, f64, MADD_FLGS, 0, 1, 0) +VSX_MADD(xsmsubadp, 1, float64, f64, MSUB_FLGS, 1, 1, 0) +VSX_MADD(xsmsubmdp, 1, float64, f64, MSUB_FLGS, 0, 1, 0) +VSX_MADD(xsnmaddadp, 1, float64, f64, NMADD_FLGS, 1, 1, 0) +VSX_MADD(xsnmaddmdp, 1, float64, f64, NMADD_FLGS, 0, 1, 0) +VSX_MADD(xsnmsubadp, 1, float64, f64, NMSUB_FLGS, 1, 1, 0) +VSX_MADD(xsnmsubmdp, 1, float64, f64, NMSUB_FLGS, 0, 1, 0) + +VSX_MADD(xsmaddasp, 1, float64, f64, MADD_FLGS, 1, 1, 1) +VSX_MADD(xsmaddmsp, 1, float64, f64, MADD_FLGS, 0, 1, 1) +VSX_MADD(xsmsubasp, 1, float64, f64, MSUB_FLGS, 1, 1, 1) +VSX_MADD(xsmsubmsp, 1, float64, f64, MSUB_FLGS, 0, 1, 1) +VSX_MADD(xsnmaddasp, 1, float64, f64, NMADD_FLGS, 1, 1, 1) +VSX_MADD(xsnmaddmsp, 1, float64, f64, NMADD_FLGS, 0, 1, 1) +VSX_MADD(xsnmsubasp, 1, float64, f64, NMSUB_FLGS, 1, 1, 1) +VSX_MADD(xsnmsubmsp, 1, float64, f64, NMSUB_FLGS, 0, 1, 1) + +VSX_MADD(xvmaddadp, 2, float64, f64, MADD_FLGS, 1, 0, 0) +VSX_MADD(xvmaddmdp, 2, float64, f64, MADD_FLGS, 0, 0, 0) +VSX_MADD(xvmsubadp, 2, float64, f64, MSUB_FLGS, 1, 0, 0) +VSX_MADD(xvmsubmdp, 2, float64, f64, MSUB_FLGS, 0, 0, 0) +VSX_MADD(xvnmaddadp, 2, float64, f64, NMADD_FLGS, 1, 0, 0) +VSX_MADD(xvnmaddmdp, 2, float64, f64, NMADD_FLGS, 0, 0, 0) +VSX_MADD(xvnmsubadp, 2, float64, f64, NMSUB_FLGS, 1, 0, 0) +VSX_MADD(xvnmsubmdp, 2, float64, f64, NMSUB_FLGS, 0, 0, 0) + +VSX_MADD(xvmaddasp, 4, float32, f32, MADD_FLGS, 1, 0, 0) +VSX_MADD(xvmaddmsp, 4, float32, f32, MADD_FLGS, 0, 0, 0) +VSX_MADD(xvmsubasp, 4, float32, f32, MSUB_FLGS, 1, 0, 0) +VSX_MADD(xvmsubmsp, 4, float32, f32, MSUB_FLGS, 0, 0, 0) +VSX_MADD(xvnmaddasp, 4, float32, f32, NMADD_FLGS, 1, 0, 0) +VSX_MADD(xvnmaddmsp, 4, float32, f32, NMADD_FLGS, 0, 0, 0) +VSX_MADD(xvnmsubasp, 4, float32, f32, NMSUB_FLGS, 1, 0, 
0) +VSX_MADD(xvnmsubmsp, 4, float32, f32, NMSUB_FLGS, 0, 0, 0) + +#define VSX_SCALAR_CMP(op, ordered) \ +void helper_##op(CPUPPCState *env, uint32_t opcode) \ +{ \ + ppc_vsr_t xa, xb; \ + uint32_t cc = 0; \ + \ + getVSR(xA(opcode), &xa, env); \ + getVSR(xB(opcode), &xb, env); \ + \ + if (unlikely(float64_is_any_nan(xa.f64[0]) || \ + float64_is_any_nan(xb.f64[0]))) { \ + if (float64_is_signaling_nan(xa.f64[0]) || \ + float64_is_signaling_nan(xb.f64[0])) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \ + } \ + if (ordered) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \ + } \ + cc = 1; \ + } else { \ + if (float64_lt(xa.f64[0], xb.f64[0], &env->fp_status)) { \ + cc = 8; \ + } else if (!float64_le(xa.f64[0], xb.f64[0], &env->fp_status)) { \ + cc = 4; \ + } else { \ + cc = 2; \ + } \ + } \ + \ + env->fpscr &= ~(0x0F << FPSCR_FPRF); \ + env->fpscr |= cc << FPSCR_FPRF; \ + env->crf[BF(opcode)] = cc; \ + \ + helper_float_check_status(env); \ +} + +VSX_SCALAR_CMP(xscmpodp, 1) +VSX_SCALAR_CMP(xscmpudp, 0) + +#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL) +#define float32_snan_to_qnan(x) ((x) | 0x00400000) + +/* VSX_MAX_MIN - VSX floating point maximum/minimum + * name - instruction mnemonic + * op - operation (max or min) + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (f32 or f64) + */ +#define VSX_MAX_MIN(name, op, nels, tp, fld) \ +void helper_##name(CPUPPCState *env, uint32_t opcode) \ +{ \ + ppc_vsr_t xt, xa, xb; \ + int i; \ + \ + getVSR(xA(opcode), &xa, env); \ + getVSR(xB(opcode), &xb, env); \ + getVSR(xT(opcode), &xt, env); \ + \ + for (i = 0; i < nels; i++) { \ + xt.fld[i] = tp##_##op(xa.fld[i], xb.fld[i], &env->fp_status); \ + if (unlikely(tp##_is_signaling_nan(xa.fld[i]) || \ + tp##_is_signaling_nan(xb.fld[i]))) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \ + } \ + } \ + \ + putVSR(xT(opcode), &xt, env); \ + helper_float_check_status(env); \ +} + +VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, f64) +VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, f64) +VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, f32) +VSX_MAX_MIN(xsmindp, minnum, 1, float64, f64) +VSX_MAX_MIN(xvmindp, minnum, 2, float64, f64) +VSX_MAX_MIN(xvminsp, minnum, 4, float32, f32) + +/* VSX_CMP - VSX floating point compare + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * tp - type (float32 or float64) + * fld - vsr_t field (f32 or f64) + * cmp - comparison operation + * svxvc - set VXVC bit + */ +#define VSX_CMP(op, nels, tp, fld, cmp, svxvc) \ +void helper_##op(CPUPPCState *env, uint32_t opcode) \ +{ \ + ppc_vsr_t xt, xa, xb; \ + int i; \ + int all_true = 1; \ + int all_false = 1; \ + \ + getVSR(xA(opcode), &xa, env); \ + getVSR(xB(opcode), &xb, env); \ + getVSR(xT(opcode), &xt, env); \ + \ + for (i = 0; i < nels; i++) { \ + if (unlikely(tp##_is_any_nan(xa.fld[i]) || \ + tp##_is_any_nan(xb.fld[i]))) { \ + if (tp##_is_signaling_nan(xa.fld[i]) || \ + tp##_is_signaling_nan(xb.fld[i])) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \ + } \ + if (svxvc) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \ + } \ + xt.fld[i] = 0; \ + all_true = 0; \ + } else { \ + if (tp##_##cmp(xb.fld[i], xa.fld[i], &env->fp_status) == 1) { \ + xt.fld[i] = -1; \ + all_false = 0; \ + } else { \ + xt.fld[i] = 0; \ + all_true = 0; \ + } \ + } \ + } \ + \ + putVSR(xT(opcode), &xt, env); \ + if ((opcode >> (31-21)) & 1) { \ + env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 
0x2 : 0); \ + } \ + helper_float_check_status(env); \ + } + +VSX_CMP(xvcmpeqdp, 2, float64, f64, eq, 0) +VSX_CMP(xvcmpgedp, 2, float64, f64, le, 1) +VSX_CMP(xvcmpgtdp, 2, float64, f64, lt, 1) +VSX_CMP(xvcmpeqsp, 4, float32, f32, eq, 0) +VSX_CMP(xvcmpgesp, 4, float32, f32, le, 1) +VSX_CMP(xvcmpgtsp, 4, float32, f32, lt, 1) + +#if defined(HOST_WORDS_BIGENDIAN) +#define JOFFSET 0 +#else +#define JOFFSET 1 +#endif + +/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * stp - source type (float32 or float64) + * ttp - target type (float32 or float64) + * sfld - source vsr_t field + * tfld - target vsr_t field (f32 or f64) + * sfprf - set FPRF + */ +#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf) \ +void helper_##op(CPUPPCState *env, uint32_t opcode) \ +{ \ + ppc_vsr_t xt, xb; \ + int i; \ + \ + getVSR(xB(opcode), &xb, env); \ + getVSR(xT(opcode), &xt, env); \ + \ + for (i = 0; i < nels; i++) { \ + int j = 2*i + JOFFSET; \ + xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \ + if (unlikely(stp##_is_signaling_nan(xb.sfld))) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \ + xt.tfld = ttp##_snan_to_qnan(xt.tfld); \ + } \ + if (sfprf) { \ + helper_compute_fprf(env, ttp##_to_float64(xt.tfld, \ + &env->fp_status), sfprf); \ + } \ + } \ + \ + putVSR(xT(opcode), &xt, env); \ + helper_float_check_status(env); \ +} + +VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, f64[i], f32[j], 1) +VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, f32[j], f64[i], 1) +VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, f64[i], f32[j], 0) +VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, f32[j], f64[i], 0) + +uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb) +{ + float_status tstat = env->fp_status; + set_float_exception_flags(0, &tstat); + + return (uint64_t)float64_to_float32(xb, &tstat) << 32; +} + +uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb) +{ + float_status tstat = env->fp_status; + set_float_exception_flags(0, &tstat); + + return float32_to_float64(xb >> 32, &tstat); +} + +/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * stp - source type (float32 or float64) + * ttp - target type (int32, uint32, int64 or uint64) + * sfld - source vsr_t field + * tfld - target vsr_t field + * jdef - definition of the j index (i or 2*i) + * rnan - resulting NaN + */ +#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, jdef, rnan) \ +void helper_##op(CPUPPCState *env, uint32_t opcode) \ +{ \ + ppc_vsr_t xt, xb; \ + int i; \ + \ + getVSR(xB(opcode), &xb, env); \ + getVSR(xT(opcode), &xt, env); \ + \ + for (i = 0; i < nels; i++) { \ + int j = jdef; \ + if (unlikely(stp##_is_any_nan(xb.sfld))) { \ + if (stp##_is_signaling_nan(xb.sfld)) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \ + } \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \ + xt.tfld = rnan; \ + } else { \ + xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \ + if (env->fp_status.float_exception_flags & float_flag_invalid) { \ + fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \ + } \ + } \ + } \ + \ + putVSR(xT(opcode), &xt, env); \ + helper_float_check_status(env); \ +} + +VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, f64[j], u64[i], i, \ + 0x8000000000000000ULL) +VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, f64[i], u32[j], \ + 2*i + JOFFSET, 0x80000000U) +VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, 
uint64, f64[j], u64[i], i, 0ULL) +VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, f64[i], u32[j], \ + 2*i + JOFFSET, 0U) +VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, f64[j], u64[i], i, \ + 0x8000000000000000ULL) +VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, f64[i], u32[j], \ + 2*i + JOFFSET, 0x80000000U) +VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, f64[j], u64[i], i, 0ULL) +VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, f64[i], u32[j], \ + 2*i + JOFFSET, 0U) +VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, f32[j], u64[i], \ + 2*i + JOFFSET, 0x8000000000000000ULL) +VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, f32[j], u32[j], i, \ + 0x80000000U) +VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, f32[j], u64[i], \ + 2*i + JOFFSET, 0ULL) +VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, f32[j], u32[i], i, 0U) + +/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion + * op - instruction mnemonic + * nels - number of elements (1, 2 or 4) + * stp - source type (int32, uint32, int64 or uint64) + * ttp - target type (float32 or float64) + * sfld - source vsr_t field + * tfld - target vsr_t field + * jdef - definition of the j index (i or 2*i) + * sfprf - set FPRF + */ +#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, jdef, sfprf, r2sp) \ +void helper_##op(CPUPPCState *env, uint32_t opcode) \ +{ \ + ppc_vsr_t xt, xb; \ + int i; \ + \ + getVSR(xB(opcode), &xb, env); \ + getVSR(xT(opcode), &xt, env); \ + \ + for (i = 0; i < nels; i++) { \ + int j = jdef; \ + xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \ + if (r2sp) { \ + xt.tfld = helper_frsp(env, xt.tfld); \ + } \ + if (sfprf) { \ + helper_compute_fprf(env, xt.tfld, sfprf); \ + } \ + } \ + \ + putVSR(xT(opcode), &xt, env); \ + helper_float_check_status(env); \ +} + +VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, u64[j], f64[i], i, 1, 0) +VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, u64[j], f64[i], i, 1, 0) +VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, u64[j], f64[i], i, 1, 1) +VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, u64[j], f64[i], i, 1, 1) +VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, u64[j], f64[i], i, 0, 0) +VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, u64[j], f64[i], i, 0, 0) +VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, u32[j], f64[i], \ + 2*i + JOFFSET, 0, 0) +VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, u32[j], f64[i], \ + 2*i + JOFFSET, 0, 0) +VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, u64[i], f32[j], \ + 2*i + JOFFSET, 0, 0) +VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, u64[i], f32[j], \ + 2*i + JOFFSET, 0, 0) +VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, u32[j], f32[i], i, 0, 0) +VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, u32[j], f32[i], i, 0, 0) + +/* For "use current rounding mode", define a value that will not be one of + * the existing rounding model enums. 
+ */
+#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
+                             float_round_up + float_round_to_zero)
+
+/* VSX_ROUND - VSX floating point round
+ *   op    - instruction mnemonic
+ *   nels  - number of elements (1, 2 or 4)
+ *   tp    - type (float32 or float64)
+ *   fld   - vsr_t field (f32 or f64)
+ *   rmode - rounding mode
+ *   sfprf - set FPRF
+ */
+#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
+void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
+{                                                                      \
+    ppc_vsr_t xt, xb;                                                  \
+    int i;                                                             \
+    getVSR(xB(opcode), &xb, env);                                      \
+    getVSR(xT(opcode), &xt, env);                                      \
+                                                                       \
+    if (rmode != FLOAT_ROUND_CURRENT) {                                \
+        set_float_rounding_mode(rmode, &env->fp_status);               \
+    }                                                                  \
+                                                                       \
+    for (i = 0; i < nels; i++) {                                       \
+        if (unlikely(tp##_is_signaling_nan(xb.fld[i]))) {              \
+            fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);     \
+            xt.fld[i] = tp##_snan_to_qnan(xb.fld[i]);                  \
+        } else {                                                       \
+            xt.fld[i] = tp##_round_to_int(xb.fld[i], &env->fp_status); \
+        }                                                              \
+        if (sfprf) {                                                   \
+            helper_compute_fprf(env, xt.fld[i], sfprf);                \
+        }                                                              \
+    }                                                                  \
+                                                                       \
+    /* If this is not a "use current rounding mode" instruction,       \
+     * then inhibit setting of the XX bit and restore rounding         \
+     * mode from FPSCR */                                              \
+    if (rmode != FLOAT_ROUND_CURRENT) {                                \
+        fpscr_set_rounding_mode(env);                                  \
+        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
+    }                                                                  \
+                                                                       \
+    putVSR(xT(opcode), &xt, env);                                      \
+    helper_float_check_status(env);                                    \
+}
+
+VSX_ROUND(xsrdpi, 1, float64, f64, float_round_nearest_even, 1)
+VSX_ROUND(xsrdpic, 1, float64, f64, FLOAT_ROUND_CURRENT, 1)
+VSX_ROUND(xsrdpim, 1, float64, f64, float_round_down, 1)
+VSX_ROUND(xsrdpip, 1, float64, f64, float_round_up, 1)
+VSX_ROUND(xsrdpiz, 1, float64, f64, float_round_to_zero, 1)
+
+VSX_ROUND(xvrdpi, 2, float64, f64, float_round_nearest_even, 0)
+VSX_ROUND(xvrdpic, 2, float64, f64, FLOAT_ROUND_CURRENT, 0)
+VSX_ROUND(xvrdpim, 2, float64, f64, float_round_down, 0)
+VSX_ROUND(xvrdpip, 2, float64, f64, float_round_up, 0)
+VSX_ROUND(xvrdpiz, 2, float64, f64, float_round_to_zero, 0)
+
+VSX_ROUND(xvrspi, 4, float32, f32, float_round_nearest_even, 0)
+VSX_ROUND(xvrspic, 4, float32, f32, FLOAT_ROUND_CURRENT, 0)
+VSX_ROUND(xvrspim, 4, float32, f32, float_round_down, 0)
+VSX_ROUND(xvrspip, 4, float32, f32, float_round_up, 0)
+VSX_ROUND(xvrspiz, 4, float32, f32, float_round_to_zero, 0)
+
+uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
+{
+    helper_reset_fpstatus(env);
+
+    uint64_t xt = helper_frsp(env, xb);
+
+    helper_compute_fprf(env, xt, 1);
+    helper_float_check_status(env);
+    return xt;
+}
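The new ppc_float32_get_unbiased_exp/ppc_float64_get_unbiased_exp helpers simply extract the raw IEEE 754 exponent field from the bit pattern and subtract the bias (127 for single, 1023 for double). A minimal standalone check of the float64 case (hypothetical test harness, not QEMU code):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Same arithmetic as ppc_float64_get_unbiased_exp in the patch. */
    static int unbiased_exp64(uint64_t f)
    {
        return (int)((f >> 52) & 0x7FF) - 1023;
    }

    int main(void)
    {
        double d = 8.0;             /* 8.0 = 1.0 * 2^3 */
        uint64_t bits;
        memcpy(&bits, &d, sizeof(bits));
        assert(unbiased_exp64(bits) == 3);
        return 0;
    }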
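The FPU_FCTI rewrite inverts the old control flow: it performs the conversion unconditionally and then classifies any failure from the softfloat exception flags, instead of pre-screening for NaN and infinity. A rough host-side analogue using <fenv.h> (fcti_like is a made-up name; a strict build also wants #pragma STDC FENV_ACCESS ON, and unlike fctiw this sketch does not saturate out-of-range inputs):

    #include <fenv.h>
    #include <math.h>
    #include <stdint.h>

    static int32_t fcti_like(double d)
    {
        feclearexcept(FE_ALL_EXCEPT);
        long r = lrint(d);                /* convert first, classify afterwards */
        if (fetestexcept(FE_INVALID) && isnan(d)) {
            return INT32_MIN;             /* NaN -> 0x80000000, as fctiw does */
        }
        return (int32_t)r;
    }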
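helper_frin now rounds with float_round_ties_away instead of float_round_nearest_even, and the fri* helpers no longer set FPSCR[XX]. The tie-breaking difference is easy to see with host math (nearbyint assumes the default FE_TONEAREST mode):

    #include <assert.h>
    #include <math.h>

    int main(void)
    {
        assert(round(2.5) == 3.0);      /* ties away from zero, as frin now does */
        assert(nearbyint(2.5) == 2.0);  /* ties to even, the old behaviour */
        return 0;
    }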
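DECODE_SPLIT reassembles the 6-bit VSX register numbers that the ISA splits across the instruction word: nb1 bits at shift1 become the high part and nb2 bits at shift2 the low part. A self-contained copy with a worked xT example (the extra parentheses around nb2 are a small hardening over the version in the patch):

    #include <assert.h>
    #include <stdint.h>

    #define DECODE_SPLIT(opcode, shift1, nb1, shift2, nb2)          \
        (((((opcode) >> (shift1)) & ((1 << (nb1)) - 1)) << (nb2)) | \
         (((opcode) >> (shift2)) & ((1 << (nb2)) - 1)))

    #define xT(opcode) DECODE_SPLIT(opcode, 0, 1, 21, 5)

    int main(void)
    {
        /* TX bit (opcode bit 0) set, T field = 3: VSR number 32 + 3 = 35 */
        uint32_t opcode = (3u << 21) | 1u;
        assert(xT(opcode) == 35);
        return 0;
    }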
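getVSR/putVSR encode how the 64 VSX registers overlay existing machine state: for VSR 0-31, doubleword 0 is FPR[n] and doubleword 1 lives in env->vsr[n], while VSR 32-63 alias the 128-bit Altivec registers. A simplified model (MiniEnv and get_vsr are illustrative stand-ins for the QEMU types):

    #include <stdint.h>

    typedef struct {
        uint64_t fpr[32];     /* doubleword 0 of VSRs 0..31 */
        uint64_t vsr[32];     /* doubleword 1 of VSRs 0..31 */
        uint64_t avr[32][2];  /* VSRs 32..63 (Altivec registers) */
    } MiniEnv;

    static void get_vsr(const MiniEnv *env, int n, uint64_t out[2])
    {
        if (n < 32) {
            out[0] = env->fpr[n];
            out[1] = env->vsr[n];
        } else {
            out[0] = env->avr[n - 32][0];
            out[1] = env->avr[n - 32][1];
        }
    }

    int main(void)
    {
        MiniEnv env = {{0}};
        uint64_t v[2];
        env.fpr[3] = 0x4008000000000000ULL;   /* 3.0 as raw bits */
        get_vsr(&env, 3, v);
        return v[0] == 0x4008000000000000ULL ? 0 : 1;
    }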
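In the single-precision scalar madd forms (r2sp under round-to-nearest), the intermediate double-precision result is computed with round-to-zero and the inexact sticky flag is OR-ed into bit 0, i.e. round-to-odd, so the later narrowing to single precision cannot double-round. An integer analogue of the sticky trick (shr_round_to_odd is a made-up helper):

    #include <assert.h>
    #include <stdint.h>

    /* Shift right, keeping a record of the discarded bits in the LSB. */
    static uint64_t shr_round_to_odd(uint64_t x, unsigned s)
    {
        uint64_t lost = x & ((1ULL << s) - 1);  /* bits shifted out */
        return (x >> s) | (lost != 0);          /* sticky -> bit 0 */
    }

    int main(void)
    {
        assert(shr_round_to_odd(0x101, 4) == 0x11);  /* inexact: LSB forced on */
        assert(shr_round_to_odd(0x100, 4) == 0x10);  /* exact: unchanged */
        return 0;
    }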
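float64_snan_to_qnan/float32_snan_to_qnan quiet a signaling NaN by setting the most significant fraction bit (bit 51 for double, bit 22 for single), leaving the payload otherwise intact. A quick check:

    #include <assert.h>
    #include <stdint.h>

    #define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)

    int main(void)
    {
        uint64_t snan = 0x7FF4000000000000ULL;   /* a signaling NaN */
        assert(float64_snan_to_qnan(snan) == 0x7FFC000000000000ULL);
        return 0;
    }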
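When the record bit is set (instruction bit 21, tested as (opcode >> (31-21)) & 1), the predicate forms of the VSX vector compares summarize the per-lane results in CR6: bit 3 means every lane matched, bit 1 means no lane did. Sketch with a made-up cr6_from_lanes helper:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t cr6_from_lanes(const uint32_t *mask, int n)
    {
        int all_true = 1, all_false = 1;
        int i;

        for (i = 0; i < n; i++) {
            if (mask[i]) {
                all_false = 0;
            } else {
                all_true = 0;
            }
        }
        return (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);
    }

    int main(void)
    {
        uint32_t all[4]  = { ~0u, ~0u, ~0u, ~0u };
        uint32_t some[4] = { ~0u, 0, 0, 0 };
        assert(cr6_from_lanes(all, 4) == 0x8);
        assert(cr6_from_lanes(some, 4) == 0x0);
        return 0;
    }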