author     Jakub Jelinek <jakub@redhat.com>       2010-10-15 15:25:14 -0400
committer  Ulrich Drepper <drepper@gmail.com>     2010-10-15 15:25:14 -0400
commit     f3f7372de1401b99f0a318ce09caf73e42d6f022
tree       ba50115ae28fd9174fb6f57d55128453f1898ff0
parent     14d43591face21dbd4d51b5c46fa3c17740ddc78
Fix some more dbl-64/s_fma.c issues
-rw-r--r--   ChangeLog                        |   7
-rw-r--r--   math/libm-test.inc               |  10
-rw-r--r--   sysdeps/ieee754/dbl-64/s_fma.c   | 103
3 files changed, 105 insertions(+), 15 deletions(-)
diff --git a/ChangeLog b/ChangeLog
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,10 @@
+2010-10-14  Jakub Jelinek  <jakub@redhat.com>
+
+	[BZ #3268]
+	* math/libm-test.inc (fma_test): Add some more tests.
+	* sysdeps/ieee754/dbl-64/s_fma.c (__fma): Handle underflows
+	correctly.
+
 2010-10-15  Andreas Schwab  <schwab@redhat.com>
 
 	* scripts/data/localplt-s390-linux-gnu.data: New file.
diff --git a/math/libm-test.inc b/math/libm-test.inc
index 34c4fa9..1bec476 100644
--- a/math/libm-test.inc
+++ b/math/libm-test.inc
@@ -2808,6 +2808,16 @@ fma_test (void)
   TEST_fff_f (fma, 0x1.fffffffffffffp+1023, 0x1.001p+0, -0x1.fffffffffffffp+1023, 0x1.fffffffffffffp+1011);
   TEST_fff_f (fma, -0x1.fffffffffffffp+1023, 0x1.fffffffffffffp+0, 0x1.fffffffffffffp+1023, -0x1.ffffffffffffdp+1023);
   TEST_fff_f (fma, 0x1.fffffffffffffp+1023, 2.0, -0x1.fffffffffffffp+1023, 0x1.fffffffffffffp+1023);
+  TEST_fff_f (fma, 0x1.6a09e667f3bccp-538, 0x1.6a09e667f3bccp-538, 0.0, 0.0);
+  TEST_fff_f (fma, 0x1.deadbeef2feedp-495, 0x1.deadbeef2feedp-495, -0x1.bf86a5786a574p-989, 0x0.0000042625a1fp-1022);
+  TEST_fff_f (fma, 0x1.deadbeef2feedp-503, 0x1.deadbeef2feedp-503, -0x1.bf86a5786a574p-1005, 0x0.0000000004262p-1022);
+  TEST_fff_f (fma, 0x1p-537, 0x1p-538, 0x1p-1074, 0x0.0000000000002p-1022);
+  TEST_fff_f (fma, 0x1.7fffff8p-968, 0x1p-106, 0x0.000001p-1022, 0x0.0000010000001p-1022);
+  TEST_fff_f (fma, 0x1.4000004p-967, 0x1p-106, 0x0.000001p-1022, 0x0.0000010000003p-1022);
+  TEST_fff_f (fma, 0x1.4p-967, -0x1p-106, -0x0.000001p-1022, -0x0.0000010000002p-1022);
+  TEST_fff_f (fma, -0x1.19cab66d73e17p-959, 0x1.c7108a8c5ff51p-107, -0x0.80b0ad65d9b64p-1022, -0x0.80b0ad65d9d59p-1022);
+  TEST_fff_f (fma, -0x1.d2eaed6e8e9d3p-979, -0x1.4e066c62ac9ddp-63, -0x0.9245e6b003454p-1022, -0x0.9245c09c5fb5dp-1022);
+  TEST_fff_f (fma, 0x1.153d650bb9f06p-907, 0x1.2d01230d48407p-125, -0x0.b278d5acfc3cp-1022, -0x0.b22757123bbe9p-1022);
 #endif
 
   END (fma);
diff --git a/sysdeps/ieee754/dbl-64/s_fma.c b/sysdeps/ieee754/dbl-64/s_fma.c
index ca7300c..911682e 100644
--- a/sysdeps/ieee754/dbl-64/s_fma.c
+++ b/sysdeps/ieee754/dbl-64/s_fma.c
@@ -39,15 +39,20 @@ __fma (double x, double y, double z)
                         >= 0x7ff + IEEE754_DOUBLE_BIAS - DBL_MANT_DIG, 0)
       || __builtin_expect (u.ieee.exponent >= 0x7ff - DBL_MANT_DIG, 0)
       || __builtin_expect (v.ieee.exponent >= 0x7ff - DBL_MANT_DIG, 0)
-      || __builtin_expect (w.ieee.exponent >= 0x7ff - DBL_MANT_DIG, 0))
+      || __builtin_expect (w.ieee.exponent >= 0x7ff - DBL_MANT_DIG, 0)
+      || __builtin_expect (u.ieee.exponent + v.ieee.exponent
+                           <= IEEE754_DOUBLE_BIAS + DBL_MANT_DIG, 0))
     {
-      /* If x or y or z is Inf/NaN or if fma will certainly overflow,
+      /* If x or y or z is Inf/NaN, or if fma will certainly overflow,
+         or if x * y is less than half of DBL_DENORM_MIN,
          compute as x * y + z.  */
       if (u.ieee.exponent == 0x7ff
           || v.ieee.exponent == 0x7ff
           || w.ieee.exponent == 0x7ff
           || u.ieee.exponent + v.ieee.exponent
-             > 0x7ff + IEEE754_DOUBLE_BIAS)
+             > 0x7ff + IEEE754_DOUBLE_BIAS
+          || u.ieee.exponent + v.ieee.exponent
+             < IEEE754_DOUBLE_BIAS - DBL_MANT_DIG - 2)
         return x * y + z;
       if (u.ieee.exponent + v.ieee.exponent
           >= 0x7ff + IEEE754_DOUBLE_BIAS - DBL_MANT_DIG)
@@ -87,7 +92,7 @@ __fma (double x, double y, double z)
           else
             v.d *= 0x1p53;
         }
-      else
+      else if (v.ieee.exponent >= 0x7ff - DBL_MANT_DIG)
         {
           v.ieee.exponent -= DBL_MANT_DIG;
           if (u.ieee.exponent)
@@ -95,6 +100,24 @@ __fma (double x, double y, double z)
           else
             u.d *= 0x1p53;
         }
+      else /* if (u.ieee.exponent + v.ieee.exponent
+               <= IEEE754_DOUBLE_BIAS + DBL_MANT_DIG) */
+        {
+          if (u.ieee.exponent > v.ieee.exponent)
+            u.ieee.exponent += 2 * DBL_MANT_DIG;
+          else
+            v.ieee.exponent += 2 * DBL_MANT_DIG;
+          if (w.ieee.exponent <= 4 * DBL_MANT_DIG + 4)
+            {
+              if (w.ieee.exponent)
+                w.ieee.exponent += 2 * DBL_MANT_DIG;
+              else
+                w.d *= 0x1p106;
+              adjust = -1;
+            }
+          /* Otherwise x * y should just affect inexact
+             and nothing else.  */
+        }
       x = u.d;
       y = v.d;
       z = w.d;
@@ -123,18 +146,68 @@ __fma (double x, double y, double z)
   fesetround (FE_TOWARDZERO);
   /* Perform m2 + a2 addition with round to odd.  */
   u.d = a2 + m2;
-  if ((u.ieee.mantissa1 & 1) == 0 && u.ieee.exponent != 0x7ff)
-    u.ieee.mantissa1 |= fetestexcept (FE_INEXACT) != 0;
-  feupdateenv (&env);
-
-  /* Add that to a1.  */
-  a1 = a1 + u.d;
-  /* And adjust exponent if needed.  */
-  if (__builtin_expect (adjust, 0))
-    a1 *= 0x1p53;
-
-  return a1;
+  if (__builtin_expect (adjust == 0, 1))
+    {
+      if ((u.ieee.mantissa1 & 1) == 0 && u.ieee.exponent != 0x7ff)
+        u.ieee.mantissa1 |= fetestexcept (FE_INEXACT) != 0;
+      feupdateenv (&env);
+      /* Result is a1 + u.d.  */
+      return a1 + u.d;
+    }
+  else if (__builtin_expect (adjust > 0, 1))
+    {
+      if ((u.ieee.mantissa1 & 1) == 0 && u.ieee.exponent != 0x7ff)
+        u.ieee.mantissa1 |= fetestexcept (FE_INEXACT) != 0;
+      feupdateenv (&env);
+      /* Result is a1 + u.d, scaled up.  */
+      return (a1 + u.d) * 0x1p53;
+    }
+  else
+    {
+      v.d = a1 + u.d;
+      int j = fetestexcept (FE_INEXACT) != 0;
+      feupdateenv (&env);
+      /* Ensure the following computations are performed in default rounding
+         mode instead of just reusing the round to zero computation.  */
+      asm volatile ("" : "=m" (u) : "m" (u));
+      /* If a1 + u.d is exact, the only rounding happens during
+         scaling down.  */
+      if (j == 0)
+        return v.d * 0x1p-106;
+      /* If result rounded to zero is not subnormal, no double
+         rounding will occur.  */
+      if (v.ieee.exponent > 106)
+        return (a1 + u.d) * 0x1p-106;
+      /* If v.d * 0x1p-106 with round to zero is a subnormal above
+         or equal to DBL_MIN / 2, then v.d * 0x1p-106 shifts mantissa
+         down just by 1 bit, which means v.ieee.mantissa1 |= j would
+         change the round bit, not sticky or guard bit.
+         v.d * 0x1p-106 never normalizes by shifting up,
+         so round bit plus sticky bit should be already enough
+         for proper rounding.  */
+      if (v.ieee.exponent == 106)
+        {
+          /* v.ieee.mantissa1 & 2 is LSB bit of the result before rounding,
+             v.ieee.mantissa1 & 1 is the round bit and j is our sticky
+             bit.  In round-to-nearest 001 rounds down like 00,
+             011 rounds up, even though 01 rounds down (thus we need
+             to adjust), 101 rounds down like 10 and 111 rounds up
+             like 11.  */
+          if ((v.ieee.mantissa1 & 3) == 1)
+            {
+              v.d *= 0x1p-106;
+              if (v.ieee.negative)
+                return v.d - 0x1p-1074 /* __DBL_DENORM_MIN__ */;
+              else
+                return v.d + 0x1p-1074 /* __DBL_DENORM_MIN__ */;
+            }
+          else
+            return v.d * 0x1p-106;
+        }
+      v.ieee.mantissa1 |= j;
+      return v.d * 0x1p-106;
+    }
 }
 #ifndef __fma
 weak_alias (__fma, fma)
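A note on the new tests: every added TEST_fff_f entry exercises a product that lands at or below the subnormal boundary. Outside the glibc test harness, the first and fourth of the new inputs can be spot-checked with a small C99 program like the one below; the inputs and expected results are copied from the TEST_fff_f lines above, and the check only passes against a libm that contains this fix.

/* Standalone spot-check of two of the new fma underflow tests above.
   Inputs and expected results are taken from the TEST_fff_f lines;
   build with a C99 compiler and link with -lm.  */
#include <math.h>
#include <stdio.h>

int
main (void)
{
  /* x * y is just below half of DBL_DENORM_MIN, so the result is +0.  */
  double r1 = fma (0x1.6a09e667f3bccp-538, 0x1.6a09e667f3bccp-538, 0.0);
  /* x * y contributes half an ulp at the bottom of the subnormal range;
     the correctly rounded sum is 0x0.0000000000002p-1022.  */
  double r2 = fma (0x1p-537, 0x1p-538, 0x1p-1074);

  printf ("fma #1 = %a, expected %a\n", r1, 0.0);
  printf ("fma #2 = %a, expected %a\n", r2, 0x0.0000000000002p-1022);
  return !(r1 == 0.0 && r2 == 0x0.0000000000002p-1022);
}

For the second case a plain x * y + z goes wrong: the product 0x1p-1075 is exactly halfway between 0 and the smallest denormal, so it rounds to zero on its own (ties to even), and the expression then yields 0x0.0000000000001p-1022, one ulp below the correct result. That is the kind of underflow double rounding this patch fixes.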
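The core trick the patch keeps relying on is the "round to odd" addition at the top of the last hunk: add the two low halves with truncation, then force the lowest mantissa bit to 1 whenever that addition was inexact, so the fact that bits were lost survives into the final rounding. The sketch below restates that idiom on its own; it is an illustration written for this note, not glibc code, the helper name add_round_to_odd is invented, and real code additionally needs FENV_ACCESS or the kind of asm barrier used in the patch to keep the compiler from moving the arithmetic across the rounding-mode changes.

/* Illustrative sketch of round-to-odd addition: truncate toward zero,
   then make the stored significand odd if anything was lost.  Not the
   glibc implementation; add_round_to_odd is a made-up helper name.  */
#include <fenv.h>
#include <stdint.h>
#include <string.h>

static double
add_round_to_odd (double a, double b)
{
  fenv_t env;
  uint64_t bits;
  double s;
  int inexact;

  feholdexcept (&env);           /* save environment, clear flags */
  fesetround (FE_TOWARDZERO);
  s = a + b;                     /* sum truncated toward zero */
  inexact = fetestexcept (FE_INEXACT) != 0;
  feupdateenv (&env);            /* restore env, re-raise saved flags */

  memcpy (&bits, &s, sizeof bits);
  /* Setting the lowest mantissa bit moves a truncated, even result one
     ulp away from zero, i.e. onto the odd neighbour of the exact sum.
     Skip Inf/NaN, like the u.ieee.exponent != 0x7ff test above.  */
  if (inexact
      && (bits & UINT64_C (0x7ff0000000000000)) != UINT64_C (0x7ff0000000000000))
    bits |= 1;
  memcpy (&s, &bits, sizeof bits);
  return s;
}

The odd last bit ensures that a later, coarser rounding cannot land on a spurious tie, so the sum can normally be rounded once more without a double-rounding error. The new adjust < 0 path is needed because, when the scaled-down result is subnormal, the multiplication by 0x1p-106 performs one more rounding of its own; the comments in the last hunk spell out how the round and sticky bits are patched up so that this extra step still yields the correctly rounded answer.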