author    Richard Henderson <richard.henderson@linaro.org>  2018-10-03 09:35:51 -0500
committer Richard Henderson <richard.henderson@linaro.org>  2018-10-05 12:57:41 -0500
commit    5dfbc9e4903c0121140f2945f05df48cea72dd82 (patch)
tree      a3919c99dfd9c4144513dc134836ffc902977a72 /fpu
parent    0019d5c3a18c31604fb55f9cec3ceb13999c4866 (diff)
softfloat: Fix division
The __udiv_qrnnd primitive that we nicked from gmp requires its inputs to be
normalized. We were not doing that. Because the inputs are nearly normalized
already, finishing that is trivial.

Replace div128To64 with a "proper" udiv_qrnnd, so that this remains a
reusable primitive.

Fixes: cf07323d494
Fixes: https://bugs.launchpad.net/qemu/+bug/1793119
Tested-by: Emilio G. Cota <cota@braap.org>
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
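As a point of reference (not part of the patch), the semantics of the udiv_qrnnd call used in the diff below can be sketched with a compiler-provided 128-bit type; the name udiv_qrnnd_ref and the use of __int128 are illustrative assumptions here, not QEMU's gmp-derived implementation:

#include <stdint.h>

/*
 * Illustrative reference only: divide the 128-bit value n1:n0 by d,
 * returning the 64-bit quotient and storing the remainder in *r.
 * The real primitive requires d to be normalized (msb set) and the
 * quotient to fit in 64 bits, i.e. n1 < d.
 */
static inline uint64_t udiv_qrnnd_ref(uint64_t *r, uint64_t n1,
                                       uint64_t n0, uint64_t d)
{
    unsigned __int128 n = ((unsigned __int128)n1 << 64) | n0;

    *r = (uint64_t)(n % d);
    return (uint64_t)(n / d);
}

With the decomposed fractions kept one bit below the msb, shifting the denominator left by one is what supplies the normalization the primitive needs, as the new comment in the patch explains.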
Diffstat (limited to 'fpu')
-rw-r--r--  fpu/softfloat.c  35
1 file changed, 27 insertions(+), 8 deletions(-)
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index 71da0f6..46ae206 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -1112,19 +1112,38 @@ static FloatParts div_floats(FloatParts a, FloatParts b, float_status *s)
     bool sign = a.sign ^ b.sign;
 
     if (a.cls == float_class_normal && b.cls == float_class_normal) {
-        uint64_t temp_lo, temp_hi;
+        uint64_t n0, n1, q, r;
         int exp = a.exp - b.exp;
+
+        /*
+         * We want a 2*N / N-bit division to produce exactly an N-bit
+         * result, so that we do not lose any precision and so that we
+         * do not have to renormalize afterward. If A.frac < B.frac,
+         * then division would produce an (N-1)-bit result; shift A left
+         * by one to produce an N-bit result, and decrement the
+         * exponent to match.
+         *
+         * The udiv_qrnnd algorithm that we're using requires normalization,
+         * i.e. the msb of the denominator must be set. Since we know that
+         * DECOMPOSED_BINARY_POINT is msb-1, the inputs must be shifted left
+         * by one (more), and the remainder must be shifted right by one.
+         */
         if (a.frac < b.frac) {
             exp -= 1;
-            shortShift128Left(0, a.frac, DECOMPOSED_BINARY_POINT + 1,
-                              &temp_hi, &temp_lo);
+            shift128Left(0, a.frac, DECOMPOSED_BINARY_POINT + 2, &n1, &n0);
         } else {
-            shortShift128Left(0, a.frac, DECOMPOSED_BINARY_POINT,
-                              &temp_hi, &temp_lo);
+            shift128Left(0, a.frac, DECOMPOSED_BINARY_POINT + 1, &n1, &n0);
         }
-        /* LSB of quot is set if inexact which roundandpack will use
-         * to set flags. Yet again we re-use a for the result */
-        a.frac = div128To64(temp_lo, temp_hi, b.frac);
+        q = udiv_qrnnd(&r, n1, n0, b.frac << 1);
+
+        /*
+         * Set lsb if there is a remainder, to set inexact.
+         * As mentioned above, to find the actual value of the remainder we
+         * would need to shift right, but (1) we are only concerned about
+         * non-zero-ness, and (2) the remainder will always be even because
+         * both inputs to the division primitive are even.
+         */
+        a.frac = q | (r != 0);
         a.sign = sign;
         a.exp = exp;
         return a;
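A small, self-contained sanity check of the properties the new comments rely on. This is a sketch only: it assumes DECOMPOSED_BINARY_POINT is 62 (the "msb-1" mentioned above), picks two arbitrary normal-number fractions, and stands in for shift128Left and udiv_qrnnd with __int128 arithmetic rather than the softfloat-macros.h versions.

#include <assert.h>
#include <stdint.h>

#define DECOMPOSED_BINARY_POINT 62   /* assumed: "msb-1" per the comment */

/* Reference 128/64 divide standing in for udiv_qrnnd (see sketch above). */
static uint64_t udiv_qrnnd_ref(uint64_t *r, uint64_t n1, uint64_t n0,
                               uint64_t d)
{
    unsigned __int128 n = ((unsigned __int128)n1 << 64) | n0;

    *r = (uint64_t)(n % d);
    return (uint64_t)(n / d);
}

int main(void)
{
    /* Arbitrary normal-number fractions: bit 62 set, bit 63 clear. */
    uint64_t a_frac = (1ull << DECOMPOSED_BINARY_POINT) | 0x123456789abcdull;
    uint64_t b_frac = (1ull << DECOMPOSED_BINARY_POINT) | 0x00fedcba98765ull;
    int shift = DECOMPOSED_BINARY_POINT + (a_frac < b_frac ? 2 : 1);
    /* shift128Left(0, a.frac, shift, &n1, &n0), expressed via __int128. */
    unsigned __int128 n = (unsigned __int128)a_frac << shift;
    uint64_t n1 = (uint64_t)(n >> 64), n0 = (uint64_t)n, q, r;

    q = udiv_qrnnd_ref(&r, n1, n0, b_frac << 1);

    assert((b_frac << 1) >> 63 == 1);           /* denominator is normalized */
    assert(q >> DECOMPOSED_BINARY_POINT == 1);  /* quotient needs no renormalization */
    assert((r & 1) == 0);                       /* remainder is always even */
    return 0;
}

Swapping the two fraction values exercises the a.frac < b.frac branch; the same three assertions hold, which is why the quotient can be used directly as the result fraction with only the lsb ORed in for inexactness.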