author     Richard Henderson <richard.henderson@linaro.org>   2020-11-08 13:01:55 -0800
committer  Richard Henderson <richard.henderson@linaro.org>   2021-05-16 07:13:51 -0500
commit     d46975bce10e163b9f10a7f569d3e046114d8580
tree       5a3f93fc9c854c766d21f2e97a84cd5dbbc59d12 /fpu
parent     979582d07115ff3c5c0c1f2bed90a2db91191281
softfloat: Move sf_canonicalize to softfloat-parts.c.inc

At the same time, convert to pointers, rename to parts$N_canonicalize,
and define a macro for parts_canonicalize using QEMU_GENERIC.
Rearrange the cases to recognize float_class_normal as early as
possible.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
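
The pattern named in the commit message is: one macro, parts_canonicalize,
dispatches at compile time to parts64_canonicalize or parts128_canonicalize
based on the pointer type of its first argument. Below is a minimal sketch of
that dispatch idea, using C11 _Generic in place of QEMU's QEMU_GENERIC helper;
the Demo64/Demo128 types and demo_* names are hypothetical stand-ins for the
real FloatParts structs, not QEMU code.

    #include <stdint.h>

    typedef struct { uint64_t frac; } Demo64;
    typedef struct { uint64_t frac_hi, frac_lo; } Demo128;

    static void demo64_clear(Demo64 *p)   { p->frac = 0; }
    static void demo128_clear(Demo128 *p) { p->frac_hi = p->frac_lo = 0; }

    /* One name, resolved at compile time from the pointer type of P. */
    #define demo_clear(P) \
        _Generic((P), Demo64 *: demo64_clear, Demo128 *: demo128_clear)(P)

    int main(void)
    {
        Demo64 a = { 1 };
        Demo128 b = { 2, 3 };
        demo_clear(&a);   /* resolves to demo64_clear */
        demo_clear(&b);   /* resolves to demo128_clear */
        return (int)(a.frac + b.frac_hi + b.frac_lo);
    }
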
Diffstat (limited to 'fpu')
 fpu/softfloat-parts.c.inc |  33
 fpu/softfloat.c           | 117
 2 files changed, 112 insertions(+), 38 deletions(-)
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index a78d61e..25bf99b 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -100,3 +100,36 @@ static FloatPartsN *partsN(pick_nan_muladd)(FloatPartsN *a, FloatPartsN *b,
}
return a;
}
+
+/*
+ * Canonicalize the FloatParts structure. Determine the class,
+ * unbias the exponent, and normalize the fraction.
+ */
+static void partsN(canonicalize)(FloatPartsN *p, float_status *status,
+ const FloatFmt *fmt)
+{
+ if (unlikely(p->exp == 0)) {
+ if (likely(frac_eqz(p))) {
+ p->cls = float_class_zero;
+ } else if (status->flush_inputs_to_zero) {
+ float_raise(float_flag_input_denormal, status);
+ p->cls = float_class_zero;
+ frac_clear(p);
+ } else {
+ int shift = frac_normalize(p);
+ p->cls = float_class_normal;
+ p->exp = fmt->frac_shift - fmt->exp_bias - shift + 1;
+ }
+ } else if (likely(p->exp < fmt->exp_max) || fmt->arm_althp) {
+ p->cls = float_class_normal;
+ p->exp -= fmt->exp_bias;
+ frac_shl(p, fmt->frac_shift);
+ p->frac_hi |= DECOMPOSED_IMPLICIT_BIT;
+ } else if (likely(frac_eqz(p))) {
+ p->cls = float_class_inf;
+ } else {
+ frac_shl(p, fmt->frac_shift);
+ p->cls = (parts_is_snan_frac(p->frac_hi, status)
+ ? float_class_snan : float_class_qnan);
+ }
+}
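
The three-way case split in partsN(canonicalize) mirrors the IEEE 754
encoding: an all-zero exponent field is a zero or subnormal, an all-ones
field (fmt->exp_max, absent inf/NaN under Arm alternative half-precision)
is an infinity or NaN, and everything in between is a normal number whose
implicit leading bit must be restored. A self-contained illustration of the
same classification for float32 follows; this is a sketch for exposition,
not QEMU code, and the qNaN test assumes the common set-MSB convention:

    #include <stdint.h>
    #include <stdio.h>

    static const char *classify32(uint32_t bits)
    {
        uint32_t exp  = (bits >> 23) & 0xff;    /* 8-bit exponent field */
        uint32_t frac = bits & 0x7fffff;        /* 23-bit fraction field */

        if (exp == 0) {
            return frac == 0 ? "zero" : "subnormal";
        } else if (exp == 0xff) {
            /* Top fraction bit distinguishes quiet from signaling NaN
             * in the usual (non-MIPS-legacy) convention. */
            return frac == 0 ? "inf"
                 : (frac & 0x400000) ? "qnan" : "snan";
        }
        return "normal";   /* true significand is (1 << 23) | frac */
    }

    int main(void)
    {
        printf("%s\n", classify32(0x3f800000));  /* 1.0f -> normal */
        printf("%s\n", classify32(0x00000001));  /* smallest subnormal */
        printf("%s\n", classify32(0x7f800000));  /* +inf */
        return 0;
    }
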
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index df004db..535261d 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -733,6 +733,14 @@ static FloatParts128 *parts128_pick_nan_muladd(FloatParts128 *a,
#define parts_pick_nan_muladd(A, B, C, S, ABM, ABCM) \
PARTS_GENERIC_64_128(pick_nan_muladd, A)(A, B, C, S, ABM, ABCM)
+static void parts64_canonicalize(FloatParts64 *p, float_status *status,
+ const FloatFmt *fmt);
+static void parts128_canonicalize(FloatParts128 *p, float_status *status,
+ const FloatFmt *fmt);
+
+#define parts_canonicalize(A, S, F) \
+ PARTS_GENERIC_64_128(canonicalize, A)(A, S, F)
+
/*
* Helper functions for softfloat-parts.c.inc, per-size operations.
*/
@@ -759,53 +767,86 @@ static int frac128_cmp(FloatParts128 *a, FloatParts128 *b)
#define frac_cmp(A, B) FRAC_GENERIC_64_128(cmp, A)(A, B)
-static void frac128_shl(FloatParts128 *a, int c)
+static void frac64_clear(FloatParts64 *a)
{
- shift128Left(a->frac_hi, a->frac_lo, c, &a->frac_hi, &a->frac_lo);
+ a->frac = 0;
+}
+
+static void frac128_clear(FloatParts128 *a)
+{
+ a->frac_hi = a->frac_lo = 0;
}
-#define frac_shl(A, C) frac128_shl(A, C)
+#define frac_clear(A) FRAC_GENERIC_64_128(clear, A)(A)
-static void frac128_shr(FloatParts128 *a, int c)
+static bool frac64_eqz(FloatParts64 *a)
{
- shift128Right(a->frac_hi, a->frac_lo, c, &a->frac_hi, &a->frac_lo);
+ return a->frac == 0;
+}
+
+static bool frac128_eqz(FloatParts128 *a)
+{
+ return (a->frac_hi | a->frac_lo) == 0;
}
-#define frac_shr(A, C) frac128_shr(A, C)
+#define frac_eqz(A) FRAC_GENERIC_64_128(eqz, A)(A)
-/* Canonicalize EXP and FRAC, setting CLS. */
-static FloatParts64 sf_canonicalize(FloatParts64 part, const FloatFmt *parm,
- float_status *status)
+static int frac64_normalize(FloatParts64 *a)
{
- if (part.exp == parm->exp_max && !parm->arm_althp) {
- if (part.frac == 0) {
- part.cls = float_class_inf;
- } else {
- part.frac <<= parm->frac_shift;
- part.cls = (parts_is_snan_frac(part.frac, status)
- ? float_class_snan : float_class_qnan);
- }
- } else if (part.exp == 0) {
- if (likely(part.frac == 0)) {
- part.cls = float_class_zero;
- } else if (status->flush_inputs_to_zero) {
- float_raise(float_flag_input_denormal, status);
- part.cls = float_class_zero;
- part.frac = 0;
- } else {
- int shift = clz64(part.frac);
- part.cls = float_class_normal;
- part.exp = parm->frac_shift - parm->exp_bias - shift + 1;
- part.frac <<= shift;
+ if (a->frac) {
+ int shift = clz64(a->frac);
+ a->frac <<= shift;
+ return shift;
+ }
+ return 64;
+}
+
+static int frac128_normalize(FloatParts128 *a)
+{
+ if (a->frac_hi) {
+ int shl = clz64(a->frac_hi);
+ if (shl) {
+ int shr = 64 - shl;
+ a->frac_hi = (a->frac_hi << shl) | (a->frac_lo >> shr);
+ a->frac_lo = (a->frac_lo << shl);
}
- } else {
- part.cls = float_class_normal;
- part.exp -= parm->exp_bias;
- part.frac = DECOMPOSED_IMPLICIT_BIT + (part.frac << parm->frac_shift);
+ return shl;
+ } else if (a->frac_lo) {
+ int shl = clz64(a->frac_lo);
+ a->frac_hi = (a->frac_lo << shl);
+ a->frac_lo = 0;
+ return shl + 64;
}
- return part;
+ return 128;
}
+#define frac_normalize(A) FRAC_GENERIC_64_128(normalize, A)(A)
+
+static void frac64_shl(FloatParts64 *a, int c)
+{
+ a->frac <<= c;
+}
+
+static void frac128_shl(FloatParts128 *a, int c)
+{
+ shift128Left(a->frac_hi, a->frac_lo, c, &a->frac_hi, &a->frac_lo);
+}
+
+#define frac_shl(A, C) FRAC_GENERIC_64_128(shl, A)(A, C)
+
+static void frac64_shr(FloatParts64 *a, int c)
+{
+ a->frac >>= c;
+}
+
+static void frac128_shr(FloatParts128 *a, int c)
+{
+ shift128Right(a->frac_hi, a->frac_lo, c, &a->frac_hi, &a->frac_lo);
+}
+
+#define frac_shr(A, C) FRAC_GENERIC_64_128(shr, A)(A, C)
+
+
/* Round and uncanonicalize a floating-point number by parts. There
* are FRAC_SHIFT bits that may require rounding at the bottom of the
* fraction; these bits will be removed. The exponent will be biased
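
One detail worth noting in frac128_normalize above: the if (shl) guard is a
correctness requirement, not an optimization. When shl is 0, the cross-word
term a->frac_lo >> (64 - shl) would shift a 64-bit value by 64, which is
undefined behavior in C, so the shift pair must be skipped entirely. A small
standalone check of the two-word normalization (a hypothetical test harness
using the GCC/Clang __builtin_clzll builtin, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    typedef struct { uint64_t hi, lo; } U128;

    static int norm128(U128 *a)   /* mirrors frac128_normalize */
    {
        if (a->hi) {
            int shl = __builtin_clzll(a->hi);
            if (shl) {            /* avoid the undefined 64-bit shift */
                int shr = 64 - shl;
                a->hi = (a->hi << shl) | (a->lo >> shr);
                a->lo <<= shl;
            }
            return shl;
        } else if (a->lo) {
            int shl = __builtin_clzll(a->lo);
            a->hi = a->lo << shl;
            a->lo = 0;
            return shl + 64;
        }
        return 128;
    }

    int main(void)
    {
        U128 a = { 0, 1 };                 /* only bit 0 set */
        assert(norm128(&a) == 127 && a.hi == 1ULL << 63 && a.lo == 0);

        U128 b = { 1ULL << 63, 42 };       /* already normalized */
        assert(norm128(&b) == 0 && b.lo == 42);
        return 0;
    }
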
@@ -984,7 +1025,7 @@ static void float16a_unpack_canonical(FloatParts64 *p, float16 f,
float_status *s, const FloatFmt *params)
{
float16_unpack_raw(p, f);
- *p = sf_canonicalize(*p, params, s);
+ parts_canonicalize(p, s, params);
}
static void float16_unpack_canonical(FloatParts64 *p, float16 f,
@@ -997,7 +1038,7 @@ static void bfloat16_unpack_canonical(FloatParts64 *p, bfloat16 f,
float_status *s)
{
bfloat16_unpack_raw(p, f);
- *p = sf_canonicalize(*p, &bfloat16_params, s);
+ parts_canonicalize(p, s, &bfloat16_params);
}
static float16 float16a_round_pack_canonical(FloatParts64 *p,
@@ -1025,7 +1066,7 @@ static void float32_unpack_canonical(FloatParts64 *p, float32 f,
float_status *s)
{
float32_unpack_raw(p, f);
- *p = sf_canonicalize(*p, &float32_params, s);
+ parts_canonicalize(p, s, &float32_params);
}
static float32 float32_round_pack_canonical(FloatParts64 *p,
@@ -1039,7 +1080,7 @@ static void float64_unpack_canonical(FloatParts64 *p, float64 f,
float_status *s)
{
float64_unpack_raw(p, f);
- *p = sf_canonicalize(*p, &float64_params, s);
+ parts_canonicalize(p, s, &float64_params);
}
static float64 float64_round_pack_canonical(FloatParts64 *p,