Diffstat (limited to 'sysdeps/aarch64/fpu/cosh_sve.c')
-rw-r--r--  sysdeps/aarch64/fpu/cosh_sve.c  |  135
1 file changed, 81 insertions(+), 54 deletions(-)
diff --git a/sysdeps/aarch64/fpu/cosh_sve.c b/sysdeps/aarch64/fpu/cosh_sve.c
index 77e58e1..f5a163b 100644
--- a/sysdeps/aarch64/fpu/cosh_sve.c
+++ b/sysdeps/aarch64/fpu/cosh_sve.c
@@ -21,71 +21,99 @@
static const struct data
{
- float64_t poly[3];
- float64_t inv_ln2, ln2_hi, ln2_lo, shift, thres;
+ double c0, c2;
+ double c1, c3;
+ float64_t inv_ln2, ln2_hi, ln2_lo, shift;
uint64_t special_bound;
} data = {
- .poly = { 0x1.fffffffffffd4p-2, 0x1.5555571d6b68cp-3,
- 0x1.5555576a59599p-5, },
-
- .inv_ln2 = 0x1.71547652b82fep8, /* N/ln2. */
- /* -ln2/N. */
- .ln2_hi = -0x1.62e42fefa39efp-9,
- .ln2_lo = -0x1.abc9e3b39803f3p-64,
- .shift = 0x1.8p+52,
- .thres = 704.0,
-
- /* 0x1.6p9, above which exp overflows. */
- .special_bound = 0x4086000000000000,
+ /* Generated using Remez, in [-log(2)/128, log(2)/128]. */
+ .c0 = 0x1.fffffffffdbcdp-2,
+ .c1 = 0x1.555555555444cp-3,
+ .c2 = 0x1.555573c6a9f7dp-5,
+ .c3 = 0x1.1111266d28935p-7,
+ .ln2_hi = 0x1.62e42fefa3800p-1,
+ .ln2_lo = 0x1.ef35793c76730p-45,
+ /* 1/ln2. */
+ .inv_ln2 = 0x1.71547652b82fep+0,
+ .shift = 0x1.800000000ff80p+46, /* 1.5*2^46+1022. */
+
+ /* asuint(ln(2^(1024 - 1/128))), the value above which exp overflows. */
+ .special_bound = 0x40862e37e7d8ba72,
};
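
As an aside on the constants above: the shift does double duty in the FEXPA scheme. Adding it to x * inv_ln2 rounds to the nearest 1/64 and leaves the FEXPA table index and biased exponent sitting in the low mantissa bits, and the 1022 baked into it (rather than the usual bias of 1023) is what produces the halved scale. A minimal scalar sketch, not part of the patch; the decoding below is illustrative:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  double inv_ln2 = 0x1.71547652b82fep+0; /* 1/ln2.  */
  double shift = 0x1.800000000ff80p+46;  /* 1.5*2^46 + 1022.  */
  double x = 1.0;

  /* Adding the large shift rounds x/ln2 to the nearest 1/64 and leaves the
     result in the low mantissa bits of z.  */
  double z = shift + x * inv_ln2;
  uint64_t u;
  memcpy (&u, &z, sizeof u);

  /* FEXPA reads bits [5:0] as an index into a 64-entry 2^(i/64) table and
     bits [16:6] as the biased exponent; the 1022 in the shift makes that
     exponent one smaller than for plain exp, i.e. scale = 2^n / 2.  */
  uint64_t index = u & 0x3f;
  uint64_t biased_exp = (u >> 6) & 0x7ff;
  printf ("index=%" PRIu64 " biased_exp=%" PRIu64 " n=%a\n", index,
          biased_exp, z - shift);
  return 0;
}

For x = 1.0 this prints index 28 and biased exponent 1023, i.e. scale = 2^(28/64) ~= 1.354, close to exp(1)/2 before the polynomial correction.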
-static svfloat64_t NOINLINE
-special_case (svfloat64_t x, svbool_t pg, svfloat64_t t, svbool_t special)
-{
- svfloat64_t half_t = svmul_x (svptrue_b64 (), t, 0.5);
- svfloat64_t half_over_t = svdivr_x (pg, t, 0.5);
- svfloat64_t y = svadd_x (pg, half_t, half_over_t);
- return sv_call_f64 (cosh, x, y, special);
-}
-
-/* Helper for approximating exp(x). Copied from sv_exp_tail, with no
- special-case handling or tail. */
+/* Helper for approximating exp(x)/2.
+ Functionally identical to FEXPA-based exp(x), but with an adjustment in
+ the shift value that reduces the exponent of scale by 1, thus halving the
+ result at no extra cost. */
static inline svfloat64_t
-exp_inline (svfloat64_t x, const svbool_t pg, const struct data *d)
+exp_over_two_inline (const svbool_t pg, svfloat64_t x, const struct data *d)
{
/* Calculate exp(x). */
svfloat64_t z = svmla_x (pg, sv_f64 (d->shift), x, d->inv_ln2);
+ svuint64_t u = svreinterpret_u64 (z);
svfloat64_t n = svsub_x (pg, z, d->shift);
- svfloat64_t r = svmla_x (pg, x, n, d->ln2_hi);
- r = svmla_x (pg, r, n, d->ln2_lo);
+ svfloat64_t c13 = svld1rq (svptrue_b64 (), &d->c1);
+ svfloat64_t ln2 = svld1rq (svptrue_b64 (), &d->ln2_hi);
- svuint64_t u = svreinterpret_u64 (z);
- svuint64_t e = svlsl_x (pg, u, 52 - V_EXP_TAIL_TABLE_BITS);
- svuint64_t i = svand_x (svptrue_b64 (), u, 0xff);
+ svfloat64_t r = x;
+ r = svmls_lane (r, n, ln2, 0);
+ r = svmls_lane (r, n, ln2, 1);
- svfloat64_t y = svmla_x (pg, sv_f64 (d->poly[1]), r, d->poly[2]);
- y = svmla_x (pg, sv_f64 (d->poly[0]), r, y);
- y = svmla_x (pg, sv_f64 (1.0), r, y);
- y = svmul_x (svptrue_b64 (), r, y);
+ svfloat64_t r2 = svmul_x (svptrue_b64 (), r, r);
+ svfloat64_t p01 = svmla_lane (sv_f64 (d->c0), r, c13, 0);
+ svfloat64_t p23 = svmla_lane (sv_f64 (d->c2), r, c13, 1);
+ svfloat64_t p04 = svmla_x (pg, p01, p23, r2);
+ svfloat64_t p = svmla_x (pg, r, p04, r2);
- /* s = 2^(n/N). */
- u = svld1_gather_index (pg, __v_exp_tail_data, i);
- svfloat64_t s = svreinterpret_f64 (svadd_x (pg, u, e));
+ svfloat64_t scale = svexpa (u);
- return svmla_x (pg, s, s, y);
+ return svmla_x (pg, scale, scale, p);
+}
+
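For reference, a scalar model of the helper above, with FEXPA's 2^(i/64) table lookup stood in for by exp2. This is a sketch for checking the range reduction and polynomial against the constants in the patch, not the patch's code, and exp_over_two_scalar is a hypothetical name:

#include <math.h>

static double
exp_over_two_scalar (double x)
{
  const double shift = 0x1.800000000ff80p+46;
  const double inv_ln2 = 0x1.71547652b82fep+0;
  const double ln2_hi = 0x1.62e42fefa3800p-1;
  const double ln2_lo = 0x1.ef35793c76730p-45;

  /* n is x/ln2 rounded to the nearest 1/64.  */
  double z = shift + x * inv_ln2;
  double n = z - shift;

  /* r = x - n*ln2, in [-ln2/128, ln2/128].  */
  double r = x - n * ln2_hi - n * ln2_lo;

  /* p ~= exp(r) - 1, from the Remez coefficients c0..c3.  */
  double p = r
             + r * r
                   * (0x1.fffffffffdbcdp-2
                      + r * (0x1.555555555444cp-3
                             + r * (0x1.555573c6a9f7dp-5
                                    + r * 0x1.1111266d28935p-7)));

  /* FEXPA would produce 2^(n-1) from the shifted bits; exp2 models it.  */
  double scale = exp2 (n - 1.0);
  return scale + scale * p; /* = 2^(n-1) * exp(r) = exp(x)/2.  */
}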
+/* Vectorised special case to handle values past where exp_over_two_inline
+ overflows. Halves the input value and uses the identity exp(x) = exp(x/2)^2
+ to double the valid range of inputs, returning inf for anything past that. */
+static svfloat64_t NOINLINE
+special_case (svbool_t pg, svbool_t special, svfloat64_t ax, svfloat64_t t,
+ const struct data *d)
+{
+ /* Finish fast path to compute values for non-special cases. */
+ svfloat64_t inv_twoexp = svdivr_x (pg, t, 0.25);
+ svfloat64_t y = svadd_x (pg, t, inv_twoexp);
+
+ /* Halve the input value, then check whether any lanes
+ still overflow. */
+ ax = svmul_x (special, ax, 0.5);
+ svbool_t is_safe
+ = svcmplt (special, svreinterpret_u64 (ax), d->special_bound);
+
+ /* Compute exp(x/2)/2, and set any overflowing lanes to inf. */
+ svfloat64_t half_exp = exp_over_two_inline (special, ax, d);
+ half_exp = svsel (is_safe, half_exp, sv_f64 (INFINITY));
+
+ /* Construct special case cosh(x) = (exp(x/2)^2)/2. */
+ svfloat64_t exp = svmul_x (svptrue_b64 (), half_exp, 2);
+ svfloat64_t special_y = svmul_x (special, exp, half_exp);
+
+ /* Select correct return values for special and non-special cases. */
+ special_y = svsel (special, special_y, y);
+
+ /* Ensure an input of NaN is correctly propagated. */
+ svbool_t is_nan
+ = svcmpgt (special, svreinterpret_u64 (ax), sv_u64 (0x7ff0000000000000));
+ return svsel (is_nan, ax, special_y);
}
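
The multiplication order in special_y matters: squaring exp(x/2) first would overflow for inputs whose cosh is still finite, whereas multiplying by the pre-halved factor keeps the intermediate below DBL_MAX. A standalone illustration using plain libm calls, not the patch's code:

#include <math.h>
#include <stdio.h>

int
main (void)
{
  double x = 710.0; /* exp(x) overflows, but cosh(x) ~ 1.117e308 is finite.  */
  double half_exp = exp (x / 2) / 2;

  double naive = exp (x / 2) * exp (x / 2) / 2; /* inf: squares first.  */
  double safe = (2 * half_exp) * half_exp;      /* finite: one factor is
                                                   already halved.  */

  printf ("naive=%g safe=%g cosh=%g\n", naive, safe, cosh (x));
  return 0;
}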
/* Approximation for SVE double-precision cosh(x) using exp_over_two_inline.
cosh(x) = (exp(x) + exp(-x)) / 2.
- The greatest observed error is in the scalar fall-back region, so is the
- same as the scalar routine, 1.93 ULP:
- _ZGVsMxv_cosh (0x1.628ad45039d2fp+9) got 0x1.fd774e958236dp+1021
- want 0x1.fd774e958236fp+1021.
-
- The greatest observed error in the non-special region is 1.54 ULP:
- _ZGVsMxv_cosh (0x1.ba5651dd4486bp+2) got 0x1.f5e2bb8d5c98fp+8
- want 0x1.f5e2bb8d5c991p+8. */
+ The greatest observed error in the special-case region is 2.66 + 0.5 ULP:
+ _ZGVsMxv_cosh (0x1.633b532ffbc1ap+9) got 0x1.f9b2d3d22399ep+1023
+ want 0x1.f9b2d3d22399bp+1023.
+
+ The greatest observed error in the non-special region is 1.01 + 0.5 ULP:
+ _ZGVsMxv_cosh (0x1.998ecbb3c1f81p+1) got 0x1.890b225657f84p+3
+ want 0x1.890b225657f82p+3. */
svfloat64_t SV_NAME_D1 (cosh) (svfloat64_t x, const svbool_t pg)
{
const struct data *d = ptr_barrier (&data);
@@ -94,14 +122,13 @@ svfloat64_t SV_NAME_D1 (cosh) (svfloat64_t x, const svbool_t pg)
svbool_t special = svcmpgt (pg, svreinterpret_u64 (ax), d->special_bound);
/* Up to the point that exp overflows, we can use it to calculate cosh by
- exp(|x|) / 2 + 1 / (2 * exp(|x|)). */
- svfloat64_t t = exp_inline (ax, pg, d);
+ exp(|x|)/2 + 1/(2 * exp(|x|)), computed here as half_exp + 0.25/half_exp. */
+ svfloat64_t half_exp = exp_over_two_inline (pg, ax, d);
- /* Fall back to scalar for any special cases. */
+ /* Fall back to a standalone vectorised special-case routine. */
if (__glibc_unlikely (svptest_any (pg, special)))
- return special_case (x, pg, t, special);
+ return special_case (pg, special, ax, half_exp, d);
- svfloat64_t half_t = svmul_x (svptrue_b64 (), t, 0.5);
- svfloat64_t half_over_t = svdivr_x (pg, t, 0.5);
- return svadd_x (pg, half_t, half_over_t);
+ svfloat64_t inv_twoexp = svdivr_x (pg, half_exp, 0.25);
+ return svadd_x (pg, half_exp, inv_twoexp);
}
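
For completeness, this routine is normally reached through libmvec rather than called by hand: with SVE enabled and vectorization on, GCC can map the scalar cosh call in a loop like the one below to _ZGVsMxv_cosh. The exact flags vary by compiler version, and cosh_array is just an illustrative name:

#include <math.h>

/* Candidate loop for auto-vectorization to _ZGVsMxv_cosh, e.g. with
   gcc -O2 -ftree-loop-vectorize -march=armv8-a+sve -fno-math-errno.  */
void
cosh_array (double *restrict y, const double *restrict x, int n)
{
  for (int i = 0; i < n; i++)
    y[i] = cosh (x[i]);
}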