aboutsummaryrefslogtreecommitdiff
path: root/sysdeps/aarch64/fpu/atan2_advsimd.c
diff options
context:
space:
mode:
Diffstat (limited to 'sysdeps/aarch64/fpu/atan2_advsimd.c')
-rw-r--r--  sysdeps/aarch64/fpu/atan2_advsimd.c | 128
1 file changed, 63 insertions(+), 65 deletions(-)
diff --git a/sysdeps/aarch64/fpu/atan2_advsimd.c b/sysdeps/aarch64/fpu/atan2_advsimd.c
index 00b4a4f..a31d52f 100644
--- a/sysdeps/aarch64/fpu/atan2_advsimd.c
+++ b/sysdeps/aarch64/fpu/atan2_advsimd.c
@@ -19,40 +19,38 @@
#include "math_config.h"
#include "v_math.h"
-#include "poly_advsimd_f64.h"
static const struct data
{
+ double c1, c3, c5, c7, c9, c11, c13, c15, c17, c19;
float64x2_t c0, c2, c4, c6, c8, c10, c12, c14, c16, c18;
float64x2_t pi_over_2;
- double c1, c3, c5, c7, c9, c11, c13, c15, c17, c19;
- uint64x2_t zeroinfnan, minustwo;
+ uint64x2_t zeroinfnan;
} data = {
- /* Coefficients of polynomial P such that atan(x)~x+x*P(x^2) on
- [2**-1022, 1.0]. */
- .c0 = V2 (-0x1.5555555555555p-2),
- .c1 = 0x1.99999999996c1p-3,
- .c2 = V2 (-0x1.2492492478f88p-3),
- .c3 = 0x1.c71c71bc3951cp-4,
- .c4 = V2 (-0x1.745d160a7e368p-4),
- .c5 = 0x1.3b139b6a88ba1p-4,
- .c6 = V2 (-0x1.11100ee084227p-4),
- .c7 = 0x1.e1d0f9696f63bp-5,
- .c8 = V2 (-0x1.aebfe7b418581p-5),
- .c9 = 0x1.842dbe9b0d916p-5,
- .c10 = V2 (-0x1.5d30140ae5e99p-5),
- .c11 = 0x1.338e31eb2fbbcp-5,
- .c12 = V2 (-0x1.00e6eece7de8p-5),
- .c13 = 0x1.860897b29e5efp-6,
- .c14 = V2 (-0x1.0051381722a59p-6),
- .c15 = 0x1.14e9dc19a4a4ep-7,
- .c16 = V2 (-0x1.d0062b42fe3bfp-9),
- .c17 = 0x1.17739e210171ap-10,
- .c18 = V2 (-0x1.ab24da7be7402p-13),
- .c19 = 0x1.358851160a528p-16,
+ /* Coefficients of polynomial P such that
+ atan(x)~x+x*P(x^2) on [2^-1022, 1.0]. */
+ .c0 = V2 (-0x1.555555555552ap-2),
+ .c1 = 0x1.9999999995aebp-3,
+ .c2 = V2 (-0x1.24924923923f6p-3),
+ .c3 = 0x1.c71c7184288a2p-4,
+ .c4 = V2 (-0x1.745d11fb3d32bp-4),
+ .c5 = 0x1.3b136a18051b9p-4,
+ .c6 = V2 (-0x1.110e6d985f496p-4),
+ .c7 = 0x1.e1bcf7f08801dp-5,
+ .c8 = V2 (-0x1.ae644e28058c3p-5),
+ .c9 = 0x1.82eeb1fed85c6p-5,
+ .c10 = V2 (-0x1.59d7f901566cbp-5),
+ .c11 = 0x1.2c982855ab069p-5,
+ .c12 = V2 (-0x1.eb49592998177p-6),
+ .c13 = 0x1.69d8b396e3d38p-6,
+ .c14 = V2 (-0x1.ca980345c4204p-7),
+ .c15 = 0x1.dc050eafde0b3p-8,
+ .c16 = V2 (-0x1.7ea70755b8eccp-9),
+ .c17 = 0x1.ba3da3de903e8p-11,
+ .c18 = V2 (-0x1.44a4b059b6f67p-13),
+ .c19 = 0x1.c4a45029e5a91p-17,
.pi_over_2 = V2 (0x1.921fb54442d18p+0),
.zeroinfnan = V2 (2 * 0x7ff0000000000000ul - 1),
- .minustwo = V2 (0xc000000000000000),
};
#define SignMask v_u64 (0x8000000000000000)
@@ -77,10 +75,9 @@ zeroinfnan (uint64x2_t i, const struct data *d)
}
/* Fast implementation of vector atan2.
- Maximum observed error is 2.8 ulps:
- _ZGVnN2vv_atan2 (0x1.9651a429a859ap+5, 0x1.953075f4ee26p+5)
- got 0x1.92d628ab678ccp-1
- want 0x1.92d628ab678cfp-1. */
+ Maximum observed error is 1.97 ulps:
+ _ZGVnN2vv_atan2 (0x1.42337dba73768p+5, 0x1.422d748cd3e29p+5)
+ got 0x1.9224810264efcp-1 want 0x1.9224810264efep-1. */
float64x2_t VPCS_ATTR V_NAME_D2 (atan2) (float64x2_t y, float64x2_t x)
{
const struct data *d = ptr_barrier (&data);
@@ -101,26 +98,29 @@ float64x2_t VPCS_ATTR V_NAME_D2 (atan2) (float64x2_t y, float64x2_t x)
uint64x2_t pred_xlt0 = vcltzq_f64 (x);
uint64x2_t pred_aygtax = vcagtq_f64 (y, x);
- /* Set up z for call to atan. */
- float64x2_t n = vbslq_f64 (pred_aygtax, vnegq_f64 (ax), ay);
- float64x2_t q = vbslq_f64 (pred_aygtax, ay, ax);
- float64x2_t z = vdivq_f64 (n, q);
-
- /* Work out the correct shift. */
- float64x2_t shift
- = vreinterpretq_f64_u64 (vandq_u64 (pred_xlt0, d->minustwo));
- shift = vbslq_f64 (pred_aygtax, vaddq_f64 (shift, v_f64 (1.0)), shift);
- shift = vmulq_f64 (shift, d->pi_over_2);
-
- /* Calculate the polynomial approximation.
- Use split Estrin scheme for P(z^2) with deg(P)=19. Use split instead of
- full scheme to avoid underflow in x^16.
- The order 19 polynomial P approximates
- (atan(sqrt(x))-sqrt(x))/x^(3/2). */
+ /* Set up z for evaluation of atan. */
+ float64x2_t num = vbslq_f64 (pred_aygtax, vnegq_f64 (ax), ay);
+ float64x2_t den = vbslq_f64 (pred_aygtax, ay, ax);
+ float64x2_t z = vdivq_f64 (num, den);
+
+ /* Work out the correct shift for atan2:
+ Multiplication by pi is done later.
+ -pi when x < 0 and ax < ay
+ -pi/2 when x < 0 and ax > ay
+ 0 when x >= 0 and ax < ay
+ pi/2 when x >= 0 and ax > ay. */
+ float64x2_t shift = vreinterpretq_f64_u64 (
+ vandq_u64 (pred_xlt0, vreinterpretq_u64_f64 (v_f64 (-2.0))));
+ float64x2_t shift2 = vreinterpretq_f64_u64 (
+ vandq_u64 (pred_aygtax, vreinterpretq_u64_f64 (v_f64 (1.0))));
+ shift = vaddq_f64 (shift, shift2);
+
+ /* Calculate the polynomial approximation. */
float64x2_t z2 = vmulq_f64 (z, z);
- float64x2_t x2 = vmulq_f64 (z2, z2);
- float64x2_t x4 = vmulq_f64 (x2, x2);
- float64x2_t x8 = vmulq_f64 (x4, x4);
+ float64x2_t z3 = vmulq_f64 (z2, z);
+ float64x2_t z4 = vmulq_f64 (z2, z2);
+ float64x2_t z8 = vmulq_f64 (z4, z4);
+ float64x2_t z16 = vmulq_f64 (z8, z8);
float64x2_t c13 = vld1q_f64 (&d->c1);
float64x2_t c57 = vld1q_f64 (&d->c5);
@@ -128,45 +128,43 @@ float64x2_t VPCS_ATTR V_NAME_D2 (atan2) (float64x2_t y, float64x2_t x)
float64x2_t c1315 = vld1q_f64 (&d->c13);
float64x2_t c1719 = vld1q_f64 (&d->c17);
- /* estrin_7. */
+ /* Order-7 Estrin. */
float64x2_t p01 = vfmaq_laneq_f64 (d->c0, z2, c13, 0);
float64x2_t p23 = vfmaq_laneq_f64 (d->c2, z2, c13, 1);
- float64x2_t p03 = vfmaq_f64 (p01, x2, p23);
+ float64x2_t p03 = vfmaq_f64 (p01, z4, p23);
float64x2_t p45 = vfmaq_laneq_f64 (d->c4, z2, c57, 0);
float64x2_t p67 = vfmaq_laneq_f64 (d->c6, z2, c57, 1);
- float64x2_t p47 = vfmaq_f64 (p45, x2, p67);
+ float64x2_t p47 = vfmaq_f64 (p45, z4, p67);
- float64x2_t p07 = vfmaq_f64 (p03, x4, p47);
+ float64x2_t p07 = vfmaq_f64 (p03, z8, p47);
- /* estrin_11. */
+ /* Order-11 Estrin. */
float64x2_t p89 = vfmaq_laneq_f64 (d->c8, z2, c911, 0);
float64x2_t p1011 = vfmaq_laneq_f64 (d->c10, z2, c911, 1);
- float64x2_t p811 = vfmaq_f64 (p89, x2, p1011);
+ float64x2_t p811 = vfmaq_f64 (p89, z4, p1011);
float64x2_t p1213 = vfmaq_laneq_f64 (d->c12, z2, c1315, 0);
float64x2_t p1415 = vfmaq_laneq_f64 (d->c14, z2, c1315, 1);
- float64x2_t p1215 = vfmaq_f64 (p1213, x2, p1415);
+ float64x2_t p1215 = vfmaq_f64 (p1213, z4, p1415);
float64x2_t p1617 = vfmaq_laneq_f64 (d->c16, z2, c1719, 0);
float64x2_t p1819 = vfmaq_laneq_f64 (d->c18, z2, c1719, 1);
- float64x2_t p1619 = vfmaq_f64 (p1617, x2, p1819);
+ float64x2_t p1619 = vfmaq_f64 (p1617, z4, p1819);
- float64x2_t p815 = vfmaq_f64 (p811, x4, p1215);
- float64x2_t p819 = vfmaq_f64 (p815, x8, p1619);
+ float64x2_t p815 = vfmaq_f64 (p811, z8, p1215);
+ float64x2_t p819 = vfmaq_f64 (p815, z16, p1619);
- float64x2_t ret = vfmaq_f64 (p07, p819, x8);
+ float64x2_t poly = vfmaq_f64 (p07, p819, z16);
/* Finalize. y = shift + z + z^3 * P(z^2). */
- ret = vfmaq_f64 (z, ret, vmulq_f64 (z2, z));
- ret = vaddq_f64 (ret, shift);
+ float64x2_t ret = vfmaq_f64 (z, shift, d->pi_over_2);
+ ret = vfmaq_f64 (ret, z3, poly);
if (__glibc_unlikely (v_any_u64 (special_cases)))
return special_case (y, x, ret, sign_xy, special_cases);
/* Account for the sign of x and y. */
- ret = vreinterpretq_f64_u64 (
+ return vreinterpretq_f64_u64 (
veorq_u64 (vreinterpretq_u64_f64 (ret), sign_xy));
-
- return ret;
}