author		Joseph Myers <joseph@codesourcery.com>	2018-09-20 21:11:10 +0000
committer	Joseph Myers <joseph@codesourcery.com>	2018-09-20 21:11:10 +0000
commit		7abf97bed9c24464a8a68fb9f9fe8d1e55c6b54c (patch)
tree		bd95af810312e1718aeb0f9b652bf5b260998540 /sysdeps/ieee754/ldbl-128ibm
parent		d90c9b1a121295a5e31810b899ab637f68898857 (diff)
Use trunc functions not __trunc functions in glibc libm.
Continuing the move to use, within libm, public names for libm functions that can be inlined as built-in functions on many architectures, this patch moves calls to __trunc functions to call the corresponding trunc names instead, with asm redirection to __trunc when the calls are not inlined.

Tested for x86_64, and with build-many-glibcs.py.

	* include/math.h [!_ISOMAC && !(__FINITE_MATH_ONLY__ && __FINITE_MATH_ONLY__ > 0) && !NO_MATH_REDIRECT] (trunc): Redirect using MATH_REDIRECT.
	* sysdeps/aarch64/fpu/s_trunc.c: Define NO_MATH_REDIRECT before header inclusion.
	* sysdeps/aarch64/fpu/s_truncf.c: Likewise.
	* sysdeps/ieee754/dbl-64/wordsize-64/s_trunc.c: Likewise.
	* sysdeps/ieee754/float128/s_truncf128.c: Likewise.
	* sysdeps/ieee754/dbl-64/s_trunc.c: Likewise.
	* sysdeps/ieee754/flt-32/s_truncf.c: Likewise.
	* sysdeps/ieee754/ldbl-128/s_truncl.c: Likewise.
	* sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_trunc.c: Likewise.
	* sysdeps/powerpc/powerpc32/power4/fpu/multiarch/s_truncf.c: Likewise.
	* sysdeps/powerpc/powerpc64/fpu/multiarch/s_trunc.c: Likewise.
	* sysdeps/powerpc/powerpc64/fpu/multiarch/s_truncf.c: Likewise.
	* sysdeps/riscv/rv64/rvd/s_trunc.c: Likewise.
	* sysdeps/riscv/rvf/s_truncf.c: Likewise.
	* sysdeps/sparc/sparc64/fpu/multiarch/s_trunc.c: Likewise.
	* sysdeps/sparc/sparc64/fpu/multiarch/s_truncf.c: Likewise.
	* sysdeps/x86_64/fpu/multiarch/s_trunc.c: Likewise.
	* sysdeps/x86_64/fpu/multiarch/s_truncf.c: Likewise.
	* sysdeps/m68k/m680x0/fpu/s_trunc_template.c: Likewise.
	* sysdeps/ieee754/ldbl-128ibm/s_truncl.c: Likewise.
	(ceil): Redirect to __ceil.
	(floor): Redirect to __floor.
	(trunc): Redirect to __trunc.
	(__truncl): Call trunc instead of __trunc.
	* sysdeps/powerpc/fpu/math_private.h [_ARCH_PWR5X] (__trunc): Remove macro.
	[_ARCH_PWR5X] (__truncf): Likewise.
	* sysdeps/ieee754/dbl-64/e_gamma_r.c (__ieee754_gamma_r): Use trunc functions instead of __trunc variants.
	* sysdeps/ieee754/flt-32/e_gammaf_r.c (__ieee754_gammaf_r): Likewise.
	* sysdeps/ieee754/ldbl-128/e_gammal_r.c (__ieee754_gammal_r): Likewise.
	* sysdeps/ieee754/ldbl-128ibm/e_gammal_r.c (__ieee754_gammal_r): Likewise.
	* sysdeps/ieee754/ldbl-96/e_gammal_r.c (__ieee754_gammal_r): Likewise.
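To make the mechanism the commit message describes concrete, here is a hedged, self-contained sketch of the asm-redirection idea. The names my_trunc and __my_trunc are made up for illustration and stand in for trunc/__trunc; the real redirect is the MATH_REDIRECT machinery in include/math.h, disabled per file with NO_MATH_REDIRECT. The sketch assumes the GNU C asm-label extension, the same one used in the s_truncl.c hunk further down:

/* Public name, redirected: any call to my_trunc that the compiler does
   not inline is emitted as a call to the assembler symbol __my_trunc.
   (Hypothetical names; in glibc the pair is trunc/__trunc.)  */
double my_trunc (double) asm ("__my_trunc");

/* Internal implementation, defined directly under the internal symbol
   name.  The body is a deliberately simplified placeholder: correct
   only for values that fit in long long, unlike the real __trunc.  */
double
__my_trunc (double x)
{
  return (double) (long long) x;
}

int
main (void)
{
  /* In glibc, writing the call against the public name lets GCC expand
     it as a built-in on many architectures; here, with a made-up name,
     the call simply binds to __my_trunc, the non-inlined fallback.  */
  return my_trunc (2.9) == 2.0 ? 0 : 1;
}

The s_truncl.c change below applies the same pattern locally: it defines NO_MATH_REDIRECT so include/math.h does not redirect the function this file itself provides, then declares ceil, floor and trunc with asm labels so the file's own calls still reach __ceil, __floor and __trunc.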
Diffstat (limited to 'sysdeps/ieee754/ldbl-128ibm')
-rw-r--r--  sysdeps/ieee754/ldbl-128ibm/e_gammal_r.c  4
-rw-r--r--  sysdeps/ieee754/ldbl-128ibm/s_truncl.c    7
2 files changed, 8 insertions, 3 deletions
diff --git a/sysdeps/ieee754/ldbl-128ibm/e_gammal_r.c b/sysdeps/ieee754/ldbl-128ibm/e_gammal_r.c
index c1e0efa..3680121 100644
--- a/sysdeps/ieee754/ldbl-128ibm/e_gammal_r.c
+++ b/sysdeps/ieee754/ldbl-128ibm/e_gammal_r.c
@@ -179,8 +179,8 @@ __ieee754_gammal_r (long double x, int *signgamp)
}
else
{
- long double tx = __truncl (x);
- *signgamp = (tx == 2.0L * __truncl (tx / 2.0L)) ? -1 : 1;
+ long double tx = truncl (x);
+ *signgamp = (tx == 2.0L * truncl (tx / 2.0L)) ? -1 : 1;
if (x <= -191.0L)
/* Underflow. */
ret = LDBL_MIN * LDBL_MIN;
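Read on its own, the sign computation in the new lines above works because gammal (x) alternates sign on successive unit intervals left of zero, so for a negative non-integer x it is negative exactly when truncl (x) is even; the expression tx == 2.0L * truncl (tx / 2.0L) is true precisely for even integers tx. A standalone restatement follows, with a made-up helper name that is not part of glibc (link with -lm if truncl is not expanded as a built-in):

#include <math.h>
#include <stdio.h>

/* Hypothetical helper mirroring the hunk above: returns -1 when
   truncl (x) is even, 1 when it is odd.  For negative non-integer x
   this is the sign of gammal (x).  */
static int
signgam_negative_noninteger (long double x)
{
  long double tx = truncl (x);
  /* tx == 2 * truncl (tx / 2) holds exactly when tx is an even integer.  */
  return (tx == 2.0L * truncl (tx / 2.0L)) ? -1 : 1;
}

int
main (void)
{
  /* gammal is negative on (-1, 0) and (-3, -2), positive on (-2, -1),
     so this prints -1 1 -1.  */
  printf ("%d %d %d\n",
          signgam_negative_noninteger (-0.5L),
          signgam_negative_noninteger (-1.5L),
          signgam_negative_noninteger (-2.5L));
  return 0;
}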
diff --git a/sysdeps/ieee754/ldbl-128ibm/s_truncl.c b/sysdeps/ieee754/ldbl-128ibm/s_truncl.c
index 9646567..3d658d3 100644
--- a/sysdeps/ieee754/ldbl-128ibm/s_truncl.c
+++ b/sysdeps/ieee754/ldbl-128ibm/s_truncl.c
@@ -17,12 +17,17 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
+#define NO_MATH_REDIRECT
#include <math.h>
#include <math_private.h>
#include <math_ldbl_opt.h>
#include <float.h>
#include <ieee754.h>
+double ceil (double) asm ("__ceil");
+double floor (double) asm ("__floor");
+double trunc (double) asm ("__trunc");
+
long double
__truncl (long double x)
@@ -36,7 +41,7 @@ __truncl (long double x)
&& __builtin_isless (__builtin_fabs (xh),
__builtin_inf ()), 1))
{
- hi = __trunc (xh);
+ hi = trunc (xh);
if (hi != xh)
{
/* The high part is not an integer; the low part does not