path: root/math
author    Joseph Myers <joseph@codesourcery.com>  2015-10-28 21:42:52 +0000
committer Joseph Myers <joseph@codesourcery.com>  2015-10-28 21:42:52 +0000
commit    1f4dafa3ea24df63a6550dbdbbe8769caa7a9fc3 (patch)
tree      70f341e615048fa98c3cc33efa340dcc2143b65f /math
parent    a62719ba90e2fa1728890ae7dc8df9e32a622e7b (diff)
Use C11 *_TRUE_MIN macros where applicable.
C11 defines standard <float.h> macros *_TRUE_MIN for the least
positive subnormal value of a type.  Now that we build with
-std=gnu11, we can use these macros in glibc.  This patch replaces
previous uses of the GCC predefines __*_DENORM_MIN__ (used in
<float.h> to define *_TRUE_MIN), as well as *_DENORM_MIN references
in comments.

Tested for x86_64 and x86 (testsuite, and that installed shared
libraries are unchanged by the patch).  Also tested for powerpc that
installed stripped shared libraries are unchanged by the patch.

	* math/libm-test.inc (min_subnorm_value): Use LDBL_TRUE_MIN,
	DBL_TRUE_MIN and FLT_TRUE_MIN instead of __LDBL_DENORM_MIN__,
	__DBL_DENORM_MIN__ and __FLT_DENORM_MIN__.
	* sysdeps/ieee754/dbl-64/s_fma.c (__fma): Refer to DBL_TRUE_MIN
	instead of DBL_DENORM_MIN in comment.
	* sysdeps/ieee754/ldbl-128/s_fmal.c (__fmal): Refer to
	LDBL_TRUE_MIN instead of LDBL_DENORM_MIN in comment.
	* sysdeps/ieee754/ldbl-128ibm/s_nextafterl.c: Include <float.h>.
	(__nextafterl): Use LDBL_TRUE_MIN instead of
	__LDBL_DENORM_MIN__.
	* sysdeps/ieee754/ldbl-96/s_fmal.c (__fmal): Refer to
	LDBL_TRUE_MIN instead of LDBL_DENORM_MIN in comment.
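[Editor's note: a minimal standalone sketch, not part of the patch,
showing the C11 <float.h> macros the commit switches to.  Build with
-std=c11 or -std=gnu11; the values in the comments assume IEEE 754
binary32/binary64 formats.]

/* Print the least positive subnormal value of each type.  */
#include <float.h>
#include <stdio.h>

int
main (void)
{
  printf ("FLT_TRUE_MIN  = %a\n", (double) FLT_TRUE_MIN);  /* 0x1p-149 on IEEE binary32.  */
  printf ("DBL_TRUE_MIN  = %a\n", DBL_TRUE_MIN);           /* 0x1p-1074 on IEEE binary64.  */
  printf ("LDBL_TRUE_MIN = %La\n", LDBL_TRUE_MIN);         /* Format-dependent.  */
  return 0;
}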
Diffstat (limited to 'math')
-rw-r--r--	math/libm-test.inc	12
1 file changed, 6 insertions, 6 deletions
diff --git a/math/libm-test.inc b/math/libm-test.inc
index 6cce5fc..631b205 100644
--- a/math/libm-test.inc
+++ b/math/libm-test.inc
@@ -315,12 +315,12 @@ static int ignore_max_ulp; /* Should we ignore max_ulp? */
 					     LDBL_MAX, DBL_MAX, FLT_MAX)
 #define min_value	CHOOSE (LDBL_MIN, DBL_MIN, FLT_MIN,	\
 			        LDBL_MIN, DBL_MIN, FLT_MIN)
-#define min_subnorm_value CHOOSE (__LDBL_DENORM_MIN__,	\
-				  __DBL_DENORM_MIN__,	\
-				  __FLT_DENORM_MIN__,	\
-				  __LDBL_DENORM_MIN__,	\
-				  __DBL_DENORM_MIN__,	\
-				  __FLT_DENORM_MIN__)
+#define min_subnorm_value CHOOSE (LDBL_TRUE_MIN,	\
+				  DBL_TRUE_MIN,		\
+				  FLT_TRUE_MIN,		\
+				  LDBL_TRUE_MIN,	\
+				  DBL_TRUE_MIN,		\
+				  FLT_TRUE_MIN)
 static FLOAT max_error, real_max_error, imag_max_error;
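
[Editor's note: an illustrative sketch, not from the commit, of why
the tests keep min_subnorm_value separate from min_value: *_TRUE_MIN
lies strictly below *_MIN, the least positive normalized value, and
classifies as subnormal.  Assumes IEEE 754 binary64 with
round-to-nearest.]

#include <assert.h>
#include <float.h>
#include <math.h>

int
main (void)
{
  /* The least subnormal is positive but below the least normal.  */
  assert (DBL_TRUE_MIN > 0.0 && DBL_TRUE_MIN < DBL_MIN);
  assert (fpclassify (DBL_TRUE_MIN) == FP_SUBNORMAL);
  assert (fpclassify (DBL_MIN) == FP_NORMAL);

  /* Half of the least subnormal is a tie between 0 and DBL_TRUE_MIN;
     ties-to-even rounds it to zero.  */
  volatile double half = DBL_TRUE_MIN / 2.0;
  assert (half == 0.0);
  return 0;
}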