author     Adhemerval Zanella Netto <adhemerval.zanella@linaro.org>  2023-03-23 10:13:51 -0300
committer  Adhemerval Zanella <adhemerval.zanella@linaro.org>        2023-03-29 14:33:06 -0300
commit     33237fe83d553dff111915024c9670adc3f06048 (patch)
tree       610ddd2195dca3c3901cfcc74f76e4ab6eb6c0e7 /malloc
parent     6384171fa0cef59b738ce8d0499fcea4f5009411 (diff)
Remove --enable-tunables configure option
And make tunables always supported.  The configure option was added in glibc 2.25 and some features require it (such as the hwcap mask, huge page support, and lock elision tuning).  Removing it also simplifies the build permutations.

Changes from v1:
* Remove the glibc.rtld.dynamic_sort changes; they are orthogonal and need more discussion.
* Clean up more code.

Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
Diffstat (limited to 'malloc')
-rw-r--r--  malloc/Makefile        |   2
-rw-r--r--  malloc/arena.c         | 118
-rw-r--r--  malloc/malloc-check.c  |   8
-rw-r--r--  malloc/malloc.c        |  14
4 files changed, 5 insertions(+), 137 deletions(-)
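The MALLOC_* environment parsing removed from arena.c and malloc-check.c below duplicates settings that the tunables framework exposes under the glibc.malloc namespace, read from GLIBC_TUNABLES at process start (the MALLOC_* names should still be honored there as tunable aliases). As an illustration, with arbitrary values:

  GLIBC_TUNABLES=glibc.malloc.top_pad=131072:glibc.malloc.check=3 ./app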
diff --git a/malloc/Makefile b/malloc/Makefile
index f685fc6..0717df6 100644
--- a/malloc/Makefile
+++ b/malloc/Makefile
@@ -61,9 +61,7 @@ tests-internal += \
tst-dynarray-fail \
tst-dynarray-at-fail \
-ifneq (no,$(have-tunables))
tests += tst-malloc-usable-tunables tst-mxfast
-endif
tests += $(tests-static)
test-srcs = tst-mtrace
diff --git a/malloc/arena.c b/malloc/arena.c
index ccd1939..e98b779 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -18,9 +18,7 @@
#include <stdbool.h>
-#if HAVE_TUNABLES
-# define TUNABLE_NAMESPACE malloc
-#endif
+#define TUNABLE_NAMESPACE malloc
#include <elf/dl-tunables.h>
/* Compile-time constants. */
@@ -47,21 +45,13 @@
static inline size_t
heap_min_size (void)
{
-#if HAVE_TUNABLES
return mp_.hp_pagesize == 0 ? HEAP_MIN_SIZE : mp_.hp_pagesize;
-#else
- return HEAP_MIN_SIZE;
-#endif
}
static inline size_t
heap_max_size (void)
{
-#if HAVE_TUNABLES
return mp_.hp_pagesize == 0 ? HEAP_MAX_SIZE : mp_.hp_pagesize * 4;
-#else
- return HEAP_MAX_SIZE;
-#endif
}
/***************************************************************************/
@@ -239,8 +229,7 @@ __malloc_fork_unlock_child (void)
__libc_lock_init (list_lock);
}
-#if HAVE_TUNABLES
-# define TUNABLE_CALLBACK_FNDECL(__name, __type) \
+#define TUNABLE_CALLBACK_FNDECL(__name, __type) \
static inline int do_ ## __name (__type value); \
static void \
TUNABLE_CALLBACK (__name) (tunable_val_t *valp) \
@@ -263,42 +252,6 @@ TUNABLE_CALLBACK_FNDECL (set_tcache_unsorted_limit, size_t)
#endif
TUNABLE_CALLBACK_FNDECL (set_mxfast, size_t)
TUNABLE_CALLBACK_FNDECL (set_hugetlb, size_t)
-#else
-/* Initialization routine. */
-#include <string.h>
-extern char **_environ;
-
-static char *
-next_env_entry (char ***position)
-{
- char **current = *position;
- char *result = NULL;
-
- while (*current != NULL)
- {
- if (__builtin_expect ((*current)[0] == 'M', 0)
- && (*current)[1] == 'A'
- && (*current)[2] == 'L'
- && (*current)[3] == 'L'
- && (*current)[4] == 'O'
- && (*current)[5] == 'C'
- && (*current)[6] == '_')
- {
- result = &(*current)[7];
-
- /* Save current position for next visit. */
- *position = ++current;
-
- break;
- }
-
- ++current;
- }
-
- return result;
-}
-#endif
-
#if USE_TCACHE
static void tcache_key_initialize (void);
@@ -343,7 +296,6 @@ ptmalloc_init (void)
malloc_init_state (&main_arena);
-#if HAVE_TUNABLES
TUNABLE_GET (top_pad, size_t, TUNABLE_CALLBACK (set_top_pad));
TUNABLE_GET (perturb, int32_t, TUNABLE_CALLBACK (set_perturb_byte));
TUNABLE_GET (mmap_threshold, size_t, TUNABLE_CALLBACK (set_mmap_threshold));
@@ -363,70 +315,6 @@ ptmalloc_init (void)
/* Force mmap for main arena instead of sbrk, so hugepages are explicitly
used. */
__always_fail_morecore = true;
-#else
- if (__glibc_likely (_environ != NULL))
- {
- char **runp = _environ;
- char *envline;
-
- while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
- 0))
- {
- size_t len = strcspn (envline, "=");
-
- if (envline[len] != '=')
- /* This is a "MALLOC_" variable at the end of the string
- without a '=' character. Ignore it since otherwise we
- will access invalid memory below. */
- continue;
-
- switch (len)
- {
- case 8:
- if (!__builtin_expect (__libc_enable_secure, 0))
- {
- if (memcmp (envline, "TOP_PAD_", 8) == 0)
- __libc_mallopt (M_TOP_PAD, strtol (&envline[9], NULL, 10));
- else if (memcmp (envline, "PERTURB_", 8) == 0)
- __libc_mallopt (M_PERTURB, strtol (&envline[9], NULL, 10));
- }
- break;
- case 9:
- if (!__builtin_expect (__libc_enable_secure, 0))
- {
- if (memcmp (envline, "MMAP_MAX_", 9) == 0)
- __libc_mallopt (M_MMAP_MAX, strtol (&envline[10],
- NULL, 10));
- else if (memcmp (envline, "ARENA_MAX", 9) == 0)
- __libc_mallopt (M_ARENA_MAX, strtol (&envline[10],
- NULL, 10));
- }
- break;
- case 10:
- if (!__builtin_expect (__libc_enable_secure, 0))
- {
- if (memcmp (envline, "ARENA_TEST", 10) == 0)
- __libc_mallopt (M_ARENA_TEST, strtol (&envline[11],
- NULL, 10));
- }
- break;
- case 15:
- if (!__builtin_expect (__libc_enable_secure, 0))
- {
- if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
- __libc_mallopt (M_TRIM_THRESHOLD, strtol (&envline[16],
- NULL, 10));
- else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
- __libc_mallopt (M_MMAP_THRESHOLD, strtol (&envline[16],
- NULL, 10));
- }
- break;
- default:
- break;
- }
- }
- }
-#endif
}
/* Managing heaps and arenas (for concurrent threads) */
@@ -561,7 +449,6 @@ alloc_new_heap (size_t size, size_t top_pad, size_t pagesize,
static heap_info *
new_heap (size_t size, size_t top_pad)
{
-#if HAVE_TUNABLES
if (__glibc_unlikely (mp_.hp_pagesize != 0))
{
heap_info *h = alloc_new_heap (size, top_pad, mp_.hp_pagesize,
@@ -569,7 +456,6 @@ new_heap (size_t size, size_t top_pad)
if (h != NULL)
return h;
}
-#endif
return alloc_new_heap (size, top_pad, GLRO (dl_pagesize), 0);
}
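The body of TUNABLE_CALLBACK_FNDECL is cut off by the hunk above; conceptually it wraps an existing do_* setter in the callback signature that TUNABLE_GET expects. A rough sketch of the expansion for set_mxfast, assuming the tunable_val_t numval field from elf/dl-tunables.h (the authoritative definition is the macro in arena.c):

#define TUNABLE_NAMESPACE malloc
#include <elf/dl-tunables.h>

/* Forward declaration of the setter that malloc.c defines.  */
static inline int do_set_mxfast (size_t value);

/* Callback invoked via TUNABLE_GET (mxfast, size_t, ...): convert the
   stored numeric value and forward it to the setter.  */
static void
TUNABLE_CALLBACK (set_mxfast) (tunable_val_t *valp)
{
  size_t value = (size_t) valp->numval;
  do_set_mxfast (value);
}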
diff --git a/malloc/malloc-check.c b/malloc/malloc-check.c
index af27fd2..5732d81 100644
--- a/malloc/malloc-check.c
+++ b/malloc/malloc-check.c
@@ -376,7 +376,6 @@ memalign_check (size_t alignment, size_t bytes)
return mem2mem_check (tag_new_usable (mem), bytes);
}
-#if HAVE_TUNABLES
static void
TUNABLE_CALLBACK (set_mallopt_check) (tunable_val_t *valp)
{
@@ -384,7 +383,6 @@ TUNABLE_CALLBACK (set_mallopt_check) (tunable_val_t *valp)
if (value != 0)
__malloc_debug_enable (MALLOC_CHECK_HOOK);
}
-#endif
static bool
initialize_malloc_check (void)
@@ -392,12 +390,6 @@ initialize_malloc_check (void)
/* This is the copy of the malloc initializer that we pulled in along with
malloc-check. This does not affect any of the libc malloc structures. */
ptmalloc_init ();
-#if HAVE_TUNABLES
TUNABLE_GET (check, int32_t, TUNABLE_CALLBACK (set_mallopt_check));
-#else
- const char *s = secure_getenv ("MALLOC_CHECK_");
- if (s && s[0] != '\0' && s[0] != '0')
- __malloc_debug_enable (MALLOC_CHECK_HOOK);
-#endif
return __is_malloc_debug_enabled (MALLOC_CHECK_HOOK);
}
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 76c50e3..05e65a2 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1856,14 +1856,12 @@ struct malloc_par
INTERNAL_SIZE_T arena_test;
INTERNAL_SIZE_T arena_max;
-#if HAVE_TUNABLES
/* Transparent Large Page support. */
INTERNAL_SIZE_T thp_pagesize;
/* A value different than 0 means to align mmap allocations to hp_pagesize
and to add hp_flags to the mmap flags.  */
INTERNAL_SIZE_T hp_pagesize;
int hp_flags;
-#endif
/* Memory map support */
int n_mmaps;
@@ -1998,7 +1996,7 @@ free_perturb (char *p, size_t n)
static inline void
madvise_thp (void *p, INTERNAL_SIZE_T size)
{
-#if HAVE_TUNABLES && defined (MADV_HUGEPAGE)
+#ifdef MADV_HUGEPAGE
/* Do not consider areas smaller than a huge page or if the tunable is
not active. */
if (mp_.thp_pagesize == 0 || size < mp_.thp_pagesize)
@@ -2557,7 +2555,6 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
&& (mp_.n_mmaps < mp_.n_mmaps_max)))
{
char *mm;
-#if HAVE_TUNABLES
if (mp_.hp_pagesize > 0 && nb >= mp_.hp_pagesize)
{
/* There is no need to issue the THP madvise call if Huge Pages are
@@ -2566,7 +2563,6 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
if (mm != MAP_FAILED)
return mm;
}
-#endif
mm = sysmalloc_mmap (nb, pagesize, 0, av);
if (mm != MAP_FAILED)
return mm;
@@ -2679,7 +2675,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
previous calls. Otherwise, we correct to page-align below.
*/
-#if HAVE_TUNABLES && defined (MADV_HUGEPAGE)
+#ifdef MADV_HUGEPAGE
/* Defined in brk.c. */
extern void *__curbrk;
if (__glibc_unlikely (mp_.thp_pagesize != 0))
@@ -2718,12 +2714,10 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
*/
char *mbrk = MAP_FAILED;
-#if HAVE_TUNABLES
if (mp_.hp_pagesize > 0)
mbrk = sysmalloc_mmap_fallback (&size, nb, old_size,
mp_.hp_pagesize, mp_.hp_pagesize,
mp_.hp_flags, av);
-#endif
if (mbrk == MAP_FAILED)
mbrk = sysmalloc_mmap_fallback (&size, nb, old_size, MMAP_AS_MORECORE_SIZE,
pagesize, 0, av);
@@ -2966,7 +2960,7 @@ systrim (size_t pad, mstate av)
return 0;
/* Release in pagesize units and round down to the nearest page. */
-#if HAVE_TUNABLES && defined (MADV_HUGEPAGE)
+#ifdef MADV_HUGEPAGE
if (__glibc_unlikely (mp_.thp_pagesize != 0))
extra = ALIGN_DOWN (top_area - pad, mp_.thp_pagesize);
else
@@ -5410,7 +5404,6 @@ do_set_mxfast (size_t value)
return 0;
}
-#if HAVE_TUNABLES
static __always_inline int
do_set_hugetlb (size_t value)
{
@@ -5429,7 +5422,6 @@ do_set_hugetlb (size_t value)
&mp_.hp_flags);
return 0;
}
-#endif
int
__libc_mallopt (int param_number, int value)
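The malloc.c hunks above make the huge-page support unconditional: the hp_pagesize/hp_flags/thp_pagesize fields always exist in malloc_par, and madvise_thp and the sbrk alignment path now depend only on MADV_HUGEPAGE being defined. A standalone sketch of the underlying transparent-huge-page advice pattern (not glibc's exact code; thp_size stands in for mp_.thp_pagesize, and the real madvise_thp also fixes up misaligned starts):

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* Ask the kernel to back an already-mapped, suitably aligned region with
   transparent huge pages when it is large enough to contain at least one.  */
static void
advise_thp (void *p, size_t size, size_t thp_size)
{
#ifdef MADV_HUGEPAGE
  if (thp_size != 0 && size >= thp_size
      && ((uintptr_t) p & (thp_size - 1)) == 0)
    (void) madvise (p, size, MADV_HUGEPAGE);
#endif
}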