aboutsummaryrefslogtreecommitdiff
path: root/malloc
diff options
context:
space:
mode:
authorAlejandro Colomar <alx@kernel.org>2024-11-16 16:51:31 +0100
committerAdhemerval Zanella <adhemerval.zanella@linaro.org>2024-11-25 16:45:59 -0300
commit53fcdf5f743aa9b02972eec658e66f96d6a63386 (patch)
treee1bd3ed90d89027abe4b8ba6f0dbffd833f08a9b /malloc
parent83d4b42ded712bbbc22ceeefe886b8315190da5b (diff)
downloadglibc-53fcdf5f743aa9b02972eec658e66f96d6a63386.zip
glibc-53fcdf5f743aa9b02972eec658e66f96d6a63386.tar.gz
glibc-53fcdf5f743aa9b02972eec658e66f96d6a63386.tar.bz2
Silence most -Wzero-as-null-pointer-constant diagnostics
Replace 0 by NULL and {0} by {}. Omit a few cases that aren't so trivial to fix. Link: <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=117059> Link: <https://software.codidact.com/posts/292718/292759#answer-292759> Signed-off-by: Alejandro Colomar <alx@kernel.org>
Diffstat (limited to 'malloc')
-rw-r--r--malloc/arena.c14
-rw-r--r--malloc/malloc-check.c4
-rw-r--r--malloc/malloc.c56
-rw-r--r--malloc/obstack.c16
-rw-r--r--malloc/reallocarray.c2
5 files changed, 46 insertions, 46 deletions
diff --git a/malloc/arena.c b/malloc/arena.c
index cfb1ff8..91a43ee 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -389,7 +389,7 @@ alloc_new_heap (size_t size, size_t top_pad, size_t pagesize,
else if (size + top_pad <= max_size)
size += top_pad;
else if (size > max_size)
- return 0;
+ return NULL;
else
size = max_size;
size = ALIGN_UP (size, pagesize);
@@ -411,7 +411,7 @@ alloc_new_heap (size_t size, size_t top_pad, size_t pagesize,
}
if (p2 == MAP_FAILED)
{
- p1 = (char *) MMAP (0, max_size << 1, PROT_NONE, mmap_flags);
+ p1 = (char *) MMAP (NULL, max_size << 1, PROT_NONE, mmap_flags);
if (p1 != MAP_FAILED)
{
p2 = (char *) (((uintptr_t) p1 + (max_size - 1))
@@ -427,21 +427,21 @@ alloc_new_heap (size_t size, size_t top_pad, size_t pagesize,
{
/* Try to take the chance that an allocation of only max_size
is already aligned. */
- p2 = (char *) MMAP (0, max_size, PROT_NONE, mmap_flags);
+ p2 = (char *) MMAP (NULL, max_size, PROT_NONE, mmap_flags);
if (p2 == MAP_FAILED)
- return 0;
+ return NULL;
if ((unsigned long) p2 & (max_size - 1))
{
__munmap (p2, max_size);
- return 0;
+ return NULL;
}
}
}
if (__mprotect (p2, size, mtag_mmap_flags | PROT_READ | PROT_WRITE) != 0)
{
__munmap (p2, max_size);
- return 0;
+ return NULL;
}
/* Only consider the actual usable range.
@@ -644,7 +644,7 @@ _int_new_arena (size_t size)
to deal with the large request via mmap_chunk(). */
h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
if (!h)
- return 0;
+ return NULL;
}
a = h->ar_ptr = (mstate) (h + 1);
malloc_init_state (a);
diff --git a/malloc/malloc-check.c b/malloc/malloc-check.c
index da1158b..6ac71df 100644
--- a/malloc/malloc-check.c
+++ b/malloc/malloc-check.c
@@ -245,7 +245,7 @@ static void *
realloc_check (void *oldmem, size_t bytes)
{
INTERNAL_SIZE_T chnb;
- void *newmem = 0;
+ void *newmem = NULL;
unsigned char *magic_p;
size_t rb;
@@ -254,7 +254,7 @@ realloc_check (void *oldmem, size_t bytes)
__set_errno (ENOMEM);
return NULL;
}
- if (oldmem == 0)
+ if (oldmem == NULL)
return malloc_check (bytes);
if (bytes == 0)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 06c7847..32dbc27 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -369,7 +369,7 @@
#include "morecore.c"
#define MORECORE (*__glibc_morecore)
-#define MORECORE_FAILURE 0
+#define MORECORE_FAILURE NULL
/* Memory tagging. */
@@ -2420,7 +2420,7 @@ sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
if ((unsigned long) (size) <= (unsigned long) (nb))
return MAP_FAILED;
- char *mm = (char *) MMAP (0, size,
+ char *mm = (char *) MMAP (NULL, size,
mtag_mmap_flags | PROT_READ | PROT_WRITE,
extra_flags);
if (mm == MAP_FAILED)
@@ -2507,7 +2507,7 @@ sysmalloc_mmap_fallback (long int *s, INTERNAL_SIZE_T nb,
if ((unsigned long) (size) <= (unsigned long) (nb))
return MORECORE_FAILURE;
- char *mbrk = (char *) (MMAP (0, size,
+ char *mbrk = (char *) (MMAP (NULL, size,
mtag_mmap_flags | PROT_READ | PROT_WRITE,
extra_flags));
if (mbrk == MAP_FAILED)
@@ -2583,7 +2583,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
/* There are no usable arenas and mmap also failed. */
if (av == NULL)
- return 0;
+ return NULL;
/* Record incoming configuration of top */
@@ -2743,7 +2743,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
if (brk != (char *) (MORECORE_FAILURE))
{
- if (mp_.sbrk_base == 0)
+ if (mp_.sbrk_base == NULL)
mp_.sbrk_base = brk;
av->system_mem += size;
@@ -2942,7 +2942,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
/* catch all failure paths */
__set_errno (ENOMEM);
- return 0;
+ return NULL;
}
@@ -3080,7 +3080,7 @@ mremap_chunk (mchunkptr p, size_t new_size)
MREMAP_MAYMOVE);
if (cp == MAP_FAILED)
- return 0;
+ return NULL;
madvise_thp (cp, new_size);
@@ -3295,7 +3295,7 @@ static void
tcache_init(void)
{
mstate ar_ptr;
- void *victim = 0;
+ void *victim = NULL;
const size_t bytes = sizeof (tcache_perthread_struct);
if (tcache_shutting_down)
@@ -3413,7 +3413,7 @@ __libc_free (void *mem)
mstate ar_ptr;
mchunkptr p; /* chunk corresponding to mem */
- if (mem == 0) /* free(0) has no effect */
+ if (mem == NULL) /* free(0) has no effect */
return;
/* Quickly check that the freed pointer matches the tag for the memory.
@@ -3469,12 +3469,12 @@ __libc_realloc (void *oldmem, size_t bytes)
#if REALLOC_ZERO_BYTES_FREES
if (bytes == 0 && oldmem != NULL)
{
- __libc_free (oldmem); return 0;
+ __libc_free (oldmem); return NULL;
}
#endif
/* realloc of null is supposed to be same as malloc */
- if (oldmem == 0)
+ if (oldmem == NULL)
return __libc_malloc (bytes);
/* Perform a quick check to ensure that the pointer's tag matches the
@@ -3548,8 +3548,8 @@ __libc_realloc (void *oldmem, size_t bytes)
/* Must alloc, copy, free. */
newmem = __libc_malloc (bytes);
- if (newmem == 0)
- return 0; /* propagate failure */
+ if (newmem == NULL)
+ return NULL; /* propagate failure */
memcpy (newmem, oldmem, oldsize - CHUNK_HDR_SZ);
munmap_chunk (oldp);
@@ -3617,7 +3617,7 @@ aligned_alloc (size_t alignment, size_t bytes)
if (!powerof2 (alignment) || alignment == 0)
{
__set_errno (EINVAL);
- return 0;
+ return NULL;
}
void *address = RETURN_ADDRESS (0);
@@ -3643,7 +3643,7 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
if (alignment > SIZE_MAX / 2 + 1)
{
__set_errno (EINVAL);
- return 0;
+ return NULL;
}
@@ -3740,7 +3740,7 @@ __libc_pvalloc (size_t bytes)
&rounded_bytes)))
{
__set_errno (ENOMEM);
- return 0;
+ return NULL;
}
rounded_bytes = rounded_bytes & -(pagesize - 1);
@@ -3801,7 +3801,7 @@ __libc_calloc (size_t n, size_t elem_size)
else
{
/* No usable arenas. */
- oldtop = 0;
+ oldtop = NULL;
oldtopsize = 0;
}
mem = _int_malloc (av, sz);
@@ -3811,7 +3811,7 @@ __libc_calloc (size_t n, size_t elem_size)
if (!SINGLE_THREAD_P)
{
- if (mem == 0 && av != NULL)
+ if (mem == NULL && av != NULL)
{
LIBC_PROBE (memory_calloc_retry, 1, sz);
av = arena_get_retry (av, sz);
@@ -3823,8 +3823,8 @@ __libc_calloc (size_t n, size_t elem_size)
}
/* Allocation failed even after a retry. */
- if (mem == 0)
- return 0;
+ if (mem == NULL)
+ return NULL;
mchunkptr p = mem2chunk (mem);
@@ -4056,7 +4056,7 @@ _int_malloc (mstate av, size_t bytes)
while (tcache->counts[tc_idx] < mp_.tcache_count
&& (tc_victim = last (bin)) != bin)
{
- if (tc_victim != 0)
+ if (tc_victim != NULL)
{
bck = tc_victim->bk;
set_inuse_bit_at_offset (tc_victim, nb);
@@ -4876,7 +4876,7 @@ static void malloc_consolidate(mstate av)
fb = &fastbin (av, 0);
do {
p = atomic_exchange_acquire (fb, NULL);
- if (p != 0) {
+ if (p != NULL) {
do {
{
if (__glibc_unlikely (misaligned_chunk (p)))
@@ -4935,7 +4935,7 @@ static void malloc_consolidate(mstate av)
av->top = p;
}
- } while ( (p = nextp) != 0);
+ } while ( (p = nextp) != NULL);
}
} while (fb++ != maxfb);
@@ -5010,8 +5010,8 @@ _int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
else
{
newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
- if (newmem == 0)
- return 0; /* propagate failure */
+ if (newmem == NULL)
+ return NULL; /* propagate failure */
newp = mem2chunk (newmem);
newsize = chunksize (newp);
@@ -5105,8 +5105,8 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
/* Call malloc with worst case padding to hit alignment. */
m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
- if (m == 0)
- return 0; /* propagate failure */
+ if (m == NULL)
+ return NULL; /* propagate failure */
p = mem2chunk (m);
@@ -5318,7 +5318,7 @@ int_mallinfo (mstate av, struct mallinfo2 *m)
for (i = 0; i < NFASTBINS; ++i)
{
for (p = fastbin (av, i);
- p != 0;
+ p != NULL;
p = REVEAL_PTR (p->fd))
{
if (__glibc_unlikely (misaligned_chunk (p)))
diff --git a/malloc/obstack.c b/malloc/obstack.c
index 579c693..6e8299a 100644
--- a/malloc/obstack.c
+++ b/malloc/obstack.c
@@ -107,7 +107,7 @@ int obstack_exit_failure = EXIT_FAILURE;
/* A looong time ago (before 1994, anyway; we're not sure) this global variable
was used by non-GNU-C macros to avoid multiple evaluation. The GNU C
library still exports it because somebody might use it. */
-struct obstack *_obstack_compat = 0;
+struct obstack *_obstack_compat = NULL;
compat_symbol (libc, _obstack_compat, _obstack, GLIBC_2_0);
# endif
# endif
@@ -180,7 +180,7 @@ _obstack_begin (struct obstack *h,
alignment - 1);
h->chunk_limit = chunk->limit
= (char *) chunk + h->chunk_size;
- chunk->prev = 0;
+ chunk->prev = NULL;
/* The initial chunk now contains no empty object. */
h->maybe_empty_object = 0;
h->alloc_failed = 0;
@@ -228,7 +228,7 @@ _obstack_begin_1 (struct obstack *h, int size, int alignment,
alignment - 1);
h->chunk_limit = chunk->limit
= (char *) chunk + h->chunk_size;
- chunk->prev = 0;
+ chunk->prev = NULL;
/* The initial chunk now contains no empty object. */
h->maybe_empty_object = 0;
h->alloc_failed = 0;
@@ -328,12 +328,12 @@ _obstack_allocated_p (struct obstack *h, void *obj)
/* We use >= rather than > since the object cannot be exactly at
the beginning of the chunk but might be an empty object exactly
at the end of an adjacent chunk. */
- while (lp != 0 && ((void *) lp >= obj || (void *) (lp)->limit < obj))
+ while (lp != NULL && ((void *) lp >= obj || (void *) (lp)->limit < obj))
{
plp = lp->prev;
lp = plp;
}
- return lp != 0;
+ return lp != NULL;
}
/* Free objects in obstack H, including OBJ and everything allocate
@@ -351,7 +351,7 @@ __obstack_free (struct obstack *h, void *obj)
/* We use >= because there cannot be an object at the beginning of a chunk.
But there can be an empty object at that address
at the end of another chunk. */
- while (lp != 0 && ((void *) lp >= obj || (void *) (lp)->limit < obj))
+ while (lp != NULL && ((void *) lp >= obj || (void *) (lp)->limit < obj))
{
plp = lp->prev;
CALL_FREEFUN (h, lp);
@@ -366,7 +366,7 @@ __obstack_free (struct obstack *h, void *obj)
h->chunk_limit = lp->limit;
h->chunk = lp;
}
- else if (obj != 0)
+ else if (obj != NULL)
/* obj is not in any of the chunks! */
abort ();
}
@@ -383,7 +383,7 @@ _obstack_memory_used (struct obstack *h)
struct _obstack_chunk *lp;
int nbytes = 0;
- for (lp = h->chunk; lp != 0; lp = lp->prev)
+ for (lp = h->chunk; lp != NULL; lp = lp->prev)
{
nbytes += lp->limit - (char *) lp;
}
diff --git a/malloc/reallocarray.c b/malloc/reallocarray.c
index 2a5f2a4..0118f88 100644
--- a/malloc/reallocarray.c
+++ b/malloc/reallocarray.c
@@ -27,7 +27,7 @@ __libc_reallocarray (void *optr, size_t nmemb, size_t elem_size)
if (__builtin_mul_overflow (nmemb, elem_size, &bytes))
{
__set_errno (ENOMEM);
- return 0;
+ return NULL;
}
return realloc (optr, bytes);
}