author     DJ Delorie <dj@redhat.com>    2023-04-03 17:33:03 -0400
committer  DJ Delorie <dj@redhat.com>    2023-04-18 10:58:42 -0400
commit     e5524ef335dc8e28d64cc376d57c219e566fcf53 (patch)
tree       92fc25b1af0bbc50710619d42aeba02c17baa0c7 /malloc/malloc.c
parent     8895a99c10349e5b0bb57b64c229389169a312e0 (diff)
download   glibc-e5524ef335dc8e28d64cc376d57c219e566fcf53.zip
           glibc-e5524ef335dc8e28d64cc376d57c219e566fcf53.tar.gz
           glibc-e5524ef335dc8e28d64cc376d57c219e566fcf53.tar.bz2
malloc: set NON_MAIN_ARENA flag for reclaimed memalign chunk (BZ #30101)
Based on these comments in malloc.c:

   size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
   from a non-main arena.  This is only set immediately before handing
   the chunk to the user, if necessary.

   The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
   does not have to be taken into account in size comparisons.
When we pull a chunk off the unsorted list (or any list), we need to
make sure that flag is set properly before returning the chunk.
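As a minimal standalone sketch of that rule (a simplified struct, not glibc's real malloc_chunk; the 0x4 flag value matches the NON_MAIN_ARENA definition in malloc.c):

#include <assert.h>
#include <stddef.h>

#define NON_MAIN_ARENA 0x4  /* same bit malloc.c OR's into the size field */

struct chunk { size_t size; };  /* size word with flag bits OR'ed in */

/* Chunks sitting in bins never carry NON_MAIN_ARENA; it must be set
   (only) when handing the chunk to the user from a non-main arena,
   which is the step the reclaimed-memalign path was missing.  */
static void
hand_out (struct chunk *p, int from_non_main_arena)
{
  if (from_non_main_arena)
    p->size |= NON_MAIN_ARENA;
}

int
main (void)
{
  struct chunk c = { .size = 64 };   /* as it sat in a bin: flag clear */
  hand_out (&c, 1);
  assert (c.size & NON_MAIN_ARENA);  /* free() uses this bit to find
                                        the owning arena */
  return 0;
}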
Use the rounded-up size for chunk_ok_for_memalign()
Do not scan the arena for reusable chunks if there's no arena.
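A minimal sketch of that guard, using simplified stand-ins for glibc's mstate and mchunkptr types (av can be NULL when no arena could be obtained and the allocation falls back to mmap):

#include <stddef.h>

typedef struct chunk { struct chunk *fd, *bk; size_t size; } *mchunkptr;
typedef struct arena { mchunkptr bins[254]; } *mstate;

/* Scan the arena's bins for a chunk that can hold an aligned block
   of nb bytes; with no arena there is nothing to scan.  */
static mchunkptr
find_aligned_victim (mstate av, size_t alignment, size_t nb)
{
  mchunkptr victim = NULL;
  (void) alignment;
  (void) nb;

  if (av != NULL)   /* the guard the patch adds around the whole scan */
    {
      /* ... walk the small and large bins as in the diff below ... */
    }

  return victim;    /* NULL: caller falls back to an oversized malloc */
}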
Account for chunk overhead when determining if a chunk is a reuse
candidate.
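Both the "rounded-up size" and "chunk overhead" items come down to comparing candidate chunks against the chunk size nb (user request plus header, rounded to the malloc alignment) rather than the raw user request. A small self-contained example of glibc-style rounding; the constants are illustrative for a typical 64-bit build and are not taken from the patch:

#include <stdio.h>
#include <stddef.h>

#define SIZE_SZ            sizeof (size_t)  /* one size word of overhead */
#define MALLOC_ALIGNMENT   (2 * SIZE_SZ)
#define MALLOC_ALIGN_MASK  (MALLOC_ALIGNMENT - 1)
#define MINSIZE            (4 * SIZE_SZ)

/* Round a user request up to a usable chunk size, like request2size.  */
static size_t
request2size (size_t req)
{
  size_t nb = (req + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK;
  return nb < MINSIZE ? MINSIZE : nb;
}

int
main (void)
{
  size_t bytes = 100;                /* user request */
  size_t nb = request2size (bytes);  /* 112 on a 64-bit build */
  /* A 104-byte chunk "fits" 100 user bytes but not the 112-byte
     chunk actually needed -- the undercount the patch fixes.  */
  printf ("user=%zu chunk=%zu\n", bytes, nb);
  return 0;
}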
mcheck interferes with memalign, so skip mcheck variants of
memalign tests.
Reviewed-by: Carlos O'Donell <carlos@redhat.com>
Tested-by: Carlos O'Donell <carlos@redhat.com>
Diffstat (limited to 'malloc/malloc.c')
-rw-r--r--  malloc/malloc.c  157
1 file changed, 81 insertions(+), 76 deletions(-)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 0315ac5..e33ed66 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -4974,13 +4974,13 @@ _int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
 
 /* Returns 0 if the chunk is not and does not contain the requested
    aligned sub-chunk, else returns the amount of "waste" from
-   trimming.  BYTES is the *user* byte size, not the chunk byte
+   trimming.  NB is the *chunk* byte size, not the user byte
    size.  */
 static size_t
-chunk_ok_for_memalign (mchunkptr p, size_t alignment, size_t bytes)
+chunk_ok_for_memalign (mchunkptr p, size_t alignment, size_t nb)
 {
   void *m = chunk2mem (p);
-  INTERNAL_SIZE_T size = memsize (p);
+  INTERNAL_SIZE_T size = chunksize (p);
   void *aligned_m = m;
 
   if (__glibc_unlikely (misaligned_chunk (p)))
@@ -4997,12 +4997,12 @@ chunk_ok_for_memalign (mchunkptr p, size_t alignment, size_t bytes)
   /* If it's a perfect fit, it's an exception to the return value rule
      (we would return zero waste, which looks like "not usable"), so
      handle it here by returning a small non-zero value instead.  */
-  if (size == bytes && front_extra == 0)
+  if (size == nb && front_extra == 0)
     return 1;
 
   /* If the block we need fits in the chunk, calculate total waste.  */
-  if (size > bytes + front_extra)
-    return size - bytes;
+  if (size > nb + front_extra)
+    return size - nb;
 
   /* Can't use this chunk.  */
   return 0;
@@ -5048,95 +5048,98 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
      and unlikely to meet our alignment requirements.  We have not done
      any experimentation with searching for aligned fastbins.  */
 
-  int first_bin_index;
-  int first_largebin_index;
-  int last_bin_index;
+  if (av != NULL)
+    {
+      int first_bin_index;
+      int first_largebin_index;
+      int last_bin_index;
 
-  if (in_smallbin_range (nb))
-    first_bin_index = smallbin_index (nb);
-  else
-    first_bin_index = largebin_index (nb);
+      if (in_smallbin_range (nb))
+        first_bin_index = smallbin_index (nb);
+      else
+        first_bin_index = largebin_index (nb);
 
-  if (in_smallbin_range (nb * 2))
-    last_bin_index = smallbin_index (nb * 2);
-  else
-    last_bin_index = largebin_index (nb * 2);
+      if (in_smallbin_range (nb * 2))
+        last_bin_index = smallbin_index (nb * 2);
+      else
+        last_bin_index = largebin_index (nb * 2);
 
-  first_largebin_index = largebin_index (MIN_LARGE_SIZE);
+      first_largebin_index = largebin_index (MIN_LARGE_SIZE);
 
-  int victim_index;                 /* its bin index */
+      int victim_index;                 /* its bin index */
 
-  for (victim_index = first_bin_index;
-       victim_index < last_bin_index;
-       victim_index ++)
-    {
-      victim = NULL;
+      for (victim_index = first_bin_index;
+           victim_index < last_bin_index;
+           victim_index ++)
+        {
+          victim = NULL;
 
-      if (victim_index < first_largebin_index)
-        {
-          /* Check small bins.  Small bin chunks are doubly-linked despite
-             being the same size.  */
+          if (victim_index < first_largebin_index)
+            {
+              /* Check small bins.  Small bin chunks are doubly-linked despite
+                 being the same size.  */
 
-          mchunkptr fwd;                    /* misc temp for linking */
-          mchunkptr bck;                    /* misc temp for linking */
+              mchunkptr fwd;                    /* misc temp for linking */
+              mchunkptr bck;                    /* misc temp for linking */
 
-          bck = bin_at (av, victim_index);
-          fwd = bck->fd;
-          while (fwd != bck)
-            {
-              if (chunk_ok_for_memalign (fwd, alignment, bytes) > 0)
-                {
-                  victim = fwd;
+              bck = bin_at (av, victim_index);
+              fwd = bck->fd;
+              while (fwd != bck)
+                {
+                  if (chunk_ok_for_memalign (fwd, alignment, nb) > 0)
+                    {
+                      victim = fwd;
 
-                  /* Unlink it */
-                  victim->fd->bk = victim->bk;
-                  victim->bk->fd = victim->fd;
-                  break;
+                      /* Unlink it */
+                      victim->fd->bk = victim->bk;
+                      victim->bk->fd = victim->fd;
+                      break;
+                    }
+
+                  fwd = fwd->fd;
+                }
+            }
+          else
+            {
+              /* Check large bins.  */
+              mchunkptr fwd;                    /* misc temp for linking */
+              mchunkptr bck;                    /* misc temp for linking */
+              mchunkptr best = NULL;
+              size_t best_size = 0;
 
-              fwd = fwd->fd;
-            }
-        }
-      else
-        {
-          /* Check large bins.  */
-          mchunkptr fwd;                    /* misc temp for linking */
-          mchunkptr bck;                    /* misc temp for linking */
-          mchunkptr best = NULL;
-          size_t best_size = 0;
+              bck = bin_at (av, victim_index);
+              fwd = bck->fd;
 
-          bck = bin_at (av, victim_index);
-          fwd = bck->fd;
+              while (fwd != bck)
+                {
+                  int extra;
 
-          while (fwd != bck)
-            {
-              int extra;
+                  if (chunksize (fwd) < nb)
+                    break;
+                  extra = chunk_ok_for_memalign (fwd, alignment, nb);
+                  if (extra > 0
+                      && (extra <= best_size || best == NULL))
+                    {
+                      best = fwd;
+                      best_size = extra;
+                    }
 
-              if (chunksize (fwd) < nb)
-                break;
-              extra = chunk_ok_for_memalign (fwd, alignment, bytes);
-              if (extra > 0
-                  && (extra <= best_size || best == NULL))
-                {
-                  best = fwd;
-                  best_size = extra;
-                }
+                  fwd = fwd->fd;
+                }
+              victim = best;
 
-              fwd = fwd->fd;
-            }
-          victim = best;
+              if (victim != NULL)
+                {
+                  unlink_chunk (av, victim);
+                  break;
+                }
+            }
 
-          if (victim != NULL)
-            {
-              unlink_chunk (av, victim);
-              break;
+          if (victim != NULL)
+            break;
         }
     }
 
-  if (victim != NULL)
-    break;
-    }
-
   /* Strategy: find a spot within that chunk that meets the alignment
      request, and then possibly free the leading and trailing space.
      This strategy is incredibly costly and can lead to external
@@ -5147,6 +5150,8 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
       p = victim;
       m = chunk2mem (p);
       set_inuse (p);
+      if (av != &main_arena)
+        set_non_main_arena (p);
     }
   else
     {
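For context, a hedged reproduction sketch of the underlying bug (BZ 30101): in a thread, allocations come from a non-main arena, and before this patch a chunk reclaimed from a bin by memalign was handed out without NON_MAIN_ARENA set, so the subsequent free() consulted the wrong arena. Whether the reclaim path is actually hit depends on tcache and bin state, so this is illustrative rather than deterministic, and it is not the test the patch adds (build with -pthread):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static void *
worker (void *arg)
{
  void *blocks[64];
  (void) arg;

  /* Fill the thread's (non-main) arena, then free every other block
     so the holes cannot consolidate and can land in the bins.  */
  for (int i = 0; i < 64; i++)
    blocks[i] = malloc (160);
  for (int i = 0; i < 64; i += 2)
    free (blocks[i]);

  /* A differently-sized request encourages the unsorted chunks to be
     sorted into the small bins that _int_memalign scans.  */
  void *spacer = malloc (512);

  /* memalign may now reclaim one of the binned chunks; before the
     fix, that chunk came back without NON_MAIN_ARENA set and this
     free() would account it to the wrong arena.  */
  void *p = NULL;
  if (posix_memalign (&p, 64, 100) == 0)
    {
      memset (p, 0xaa, 100);
      free (p);
    }

  free (spacer);
  for (int i = 1; i < 64; i += 2)
    free (blocks[i]);
  return NULL;
}

int
main (void)
{
  pthread_t t;
  if (pthread_create (&t, NULL, worker, NULL) != 0)
    return 1;
  pthread_join (t, NULL);
  return 0;
}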