author     k4lizen <k4lizen@proton.me>                2024-11-29 13:25:29 +0000
committer  Wilco Dijkstra <wilco.dijkstra@arm.com>    2024-11-29 13:27:13 +0000
commit     e2436d6f5aa47ce8da80c2ba0f59dfb9ffde08f3 (patch)
tree       e461ce183d8028251d35c3ef24934fcf191b6142
parent     a08d9a52f967531a77e1824c23b5368c6434a72d (diff)
malloc: send freed small chunks to smallbin
Large chunks get added to the unsorted bin since
sorting them takes time; for small chunks, the
benefit of adding them to the unsorted bin is
non-existent and actually hurts performance.
Splitting and malloc_consolidate still add small
chunks to the unsorted bin, but we can hint to
the compiler that this is a relatively rare
occurrence.
Benchmarking shows the change to be consistently
beneficial.
Authored-by: k4lizen <k4lizen@proton.me>
Signed-off-by: Aleksa Siriški <sir@tmina.org>
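The benchmark itself is not part of the commit. As a minimal sketch of the kind of workload that exercises the changed path, consider the micro-benchmark below; the request size (256 bytes, which sits in smallbin range but beyond fastbin range under default tunables) and the iteration counts are illustrative assumptions, not taken from the patch:

/* Illustrative micro-benchmark (not from the commit): frees many
   same-sized small chunks so that the bulk of them fall through the
   tcache into _int_free, where this patch's binning decision applies.  */
#include <stdlib.h>
#include <stdio.h>
#include <time.h>

#define NCHUNKS 100000   /* far more than the 7 tcache slots per size */
#define REQ     256      /* smallbin-range request, above fastbin sizes */

int
main (void)
{
  static void *ptrs[NCHUNKS];
  struct timespec t0, t1;

  clock_gettime (CLOCK_MONOTONIC, &t0);
  for (int round = 0; round < 100; round++)
    {
      for (int i = 0; i < NCHUNKS; i++)
        ptrs[i] = malloc (REQ);
      /* Only the first few frees stay in the per-thread cache; the
         rest reach _int_free and, with this patch, go straight to
         their smallbin instead of the unsorted bin.  */
      for (int i = 0; i < NCHUNKS; i++)
        free (ptrs[i]);
    }
  clock_gettime (CLOCK_MONOTONIC, &t1);

  double secs = (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9;
  printf ("%.3f s\n", secs);
  return 0;
}

Note that consecutive frees of adjacent chunks may consolidate into larger chunks that leave smallbin range, which is exactly the "consecutive adjacent frees" case the patch's branch ordering anticipates.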
-rw-r--r-- | malloc/malloc.c | 53
1 file changed, 34 insertions(+), 19 deletions(-)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 81ddd2c..287fa09 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -4209,9 +4209,9 @@ _int_malloc (mstate av, size_t bytes)
 #endif
         }
 
-      /* place chunk in bin */
-
-      if (in_smallbin_range (size))
+      /* Place chunk in bin. Only malloc_consolidate() and splitting can put
+         small chunks into the unsorted bin. */
+      if (__glibc_unlikely (in_smallbin_range (size)))
         {
           victim_index = smallbin_index (size);
           bck = bin_at (av, victim_index);
@@ -4760,23 +4760,39 @@ _int_free_create_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size,
       }
     else
       clear_inuse_bit_at_offset(nextchunk, 0);
 
-    /*
-      Place the chunk in unsorted chunk list. Chunks are
-      not placed into regular bins until after they have
-      been given one chance to be used in malloc.
-    */
+    mchunkptr bck, fwd;
+
+    if (!in_smallbin_range (size))
+      {
+        /* Place large chunks in unsorted chunk list. Large chunks are
+           not placed into regular bins until after they have
+           been given one chance to be used in malloc.
+
+           This branch is first in the if-statement to help branch
+           prediction on consecutive adjacent frees. */
+        bck = unsorted_chunks (av);
+        fwd = bck->fd;
+        if (__glibc_unlikely (fwd->bk != bck))
+          malloc_printerr ("free(): corrupted unsorted chunks");
+        p->fd_nextsize = NULL;
+        p->bk_nextsize = NULL;
+      }
+    else
+      {
+        /* Place small chunks directly in their smallbin, so they
+           don't pollute the unsorted bin. */
+        int chunk_index = smallbin_index (size);
+        bck = bin_at (av, chunk_index);
+        fwd = bck->fd;
+
+        if (__glibc_unlikely (fwd->bk != bck))
+          malloc_printerr ("free(): chunks in smallbin corrupted");
+
+        mark_bin (av, chunk_index);
+      }
 
-    mchunkptr bck = unsorted_chunks (av);
-    mchunkptr fwd = bck->fd;
-    if (__glibc_unlikely (fwd->bk != bck))
-      malloc_printerr ("free(): corrupted unsorted chunks");
-    p->fd = fwd;
     p->bk = bck;
-    if (!in_smallbin_range(size))
-      {
-        p->fd_nextsize = NULL;
-        p->bk_nextsize = NULL;
-      }
+    p->fd = fwd;
     bck->fd = p;
     fwd->bk = p;
@@ -4785,7 +4801,6 @@ _int_free_create_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size,
 
       check_free_chunk(av, p);
     }
-
   else
     {
       /* If the chunk borders the current high end of memory,
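For orientation, the insertion both branches feed into (p->bk = bck; p->fd = fwd; bck->fd = p; fwd->bk = p;) is a head insertion into a circular doubly linked list anchored at the bin, guarded by the fwd->bk != bck integrity check. The following self-contained model is a simplified sketch of that mechanism only; the struct and function names are illustrative stand-ins, not glibc's real mchunkptr/bin_at machinery (real chunks carry size and flag metadata, and the bins live in the arena's bins array):

/* Simplified model of the smallbin head insertion performed by the
   patch.  Not glibc code; names are illustrative.  */
#include <stdio.h>
#include <stdlib.h>

struct chunk
{
  struct chunk *fd;   /* next chunk, towards the front of the bin */
  struct chunk *bk;   /* previous chunk, towards the back of the bin */
};

/* Insert P at the front of the circular list anchored by BIN, with the
   same corruption check the patch uses: the first chunk's bk pointer
   must still point back at the bin head.  */
static void
smallbin_insert (struct chunk *bin, struct chunk *p)
{
  struct chunk *bck = bin;
  struct chunk *fwd = bck->fd;

  if (fwd->bk != bck)
    {
      fprintf (stderr, "free(): chunks in smallbin corrupted\n");
      abort ();
    }

  p->bk = bck;
  p->fd = fwd;
  bck->fd = p;
  fwd->bk = p;
}

int
main (void)
{
  /* An empty bin is a list whose head points at itself.  */
  struct chunk bin = { &bin, &bin };
  struct chunk a, b;

  smallbin_insert (&bin, &a);   /* bin <-> a */
  smallbin_insert (&bin, &b);   /* bin <-> b <-> a: newest chunk first */

  printf ("front of bin is b: %d\n", bin.fd == &b);
  printf ("b->fd is a:        %d\n", b.fd == &a);
  return 0;
}

Allocation takes chunks from the opposite end of the list (victim = last (bin), i.e. bin->bk), so each smallbin serves chunks in FIFO order.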