-rw-r--r--   ChangeLog       |  9
-rw-r--r--   malloc/arena.c  | 52
-rw-r--r--   malloc/hooks.c  | 28
-rw-r--r--   malloc/malloc.c | 40
4 files changed, 69 insertions, 60 deletions
diff --git a/ChangeLog b/ChangeLog
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,12 @@
+2016-09-06  Florian Weimer  <fweimer@redhat.com>
+
+	Convert malloc to __libc_lock.  Automated part, using this Perl
+	s/// command:
+	  s/(?:\(void\)\s*)?mutex_((?:|un|try)lock|init)
+	    \s*\(\&([^\)]+)\)/__libc_lock_$1\ ($2)/gx;
+	* malloc/malloc.c, malloc/arena.c, malloc/hooks.c: Perform
+	conversion.
+
 2016-09-05  Aurelien Jarno  <aurelien@aurel32.net>
 
 	* conform/Makefile (conformtest-header-tests): Pass -I. to $(PERL).
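A note on what the substitution changes besides the name: the old mutex_* calls took a pointer to the mutex, while the __libc_lock_* macros take the lock object itself and apply the address-of operator internally, which is why the pattern strips the "&" and the discarded-result "(void)" casts. A minimal sketch of that calling-convention change, using hypothetical my_lock_* wrappers over POSIX mutexes rather than glibc's actual macro definitions:

#include <pthread.h>

/* Hypothetical stand-ins for the __libc_lock_* style; the macro
   takes the lock object by name and adds the '&' itself.  */
typedef pthread_mutex_t my_lock_t;
#define my_lock_init(NAME)    pthread_mutex_init (&(NAME), NULL)
#define my_lock_lock(NAME)    pthread_mutex_lock (&(NAME))
#define my_lock_trylock(NAME) pthread_mutex_trylock (&(NAME))
#define my_lock_unlock(NAME)  pthread_mutex_unlock (&(NAME))

static my_lock_t list_lock;

static void
convention_demo (void)
{
  my_lock_init (list_lock);    /* before: mutex_init (&list_lock);          */
  my_lock_lock (list_lock);    /* before: (void) mutex_lock (&list_lock);   */
  /* ... critical section ... */
  my_lock_unlock (list_lock);  /* before: (void) mutex_unlock (&list_lock); */
}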
diff --git a/malloc/arena.c b/malloc/arena.c
index 4e16593..922ae49 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -112,7 +112,7 @@ int __malloc_initialized = -1;
 
 #define arena_lock(ptr, size) do {            \
       if (ptr && !arena_is_corrupt (ptr))     \
-        (void) mutex_lock (&ptr->mutex);      \
+        __libc_lock_lock (ptr->mutex);        \
       else                                    \
         ptr = arena_get2 ((size), NULL);      \
   } while (0)
@@ -145,11 +145,11 @@ __malloc_fork_lock_parent (void)
   /* We do not acquire free_list_lock here because we completely
      reconstruct free_list in __malloc_fork_unlock_child.  */
 
-  (void) mutex_lock (&list_lock);
+  __libc_lock_lock (list_lock);
 
   for (mstate ar_ptr = &main_arena;; )
     {
-      (void) mutex_lock (&ar_ptr->mutex);
+      __libc_lock_lock (ar_ptr->mutex);
       ar_ptr = ar_ptr->next;
       if (ar_ptr == &main_arena)
         break;
@@ -165,12 +165,12 @@ __malloc_fork_unlock_parent (void)
 
   for (mstate ar_ptr = &main_arena;; )
     {
-      (void) mutex_unlock (&ar_ptr->mutex);
+      __libc_lock_unlock (ar_ptr->mutex);
       ar_ptr = ar_ptr->next;
       if (ar_ptr == &main_arena)
         break;
     }
-  (void) mutex_unlock (&list_lock);
+  __libc_lock_unlock (list_lock);
 }
 
 void
@@ -182,13 +182,13 @@ __malloc_fork_unlock_child (void)
 
   /* Push all arenas to the free list, except thread_arena, which is
      attached to the current thread.  */
-  mutex_init (&free_list_lock);
+  __libc_lock_init (free_list_lock);
   if (thread_arena != NULL)
     thread_arena->attached_threads = 1;
   free_list = NULL;
   for (mstate ar_ptr = &main_arena;; )
     {
-      mutex_init (&ar_ptr->mutex);
+      __libc_lock_init (ar_ptr->mutex);
       if (ar_ptr != thread_arena)
         {
           /* This arena is no longer attached to any thread.  */
@@ -201,7 +201,7 @@ __malloc_fork_unlock_child (void)
         break;
     }
 
-  mutex_init (&list_lock);
+  __libc_lock_init (list_lock);
 }
 
 /* Initialization routine.  */
@@ -668,9 +668,9 @@ _int_new_arena (size_t size)
   LIBC_PROBE (memory_arena_new, 2, a, size);
   mstate replaced_arena = thread_arena;
   thread_arena = a;
-  mutex_init (&a->mutex);
+  __libc_lock_init (a->mutex);
 
-  (void) mutex_lock (&list_lock);
+  __libc_lock_lock (list_lock);
 
   /* Add the new arena to the global list.  */
   a->next = main_arena.next;
@@ -680,11 +680,11 @@ _int_new_arena (size_t size)
   atomic_write_barrier ();
   main_arena.next = a;
 
-  (void) mutex_unlock (&list_lock);
+  __libc_lock_unlock (list_lock);
 
-  (void) mutex_lock (&free_list_lock);
+  __libc_lock_lock (free_list_lock);
   detach_arena (replaced_arena);
-  (void) mutex_unlock (&free_list_lock);
+  __libc_lock_unlock (free_list_lock);
 
   /* Lock this arena.  NB: Another thread may have been attached to
      this arena because the arena is now accessible from the
@@ -696,7 +696,7 @@ _int_new_arena (size_t size)
      but this could result in a deadlock with
      __malloc_fork_lock_parent.  */
-  (void) mutex_lock (&a->mutex);
+  __libc_lock_lock (a->mutex);
 
   return a;
 }
@@ -710,7 +710,7 @@ get_free_list (void)
   mstate result = free_list;
   if (result != NULL)
     {
-      (void) mutex_lock (&free_list_lock);
+      __libc_lock_lock (free_list_lock);
       result = free_list;
       if (result != NULL)
         {
@@ -722,12 +722,12 @@ get_free_list (void)
           detach_arena (replaced_arena);
         }
 
-      (void) mutex_unlock (&free_list_lock);
+      __libc_lock_unlock (free_list_lock);
 
       if (result != NULL)
         {
           LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
-          (void) mutex_lock (&result->mutex);
+          __libc_lock_lock (result->mutex);
           thread_arena = result;
         }
     }
@@ -772,7 +772,7 @@ reused_arena (mstate avoid_arena)
   result = next_to_use;
   do
     {
-      if (!arena_is_corrupt (result) && !mutex_trylock (&result->mutex))
+      if (!arena_is_corrupt (result) && !__libc_lock_trylock (result->mutex))
         goto out;
 
       /* FIXME: This is a data race, see _int_new_arena.  */
@@ -799,14 +799,14 @@ reused_arena (mstate avoid_arena)
   /* No arena available without contention.  Wait for the next in line.  */
   LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
-  (void) mutex_lock (&result->mutex);
+  __libc_lock_lock (result->mutex);
 
 out:
   /* Attach the arena to the current thread.  */
   {
     /* Update the arena thread attachment counters.  */
     mstate replaced_arena = thread_arena;
-    (void) mutex_lock (&free_list_lock);
+    __libc_lock_lock (free_list_lock);
     detach_arena (replaced_arena);
 
     /* We may have picked up an arena on the free list.  We need to
@@ -821,7 +821,7 @@ out:
 
     ++result->attached_threads;
 
-    (void) mutex_unlock (&free_list_lock);
+    __libc_lock_unlock (free_list_lock);
   }
 
   LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
@@ -892,17 +892,17 @@ arena_get_retry (mstate ar_ptr, size_t bytes)
   LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
   if (ar_ptr != &main_arena)
     {
-      (void) mutex_unlock (&ar_ptr->mutex);
+      __libc_lock_unlock (ar_ptr->mutex);
       /* Don't touch the main arena if it is corrupt.  */
       if (arena_is_corrupt (&main_arena))
         return NULL;
 
       ar_ptr = &main_arena;
-      (void) mutex_lock (&ar_ptr->mutex);
+      __libc_lock_lock (ar_ptr->mutex);
     }
   else
     {
-      (void) mutex_unlock (&ar_ptr->mutex);
+      __libc_lock_unlock (ar_ptr->mutex);
       ar_ptr = arena_get2 (bytes, ar_ptr);
     }
 
@@ -917,7 +917,7 @@ arena_thread_freeres (void)
 
   if (a != NULL)
     {
-      (void) mutex_lock (&free_list_lock);
+      __libc_lock_lock (free_list_lock);
       /* If this was the last attached thread for this arena, put the
          arena on the free list.  */
       assert (a->attached_threads > 0);
@@ -926,7 +926,7 @@ arena_thread_freeres (void)
           a->next_free = free_list;
           free_list = a;
         }
-      (void) mutex_unlock (&free_list_lock);
+      __libc_lock_unlock (free_list_lock);
     }
 }
 text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
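The three __malloc_fork_* hunks above follow the standard pthread_atfork discipline: the parent acquires every lock before fork so no lock is held mid-operation at the moment of the fork, the parent releases them all afterwards, and the child, where only the forking thread survives, simply reinitializes the locks it now owns exclusively. A minimal sketch of that pattern under stated assumptions (two plain mutexes instead of glibc's arena list, hypothetical handler names):

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* Parent, before fork: quiesce the subsystem by taking every lock.  */
static void
prepare (void)
{
  pthread_mutex_lock (&lock_a);
  pthread_mutex_lock (&lock_b);
}

/* Parent, after fork: release everything taken in prepare.  */
static void
parent (void)
{
  pthread_mutex_unlock (&lock_a);
  pthread_mutex_unlock (&lock_b);
}

/* Child, after fork: reinitialize rather than unlock, since the
   child holds fresh copies and no other thread exists.  */
static void
child (void)
{
  pthread_mutex_init (&lock_a, NULL);
  pthread_mutex_init (&lock_b, NULL);
}

static void
install_fork_handlers (void)
{
  pthread_atfork (prepare, parent, child);
}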
diff --git a/malloc/hooks.c b/malloc/hooks.c
index caa1e70..0fdcffd 100644
--- a/malloc/hooks.c
+++ b/malloc/hooks.c
@@ -291,9 +291,9 @@ malloc_check (size_t sz, const void *caller)
       return NULL;
     }
 
-  (void) mutex_lock (&main_arena.mutex);
+  __libc_lock_lock (main_arena.mutex);
   victim = (top_check () >= 0) ? _int_malloc (&main_arena, sz + 1) : NULL;
-  (void) mutex_unlock (&main_arena.mutex);
+  __libc_lock_unlock (main_arena.mutex);
   return mem2mem_check (victim, sz);
 }
 
@@ -305,11 +305,11 @@ free_check (void *mem, const void *caller)
   if (!mem)
     return;
 
-  (void) mutex_lock (&main_arena.mutex);
+  __libc_lock_lock (main_arena.mutex);
   p = mem2chunk_check (mem, NULL);
   if (!p)
     {
-      (void) mutex_unlock (&main_arena.mutex);
+      __libc_lock_unlock (main_arena.mutex);
 
       malloc_printerr (check_action, "free(): invalid pointer", mem,
                        &main_arena);
@@ -317,12 +317,12 @@ free_check (void *mem, const void *caller)
     }
   if (chunk_is_mmapped (p))
     {
-      (void) mutex_unlock (&main_arena.mutex);
+      __libc_lock_unlock (main_arena.mutex);
       munmap_chunk (p);
       return;
     }
   _int_free (&main_arena, p, 1);
-  (void) mutex_unlock (&main_arena.mutex);
+  __libc_lock_unlock (main_arena.mutex);
 }
 
 static void *
@@ -345,9 +345,9 @@ realloc_check (void *oldmem, size_t bytes, const void *caller)
       free_check (oldmem, NULL);
       return NULL;
     }
-  (void) mutex_lock (&main_arena.mutex);
+  __libc_lock_lock (main_arena.mutex);
   const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
-  (void) mutex_unlock (&main_arena.mutex);
+  __libc_lock_unlock (main_arena.mutex);
   if (!oldp)
     {
       malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
@@ -357,7 +357,7 @@ realloc_check (void *oldmem, size_t bytes, const void *caller)
   const INTERNAL_SIZE_T oldsize = chunksize (oldp);
 
   checked_request2size (bytes + 1, nb);
-  (void) mutex_lock (&main_arena.mutex);
+  __libc_lock_lock (main_arena.mutex);
 
   if (chunk_is_mmapped (oldp))
     {
@@ -400,7 +400,7 @@ realloc_check (void *oldmem, size_t bytes, const void *caller)
   if (newmem == NULL)
     *magic_p ^= 0xFF;
 
-  (void) mutex_unlock (&main_arena.mutex);
+  __libc_lock_unlock (main_arena.mutex);
 
   return mem2mem_check (newmem, bytes);
 }
@@ -440,10 +440,10 @@ memalign_check (size_t alignment, size_t bytes, const void *caller)
       alignment = a;
     }
 
-  (void) mutex_lock (&main_arena.mutex);
+  __libc_lock_lock (main_arena.mutex);
   mem = (top_check () >= 0) ? _int_memalign (&main_arena, alignment, bytes + 1)
                             : NULL;
-  (void) mutex_unlock (&main_arena.mutex);
+  __libc_lock_unlock (main_arena.mutex);
   return mem2mem_check (mem, bytes);
 }
 
@@ -503,7 +503,7 @@ __malloc_get_state (void)
   if (!ms)
     return 0;
 
-  (void) mutex_lock (&main_arena.mutex);
+  __libc_lock_lock (main_arena.mutex);
   malloc_consolidate (&main_arena);
   ms->magic = MALLOC_STATE_MAGIC;
   ms->version = MALLOC_STATE_VERSION;
@@ -540,7 +540,7 @@ __malloc_get_state (void)
   ms->arena_test = mp_.arena_test;
   ms->arena_max = mp_.arena_max;
   ms->narenas = narenas;
-  (void) mutex_unlock (&main_arena.mutex);
+  __libc_lock_unlock (main_arena.mutex);
   return (void *) ms;
 }
 
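The hooks.c conversions all share one shape: every checked entry point serializes on the single main_arena lock around the internal allocator call, then validates or annotates the result outside the lock (malloc_check requests sz + 1 bytes so mem2mem_check can plant a trailing byte). A rough sketch of that shape, with hypothetical names (internal_alloc and the canary value are stand-ins, not glibc's actual scheme):

#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>

static pthread_mutex_t checked_lock = PTHREAD_MUTEX_INITIALIZER;

static void *
internal_alloc (size_t sz)          /* stands in for _int_malloc */
{
  return malloc (sz);
}

static void *
checked_alloc (size_t sz)
{
  void *victim;

  /* Serialize the internal allocation on one global lock,
     as malloc_check does with main_arena.mutex.  */
  pthread_mutex_lock (&checked_lock);
  victim = internal_alloc (sz + 1); /* one extra byte for a marker */
  pthread_mutex_unlock (&checked_lock);

  /* Annotate the result outside the lock.  */
  if (victim != NULL)
    ((unsigned char *) victim)[sz] = 0xA5;
  return victim;
}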
diff --git a/malloc/malloc.c b/malloc/malloc.c
index bb52b3e..20acb73 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -2870,7 +2870,7 @@ __libc_malloc (size_t bytes)
     }
 
   if (ar_ptr != NULL)
-    (void) mutex_unlock (&ar_ptr->mutex);
+    __libc_lock_unlock (ar_ptr->mutex);
 
   assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
           ar_ptr == arena_for_chunk (mem2chunk (victim)));
@@ -3012,11 +3012,11 @@ __libc_realloc (void *oldmem, size_t bytes)
       return newmem;
     }
 
-  (void) mutex_lock (&ar_ptr->mutex);
+  __libc_lock_lock (ar_ptr->mutex);
 
   newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
 
-  (void) mutex_unlock (&ar_ptr->mutex);
+  __libc_lock_unlock (ar_ptr->mutex);
   assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
           ar_ptr == arena_for_chunk (mem2chunk (newp)));
 
@@ -3098,7 +3098,7 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
     }
 
   if (ar_ptr != NULL)
-    (void) mutex_unlock (&ar_ptr->mutex);
+    __libc_lock_unlock (ar_ptr->mutex);
 
   assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
           ar_ptr == arena_for_chunk (mem2chunk (p)));
@@ -3219,7 +3219,7 @@ __libc_calloc (size_t n, size_t elem_size)
     }
 
   if (av != NULL)
-    (void) mutex_unlock (&av->mutex);
+    __libc_lock_unlock (av->mutex);
 
   /* Allocation failed even after a retry.  */
   if (mem == 0)
@@ -3835,7 +3835,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
       errstr = "free(): invalid pointer";
     errout:
       if (!have_lock && locked)
-        (void) mutex_unlock (&av->mutex);
+        __libc_lock_unlock (av->mutex);
       malloc_printerr (check_action, errstr, chunk2mem (p), av);
       return;
     }
@@ -3874,7 +3874,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
          after getting the lock.  */
       if (have_lock
           || ({ assert (locked == 0);
-                mutex_lock(&av->mutex);
+                __libc_lock_lock (av->mutex);
                 locked = 1;
                 chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
                   || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
@@ -3885,7 +3885,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
         }
       if (! have_lock)
         {
-          (void)mutex_unlock(&av->mutex);
+          __libc_lock_unlock (av->mutex);
          locked = 0;
        }
    }
@@ -3931,7 +3931,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
   else if (!chunk_is_mmapped(p)) {
     if (! have_lock) {
-      (void)mutex_lock(&av->mutex);
+      __libc_lock_lock (av->mutex);
       locked = 1;
     }
 
@@ -4064,7 +4064,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 
   if (! have_lock) {
     assert (locked);
-    (void)mutex_unlock(&av->mutex);
+    __libc_lock_unlock (av->mutex);
   }
 }
 
 /*
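The _int_free hunks illustrate a recurring protocol worth noting: the function may be entered with the arena lock already held (have_lock), so it takes the lock only when the caller did not, tracks that fact in a local flag, and releases only what it acquired. A minimal sketch of the protocol, with illustrative names rather than glibc's internals:

#include <pthread.h>

static pthread_mutex_t arena_lock = PTHREAD_MUTEX_INITIALIZER;

static void
free_chunk_locked (void)
{
  /* ... the real work; arena_lock is held on entry ... */
}

/* Callers that already hold arena_lock pass have_lock = 1;
   others pass 0 and the function locks on their behalf.  */
static void
free_chunk (int have_lock)
{
  int locked = 0;

  if (!have_lock)
    {
      pthread_mutex_lock (&arena_lock);
      locked = 1;
    }

  free_chunk_locked ();

  if (!have_lock && locked)
    pthread_mutex_unlock (&arena_lock);
}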
@@ -4531,9 +4531,9 @@ __malloc_trim (size_t s)
   mstate ar_ptr = &main_arena;
   do
     {
-      (void) mutex_lock (&ar_ptr->mutex);
+      __libc_lock_lock (ar_ptr->mutex);
       result |= mtrim (ar_ptr, s);
-      (void) mutex_unlock (&ar_ptr->mutex);
+      __libc_lock_unlock (ar_ptr->mutex);
 
       ar_ptr = ar_ptr->next;
     }
@@ -4662,9 +4662,9 @@ __libc_mallinfo (void)
   ar_ptr = &main_arena;
   do
     {
-      (void) mutex_lock (&ar_ptr->mutex);
+      __libc_lock_lock (ar_ptr->mutex);
       int_mallinfo (ar_ptr, &m);
-      (void) mutex_unlock (&ar_ptr->mutex);
+      __libc_lock_unlock (ar_ptr->mutex);
 
       ar_ptr = ar_ptr->next;
     }
@@ -4694,7 +4694,7 @@ __malloc_stats (void)
       struct mallinfo mi;
 
       memset (&mi, 0, sizeof (mi));
-      (void) mutex_lock (&ar_ptr->mutex);
+      __libc_lock_lock (ar_ptr->mutex);
       int_mallinfo (ar_ptr, &mi);
       fprintf (stderr, "Arena %d:\n", i);
       fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
@@ -4705,7 +4705,7 @@ __malloc_stats (void)
 #endif
       system_b += mi.arena;
       in_use_b += mi.uordblks;
-      (void) mutex_unlock (&ar_ptr->mutex);
+      __libc_lock_unlock (ar_ptr->mutex);
       ar_ptr = ar_ptr->next;
       if (ar_ptr == &main_arena)
         break;
@@ -4733,7 +4733,7 @@ __libc_mallopt (int param_number, int value)
   if (__malloc_initialized < 0)
     ptmalloc_init ();
-  (void) mutex_lock (&av->mutex);
+  __libc_lock_lock (av->mutex);
   /* Ensure initialization/consolidation */
   malloc_consolidate (av);
 
@@ -4811,7 +4811,7 @@ __libc_mallopt (int param_number, int value)
         }
       break;
     }
-  (void) mutex_unlock (&av->mutex);
+  __libc_lock_unlock (av->mutex);
   return res;
 }
 libc_hidden_def (__libc_mallopt)
@@ -5058,7 +5058,7 @@ __malloc_info (int options, FILE *fp)
   } sizes[NFASTBINS + NBINS - 1];
 #define nsizes (sizeof (sizes) / sizeof (sizes[0]))
 
-  mutex_lock (&ar_ptr->mutex);
+  __libc_lock_lock (ar_ptr->mutex);
 
   for (size_t i = 0; i < NFASTBINS; ++i)
     {
@@ -5117,7 +5117,7 @@ __malloc_info (int options, FILE *fp)
           avail += sizes[NFASTBINS - 1 + i].total;
         }
 
-  mutex_unlock (&ar_ptr->mutex);
+  __libc_lock_unlock (ar_ptr->mutex);
 
   total_nfastblocks += nfastblocks;
   total_fastavail += fastavail;
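The statistics hunks above (__malloc_trim, __libc_mallinfo, __malloc_stats, __malloc_info) all walk the circular arena list, holding each arena's lock only while reading that arena's state rather than freezing the whole allocator. A small sketch of that traversal, assuming a simplified arena struct in place of glibc's mstate:

#include <pthread.h>
#include <stdio.h>

struct arena
{
  pthread_mutex_t mutex;
  unsigned long system_bytes;
  unsigned long in_use_bytes;
  struct arena *next;           /* circular list, rooted at main_arena */
};

static void
print_stats (struct arena *main_arena)
{
  unsigned long system_b = 0, in_use_b = 0;
  struct arena *ar_ptr = main_arena;

  do
    {
      /* Lock each arena only for the duration of the read.  */
      pthread_mutex_lock (&ar_ptr->mutex);
      system_b += ar_ptr->system_bytes;
      in_use_b += ar_ptr->in_use_bytes;
      pthread_mutex_unlock (&ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
    }
  while (ar_ptr != main_arena);

  printf ("total system bytes = %10lu\n", system_b);
  printf ("total in use bytes = %10lu\n", in_use_b);
}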