From 2a5195d96565c6886fe9a686ecd4cbb2fa5c0e0b Mon Sep 17 00:00:00 2001
From: Ian Lance Taylor
Date: Tue, 1 Jan 2013 16:13:20 +0000
Subject: re PR other/55536 (libbacktrace abort in backtrace_alloc at mmap.c:99 running btest)

	PR other/55536
	* mmap.c (backtrace_alloc): Don't call sync functions if not
	threaded.
	(backtrace_free): Likewise.

From-SVN: r194768
---
 libbacktrace/mmap.c | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/libbacktrace/mmap.c b/libbacktrace/mmap.c
index d3313c7..a6c730e 100644
--- a/libbacktrace/mmap.c
+++ b/libbacktrace/mmap.c
@@ -84,6 +84,7 @@ backtrace_alloc (struct backtrace_state *state,
 		 void *data)
 {
   void *ret;
+  int locked;
   struct backtrace_freelist_struct **pp;
   size_t pagesize;
   size_t asksize;
@@ -96,7 +97,12 @@ backtrace_alloc (struct backtrace_state *state,
      using mmap.  __sync_lock_test_and_set returns the old state of
      the lock, so we have acquired it if it returns 0.  */

-  if (!__sync_lock_test_and_set (&state->lock_alloc, 1))
+  if (!state->threaded)
+    locked = 1;
+  else
+    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;
+
+  if (locked)
     {
       for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
 	{
@@ -120,7 +126,8 @@ backtrace_alloc (struct backtrace_state *state,
 	    }
 	}

-      __sync_lock_release (&state->lock_alloc);
+      if (state->threaded)
+	__sync_lock_release (&state->lock_alloc);
     }

   if (ret == NULL)
@@ -154,15 +161,24 @@ backtrace_free (struct backtrace_state *state, void *addr, size_t size,
 		backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
 		void *data ATTRIBUTE_UNUSED)
 {
+  int locked;
+
   /* If we can acquire the lock, add the new space to the free list.
      If we can't acquire the lock, just leak the memory.
      __sync_lock_test_and_set returns the old state of the lock, so we
      have acquired it if it returns 0.  */
-  if (!__sync_lock_test_and_set (&state->lock_alloc, 1))
+
+  if (!state->threaded)
+    locked = 1;
+  else
+    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;
+
+  if (locked)
     {
       backtrace_free_locked (state, addr, size);

-      __sync_lock_release (&state->lock_alloc);
+      if (state->threaded)
+	__sync_lock_release (&state->lock_alloc);
     }
 }
-- 
cgit v1.1
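
Note (not part of the patch): the change above takes the allocator spin lock with the GCC __sync builtins only when state->threaded is set, and otherwise behaves as if the lock were already held, so a single-threaded program never touches the atomics at all. The following standalone sketch illustrates that pattern under stated assumptions; demo_state and demo_alloc are hypothetical names for illustration, while __sync_lock_test_and_set and __sync_lock_release are the real GCC builtins used by the patch.

#include <stdio.h>

struct demo_state
{
  int threaded;    /* Nonzero if multiple threads may call in.  */
  int lock_alloc;  /* Spin lock protecting the shared free list.  */
};

static void
demo_alloc (struct demo_state *state)
{
  int locked;

  /* When single-threaded, pretend we hold the lock; otherwise
     __sync_lock_test_and_set returns the old lock value, so 0 means
     we acquired it.  */
  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      /* ... work on the shared free list would happen here ... */
      printf ("lock held (threaded=%d)\n", state->threaded);

      /* Only release the lock if the atomics were actually used.  */
      if (state->threaded)
	__sync_lock_release (&state->lock_alloc);
    }
}

int
main (void)
{
  struct demo_state single_threaded = { 0, 0 };
  struct demo_state multi_threaded = { 1, 0 };

  demo_alloc (&single_threaded);
  demo_alloc (&multi_threaded);
  return 0;
}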