about summary refs log tree commit diff
path: root/boehm-gc/alloc.c
diff options
context:
space:
mode:
Diffstat (limited to 'boehm-gc/alloc.c')
-rw-r--r--  boehm-gc/alloc.c  28
1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/boehm-gc/alloc.c b/boehm-gc/alloc.c
index 45c71d3..9b4869f 100644
--- a/boehm-gc/alloc.c
+++ b/boehm-gc/alloc.c
@@ -92,6 +92,16 @@ char * GC_copyright[] =
# include "version.h"
+#if defined(SAVE_CALL_CHAIN) && \
+ !(defined(REDIRECT_MALLOC) && defined(GC_HAVE_BUILTIN_BACKTRACE))
+# define SAVE_CALL_CHAIN_IN_GC
+ /* This is only safe if the call chain save mechanism won't end up */
+ /* calling GC_malloc. The GNU C library documentation suggests */
+ /* that backtrace doesn't use malloc, but at least the initial */
+ /* call in some versions does seem to invoke the dynamic linker, */
+ /* which uses malloc. */
+#endif
+
/* some more variables */
extern signed_word GC_mem_found; /* Number of reclaimed longwords */
@@ -196,7 +206,8 @@ word GC_adj_words_allocd()
/* had been reallocated this round. Finalization is user */
/* visible progress. And if we don't count this, we have */
/* stability problems for programs that finalize all objects. */
- result += GC_words_wasted;
+ if ((GC_words_wasted >> 3) < result)
+ result += GC_words_wasted;
/* This doesn't reflect useful work. But if there is lots of */
/* new fragmentation, the same is probably true of the heap, */
/* and the collection will be correspondingly cheaper. */
@@ -221,6 +232,8 @@ void GC_clear_a_few_frames()
{
# define NWORDS 64
word frames[NWORDS];
+ /* Some compilers will warn that frames was set but never used. */
+ /* That's the whole idea ... */
register int i;
for (i = 0; i < NWORDS; i++) frames[i] = 0;
@@ -293,7 +306,7 @@ void GC_maybe_gc()
# endif
if (GC_stopped_mark(GC_time_limit == GC_TIME_UNLIMITED?
GC_never_stop_func : GC_timeout_stop_func)) {
-# ifdef SAVE_CALL_CHAIN
+# ifdef SAVE_CALL_CHAIN_IN_GC
GC_save_callers(GC_last_stack);
# endif
GC_finish_collection();
@@ -358,7 +371,7 @@ GC_stop_func stop_func;
}
GC_invalidate_mark_state(); /* Flush mark stack. */
GC_clear_marks();
-# ifdef SAVE_CALL_CHAIN
+# ifdef SAVE_CALL_CHAIN_IN_GC
GC_save_callers(GC_last_stack);
# endif
GC_is_full_gc = TRUE;
@@ -413,7 +426,7 @@ int n;
for (i = GC_deficit; i < GC_RATE*n; i++) {
if (GC_mark_some((ptr_t)0)) {
/* Need to finish a collection */
-# ifdef SAVE_CALL_CHAIN
+# ifdef SAVE_CALL_CHAIN_IN_GC
GC_save_callers(GC_last_stack);
# endif
# ifdef PARALLEL_MARK
@@ -929,7 +942,7 @@ word n;
# endif
expansion_slop = WORDS_TO_BYTES(min_words_allocd()) + 4*MAXHINCR*HBLKSIZE;
if (GC_last_heap_addr == 0 && !((word)space & SIGNB)
- || GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space) {
+ || (GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space)) {
/* Assume the heap is growing up */
GC_greatest_plausible_heap_addr =
(GC_PTR)GC_max((ptr_t)GC_greatest_plausible_heap_addr,
@@ -992,7 +1005,7 @@ word needed_blocks;
GC_bool ignore_off_page;
{
if (!GC_incremental && !GC_dont_gc &&
- (GC_dont_expand && GC_words_allocd > 0 || GC_should_collect())) {
+ ((GC_dont_expand && GC_words_allocd > 0) || GC_should_collect())) {
GC_gcollect_inner();
} else {
word blocks_to_get = GC_heapsize/(HBLKSIZE*GC_free_space_divisor)
@@ -1001,6 +1014,9 @@ GC_bool ignore_off_page;
if (blocks_to_get > MAXHINCR) {
word slop;
+ /* Get the minimum required to make it likely that we */
+ /* can satisfy the current request in the presence of black- */
+ /* listing. This will probably be more than MAXHINCR. */
if (ignore_off_page) {
slop = 4;
} else {