path: root/boehm-gc/gc_priv.h
author    Tom Tromey <tromey@cygnus.com>    1999-11-01 23:15:51 +0000
committer Tom Tromey <tromey@gcc.gnu.org>   1999-11-01 23:15:51 +0000
commit    20bbd3cd53a80ddafa56a5d21337aae0c24f94ca (patch)
tree      9c24ba1ebabff472b9caddbff07ef957dbf2c24c /boehm-gc/gc_priv.h
parent    c05ddfa745d68f6d9db1a7d0992650a41986d376 (diff)
Merged GC 5.0alpha4 with local changes, plus:
* Makefile.in: Rebuilt.
* Makefile.am (gctest_LDADD): Added THREADLIB.
(TESTS): New macro.
* configure: Rebuilt.
* configure.in (INCLUDES): New subst.

From-SVN: r30332
Diffstat (limited to 'boehm-gc/gc_priv.h')
-rw-r--r--  boehm-gc/gc_priv.h  381
1 file changed, 293 insertions, 88 deletions
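For orientation before the diff itself: the thread-support changes below revolve around a simple test-and-set spin lock. LOCK() acquires GC_allocate_lock with GC_test_and_set(), calling GC_lock() if the lock is already held, and UNLOCK() releases it with GC_clear(). The standalone sketch below (not part of the patch) mirrors that protocol using the same xchg-based i386 primitive the patch keeps for LINUX_THREADS; the names allocate_lock, test_and_set, lock, and unlock are illustrative stand-ins, and it only builds with GCC on x86.

/* Sketch of the spin-lock protocol behind LOCK()/UNLOCK(); names invented. */

static volatile unsigned int allocate_lock = 0;

static inline int test_and_set(volatile unsigned int *addr)
{
    int oldval;
    /* "xchg" is implicitly locked on x86, so no "lock" prefix is needed. */
    __asm__ __volatile__("xchgl %0, %1"
                         : "=r" (oldval), "=m" (*addr)
                         : "0" (1), "m" (*addr));
    return oldval;   /* 0 => we acquired the lock, nonzero => already held */
}

static inline void clear(volatile unsigned int *addr)
{
    *addr = 0;       /* release; the collector's GC_clear() does the same  */
}

static void lock(void)
{
    /* LOCK() expands to: if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); */
    /* GC_lock() backs off and retries; this sketch simply spins.            */
    while (test_and_set(&allocate_lock)) { /* spin */ }
}

static void unlock(void)
{
    clear(&allocate_lock);  /* UNLOCK() expands to GC_clear(&GC_allocate_lock) */
}

int main(void)
{
    lock();
    /* ... critical section: allocation, marking, etc. ... */
    unlock();
    return 0;
}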
diff --git a/boehm-gc/gc_priv.h b/boehm-gc/gc_priv.h
index cda9c23..8dd496f 100644
--- a/boehm-gc/gc_priv.h
+++ b/boehm-gc/gc_priv.h
@@ -1,6 +1,9 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
+ *
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -42,7 +45,7 @@ typedef GC_word word;
typedef GC_signed_word signed_word;
# ifndef CONFIG_H
-# include "config.h"
+# include "gcconfig.h"
# endif
# ifndef HEADERS_H
@@ -64,16 +67,16 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# include <stddef.h>
# endif
# define VOLATILE volatile
-# define CONST const
#else
# ifdef MSWIN32
# include <stdlib.h>
# endif
# define VOLATILE
-# define CONST
#endif
-#ifdef AMIGA
+#define CONST GC_CONST
+
+#if 0 /* was once defined for AMIGA */
# define GC_FAR __far
#else
# define GC_FAR
@@ -336,6 +339,9 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
/* space is assumed to be cleared. */
/* In the case of USE_MMAP, the argument must also be a */
/* physical page size. */
+/* GET_MEM is currently not assumed to retrieve 0 filled space, */
+/* though we should perhaps take advantage of the case in which */
+/* it does.                                                      */
# ifdef PCR
char * real_malloc();
# define GET_MEM(bytes) HBLKPTR(real_malloc((size_t)bytes + GC_page_size) \
@@ -347,7 +353,7 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
+ GC_page_size) \
+ GC_page_size-1)
# else
-# if defined(AMIGA) || defined(NEXT) || defined(DOS4GW)
+# if defined(AMIGA) || defined(NEXT) || defined(MACOSX) || defined(DOS4GW)
# define GET_MEM(bytes) HBLKPTR((size_t) \
calloc(1, (size_t)bytes + GC_page_size) \
+ GC_page_size-1)
@@ -433,7 +439,7 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# endif
# ifdef LINUX_THREADS
# include <pthread.h>
-# ifdef __i386__
+# if defined(I386)
inline static int GC_test_and_set(volatile unsigned int *addr) {
int oldval;
/* Note: the "xchg" instruction does not need a "lock" prefix */
@@ -442,55 +448,58 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
: "0"(1), "m"(*(addr)));
return oldval;
}
- inline static void GC_clear(volatile unsigned int *addr) {
- *(addr) = 0;
- }
-# elif defined(__alpha__)
+# else
+# if defined(POWERPC)
inline static int GC_test_and_set(volatile unsigned int *addr) {
- long oldval, temp;
+ int oldval;
+ int temp = 1; // locked value
__asm__ __volatile__(
- "1:\tldl_l %0,%3\n"
- "\tbne %0,2f\n"
- "\tor $31,1,%1\n"
- "\tstl_c %1,%2\n"
- "\tbeq %1,1b\n"
- "2:\tmb\n"
- : "=&r"(oldval), "=&r"(temp), "=m"(*(addr))
- : "m"(*(addr))
+ "1:\tlwarx %0,0,%3\n" // load and reserve
+ "\tcmpwi %0, 0\n" // if load is
+ "\tbne 2f\n" // non-zero, return already set
+ "\tstwcx. %2,0,%1\n" // else store conditional
+ "\tbne- 1b\n" // retry if lost reservation
+ "2:\t\n" // oldval is zero if we set
+ : "=&r"(oldval), "=p"(addr)
+ : "r"(temp), "1"(addr)
: "memory");
return (int)oldval;
}
- inline static void GC_clear(volatile unsigned int *addr) {
- __asm__ __volatile__("mb": : :"memory");
- *(addr) = 0;
- }
-# elif defined(__powerpc__)
- inline static int GC_test_and_set(volatile unsigned int *addr) {
- int ret, oldval=0, newval=1;
-
- __asm__ __volatile__("sync" : : : "memory");
- __asm__ __volatile__(
- "0: lwarx %0,0,%1 ;"
- " xor. %0,%3,%0;"
- " bne 1f;"
- " stwcx. %2,0,%1;"
- " bne- 0b;"
- "1: "
- : "=&r"(ret)
- : "r"(addr), "r"(newval), "r"(oldval)
- : "cr0", "memory");
- __asm__ __volatile__("sync" : : : "memory");
- return ret == 0;
- }
- inline static void GC_clear(volatile unsigned int *addr) {
- __asm__ __volatile__("sync": : :"memory");
- *(addr) = 0;
- }
-
-# else
- -- > Need implementation of GC_test_and_set()
+# else
+# ifdef ALPHA
+          inline static int GC_test_and_set(volatile unsigned int *addr)
+          {
+            unsigned long oldvalue;
+            unsigned long temp;
+
+            __asm__ __volatile__(
+                "1:     ldl_l %0,%1\n"
+                "       and %0,%3,%2\n"
+                "       bne %2,2f\n"
+                "       xor %0,%3,%0\n"
+                "       stl_c %0,%1\n"
+                "       beq %0,3f\n"
+                "       mb\n"
+                "2:\n"
+                ".section .text2,\"ax\"\n"
+                "3:     br 1b\n"
+                ".previous"
+                :"=&r" (temp), "=m" (*addr), "=&r" (oldvalue)
+                :"Ir" (1), "m" (*addr));
+
+            return oldvalue;
+          }
+# else
+ -- > Need implementation of GC_test_and_set()
+# endif
+# endif
# endif
+ inline static void GC_clear(volatile unsigned int *addr) {
+ *(addr) = 0;
+ }
extern volatile unsigned int GC_allocate_lock;
/* This is not a mutex because mutexes that obey the (optional) */
@@ -504,15 +513,10 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# define NO_THREAD (pthread_t)(-1)
# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
# define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
-# ifdef UNDEFINED
-# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
-# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
-# else
-# define LOCK() \
+# define LOCK() \
{ if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
-# define UNLOCK() \
+# define UNLOCK() \
GC_clear(&GC_allocate_lock)
-# endif
extern GC_bool GC_collecting;
# define ENTER_GC() \
{ \
@@ -520,15 +524,30 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
}
# define EXIT_GC() GC_collecting = 0;
# endif /* LINUX_THREADS */
-# if defined(IRIX_THREADS) || defined(IRIX_JDK_THREADS)
+# if defined(HPUX_THREADS)
+# include <pthread.h>
+ extern pthread_mutex_t GC_allocate_ml;
+# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
+# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
+# endif
+# if defined(IRIX_THREADS) || defined(IRIX_JDK_THREADS)
+ /* This may also eventually be appropriate for HPUX_THREADS */
# include <pthread.h>
-# include <mutex.h>
+# ifndef HPUX_THREADS
+ /* This probably should never be included, but I can't test */
+ /* on Irix anymore. */
+# include <mutex.h>
+# endif
-# if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
+# ifndef HPUX_THREADS
+# if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
|| !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
# define GC_test_and_set(addr, v) test_and_set(addr,v)
-# else
+# else
# define GC_test_and_set(addr, v) __test_and_set(addr,v)
+# endif
+# else
+ /* I couldn't find a way to do this inline on HP/UX */
# endif
extern unsigned long GC_allocate_lock;
/* This is not a mutex because mutexes that obey the (optional) */
@@ -542,15 +561,17 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# define NO_THREAD (pthread_t)(-1)
# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
# define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
-# ifdef UNDEFINED
-# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
-# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
+# ifdef HPUX_THREADS
+# define LOCK() { if (!GC_test_and_clear(&GC_allocate_lock)) GC_lock(); }
+ /* The following is INCORRECT, since the memory model is too weak. */
+# define UNLOCK() { GC_noop1(&GC_allocate_lock); \
+ *(volatile unsigned long *)(&GC_allocate_lock) = 1; }
# else
-# define LOCK() { if (GC_test_and_set(&GC_allocate_lock, 1)) GC_lock(); }
-# if __mips >= 3 && (defined (_ABIN32) || defined(_ABI64)) \
+# define LOCK() { if (GC_test_and_set(&GC_allocate_lock, 1)) GC_lock(); }
+# if __mips >= 3 && (defined (_ABIN32) || defined(_ABI64)) \
&& defined(_COMPILER_VERSION) && _COMPILER_VERSION >= 700
# define UNLOCK() __lock_release(&GC_allocate_lock)
-# else
+# else
/* The function call in the following should prevent the */
/* compiler from moving assignments to below the UNLOCK. */
/* This is probably not necessary for ucode or gcc 2.8. */
@@ -558,7 +579,7 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
/* versions. */
# define UNLOCK() { GC_noop1(&GC_allocate_lock); \
*(volatile unsigned long *)(&GC_allocate_lock) = 0; }
-# endif
+# endif
# endif
extern GC_bool GC_collecting;
# define ENTER_GC() \
@@ -653,7 +674,7 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# else
# if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
|| defined(IRIX_THREADS) || defined(LINUX_THREADS) \
- || defined(IRIX_JDK_THREADS)
+ || defined(IRIX_JDK_THREADS) || defined(HPUX_THREADS)
void GC_stop_world();
void GC_start_world();
# define STOP_WORLD() GC_stop_world()
@@ -869,6 +890,7 @@ struct hblkhdr {
struct hblk * hb_next; /* Link field for hblk free list */
/* and for lists of chunks waiting to be */
/* reclaimed. */
+ struct hblk * hb_prev; /* Backwards link for free list. */
word hb_descr; /* object descriptor for marking. See */
/* mark.h. */
char* hb_map; /* A pointer to a pointer validity map of the block. */
@@ -883,14 +905,28 @@ struct hblkhdr {
# define IGNORE_OFF_PAGE 1 /* Ignore pointers that do not */
/* point to the first page of */
/* this object. */
+# define WAS_UNMAPPED 2 /* This is a free block, which has */
+ /* been unmapped from the address */
+ /* space. */
+ /* GC_remap must be invoked on it */
+ /* before it can be reallocated. */
+ /* Only set with USE_MUNMAP. */
unsigned short hb_last_reclaimed;
/* Value of GC_gc_no when block was */
/* last allocated or swept. May wrap. */
+ /* For a free block, this is maintained */
+ /* only for USE_MUNMAP, and indicates */
+ /* when the header was allocated, or */
+ /* when the size of the block last */
+ /* changed. */
word hb_marks[MARK_BITS_SZ];
/* Bit i in the array refers to the */
/* object starting at the ith word (header */
/* INCLUDED) in the heap block. */
/* The lsb of word 0 is numbered 0. */
+ /* Unused bits are invalid, and are */
+ /* occasionally set, e.g for uncollectable */
+ /* objects. */
};
/* heap block body */
@@ -922,7 +958,69 @@ struct hblk {
/* Object free list link */
# define obj_link(p) (*(ptr_t *)(p))
-/* lists of all heap blocks and free lists */
+/* The type of mark procedures. This really belongs in gc_mark.h. */
+/* But we put it here, so that we can avoid scanning the mark proc */
+/* table. */
+typedef struct ms_entry * (*mark_proc)(/* word * addr, mark_stack_ptr,
+ mark_stack_limit, env */);
+# define LOG_MAX_MARK_PROCS 6
+# define MAX_MARK_PROCS (1 << LOG_MAX_MARK_PROCS)
+
+/* Root sets. Logically private to mark_rts.c. But we don't want the */
+/* tables scanned, so we put them here. */
+/* MAX_ROOT_SETS is the maximum number of ranges that can be */
+/* registered as static roots. */
+# ifdef LARGE_CONFIG
+# define MAX_ROOT_SETS 4096
+# else
+# ifdef PCR
+# define MAX_ROOT_SETS 1024
+# else
+# ifdef MSWIN32
+# define MAX_ROOT_SETS 512
+ /* Under NT, we add only written pages, which can result */
+ /* in many small root sets. */
+# else
+# define MAX_ROOT_SETS 64
+# endif
+# endif
+# endif
+
+# define MAX_EXCLUSIONS (MAX_ROOT_SETS/4)
+/* Maximum number of segments that can be excluded from root sets. */
+
+/*
+ * Data structure for excluded static roots.
+ */
+struct exclusion {
+ ptr_t e_start;
+ ptr_t e_end;
+};
+
+/* Data structure for list of root sets. */
+/* We keep a hash table, so that we can filter out duplicate additions. */
+/* Under Win32, we need to do a better job of filtering overlaps, so */
+/* we resort to sequential search, and pay the price. */
+struct roots {
+ ptr_t r_start;
+ ptr_t r_end;
+# ifndef MSWIN32
+ struct roots * r_next;
+# endif
+ GC_bool r_tmp;
+ /* Delete before registering new dynamic libraries */
+};
+
+#ifndef MSWIN32
+ /* Size of hash table index to roots. */
+# define LOG_RT_SIZE 6
+# define RT_SIZE (1 << LOG_RT_SIZE) /* Power of 2, may be != MAX_ROOT_SETS */
+#endif
+
+/* Lists of all heap blocks and free lists */
+/* as well as other random data structures */
+/* that should not be scanned by the */
+/* collector. */
/* These are grouped together in a struct */
/* so that they can be easily skipped by the */
/* GC_mark routine. */
@@ -943,6 +1041,9 @@ struct _GC_arrays {
word _max_heapsize;
ptr_t _last_heap_addr;
ptr_t _prev_heap_addr;
+ word _large_free_bytes;
+ /* Total bytes contained in blocks on large object free */
+ /* list. */
word _words_allocd_before_gc;
/* Number of words allocated before this */
/* collection cycle. */
@@ -962,7 +1063,10 @@ struct _GC_arrays {
word _mem_freed;
/* Number of explicitly deallocated words of memory */
/* since last collection. */
-
+ mark_proc _mark_procs[MAX_MARK_PROCS];
+ /* Table of user-defined mark procedures. There is */
+ /* a small number of these, which can be referenced */
+ /* by DS_PROC mark descriptors. See gc_mark.h. */
ptr_t _objfreelist[MAXOBJSZ+1];
/* free list for objects */
ptr_t _aobjfreelist[MAXOBJSZ+1];
@@ -986,6 +1090,9 @@ struct _GC_arrays {
/* Number of words in accessible atomic */
/* objects. */
# endif
+# ifdef USE_MUNMAP
+ word _unmapped_bytes;
+# endif
# ifdef MERGE_SIZES
unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)];
/* Number of words to allocate for a given allocation request in */
@@ -1003,7 +1110,7 @@ struct _GC_arrays {
/* to an object at */
/* block_start+i&~3 - WORDS_TO_BYTES(j). */
/* (If ALL_INTERIOR_POINTERS is defined, then */
- /* instead ((short *)(hbh_map[sz])[i] is j if */
+ /* instead ((short *)(hb_map[sz])[i] is j if */
/* block_start+WORDS_TO_BYTES(i) is in the */
/* interior of an object starting at */
/* block_start+WORDS_TO_BYTES(i-j)). */
@@ -1044,17 +1151,24 @@ struct _GC_arrays {
/* GC_modws_valid_offsets[i%sizeof(word)] */
# endif
# ifdef STUBBORN_ALLOC
- page_hash_table _changed_pages;
+ page_hash_table _changed_pages;
/* Stubborn object pages that were changed since last call to */
/* GC_read_changed. */
- page_hash_table _prev_changed_pages;
+ page_hash_table _prev_changed_pages;
/* Stubborn object pages that were changed before last call to */
/* GC_read_changed. */
# endif
# if defined(PROC_VDB) || defined(MPROTECT_VDB)
- page_hash_table _grungy_pages; /* Pages that were dirty at last */
+ page_hash_table _grungy_pages; /* Pages that were dirty at last */
/* GC_read_dirty. */
# endif
+# ifdef MPROTECT_VDB
+ VOLATILE page_hash_table _dirty_pages;
+ /* Pages dirtied since last GC_read_dirty. */
+# endif
+# ifdef PROC_VDB
+ page_hash_table _written_pages; /* Pages ever dirtied */
+# endif
# ifdef LARGE_CONFIG
# if CPP_WORDSZ > 32
# define MAX_HEAP_SECTS 4096 /* overflows at roughly 64 GB */
@@ -1071,6 +1185,11 @@ struct _GC_arrays {
ptr_t _heap_bases[MAX_HEAP_SECTS];
/* Start address of memory regions obtained from kernel. */
# endif
+ struct roots _static_roots[MAX_ROOT_SETS];
+# ifndef MSWIN32
+ struct roots * _root_index[RT_SIZE];
+# endif
+ struct exclusion _excl_table[MAX_EXCLUSIONS];
/* Block header index; see gc_headers.h */
bottom_index * _all_nils;
bottom_index * _top_index [TOP_SZ];
@@ -1104,22 +1223,36 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define GC_prev_heap_addr GC_arrays._prev_heap_addr
# define GC_words_allocd GC_arrays._words_allocd
# define GC_words_wasted GC_arrays._words_wasted
+# define GC_large_free_bytes GC_arrays._large_free_bytes
# define GC_words_finalized GC_arrays._words_finalized
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
# define GC_mem_freed GC_arrays._mem_freed
+# define GC_mark_procs GC_arrays._mark_procs
# define GC_heapsize GC_arrays._heapsize
# define GC_max_heapsize GC_arrays._max_heapsize
# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
# define GC_heap_sects GC_arrays._heap_sects
# define GC_last_stack GC_arrays._last_stack
+# ifdef USE_MUNMAP
+# define GC_unmapped_bytes GC_arrays._unmapped_bytes
+# endif
# ifdef MSWIN32
# define GC_heap_bases GC_arrays._heap_bases
# endif
+# define GC_static_roots GC_arrays._static_roots
+# define GC_root_index GC_arrays._root_index
+# define GC_excl_table GC_arrays._excl_table
# define GC_all_nils GC_arrays._all_nils
# define GC_top_index GC_arrays._top_index
# if defined(PROC_VDB) || defined(MPROTECT_VDB)
# define GC_grungy_pages GC_arrays._grungy_pages
# endif
+# ifdef MPROTECT_VDB
+# define GC_dirty_pages GC_arrays._dirty_pages
+# endif
+# ifdef PROC_VDB
+# define GC_written_pages GC_arrays._written_pages
+# endif
# ifdef GATHERSTATS
# define GC_composite_in_use GC_arrays._composite_in_use
# define GC_atomic_in_use GC_arrays._atomic_in_use
@@ -1131,11 +1264,9 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define beginGC_arrays ((ptr_t)(&GC_arrays))
# define endGC_arrays (((ptr_t)(&GC_arrays)) + (sizeof GC_arrays))
-GC_API word GC_fo_entries;
-
+/* Object kinds: */
# define MAXOBJKINDS 16
-/* Object kinds: */
extern struct obj_kind {
ptr_t *ok_freelist; /* Array of free listheaders for this kind of object */
/* Point either to GC_arrays or to storage allocated */
@@ -1149,8 +1280,14 @@ extern struct obj_kind {
/* Add object size in bytes to descriptor */
/* template to obtain descriptor. Otherwise */
/* template is used as is. */
- GC_bool ok_init; /* Clear objects before putting them on the free list. */
+ GC_bool ok_init; /* Clear objects before putting them on the free list. */
} GC_obj_kinds[MAXOBJKINDS];
+
+# define endGC_obj_kinds (((ptr_t)(&GC_obj_kinds)) + (sizeof GC_obj_kinds))
+
+# define end_gc_area ((ptr_t)endGC_arrays == (ptr_t)(&GC_obj_kinds) ? \
+ endGC_obj_kinds : endGC_arrays)
+
/* Predefined kinds: */
# define PTRFREE 0
# define NORMAL 1
@@ -1166,6 +1303,8 @@ extern struct obj_kind {
extern int GC_n_kinds;
+GC_API word GC_fo_entries;
+
extern word GC_n_heap_sects; /* Number of separately added heap */
/* sections. */
@@ -1189,7 +1328,7 @@ extern char * GC_invalid_map;
/* Pointer to the nowhere valid hblk map */
/* Blocks pointing to this map are free. */
-extern struct hblk * GC_hblkfreelist;
+extern struct hblk * GC_hblkfreelist[];
/* List of completely empty heap blocks */
/* Linked through hb_next field of */
/* header structure associated with */
@@ -1200,17 +1339,19 @@ extern GC_bool GC_is_initialized; /* GC_init() has been run. */
extern GC_bool GC_objects_are_marked; /* There are marked objects in */
/* the heap. */
-extern GC_bool GC_incremental; /* Using incremental/generational collection. */
+#ifndef SMALL_CONFIG
+ extern GC_bool GC_incremental;
+ /* Using incremental/generational collection. */
+#else
+# define GC_incremental TRUE
+ /* Hopefully allow optimizer to remove some code. */
+#endif
extern GC_bool GC_dirty_maintained;
/* Dirty bits are being maintained, */
/* either for incremental collection, */
/* or to limit the root set. */
-# ifndef PCR
- extern ptr_t GC_stackbottom; /* Cool end of user stack */
-# endif
-
extern word GC_root_size; /* Total size of registered root sections */
extern GC_bool GC_debugging_started; /* GC_debug_malloc has been called. */
@@ -1262,7 +1403,12 @@ GC_bool GC_should_collect();
void GC_apply_to_all_blocks(/*fn, client_data*/);
/* Invoke fn(hbp, client_data) for each */
/* allocated heap block. */
-struct hblk * GC_next_block(/* struct hblk * h */);
+struct hblk * GC_next_used_block(/* struct hblk * h */);
+ /* Return first in-use block >= h */
+struct hblk * GC_prev_block(/* struct hblk * h */);
+ /* Return last block <= h. Returned block */
+ /* is managed by GC, but may or may not be in */
+ /* use. */
void GC_mark_init();
void GC_clear_marks(); /* Clear mark bits for all heap objects. */
void GC_invalidate_mark_state(); /* Tell the marker that marked */
@@ -1274,7 +1420,8 @@ void GC_mark_from_mark_stack(); /* Mark from everything on the mark stack. */
/* Return after about one pages worth of */
/* work. */
GC_bool GC_mark_stack_empty();
-GC_bool GC_mark_some(); /* Perform about one pages worth of marking */
+GC_bool GC_mark_some(/* cold_gc_frame */);
+ /* Perform about one pages worth of marking */
/* work of whatever kind is needed. Returns */
/* quickly if no collection is in progress. */
/* Return TRUE if mark phase finished. */
@@ -1296,7 +1443,31 @@ void GC_push_dirty(/*b,t*/); /* Push all possibly changed */
/* on the third arg. */
void GC_push_all_stack(/*b,t*/); /* As above, but consider */
/* interior pointers as valid */
-void GC_push_roots(/* GC_bool all */); /* Push all or dirty roots. */
+void GC_push_all_eager(/*b,t*/); /* Same as GC_push_all_stack, but */
+ /* ensures that stack is scanned */
+ /* immediately, not just scheduled */
+ /* for scanning. */
+#ifndef THREADS
+ void GC_push_all_stack_partially_eager(/* bottom, top, cold_gc_frame */);
+ /* Similar to GC_push_all_eager, but only the */
+ /* part hotter than cold_gc_frame is scanned */
+ /* immediately. Needed to ensure that callee- */
+ /* save registers are not missed. */
+#else
+ /* In the threads case, we push part of the current thread stack */
+ /* with GC_push_all_eager when we push the registers. This gets the */
+ /* callee-save registers that may disappear. The remainder of the */
+ /* stacks are scheduled for scanning in *GC_push_other_roots, which */
+ /* is thread-package-specific. */
+#endif
+void GC_push_current_stack(/* ptr_t cold_gc_frame */);
+ /* Push enough of the current stack eagerly to */
+ /* ensure that callee-save registers saved in */
+ /* GC frames are scanned. */
+ /* In the non-threads case, schedule entire */
+ /* stack for scanning. */
+void GC_push_roots(/* GC_bool all, ptr_t cold_gc_frame */);
+ /* Push all or dirty roots. */
extern void (*GC_push_other_roots)();
/* Push system or application specific roots */
/* onto the mark stack. In some environments */
@@ -1310,8 +1481,14 @@ extern void (*GC_start_call_back)(/* void */);
/* lock held. */
/* 0 by default. */
void GC_push_regs(); /* Push register contents onto mark stack. */
+ /* If NURSERY is defined, the default push */
+ /* action can be overridden with GC_push_proc */
void GC_remark(); /* Mark from all marked objects. Used */
/* only if we had to drop something. */
+
+# ifdef NURSERY
+ extern void (*GC_push_proc)(ptr_t);
+# endif
# if defined(MSWIN32)
void __cdecl GC_push_one();
# else
@@ -1461,7 +1638,7 @@ GC_bool GC_collect_or_expand(/* needed_blocks */);
/* blocks available. Should be called */
/* until the blocks are available or */
/* until it fails by returning FALSE. */
-void GC_init(); /* Initialize collector. */
+GC_API void GC_init(); /* Initialize collector. */
void GC_collect_a_little_inner(/* int n */);
/* Do n units worth of garbage */
/* collection work, if appropriate. */
@@ -1538,6 +1715,15 @@ extern void (*GC_print_heap_obj)(/* ptr_t p */);
/* detailed description of the object */
/* referred to by p. */
+/* Memory unmapping: */
+#ifdef USE_MUNMAP
+ void GC_unmap_old(void);
+ void GC_merge_unmapped(void);
+ void GC_unmap(ptr_t start, word bytes);
+ void GC_remap(ptr_t start, word bytes);
+ void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2);
+#endif
+
/* Virtual dirty bit implementation: */
/* Each implementation exports the following: */
void GC_read_dirty(); /* Retrieve dirty bits. */
@@ -1553,7 +1739,7 @@ void GC_write_hint(/* struct hblk * h */);
void GC_dirty_init();
/* Slow/general mark bit manipulation: */
-GC_bool GC_is_marked();
+GC_API GC_bool GC_is_marked();
void GC_clear_mark_bit();
void GC_set_mark_bit();
@@ -1570,6 +1756,16 @@ void GC_print_heap_sects();
void GC_print_static_roots();
void GC_dump();
+#ifdef KEEP_BACK_PTRS
+ void GC_store_back_pointer(ptr_t source, ptr_t dest);
+ void GC_marked_for_finalization(ptr_t dest);
+# define GC_STORE_BACK_PTR(source, dest) GC_store_back_pointer(source, dest)
+# define GC_MARKED_FOR_FINALIZATION(dest) GC_marked_for_finalization(dest)
+#else
+# define GC_STORE_BACK_PTR(source, dest)
+# define GC_MARKED_FOR_FINALIZATION(dest)
+#endif
+
/* Make arguments appear live to compiler */
# ifdef __WATCOMC__
void GC_noop(void*, ...);
@@ -1620,4 +1816,13 @@ void GC_err_puts(/* char *s */);
/* newlines, don't ... */
+# ifdef GC_ASSERTIONS
+# define GC_ASSERT(expr) if(!(expr)) {\
+ GC_err_printf2("Assertion failure: %s:%ld\n", \
+ __FILE__, (unsigned long)__LINE__); \
+ ABORT("assertion failure"); }
+# else
+# define GC_ASSERT(expr)
+# endif
+
# endif /* GC_PRIVATE_H */
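The GC_ASSERT macro added near the end of the patch compiles to nothing unless GC_ASSERTIONS is defined; when enabled, a failing expression prints the file and line via GC_err_printf2() and then calls ABORT(). The minimal standalone analogue below is only a sketch: the MY_* names are invented, and fprintf/abort stand in for the collector's GC_err_printf2()/ABORT().

/* Analogue of the new GC_ASSERT macro; MY_* names are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

#ifdef MY_GC_ASSERTIONS
# define MY_GC_ASSERT(expr) \
    if (!(expr)) { \
        fprintf(stderr, "Assertion failure: %s:%ld\n", \
                __FILE__, (unsigned long)__LINE__); \
        abort(); \
    }
#else
# define MY_GC_ASSERT(expr)   /* expands to nothing in normal builds */
#endif

int main(void)
{
    int invariant_holds = 1;
    MY_GC_ASSERT(invariant_holds);   /* checked only with -DMY_GC_ASSERTIONS */
    return 0;
}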