Diffstat (limited to 'boehm-gc/allchblk.c')
-rw-r--r--  boehm-gc/allchblk.c  123
1 file changed, 92 insertions, 31 deletions
diff --git a/boehm-gc/allchblk.c b/boehm-gc/allchblk.c
index 1505f8e..5b7bcff 100644
--- a/boehm-gc/allchblk.c
+++ b/boehm-gc/allchblk.c
@@ -14,10 +14,9 @@
* modified is included with the above copyright notice.
*/
-#define DEBUG
-#undef DEBUG
+/* #define DEBUG */
#include <stdio.h>
-#include "gc_priv.h"
+#include "private/gc_priv.h"
GC_bool GC_use_entire_heap = 0;
@@ -47,6 +46,35 @@ GC_bool GC_use_entire_heap = 0;
struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
+#ifndef USE_MUNMAP
+ word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
+ /* Number of free bytes on each list. */
+
+ /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS */
+ /* > GC_max_large_allocd_bytes? */
+ GC_bool GC_enough_large_bytes_left(bytes,n)
+ word bytes;
+ int n;
+ {
+ int i;
+ for (i = N_HBLK_FLS; i >= n; --i) {
+ bytes += GC_free_bytes[i];
+ if (bytes > GC_max_large_allocd_bytes) return TRUE;
+ }
+ return FALSE;
+ }
+
+# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);
+
+# define FREE_ASSERT(e) GC_ASSERT(e)
+
+#else /* USE_MUNMAP */
+
+# define INCR_FREE_BYTES(n, b)
+# define FREE_ASSERT(e)
+
+#endif /* USE_MUNMAP */
+
/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(blocks_needed)
word blocks_needed;
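
The hunk above introduces per-list byte accounting: GC_free_bytes[i] caches the free-byte total of list i, and GC_enough_large_bytes_left() asks whether lists n .. N_HBLK_FLS still hold enough bytes (beyond the bytes already in hand) to cover the largest amount of large-object memory previously outstanding. A minimal standalone sketch of the same loop, with hypothetical stand-ins (N_FLS, free_bytes, max_large_allocd) for the collector's N_HBLK_FLS, GC_free_bytes, and GC_max_large_allocd_bytes:

    #include <stdio.h>

    #define N_FLS 8                               /* stand-in for N_HBLK_FLS */

    static unsigned long free_bytes[N_FLS + 1];   /* stand-in for GC_free_bytes */
    static unsigned long max_large_allocd = 4096; /* stand-in for GC_max_large_allocd_bytes */

    /* Same loop shape as GC_enough_large_bytes_left: walk from the */
    /* largest list down to list n, accumulating free bytes, and    */
    /* stop as soon as the running total exceeds the historical     */
    /* maximum of outstanding large allocations.                    */
    static int enough_large_bytes_left(unsigned long bytes, int n)
    {
        int i;
        for (i = N_FLS; i >= n; --i) {
            bytes += free_bytes[i];
            if (bytes > max_large_allocd) return 1;
        }
        return 0;
    }

    int main(void)
    {
        free_bytes[7] = 3000;
        free_bytes[8] = 2000;
        printf("%d\n", enough_large_bytes_left(0, 7)); /* 1: 5000 > 4096  */
        printf("%d\n", enough_large_bytes_left(0, 8)); /* 0: 2000 <= 4096 */
        return 0;
    }

Walking from the largest list down lets the loop exit early once enough bytes are found, without ever visiting the smaller lists.
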
@@ -79,7 +107,14 @@ void GC_print_hblkfreelist()
for (i = 0; i <= N_HBLK_FLS; ++i) {
h = GC_hblkfreelist[i];
- if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);
+# ifdef USE_MUNMAP
+	  if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);
+# else
+ if (0 != h) GC_printf2("Free list %ld (Total size %ld):\n",
+ (unsigned long)i,
+ (unsigned long)GC_free_bytes[i]);
+# endif
while (h != 0) {
hhdr = HDR(h);
sz = hhdr -> hb_sz;
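
GC_print_hblkfreelist() now reports the cached per-list total alongside the list contents when USE_MUNMAP is off and the counter exists. The FREE_ASSERT calls added elsewhere in this patch guard the obvious invariant behind that number: the cached count must equal the sum of the block sizes actually on the list. A hedged illustration of that invariant, using a hypothetical miniature list type rather than the real hdr/hblk structures:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    struct blk { size_t sz; struct blk *next; };

    /* Sum the recorded sizes of every block on one free list; the */
    /* cached counter must always equal this walk's result.        */
    static size_t sum_list(struct blk *h)
    {
        size_t total = 0;
        for (; h != NULL; h = h->next) total += h->sz;
        return total;
    }

    int main(void)
    {
        struct blk b, a;
        size_t cached = 12288;          /* plays the role of GC_free_bytes[i] */
        b.sz = 8192; b.next = NULL;
        a.sz = 4096; a.next = &b;
        assert(sum_list(&a) == cached); /* the invariant FREE_ASSERT guards */
        printf("list total: %lu\n", (unsigned long)sum_list(&a));
        return 0;
    }
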
@@ -219,14 +254,25 @@ void GC_remove_from_fl(hhdr, n)
hdr * hhdr;
int n;
{
+ int index;
+
GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
+# ifndef USE_MUNMAP
+      /* We always need index to maintain free counts. */
+ if (FL_UNKNOWN == n) {
+ index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
+ } else {
+ index = n;
+ }
+# endif
if (hhdr -> hb_prev == 0) {
- int index;
- if (FL_UNKNOWN == n) {
+# ifdef USE_MUNMAP
+ if (FL_UNKNOWN == n) {
index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
- } else {
+ } else {
index = n;
- }
+ }
+# endif
GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
GC_hblkfreelist[index] = hhdr -> hb_next;
} else {
@@ -234,6 +280,8 @@ int n;
GET_HDR(hhdr -> hb_prev, phdr);
phdr -> hb_next = hhdr -> hb_next;
}
+ INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
+ FREE_ASSERT(GC_free_bytes[index] >= 0);
if (0 != hhdr -> hb_next) {
hdr * nhdr;
GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
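
GC_remove_from_fl() now computes the list index up front whenever counters are maintained, so the byte count can be decremented on every removal path, not just the head-of-list one. A sketch of the same unlink-plus-decrement pattern, with hypothetical node/freelist/free_bytes names in place of the real header structures:

    #include <stddef.h>

    #define NLISTS 8                          /* stand-in for N_HBLK_FLS */

    struct node { size_t sz; struct node *prev, *next; };
    static struct node *freelist[NLISTS + 1];
    static long free_bytes[NLISTS + 1];       /* stand-in for GC_free_bytes */

    /* Unlink p from list `index`: either advance the head (no      */
    /* predecessor) or bridge its neighbors, then subtract p's size */
    /* from the list's byte count, as INCR_FREE_BYTES(index, -sz)   */
    /* does on every path through GC_remove_from_fl.                */
    static void remove_from_fl(struct node *p, int index)
    {
        if (p->prev == NULL) {
            freelist[index] = p->next;
        } else {
            p->prev->next = p->next;
        }
        if (p->next != NULL) {
            p->next->prev = p->prev;
        }
        free_bytes[index] -= (long)p->sz;
    }

    int main(void)
    {
        struct node n; n.sz = 4096; n.prev = n.next = NULL;
        freelist[1] = &n; free_bytes[1] = 4096;
        remove_from_fl(&n, 1);
        return (free_bytes[1] == 0 && freelist[1] == NULL) ? 0 : 1;
    }
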
@@ -294,6 +342,8 @@ hdr * hhdr;
# endif
GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
GC_hblkfreelist[index] = h;
+ INCR_FREE_BYTES(index, hhdr -> hb_sz);
+    FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes);
hhdr -> hb_next = second;
hhdr -> hb_prev = 0;
if (0 != second) {
@@ -419,7 +469,7 @@ int index;
rest_hdr -> hb_sz = total_size - bytes;
rest_hdr -> hb_flags = 0;
# ifdef GC_ASSERTIONS
- // Mark h not free, to avoid assertion about adjacent free blocks.
+ /* Mark h not free, to avoid assertion about adjacent free blocks. */
hhdr -> hb_map = 0;
# endif
GC_add_to_fl(rest, rest_hdr);
@@ -463,6 +513,8 @@ int index; /* Index of free list */
if (0 != next) {
HDR(next) -> hb_prev = n;
}
+ INCR_FREE_BYTES(index, -(signed_word)h_size);
+ FREE_ASSERT(GC_free_bytes[index] > 0);
# ifdef GC_ASSERTIONS
nhdr -> hb_map = 0; /* Don't fail test for consecutive */
/* free blocks in GC_add_to_fl. */
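
This hunk decrements the counter when a free block is split and its front handed out, with the remainder relinked in the original's place. A hedged sketch of that splitting idea, assuming a hypothetical in-memory node header; the real code also repoints the list head when the split block led its list, and re-files the remainder on whichever list matches its new size:

    #include <stdlib.h>

    struct node { size_t sz; struct node *prev, *next; };

    /* Carve `need` bytes off the front of free block h; the        */
    /* remainder takes h's place among its list neighbors.          */
    static struct node *split_front(struct node *h, size_t need)
    {
        struct node *rest = (struct node *)((char *)h + need);
        rest->sz = h->sz - need;
        rest->prev = h->prev;
        rest->next = h->next;
        if (rest->prev != NULL) rest->prev->next = rest;
        if (rest->next != NULL) rest->next->prev = rest;
        h->sz = need;
        return rest;
    }

    int main(void)
    {
        char *mem = malloc(4096);        /* one 4 KB "block" */
        struct node *h = (struct node *)mem;
        struct node *rest;
        int ok;
        if (mem == NULL) return 1;
        h->sz = 4096; h->prev = NULL; h->next = NULL;
        rest = split_front(h, 1024);
        ok = (rest->sz == 3072);
        free(mem);
        return ok ? 0 : 1;
    }
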
@@ -484,20 +536,23 @@ struct hblk * GC_allochblk_nth();
* NOTE: We set obj_map field in header correctly.
* Caller is responsible for building an object freelist in block.
*
- * We clear the block if it is destined for large objects, and if
- * kind requires that newly allocated objects be cleared.
+ * Unlike older versions of the collector, the client is responsible
+ * for clearing the block, if necessary.
*/
struct hblk *
GC_allochblk(sz, kind, flags)
word sz;
int kind;
-unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
+unsigned flags; /* IGNORE_OFF_PAGE or 0 */
{
- int start_list = GC_hblk_fl_from_blocks(OBJ_SZ_TO_BLOCKS(sz));
+ word blocks = OBJ_SZ_TO_BLOCKS(sz);
+ int start_list = GC_hblk_fl_from_blocks(blocks);
int i;
for (i = start_list; i <= N_HBLK_FLS; ++i) {
struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
- if (0 != result) return result;
+ if (0 != result) {
+ return result;
+ }
}
return 0;
}
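
GC_allochblk() itself is a first-fit search over the size-segregated lists: map the request to a starting index, then try successively larger lists until one yields a block. A simplified, self-contained sketch under the assumption of a trivial index function (the real GC_hblk_fl_from_blocks mapping differs); all names here are hypothetical:

    #include <stddef.h>

    #define NLISTS 8                      /* stand-in for N_HBLK_FLS */

    struct node { size_t blocks; struct node *next; };
    static struct node *freelist[NLISTS + 1];

    /* Map a block count to its starting list, capping at the last  */
    /* list; the real GC_hblk_fl_from_blocks uses a subtler mapping.*/
    static int fl_index(size_t blocks)
    {
        return blocks > NLISTS ? NLISTS : (int)blocks;
    }

    /* Pop the first block on list i that is large enough, or NULL; */
    /* stands in for GC_allochblk_nth.                              */
    static struct node *alloc_from_list(size_t blocks, int i)
    {
        struct node **p = &freelist[i];
        while (*p != NULL) {
            if ((*p)->blocks >= blocks) {
                struct node *r = *p;
                *p = r->next;
                return r;
            }
            p = &(*p)->next;
        }
        return NULL;
    }

    /* First-fit over size-segregated lists, as in GC_allochblk.    */
    static struct node *alloc_blocks(size_t blocks)
    {
        int i;
        for (i = fl_index(blocks); i <= NLISTS; ++i) {
            struct node *r = alloc_from_list(blocks, i);
            if (r != NULL) return r;
        }
        return NULL;
    }

    int main(void)
    {
        struct node n; n.blocks = 3; n.next = NULL;
        freelist[3] = &n;
        return alloc_blocks(2) == &n ? 0 : 1;  /* list 2 empty; list 3 serves */
    }
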
@@ -526,12 +581,22 @@ int n;
GET_HDR(hbp, hhdr);
size_avail = hhdr->hb_sz;
if (size_avail < size_needed) continue;
- if (!GC_use_entire_heap) {
- if (size_avail != size_needed
- && USED_HEAP_SIZE >= GC_requested_heapsize
- && !GC_incremental && GC_should_collect()) {
+ if (!GC_use_entire_heap
+ && size_avail != size_needed
+ && USED_HEAP_SIZE >= GC_requested_heapsize
+ && !GC_incremental && GC_should_collect()) {
+# ifdef USE_MUNMAP
continue;
- }
+# else
+	    /* If we have enough large blocks left to cover any	*/
+ /* previous request for large blocks, we go ahead */
+ /* and split. Assuming a steady state, that should */
+ /* be safe. It means that we can use the full */
+ /* heap if we allocate only small objects. */
+ if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
+ continue;
+ }
+# endif /* !USE_MUNMAP */
}
/* If the next heap block is obviously better, go on. */
/* This prevents us from disassembling a single large block */
@@ -573,7 +638,7 @@ int n;
/* Make sure it's mapped before we mangle it. */
# ifdef USE_MUNMAP
if (!IS_MAPPED(hhdr)) {
- GC_remap((ptr_t)hbp, size_avail);
+ GC_remap((ptr_t)hbp, hhdr -> hb_sz);
hhdr -> hb_flags &= ~WAS_UNMAPPED;
}
# endif
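
Both remap sites in this patch switch from size_avail to hhdr -> hb_sz, so the whole recorded block is remapped before it is mangled, not just the portion the caller measured. Roughly, USE_MUNMAP returns idle blocks' pages to the OS and re-establishes them on reuse; a hedged POSIX sketch of that idea (the real GC_unmap/GC_remap are more careful and more portable):

    #include <stddef.h>
    #include <sys/mman.h>

    /* Re-establish readable/writable pages at an address an        */
    /* earlier munmap() removed.  MAP_FIXED keeps the block at the  */
    /* same heap address; MAP_ANONYMOUS may need a feature-test     */
    /* macro on some systems.                                       */
    static void remap_region(void *start, size_t bytes)
    {
        void *p = mmap(start, bytes, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        (void)p;  /* a real collector aborts if p == MAP_FAILED */
    }

    int main(void)
    {
        size_t len = 4 * 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) return 1;
        munmap(p, len);             /* "unmap" an idle block       */
        remap_region(p, len);       /* bring it back before reuse  */
        ((char *)p)[0] = 1;         /* pages are usable again      */
        return 0;
    }
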
@@ -589,8 +654,10 @@ int n;
&& orig_avail - size_needed
> (signed_word)BL_LIMIT) {
/* Punt, since anything else risks unreasonable heap growth. */
- WARN("Needed to allocate blacklisted block at 0x%lx\n",
- (word)hbp);
+	        if (0 == GETENV("GC_NO_BLACKLIST_WARNING")) {
+ WARN("Needed to allocate blacklisted block at 0x%lx\n",
+ (word)hbp);
+ }
size_avail = orig_avail;
} else if (size_avail == 0 && size_needed == HBLKSIZE
&& IS_MAPPED(hhdr)) {
@@ -618,10 +685,10 @@ int n;
if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
(void) setup_header(
hhdr,
- BYTES_TO_WORDS(HBLKSIZE - HDR_BYTES),
+ BYTES_TO_WORDS(HBLKSIZE),
PTRFREE, 0); /* Can't fail */
if (GC_debugging_started) {
- BZERO(h + HDR_BYTES, HBLKSIZE - HDR_BYTES);
+ BZERO(h, HBLKSIZE);
}
}
}
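
With block headers no longer stored at the start of the block (the old HDR_BYTES offset is gone), the full HBLKSIZE is object space, so size and clear computations drop the offset. A small before/after illustration with hypothetical constants:

    #include <string.h>

    #define HBLK 4096                      /* hypothetical HBLKSIZE */
    #define WORDSZ sizeof(long)
    #define BYTES_TO_WORDS(n) ((n) / WORDSZ)

    /* Initialize a single pointer-free block.  Before this patch   */
    /* the first HDR_BYTES of the block held its header, so both    */
    /* lines below subtracted that offset; now the header lives     */
    /* elsewhere and the whole block is usable.                     */
    static void init_block(char *h)
    {
        size_t words = BYTES_TO_WORDS(HBLK);  /* was HBLK - HDR_BYTES  */
        (void)words;                          /* recorded in the header */
        memset(h, 0, HBLK);                   /* was h + HDR_BYTES, ... */
    }

    int main(void)
    {
        static char block[HBLK];
        init_block(block);
        return 0;
    }
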
@@ -638,7 +705,7 @@ int n;
if( size_avail >= size_needed ) {
# ifdef USE_MUNMAP
if (!IS_MAPPED(hhdr)) {
- GC_remap((ptr_t)hbp, size_avail);
+ GC_remap((ptr_t)hbp, hhdr -> hb_sz);
hhdr -> hb_flags &= ~WAS_UNMAPPED;
}
# endif
@@ -664,12 +731,6 @@ int n;
return(0); /* ditto */
}
- /* Clear block if necessary */
- if (GC_debugging_started
- || sz > MAXOBJSZ && GC_obj_kinds[kind].ok_init) {
- BZERO(hbp + HDR_BYTES, size_needed - HDR_BYTES);
- }
-
/* We just successfully allocated a block. Restart count of */
/* consecutive failures. */
{
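
The deleted BZERO above is the other half of the comment change earlier in this patch: the allocator no longer clears large-object blocks, so clients that need initialized memory must clear after GC_allochblk returns. A hedged sketch of such caller-side clearing, with a malloc stand-in for the real allocator:

    #include <stdlib.h>
    #include <string.h>

    /* malloc stands in for GC_allochblk; needs_init plays the      */
    /* role of GC_obj_kinds[kind].ok_init (hypothetical names).     */
    static void *get_block(size_t bytes)
    {
        return malloc(bytes);
    }

    static void *alloc_large(size_t bytes, int needs_init)
    {
        void *h = get_block(bytes);
        if (h != NULL && needs_init) {
            memset(h, 0, bytes);  /* clearing moved out of the allocator */
        }
        return h;
    }

    int main(void)
    {
        void *p = alloc_large(8192, 1);
        free(p);
        return 0;
    }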