/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
/* Boehm, February 7, 1996 4:32 pm PST */
#include <stdio.h>
#include "gc_priv.h"
extern ptr_t GC_clear_stack(); /* in misc.c, behaves like identity */
void GC_extend_size_map(); /* in misc.c. */
/* Allocate reclaim list for kind: */
/* Return TRUE on success */
GC_bool GC_alloc_reclaim_list(kind)
register struct obj_kind * kind;
{
struct hblk ** result = (struct hblk **)
GC_scratch_alloc((MAXOBJSZ+1) * sizeof(struct hblk *));
if (result == 0) return(FALSE);
BZERO(result, (MAXOBJSZ+1)*sizeof(struct hblk *));
kind -> ok_reclaim_list = result;
return(TRUE);
}
/* allocate lb bytes for an object of kind. */
/* Should not be used directly to allocate */
/* objects such as STUBBORN objects that */
/* require special handling on allocation. */
/* First a version that assumes we already */
/* hold lock: */
ptr_t GC_generic_malloc_inner(lb, k)
register word lb;
register int k;
{
register word lw;
register ptr_t op;
register ptr_t *opp;
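/* Small objects are served from the per-kind free list for */
/* the rounded-up size; larger requests get whole heap */
/* blocks below. */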
if( SMALL_OBJ(lb) ) {
register struct obj_kind * kind = GC_obj_kinds + k;
# ifdef MERGE_SIZES
lw = GC_size_map[lb];
# else
lw = ALIGNED_WORDS(lb);
if (lw == 0) lw = MIN_WORDS;
# endif
opp = &(kind -> ok_freelist[lw]);
if( (op = *opp) == 0 ) {
# ifdef MERGE_SIZES
if (GC_size_map[lb] == 0) {
if (!GC_is_initialized) GC_init_inner();
if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
return(GC_generic_malloc_inner(lb, k));
}
# else
if (!GC_is_initialized) {
GC_init_inner();
return(GC_generic_malloc_inner(lb, k));
}
# endif
if (kind -> ok_reclaim_list == 0) {
if (!GC_alloc_reclaim_list(kind)) goto out;
}
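/* The free list for this size is empty; have GC_allocobj */
/* refill it (by sweeping reclaimable blocks, collecting, */
/* or expanding the heap, as needed). */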
op = GC_allocobj(lw, k);
if (op == 0) goto out;
}
/* Here everything is in a consistent state. */
/* We assume the following assignment is */
/* atomic. If we get aborted */
/* after the assignment, we lose an object, */
/* but that's benign. */
/* Volatile declarations may need to be added */
/* to prevent the compiler from breaking things.*/
/* If we only execute the second of the */
/* following assignments, we lose the free */
/* list, but that should still be OK, at least */
/* for garbage collected memory. */
*opp = obj_link(op);
obj_link(op) = 0;
} else {
register struct hblk * h;
register word n_blocks = divHBLKSZ(ADD_SLOP(lb)
+ HDR_BYTES + HBLKSIZE-1);
if (!GC_is_initialized) GC_init_inner();
/* Do our share of marking work */
if(GC_incremental && !GC_dont_gc)
GC_collect_a_little_inner((int)n_blocks);
lw = ROUNDED_UP_WORDS(lb);
h = GC_allochblk(lw, k, 0);
# ifdef USE_MUNMAP
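/* The first attempt failed: merge adjacent unmapped blocks */
/* and retry before resorting to collection or expansion. */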
if (0 == h) {
GC_merge_unmapped();
h = GC_allochblk(lw, k, 0);
}
# endif
while (0 == h && GC_collect_or_expand(n_blocks, FALSE)) {
h = GC_allochblk(lw, k, 0);
}
if (h == 0) {
op = 0;
} else {
op = (ptr_t) (h -> hb_body);
GC_words_wasted += BYTES_TO_WORDS(n_blocks * HBLKSIZE) - lw;
}
}
GC_words_allocd += lw;
out:
return((ptr_t)op);
}
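/* As above, but also invoke finalizers first, and acquire the */
/* allocation lock (with signals disabled) around the inner */
/* call. Calls GC_oom_fn if the allocation fails. */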
ptr_t GC_generic_malloc(lb, k)
register word lb;
register int k;
{
ptr_t result;
DCL_LOCK_STATE;
GC_INVOKE_FINALIZERS();
DISABLE_SIGNALS();
LOCK();
result = GC_generic_malloc_inner(lb, k);
UNLOCK();
ENABLE_SIGNALS();
if (0 == result) {
return((*GC_oom_fn)(lb));
} else {
return(result);
}
}
#define GENERAL_MALLOC(lb,k) \
(GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
/* We make the GC_clear_stack call a tail call, hoping to get more of */
/* the stack. */
/* Allocate lb bytes of atomic (pointer-free) data */
# ifdef __STDC__
GC_PTR GC_malloc_atomic(size_t lb)
# else
GC_PTR GC_malloc_atomic(lb)
size_t lb;
# endif
{
register ptr_t op;
register ptr_t * opp;
register word lw;
DCL_LOCK_STATE;
if( SMALL_OBJ(lb) ) {
# ifdef MERGE_SIZES
lw = GC_size_map[lb];
# else
lw = ALIGNED_WORDS(lb);
# endif
opp = &(GC_aobjfreelist[lw]);
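/* Fast path: try to pop the head of the atomic object free */
/* list under the low-cost lock. On lock contention or an */
/* empty list, fall back to the general (fully locked) */
/* allocator. */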
FASTLOCK();
if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
FASTUNLOCK();
return(GENERAL_MALLOC((word)lb, PTRFREE));
}
/* See above comment on signals. */
*opp = obj_link(op);
GC_words_allocd += lw;
FASTUNLOCK();
return((GC_PTR) op);
} else {
return(GENERAL_MALLOC((word)lb, PTRFREE));
}
}
/* Allocate lb bytes of composite (pointerful) data */
# ifdef __STDC__
GC_PTR GC_malloc(size_t lb)
# else
GC_PTR GC_malloc(lb)
size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;
if( SMALL_OBJ(lb) ) {
# ifdef MERGE_SIZES
lw = GC_size_map[lb];
# else
lw = ALIGNED_WORDS(lb);
# endif
opp = &(GC_objfreelist[lw]);
FASTLOCK();
if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
FASTUNLOCK();
return(GENERAL_MALLOC((word)lb, NORMAL));
}
/* See above comment on signals. */
*opp = obj_link(op);
obj_link(op) = 0;
GC_words_allocd += lw;
FASTUNLOCK();
return((GC_PTR) op);
} else {
return(GENERAL_MALLOC((word)lb, NORMAL));
}
}
# ifdef REDIRECT_MALLOC
# ifdef __STDC__
GC_PTR malloc(size_t lb)
# else
GC_PTR malloc(lb)
size_t lb;
# endif
{
/* It might help to manually inline the GC_malloc call here. */
/* But any decent compiler should reduce the extra procedure call */
/* to at most a jump instruction in this case. */
# if defined(I386) && defined(SOLARIS_THREADS)
/*
* Thread initialisation can call malloc before
* we're ready for it.
* It's not clear that this is enough to help matters.
* The thread implementation may well call malloc at other
* inopportune times.
*/
if (!GC_is_initialized) return sbrk(lb);
# endif /* I386 && SOLARIS_THREADS */
return(REDIRECT_MALLOC(lb));
}
# ifdef __STDC__
GC_PTR calloc(size_t n, size_t lb)
# else
GC_PTR calloc(n, lb)
size_t n, lb;
# endif
{
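/* Note that the product n*lb is not checked for overflow */
/* here; it is assumed to fit in a size_t. */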
return(REDIRECT_MALLOC(n*lb));
}
# endif /* REDIRECT_MALLOC */
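/* Allocate an object of the given kind, dispatching to the */
/* kind-specific allocator where one exists. */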
GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
switch(knd) {
# ifdef STUBBORN_ALLOC
case STUBBORN:
return(GC_malloc_stubborn((size_t)lb));
# endif
case PTRFREE:
return(GC_malloc_atomic((size_t)lb));
case NORMAL:
return(GC_malloc((size_t)lb));
case UNCOLLECTABLE:
return(GC_malloc_uncollectable((size_t)lb));
# ifdef ATOMIC_UNCOLLECTABLE
case AUNCOLLECTABLE:
return(GC_malloc_atomic_uncollectable((size_t)lb));
# endif /* ATOMIC_UNCOLLECTABLE */
default:
return(GC_generic_malloc(lb,knd));
}
}
/* Change the size of the block pointed to by p to contain at least */
/* lb bytes. The object may be (and quite likely will be) moved. */
/* The kind (e.g. atomic) is the same as that of the old. */
/* Shrinking of large blocks is not implemented well. */
# ifdef __STDC__
GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
GC_PTR GC_realloc(p,lb)
GC_PTR p;
size_t lb;
# endif
{
register struct hblk * h;
register hdr * hhdr;
register word sz; /* Current size in bytes */
register word orig_sz; /* Original sz in bytes */
int obj_kind;
if (p == 0) return(GC_malloc(lb)); /* Required by ANSI */
h = HBLKPTR(p);
hhdr = HDR(h);
sz = hhdr -> hb_sz;
obj_kind = hhdr -> hb_obj_kind;
sz = WORDS_TO_BYTES(sz);
orig_sz = sz;
if (sz > WORDS_TO_BYTES(MAXOBJSZ)) {
/* Round it up to the next whole heap block */
register word descr;
sz = (sz+HDR_BYTES+HBLKSIZE-1)
& (~HBLKMASK);
sz -= HDR_BYTES;
hhdr -> hb_sz = BYTES_TO_WORDS(sz);
descr = GC_obj_kinds[obj_kind].ok_descriptor;
if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
hhdr -> hb_descr = descr;
if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
/* Extra area is already cleared by allochblk. */
}
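/* If the new size fits in the current block and is at least */
/* half of it, adjust the object in place; otherwise allocate */
/* a new object of the same kind and copy. */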
if (ADD_SLOP(lb) <= sz) {
if (lb >= (sz >> 1)) {
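/* Announce that the stubborn object may change, since we */
/* may clear part of it below. */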
# ifdef STUBBORN_ALLOC
if (obj_kind == STUBBORN) GC_change_stubborn(p);
# endif
if (orig_sz > lb) {
/* Clear unneeded part of object to avoid bogus pointer */
/* tracing. */
/* Safe for stubborn objects. */
BZERO(((ptr_t)p) + lb, orig_sz - lb);
}
return(p);
} else {
/* shrink */
GC_PTR result =
GC_generic_or_special_malloc((word)lb, obj_kind);
if (result == 0) return(0);
/* We could also return the original object here, but */
/* failing instead gives the client warning of */
/* imminent disaster. */
BCOPY(p, result, lb);
# ifndef IGNORE_FREE
GC_free(p);
# endif
return(result);
}
} else {
/* grow */
GC_PTR result =
GC_generic_or_special_malloc((word)lb, obj_kind);
if (result == 0) return(0);
BCOPY(p, result, sz);
# ifndef IGNORE_FREE
GC_free(p);
# endif
return(result);
}
}
# ifdef REDIRECT_MALLOC
# ifdef __STDC__
GC_PTR realloc(GC_PTR p, size_t lb)
# else
GC_PTR realloc(p,lb)
GC_PTR p;
size_t lb;
# endif
{
return(GC_realloc(p, lb));
}
# endif /* REDIRECT_MALLOC */
/* Explicitly deallocate an object p. */
# ifdef __STDC__
void GC_free(GC_PTR p)
# else
void GC_free(p)
GC_PTR p;
# endif
{
register struct hblk *h;
register hdr *hhdr;
register signed_word sz;
register ptr_t * flh;
register int knd;
register struct obj_kind * ok;
DCL_LOCK_STATE;
if (p == 0) return;
/* Required by ANSI. It's not my fault ... */
h = HBLKPTR(p);
hhdr = HDR(h);
# if defined(REDIRECT_MALLOC) && \
(defined(SOLARIS_THREADS) || defined(LINUX_THREADS))
/* We have to redirect malloc calls during initialization. */
/* Don't try to deallocate that memory. */
if (0 == hhdr) return;
# endif
knd = hhdr -> hb_obj_kind;
sz = hhdr -> hb_sz;
ok = &GC_obj_kinds[knd];
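/* Small objects go back on the appropriate free list; */
/* large objects return their whole block to the block */
/* allocator. */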
if (sz <= MAXOBJSZ) {
# ifdef THREADS
DISABLE_SIGNALS();
LOCK();
# endif
GC_mem_freed += sz;
/* A signal here can make GC_mem_freed and GC_non_gc_bytes */
/* inconsistent. We claim this is benign. */
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
/* It's unnecessary to clear the mark bit. If the */
/* object is reallocated, it doesn't matter. Otherwise */
/* collector will do it, since it's on a free list. */
if (ok -> ok_init) {
BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
}
flh = &(ok -> ok_freelist[sz]);
obj_link(p) = *flh;
*flh = (ptr_t)p;
# ifdef THREADS
UNLOCK();
ENABLE_SIGNALS();
# endif
} else {
DISABLE_SIGNALS();
LOCK();
GC_mem_freed += sz;
if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
GC_freehblk(h);
UNLOCK();
ENABLE_SIGNALS();
}
}
# ifdef REDIRECT_MALLOC
# ifdef __STDC__
void free(GC_PTR p)
# else
void free(p)
GC_PTR p;
# endif
{
# ifndef IGNORE_FREE
GC_free(p);
# endif
}
# endif /* REDIRECT_MALLOC */