author     Ian Lance Taylor <ian@gcc.gnu.org>   2013-01-29 20:52:43 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>   2013-01-29 20:52:43 +0000
commit     d6f2922e91928b5191a5c5f1b3a6b320712b5ce3 (patch)
tree       4f2fad1f4b778519bdd5941185c7e1d032af055b /libgo/runtime
parent     91bfca59095b1cca9d4364996866848eaaf76c26 (diff)
libgo: Update Go library to master revision 15489/921e53d4863c.
From-SVN: r195560
Diffstat (limited to 'libgo/runtime')
-rw-r--r--  libgo/runtime/go-nosys.c     38
-rw-r--r--  libgo/runtime/go-setenv.c     9
-rw-r--r--  libgo/runtime/lock_futex.c    3
-rw-r--r--  libgo/runtime/malloc.goc     22
-rw-r--r--  libgo/runtime/malloc.h        3
-rw-r--r--  libgo/runtime/mcentral.c     53
-rw-r--r--  libgo/runtime/mem.c           4
-rw-r--r--  libgo/runtime/mgc0.c        360
-rw-r--r--  libgo/runtime/mheap.c        14
-rw-r--r--  libgo/runtime/panic.c         2
-rw-r--r--  libgo/runtime/proc.c          7
-rw-r--r--  libgo/runtime/runtime.h      17
-rw-r--r--  libgo/runtime/sigqueue.goc  104
13 files changed, 475 insertions(+), 161 deletions(-)
diff --git a/libgo/runtime/go-nosys.c b/libgo/runtime/go-nosys.c
index 36bbdd2..3ab5ea2 100644
--- a/libgo/runtime/go-nosys.c
+++ b/libgo/runtime/go-nosys.c
@@ -16,8 +16,10 @@
#include <math.h>
#include <stdint.h>
#include <sys/types.h>
+#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
+#include <time.h>
#include <unistd.h>
#ifndef HAVE_OFF64_T
@@ -28,6 +30,19 @@ typedef signed int off64_t __attribute__ ((mode (DI)));
typedef off64_t loff_t;
#endif
+#ifndef HAVE_ACCEPT4
+struct sockaddr;
+int
+accept4 (int sockfd __attribute__ ((unused)),
+ struct sockaddr *addr __attribute__ ((unused)),
+ socklen_t *addrlen __attribute__ ((unused)),
+ int flags __attribute__ ((unused)))
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
#ifndef HAVE_EPOLL_CREATE1
int
epoll_create1 (int flags __attribute__ ((unused)))
@@ -171,6 +186,16 @@ openat (int dirfd __attribute__ ((unused)),
}
#endif
+#ifndef HAVE_PIPE2
+int
+pipe2 (int pipefd[2] __attribute__ ((unused)),
+ int flags __attribute__ ((unused)))
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
#ifndef HAVE_RENAMEAT
int
renameat (int olddirfd __attribute__ ((unused)),
@@ -241,6 +266,19 @@ unshare (int flags __attribute__ ((unused)))
}
#endif
+#ifndef HAVE_UTIMENSAT
+struct timespec;
+int
+utimensat(int dirfd __attribute__ ((unused)),
+ const char *pathname __attribute__ ((unused)),
+ const struct timespec times[2] __attribute__ ((unused)),
+ int flags __attribute__ ((unused)))
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
/* Long double math functions. These are needed on old i386 systems
that don't have them in libm. The compiler translates calls to
these functions on float64 to call an 80-bit floating point
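go-nosys.c accumulates fallback stubs for syscalls that older libcs lack: when configure does not define the corresponding HAVE_ macro, libgo supplies a stub that fails with ENOSYS, so the Go library detects the missing call at runtime instead of failing to link. A minimal standalone sketch of the pattern (fallocate is used here purely as an illustration, not taken from this patch):

    #include <errno.h>
    #include <sys/types.h>

    #ifndef HAVE_FALLOCATE
    /* Fallback: fail with "not implemented" rather than failing to link. */
    int
    fallocate (int fd __attribute__ ((unused)),
               int mode __attribute__ ((unused)),
               off_t offset __attribute__ ((unused)),
               off_t len __attribute__ ((unused)))
    {
      errno = ENOSYS;
      return -1;
    }
    #endif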
diff --git a/libgo/runtime/go-setenv.c b/libgo/runtime/go-setenv.c
index 53ef2d3..6c7378c 100644
--- a/libgo/runtime/go-setenv.c
+++ b/libgo/runtime/go-setenv.c
@@ -25,20 +25,25 @@ setenv_c (String k, String v)
unsigned char *vn;
ks = k.str;
+ if (ks == NULL)
+ ks = (const byte *) "";
kn = NULL;
+
vs = v.str;
+ if (vs == NULL)
+ vs = (const byte *) "";
vn = NULL;
#ifdef HAVE_SETENV
- if (ks[k.len] != 0)
+ if (ks != NULL && ks[k.len] != 0)
{
kn = __go_alloc (k.len + 1);
__builtin_memcpy (kn, ks, k.len);
ks = kn;
}
- if (vs[v.len] != 0)
+ if (vs != NULL && vs[v.len] != 0)
{
vn = __go_alloc (v.len + 1);
__builtin_memcpy (vn, vs, v.len);
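The NULL checks above matter because Go strings carry an explicit length, may have a nil data pointer when empty, and need not be NUL-terminated; setenv_c copies a string only when the byte just past it is not already 0. A standalone sketch of the general idea, using malloc in place of __go_alloc (setenv_counted is an invented name):

    #include <stdlib.h>
    #include <string.h>

    /* Invoke setenv with counted, possibly unterminated byte strings. */
    static int
    setenv_counted (const char *ks, size_t klen, const char *vs, size_t vlen)
    {
      char *kn = malloc (klen + 1);
      char *vn = malloc (vlen + 1);
      int r = -1;

      if (kn != NULL && vn != NULL)
        {
          memcpy (kn, ks, klen);
          kn[klen] = '\0';
          memcpy (vn, vs, vlen);
          vn[vlen] = '\0';
          r = setenv (kn, vn, 1);
        }
      free (kn);
      free (vn);
      return r;
    }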
diff --git a/libgo/runtime/lock_futex.c b/libgo/runtime/lock_futex.c
index 9a533a5..5374aff 100644
--- a/libgo/runtime/lock_futex.c
+++ b/libgo/runtime/lock_futex.c
@@ -111,7 +111,8 @@ runtime_noteclear(Note *n)
void
runtime_notewakeup(Note *n)
{
- runtime_xchg(&n->key, 1);
+ if(runtime_xchg(&n->key, 1))
+ runtime_throw("notewakeup - double wakeup");
runtime_futexwakeup(&n->key, 1);
}
diff --git a/libgo/runtime/malloc.goc b/libgo/runtime/malloc.goc
index 48460b1..a484642 100644
--- a/libgo/runtime/malloc.goc
+++ b/libgo/runtime/malloc.goc
@@ -20,6 +20,8 @@ package runtime
MHeap runtime_mheap;
+int32 runtime_checking;
+
extern MStats mstats; // defined in zruntime_def_$GOOS_$GOARCH.go
extern volatile intgo runtime_MemProfileRate
@@ -718,14 +720,22 @@ runtime_new(const Type *typ)
if(raceenabled)
runtime_m()->racepc = runtime_getcallerpc(&typ);
- flag = typ->__code&GO_NO_POINTERS ? FlagNoPointers : 0;
- ret = runtime_mallocgc(typ->__size, flag, 1, 1);
- if(UseSpanType && !flag) {
- if(false) {
- runtime_printf("new %S: %p\n", *typ->__reflection, ret);
+ if(typ->__size == 0) {
+ // All 0-length allocations use this pointer.
+ // The language does not require the allocations to
+ // have distinct values.
+ ret = (uint8*)&runtime_zerobase;
+ } else {
+ flag = typ->__code&GO_NO_POINTERS ? FlagNoPointers : 0;
+ ret = runtime_mallocgc(typ->__size, flag, 1, 1);
+
+ if(UseSpanType && !flag) {
+ if(false) {
+ runtime_printf("new %S: %p\n", *typ->__reflection, ret);
+ }
+ runtime_settype(ret, (uintptr)typ | TypeInfo_SingleObject);
}
- runtime_settype(ret, (uintptr)typ | TypeInfo_SingleObject);
}
return ret;
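runtime_new now special-cases zero-size allocations: the language does not require them to have distinct addresses, so they all share the address of runtime_zerobase and skip both the allocator and the settype call. In isolation (sketch; heap_alloc is a hypothetical stand-in for runtime_mallocgc):

    #include <stdint.h>

    extern uintptr_t runtime_zerobase;    /* one word shared by all 0-byte objects */
    extern void *heap_alloc (uintptr_t);  /* hypothetical allocator entry point */

    void *
    alloc_object (uintptr_t size)
    {
      if (size == 0)
        /* Zero-size allocations need not be distinct, so they all alias
           one address and never touch the heap or the type-info tables. */
        return (void *) &runtime_zerobase;
      return heap_alloc (size);
    }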
diff --git a/libgo/runtime/malloc.h b/libgo/runtime/malloc.h
index 172d81d..a820774 100644
--- a/libgo/runtime/malloc.h
+++ b/libgo/runtime/malloc.h
@@ -446,7 +446,7 @@ void runtime_markallocated(void *v, uintptr n, bool noptr);
void runtime_checkallocated(void *v, uintptr n);
void runtime_markfreed(void *v, uintptr n);
void runtime_checkfreed(void *v, uintptr n);
-int32 runtime_checking;
+extern int32 runtime_checking;
void runtime_markspan(void *v, uintptr size, uintptr n, bool leftover);
void runtime_unmarkspan(void *v, uintptr size);
bool runtime_blockspecial(void*);
@@ -500,6 +500,7 @@ enum
// defined in mgc0.go
void runtime_gc_m_ptr(Eface*);
+void runtime_gc_itab_ptr(Eface*);
void runtime_memorydump(void);
diff --git a/libgo/runtime/mcentral.c b/libgo/runtime/mcentral.c
index 670a647..b405438 100644
--- a/libgo/runtime/mcentral.c
+++ b/libgo/runtime/mcentral.c
@@ -19,7 +19,6 @@
#include "malloc.h"
static bool MCentral_Grow(MCentral *c);
-static void* MCentral_Alloc(MCentral *c);
static void MCentral_Free(MCentral *c, void *v);
// Initialize a single central free list.
@@ -34,12 +33,13 @@ runtime_MCentral_Init(MCentral *c, int32 sizeclass)
// Allocate up to n objects from the central free list.
// Return the number of objects allocated.
// The objects are linked together by their first words.
-// On return, *pstart points at the first object and *pend at the last.
+// On return, *pstart points at the first object.
int32
runtime_MCentral_AllocList(MCentral *c, int32 n, MLink **pfirst)
{
- MLink *first, *last, *v;
- int32 i;
+ MSpan *s;
+ MLink *first, *last;
+ int32 cap, avail, i;
runtime_lock(c);
// Replenish central list if empty.
@@ -50,41 +50,34 @@ runtime_MCentral_AllocList(MCentral *c, int32 n, MLink **pfirst)
return 0;
}
}
+ s = c->nonempty.next;
+ cap = (s->npages << PageShift) / s->elemsize;
+ avail = cap - s->ref;
+ if(avail < n)
+ n = avail;
- // Copy from list, up to n.
// First one is guaranteed to work, because we just grew the list.
- first = MCentral_Alloc(c);
+ first = s->freelist;
last = first;
- for(i=1; i<n && (v = MCentral_Alloc(c)) != nil; i++) {
- last->next = v;
- last = v;
+ for(i=1; i<n; i++) {
+ last = last->next;
}
+ s->freelist = last->next;
last->next = nil;
- c->nfree -= i;
-
- runtime_unlock(c);
- *pfirst = first;
- return i;
-}
+ s->ref += n;
+ c->nfree -= n;
-// Helper: allocate one object from the central free list.
-static void*
-MCentral_Alloc(MCentral *c)
-{
- MSpan *s;
- MLink *v;
-
- if(runtime_MSpanList_IsEmpty(&c->nonempty))
- return nil;
- s = c->nonempty.next;
- s->ref++;
- v = s->freelist;
- s->freelist = v->next;
- if(s->freelist == nil) {
+ if(n == avail) {
+ if(s->freelist != nil || s->ref != (uint32)cap) {
+ runtime_throw("invalid freelist");
+ }
runtime_MSpanList_Remove(s);
runtime_MSpanList_Insert(&c->empty, s);
}
- return v;
+
+ runtime_unlock(c);
+ *pfirst = first;
+ return n;
}
// Free n objects back into the central free list.
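The rewritten AllocList drops the one-object-at-a-time MCentral_Alloc helper: it computes how many objects the front span can still supply (cap - s->ref), walks n-1 links to find the tail, and detaches the whole prefix of the freelist in one step. The core list operation by itself (sketch):

    #include <stddef.h>

    typedef struct Link Link;
    struct Link { Link *next; };

    /* Detach up to n nodes from the front of *list; return how many were
       taken and leave the nil-terminated chain in *first. */
    static int
    pop_prefix (Link **list, int n, Link **first)
    {
      Link *head = *list, *tail = head;
      int i;

      if (head == NULL)
        return 0;
      for (i = 1; i < n && tail->next != NULL; i++)
        tail = tail->next;
      *list = tail->next;
      tail->next = NULL;
      *first = head;
      return i;
    }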
diff --git a/libgo/runtime/mem.c b/libgo/runtime/mem.c
index 9df4c87..e70694c 100644
--- a/libgo/runtime/mem.c
+++ b/libgo/runtime/mem.c
@@ -81,6 +81,10 @@ runtime_SysAlloc(uintptr n)
runtime_printf("if you're running SELinux, enable execmem for this process.\n");
exit(2);
}
+ if(errno == EAGAIN) {
+ runtime_printf("runtime: mmap: too much locked memory (check 'ulimit -l').\n");
+ runtime_exit(2);
+ }
return nil;
}
return p;
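EAGAIN from mmap on Linux usually means the locked-memory limit was hit, so SysAlloc now says so instead of returning nil silently, matching the existing EACCES/SELinux diagnostic. The shape of the check, as a standalone sketch:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    static void *
    sys_alloc (size_t n)
    {
      void *p = mmap (NULL, n, PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED)
        {
          if (errno == EACCES)
            fprintf (stderr, "mmap: access denied; if running SELinux, "
                             "enable execmem for this process.\n");
          if (errno == EAGAIN)
            fprintf (stderr, "mmap: too much locked memory "
                             "(check 'ulimit -l').\n");
          exit (2);
        }
      return p;
    }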
diff --git a/libgo/runtime/mgc0.c b/libgo/runtime/mgc0.c
index 9906365..ffbe2ce 100644
--- a/libgo/runtime/mgc0.c
+++ b/libgo/runtime/mgc0.c
@@ -11,6 +11,22 @@
#include "malloc.h"
#include "mgc0.h"
#include "race.h"
+#include "go-type.h"
+
+// Map gccgo field names to gc field names.
+// Slice aka __go_open_array.
+#define array __values
+#define cap __capacity
+// Iface aka __go_interface
+#define tab __methods
+// Eface aka __go_empty_interface.
+#define type __type_descriptor
+// Type aka __go_type_descriptor
+#define kind __code
+#define KindPtr GO_PTR
+#define KindNoPointers GO_NO_POINTERS
+// PtrType aka __go_ptr_type
+#define elem __element_type
#ifdef USING_SPLIT_STACK
@@ -32,6 +48,11 @@ enum {
handoffThreshold = 4,
IntermediateBufferCapacity = 64,
+
+ // Bits in type information
+ PRECISE = 1,
+ LOOP = 2,
+ PC_BITS = PRECISE | LOOP,
};
// Bits in per-word bitmap.
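The ti word the collector threads through scanblock and flushptrbuf is a pointer to a GC program with the PRECISE and LOOP flags packed into its low bits, which is safe because the program is word-aligned. Packing and unpacking, sketched:

    #include <stdint.h>

    enum { PRECISE = 1, LOOP = 2, PC_BITS = PRECISE | LOOP };

    /* The GC program is word-aligned, so its two low bits are free
       to carry the flags. */
    static uintptr_t
    make_ti (uintptr_t *prog, uintptr_t flags)
    {
      return (uintptr_t) prog | (flags & PC_BITS);
    }

    static uintptr_t *
    ti_prog (uintptr_t ti)
    {
      return (uintptr_t *) (ti & ~(uintptr_t) PC_BITS);
    }

    static int
    ti_precise (uintptr_t ti)
    {
      return (ti & PRECISE) != 0;
    }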
@@ -158,12 +179,14 @@ enum {
// is moved/flushed to the work buffer (Workbuf).
// The size of an intermediate buffer is very small,
// such as 32 or 64 elements.
+typedef struct PtrTarget PtrTarget;
struct PtrTarget
{
void *p;
uintptr ti;
};
+typedef struct BitTarget BitTarget;
struct BitTarget
{
void *p;
@@ -171,15 +194,19 @@ struct BitTarget
uintptr *bitp, shift;
};
+typedef struct BufferList BufferList;
struct BufferList
{
- struct PtrTarget ptrtarget[IntermediateBufferCapacity];
- struct BitTarget bittarget[IntermediateBufferCapacity];
- struct BufferList *next;
+ PtrTarget ptrtarget[IntermediateBufferCapacity];
+ BitTarget bittarget[IntermediateBufferCapacity];
+ BufferList *next;
};
-static struct BufferList *bufferList;
+static BufferList *bufferList;
static Lock lock;
+static Type *itabtype;
+
+static void enqueue(Obj obj, Workbuf **_wbuf, Obj **_wp, uintptr *_nobj);
// flushptrbuf moves data from the PtrTarget buffer to the work buffer.
// The PtrTarget buffer contains blocks irrespective of whether the blocks have been marked or scanned,
@@ -202,16 +229,16 @@ static Lock lock;
// flushptrbuf
// (2nd part, mark and enqueue)
static void
-flushptrbuf(struct PtrTarget *ptrbuf, uintptr n, Obj **_wp, Workbuf **_wbuf, uintptr *_nobj, struct BitTarget *bitbuf)
+flushptrbuf(PtrTarget *ptrbuf, PtrTarget **ptrbufpos, Obj **_wp, Workbuf **_wbuf, uintptr *_nobj, BitTarget *bitbuf)
{
byte *p, *arena_start, *obj;
- uintptr size, *bitp, bits, shift, j, x, xbits, off, nobj, ti;
+ uintptr size, *bitp, bits, shift, j, x, xbits, off, nobj, ti, n;
MSpan *s;
PageID k;
Obj *wp;
Workbuf *wbuf;
- struct PtrTarget *ptrbuf_end;
- struct BitTarget *bitbufpos, *bt;
+ PtrTarget *ptrbuf_end;
+ BitTarget *bitbufpos, *bt;
arena_start = runtime_mheap.arena_start;
@@ -219,7 +246,9 @@ flushptrbuf(struct PtrTarget *ptrbuf, uintptr n, Obj **_wp, Workbuf **_wbuf, uin
wbuf = *_wbuf;
nobj = *_nobj;
- ptrbuf_end = ptrbuf + n;
+ ptrbuf_end = *ptrbufpos;
+ n = ptrbuf_end - ptrbuf;
+ *ptrbufpos = ptrbuf;
// If buffer is nearly full, get a new one.
if(wbuf == nil || nobj+n >= nelem(wbuf->obj)) {
@@ -318,8 +347,7 @@ flushptrbuf(struct PtrTarget *ptrbuf, uintptr n, Obj **_wp, Workbuf **_wbuf, uin
if((bits & (bitAllocated|bitMarked)) != bitAllocated)
continue;
- *bitbufpos = (struct BitTarget){obj, ti, bitp, shift};
- bitbufpos++;
+ *bitbufpos++ = (BitTarget){obj, ti, bitp, shift};
}
runtime_lock(&lock);
@@ -370,6 +398,13 @@ flushptrbuf(struct PtrTarget *ptrbuf, uintptr n, Obj **_wp, Workbuf **_wbuf, uin
// Program that scans the whole block and treats every block element as a potential pointer
static uintptr defaultProg[2] = {PtrSize, GC_DEFAULT_PTR};
+// Local variables of a program fragment or loop
+typedef struct Frame Frame;
+struct Frame {
+ uintptr count, elemsize, b;
+ uintptr *loop_or_ret;
+};
+
// scanblock scans a block of n bytes starting at pointer b for references
// to other objects, scanning any it finds recursively until there are no
// unscanned objects left. Instead of using an explicit recursion, it keeps
@@ -384,22 +419,17 @@ static void
scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
{
byte *b, *arena_start, *arena_used;
- uintptr n, i, end_b;
+ uintptr n, i, end_b, elemsize, ti, objti, count /* , type */;
+ uintptr *pc, precise_type, nominal_size;
void *obj;
-
- // TODO(atom): to be expanded in a next CL
- struct Frame {uintptr count, b; uintptr *loop_or_ret;};
- struct Frame stack_top;
-
- uintptr *pc;
-
- struct BufferList *scanbuffers;
- struct PtrTarget *ptrbuf, *ptrbuf_end;
- struct BitTarget *bitbuf;
-
- struct PtrTarget *ptrbufpos;
-
- // End of local variable declarations.
+ const Type *t;
+ Slice *sliceptr;
+ Frame *stack_ptr, stack_top, stack[GC_STACK_CAPACITY+4];
+ BufferList *scanbuffers;
+ PtrTarget *ptrbuf, *ptrbuf_end, *ptrbufpos;
+ BitTarget *bitbuf;
+ Eface *eface;
+ Iface *iface;
if(sizeof(Workbuf) % PageSize != 0)
runtime_throw("scanblock: size of Workbuf is suboptimal");
@@ -408,6 +438,11 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
arena_start = runtime_mheap.arena_start;
arena_used = runtime_mheap.arena_used;
+ stack_ptr = stack+nelem(stack)-1;
+
+ precise_type = false;
+ nominal_size = 0;
+
// Allocate ptrbuf, bitbuf
{
runtime_lock(&lock);
@@ -437,50 +472,247 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
runtime_printf("scanblock %p %D\n", b, (int64)n);
}
- // TODO(atom): to be replaced in a next CL
- pc = defaultProg;
+ if(ti != 0 && 0) {
+ pc = (uintptr*)(ti & ~(uintptr)PC_BITS);
+ precise_type = (ti & PRECISE);
+ stack_top.elemsize = pc[0];
+ if(!precise_type)
+ nominal_size = pc[0];
+ if(ti & LOOP) {
+ stack_top.count = 0; // 0 means an infinite number of iterations
+ stack_top.loop_or_ret = pc+1;
+ } else {
+ stack_top.count = 1;
+ }
+ } else if(UseSpanType && 0) {
+#if 0
+ type = runtime_gettype(b);
+ if(type != 0) {
+ t = (Type*)(type & ~(uintptr)(PtrSize-1));
+ switch(type & (PtrSize-1)) {
+ case TypeInfo_SingleObject:
+ pc = (uintptr*)t->gc;
+ precise_type = true; // type information about 'b' is precise
+ stack_top.count = 1;
+ stack_top.elemsize = pc[0];
+ break;
+ case TypeInfo_Array:
+ pc = (uintptr*)t->gc;
+ if(pc[0] == 0)
+ goto next_block;
+ precise_type = true; // type information about 'b' is precise
+ stack_top.count = 0; // 0 means an infinite number of iterations
+ stack_top.elemsize = pc[0];
+ stack_top.loop_or_ret = pc+1;
+ break;
+ case TypeInfo_Map:
+ // TODO(atom): to be expanded in a next CL
+ pc = defaultProg;
+ break;
+ default:
+ runtime_throw("scanblock: invalid type");
+ return;
+ }
+ } else {
+ pc = defaultProg;
+ }
+#endif
+ } else {
+ pc = defaultProg;
+ }
pc++;
stack_top.b = (uintptr)b;
end_b = (uintptr)b + n - PtrSize;
- next_instr:
- // TODO(atom): to be expanded in a next CL
+ for(;;) {
+ obj = nil;
+ objti = 0;
switch(pc[0]) {
+ case GC_PTR:
+ obj = *(void**)(stack_top.b + pc[1]);
+ objti = pc[2];
+ pc += 3;
+ break;
+
+ case GC_SLICE:
+ sliceptr = (Slice*)(stack_top.b + pc[1]);
+ if(sliceptr->cap != 0) {
+ obj = sliceptr->array;
+ objti = pc[2] | PRECISE | LOOP;
+ }
+ pc += 3;
+ break;
+
+ case GC_APTR:
+ obj = *(void**)(stack_top.b + pc[1]);
+ pc += 2;
+ break;
+
+ case GC_STRING:
+ obj = *(void**)(stack_top.b + pc[1]);
+ pc += 2;
+ break;
+
+ case GC_EFACE:
+ eface = (Eface*)(stack_top.b + pc[1]);
+ pc += 2;
+ if(eface->type != nil && ((byte*)eface->__object >= arena_start && (byte*)eface->__object < arena_used)) {
+ t = eface->type;
+ if(t->__size <= sizeof(void*)) {
+ if((t->kind & KindNoPointers))
+ break;
+
+ obj = eface->__object;
+ if((t->kind & ~KindNoPointers) == KindPtr)
+ // objti = (uintptr)((PtrType*)t)->elem->gc;
+ objti = 0;
+ } else {
+ obj = eface->__object;
+ // objti = (uintptr)t->gc;
+ objti = 0;
+ }
+ }
+ break;
+
+ case GC_IFACE:
+ iface = (Iface*)(stack_top.b + pc[1]);
+ pc += 2;
+ if(iface->tab == nil)
+ break;
+
+ // iface->tab
+ if((byte*)iface->tab >= arena_start && (byte*)iface->tab < arena_used) {
+ // *ptrbufpos++ = (struct PtrTarget){iface->tab, (uintptr)itabtype->gc};
+ *ptrbufpos++ = (struct PtrTarget){iface->tab, 0};
+ if(ptrbufpos == ptrbuf_end)
+ flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj, bitbuf);
+ }
+
+ // iface->data
+ if((byte*)iface->__object >= arena_start && (byte*)iface->__object < arena_used) {
+ // t = iface->tab->type;
+ t = nil;
+ if(t->__size <= sizeof(void*)) {
+ if((t->kind & KindNoPointers))
+ break;
+
+ obj = iface->__object;
+ if((t->kind & ~KindNoPointers) == KindPtr)
+ // objti = (uintptr)((const PtrType*)t)->elem->gc;
+ objti = 0;
+ } else {
+ obj = iface->__object;
+ // objti = (uintptr)t->gc;
+ objti = 0;
+ }
+ }
+ break;
+
case GC_DEFAULT_PTR:
- while(true) {
- i = stack_top.b;
- if(i > end_b)
- goto next_block;
+ while((i = stack_top.b) <= end_b) {
stack_top.b += PtrSize;
-
obj = *(byte**)i;
if((byte*)obj >= arena_start && (byte*)obj < arena_used) {
- *ptrbufpos = (struct PtrTarget){obj, 0};
- ptrbufpos++;
+ *ptrbufpos++ = (struct PtrTarget){obj, 0};
if(ptrbufpos == ptrbuf_end)
- goto flush_buffers;
+ flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj, bitbuf);
}
}
+ goto next_block;
+
+ case GC_END:
+ if(--stack_top.count != 0) {
+ // Next iteration of a loop if possible.
+ elemsize = stack_top.elemsize;
+ stack_top.b += elemsize;
+ if(stack_top.b + elemsize <= end_b+PtrSize) {
+ pc = stack_top.loop_or_ret;
+ continue;
+ }
+ i = stack_top.b;
+ } else {
+ // Stack pop if possible.
+ if(stack_ptr+1 < stack+nelem(stack)) {
+ pc = stack_top.loop_or_ret;
+ stack_top = *(++stack_ptr);
+ continue;
+ }
+ i = (uintptr)b + nominal_size;
+ }
+ if(!precise_type) {
+ // Quickly scan [b+i,b+n) for possible pointers.
+ for(; i<=end_b; i+=PtrSize) {
+ if(*(byte**)i != nil) {
+ // Found a value that may be a pointer.
+ // Do a rescan of the entire block.
+ enqueue((Obj){b, n, 0}, &wbuf, &wp, &nobj);
+ break;
+ }
+ }
+ }
+ goto next_block;
+
+ case GC_ARRAY_START:
+ i = stack_top.b + pc[1];
+ count = pc[2];
+ elemsize = pc[3];
+ pc += 4;
+
+ // Stack push.
+ *stack_ptr-- = stack_top;
+ stack_top = (Frame){count, elemsize, i, pc};
+ continue;
+
+ case GC_ARRAY_NEXT:
+ if(--stack_top.count != 0) {
+ stack_top.b += stack_top.elemsize;
+ pc = stack_top.loop_or_ret;
+ } else {
+ // Stack pop.
+ stack_top = *(++stack_ptr);
+ pc += 1;
+ }
+ continue;
+
+ case GC_CALL:
+ // Stack push.
+ *stack_ptr-- = stack_top;
+ stack_top = (Frame){1, 0, stack_top.b + pc[1], pc+3 /*return address*/};
+ pc = (uintptr*)pc[2]; // target of the CALL instruction
+ continue;
+
+ case GC_MAP_PTR:
+ // TODO(atom): to be expanded in a next CL. Same as GC_APTR for now.
+ obj = *(void**)(stack_top.b + pc[1]);
+ pc += 3;
+ break;
+
+ case GC_REGION:
+ // TODO(atom): to be expanded in a next CL. Same as GC_APTR for now.
+ obj = (void*)(stack_top.b + pc[1]);
+ pc += 4;
+ break;
default:
runtime_throw("scanblock: invalid GC instruction");
return;
}
- flush_buffers:
- flushptrbuf(ptrbuf, ptrbufpos-ptrbuf, &wp, &wbuf, &nobj, bitbuf);
- ptrbufpos = ptrbuf;
- goto next_instr;
+ if((byte*)obj >= arena_start && (byte*)obj < arena_used) {
+ *ptrbufpos++ = (PtrTarget){obj, objti};
+ if(ptrbufpos == ptrbuf_end)
+ flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj, bitbuf);
+ }
+ }
next_block:
// Done scanning [b, b+n). Prepare for the next iteration of
- // the loop by setting b, n to the parameters for the next block.
+ // the loop by setting b, n, ti to the parameters for the next block.
if(nobj == 0) {
- flushptrbuf(ptrbuf, ptrbufpos-ptrbuf, &wp, &wbuf, &nobj, bitbuf);
- ptrbufpos = ptrbuf;
+ flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj, bitbuf);
if(nobj == 0) {
if(!keepworking) {
@@ -501,6 +733,7 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
--wp;
b = wp->p;
n = wp->n;
+ ti = wp->ti;
nobj--;
}
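Every opcode now funnels the pointer it finds through a single shared tail: append a PtrTarget to the fixed-size buffer and call flushptrbuf when it fills, which is why flushptrbuf's new signature takes the position by reference and resets it after draining. The buffering idiom on its own (sketch):

    #include <stddef.h>
    #include <stdint.h>

    enum { BUF_CAP = 64 };   /* IntermediateBufferCapacity in the patch */

    typedef struct { void *p; uintptr_t ti; } Target;

    static void
    flush (Target *buf, Target **pos)
    {
      size_t n = *pos - buf;
      /* ...take the lock once and mark/enqueue all n buffered pointers... */
      (void) n;
      *pos = buf;            /* reset to empty, as flushptrbuf now does */
    }

    static void
    record (Target *buf, Target **pos, void *obj, uintptr_t ti)
    {
      *(*pos)++ = (Target){obj, ti};
      if (*pos == buf + BUF_CAP)
        flush (buf, pos);    /* amortizes the lock over BUF_CAP entries */
    }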
@@ -1004,10 +1237,6 @@ sweepspan(ParFor *desc, uint32 idx)
USED(&desc);
s = runtime_mheap.allspans[idx];
- // Stamp newly unused spans. The scavenger will use that
- // info to potentially give back some pages to the OS.
- if(s->state == MSpanFree && s->unusedsince == 0)
- s->unusedsince = runtime_nanotime();
if(s->state != MSpanInUse)
return;
arena_start = runtime_mheap.arena_start;
@@ -1229,18 +1458,15 @@ cachestats(GCStats *stats)
MCache *c;
uint32 i;
uint64 stacks_inuse;
- uint64 stacks_sys;
uint64 *src, *dst;
if(stats)
runtime_memclr((byte*)stats, sizeof(*stats));
stacks_inuse = 0;
- stacks_sys = runtime_stacks_sys;
for(mp=runtime_allm; mp; mp=mp->alllink) {
c = mp->mcache;
runtime_purgecachedstats(c);
- // stacks_inuse += mp->stackalloc->inuse;
- // stacks_sys += mp->stackalloc->sys;
+ // stacks_inuse += mp->stackinuse*FixedStack;
if(stats) {
src = (uint64*)&mp->gcstats;
dst = (uint64*)stats;
@@ -1256,7 +1482,6 @@ cachestats(GCStats *stats)
}
}
mstats.stacks_inuse = stacks_inuse;
- mstats.stacks_sys = stacks_sys;
}
// Structure of arguments passed to function gc().
@@ -1330,11 +1555,12 @@ static void
gc(struct gc_args *args)
{
M *m;
- int64 t0, t1, t2, t3;
+ int64 t0, t1, t2, t3, t4;
uint64 heap0, heap1, obj0, obj1;
GCStats stats;
M *mp;
uint32 i;
+ // Eface eface;
runtime_semacquire(&runtime_worldsema);
if(!args->force && mstats.heap_alloc < mstats.next_gc) {
@@ -1367,6 +1593,12 @@ gc(struct gc_args *args)
work.sweepfor = runtime_parforalloc(MaxGcproc);
m->locks--;
+ if(itabtype == nil) {
+ // get C pointer to the Go type "itab"
+ // runtime_gc_itab_ptr(&eface);
+ // itabtype = ((PtrType*)eface.type)->elem;
+ }
+
work.nwait = 0;
work.ndone = 0;
work.debugmarkdone = 0;
@@ -1379,6 +1611,8 @@ gc(struct gc_args *args)
runtime_helpgc(work.nproc);
}
+ t1 = runtime_nanotime();
+
runtime_parfordo(work.markfor);
scanblock(nil, nil, 0, true);
@@ -1387,10 +1621,10 @@ gc(struct gc_args *args)
debug_scanblock(work.roots[i].p, work.roots[i].n);
runtime_atomicstore(&work.debugmarkdone, 1);
}
- t1 = runtime_nanotime();
+ t2 = runtime_nanotime();
runtime_parfordo(work.sweepfor);
- t2 = runtime_nanotime();
+ t3 = runtime_nanotime();
stealcache();
cachestats(&stats);
@@ -1420,18 +1654,18 @@ gc(struct gc_args *args)
heap1 = mstats.heap_alloc;
obj1 = mstats.nmalloc - mstats.nfree;
- t3 = runtime_nanotime();
- mstats.last_gc = t3;
- mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t3 - t0;
- mstats.pause_total_ns += t3 - t0;
+ t4 = runtime_nanotime();
+ mstats.last_gc = t4;
+ mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t4 - t0;
+ mstats.pause_total_ns += t4 - t0;
mstats.numgc++;
if(mstats.debuggc)
- runtime_printf("pause %D\n", t3-t0);
+ runtime_printf("pause %D\n", t4-t0);
if(gctrace) {
runtime_printf("gc%d(%d): %D+%D+%D ms, %D -> %D MB %D -> %D (%D-%D) objects,"
" %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
- mstats.numgc, work.nproc, (t1-t0)/1000000, (t2-t1)/1000000, (t3-t2)/1000000,
+ mstats.numgc, work.nproc, (t2-t1)/1000000, (t3-t2)/1000000, (t1-t0+t4-t3)/1000000,
heap0>>20, heap1>>20, obj0, obj1,
mstats.nmalloc, mstats.nfree,
stats.nhandoff, stats.nhandoffcnt,
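With the extra timestamp, t0..t4 bracket setup, parallel mark, sweep, and teardown, and the gctrace line now reports mark time, sweep time, and the combined setup-plus-teardown time. Phase accounting of this kind is just successive monotonic-clock deltas (sketch; nanotime_ns stands in for runtime_nanotime):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static int64_t
    nanotime_ns (void)
    {
      struct timespec ts;
      clock_gettime (CLOCK_MONOTONIC, &ts);
      return (int64_t) ts.tv_sec * 1000000000 + ts.tv_nsec;
    }

    /* gc()-style accounting: mark, sweep, and "everything else". */
    static void
    timed_gc (void (*setup) (void), void (*mark) (void),
              void (*sweep) (void), void (*teardown) (void))
    {
      int64_t t0 = nanotime_ns ();
      setup ();
      int64_t t1 = nanotime_ns ();
      mark ();
      int64_t t2 = nanotime_ns ();
      sweep ();
      int64_t t3 = nanotime_ns ();
      teardown ();
      int64_t t4 = nanotime_ns ();
      printf ("%lld+%lld+%lld ms\n",
              (long long) ((t2 - t1) / 1000000),   /* mark */
              (long long) ((t3 - t2) / 1000000),   /* sweep */
              (long long) ((t1 - t0 + t4 - t3) / 1000000));
    }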
diff --git a/libgo/runtime/mheap.c b/libgo/runtime/mheap.c
index 7be135b..1b478f9 100644
--- a/libgo/runtime/mheap.c
+++ b/libgo/runtime/mheap.c
@@ -138,7 +138,9 @@ HaveSpan:
*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift); // copy "needs zeroing" mark
t->state = MSpanInUse;
MHeap_FreeLocked(h, t);
+ t->unusedsince = s->unusedsince; // preserve age
}
+ s->unusedsince = 0;
// Record span info, because gc needs to be
// able to map interior pointer to containing span.
@@ -300,10 +302,12 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
}
mstats.heap_idle += s->npages<<PageShift;
s->state = MSpanFree;
- s->unusedsince = 0;
- s->npreleased = 0;
runtime_MSpanList_Remove(s);
sp = (uintptr*)(s->start<<PageShift);
+ // Stamp newly unused spans. The scavenger will use that
+ // info to potentially give back some pages to the OS.
+ s->unusedsince = runtime_nanotime();
+ s->npreleased = 0;
// Coalesce with earlier, later spans.
p = s->start;
@@ -405,10 +409,10 @@ runtime_MHeap_Scavenger(void* dummy)
runtime_entersyscall();
runtime_notesleep(&note);
runtime_exitsyscall();
+ if(trace)
+ runtime_printf("scvg%d: GC forced\n", k);
runtime_lock(h);
now = runtime_nanotime();
- if (trace)
- runtime_printf("scvg%d: GC forced\n", k);
}
sumreleased = 0;
for(i=0; i < nelem(h->free)+1; i++) {
@@ -419,7 +423,7 @@ runtime_MHeap_Scavenger(void* dummy)
if(runtime_MSpanList_IsEmpty(list))
continue;
for(s=list->next; s != list; s=s->next) {
- if(s->unusedsince != 0 && (now - s->unusedsince) > limit) {
+ if((now - s->unusedsince) > limit) {
released = (s->npages - s->npreleased) << PageShift;
mstats.heap_released += released;
sumreleased += released;
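Since MHeap_FreeLocked now stamps unusedsince the moment a span becomes free (and the allocator clears it again on reuse), the scavenger can drop its unusedsince != 0 guard and simply release any span idle past the limit. One scavenger step over a span, sketched with patch-style fields:

    #include <stdint.h>

    typedef struct
    {
      int64_t unusedsince;   /* when the span last became free */
      uintptr_t npages;      /* pages in the span */
      uintptr_t npreleased;  /* pages already returned to the OS */
    } Span;

    /* Release a span's remaining pages if it has been idle past the limit;
       return how many bytes were handed back. */
    static uintptr_t
    maybe_release (Span *s, int64_t now, int64_t limit, int page_shift)
    {
      uintptr_t released;

      if (now - s->unusedsince <= limit)
        return 0;
      released = (s->npages - s->npreleased) << page_shift;
      s->npreleased = s->npages;
      /* madvise(MADV_DONTNEED) over the span's pages would go here. */
      return released;
    }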
diff --git a/libgo/runtime/panic.c b/libgo/runtime/panic.c
index 85a5608..23a56f3 100644
--- a/libgo/runtime/panic.c
+++ b/libgo/runtime/panic.c
@@ -87,7 +87,7 @@ void
runtime_throw(const char *s)
{
runtime_startpanic();
- runtime_printf("throw: %s\n", s);
+ runtime_printf("fatal error: %s\n", s);
runtime_dopanic(0);
*(int32*)0 = 0; // not reached
runtime_exit(1); // even more not reached
diff --git a/libgo/runtime/proc.c b/libgo/runtime/proc.c
index 754c68e..e805c90 100644
--- a/libgo/runtime/proc.c
+++ b/libgo/runtime/proc.c
@@ -167,6 +167,13 @@ runtime_m(void)
int32 runtime_gcwaiting;
+G* runtime_allg;
+G* runtime_lastg;
+M* runtime_allm;
+
+int8* runtime_goos;
+int32 runtime_ncpu;
+
// The static TLS size. See runtime_newm.
static int tlssize;
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index 4a0ebf2b..a937503 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -119,6 +119,13 @@ enum
{
PtrSize = sizeof(void*),
};
+enum
+{
+ // Per-M stack segment cache size.
+ StackCacheSize = 32,
+ // Global <-> per-M stack segment cache transfer batch size.
+ StackCacheBatch = 16,
+};
/*
* structures
@@ -178,6 +185,8 @@ struct G
int32 sig;
int32 writenbuf;
byte* writebuf;
+ // DeferChunk *dchunk;
+ // DeferChunk *dchunknext;
uintptr sigcode0;
uintptr sigcode1;
// uintptr sigpc;
@@ -344,14 +353,14 @@ struct CgoMal
* external data
*/
extern uintptr runtime_zerobase;
-G* runtime_allg;
-G* runtime_lastg;
-M* runtime_allm;
+extern G* runtime_allg;
+extern G* runtime_lastg;
+extern M* runtime_allm;
extern int32 runtime_gomaxprocs;
extern bool runtime_singleproc;
extern uint32 runtime_panicking;
extern int32 runtime_gcwaiting; // gc is waiting to run
-int32 runtime_ncpu;
+extern int32 runtime_ncpu;
/*
* common functions and data
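The runtime.h and proc.c hunks (like the runtime_checking change in malloc.h and malloc.goc above) fix a classic C pitfall: a variable named in a header without extern is a tentative definition in every translation unit that includes it, and stricter toolchains reject the duplicate symbols at link time. The correct split, sketched with a hypothetical variable:

    /* foo.h -- shared header: declaration only. */
    extern int example_counter;

    /* foo.c -- exactly one translation unit owns the definition. */
    #include "foo.h"
    int example_counter;

    /* Without "extern" in the header, every includer would emit its own
       tentative definition of example_counter. */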
diff --git a/libgo/runtime/sigqueue.goc b/libgo/runtime/sigqueue.goc
index be7c592..82b0400 100644
--- a/libgo/runtime/sigqueue.goc
+++ b/libgo/runtime/sigqueue.goc
@@ -5,36 +5,24 @@
// This file implements runtime support for signal handling.
//
// Most synchronization primitives are not available from
-// the signal handler (it cannot block and cannot use locks)
+// the signal handler (it cannot block, allocate memory, or use locks)
// so the handler communicates with a processing goroutine
// via struct sig, below.
//
-// Ownership for sig.Note passes back and forth between
-// the signal handler and the signal goroutine in rounds.
-// The initial state is that sig.note is cleared (setup by signal_enable).
-// At the beginning of each round, mask == 0.
-// The round goes through three stages:
-//
-// (In parallel)
-// 1a) One or more signals arrive and are handled
-// by sigsend using cas to set bits in sig.mask.
-// The handler that changes sig.mask from zero to non-zero
-// calls notewakeup(&sig).
-// 1b) Sigrecv calls notesleep(&sig) to wait for the wakeup.
-//
-// 2) Having received the wakeup, sigrecv knows that sigsend
-// will not send another wakeup, so it can noteclear(&sig)
-// to prepare for the next round. (Sigsend may still be adding
-// signals to sig.mask at this point, which is fine.)
-//
-// 3) Sigrecv uses cas to grab the current sig.mask and zero it,
-// triggering the next round.
-//
-// The signal handler takes ownership of the note by atomically
-// changing mask from a zero to non-zero value. It gives up
-// ownership by calling notewakeup. The signal goroutine takes
-// ownership by returning from notesleep (caused by the notewakeup)
-// and gives up ownership by clearing mask.
+// sigsend() is called by the signal handler to queue a new signal.
+// signal_recv() is called by the Go program to receive a newly queued signal.
+// Synchronization between sigsend() and signal_recv() is based on the sig.state
+// variable. It can be in 3 states: 0, HASWAITER and HASSIGNAL.
+// HASWAITER means that signal_recv() is blocked on sig.Note and there are no
+// new pending signals.
+// HASSIGNAL means that sig.mask *may* contain new pending signals,
+// signal_recv() can't be blocked in this state.
+// 0 means that there are no new pending signals and signal_recv() is not blocked.
+// Transitions between states are done atomically with CAS.
+// When signal_recv() is unblocked, it resets sig.Note and rechecks sig.mask.
+// If several sigsend()'s and signal_recv() execute concurrently, it can lead to
+// unnecessary rechecks of sig.mask, but must not lead to missed signals
+// nor deadlocks.
package signal
#include "config.h"
@@ -47,15 +35,20 @@ static struct {
Note;
uint32 mask[(NSIG+31)/32];
uint32 wanted[(NSIG+31)/32];
- uint32 kick;
+ uint32 state;
bool inuse;
} sig;
+enum {
+ HASWAITER = 1,
+ HASSIGNAL = 2,
+};
+
// Called from sighandler to send a signal back out of the signal handling thread.
bool
__go_sigsend(int32 s)
{
- uint32 bit, mask;
+ uint32 bit, mask, old, new;
if(!sig.inuse || s < 0 || (size_t)s >= 32*nelem(sig.wanted) || !(sig.wanted[s/32]&(1U<<(s&31))))
return false;
@@ -67,8 +60,20 @@ __go_sigsend(int32 s)
if(runtime_cas(&sig.mask[s/32], mask, mask|bit)) {
// Added to queue.
// Only send a wakeup if the receiver needs a kick.
- if(runtime_cas(&sig.kick, 1, 0))
- runtime_notewakeup(&sig);
+ for(;;) {
+ old = runtime_atomicload(&sig.state);
+ if(old == HASSIGNAL)
+ break;
+ if(old == HASWAITER)
+ new = 0;
+ else // if(old == 0)
+ new = HASSIGNAL;
+ if(runtime_cas(&sig.state, old, new)) {
+ if (old == HASWAITER)
+ runtime_notewakeup(&sig);
+ break;
+ }
+ }
break;
}
}
@@ -79,7 +84,7 @@ __go_sigsend(int32 s)
// Must only be called from a single goroutine at a time.
func signal_recv() (m uint32) {
static uint32 recv[nelem(sig.mask)];
- int32 i, more;
+ uint32 i, old, new;
for(;;) {
// Serve from local copy if there are bits left.
@@ -91,15 +96,27 @@ func signal_recv() (m uint32) {
}
}
- // Get a new local copy.
- // Ask for a kick if more signals come in
- // during or after our check (before the sleep).
- if(sig.kick == 0) {
- runtime_noteclear(&sig);
- runtime_cas(&sig.kick, 0, 1);
+ // Check and update sig.state.
+ for(;;) {
+ old = runtime_atomicload(&sig.state);
+ if(old == HASWAITER)
+ runtime_throw("inconsistent state in signal_recv");
+ if(old == HASSIGNAL)
+ new = 0;
+ else // if(old == 0)
+ new = HASWAITER;
+ if(runtime_cas(&sig.state, old, new)) {
+ if (new == HASWAITER) {
+ runtime_entersyscall();
+ runtime_notesleep(&sig);
+ runtime_exitsyscall();
+ runtime_noteclear(&sig);
+ }
+ break;
+ }
}
- more = 0;
+ // Get a new local copy.
for(i=0; (size_t)i<nelem(sig.mask); i++) {
for(;;) {
m = sig.mask[i];
@@ -107,16 +124,7 @@ func signal_recv() (m uint32) {
break;
}
recv[i] = m;
- if(m != 0)
- more = 1;
}
- if(more)
- continue;
-
- // Sleep waiting for more.
- runtime_entersyscall();
- runtime_notesleep(&sig);
- runtime_exitsyscall();
}
done:;