author     Ian Lance Taylor <ian@gcc.gnu.org>   2014-07-19 08:53:52 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>   2014-07-19 08:53:52 +0000
commit     00d86ac99f5dd6afa5bbd7c38ffe1c585edd2387
tree       b988e32ea14a3dc1b4718b1fdfa47bab087ae96c
parent     bcf2fc6ee0a7edbe7de4299f28b66527c07bb0a2
libgo: Update to Go 1.3 release.
From-SVN: r212837
Diffstat (limited to 'libgo/runtime')
-rw-r--r--  libgo/runtime/chan.c             1186
-rw-r--r--  libgo/runtime/chan.goc             10
-rw-r--r--  libgo/runtime/cpuprof.c           350
-rw-r--r--  libgo/runtime/cpuprof.goc           3
-rw-r--r--  libgo/runtime/env_posix.c           2
-rw-r--r--  libgo/runtime/go-now.c             10
-rw-r--r--  libgo/runtime/go-signal.c          25
-rw-r--r--  libgo/runtime/heapdump.c          776
-rw-r--r--  libgo/runtime/lfstack.c            76
-rw-r--r--  libgo/runtime/lock_sema.c           2
-rw-r--r--  libgo/runtime/malloc.goc          364
-rw-r--r--  libgo/runtime/malloc.h            113
-rw-r--r--  libgo/runtime/mcache.c            124
-rw-r--r--  libgo/runtime/mcentral.c          145
-rw-r--r--  libgo/runtime/mem.c                55
-rw-r--r--  libgo/runtime/mgc0.c              473
-rw-r--r--  libgo/runtime/mgc0.h               41
-rw-r--r--  libgo/runtime/mheap.c             112
-rw-r--r--  libgo/runtime/mprof.goc           142
-rw-r--r--  libgo/runtime/netpoll.goc          34
-rw-r--r--  libgo/runtime/netpoll_epoll.c       4
-rw-r--r--  libgo/runtime/netpoll_kqueue.c      4
-rw-r--r--  libgo/runtime/panic.c              34
-rw-r--r--  libgo/runtime/print.c              19
-rw-r--r--  libgo/runtime/proc.c               65
-rw-r--r--  libgo/runtime/race.h                1
-rw-r--r--  libgo/runtime/rdebug.goc            5
-rw-r--r--  libgo/runtime/runtime.c            41
-rw-r--r--  libgo/runtime/runtime.h            50
-rw-r--r--  libgo/runtime/time.goc             24
30 files changed, 3653 insertions(+), 637 deletions(-)
diff --git a/libgo/runtime/chan.c b/libgo/runtime/chan.c
new file mode 100644
index 0000000..4559c0f
--- /dev/null
+++ b/libgo/runtime/chan.c
@@ -0,0 +1,1186 @@
+// AUTO-GENERATED by autogen.sh; DO NOT EDIT
+
+#include "runtime.h"
+#include "arch.h"
+#include "go-type.h"
+#include "race.h"
+#include "malloc.h"
+#include "chan.h"
+
+#line 13 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+uint32 runtime_Hchansize = sizeof ( Hchan ) ;
+#line 15 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static void dequeueg ( WaitQ* ) ;
+static SudoG* dequeue ( WaitQ* ) ;
+static void enqueue ( WaitQ* , SudoG* ) ;
+static void racesync ( Hchan* , SudoG* ) ;
+#line 20 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static Hchan*
+makechan ( ChanType *t , int64 hint )
+{
+Hchan *c;
+uintptr n;
+const Type *elem;
+#line 27 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+elem = t->__element_type;
+#line 30 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( elem->__size >= ( 1<<16 ) )
+runtime_throw ( "makechan: invalid channel element type" ) ;
+#line 33 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( hint < 0 || ( intgo ) hint != hint || ( elem->__size > 0 && ( uintptr ) hint > ( MaxMem - sizeof ( *c ) ) / elem->__size ) )
+runtime_panicstring ( "makechan: size out of range" ) ;
+#line 36 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+n = sizeof ( *c ) ;
+n = ROUND ( n , elem->__align ) ;
+#line 40 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+c = ( Hchan* ) runtime_mallocgc ( sizeof ( *c ) + hint*elem->__size , ( uintptr ) t | TypeInfo_Chan , 0 ) ;
+c->elemsize = elem->__size;
+c->elemtype = elem;
+c->dataqsiz = hint;
+#line 45 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( debug )
+runtime_printf ( "makechan: chan=%p; elemsize=%D; dataqsiz=%D\n" ,
+c , ( int64 ) elem->__size , ( int64 ) c->dataqsiz ) ;
+#line 49 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+return c;
+}
+Hchan* reflect_makechan(ChanType* t, uint64 size) __asm__ (GOSYM_PREFIX "reflect.makechan");
+Hchan* reflect_makechan(ChanType* t, uint64 size)
+{
+ Hchan* c;
+#line 52 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+
+ c = makechan(t, size);
+return c;
+}
+
+#line 56 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+Hchan*
+__go_new_channel ( ChanType *t , uintptr hint )
+{
+return makechan ( t , hint ) ;
+}
+#line 62 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+Hchan*
+__go_new_channel_big ( ChanType *t , uint64 hint )
+{
+return makechan ( t , hint ) ;
+}
+#line 82 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static bool
+chansend ( ChanType *t , Hchan *c , byte *ep , bool block , void *pc )
+{
+SudoG *sg;
+SudoG mysg;
+G* gp;
+int64 t0;
+G* g;
+#line 91 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+g = runtime_g ( ) ;
+#line 93 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( raceenabled )
+runtime_racereadobjectpc ( ep , t->__element_type , runtime_getcallerpc ( &t ) , chansend ) ;
+#line 96 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( c == nil ) {
+USED ( t ) ;
+if ( !block )
+return false;
+runtime_park ( nil , nil , "chan send (nil chan)" ) ;
+return false;
+}
+#line 104 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( runtime_gcwaiting ( ) )
+runtime_gosched ( ) ;
+#line 107 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( debug ) {
+runtime_printf ( "chansend: chan=%p\n" , c ) ;
+}
+#line 111 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+t0 = 0;
+mysg.releasetime = 0;
+if ( runtime_blockprofilerate > 0 ) {
+t0 = runtime_cputicks ( ) ;
+mysg.releasetime = -1;
+}
+#line 118 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+runtime_lock ( c ) ;
+if ( raceenabled )
+runtime_racereadpc ( c , pc , chansend ) ;
+if ( c->closed )
+goto closed;
+#line 124 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( c->dataqsiz > 0 )
+goto asynch;
+#line 127 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+sg = dequeue ( &c->recvq ) ;
+if ( sg != nil ) {
+if ( raceenabled )
+racesync ( c , sg ) ;
+runtime_unlock ( c ) ;
+#line 133 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+gp = sg->g;
+gp->param = sg;
+if ( sg->elem != nil )
+runtime_memmove ( sg->elem , ep , c->elemsize ) ;
+if ( sg->releasetime )
+sg->releasetime = runtime_cputicks ( ) ;
+runtime_ready ( gp ) ;
+return true;
+}
+#line 143 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( !block ) {
+runtime_unlock ( c ) ;
+return false;
+}
+#line 148 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+mysg.elem = ep;
+mysg.g = g;
+mysg.selectdone = nil;
+g->param = nil;
+enqueue ( &c->sendq , &mysg ) ;
+runtime_parkunlock ( c , "chan send" ) ;
+#line 155 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( g->param == nil ) {
+runtime_lock ( c ) ;
+if ( !c->closed )
+runtime_throw ( "chansend: spurious wakeup" ) ;
+goto closed;
+}
+#line 162 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( mysg.releasetime > 0 )
+runtime_blockevent ( mysg.releasetime - t0 , 2 ) ;
+#line 165 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+return true;
+#line 167 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+asynch:
+if ( c->closed )
+goto closed;
+#line 171 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( c->qcount >= c->dataqsiz ) {
+if ( !block ) {
+runtime_unlock ( c ) ;
+return false;
+}
+mysg.g = g;
+mysg.elem = nil;
+mysg.selectdone = nil;
+enqueue ( &c->sendq , &mysg ) ;
+runtime_parkunlock ( c , "chan send" ) ;
+#line 182 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+runtime_lock ( c ) ;
+goto asynch;
+}
+#line 186 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( raceenabled )
+runtime_racerelease ( chanbuf ( c , c->sendx ) ) ;
+#line 189 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+runtime_memmove ( chanbuf ( c , c->sendx ) , ep , c->elemsize ) ;
+if ( ++c->sendx == c->dataqsiz )
+c->sendx = 0;
+c->qcount++;
+#line 194 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+sg = dequeue ( &c->recvq ) ;
+if ( sg != nil ) {
+gp = sg->g;
+runtime_unlock ( c ) ;
+if ( sg->releasetime )
+sg->releasetime = runtime_cputicks ( ) ;
+runtime_ready ( gp ) ;
+} else
+runtime_unlock ( c ) ;
+if ( mysg.releasetime > 0 )
+runtime_blockevent ( mysg.releasetime - t0 , 2 ) ;
+return true;
+#line 207 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+closed:
+runtime_unlock ( c ) ;
+runtime_panicstring ( "send on closed channel" ) ;
+return false;
+}
+#line 214 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static bool
+chanrecv ( ChanType *t , Hchan* c , byte *ep , bool block , bool *received )
+{
+SudoG *sg;
+SudoG mysg;
+G *gp;
+int64 t0;
+G *g;
+#line 223 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( runtime_gcwaiting ( ) )
+runtime_gosched ( ) ;
+#line 228 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( debug )
+runtime_printf ( "chanrecv: chan=%p\n" , c ) ;
+#line 231 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+g = runtime_g ( ) ;
+#line 233 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( c == nil ) {
+USED ( t ) ;
+if ( !block )
+return false;
+runtime_park ( nil , nil , "chan receive (nil chan)" ) ;
+return false;
+}
+#line 241 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+t0 = 0;
+mysg.releasetime = 0;
+if ( runtime_blockprofilerate > 0 ) {
+t0 = runtime_cputicks ( ) ;
+mysg.releasetime = -1;
+}
+#line 248 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+runtime_lock ( c ) ;
+if ( c->dataqsiz > 0 )
+goto asynch;
+#line 252 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( c->closed )
+goto closed;
+#line 255 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+sg = dequeue ( &c->sendq ) ;
+if ( sg != nil ) {
+if ( raceenabled )
+racesync ( c , sg ) ;
+runtime_unlock ( c ) ;
+#line 261 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( ep != nil )
+runtime_memmove ( ep , sg->elem , c->elemsize ) ;
+gp = sg->g;
+gp->param = sg;
+if ( sg->releasetime )
+sg->releasetime = runtime_cputicks ( ) ;
+runtime_ready ( gp ) ;
+#line 269 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( received != nil )
+*received = true;
+return true;
+}
+#line 274 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( !block ) {
+runtime_unlock ( c ) ;
+return false;
+}
+#line 279 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+mysg.elem = ep;
+mysg.g = g;
+mysg.selectdone = nil;
+g->param = nil;
+enqueue ( &c->recvq , &mysg ) ;
+runtime_parkunlock ( c , "chan receive" ) ;
+#line 286 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( g->param == nil ) {
+runtime_lock ( c ) ;
+if ( !c->closed )
+runtime_throw ( "chanrecv: spurious wakeup" ) ;
+goto closed;
+}
+#line 293 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( received != nil )
+*received = true;
+if ( mysg.releasetime > 0 )
+runtime_blockevent ( mysg.releasetime - t0 , 2 ) ;
+return true;
+#line 299 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+asynch:
+if ( c->qcount <= 0 ) {
+if ( c->closed )
+goto closed;
+#line 304 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( !block ) {
+runtime_unlock ( c ) ;
+if ( received != nil )
+*received = false;
+return false;
+}
+mysg.g = g;
+mysg.elem = nil;
+mysg.selectdone = nil;
+enqueue ( &c->recvq , &mysg ) ;
+runtime_parkunlock ( c , "chan receive" ) ;
+#line 316 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+runtime_lock ( c ) ;
+goto asynch;
+}
+#line 320 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( raceenabled )
+runtime_raceacquire ( chanbuf ( c , c->recvx ) ) ;
+#line 323 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( ep != nil )
+runtime_memmove ( ep , chanbuf ( c , c->recvx ) , c->elemsize ) ;
+runtime_memclr ( chanbuf ( c , c->recvx ) , c->elemsize ) ;
+if ( ++c->recvx == c->dataqsiz )
+c->recvx = 0;
+c->qcount--;
+#line 330 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+sg = dequeue ( &c->sendq ) ;
+if ( sg != nil ) {
+gp = sg->g;
+runtime_unlock ( c ) ;
+if ( sg->releasetime )
+sg->releasetime = runtime_cputicks ( ) ;
+runtime_ready ( gp ) ;
+} else
+runtime_unlock ( c ) ;
+#line 340 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( received != nil )
+*received = true;
+if ( mysg.releasetime > 0 )
+runtime_blockevent ( mysg.releasetime - t0 , 2 ) ;
+return true;
+#line 346 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+closed:
+if ( ep != nil )
+runtime_memclr ( ep , c->elemsize ) ;
+if ( received != nil )
+*received = false;
+if ( raceenabled )
+runtime_raceacquire ( c ) ;
+runtime_unlock ( c ) ;
+if ( mysg.releasetime > 0 )
+runtime_blockevent ( mysg.releasetime - t0 , 2 ) ;
+return true;
+}
+#line 361 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+void
+__go_send_small ( ChanType *t , Hchan* c , uint64 val )
+{
+union
+{
+byte b[sizeof ( uint64 ) ];
+uint64 v;
+} u;
+byte *v;
+#line 371 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+u.v = val;
+#ifndef WORDS_BIGENDIAN
+v = u.b;
+#else
+v = u.b + sizeof ( uint64 ) - t->__element_type->__size;
+#endif
+chansend ( t , c , v , true , runtime_getcallerpc ( &t ) ) ;
+}
+#line 382 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+void
+__go_send_big ( ChanType *t , Hchan* c , byte* v )
+{
+chansend ( t , c , v , true , runtime_getcallerpc ( &t ) ) ;
+}
+#line 390 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+void
+__go_receive ( ChanType *t , Hchan* c , byte* v )
+{
+chanrecv ( t , c , v , true , nil ) ;
+}
+#line 396 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+_Bool runtime_chanrecv2 ( ChanType *t , Hchan* c , byte* v )
+__asm__ ( GOSYM_PREFIX "runtime.chanrecv2" ) ;
+#line 399 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+_Bool
+runtime_chanrecv2 ( ChanType *t , Hchan* c , byte* v )
+{
+bool received = false;
+#line 404 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+chanrecv ( t , c , v , true , &received ) ;
+return received;
+}
+bool runtime_selectnbsend(ChanType* t, Hchan* c, byte* elem) __asm__ (GOSYM_PREFIX "runtime.selectnbsend");
+bool runtime_selectnbsend(ChanType* t, Hchan* c, byte* elem)
+{
+ bool selected;
+#line 425 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+
+ selected = chansend(t, c, elem, false, runtime_getcallerpc(&t));
+return selected;
+}
+bool runtime_selectnbrecv(ChanType* t, byte* elem, Hchan* c) __asm__ (GOSYM_PREFIX "runtime.selectnbrecv");
+bool runtime_selectnbrecv(ChanType* t, byte* elem, Hchan* c)
+{
+ bool selected;
+#line 446 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+
+ selected = chanrecv(t, c, elem, false, nil);
+return selected;
+}
+bool runtime_selectnbrecv2(ChanType* t, byte* elem, bool* received, Hchan* c) __asm__ (GOSYM_PREFIX "runtime.selectnbrecv2");
+bool runtime_selectnbrecv2(ChanType* t, byte* elem, bool* received, Hchan* c)
+{
+ bool selected;
+#line 467 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+
+ bool r;
+
+ selected = chanrecv(t, c, elem, false, received == nil ? nil : &r);
+ if(received != nil)
+ *received = r;
+return selected;
+}
+bool reflect_chansend(ChanType* t, Hchan* c, byte* elem, bool nb) __asm__ (GOSYM_PREFIX "reflect.chansend");
+bool reflect_chansend(ChanType* t, Hchan* c, byte* elem, bool nb)
+{
+ bool selected;
+#line 475 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+
+ selected = chansend(t, c, elem, !nb, runtime_getcallerpc(&t));
+return selected;
+}
+struct reflect_chanrecv_ret {
+ bool selected;
+ bool received;
+};
+struct reflect_chanrecv_ret reflect_chanrecv(ChanType* t, Hchan* c, bool nb, byte* elem) __asm__ (GOSYM_PREFIX "reflect.chanrecv");
+struct reflect_chanrecv_ret reflect_chanrecv(ChanType* t, Hchan* c, bool nb, byte* elem)
+{
+ bool selected;
+ bool received;
+#line 479 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+
+ received = false;
+ selected = chanrecv(t, c, elem, !nb, &received);
+ {
+ struct reflect_chanrecv_ret __ret;
+ __ret.selected = selected;
+ __ret.received = received;
+ return __ret;
+ }
+}
+
+#line 484 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static Select* newselect ( int32 ) ;
+byte* runtime_newselect(int32 size) __asm__ (GOSYM_PREFIX "runtime.newselect");
+byte* runtime_newselect(int32 size)
+{
+ byte* sel;
+#line 486 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+
+ sel = (byte*)newselect(size);
+return sel;
+}
+
+#line 490 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static Select*
+newselect ( int32 size )
+{
+int32 n;
+Select *sel;
+#line 496 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+n = 0;
+if ( size > 1 )
+n = size-1;
+#line 504 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+sel = runtime_mal ( sizeof ( *sel ) +
+n*sizeof ( sel->scase[0] ) +
+size*sizeof ( sel->lockorder[0] ) +
+size*sizeof ( sel->pollorder[0] ) ) ;
+#line 509 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+sel->tcase = size;
+sel->ncase = 0;
+sel->lockorder = ( void* ) ( sel->scase + size ) ;
+sel->pollorder = ( void* ) ( sel->lockorder + size ) ;
+#line 514 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( debug )
+runtime_printf ( "newselect s=%p size=%d\n" , sel , size ) ;
+return sel;
+}
+#line 520 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static void selectsend ( Select *sel , Hchan *c , int index , void *elem ) ;
+void runtime_selectsend(Select* sel, Hchan* c, byte* elem, int32 index) __asm__ (GOSYM_PREFIX "runtime.selectsend");
+void runtime_selectsend(Select* sel, Hchan* c, byte* elem, int32 index)
+{
+#line 522 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+
+ // nil cases do not compete
+ if(c != nil)
+ selectsend(sel, c, index, elem);
+}
+
+#line 528 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static void
+selectsend ( Select *sel , Hchan *c , int index , void *elem )
+{
+int32 i;
+Scase *cas;
+#line 534 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+i = sel->ncase;
+if ( i >= sel->tcase )
+runtime_throw ( "selectsend: too many cases" ) ;
+sel->ncase = i+1;
+cas = &sel->scase[i];
+#line 540 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+cas->index = index;
+cas->chan = c;
+cas->kind = CaseSend;
+cas->sg.elem = elem;
+#line 545 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( debug )
+runtime_printf ( "selectsend s=%p index=%d chan=%p\n" ,
+sel , cas->index , cas->chan ) ;
+}
+#line 551 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static void selectrecv ( Select *sel , Hchan *c , int index , void *elem , bool* ) ;
+void runtime_selectrecv(Select* sel, Hchan* c, byte* elem, int32 index) __asm__ (GOSYM_PREFIX "runtime.selectrecv");
+void runtime_selectrecv(Select* sel, Hchan* c, byte* elem, int32 index)
+{
+#line 553 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+
+ // nil cases do not compete
+ if(c != nil)
+ selectrecv(sel, c, index, elem, nil);
+}
+void runtime_selectrecv2(Select* sel, Hchan* c, byte* elem, bool* received, int32 index) __asm__ (GOSYM_PREFIX "runtime.selectrecv2");
+void runtime_selectrecv2(Select* sel, Hchan* c, byte* elem, bool* received, int32 index)
+{
+#line 559 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+
+ // nil cases do not compete
+ if(c != nil)
+ selectrecv(sel, c, index, elem, received);
+}
+
+#line 565 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static void
+selectrecv ( Select *sel , Hchan *c , int index , void *elem , bool *received )
+{
+int32 i;
+Scase *cas;
+#line 571 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+i = sel->ncase;
+if ( i >= sel->tcase )
+runtime_throw ( "selectrecv: too many cases" ) ;
+sel->ncase = i+1;
+cas = &sel->scase[i];
+cas->index = index;
+cas->chan = c;
+#line 579 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+cas->kind = CaseRecv;
+cas->sg.elem = elem;
+cas->receivedp = received;
+#line 583 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( debug )
+runtime_printf ( "selectrecv s=%p index=%d chan=%p\n" ,
+sel , cas->index , cas->chan ) ;
+}
+#line 589 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static void selectdefault ( Select* , int ) ;
+void runtime_selectdefault(Select* sel, int32 index) __asm__ (GOSYM_PREFIX "runtime.selectdefault");
+void runtime_selectdefault(Select* sel, int32 index)
+{
+#line 591 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+
+ selectdefault(sel, index);
+}
+
+#line 595 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static void
+selectdefault ( Select *sel , int32 index )
+{
+int32 i;
+Scase *cas;
+#line 601 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+i = sel->ncase;
+if ( i >= sel->tcase )
+runtime_throw ( "selectdefault: too many cases" ) ;
+sel->ncase = i+1;
+cas = &sel->scase[i];
+cas->index = index;
+cas->chan = nil;
+#line 609 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+cas->kind = CaseDefault;
+#line 611 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( debug )
+runtime_printf ( "selectdefault s=%p index=%d\n" ,
+sel , cas->index ) ;
+}
+#line 616 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static void
+sellock ( Select *sel )
+{
+uint32 i;
+Hchan *c , *c0;
+#line 622 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+c = nil;
+for ( i=0; i<sel->ncase; i++ ) {
+c0 = sel->lockorder[i];
+if ( c0 && c0 != c ) {
+c = sel->lockorder[i];
+runtime_lock ( c ) ;
+}
+}
+}
+#line 632 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static void
+selunlock ( Select *sel )
+{
+int32 i , n , r;
+Hchan *c;
+#line 646 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+n = ( int32 ) sel->ncase;
+r = 0;
+#line 649 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( n>0 && sel->lockorder[0] == nil )
+r = 1;
+for ( i = n-1; i >= r; i-- ) {
+c = sel->lockorder[i];
+if ( i>0 && sel->lockorder[i-1] == c )
+continue;
+runtime_unlock ( c ) ;
+}
+}
+#line 659 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static bool
+selparkcommit ( G *gp , void *sel )
+{
+USED ( gp ) ;
+selunlock ( sel ) ;
+return true;
+}
+void runtime_block() __asm__ (GOSYM_PREFIX "runtime.block");
+void runtime_block()
+{
+#line 667 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+
+ runtime_park(nil, nil, "select (no cases)"); // forever
+}
+
+#line 671 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static int selectgo ( Select** ) ;
+int32 runtime_selectgo(Select* sel) __asm__ (GOSYM_PREFIX "runtime.selectgo");
+int32 runtime_selectgo(Select* sel)
+{
+ int32 ret;
+#line 675 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+
+ return selectgo(&sel);
+return ret;
+}
+
+#line 679 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static int
+selectgo ( Select **selp )
+{
+Select *sel;
+uint32 o , i , j , k , done;
+int64 t0;
+Scase *cas , *dfl;
+Hchan *c;
+SudoG *sg;
+G *gp;
+int index;
+G *g;
+#line 692 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+sel = *selp;
+if ( runtime_gcwaiting ( ) )
+runtime_gosched ( ) ;
+#line 696 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( debug )
+runtime_printf ( "select: sel=%p\n" , sel ) ;
+#line 699 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+g = runtime_g ( ) ;
+#line 701 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+t0 = 0;
+if ( runtime_blockprofilerate > 0 ) {
+t0 = runtime_cputicks ( ) ;
+for ( i=0; i<sel->ncase; i++ )
+sel->scase[i].sg.releasetime = -1;
+}
+#line 717 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+for ( i=0; i<sel->ncase; i++ )
+sel->pollorder[i] = i;
+for ( i=1; i<sel->ncase; i++ ) {
+o = sel->pollorder[i];
+j = runtime_fastrand1 ( ) % ( i+1 ) ;
+sel->pollorder[i] = sel->pollorder[j];
+sel->pollorder[j] = o;
+}
+#line 728 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+for ( i=0; i<sel->ncase; i++ ) {
+j = i;
+c = sel->scase[j].chan;
+while ( j > 0 && sel->lockorder[k= ( j-1 ) /2] < c ) {
+sel->lockorder[j] = sel->lockorder[k];
+j = k;
+}
+sel->lockorder[j] = c;
+}
+for ( i=sel->ncase; i-->0; ) {
+c = sel->lockorder[i];
+sel->lockorder[i] = sel->lockorder[0];
+j = 0;
+for ( ;; ) {
+k = j*2+1;
+if ( k >= i )
+break;
+if ( k+1 < i && sel->lockorder[k] < sel->lockorder[k+1] )
+k++;
+if ( c < sel->lockorder[k] ) {
+sel->lockorder[j] = sel->lockorder[k];
+j = k;
+continue;
+}
+break;
+}
+sel->lockorder[j] = c;
+}
+#line 763 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+sellock ( sel ) ;
+#line 765 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+loop:
+#line 767 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+dfl = nil;
+for ( i=0; i<sel->ncase; i++ ) {
+o = sel->pollorder[i];
+cas = &sel->scase[o];
+c = cas->chan;
+#line 773 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+switch ( cas->kind ) {
+case CaseRecv:
+if ( c->dataqsiz > 0 ) {
+if ( c->qcount > 0 )
+goto asyncrecv;
+} else {
+sg = dequeue ( &c->sendq ) ;
+if ( sg != nil )
+goto syncrecv;
+}
+if ( c->closed )
+goto rclose;
+break;
+#line 787 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+case CaseSend:
+if ( raceenabled )
+runtime_racereadpc ( c , runtime_selectgo , chansend ) ;
+if ( c->closed )
+goto sclose;
+if ( c->dataqsiz > 0 ) {
+if ( c->qcount < c->dataqsiz )
+goto asyncsend;
+} else {
+sg = dequeue ( &c->recvq ) ;
+if ( sg != nil )
+goto syncsend;
+}
+break;
+#line 802 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+case CaseDefault:
+dfl = cas;
+break;
+}
+}
+#line 808 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( dfl != nil ) {
+selunlock ( sel ) ;
+cas = dfl;
+goto retc;
+}
+#line 816 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+done = 0;
+for ( i=0; i<sel->ncase; i++ ) {
+o = sel->pollorder[i];
+cas = &sel->scase[o];
+c = cas->chan;
+sg = &cas->sg;
+sg->g = g;
+sg->selectdone = &done;
+#line 825 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+switch ( cas->kind ) {
+case CaseRecv:
+enqueue ( &c->recvq , sg ) ;
+break;
+#line 830 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+case CaseSend:
+enqueue ( &c->sendq , sg ) ;
+break;
+}
+}
+#line 836 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+g->param = nil;
+runtime_park ( selparkcommit , sel , "select" ) ;
+#line 839 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+sellock ( sel ) ;
+sg = g->param;
+#line 844 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+for ( i=0; i<sel->ncase; i++ ) {
+cas = &sel->scase[i];
+if ( cas != ( Scase* ) sg ) {
+c = cas->chan;
+if ( cas->kind == CaseSend )
+dequeueg ( &c->sendq ) ;
+else
+dequeueg ( &c->recvq ) ;
+}
+}
+#line 855 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( sg == nil )
+goto loop;
+#line 858 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+cas = ( Scase* ) sg;
+c = cas->chan;
+#line 861 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( c->dataqsiz > 0 )
+runtime_throw ( "selectgo: shouldn't happen" ) ;
+#line 864 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( debug )
+runtime_printf ( "wait-return: sel=%p c=%p cas=%p kind=%d\n" ,
+sel , c , cas , cas->kind ) ;
+#line 868 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( cas->kind == CaseRecv ) {
+if ( cas->receivedp != nil )
+*cas->receivedp = true;
+}
+#line 873 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( raceenabled ) {
+if ( cas->kind == CaseRecv && cas->sg.elem != nil )
+runtime_racewriteobjectpc ( cas->sg.elem , c->elemtype , selectgo , chanrecv ) ;
+else if ( cas->kind == CaseSend )
+runtime_racereadobjectpc ( cas->sg.elem , c->elemtype , selectgo , chansend ) ;
+}
+#line 880 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+selunlock ( sel ) ;
+goto retc;
+#line 883 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+asyncrecv:
+#line 885 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( raceenabled ) {
+if ( cas->sg.elem != nil )
+runtime_racewriteobjectpc ( cas->sg.elem , c->elemtype , selectgo , chanrecv ) ;
+runtime_raceacquire ( chanbuf ( c , c->recvx ) ) ;
+}
+if ( cas->receivedp != nil )
+*cas->receivedp = true;
+if ( cas->sg.elem != nil )
+runtime_memmove ( cas->sg.elem , chanbuf ( c , c->recvx ) , c->elemsize ) ;
+runtime_memclr ( chanbuf ( c , c->recvx ) , c->elemsize ) ;
+if ( ++c->recvx == c->dataqsiz )
+c->recvx = 0;
+c->qcount--;
+sg = dequeue ( &c->sendq ) ;
+if ( sg != nil ) {
+gp = sg->g;
+selunlock ( sel ) ;
+if ( sg->releasetime )
+sg->releasetime = runtime_cputicks ( ) ;
+runtime_ready ( gp ) ;
+} else {
+selunlock ( sel ) ;
+}
+goto retc;
+#line 910 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+asyncsend:
+#line 912 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( raceenabled ) {
+runtime_racerelease ( chanbuf ( c , c->sendx ) ) ;
+runtime_racereadobjectpc ( cas->sg.elem , c->elemtype , selectgo , chansend ) ;
+}
+runtime_memmove ( chanbuf ( c , c->sendx ) , cas->sg.elem , c->elemsize ) ;
+if ( ++c->sendx == c->dataqsiz )
+c->sendx = 0;
+c->qcount++;
+sg = dequeue ( &c->recvq ) ;
+if ( sg != nil ) {
+gp = sg->g;
+selunlock ( sel ) ;
+if ( sg->releasetime )
+sg->releasetime = runtime_cputicks ( ) ;
+runtime_ready ( gp ) ;
+} else {
+selunlock ( sel ) ;
+}
+goto retc;
+#line 932 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+syncrecv:
+#line 934 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( raceenabled ) {
+if ( cas->sg.elem != nil )
+runtime_racewriteobjectpc ( cas->sg.elem , c->elemtype , selectgo , chanrecv ) ;
+racesync ( c , sg ) ;
+}
+selunlock ( sel ) ;
+if ( debug )
+runtime_printf ( "syncrecv: sel=%p c=%p o=%d\n" , sel , c , o ) ;
+if ( cas->receivedp != nil )
+*cas->receivedp = true;
+if ( cas->sg.elem != nil )
+runtime_memmove ( cas->sg.elem , sg->elem , c->elemsize ) ;
+gp = sg->g;
+gp->param = sg;
+if ( sg->releasetime )
+sg->releasetime = runtime_cputicks ( ) ;
+runtime_ready ( gp ) ;
+goto retc;
+#line 953 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+rclose:
+#line 955 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+selunlock ( sel ) ;
+if ( cas->receivedp != nil )
+*cas->receivedp = false;
+if ( cas->sg.elem != nil )
+runtime_memclr ( cas->sg.elem , c->elemsize ) ;
+if ( raceenabled )
+runtime_raceacquire ( c ) ;
+goto retc;
+#line 964 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+syncsend:
+#line 966 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( raceenabled ) {
+runtime_racereadobjectpc ( cas->sg.elem , c->elemtype , selectgo , chansend ) ;
+racesync ( c , sg ) ;
+}
+selunlock ( sel ) ;
+if ( debug )
+runtime_printf ( "syncsend: sel=%p c=%p o=%d\n" , sel , c , o ) ;
+if ( sg->elem != nil )
+runtime_memmove ( sg->elem , cas->sg.elem , c->elemsize ) ;
+gp = sg->g;
+gp->param = sg;
+if ( sg->releasetime )
+sg->releasetime = runtime_cputicks ( ) ;
+runtime_ready ( gp ) ;
+#line 981 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+retc:
+#line 983 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+index = cas->index;
+if ( cas->sg.releasetime > 0 )
+runtime_blockevent ( cas->sg.releasetime - t0 , 2 ) ;
+runtime_free ( sel ) ;
+return index;
+#line 989 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+sclose:
+#line 991 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+selunlock ( sel ) ;
+runtime_panicstring ( "send on closed channel" ) ;
+return 0;
+}
+#line 997 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+typedef struct runtimeSelect runtimeSelect;
+struct runtimeSelect
+{
+uintptr dir;
+ChanType *typ;
+Hchan *ch;
+byte *val;
+} ;
+#line 1007 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+enum SelectDir {
+SelectSend = 1 ,
+SelectRecv ,
+SelectDefault ,
+} ;
+struct reflect_rselect_ret {
+ intgo chosen;
+ bool recvOK;
+};
+struct reflect_rselect_ret reflect_rselect(Slice cases) __asm__ (GOSYM_PREFIX "reflect.rselect");
+struct reflect_rselect_ret reflect_rselect(Slice cases)
+{
+ intgo chosen;
+ bool recvOK;
+#line 1013 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+
+ int32 i;
+ Select *sel;
+ runtimeSelect* rcase, *rc;
+
+ chosen = -1;
+ recvOK = false;
+
+ rcase = (runtimeSelect*)cases.__values;
+
+ sel = newselect(cases.__count);
+ for(i=0; i<cases.__count; i++) {
+ rc = &rcase[i];
+ switch(rc->dir) {
+ case SelectDefault:
+ selectdefault(sel, i);
+ break;
+ case SelectSend:
+ if(rc->ch == nil)
+ break;
+ selectsend(sel, rc->ch, i, rc->val);
+ break;
+ case SelectRecv:
+ if(rc->ch == nil)
+ break;
+ selectrecv(sel, rc->ch, i, rc->val, &recvOK);
+ break;
+ }
+ }
+
+ chosen = (intgo)(uintptr)selectgo(&sel);
+ {
+ struct reflect_rselect_ret __ret;
+ __ret.chosen = chosen;
+ __ret.recvOK = recvOK;
+ return __ret;
+ }
+}
+
+#line 1046 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static void closechan ( Hchan *c , void *pc ) ;
+void runtime_closechan(Hchan* c) __asm__ (GOSYM_PREFIX "runtime.closechan");
+void runtime_closechan(Hchan* c)
+{
+#line 1048 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+
+ closechan(c, runtime_getcallerpc(&c));
+}
+void reflect_chanclose(Hchan* c) __asm__ (GOSYM_PREFIX "reflect.chanclose");
+void reflect_chanclose(Hchan* c)
+{
+#line 1052 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+
+ closechan(c, runtime_getcallerpc(&c));
+}
+
+#line 1056 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static void
+closechan ( Hchan *c , void *pc )
+{
+SudoG *sg;
+G* gp;
+#line 1062 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( c == nil )
+runtime_panicstring ( "close of nil channel" ) ;
+#line 1065 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( runtime_gcwaiting ( ) )
+runtime_gosched ( ) ;
+#line 1068 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+runtime_lock ( c ) ;
+if ( c->closed ) {
+runtime_unlock ( c ) ;
+runtime_panicstring ( "close of closed channel" ) ;
+}
+#line 1074 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( raceenabled ) {
+runtime_racewritepc ( c , pc , runtime_closechan ) ;
+runtime_racerelease ( c ) ;
+}
+#line 1079 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+c->closed = true;
+#line 1082 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+for ( ;; ) {
+sg = dequeue ( &c->recvq ) ;
+if ( sg == nil )
+break;
+gp = sg->g;
+gp->param = nil;
+if ( sg->releasetime )
+sg->releasetime = runtime_cputicks ( ) ;
+runtime_ready ( gp ) ;
+}
+#line 1094 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+for ( ;; ) {
+sg = dequeue ( &c->sendq ) ;
+if ( sg == nil )
+break;
+gp = sg->g;
+gp->param = nil;
+if ( sg->releasetime )
+sg->releasetime = runtime_cputicks ( ) ;
+runtime_ready ( gp ) ;
+}
+#line 1105 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+runtime_unlock ( c ) ;
+}
+#line 1108 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+void
+__go_builtin_close ( Hchan *c )
+{
+runtime_closechan ( c ) ;
+}
+intgo reflect_chanlen(Hchan* c) __asm__ (GOSYM_PREFIX "reflect.chanlen");
+intgo reflect_chanlen(Hchan* c)
+{
+ intgo len;
+#line 1114 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+
+ if(c == nil)
+ len = 0;
+ else
+ len = c->qcount;
+return len;
+}
+
+#line 1121 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+intgo
+__go_chan_len ( Hchan *c )
+{
+return reflect_chanlen ( c ) ;
+}
+intgo reflect_chancap(Hchan* c) __asm__ (GOSYM_PREFIX "reflect.chancap");
+intgo reflect_chancap(Hchan* c)
+{
+ intgo cap;
+#line 1127 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+
+ if(c == nil)
+ cap = 0;
+ else
+ cap = c->dataqsiz;
+return cap;
+}
+
+#line 1134 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+intgo
+__go_chan_cap ( Hchan *c )
+{
+return reflect_chancap ( c ) ;
+}
+#line 1140 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static SudoG*
+dequeue ( WaitQ *q )
+{
+SudoG *sgp;
+#line 1145 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+loop:
+sgp = q->first;
+if ( sgp == nil )
+return nil;
+q->first = sgp->link;
+#line 1152 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( sgp->selectdone != nil ) {
+#line 1154 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+if ( *sgp->selectdone != 0 || !runtime_cas ( sgp->selectdone , 0 , 1 ) )
+goto loop;
+}
+#line 1158 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+return sgp;
+}
+#line 1161 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static void
+dequeueg ( WaitQ *q )
+{
+SudoG **l , *sgp , *prevsgp;
+G *g;
+#line 1167 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+g = runtime_g ( ) ;
+prevsgp = nil;
+for ( l=&q->first; ( sgp=*l ) != nil; l=&sgp->link , prevsgp=sgp ) {
+if ( sgp->g == g ) {
+*l = sgp->link;
+if ( q->last == sgp )
+q->last = prevsgp;
+break;
+}
+}
+}
+#line 1179 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static void
+enqueue ( WaitQ *q , SudoG *sgp )
+{
+sgp->link = nil;
+if ( q->first == nil ) {
+q->first = sgp;
+q->last = sgp;
+return;
+}
+q->last->link = sgp;
+q->last = sgp;
+}
+#line 1192 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/chan.goc"
+static void
+racesync ( Hchan *c , SudoG *sg )
+{
+runtime_racerelease ( chanbuf ( c , 0 ) ) ;
+runtime_raceacquireg ( sg->g , chanbuf ( c , 0 ) ) ;
+runtime_racereleaseg ( sg->g , chanbuf ( c , 0 ) ) ;
+runtime_raceacquire ( chanbuf ( c , 0 ) ) ;
+}
\ No newline at end of file
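
For orientation: chan.c is the machine-generated C form of chan.goc (per the DO NOT EDIT header), and the unmangled functions it exports are the hooks that gccgo emits calls to when compiling Go channel operations. A rough sketch of that mapping, using only entry points defined in the file above; the ChanType descriptor is normally compiler-generated, so the stub argument here is purely illustrative:

/* Sketch, not part of the patch: how Go channel operations land on the
 * chan.c entry points.  `ct` stands for the compiler-emitted ChanType
 * descriptor for `chan int64`; real code never builds one by hand. */
static void
channel_lowering_sketch(ChanType *ct)
{
	Hchan *c;
	int64 v = 7, out;

	c = __go_new_channel(ct, 10);        /* c := make(chan int64, 10) */
	__go_send_small(ct, c, (uint64)v);   /* c <- 7  (element fits in a uint64) */
	__go_send_big(ct, c, (byte*)&v);     /* c <- v  (general path) */
	__go_receive(ct, c, (byte*)&out);    /* out = <-c */
	__go_builtin_close(c);               /* close(c) */
}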
diff --git a/libgo/runtime/chan.goc b/libgo/runtime/chan.goc
index ebe0493..b8038d4 100644
--- a/libgo/runtime/chan.goc
+++ b/libgo/runtime/chan.goc
@@ -183,8 +183,10 @@ asynch:
goto asynch;
}
- if(raceenabled)
+ if(raceenabled) {
+ runtime_raceacquire(chanbuf(c, c->sendx));
runtime_racerelease(chanbuf(c, c->sendx));
+ }
runtime_memmove(chanbuf(c, c->sendx), ep, c->elemsize);
if(++c->sendx == c->dataqsiz)
@@ -317,8 +319,10 @@ asynch:
goto asynch;
}
- if(raceenabled)
+ if(raceenabled) {
runtime_raceacquire(chanbuf(c, c->recvx));
+ runtime_racerelease(chanbuf(c, c->recvx));
+ }
if(ep != nil)
runtime_memmove(ep, chanbuf(c, c->recvx), c->elemsize);
@@ -886,6 +890,7 @@ asyncrecv:
if(cas->sg.elem != nil)
runtime_racewriteobjectpc(cas->sg.elem, c->elemtype, selectgo, chanrecv);
runtime_raceacquire(chanbuf(c, c->recvx));
+ runtime_racerelease(chanbuf(c, c->recvx));
}
if(cas->receivedp != nil)
*cas->receivedp = true;
@@ -910,6 +915,7 @@ asyncrecv:
asyncsend:
// can send to buffer
if(raceenabled) {
+ runtime_raceacquire(chanbuf(c, c->sendx));
runtime_racerelease(chanbuf(c, c->sendx));
runtime_racereadobjectpc(cas->sg.elem, c->elemtype, selectgo, chansend);
}
diff --git a/libgo/runtime/cpuprof.c b/libgo/runtime/cpuprof.c
new file mode 100644
index 0000000..3398a92
--- /dev/null
+++ b/libgo/runtime/cpuprof.c
@@ -0,0 +1,350 @@
+// AUTO-GENERATED by autogen.sh; DO NOT EDIT
+
+#include "runtime.h"
+#include "arch.h"
+#include "malloc.h"
+#include "array.h"
+
+#line 57 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+typedef struct __go_open_array Slice;
+#define array __values
+#define len __count
+#define cap __capacity
+#line 62 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+enum
+{
+HashSize = 1<<10 ,
+LogSize = 1<<17 ,
+Assoc = 4 ,
+MaxStack = 64 ,
+} ;
+#line 70 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+typedef struct Profile Profile;
+typedef struct Bucket Bucket;
+typedef struct Entry Entry;
+#line 74 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+struct Entry {
+uintptr count;
+uintptr depth;
+uintptr stack[MaxStack];
+} ;
+#line 80 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+struct Bucket {
+Entry entry[Assoc];
+} ;
+#line 84 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+struct Profile {
+bool on;
+Note wait;
+uintptr count;
+uintptr evicts;
+uintptr lost;
+uintptr totallost;
+#line 93 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+Bucket hash[HashSize];
+#line 98 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+uintptr log[2][LogSize/2];
+uintptr nlog;
+int32 toggle;
+uint32 handoff;
+#line 106 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+uint32 wtoggle;
+bool wholding;
+bool flushing;
+bool eod_sent;
+} ;
+#line 112 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+static Lock lk;
+static Profile *prof;
+#line 115 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+static void tick ( uintptr* , int32 ) ;
+static void add ( Profile* , uintptr* , int32 ) ;
+static bool evict ( Profile* , Entry* ) ;
+static bool flushlog ( Profile* ) ;
+#line 120 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+static uintptr eod[3] = { 0 , 1 , 0 } ;
+#line 125 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+static void
+LostProfileData ( void )
+{
+}
+#line 130 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+extern void runtime_SetCPUProfileRate ( intgo )
+__asm__ ( GOSYM_PREFIX "runtime.SetCPUProfileRate" ) ;
+#line 135 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+void
+runtime_SetCPUProfileRate ( intgo hz )
+{
+uintptr *p;
+uintptr n;
+#line 142 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+if ( hz < 0 )
+hz = 0;
+if ( hz > 1000000 )
+hz = 1000000;
+#line 147 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+runtime_lock ( &lk ) ;
+if ( hz > 0 ) {
+if ( prof == nil ) {
+prof = runtime_SysAlloc ( sizeof *prof , &mstats.other_sys ) ;
+if ( prof == nil ) {
+runtime_printf ( "runtime: cpu profiling cannot allocate memory\n" ) ;
+runtime_unlock ( &lk ) ;
+return;
+}
+}
+if ( prof->on || prof->handoff != 0 ) {
+runtime_printf ( "runtime: cannot set cpu profile rate until previous profile has finished.\n" ) ;
+runtime_unlock ( &lk ) ;
+return;
+}
+#line 163 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+prof->on = true;
+p = prof->log[0];
+#line 167 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+*p++ = 0;
+*p++ = 3;
+*p++ = 0;
+*p++ = 1000000 / hz;
+*p++ = 0;
+prof->nlog = p - prof->log[0];
+prof->toggle = 0;
+prof->wholding = false;
+prof->wtoggle = 0;
+prof->flushing = false;
+prof->eod_sent = false;
+runtime_noteclear ( &prof->wait ) ;
+#line 180 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+runtime_setcpuprofilerate ( tick , hz ) ;
+} else if ( prof != nil && prof->on ) {
+runtime_setcpuprofilerate ( nil , 0 ) ;
+prof->on = false;
+#line 187 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+for ( ;; ) {
+n = prof->handoff;
+if ( n&0x80000000 )
+runtime_printf ( "runtime: setcpuprofile(off) twice" ) ;
+if ( runtime_cas ( &prof->handoff , n , n|0x80000000 ) )
+break;
+}
+if ( n == 0 ) {
+#line 196 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+runtime_notewakeup ( &prof->wait ) ;
+}
+}
+runtime_unlock ( &lk ) ;
+}
+#line 202 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+static void
+tick ( uintptr *pc , int32 n )
+{
+add ( prof , pc , n ) ;
+}
+#line 213 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+static void
+add ( Profile *p , uintptr *pc , int32 n )
+{
+int32 i , j;
+uintptr h , x;
+Bucket *b;
+Entry *e;
+#line 221 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+if ( n > MaxStack )
+n = MaxStack;
+#line 225 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+h = 0;
+for ( i=0; i<n; i++ ) {
+h = h<<8 | ( h>> ( 8* ( sizeof ( h ) -1 ) ) ) ;
+x = pc[i];
+h += x*31 + x*7 + x*3;
+}
+p->count++;
+#line 234 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+b = &p->hash[h%HashSize];
+for ( i=0; i<Assoc; i++ ) {
+e = &b->entry[i];
+if ( e->depth != ( uintptr ) n )
+continue;
+for ( j=0; j<n; j++ )
+if ( e->stack[j] != pc[j] )
+goto ContinueAssoc;
+e->count++;
+return;
+ContinueAssoc:;
+}
+#line 248 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+e = &b->entry[0];
+for ( i=1; i<Assoc; i++ )
+if ( b->entry[i].count < e->count )
+e = &b->entry[i];
+if ( e->count > 0 ) {
+if ( !evict ( p , e ) ) {
+#line 255 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+p->lost++;
+p->totallost++;
+return;
+}
+p->evicts++;
+}
+#line 263 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+e->depth = n;
+e->count = 1;
+for ( i=0; i<n; i++ )
+e->stack[i] = pc[i];
+}
+#line 275 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+static bool
+evict ( Profile *p , Entry *e )
+{
+int32 i , d , nslot;
+uintptr *log , *q;
+#line 281 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+d = e->depth;
+nslot = d+2;
+log = p->log[p->toggle];
+if ( p->nlog+nslot > nelem ( p->log[0] ) ) {
+if ( !flushlog ( p ) )
+return false;
+log = p->log[p->toggle];
+}
+#line 290 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+q = log+p->nlog;
+*q++ = e->count;
+*q++ = d;
+for ( i=0; i<d; i++ )
+*q++ = e->stack[i];
+p->nlog = q - log;
+e->count = 0;
+return true;
+}
+#line 304 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+static bool
+flushlog ( Profile *p )
+{
+uintptr *log , *q;
+#line 309 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+if ( !runtime_cas ( &p->handoff , 0 , p->nlog ) )
+return false;
+runtime_notewakeup ( &p->wait ) ;
+#line 313 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+p->toggle = 1 - p->toggle;
+log = p->log[p->toggle];
+q = log;
+if ( p->lost > 0 ) {
+*q++ = p->lost;
+*q++ = 1;
+*q++ = ( uintptr ) LostProfileData;
+}
+p->nlog = q - log;
+return true;
+}
+#line 327 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+Slice
+getprofile ( Profile *p )
+{
+uint32 i , j , n;
+Slice ret;
+Bucket *b;
+Entry *e;
+#line 335 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+ret.array = nil;
+ret.len = 0;
+ret.cap = 0;
+#line 339 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+if ( p == nil )
+return ret;
+#line 342 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+if ( p->wholding ) {
+#line 345 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+for ( ;; ) {
+n = p->handoff;
+if ( n == 0 ) {
+runtime_printf ( "runtime: phase error during cpu profile handoff\n" ) ;
+return ret;
+}
+if ( n & 0x80000000 ) {
+p->wtoggle = 1 - p->wtoggle;
+p->wholding = false;
+p->flushing = true;
+goto flush;
+}
+if ( runtime_cas ( &p->handoff , n , 0 ) )
+break;
+}
+p->wtoggle = 1 - p->wtoggle;
+p->wholding = false;
+}
+#line 364 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+if ( p->flushing )
+goto flush;
+#line 367 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+if ( !p->on && p->handoff == 0 )
+return ret;
+#line 371 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+runtime_notetsleepg ( &p->wait , -1 ) ;
+runtime_noteclear ( &p->wait ) ;
+#line 374 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+n = p->handoff;
+if ( n == 0 ) {
+runtime_printf ( "runtime: phase error during cpu profile wait\n" ) ;
+return ret;
+}
+if ( n == 0x80000000 ) {
+p->flushing = true;
+goto flush;
+}
+n &= ~0x80000000;
+#line 386 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+p->wholding = true;
+#line 388 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+ret.array = ( byte* ) p->log[p->wtoggle];
+ret.len = n*sizeof ( uintptr ) ;
+ret.cap = ret.len;
+return ret;
+#line 393 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+flush:
+#line 398 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+for ( i=0; i<HashSize; i++ ) {
+b = &p->hash[i];
+for ( j=0; j<Assoc; j++ ) {
+e = &b->entry[j];
+if ( e->count > 0 && !evict ( p , e ) ) {
+#line 404 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+goto breakflush;
+}
+}
+}
+breakflush:
+#line 411 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+if ( p->nlog > 0 ) {
+#line 414 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+ret.array = ( byte* ) p->log[p->toggle];
+ret.len = p->nlog*sizeof ( uintptr ) ;
+ret.cap = ret.len;
+p->nlog = 0;
+return ret;
+}
+#line 422 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+if ( !p->eod_sent ) {
+#line 425 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+p->eod_sent = true;
+ret.array = ( byte* ) eod;
+ret.len = sizeof eod;
+ret.cap = ret.len;
+return ret;
+}
+#line 433 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+p->flushing = false;
+if ( !runtime_cas ( &p->handoff , p->handoff , 0 ) )
+runtime_printf ( "runtime: profile flush racing with something\n" ) ;
+return ret;
+}
+Slice runtime_CPUProfile() __asm__ (GOSYM_PREFIX "runtime.CPUProfile");
+Slice runtime_CPUProfile()
+{
+ Slice ret;
+#line 441 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/cpuprof.goc"
+
+ ret = getprofile(prof);
+return ret;
+}
diff --git a/libgo/runtime/cpuprof.goc b/libgo/runtime/cpuprof.goc
index 28ae9bb..7d27bc6 100644
--- a/libgo/runtime/cpuprof.goc
+++ b/libgo/runtime/cpuprof.goc
@@ -87,7 +87,6 @@ struct Profile {
uintptr count; // tick count
uintptr evicts; // eviction count
uintptr lost; // lost ticks that need to be logged
- uintptr totallost; // total lost ticks
// Active recent stack traces.
Bucket hash[HashSize];
@@ -253,7 +252,6 @@ add(Profile *p, uintptr *pc, int32 n)
if(!evict(p, e)) {
// Could not evict entry. Record lost stack.
p->lost++;
- p->totallost++;
return;
}
p->evicts++;
@@ -317,6 +315,7 @@ flushlog(Profile *p)
*q++ = p->lost;
*q++ = 1;
*q++ = (uintptr)LostProfileData;
+ p->lost = 0;
}
p->nlog = q - log;
return true;
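
The cpuprof.goc hunks above drop the write-only totallost counter and, more usefully, zero p->lost once the synthetic LostProfileData entry has been written, so a burst of dropped ticks is reported in exactly one flush instead of being re-emitted on every flush that follows. After the patch, flushlog's lost-tick prologue reads:

/* Fragment of flushlog after this patch (see the hunk above). */
if(p->lost > 0) {
	*q++ = p->lost;
	*q++ = 1;
	*q++ = (uintptr)LostProfileData;
	p->lost = 0;   /* count the loss once, not on every later flush */
}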
diff --git a/libgo/runtime/env_posix.c b/libgo/runtime/env_posix.c
index 93f90f5..ff4bf0c 100644
--- a/libgo/runtime/env_posix.c
+++ b/libgo/runtime/env_posix.c
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
+// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
#include "runtime.h"
#include "array.h"
diff --git a/libgo/runtime/go-now.c b/libgo/runtime/go-now.c
index 73cc160..ea8d070 100644
--- a/libgo/runtime/go-now.c
+++ b/libgo/runtime/go-now.c
@@ -11,16 +11,6 @@
// Return current time. This is the implementation of time.now().
struct time_now_ret
-{
- int64_t sec;
- int32_t nsec;
-};
-
-struct time_now_ret now()
- __asm__ (GOSYM_PREFIX "time.now")
- __attribute__ ((no_split_stack));
-
-struct time_now_ret
now()
{
struct timeval tv;
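
The go-now.c hunk deletes the file-local definition of struct time_now_ret and the asm-renamed, no-split-stack prototype, which presumably now come from a shared runtime header; only the definition of now() remains here. The hunk cuts off before the body, so the following gettimeofday-based reconstruction is an assumption inferred from the surviving `struct timeval tv;` context line:

/* Hedged reconstruction of the function this hunk leaves in place; only
 * the `struct timeval tv;` line is visible above, the rest is assumed. */
struct time_now_ret
now()
{
	struct timeval tv;
	struct time_now_ret ret;

	gettimeofday(&tv, NULL);
	ret.sec = tv.tv_sec;
	ret.nsec = tv.tv_usec * 1000;   /* microseconds -> nanoseconds */
	return ret;
}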
diff --git a/libgo/runtime/go-signal.c b/libgo/runtime/go-signal.c
index 1624122..4a1bf56 100644
--- a/libgo/runtime/go-signal.c
+++ b/libgo/runtime/go-signal.c
@@ -238,16 +238,13 @@ runtime_sighandler (int sig, Siginfo *info,
/* The start of handling a signal which panics. */
static void
-sig_panic_leadin (int sig)
+sig_panic_leadin (G *gp)
{
int i;
sigset_t clear;
- if (runtime_m ()->mallocing)
- {
- runtime_printf ("caught signal while mallocing: %d\n", sig);
- runtime_throw ("caught signal while mallocing");
- }
+ if (!runtime_canpanic (gp))
+ runtime_throw ("unexpected signal during runtime execution");
/* The signal handler blocked signals; unblock them. */
i = sigfillset (&clear);
@@ -281,13 +278,14 @@ sig_panic_info_handler (int sig, Siginfo *info, void *context)
/* It would be nice to set g->sigpc here as the gc library does, but
I don't know how to get it portably. */
- sig_panic_leadin (sig);
+ sig_panic_leadin (g);
switch (sig)
{
#ifdef SIGBUS
case SIGBUS:
- if (info->si_code == BUS_ADRERR && (uintptr_t) info->si_addr < 0x1000)
+ if ((info->si_code == BUS_ADRERR && (uintptr_t) info->si_addr < 0x1000)
+ || g->paniconfault)
runtime_panicstring ("invalid memory address or "
"nil pointer dereference");
runtime_printf ("unexpected fault address %p\n", info->si_addr);
@@ -296,10 +294,11 @@ sig_panic_info_handler (int sig, Siginfo *info, void *context)
#ifdef SIGSEGV
case SIGSEGV:
- if ((info->si_code == 0
- || info->si_code == SEGV_MAPERR
- || info->si_code == SEGV_ACCERR)
- && (uintptr_t) info->si_addr < 0x1000)
+ if (((info->si_code == 0
+ || info->si_code == SEGV_MAPERR
+ || info->si_code == SEGV_ACCERR)
+ && (uintptr_t) info->si_addr < 0x1000)
+ || g->paniconfault)
runtime_panicstring ("invalid memory address or "
"nil pointer dereference");
runtime_printf ("unexpected fault address %p\n", info->si_addr);
@@ -342,7 +341,7 @@ sig_panic_handler (int sig)
g->sigcode0 = 0;
g->sigcode1 = 0;
- sig_panic_leadin (sig);
+ sig_panic_leadin (g);
switch (sig)
{
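
The go-signal.c changes do two related things. First, the decision about whether a signal may be turned into a Go panic moves out of an ad-hoc "are we mallocing?" test into runtime_canpanic(gp), so sig_panic_leadin now takes the G rather than the signal number. Second, faults at non-nil addresses may now panic instead of crashing the process when the goroutine has g->paniconfault set, the runtime half of Go 1.3's debug.SetPanicOnFault. Condensed, the SIGSEGV decision after the patch is:

/* Condensed SIGSEGV handling after this patch (conditions copied from the
 * hunks above; runtime_canpanic is defined elsewhere in the runtime). */
if (!runtime_canpanic (g))
  runtime_throw ("unexpected signal during runtime execution");

if (((info->si_code == 0
      || info->si_code == SEGV_MAPERR
      || info->si_code == SEGV_ACCERR)
     && (uintptr_t) info->si_addr < 0x1000)   /* near-nil address */
    || g->paniconfault)                       /* debug.SetPanicOnFault(true) */
  runtime_panicstring ("invalid memory address or "
		       "nil pointer dereference");
/* otherwise: report the unexpected fault address and crash, as before */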
diff --git a/libgo/runtime/heapdump.c b/libgo/runtime/heapdump.c
new file mode 100644
index 0000000..d0cfb01
--- /dev/null
+++ b/libgo/runtime/heapdump.c
@@ -0,0 +1,776 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Implementation of runtime/debug.WriteHeapDump. Writes all
+// objects in the heap plus additional info (roots, threads,
+// finalizers, etc.) to a file.
+
+// The format of the dumped file is described at
+// http://code.google.com/p/go-wiki/wiki/heapdump13
+
+#include "runtime.h"
+#include "arch.h"
+#include "malloc.h"
+#include "mgc0.h"
+#include "go-type.h"
+#include "go-defer.h"
+#include "go-panic.h"
+
+#define hash __hash
+#define KindNoPointers GO_NO_POINTERS
+
+enum {
+ FieldKindEol = 0,
+ FieldKindPtr = 1,
+ FieldKindString = 2,
+ FieldKindSlice = 3,
+ FieldKindIface = 4,
+ FieldKindEface = 5,
+
+ TagEOF = 0,
+ TagObject = 1,
+ TagOtherRoot = 2,
+ TagType = 3,
+ TagGoRoutine = 4,
+ TagStackFrame = 5,
+ TagParams = 6,
+ TagFinalizer = 7,
+ TagItab = 8,
+ TagOSThread = 9,
+ TagMemStats = 10,
+ TagQueuedFinalizer = 11,
+ TagData = 12,
+ TagBss = 13,
+ TagDefer = 14,
+ TagPanic = 15,
+ TagMemProf = 16,
+ TagAllocSample = 17,
+
+ TypeInfo_Conservative = 127,
+};
+
+// static uintptr* playgcprog(uintptr offset, uintptr *prog, void (*callback)(void*,uintptr,uintptr), void *arg);
+// static void dumpfields(uintptr *prog);
+static void dumpefacetypes(void *obj, uintptr size, const Type *type, uintptr kind);
+
+// fd to write the dump to.
+static uintptr dumpfd;
+
+// buffer of pending write data
+enum {
+ BufSize = 4096,
+};
+static byte buf[BufSize];
+static uintptr nbuf;
+
+static void
+hwrite(const byte *data, uintptr len)
+{
+ if(len + nbuf <= BufSize) {
+ runtime_memmove(buf + nbuf, data, len);
+ nbuf += len;
+ return;
+ }
+ runtime_write(dumpfd, buf, nbuf);
+ if(len >= BufSize) {
+ runtime_write(dumpfd, data, len);
+ nbuf = 0;
+ } else {
+ runtime_memmove(buf, data, len);
+ nbuf = len;
+ }
+}
+
+static void
+flush(void)
+{
+ runtime_write(dumpfd, buf, nbuf);
+ nbuf = 0;
+}
+
+// Cache of types that have been serialized already.
+// We use a type's hash field to pick a bucket.
+// Inside a bucket, we keep a list of types that
+// have been serialized so far, most recently used first.
+// Note: when a bucket overflows we may end up
+// serializing a type more than once. That's ok.
+enum {
+ TypeCacheBuckets = 256, // must be a power of 2
+ TypeCacheAssoc = 4,
+};
+typedef struct TypeCacheBucket TypeCacheBucket;
+struct TypeCacheBucket {
+ const Type *t[TypeCacheAssoc];
+};
+static TypeCacheBucket typecache[TypeCacheBuckets];
+
+// dump a uint64 in a varint format parseable by encoding/binary
+static void
+dumpint(uint64 v)
+{
+ byte buf[10];
+ int32 n;
+ n = 0;
+ while(v >= 0x80) {
+ buf[n++] = v | 0x80;
+ v >>= 7;
+ }
+ buf[n++] = v;
+ hwrite(buf, n);
+}
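
dumpint() writes the base-128 varint that encoding/binary's Uvarint
reads back: seven payload bits per byte, least-significant group first,
with the high bit set on every byte except the last. For example,
dumpint(300) emits the two bytes 0xAC 0x02. A minimal matching decoder,
sketched here for illustration (the helper is hypothetical, not part of
the runtime):

static int
undumpint(const unsigned char *p, int n, unsigned long long *out)
{
	unsigned long long v = 0;
	int i, shift = 0;

	for(i = 0; i < n && shift < 64; i++) {
		v |= (unsigned long long)(p[i] & 0x7f) << shift;
		if((p[i] & 0x80) == 0) {	/* high bit clear: final byte */
			*out = v;
			return i + 1;	/* bytes consumed */
		}
		shift += 7;
	}
	return 0;	/* truncated or overlong input */
}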
+
+static void
+dumpbool(bool b)
+{
+ dumpint(b ? 1 : 0);
+}
+
+// dump varint uint64 length followed by memory contents
+static void
+dumpmemrange(const byte *data, uintptr len)
+{
+ dumpint(len);
+ hwrite(data, len);
+}
+
+static void
+dumpstr(String s)
+{
+ dumpmemrange(s.str, s.len);
+}
+
+static void
+dumpcstr(const int8 *c)
+{
+ dumpmemrange((const byte*)c, runtime_findnull((const byte*)c));
+}
+
+// dump information for a type
+static void
+dumptype(const Type *t)
+{
+ TypeCacheBucket *b;
+ int32 i, j;
+
+ if(t == nil) {
+ return;
+ }
+
+ // If we've definitely serialized the type before,
+ // no need to do it again.
+ b = &typecache[t->hash & (TypeCacheBuckets-1)];
+ if(t == b->t[0]) return;
+ for(i = 1; i < TypeCacheAssoc; i++) {
+ if(t == b->t[i]) {
+ // Move-to-front
+ for(j = i; j > 0; j--) {
+ b->t[j] = b->t[j-1];
+ }
+ b->t[0] = t;
+ return;
+ }
+ }
+ // Might not have been dumped yet. Dump it and
+ // remember we did so.
+ for(j = TypeCacheAssoc-1; j > 0; j--) {
+ b->t[j] = b->t[j-1];
+ }
+ b->t[0] = t;
+
+ // dump the type
+ dumpint(TagType);
+ dumpint((uintptr)t);
+ dumpint(t->__size);
+ if(t->__uncommon == nil || t->__uncommon->__pkg_path == nil || t->__uncommon->__name == nil) {
+ dumpstr(*t->__reflection);
+ } else {
+ dumpint(t->__uncommon->__pkg_path->len + 1 + t->__uncommon->__name->len);
+ hwrite(t->__uncommon->__pkg_path->str, t->__uncommon->__pkg_path->len);
+ hwrite((const byte*)".", 1);
+ hwrite(t->__uncommon->__name->str, t->__uncommon->__name->len);
+ }
+ dumpbool(t->__size > PtrSize || (t->__code & KindNoPointers) == 0);
+ // dumpfields((uintptr*)t->gc + 1);
+}
+
+// returns true if object is scannable
+static bool
+scannable(byte *obj)
+{
+ uintptr *b, off, shift;
+
+ off = (uintptr*)obj - (uintptr*)runtime_mheap.arena_start; // word offset
+ b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+ return ((*b >> shift) & bitScan) != 0;
+}
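
scannable() consults the heap bitmap that the collector keeps just
below arena_start: every arena word has an entry, wordsPerBitmapWord
entries are packed into each bitmap word, and the bitmap grows downward
from arena_start. The same address arithmetic is inlined again in
dumpobjs() further down; as a sketch (the helper name is illustrative,
not a runtime function):

static void
bitmap_locate(void *p, uintptr **bitp, uintptr *shift)
{
	uintptr off;

	off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start;	/* word index into the arena */
	*bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;	/* bitmap grows down */
	*shift = off % wordsPerBitmapWord;
}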
+
+// dump an object
+static void
+dumpobj(byte *obj, uintptr size, const Type *type, uintptr kind)
+{
+ if(type != nil) {
+ dumptype(type);
+ dumpefacetypes(obj, size, type, kind);
+ }
+
+ dumpint(TagObject);
+ dumpint((uintptr)obj);
+ dumpint((uintptr)type);
+ dumpint(kind);
+ dumpmemrange(obj, size);
+}
+
+static void
+dumpotherroot(const char *description, byte *to)
+{
+ dumpint(TagOtherRoot);
+ dumpcstr((const int8 *)description);
+ dumpint((uintptr)to);
+}
+
+static void
+dumpfinalizer(byte *obj, FuncVal *fn, const FuncType* ft, const PtrType *ot)
+{
+ dumpint(TagFinalizer);
+ dumpint((uintptr)obj);
+ dumpint((uintptr)fn);
+ dumpint((uintptr)fn->fn);
+ dumpint((uintptr)ft);
+ dumpint((uintptr)ot);
+}
+
+typedef struct ChildInfo ChildInfo;
+struct ChildInfo {
+ // Information passed up from the callee frame about
+ // the layout of the outargs region.
+ uintptr argoff; // where the arguments start in the frame
+ uintptr arglen; // size of args region
+ BitVector args; // if args.n >= 0, pointer map of args region
+
+ byte *sp; // callee sp
+ uintptr depth; // depth in call stack (0 == most recent)
+};
+
+static void
+dumpgoroutine(G *gp)
+{
+ // ChildInfo child;
+ Defer *d;
+ Panic *p;
+
+ dumpint(TagGoRoutine);
+ dumpint((uintptr)gp);
+ dumpint((uintptr)0);
+ dumpint(gp->goid);
+ dumpint(gp->gopc);
+ dumpint(gp->status);
+ dumpbool(gp->issystem);
+ dumpbool(gp->isbackground);
+ dumpint(gp->waitsince);
+ dumpcstr((const int8 *)gp->waitreason);
+ dumpint((uintptr)0);
+ dumpint((uintptr)gp->m);
+ dumpint((uintptr)gp->defer);
+ dumpint((uintptr)gp->panic);
+
+ // dump stack
+ // child.args.n = -1;
+ // child.arglen = 0;
+ // child.sp = nil;
+ // child.depth = 0;
+ // if(!ScanStackByFrames)
+ // runtime_throw("need frame info to dump stacks");
+ // runtime_gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, &child, false);
+
+ // dump defer & panic records
+ for(d = gp->defer; d != nil; d = d->__next) {
+ dumpint(TagDefer);
+ dumpint((uintptr)d);
+ dumpint((uintptr)gp);
+ dumpint((uintptr)d->__arg);
+ dumpint((uintptr)d->__frame);
+ dumpint((uintptr)d->__pfn);
+ dumpint((uintptr)0);
+ dumpint((uintptr)d->__next);
+ }
+ for (p = gp->panic; p != nil; p = p->__next) {
+ dumpint(TagPanic);
+ dumpint((uintptr)p);
+ dumpint((uintptr)gp);
+ dumpint((uintptr)p->__arg.__type_descriptor);
+ dumpint((uintptr)p->__arg.__object);
+ dumpint((uintptr)0);
+ dumpint((uintptr)p->__next);
+ }
+}
+
+static void
+dumpgs(void)
+{
+ G *gp;
+ uint32 i;
+
+ // goroutines & stacks
+ for(i = 0; i < runtime_allglen; i++) {
+ gp = runtime_allg[i];
+ switch(gp->status){
+ default:
+ runtime_printf("unexpected G.status %d\n", gp->status);
+ runtime_throw("mark - bad status");
+ case Gdead:
+ break;
+ case Grunnable:
+ case Gsyscall:
+ case Gwaiting:
+ dumpgoroutine(gp);
+ break;
+ }
+ }
+}
+
+static void
+finq_callback(FuncVal *fn, void *obj, const FuncType *ft, const PtrType *ot)
+{
+ dumpint(TagQueuedFinalizer);
+ dumpint((uintptr)obj);
+ dumpint((uintptr)fn);
+ dumpint((uintptr)fn->fn);
+ dumpint((uintptr)ft);
+ dumpint((uintptr)ot);
+}
+
+
+static void
+dumproots(void)
+{
+ MSpan *s, **allspans;
+ uint32 spanidx;
+ Special *sp;
+ SpecialFinalizer *spf;
+ byte *p;
+
+ // data segment
+ // dumpint(TagData);
+ // dumpint((uintptr)data);
+ // dumpmemrange(data, edata - data);
+ // dumpfields((uintptr*)gcdata + 1);
+
+ // bss segment
+ // dumpint(TagBss);
+ // dumpint((uintptr)bss);
+ // dumpmemrange(bss, ebss - bss);
+ // dumpfields((uintptr*)gcbss + 1);
+
+ // MSpan.types
+ allspans = runtime_mheap.allspans;
+ for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
+ s = allspans[spanidx];
+ if(s->state == MSpanInUse) {
+ // The garbage collector ignores type pointers stored in MSpan.types:
+ // - Compiler-generated types are stored outside of heap.
+ // - The reflect package has runtime-generated types cached in its data structures.
+ // The garbage collector relies on finding the references via that cache.
+ switch(s->types.compression) {
+ case MTypes_Empty:
+ case MTypes_Single:
+ break;
+ case MTypes_Words:
+ case MTypes_Bytes:
+ dumpotherroot("runtime type info", (byte*)s->types.data);
+ break;
+ }
+
+ // Finalizers
+ for(sp = s->specials; sp != nil; sp = sp->next) {
+ if(sp->kind != KindSpecialFinalizer)
+ continue;
+ spf = (SpecialFinalizer*)sp;
+ p = (byte*)((s->start << PageShift) + spf->offset);
+ dumpfinalizer(p, spf->fn, spf->ft, spf->ot);
+ }
+ }
+ }
+
+ // Finalizer queue
+ runtime_iterate_finq(finq_callback);
+}
+
+// Bit vector of free marks.
+// Needs to be as big as the largest number of objects per span.
+static byte hfree[PageSize/8];
+
+static void
+dumpobjs(void)
+{
+ uintptr i, j, size, n, off, shift, *bitp, bits, ti, kind;
+ MSpan *s;
+ MLink *l;
+ byte *p;
+ const Type *t;
+
+ for(i = 0; i < runtime_mheap.nspan; i++) {
+ s = runtime_mheap.allspans[i];
+ if(s->state != MSpanInUse)
+ continue;
+ p = (byte*)(s->start << PageShift);
+ size = s->elemsize;
+ n = (s->npages << PageShift) / size;
+ if(n > PageSize/8)
+ runtime_throw("free array doesn't have enough entries");
+ for(l = s->freelist; l != nil; l = l->next) {
+ hfree[((byte*)l - p) / size] = true;
+ }
+ for(j = 0; j < n; j++, p += size) {
+ if(hfree[j]) {
+ hfree[j] = false;
+ continue;
+ }
+ off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start;
+ bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
+ shift = off % wordsPerBitmapWord;
+ bits = *bitp >> shift;
+
+ // Skip FlagNoGC allocations (stacks)
+ if((bits & bitAllocated) == 0)
+ continue;
+
+ // extract type and kind
+ ti = runtime_gettype(p);
+ t = (Type*)(ti & ~(uintptr)(PtrSize-1));
+ kind = ti & (PtrSize-1);
+
+ // dump it
+ if(kind == TypeInfo_Chan)
+ t = ((const ChanType*)t)->__element_type; // use element type for chan encoding
+ if(t == nil && scannable(p))
+ kind = TypeInfo_Conservative; // special kind for conservatively scanned objects
+ dumpobj(p, size, t, kind);
+ }
+ }
+}
+
+static void
+dumpparams(void)
+{
+ byte *x;
+
+ dumpint(TagParams);
+ x = (byte*)1;
+ if(*(byte*)&x == 1)
+ dumpbool(false); // little-endian ptrs
+ else
+ dumpbool(true); // big-endian ptrs
+ dumpint(PtrSize);
+ dumpint(runtime_Hchansize);
+ dumpint((uintptr)runtime_mheap.arena_start);
+ dumpint((uintptr)runtime_mheap.arena_used);
+ dumpint(0);
+ dumpcstr((const int8 *)"");
+ dumpint(runtime_ncpu);
+}
+
+static void
+dumpms(void)
+{
+ M *mp;
+
+ for(mp = runtime_allm; mp != nil; mp = mp->alllink) {
+ dumpint(TagOSThread);
+ dumpint((uintptr)mp);
+ dumpint(mp->id);
+ dumpint(0);
+ }
+}
+
+static void
+dumpmemstats(void)
+{
+ int32 i;
+
+ dumpint(TagMemStats);
+ dumpint(mstats.alloc);
+ dumpint(mstats.total_alloc);
+ dumpint(mstats.sys);
+ dumpint(mstats.nlookup);
+ dumpint(mstats.nmalloc);
+ dumpint(mstats.nfree);
+ dumpint(mstats.heap_alloc);
+ dumpint(mstats.heap_sys);
+ dumpint(mstats.heap_idle);
+ dumpint(mstats.heap_inuse);
+ dumpint(mstats.heap_released);
+ dumpint(mstats.heap_objects);
+ dumpint(mstats.stacks_inuse);
+ dumpint(mstats.stacks_sys);
+ dumpint(mstats.mspan_inuse);
+ dumpint(mstats.mspan_sys);
+ dumpint(mstats.mcache_inuse);
+ dumpint(mstats.mcache_sys);
+ dumpint(mstats.buckhash_sys);
+ dumpint(mstats.gc_sys);
+ dumpint(mstats.other_sys);
+ dumpint(mstats.next_gc);
+ dumpint(mstats.last_gc);
+ dumpint(mstats.pause_total_ns);
+ for(i = 0; i < 256; i++)
+ dumpint(mstats.pause_ns[i]);
+ dumpint(mstats.numgc);
+}
+
+static void
+dumpmemprof_callback(Bucket *b, uintptr nstk, Location *stk, uintptr size, uintptr allocs, uintptr frees)
+{
+ uintptr i, pc;
+ byte buf[20];
+
+ dumpint(TagMemProf);
+ dumpint((uintptr)b);
+ dumpint(size);
+ dumpint(nstk);
+ for(i = 0; i < nstk; i++) {
+ pc = stk[i].pc;
+ if(stk[i].function.len == 0) {
+ runtime_snprintf(buf, sizeof(buf), "%X", (uint64)pc);
+ dumpcstr((int8*)buf);
+ dumpcstr((const int8*)"?");
+ dumpint(0);
+ } else {
+ dumpstr(stk[i].function);
+ dumpstr(stk[i].filename);
+ dumpint(stk[i].lineno);
+ }
+ }
+ dumpint(allocs);
+ dumpint(frees);
+}
+
+static void
+dumpmemprof(void)
+{
+ MSpan *s, **allspans;
+ uint32 spanidx;
+ Special *sp;
+ SpecialProfile *spp;
+ byte *p;
+
+ runtime_iterate_memprof(dumpmemprof_callback);
+
+ allspans = runtime_mheap.allspans;
+ for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
+ s = allspans[spanidx];
+ if(s->state != MSpanInUse)
+ continue;
+ for(sp = s->specials; sp != nil; sp = sp->next) {
+ if(sp->kind != KindSpecialProfile)
+ continue;
+ spp = (SpecialProfile*)sp;
+ p = (byte*)((s->start << PageShift) + spp->offset);
+ dumpint(TagAllocSample);
+ dumpint((uintptr)p);
+ dumpint((uintptr)spp->b);
+ }
+ }
+}
+
+static void
+mdump(G *gp)
+{
+ const byte *hdr;
+ uintptr i;
+ MSpan *s;
+
+ // make sure we're done sweeping
+ for(i = 0; i < runtime_mheap.nspan; i++) {
+ s = runtime_mheap.allspans[i];
+ if(s->state == MSpanInUse)
+ runtime_MSpan_EnsureSwept(s);
+ }
+
+ runtime_memclr((byte*)&typecache[0], sizeof(typecache));
+ hdr = (const byte*)"go1.3 heap dump\n";
+ hwrite(hdr, runtime_findnull(hdr));
+ dumpparams();
+ dumpobjs();
+ dumpgs();
+ dumpms();
+ dumproots();
+ dumpmemstats();
+ dumpmemprof();
+ dumpint(TagEOF);
+ flush();
+
+ gp->param = nil;
+ gp->status = Grunning;
+ runtime_gogo(gp);
+}
+
+void runtime_debug_WriteHeapDump(uintptr)
+ __asm__(GOSYM_PREFIX "runtime_debug.WriteHeapDump");
+
+void
+runtime_debug_WriteHeapDump(uintptr fd)
+{
+ M *m;
+ G *g;
+
+ // Stop the world.
+ runtime_semacquire(&runtime_worldsema, false);
+ m = runtime_m();
+ m->gcing = 1;
+ m->locks++;
+ runtime_stoptheworld();
+
+ // Update stats so we can dump them.
+ // As a side effect, flushes all the MCaches so the MSpan.freelist
+ // lists contain all the free objects.
+ runtime_updatememstats(nil);
+
+ // Set dump file.
+ dumpfd = fd;
+
+ // Call dump routine on M stack.
+ g = runtime_g();
+ g->status = Gwaiting;
+ g->waitreason = "dumping heap";
+ runtime_mcall(mdump);
+
+ // Reset dump file.
+ dumpfd = 0;
+
+ // Start up the world again.
+ m->gcing = 0;
+ runtime_semrelease(&runtime_worldsema);
+ runtime_starttheworld();
+ m->locks--;
+}
+
+// Runs the specified gc program. Calls the callback for every
+// pointer-like field specified by the program and passes to the
+// callback the kind and offset of that field within the object.
+// offset is the offset in the object of the start of the program.
+// Returns a pointer to the opcode that ended the gc program (either
+// GC_END or GC_ARRAY_NEXT).
+/*
+static uintptr*
+playgcprog(uintptr offset, uintptr *prog, void (*callback)(void*,uintptr,uintptr), void *arg)
+{
+ uintptr len, elemsize, i, *end;
+
+ for(;;) {
+ switch(prog[0]) {
+ case GC_END:
+ return prog;
+ case GC_PTR:
+ callback(arg, FieldKindPtr, offset + prog[1]);
+ prog += 3;
+ break;
+ case GC_APTR:
+ callback(arg, FieldKindPtr, offset + prog[1]);
+ prog += 2;
+ break;
+ case GC_ARRAY_START:
+ len = prog[2];
+ elemsize = prog[3];
+ end = nil;
+ for(i = 0; i < len; i++) {
+ end = playgcprog(offset + prog[1] + i * elemsize, prog + 4, callback, arg);
+ if(end[0] != GC_ARRAY_NEXT)
+ runtime_throw("GC_ARRAY_START did not have matching GC_ARRAY_NEXT");
+ }
+ prog = end + 1;
+ break;
+ case GC_ARRAY_NEXT:
+ return prog;
+ case GC_CALL:
+ playgcprog(offset + prog[1], (uintptr*)((byte*)prog + *(int32*)&prog[2]), callback, arg);
+ prog += 3;
+ break;
+ case GC_CHAN_PTR:
+ callback(arg, FieldKindPtr, offset + prog[1]);
+ prog += 3;
+ break;
+ case GC_STRING:
+ callback(arg, FieldKindString, offset + prog[1]);
+ prog += 2;
+ break;
+ case GC_EFACE:
+ callback(arg, FieldKindEface, offset + prog[1]);
+ prog += 2;
+ break;
+ case GC_IFACE:
+ callback(arg, FieldKindIface, offset + prog[1]);
+ prog += 2;
+ break;
+ case GC_SLICE:
+ callback(arg, FieldKindSlice, offset + prog[1]);
+ prog += 3;
+ break;
+ case GC_REGION:
+ playgcprog(offset + prog[1], (uintptr*)prog[3] + 1, callback, arg);
+ prog += 4;
+ break;
+ default:
+ runtime_printf("%D\n", (uint64)prog[0]);
+ runtime_throw("bad gc op");
+ }
+ }
+}
+
+static void
+dump_callback(void *p, uintptr kind, uintptr offset)
+{
+ USED(&p);
+ dumpint(kind);
+ dumpint(offset);
+}
+
+// dumpint() the kind & offset of each field in an object.
+static void
+dumpfields(uintptr *prog)
+{
+ playgcprog(0, prog, dump_callback, nil);
+ dumpint(FieldKindEol);
+}
+
+static void
+dumpeface_callback(void *p, uintptr kind, uintptr offset)
+{
+ Eface *e;
+
+ if(kind != FieldKindEface)
+ return;
+ e = (Eface*)((byte*)p + offset);
+ dumptype(e->__type_descriptor);
+}
+*/
+
+// The heap dump reader needs to be able to disambiguate
+// Eface entries. So it needs to know every type that might
+// appear in such an entry. The following two routines accomplish
+// that.
+
+// Dump all the types that appear in the type field of
+// any Eface contained in obj.
+static void
+dumpefacetypes(void *obj __attribute__ ((unused)), uintptr size, const Type *type, uintptr kind)
+{
+ uintptr i;
+
+ switch(kind) {
+ case TypeInfo_SingleObject:
+ //playgcprog(0, (uintptr*)type->gc + 1, dumpeface_callback, obj);
+ break;
+ case TypeInfo_Array:
+ for(i = 0; i <= size - type->__size; i += type->__size) {
+ //playgcprog(i, (uintptr*)type->gc + 1, dumpeface_callback, obj);
+ }
+ break;
+ case TypeInfo_Chan:
+ if(type->__size == 0) // channels may have zero-sized objects in them
+ break;
+ for(i = runtime_Hchansize; i <= size - type->__size; i += type->__size) {
+ //playgcprog(i, (uintptr*)type->gc + 1, dumpeface_callback, obj);
+ }
+ break;
+ }
+}
diff --git a/libgo/runtime/lfstack.c b/libgo/runtime/lfstack.c
new file mode 100644
index 0000000..cefe7b8
--- /dev/null
+++ b/libgo/runtime/lfstack.c
@@ -0,0 +1,76 @@
+// AUTO-GENERATED by autogen.sh; DO NOT EDIT
+
+#include "runtime.h"
+#include "arch.h"
+#if __SIZEOF_POINTER__ == 8
+# define PTR_BITS 47
+#else
+# define PTR_BITS 32
+#endif
+#define PTR_MASK ((1ull<<PTR_BITS)-1)
+#define CNT_MASK (0ull-1)
+#if __SIZEOF_POINTER__ == 8 && (defined(__sparc__) || (defined(__sun__) && defined(__amd64__)))
+#undef PTR_BITS
+#undef CNT_MASK
+#undef PTR_MASK
+#define PTR_BITS 0
+#define CNT_MASK 7
+#define PTR_MASK ((0ull-1)<<3)
+#endif
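
These macros let a single uint64 hold both a node pointer and a push
counter so that cas64 in the functions below can detect ABA reuse of a
node: most 64-bit targets keep the counter in the bits above PTR_BITS,
while SPARC and Solaris/amd64, where user addresses may exceed 47 bits,
keep it in the three low alignment bits instead. An illustrative sketch
of the packing, mirroring the inline expressions in runtime_lfstackpush
and runtime_lfstackpop (these helpers are not part of the file):

static inline uint64
lf_pack(LFNode *node, uint64 pushcnt)
{
	return (uint64)(uintptr)node | ((pushcnt & CNT_MASK) << PTR_BITS);
}

static inline LFNode*
lf_unpack(uint64 head)
{
	return (LFNode*)(uintptr)(head & PTR_MASK);
}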
+
+#line 33 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/lfstack.goc"
+void
+runtime_lfstackpush ( uint64 *head , LFNode *node )
+{
+uint64 old , new;
+#line 38 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/lfstack.goc"
+if ( ( uintptr ) node != ( ( uintptr ) node&PTR_MASK ) ) {
+runtime_printf ( "p=%p\n" , node ) ;
+runtime_throw ( "runtime_lfstackpush: invalid pointer" ) ;
+}
+#line 43 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/lfstack.goc"
+node->pushcnt++;
+new = ( uint64 ) ( uintptr ) node| ( ( ( uint64 ) node->pushcnt&CNT_MASK ) <<PTR_BITS ) ;
+for ( ;; ) {
+old = runtime_atomicload64 ( head ) ;
+node->next = ( LFNode* ) ( uintptr ) ( old&PTR_MASK ) ;
+if ( runtime_cas64 ( head , old , new ) )
+break;
+}
+}
+#line 53 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/lfstack.goc"
+LFNode*
+runtime_lfstackpop ( uint64 *head )
+{
+LFNode *node , *node2;
+uint64 old , new;
+#line 59 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/lfstack.goc"
+for ( ;; ) {
+old = runtime_atomicload64 ( head ) ;
+if ( old == 0 )
+return nil;
+node = ( LFNode* ) ( uintptr ) ( old&PTR_MASK ) ;
+node2 = runtime_atomicloadp ( &node->next ) ;
+new = 0;
+if ( node2 != nil )
+new = ( uint64 ) ( uintptr ) node2| ( ( ( uint64 ) node2->pushcnt&CNT_MASK ) <<PTR_BITS ) ;
+if ( runtime_cas64 ( head , old , new ) )
+return node;
+}
+}
+void runtime_lfstackpush_go(uint64* head, LFNode* node) __asm__ (GOSYM_PREFIX "runtime.lfstackpush_go");
+void runtime_lfstackpush_go(uint64* head, LFNode* node)
+{
+#line 73 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/lfstack.goc"
+
+ runtime_lfstackpush(head, node);
+}
+LFNode* runtime_lfstackpop_go(uint64* head) __asm__ (GOSYM_PREFIX "runtime.lfstackpop_go");
+LFNode* runtime_lfstackpop_go(uint64* head)
+{
+ LFNode* node;
+#line 77 "../../../trunk/libgo/runtime/../../../trunk/libgo/runtime/lfstack.goc"
+
+ node = runtime_lfstackpop(head);
+return node;
+}
diff --git a/libgo/runtime/lock_sema.c b/libgo/runtime/lock_sema.c
index d0d551d..ef611fb 100644
--- a/libgo/runtime/lock_sema.c
+++ b/libgo/runtime/lock_sema.c
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin netbsd openbsd plan9 solaris windows
+// +build darwin nacl netbsd openbsd plan9 solaris windows
#include "runtime.h"
diff --git a/libgo/runtime/malloc.goc b/libgo/runtime/malloc.goc
index 9c8b8c1..0288722 100644
--- a/libgo/runtime/malloc.goc
+++ b/libgo/runtime/malloc.goc
@@ -63,8 +63,9 @@ extern MStats mstats; // defined in zruntime_def_$GOOS_$GOARCH.go
extern volatile intgo runtime_MemProfileRate
__asm__ (GOSYM_PREFIX "runtime.MemProfileRate");
-static void* largealloc(uint32, uintptr*);
-static void profilealloc(void *v, uintptr size, uintptr typ);
+static MSpan* largealloc(uint32, uintptr*);
+static void profilealloc(void *v, uintptr size);
+static void settype(MSpan *s, void *v, uintptr typ);
// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
@@ -79,7 +80,7 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
uintptr tinysize, size1;
intgo rate;
MCache *c;
- MCacheList *l;
+ MSpan *s;
MLink *v, *next;
byte *tiny;
bool incallback;
@@ -113,8 +114,8 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
}
if(m->mallocing)
runtime_throw("malloc/free - deadlock");
- // Disable preemption during settype_flush.
- // We can not use m->mallocing for this, because settype_flush calls mallocgc.
+ // Disable preemption during settype.
+ // We can not use m->mallocing for this, because settype calls mallocgc.
m->locks++;
m->mallocing = 1;
@@ -178,15 +179,15 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
}
}
// Allocate a new TinySize block.
- l = &c->list[TinySizeClass];
- if(l->list == nil)
- runtime_MCache_Refill(c, TinySizeClass);
- v = l->list;
+ s = c->alloc[TinySizeClass];
+ if(s->freelist == nil)
+ s = runtime_MCache_Refill(c, TinySizeClass);
+ v = s->freelist;
next = v->next;
+ s->freelist = next;
+ s->ref++;
if(next != nil) // prefetching nil leads to a DTLB miss
PREFETCH(next);
- l->list = next;
- l->nlist--;
((uint64*)v)[0] = 0;
((uint64*)v)[1] = 0;
// See if we need to replace the existing tiny block with the new one
@@ -205,15 +206,15 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
else
sizeclass = runtime_size_to_class128[(size-1024+127) >> 7];
size = runtime_class_to_size[sizeclass];
- l = &c->list[sizeclass];
- if(l->list == nil)
- runtime_MCache_Refill(c, sizeclass);
- v = l->list;
+ s = c->alloc[sizeclass];
+ if(s->freelist == nil)
+ s = runtime_MCache_Refill(c, sizeclass);
+ v = s->freelist;
next = v->next;
+ s->freelist = next;
+ s->ref++;
if(next != nil) // prefetching nil leads to a DTLB miss
PREFETCH(next);
- l->list = next;
- l->nlist--;
if(!(flag & FlagNoZero)) {
v->next = nil;
// block is zeroed iff second word is zero ...
@@ -224,7 +225,8 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
c->local_cachealloc += size;
} else {
// Allocate directly from heap.
- v = largealloc(flag, &size);
+ s = largealloc(flag, &size);
+ v = (void*)(s->start << PageShift);
}
if(flag & FlagNoGC)
@@ -235,34 +237,23 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
if(DebugTypeAtBlockEnd)
*(uintptr*)((uintptr)v+size-sizeof(uintptr)) = typ;
+ m->mallocing = 0;
// TODO: save type even if FlagNoScan? Potentially expensive but might help
// heap profiling/tracing.
- if(UseSpanType && !(flag & FlagNoScan) && typ != 0) {
- uintptr *buf, i;
-
- buf = m->settype_buf;
- i = m->settype_bufsize;
- buf[i++] = (uintptr)v;
- buf[i++] = typ;
- m->settype_bufsize = i;
- }
+ if(UseSpanType && !(flag & FlagNoScan) && typ != 0)
+ settype(s, v, typ);
- m->mallocing = 0;
- if(UseSpanType && !(flag & FlagNoScan) && typ != 0 && m->settype_bufsize == nelem(m->settype_buf))
- runtime_settype_flush(m);
if(raceenabled)
runtime_racemalloc(v, size);
if(runtime_debug.allocfreetrace)
- goto profile;
+ runtime_tracealloc(v, size, typ);
if(!(flag & FlagNoProfiling) && (rate = runtime_MemProfileRate) > 0) {
if(size < (uintptr)rate && size < (uintptr)(uint32)c->next_sample)
c->next_sample -= size;
- else {
- profile:
- profilealloc(v, size, typ);
- }
+ else
+ profilealloc(v, size);
}
m->locks--;
@@ -276,7 +267,7 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
return v;
}
-static void*
+static MSpan*
largealloc(uint32 flag, uintptr *sizep)
{
uintptr npages, size;
@@ -298,11 +289,11 @@ largealloc(uint32 flag, uintptr *sizep)
v = (void*)(s->start << PageShift);
// setup for mark sweep
runtime_markspan(v, 0, 0, true);
- return v;
+ return s;
}
static void
-profilealloc(void *v, uintptr size, uintptr typ)
+profilealloc(void *v, uintptr size)
{
uintptr rate;
int32 next;
@@ -324,7 +315,7 @@ profilealloc(void *v, uintptr size, uintptr typ)
next = 0;
c->next_sample = next;
}
- runtime_MProf_Malloc(v, size, typ);
+ runtime_MProf_Malloc(v, size);
}
void*
@@ -365,8 +356,8 @@ __go_free(void *v)
if(size < TinySize)
runtime_throw("freeing too small block");
- if(raceenabled)
- runtime_racefree(v);
+ if(runtime_debug.allocfreetrace)
+ runtime_tracefree(v, size);
// Ensure that the span is swept.
// If we free into an unswept span, we will corrupt GC bitmaps.
@@ -381,10 +372,24 @@ __go_free(void *v)
s->needzero = 1;
// Must mark v freed before calling unmarkspan and MHeap_Free:
// they might coalesce v into other spans and change the bitmap further.
- runtime_markfreed(v, size);
+ runtime_markfreed(v);
runtime_unmarkspan(v, 1<<PageShift);
+ // NOTE(rsc,dvyukov): The original implementation of efence
+ // in CL 22060046 used SysFree instead of SysFault, so that
+ // the operating system would eventually give the memory
+ // back to us again, so that an efence program could run
+ // longer without running out of memory. Unfortunately,
+ // calling SysFree here without any kind of adjustment of the
+ // heap data structures means that when the memory does
+ // come back to us, we have the wrong metadata for it, either in
+ // the MSpan structures or in the garbage collection bitmap.
+ // Using SysFault here means that the program will run out of
+ // memory fairly quickly in efence mode, but at least it won't
+ // have mysterious crashes due to confused memory reuse.
+ // It should be possible to switch back to SysFree if we also
+ // implement and then call some kind of MHeap_DeleteSpan.
if(runtime_debug.efence)
- runtime_SysFree((void*)(s->start<<PageShift), size, &mstats.heap_sys);
+ runtime_SysFault((void*)(s->start<<PageShift), size);
else
runtime_MHeap_Free(&runtime_mheap, s, 1);
c->local_nlargefree++;
@@ -398,9 +403,18 @@ __go_free(void *v)
// Must mark v freed before calling MCache_Free:
// it might coalesce v and other blocks into a bigger span
// and change the bitmap further.
- runtime_markfreed(v, size);
c->local_nsmallfree[sizeclass]++;
- runtime_MCache_Free(c, v, sizeclass, size);
+ c->local_cachealloc -= size;
+ if(c->alloc[sizeclass] == s) {
+ // We own the span, so we can just add v to the freelist
+ runtime_markfreed(v);
+ ((MLink*)v)->next = s->freelist;
+ s->freelist = v;
+ s->ref--;
+ } else {
+ // Someone else owns this span. Add to free queue.
+ runtime_MCache_Free(c, v, sizeclass, size);
+ }
}
m->mallocing = 0;
}
@@ -456,37 +470,6 @@ runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
return 1;
}
-MCache*
-runtime_allocmcache(void)
-{
- intgo rate;
- MCache *c;
-
- runtime_lock(&runtime_mheap);
- c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
- runtime_unlock(&runtime_mheap);
- runtime_memclr((byte*)c, sizeof(*c));
-
- // Set first allocation sample size.
- rate = runtime_MemProfileRate;
- if(rate > 0x3fffffff) // make 2*rate not overflow
- rate = 0x3fffffff;
- if(rate != 0)
- c->next_sample = runtime_fastrand1() % (2*rate);
-
- return c;
-}
-
-void
-runtime_freemcache(MCache *c)
-{
- runtime_MCache_ReleaseAll(c);
- runtime_lock(&runtime_mheap);
- runtime_purgecachedstats(c);
- runtime_FixAlloc_Free(&runtime_mheap.cachealloc, c);
- runtime_unlock(&runtime_mheap);
-}
-
void
runtime_purgecachedstats(MCache *c)
{
@@ -523,21 +506,25 @@ extern uintptr runtime_sizeof_C_MStats
void
runtime_mallocinit(void)
{
- byte *p;
- uintptr arena_size, bitmap_size, spans_size;
+ byte *p, *p1;
+ uintptr arena_size, bitmap_size, spans_size, p_size;
extern byte _end[];
uintptr limit;
uint64 i;
+ bool reserved;
runtime_sizeof_C_MStats = sizeof(MStats) - (NumSizeClasses - 61) * sizeof(mstats.by_size[0]);
p = nil;
+ p_size = 0;
arena_size = 0;
bitmap_size = 0;
spans_size = 0;
+ reserved = false;
// for 64-bit build
USED(p);
+ USED(p_size);
USED(arena_size);
USED(bitmap_size);
USED(spans_size);
@@ -585,7 +572,9 @@ runtime_mallocinit(void)
spans_size = arena_size / PageSize * sizeof(runtime_mheap.spans[0]);
spans_size = ROUND(spans_size, PageSize);
for(i = 0; i < HeapBaseOptions; i++) {
- p = runtime_SysReserve(HeapBase(i), bitmap_size + spans_size + arena_size + PageSize);
+ p = HeapBase(i);
+ p_size = bitmap_size + spans_size + arena_size + PageSize;
+ p = runtime_SysReserve(p, p_size, &reserved);
if(p != nil)
break;
}
@@ -628,7 +617,8 @@ runtime_mallocinit(void)
// away from the running binary image and then round up
// to a MB boundary.
p = (byte*)ROUND((uintptr)_end + (1<<18), 1<<20);
- p = runtime_SysReserve(p, bitmap_size + spans_size + arena_size + PageSize);
+ p_size = bitmap_size + spans_size + arena_size + PageSize;
+ p = runtime_SysReserve(p, p_size, &reserved);
if(p == nil)
runtime_throw("runtime: cannot reserve arena virtual address space");
}
@@ -636,13 +626,17 @@ runtime_mallocinit(void)
// PageSize can be larger than OS definition of page size,
// so SysReserve can give us a PageSize-unaligned pointer.
// To overcome this we ask for PageSize more and round up the pointer.
- p = (byte*)ROUND((uintptr)p, PageSize);
+ p1 = (byte*)ROUND((uintptr)p, PageSize);
- runtime_mheap.spans = (MSpan**)p;
- runtime_mheap.bitmap = p + spans_size;
- runtime_mheap.arena_start = p + spans_size + bitmap_size;
+ runtime_mheap.spans = (MSpan**)p1;
+ runtime_mheap.bitmap = p1 + spans_size;
+ runtime_mheap.arena_start = p1 + spans_size + bitmap_size;
runtime_mheap.arena_used = runtime_mheap.arena_start;
- runtime_mheap.arena_end = runtime_mheap.arena_start + arena_size;
+ runtime_mheap.arena_end = p + p_size;
+ runtime_mheap.arena_reserved = reserved;
+
+ if(((uintptr)runtime_mheap.arena_start & (PageSize-1)) != 0)
+ runtime_throw("misrounded allocation in mallocinit");
// Initialize the rest of the allocator.
runtime_MHeap_Init(&runtime_mheap);
@@ -655,64 +649,87 @@ runtime_mallocinit(void)
void*
runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
{
- byte *p;
+ byte *p, *p_end;
+ uintptr p_size;
+ bool reserved;
if(n > (uintptr)(h->arena_end - h->arena_used)) {
// We are in 32-bit mode, maybe we didn't use all possible address space yet.
// Reserve some more space.
byte *new_end;
- uintptr needed;
- needed = (uintptr)h->arena_used + n - (uintptr)h->arena_end;
- needed = ROUND(needed, 256<<20);
- new_end = h->arena_end + needed;
+ p_size = ROUND(n + PageSize, 256<<20);
+ new_end = h->arena_end + p_size;
if(new_end <= h->arena_start + MaxArena32) {
- p = runtime_SysReserve(h->arena_end, new_end - h->arena_end);
- if(p == h->arena_end)
+ // TODO: It would be bad if part of the arena
+ // is reserved and part is not.
+ p = runtime_SysReserve(h->arena_end, p_size, &reserved);
+ if(p == h->arena_end) {
h->arena_end = new_end;
+ h->arena_reserved = reserved;
+ }
+ else if(p+p_size <= h->arena_start + MaxArena32) {
+ // Keep everything page-aligned.
+ // Our pages are bigger than hardware pages.
+ h->arena_end = p+p_size;
+ h->arena_used = p + (-(uintptr)p&(PageSize-1));
+ h->arena_reserved = reserved;
+ } else {
+ uint64 stat;
+ stat = 0;
+ runtime_SysFree(p, p_size, &stat);
+ }
}
}
if(n <= (uintptr)(h->arena_end - h->arena_used)) {
// Keep taking from our reservation.
p = h->arena_used;
- runtime_SysMap(p, n, &mstats.heap_sys);
+ runtime_SysMap(p, n, h->arena_reserved, &mstats.heap_sys);
h->arena_used += n;
runtime_MHeap_MapBits(h);
runtime_MHeap_MapSpans(h);
if(raceenabled)
runtime_racemapshadow(p, n);
+
+ if(((uintptr)p & (PageSize-1)) != 0)
+ runtime_throw("misrounded allocation in MHeap_SysAlloc");
return p;
}
// If using 64-bit, our reservation is all we have.
- if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU)
+ if((uintptr)(h->arena_end - h->arena_start) >= MaxArena32)
return nil;
// On 32-bit, once the reservation is gone we can
// try to get memory at a location chosen by the OS
// and hope that it is in the range we allocated bitmap for.
- p = runtime_SysAlloc(n, &mstats.heap_sys);
+ p_size = ROUND(n, PageSize) + PageSize;
+ p = runtime_SysAlloc(p_size, &mstats.heap_sys);
if(p == nil)
return nil;
- if(p < h->arena_start || (uintptr)(p+n - h->arena_start) >= MaxArena32) {
+ if(p < h->arena_start || (uintptr)(p+p_size - h->arena_start) >= MaxArena32) {
runtime_printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
p, h->arena_start, h->arena_start+MaxArena32);
- runtime_SysFree(p, n, &mstats.heap_sys);
+ runtime_SysFree(p, p_size, &mstats.heap_sys);
return nil;
}
-
+
+ p_end = p + p_size;
+ p += -(uintptr)p & (PageSize-1);
if(p+n > h->arena_used) {
h->arena_used = p+n;
- if(h->arena_used > h->arena_end)
- h->arena_end = h->arena_used;
+ if(p_end > h->arena_end)
+ h->arena_end = p_end;
runtime_MHeap_MapBits(h);
runtime_MHeap_MapSpans(h);
if(raceenabled)
runtime_racemapshadow(p, n);
}
+ if(((uintptr)p & (PageSize-1)) != 0)
+ runtime_throw("misrounded allocation in MHeap_SysAlloc");
return p;
}
@@ -740,7 +757,7 @@ runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat)
if(align != 0) {
if(align&(align-1))
- runtime_throw("persistentalloc: align is now a power of 2");
+ runtime_throw("persistentalloc: align is not a power of 2");
if(align > PageSize)
runtime_throw("persistentalloc: align is too large");
} else
@@ -768,94 +785,67 @@ runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat)
return p;
}
-static Lock settype_lock;
-
-void
-runtime_settype_flush(M *mp)
+static void
+settype(MSpan *s, void *v, uintptr typ)
{
- uintptr *buf, *endbuf;
uintptr size, ofs, j, t;
uintptr ntypes, nbytes2, nbytes3;
uintptr *data2;
byte *data3;
- void *v;
- uintptr typ, p;
- MSpan *s;
- buf = mp->settype_buf;
- endbuf = buf + mp->settype_bufsize;
-
- runtime_lock(&settype_lock);
- while(buf < endbuf) {
- v = (void*)*buf;
- *buf = 0;
- buf++;
- typ = *buf;
- buf++;
-
- // (Manually inlined copy of runtime_MHeap_Lookup)
- p = (uintptr)v>>PageShift;
- p -= (uintptr)runtime_mheap.arena_start >> PageShift;
- s = runtime_mheap.spans[p];
-
- if(s->sizeclass == 0) {
- s->types.compression = MTypes_Single;
- s->types.data = typ;
- continue;
+ if(s->sizeclass == 0) {
+ s->types.compression = MTypes_Single;
+ s->types.data = typ;
+ return;
+ }
+ size = s->elemsize;
+ ofs = ((uintptr)v - (s->start<<PageShift)) / size;
+
+ switch(s->types.compression) {
+ case MTypes_Empty:
+ ntypes = (s->npages << PageShift) / size;
+ nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
+ data3 = runtime_mallocgc(nbytes3, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
+ s->types.compression = MTypes_Bytes;
+ s->types.data = (uintptr)data3;
+ ((uintptr*)data3)[1] = typ;
+ data3[8*sizeof(uintptr) + ofs] = 1;
+ break;
+
+ case MTypes_Words:
+ ((uintptr*)s->types.data)[ofs] = typ;
+ break;
+
+ case MTypes_Bytes:
+ data3 = (byte*)s->types.data;
+ for(j=1; j<8; j++) {
+ if(((uintptr*)data3)[j] == typ) {
+ break;
+ }
+ if(((uintptr*)data3)[j] == 0) {
+ ((uintptr*)data3)[j] = typ;
+ break;
+ }
}
-
- size = s->elemsize;
- ofs = ((uintptr)v - (s->start<<PageShift)) / size;
-
- switch(s->types.compression) {
- case MTypes_Empty:
+ if(j < 8) {
+ data3[8*sizeof(uintptr) + ofs] = j;
+ } else {
ntypes = (s->npages << PageShift) / size;
- nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
- data3 = runtime_mallocgc(nbytes3, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
- s->types.compression = MTypes_Bytes;
- s->types.data = (uintptr)data3;
- ((uintptr*)data3)[1] = typ;
- data3[8*sizeof(uintptr) + ofs] = 1;
- break;
-
- case MTypes_Words:
- ((uintptr*)s->types.data)[ofs] = typ;
- break;
-
- case MTypes_Bytes:
- data3 = (byte*)s->types.data;
- for(j=1; j<8; j++) {
- if(((uintptr*)data3)[j] == typ) {
- break;
- }
- if(((uintptr*)data3)[j] == 0) {
- ((uintptr*)data3)[j] = typ;
- break;
- }
- }
- if(j < 8) {
- data3[8*sizeof(uintptr) + ofs] = j;
- } else {
- ntypes = (s->npages << PageShift) / size;
- nbytes2 = ntypes * sizeof(uintptr);
- data2 = runtime_mallocgc(nbytes2, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
- s->types.compression = MTypes_Words;
- s->types.data = (uintptr)data2;
-
- // Move the contents of data3 to data2. Then deallocate data3.
- for(j=0; j<ntypes; j++) {
- t = data3[8*sizeof(uintptr) + j];
- t = ((uintptr*)data3)[t];
- data2[j] = t;
- }
- data2[ofs] = typ;
+ nbytes2 = ntypes * sizeof(uintptr);
+ data2 = runtime_mallocgc(nbytes2, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
+ s->types.compression = MTypes_Words;
+ s->types.data = (uintptr)data2;
+
+ // Move the contents of data3 to data2. Then deallocate data3.
+ for(j=0; j<ntypes; j++) {
+ t = data3[8*sizeof(uintptr) + j];
+ t = ((uintptr*)data3)[t];
+ data2[j] = t;
}
- break;
+ data2[ofs] = typ;
}
+ break;
}
- runtime_unlock(&settype_lock);
-
- mp->settype_bufsize = 0;
}
uintptr
@@ -888,9 +878,7 @@ runtime_gettype(void *v)
runtime_throw("runtime_gettype: invalid compression kind");
}
if(0) {
- runtime_lock(&settype_lock);
runtime_printf("%p -> %d,%X\n", v, (int32)s->types.compression, (int64)t);
- runtime_unlock(&settype_lock);
}
return t;
}
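
In the MTypes_Bytes encoding that settype() builds above, a span's type
data starts with eight uintptr type slots followed by one index byte
per object; index byte j selects slot j, and slot 0, which settype()
never writes, reads back as 0 (no recorded type). A lookup sketch
matching that layout (the helper name is illustrative; runtime_gettype
is the real reader):

static uintptr
bytes_gettype(MSpan *s, uintptr ofs)
{
	byte *data3;

	data3 = (byte*)s->types.data;	/* 8 type slots, then 1 index byte per object */
	return ((uintptr*)data3)[data3[8*sizeof(uintptr) + ofs]];
}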
@@ -933,7 +921,7 @@ runtime_cnewarray(const Type *typ, intgo n)
}
func GC() {
- runtime_gc(1);
+ runtime_gc(2); // force GC and do eager sweep
}
func SetFinalizer(obj Eface, finalizer Eface) {
@@ -956,15 +944,25 @@ func SetFinalizer(obj Eface, finalizer Eface) {
// because we use &runtime_zerobase for all such allocations.
if(ot->__element_type != nil && ot->__element_type->__size == 0)
return;
+ // The following check is required for cases when a user passes a pointer to a composite
+ // literal, but the compiler makes it a pointer to a global. For example:
+ // var Foo = &Object{}
+ // func main() {
+ // runtime.SetFinalizer(Foo, nil)
+ // }
+ // See issue 7656.
+ if((byte*)obj.__object < runtime_mheap.arena_start || runtime_mheap.arena_used <= (byte*)obj.__object)
+ return;
if(!runtime_mlookup(obj.__object, &base, &size, nil) || obj.__object != base) {
// As an implementation detail we allow to set finalizers for an inner byte
// of an object if it could come from tiny alloc (see mallocgc for details).
- if(ot->__element_type == nil || (ot->__element_type->__code&GO_NO_POINTERS) == 0 || ot->__element_type->__size >= TinySize) {
- runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
+ if(ot->__element_type == nil || (ot->__element_type->__code&KindNoPointers) == 0 || ot->__element_type->__size >= TinySize) {
+ runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block (%p)\n", obj.__object);
goto throw;
}
}
if(finalizer.__type_descriptor != nil) {
+ runtime_createfing();
if(finalizer.__type_descriptor->__code != GO_FUNC)
goto badfunc;
ft = (const FuncType*)finalizer.__type_descriptor;
diff --git a/libgo/runtime/malloc.h b/libgo/runtime/malloc.h
index 30fbb64..86b9fcc 100644
--- a/libgo/runtime/malloc.h
+++ b/libgo/runtime/malloc.h
@@ -20,7 +20,7 @@
// MHeap: the malloc heap, managed at page (4096-byte) granularity.
// MSpan: a run of pages managed by the MHeap.
// MCentral: a shared free list for a given size class.
-// MCache: a per-thread (in Go, per-M) cache for small objects.
+// MCache: a per-thread (in Go, per-P) cache for small objects.
// MStats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
@@ -158,6 +158,9 @@ struct MLink
// SysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
+// NOTE: SysAlloc returns OS-aligned memory, but the heap allocator
+// may use larger alignment, so the caller must be careful to realign the
+// memory obtained by SysAlloc.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
@@ -172,16 +175,29 @@ struct MLink
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
-// location if that one is unavailable.
+// location if that one is unavailable. On some systems and in some
+// cases SysReserve will simply check that the address space is
+// available and not actually reserve it. If SysReserve returns
+// non-nil, it sets *reserved to true if the address space is
+// reserved, false if it has merely been checked.
+// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
+// may use larger alignment, so the caller must be careful to realign the
+// memory obtained by SysReserve.
//
// SysMap maps previously reserved address space for use.
+// The reserved argument is true if the address space was really
+// reserved, not merely checked.
+//
+// SysFault marks an (already SysAlloc'd) region to fault
+// if accessed. Used only for debugging the runtime.
void* runtime_SysAlloc(uintptr nbytes, uint64 *stat);
void runtime_SysFree(void *v, uintptr nbytes, uint64 *stat);
void runtime_SysUnused(void *v, uintptr nbytes);
void runtime_SysUsed(void *v, uintptr nbytes);
-void runtime_SysMap(void *v, uintptr nbytes, uint64 *stat);
-void* runtime_SysReserve(void *v, uintptr nbytes);
+void runtime_SysMap(void *v, uintptr nbytes, bool reserved, uint64 *stat);
+void* runtime_SysReserve(void *v, uintptr nbytes, bool *reserved);
+void runtime_SysFault(void *v, uintptr nbytes);
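
Taken together, the reserve/map split lets the allocator claim a large
address range once and commit physical pages only as the heap actually
grows. A minimal sketch of the pattern (the sizes and the local stat
counter are illustrative, and error handling is elided):

static void
reserve_then_map(void)
{
	bool reserved;
	uint64 stat = 0;
	byte *base;

	base = runtime_SysReserve(nil, 1<<30, &reserved);	/* 1 GB of address space */
	if(base == nil)
		return;
	/* 'reserved' tells SysMap whether the range was truly reserved
	   or merely checked as available. */
	runtime_SysMap(base, 1<<20, reserved, &stat);	/* back the first 1 MB */
}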
// FixAlloc is a simple free-list allocator for fixed size objects.
// Malloc uses a FixAlloc wrapped around SysAlloc to manage its
@@ -261,6 +277,7 @@ struct MStats
extern MStats mstats
__asm__ (GOSYM_PREFIX "runtime.memStats");
+void runtime_updatememstats(GCStats *stats);
// Size classes. Computed and initialized by InitSizes.
//
@@ -281,8 +298,6 @@ extern int8 runtime_size_to_class128[(MaxSmallSize-1024)/128 + 1];
extern void runtime_InitSizes(void);
-// Per-thread (in Go, per-M) cache for small objects.
-// No locking needed because it is per-thread (per-M).
typedef struct MCacheList MCacheList;
struct MCacheList
{
@@ -290,6 +305,8 @@ struct MCacheList
uint32 nlist;
};
+// Per-thread (in Go, per-P) cache for small objects.
+// No locking needed because it is per-thread (per-P).
struct MCache
{
// The following members are accessed on every malloc,
@@ -301,7 +318,8 @@ struct MCache
byte* tiny;
uintptr tinysize;
// The rest is not accessed on every malloc.
- MCacheList list[NumSizeClasses];
+ MSpan* alloc[NumSizeClasses]; // spans to allocate from
+ MCacheList free[NumSizeClasses];// lists of explicitly freed objects
// Local allocator stats, flushed during GC.
uintptr local_nlookup; // number of pointer lookups
uintptr local_largefree; // bytes freed for large objects (>MaxSmallSize)
@@ -309,8 +327,8 @@ struct MCache
uintptr local_nsmallfree[NumSizeClasses]; // number of frees for small objects (<=MaxSmallSize)
};
-void runtime_MCache_Refill(MCache *c, int32 sizeclass);
-void runtime_MCache_Free(MCache *c, void *p, int32 sizeclass, uintptr size);
+MSpan* runtime_MCache_Refill(MCache *c, int32 sizeclass);
+void runtime_MCache_Free(MCache *c, MLink *p, int32 sizeclass, uintptr size);
void runtime_MCache_ReleaseAll(MCache *c);
// MTypes describes the types of blocks allocated within a span.
@@ -408,8 +426,9 @@ struct MSpan
// if sweepgen == h->sweepgen, the span is swept and ready to use
// h->sweepgen is incremented by 2 after every GC
uint32 sweepgen;
- uint16 ref; // number of allocated objects in this span
+ uint16 ref; // capacity - number of objects in freelist
uint8 sizeclass; // size class
+ bool incache; // being used by an MCache
uint8 state; // MSpanInUse etc
uint8 needzero; // needs to be zeroed before allocation
uintptr elemsize; // computed from sizeclass or from npages
@@ -417,8 +436,9 @@ struct MSpan
uintptr npreleased; // number of pages released to the OS
byte *limit; // end of data in span
MTypes types; // types of allocated objects in this span
- Lock specialLock; // TODO: use to protect types also (instead of settype_lock)
+ Lock specialLock; // guards specials list
Special *specials; // linked list of special records sorted by offset.
+ MLink *freebuf; // objects freed explicitly, not incorporated into freelist yet
};
void runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages);
@@ -440,15 +460,16 @@ struct MCentral
{
Lock;
int32 sizeclass;
- MSpan nonempty;
- MSpan empty;
- int32 nfree;
+ MSpan nonempty; // list of spans with a free object
+ MSpan empty; // list of spans with no free objects (or cached in an MCache)
+ int32 nfree; // # of objects available in nonempty spans
};
void runtime_MCentral_Init(MCentral *c, int32 sizeclass);
-int32 runtime_MCentral_AllocList(MCentral *c, MLink **first);
-void runtime_MCentral_FreeList(MCentral *c, MLink *first);
+MSpan* runtime_MCentral_CacheSpan(MCentral *c);
+void runtime_MCentral_UncacheSpan(MCentral *c, MSpan *s);
bool runtime_MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end);
+void runtime_MCentral_FreeList(MCentral *c, MLink *start); // TODO: need this?
// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
@@ -477,6 +498,7 @@ struct MHeap
byte *arena_start;
byte *arena_used;
byte *arena_end;
+ bool arena_reserved;
// central free lists for small size classes.
// the padding makes sure that the MCentrals are
@@ -510,6 +532,7 @@ void* runtime_MHeap_SysAlloc(MHeap *h, uintptr n);
void runtime_MHeap_MapBits(MHeap *h);
void runtime_MHeap_MapSpans(MHeap *h);
void runtime_MHeap_Scavenger(void*);
+void runtime_MHeap_SplitSpan(MHeap *h, MSpan *s);
void* runtime_mallocgc(uintptr size, uintptr typ, uint32 flag);
void* runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat);
@@ -519,7 +542,7 @@ uintptr runtime_sweepone(void);
void runtime_markscan(void *v);
void runtime_marknogc(void *v);
void runtime_checkallocated(void *v, uintptr n);
-void runtime_markfreed(void *v, uintptr n);
+void runtime_markfreed(void *v);
void runtime_checkfreed(void *v, uintptr n);
extern int32 runtime_checking;
void runtime_markspan(void *v, uintptr size, uintptr n, bool leftover);
@@ -527,9 +550,10 @@ void runtime_unmarkspan(void *v, uintptr size);
void runtime_purgecachedstats(MCache*);
void* runtime_cnew(const Type*);
void* runtime_cnewarray(const Type*, intgo);
+void runtime_tracealloc(void*, uintptr, uintptr);
+void runtime_tracefree(void*, uintptr);
+void runtime_tracegc(void);
-void runtime_settype_flush(M*);
-void runtime_settype_sysfree(MSpan*);
uintptr runtime_gettype(void*);
enum
@@ -550,15 +574,17 @@ struct Obj
uintptr ti; // type info
};
-void runtime_MProf_Malloc(void*, uintptr, uintptr);
-void runtime_MProf_Free(Bucket*, void*, uintptr, bool);
+void runtime_MProf_Malloc(void*, uintptr);
+void runtime_MProf_Free(Bucket*, uintptr, bool);
void runtime_MProf_GC(void);
-void runtime_MProf_TraceGC(void);
-struct Workbuf;
-void runtime_MProf_Mark(struct Workbuf**, void (*)(struct Workbuf**, Obj));
+void runtime_iterate_memprof(void (*callback)(Bucket*, uintptr, Location*, uintptr, uintptr, uintptr));
int32 runtime_gcprocs(void);
void runtime_helpgc(int32 nproc);
void runtime_gchelper(void);
+void runtime_createfing(void);
+G* runtime_wakefing(void);
+extern bool runtime_fingwait;
+extern bool runtime_fingwake;
void runtime_setprofilebucket(void *p, Bucket *b);
@@ -581,13 +607,52 @@ enum
DebugTypeAtBlockEnd = 0,
};
+// Information from the compiler about the layout of stack frames.
+typedef struct BitVector BitVector;
+struct BitVector
+{
+ int32 n; // # of bits
+ uint32 *data;
+};
+typedef struct StackMap StackMap;
+struct StackMap
+{
+ int32 n; // number of bitmaps
+ int32 nbit; // number of bits in each bitmap
+ uint32 data[];
+};
+enum {
+ // Pointer map
+ BitsPerPointer = 2,
+ BitsDead = 0,
+ BitsScalar = 1,
+ BitsPointer = 2,
+ BitsMultiWord = 3,
+ // BitsMultiWord will be set for the first word of a multi-word item.
+ // When it is set, one of the following will be set for the second word.
+ BitsString = 0,
+ BitsSlice = 1,
+ BitsIface = 2,
+ BitsEface = 3,
+};
+// Returns pointer map data for the given stackmap index
+// (the index is encoded in PCDATA_StackMapIndex).
+BitVector runtime_stackmapdata(StackMap *stackmap, int32 n);
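
Each described word gets BitsPerPointer (2) bits, packed sixteen
entries per uint32, low-order bits first. A decoding sketch for entry i
of a BitVector, assuming that packing order (the helper name is
illustrative):

static uint32
bv_entry(BitVector bv, int32 i)
{
	uint32 word;

	word = bv.data[(i*BitsPerPointer) / 32];
	return (word >> ((i*BitsPerPointer) % 32)) & ((1<<BitsPerPointer) - 1);
}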
+
// defined in mgc0.go
void runtime_gc_m_ptr(Eface*);
+void runtime_gc_g_ptr(Eface*);
void runtime_gc_itab_ptr(Eface*);
void runtime_memorydump(void);
int32 runtime_setgcpercent(int32);
+// Value we use to mark dead pointers when GODEBUG=gcdead=1.
+#define PoisonGC ((uintptr)0xf969696969696969ULL)
+#define PoisonStack ((uintptr)0x6868686868686868ULL)
+
+struct Workbuf;
+void runtime_MProf_Mark(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void runtime_proc_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void runtime_time_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void runtime_netpoll_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
diff --git a/libgo/runtime/mcache.c b/libgo/runtime/mcache.c
index 38f824a..746711a 100644
--- a/libgo/runtime/mcache.c
+++ b/libgo/runtime/mcache.c
@@ -10,69 +10,119 @@
#include "arch.h"
#include "malloc.h"
+extern volatile intgo runtime_MemProfileRate
+ __asm__ (GOSYM_PREFIX "runtime.MemProfileRate");
+
+// dummy MSpan that contains no free objects.
+static MSpan emptymspan;
+
+MCache*
+runtime_allocmcache(void)
+{
+ intgo rate;
+ MCache *c;
+ int32 i;
+
+ runtime_lock(&runtime_mheap);
+ c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
+ runtime_unlock(&runtime_mheap);
+ runtime_memclr((byte*)c, sizeof(*c));
+ for(i = 0; i < NumSizeClasses; i++)
+ c->alloc[i] = &emptymspan;
+
+ // Set first allocation sample size.
+ rate = runtime_MemProfileRate;
+ if(rate > 0x3fffffff) // make 2*rate not overflow
+ rate = 0x3fffffff;
+ if(rate != 0)
+ c->next_sample = runtime_fastrand1() % (2*rate);
+
+ return c;
+}
+
void
+runtime_freemcache(MCache *c)
+{
+ runtime_MCache_ReleaseAll(c);
+ runtime_lock(&runtime_mheap);
+ runtime_purgecachedstats(c);
+ runtime_FixAlloc_Free(&runtime_mheap.cachealloc, c);
+ runtime_unlock(&runtime_mheap);
+}
+
+// Gets a span that has a free object in it and assigns it
+// to be the cached span for the given sizeclass. Returns this span.
+MSpan*
runtime_MCache_Refill(MCache *c, int32 sizeclass)
{
MCacheList *l;
+ MSpan *s;
- // Replenish using central lists.
- l = &c->list[sizeclass];
- if(l->list)
- runtime_throw("MCache_Refill: the list is not empty");
- l->nlist = runtime_MCentral_AllocList(&runtime_mheap.central[sizeclass], &l->list);
- if(l->list == nil)
- runtime_throw("out of memory");
-}
+ runtime_m()->locks++;
+ // Return the current cached span to the central lists.
+ s = c->alloc[sizeclass];
+ if(s->freelist != nil)
+ runtime_throw("refill on a nonempty span");
+ if(s != &emptymspan)
+ runtime_MCentral_UncacheSpan(&runtime_mheap.central[sizeclass], s);
-// Take n elements off l and return them to the central free list.
-static void
-ReleaseN(MCacheList *l, int32 n, int32 sizeclass)
-{
- MLink *first, **lp;
- int32 i;
+ // Push any explicitly freed objects to the central lists.
+ // Not required, but it seems like a good time to do it.
+ l = &c->free[sizeclass];
+ if(l->nlist > 0) {
+ runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], l->list);
+ l->list = nil;
+ l->nlist = 0;
+ }
- // Cut off first n elements.
- first = l->list;
- lp = &l->list;
- for(i=0; i<n; i++)
- lp = &(*lp)->next;
- l->list = *lp;
- *lp = nil;
- l->nlist -= n;
-
- // Return them to central free list.
- runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], first);
+ // Get a new cached span from the central lists.
+ s = runtime_MCentral_CacheSpan(&runtime_mheap.central[sizeclass]);
+ if(s == nil)
+ runtime_throw("out of memory");
+ if(s->freelist == nil) {
+ runtime_printf("%d %d\n", s->ref, (int32)((s->npages << PageShift) / s->elemsize));
+ runtime_throw("empty span");
+ }
+ c->alloc[sizeclass] = s;
+ runtime_m()->locks--;
+ return s;
}
void
-runtime_MCache_Free(MCache *c, void *v, int32 sizeclass, uintptr size)
+runtime_MCache_Free(MCache *c, MLink *p, int32 sizeclass, uintptr size)
{
MCacheList *l;
- MLink *p;
- // Put back on list.
- l = &c->list[sizeclass];
- p = v;
+ // Put on free list.
+ l = &c->free[sizeclass];
p->next = l->list;
l->list = p;
l->nlist++;
- c->local_cachealloc -= size;
- // We transfer span at a time from MCentral to MCache,
- // if we have 2 times more than that, release a half back.
- if(l->nlist >= 2*(runtime_class_to_allocnpages[sizeclass]<<PageShift)/size)
- ReleaseN(l, l->nlist/2, sizeclass);
+ // We transfer a span at a time from MCentral to MCache,
+ // so we'll do the same in the other direction.
+ if(l->nlist >= (runtime_class_to_allocnpages[sizeclass]<<PageShift)/size) {
+ runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], l->list);
+ l->list = nil;
+ l->nlist = 0;
+ }
}
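
As a worked example of that threshold: assuming a size class whose
spans are a single page (runtime_class_to_allocnpages[sizeclass] == 1)
with PageShift 12 and 64-byte objects, the cached free list is pushed
back to MCentral once it reaches 4096/64 = 64 objects, exactly one
span's worth, matching the span-at-a-time refill in the other
direction.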
void
runtime_MCache_ReleaseAll(MCache *c)
{
int32 i;
+ MSpan *s;
MCacheList *l;
for(i=0; i<NumSizeClasses; i++) {
- l = &c->list[i];
- if(l->list) {
+ s = c->alloc[i];
+ if(s != &emptymspan) {
+ runtime_MCentral_UncacheSpan(&runtime_mheap.central[i], s);
+ c->alloc[i] = &emptymspan;
+ }
+ l = &c->free[i];
+ if(l->nlist > 0) {
runtime_MCentral_FreeList(&runtime_mheap.central[i], l->list);
l->list = nil;
l->nlist = 0;
diff --git a/libgo/runtime/mcentral.c b/libgo/runtime/mcentral.c
index 1285336..e41a83f 100644
--- a/libgo/runtime/mcentral.c
+++ b/libgo/runtime/mcentral.c
@@ -19,7 +19,8 @@
#include "malloc.h"
static bool MCentral_Grow(MCentral *c);
-static void MCentral_Free(MCentral *c, void *v);
+static void MCentral_Free(MCentral *c, MLink *v);
+static void MCentral_ReturnToHeap(MCentral *c, MSpan *s);
// Initialize a single central free list.
void
@@ -30,12 +31,9 @@ runtime_MCentral_Init(MCentral *c, int32 sizeclass)
runtime_MSpanList_Init(&c->empty);
}
-// Allocate a list of objects from the central free list.
-// Return the number of objects allocated.
-// The objects are linked together by their first words.
-// On return, *pfirst points at the first object.
-int32
-runtime_MCentral_AllocList(MCentral *c, MLink **pfirst)
+// Allocate a span to use in an MCache.
+MSpan*
+runtime_MCentral_CacheSpan(MCentral *c)
{
MSpan *s;
int32 cap, n;
@@ -85,25 +83,63 @@ retry:
// Replenish central list if empty.
if(!MCentral_Grow(c)) {
runtime_unlock(c);
- *pfirst = nil;
- return 0;
+ return nil;
}
- s = c->nonempty.next;
+ goto retry;
havespan:
cap = (s->npages << PageShift) / s->elemsize;
n = cap - s->ref;
- *pfirst = s->freelist;
- s->freelist = nil;
- s->ref += n;
+ if(n == 0)
+ runtime_throw("empty span");
+ if(s->freelist == nil)
+ runtime_throw("freelist empty");
c->nfree -= n;
runtime_MSpanList_Remove(s);
runtime_MSpanList_InsertBack(&c->empty, s);
+ s->incache = true;
+ runtime_unlock(c);
+ return s;
+}
+
+// Return span from an MCache.
+void
+runtime_MCentral_UncacheSpan(MCentral *c, MSpan *s)
+{
+ MLink *v;
+ int32 cap, n;
+
+ runtime_lock(c);
+
+ s->incache = false;
+
+ // Move any explicitly freed items from the freebuf to the freelist.
+ while((v = s->freebuf) != nil) {
+ s->freebuf = v->next;
+ runtime_markfreed(v);
+ v->next = s->freelist;
+ s->freelist = v;
+ s->ref--;
+ }
+
+ if(s->ref == 0) {
+ // Free back to heap. Unlikely, but possible.
+ MCentral_ReturnToHeap(c, s); // unlocks c
+ return;
+ }
+
+ cap = (s->npages << PageShift) / s->elemsize;
+ n = cap - s->ref;
+ if(n > 0) {
+ c->nfree += n;
+ runtime_MSpanList_Remove(s);
+ runtime_MSpanList_Insert(&c->nonempty, s);
+ }
runtime_unlock(c);
- return n;
}
-// Free the list of objects back into the central free list.
+// Free the list of objects back into the central free list c.
+// Called from runtime_free.
void
runtime_MCentral_FreeList(MCentral *c, MLink *start)
{
@@ -118,52 +154,58 @@ runtime_MCentral_FreeList(MCentral *c, MLink *start)
}
// Helper: free one object back into the central free list.
+// Caller must hold lock on c on entry. Holds lock on exit.
static void
-MCentral_Free(MCentral *c, void *v)
+MCentral_Free(MCentral *c, MLink *v)
{
MSpan *s;
- MLink *p;
- int32 size;
// Find span for v.
s = runtime_MHeap_Lookup(&runtime_mheap, v);
if(s == nil || s->ref == 0)
runtime_throw("invalid free");
+ if(s->sweepgen != runtime_mheap.sweepgen)
+ runtime_throw("free into unswept span");
+
+ // If the span is currently being used unsynchronized by an MCache,
+ // we can't modify the freelist. Add to the freebuf instead. The
+ // items will get moved to the freelist when the span is returned
+ // by the MCache.
+ if(s->incache) {
+ v->next = s->freebuf;
+ s->freebuf = v;
+ return;
+ }
- // Move to nonempty if necessary.
+ // Move span to nonempty if necessary.
if(s->freelist == nil) {
runtime_MSpanList_Remove(s);
runtime_MSpanList_Insert(&c->nonempty, s);
}
- // Add v back to s's free list.
- p = v;
- p->next = s->freelist;
- s->freelist = p;
+ // Add the object to span's free list.
+ runtime_markfreed(v);
+ v->next = s->freelist;
+ s->freelist = v;
+ s->ref--;
c->nfree++;
// If s is completely freed, return it to the heap.
- if(--s->ref == 0) {
- size = runtime_class_to_size[c->sizeclass];
- runtime_MSpanList_Remove(s);
- runtime_unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
- s->needzero = 1;
- s->freelist = nil;
- c->nfree -= (s->npages << PageShift) / size;
- runtime_unlock(c);
- runtime_MHeap_Free(&runtime_mheap, s, 0);
+ if(s->ref == 0) {
+ MCentral_ReturnToHeap(c, s); // unlocks c
runtime_lock(c);
}
}
// Free n objects from a span s back into the central free list c.
// Called during sweep.
-// Returns true if the span was returned to heap.
+// Returns true if the span was returned to heap. Sets sweepgen to
+// the latest generation.
bool
runtime_MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end)
{
- int32 size;
-
+ if(s->incache)
+ runtime_throw("freespan into cached span");
runtime_lock(c);
// Move to nonempty if necessary.
@@ -177,6 +219,12 @@ runtime_MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *e
s->freelist = start;
s->ref -= n;
c->nfree += n;
+
+ // Delay updating sweepgen until here. This is the signal that
+ // the span may be used in an MCache, so it must come after the
+ // linked-list operations above (actually, just after the
+ // lock of c above).
+ runtime_atomicstore(&s->sweepgen, runtime_mheap.sweepgen);
if(s->ref != 0) {
runtime_unlock(c);
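The atomic store publishes the span under the sweepgen protocol used throughout this patch: runtime_mheap.sweepgen advances by two each GC cycle, and a span's own sweepgen encodes its sweep state relative to it. A sketch of the convention, matching the CAS that MSpan_EnsureSwept performs later in this patch:

	// s->sweepgen == h->sweepgen - 2: span needs sweeping
	// s->sweepgen == h->sweepgen - 1: span is being swept
	// s->sweepgen == h->sweepgen:     span is swept
	static bool
	try_claim_sweep(MSpan *s, uint32 sg)  // sg = runtime_mheap.sweepgen
	{
		// Whoever wins the CAS owns the sweep of this span.
		return runtime_cas(&s->sweepgen, sg-2, sg-1);
	}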
@@ -184,14 +232,7 @@ runtime_MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *e
}
// s is completely freed, return it to the heap.
- size = runtime_class_to_size[c->sizeclass];
- runtime_MSpanList_Remove(s);
- s->needzero = 1;
- s->freelist = nil;
- c->nfree -= (s->npages << PageShift) / size;
- runtime_unlock(c);
- runtime_unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
- runtime_MHeap_Free(&runtime_mheap, s, 0);
+ MCentral_ReturnToHeap(c, s); // unlocks c
return true;
}
@@ -246,3 +287,21 @@ MCentral_Grow(MCentral *c)
runtime_MSpanList_Insert(&c->nonempty, s);
return true;
}
+
+// Return s to the heap. s must be unused (s->ref == 0). Unlocks c.
+static void
+MCentral_ReturnToHeap(MCentral *c, MSpan *s)
+{
+ int32 size;
+
+ size = runtime_class_to_size[c->sizeclass];
+ runtime_MSpanList_Remove(s);
+ s->needzero = 1;
+ s->freelist = nil;
+ if(s->ref != 0)
+ runtime_throw("ref wrong");
+ c->nfree -= (s->npages << PageShift) / size;
+ runtime_unlock(c);
+ runtime_unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
+ runtime_MHeap_Free(&runtime_mheap, s, 0);
+}
diff --git a/libgo/runtime/mem.c b/libgo/runtime/mem.c
index 78f7c51f..8e37486 100644
--- a/libgo/runtime/mem.c
+++ b/libgo/runtime/mem.c
@@ -26,19 +26,33 @@
static int dev_zero = -1;
#endif
-static _Bool
+static int32
addrspace_free(void *v __attribute__ ((unused)), uintptr n __attribute__ ((unused)))
{
#ifdef HAVE_MINCORE
size_t page_size = getpagesize();
- size_t off;
- char one_byte;
+ int32 errval;
+ uintptr chunk;
+ uintptr off;
+
+ // NOTE: vec must be just 1 byte long here.
+ // Mincore returns ENOMEM if any of the pages are unmapped,
+ // but we want to know that all of the pages are unmapped.
+ // To make these the same, we can only ask about one page
+ // at a time. See golang.org/issue/7476.
+ static byte vec[1];
errno = 0;
- for(off = 0; off < n; off += page_size)
- if(mincore((char *)v + off, page_size, (void *)&one_byte) != -1
- || errno != ENOMEM)
+ for(off = 0; off < n; off += chunk) {
+ chunk = page_size * sizeof vec;
+ if(chunk > (n - off))
+ chunk = n - off;
+ errval = mincore((int8*)v + off, chunk, vec);
+ // ENOMEM means unmapped, which is what we want.
+ // Anything else we assume means the pages are mapped.
+ if(errval == 0 || errno != ENOMEM)
return 0;
+ }
#endif
return 1;
}
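For reference, the single-page probe this loop performs, written as a standalone function: with a one-byte vector and a one-page query, ENOMEM unambiguously means unmapped (mincore fills one vector byte per page, so a larger query would mix mapped and unmapped pages). Illustrative only; p must be page-aligned, as in the loop above.

	#include <sys/mman.h>
	#include <unistd.h>
	#include <errno.h>

	static int
	page_unmapped(void *p)
	{
		unsigned char vec[1];

		errno = 0;
		if(mincore(p, getpagesize(), vec) == -1 && errno == ENOMEM)
			return 1;  // unmapped
		return 0;      // mapped, or some other error: assume mapped
	}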
@@ -115,8 +129,27 @@ runtime_SysFree(void *v, uintptr n, uint64 *stat)
runtime_munmap(v, n);
}
+void
+runtime_SysFault(void *v, uintptr n)
+{
+ int fd = -1;
+
+#ifdef USE_DEV_ZERO
+ if (dev_zero == -1) {
+ dev_zero = open("/dev/zero", O_RDONLY);
+ if (dev_zero < 0) {
+ runtime_printf("open /dev/zero: errno=%d\n", errno);
+ exit(2);
+ }
+ }
+ fd = dev_zero;
+#endif
+
+ runtime_mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, fd, 0);
+}
+
void*
-runtime_SysReserve(void *v, uintptr n)
+runtime_SysReserve(void *v, uintptr n, bool *reserved)
{
int fd = -1;
void *p;
@@ -136,13 +169,14 @@ runtime_SysReserve(void *v, uintptr n)
// much address space. Instead, assume that the reservation is okay
// if we can reserve at least 64K and check the assumption in SysMap.
// Only user-mode Linux (UML) rejects these requests.
- if(sizeof(void*) == 8 && (uintptr)v >= 0xffffffffU) {
+ if(sizeof(void*) == 8 && (n >> 16) > 1LLU<<16) {
p = mmap_fixed(v, 64<<10, PROT_NONE, MAP_ANON|MAP_PRIVATE, fd, 0);
if (p != v) {
runtime_munmap(p, 64<<10);
return nil;
}
runtime_munmap(p, 64<<10);
+ *reserved = false;
return v;
}
@@ -153,11 +187,12 @@ runtime_SysReserve(void *v, uintptr n)
p = runtime_mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_NORESERVE, fd, 0);
if(p == MAP_FAILED)
return nil;
+ *reserved = true;
return p;
}
void
-runtime_SysMap(void *v, uintptr n, uint64 *stat)
+runtime_SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
{
void *p;
int fd = -1;
@@ -176,7 +211,7 @@ runtime_SysMap(void *v, uintptr n, uint64 *stat)
#endif
// On 64-bit, we don't actually have v reserved, so tread carefully.
- if(sizeof(void*) == 8 && (uintptr)v >= 0xffffffffU) {
+ if(!reserved) {
p = mmap_fixed(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, fd, 0);
if(p == MAP_FAILED && errno == ENOMEM)
runtime_throw("runtime: out of memory");
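The new bool carries the "was the range actually reserved" answer from SysReserve to SysMap instead of re-deriving it from the address, replacing the brittle v >= 0xffffffff test deleted above. A sketch of the expected call-site pairing (the caller shape is an assumption):

	static void*
	reserve_and_map(void *hint, uintptr size)
	{
		bool reserved;
		void *v;

		v = runtime_SysReserve(hint, size, &reserved);
		if(v == nil)
			return nil;
		// reserved==false means SysReserve only verified availability
		// (the 64-bit path), so SysMap probes with mmap_fixed rather
		// than assuming the range is still held for us.
		runtime_SysMap(v, size, reserved, &mstats.heap_sys);
		return v;
	}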
diff --git a/libgo/runtime/mgc0.c b/libgo/runtime/mgc0.c
index e67c5b9..4b78f3b 100644
--- a/libgo/runtime/mgc0.c
+++ b/libgo/runtime/mgc0.c
@@ -56,6 +56,7 @@
#include "arch.h"
#include "malloc.h"
#include "mgc0.h"
+#include "chan.h"
#include "race.h"
#include "go-type.h"
@@ -87,16 +88,9 @@ extern void * __splitstack_find_context (void *context[10], size_t *, void **,
enum {
Debug = 0,
CollectStats = 0,
- ScanStackByFrames = 1,
- IgnorePreciseGC = 0,
ConcurrentSweep = 1,
- // Four bits per word (see #defines below).
- wordsPerBitmapWord = sizeof(void*)*8/4,
- bitShift = sizeof(void*)*8/4,
-
WorkbufSize = 16*1024,
- RootBlockSize = 4*1024,
FinBlockSize = 4*1024,
handoffThreshold = 4,
@@ -107,13 +101,6 @@ enum {
LOOP = 2,
PC_BITS = PRECISE | LOOP,
- // Pointer map
- BitsPerPointer = 2,
- BitsNoPointer = 0,
- BitsPointer = 1,
- BitsIface = 2,
- BitsEface = 3,
-
RootData = 0,
RootBss = 1,
RootFinalizers = 2,
@@ -127,44 +114,28 @@ enum {
// Initialized from $GOGC. GOGC=off means no gc.
static int32 gcpercent = GcpercentUnknown;
-static struct
-{
- Lock;
- void* head;
-} pools;
+static FuncVal* poolcleanup;
-void sync_runtime_registerPool(void **)
- __asm__ (GOSYM_PREFIX "sync.runtime_registerPool");
+void sync_runtime_registerPoolCleanup(FuncVal*)
+ __asm__ (GOSYM_PREFIX "sync.runtime_registerPoolCleanup");
void
-sync_runtime_registerPool(void **p)
+sync_runtime_registerPoolCleanup(FuncVal *f)
{
- runtime_lock(&pools);
- p[0] = pools.head;
- pools.head = p;
- runtime_unlock(&pools);
+ poolcleanup = f;
}
static void
clearpools(void)
{
- void **pool, **next;
P *p, **pp;
MCache *c;
- uintptr off;
// clear sync.Pool's
- for(pool = pools.head; pool != nil; pool = next) {
- next = pool[0];
- pool[0] = nil; // next
- pool[1] = nil; // local
- pool[2] = nil; // localSize
- off = (uintptr)pool[3] / sizeof(void*);
- pool[off+0] = nil; // global slice
- pool[off+1] = nil;
- pool[off+2] = nil;
+ if(poolcleanup != nil) {
+ __go_set_closure(poolcleanup);
+ poolcleanup->fn();
}
- pools.head = nil;
for(pp=runtime_allp; (p=*pp) != nil; pp++) {
// clear tinyalloc pool
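clearpools now fires a single registered cleanup closure instead of walking an intrusive list of pool headers. Calling a gccgo FuncVal is the two-step dance shown above; restated as a helper for clarity (the helper name is an assumption):

	static void
	call_funcval(FuncVal *fv)
	{
		__go_set_closure(fv);  // stash the closure where the callee looks
		fv->fn();              // callee may retrieve it via __go_get_closure
	}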
@@ -178,39 +149,6 @@ clearpools(void)
}
}
-// Bits in per-word bitmap.
-// #defines because enum might not be able to hold the values.
-//
-// Each word in the bitmap describes wordsPerBitmapWord words
-// of heap memory. There are 4 bitmap bits dedicated to each heap word,
-// so on a 64-bit system there is one bitmap word per 16 heap words.
-// The bits in the word are packed together by type first, then by
-// heap location, so each 64-bit bitmap word consists of, from top to bottom,
-// the 16 bitSpecial bits for the corresponding heap words, then the 16 bitMarked bits,
-// then the 16 bitScan/bitBlockBoundary bits, then the 16 bitAllocated bits.
-// This layout makes it easier to iterate over the bits of a given type.
-//
-// The bitmap starts at mheap.arena_start and extends *backward* from
-// there. On a 64-bit system the off'th word in the arena is tracked by
-// the off/16+1'th word before mheap.arena_start. (On a 32-bit system,
-// the only difference is that the divisor is 8.)
-//
-// To pull out the bits corresponding to a given pointer p, we use:
-//
-// off = p - (uintptr*)mheap.arena_start; // word offset
-// b = (uintptr*)mheap.arena_start - off/wordsPerBitmapWord - 1;
-// shift = off % wordsPerBitmapWord
-// bits = *b >> shift;
-// /* then test bits & bitAllocated, bits & bitMarked, etc. */
-//
-#define bitAllocated ((uintptr)1<<(bitShift*0)) /* block start; eligible for garbage collection */
-#define bitScan ((uintptr)1<<(bitShift*1)) /* when bitAllocated is set */
-#define bitMarked ((uintptr)1<<(bitShift*2)) /* when bitAllocated is set */
-#define bitSpecial ((uintptr)1<<(bitShift*3)) /* when bitAllocated is set - has finalizer or being profiled */
-#define bitBlockBoundary ((uintptr)1<<(bitShift*1)) /* when bitAllocated is NOT set - mark for FlagNoGC objects */
-
-#define bitMask (bitAllocated | bitScan | bitMarked | bitSpecial)
-
// Holding worldsema grants an M the right to try to stop the world.
// The procedure is:
//
@@ -256,12 +194,15 @@ struct FinBlock
Finalizer fin[1];
};
-static G *fing;
-static FinBlock *finq; // list of finalizers that are to be executed
-static FinBlock *finc; // cache of free blocks
-static FinBlock *allfin; // list of all blocks
-static int32 fingwait;
+static Lock finlock; // protects the following variables
+static FinBlock *finq; // list of finalizers that are to be executed
+static FinBlock *finc; // cache of free blocks
+static FinBlock *allfin; // list of all blocks
+bool runtime_fingwait;
+bool runtime_fingwake;
+
static Lock gclock;
+static G* fing;
static void runfinq(void*);
static void bgsweep(void*);
@@ -331,7 +272,7 @@ static struct {
// has been marked by this function, false otherwise.
// This function doesn't append the object to any buffer.
static bool
-markonly(void *obj)
+markonly(const void *obj)
{
byte *p;
uintptr *bitp, bits, shift, x, xbits, off, j;
@@ -339,17 +280,17 @@ markonly(void *obj)
PageID k;
// Words outside the arena cannot be pointers.
- if((byte*)obj < runtime_mheap.arena_start || (byte*)obj >= runtime_mheap.arena_used)
+ if((const byte*)obj < runtime_mheap.arena_start || (const byte*)obj >= runtime_mheap.arena_used)
return false;
// obj may be a pointer to a live object.
// Try to find the beginning of the object.
// Round down to word boundary.
- obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));
+ obj = (const void*)((uintptr)obj & ~((uintptr)PtrSize-1));
// Find bits for this word.
- off = (uintptr*)obj - (uintptr*)runtime_mheap.arena_start;
+ off = (const uintptr*)obj - (uintptr*)runtime_mheap.arena_start;
bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
shift = off % wordsPerBitmapWord;
xbits = *bitp;
@@ -380,19 +321,19 @@ markonly(void *obj)
x = k;
x -= (uintptr)runtime_mheap.arena_start>>PageShift;
s = runtime_mheap.spans[x];
- if(s == nil || k < s->start || (byte*)obj >= s->limit || s->state != MSpanInUse)
+ if(s == nil || k < s->start || (const byte*)obj >= s->limit || s->state != MSpanInUse)
return false;
p = (byte*)((uintptr)s->start<<PageShift);
if(s->sizeclass == 0) {
obj = p;
} else {
uintptr size = s->elemsize;
- int32 i = ((byte*)obj - p)/size;
+ int32 i = ((const byte*)obj - p)/size;
obj = p+i*size;
}
// Now that we know the object header, reload bits.
- off = (uintptr*)obj - (uintptr*)runtime_mheap.arena_start;
+ off = (const uintptr*)obj - (uintptr*)runtime_mheap.arena_start;
bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
shift = off % wordsPerBitmapWord;
xbits = *bitp;
@@ -768,8 +709,8 @@ checkptr(void *obj, uintptr objti)
// A simple best-effort check until first GC_END.
for(j = 1; pc1[j] != GC_END && pc2[j] != GC_END; j++) {
if(pc1[j] != pc2[j]) {
- runtime_printf("invalid gc type info for '%s' at %p, type info %p, block info %p\n",
- t->string ? (const int8*)t->string->str : (const int8*)"?", j, pc1[j], pc2[j]);
+ runtime_printf("invalid gc type info for '%s', type info %p [%d]=%p, block info %p [%d]=%p\n",
+ t->string ? (const int8*)t->string->str : (const int8*)"?", pc1, (int32)j, pc1[j], pc2, (int32)j, pc2[j]);
runtime_throw("invalid gc type info");
}
}
@@ -793,8 +734,9 @@ scanblock(Workbuf *wbuf, bool keepworking)
uintptr *chan_ret, chancap;
#endif
void *obj;
- const Type *t;
+ const Type *t, *et;
Slice *sliceptr;
+ String *stringptr;
Frame *stack_ptr, stack_top, stack[GC_STACK_CAPACITY+4];
BufferList *scanbuffers;
Scanbuf sbuf;
@@ -851,9 +793,6 @@ scanblock(Workbuf *wbuf, bool keepworking)
for(;;) {
// Each iteration scans the block b of length n, queueing pointers in
// the work buffer.
- if(Debug > 1) {
- runtime_printf("scanblock %p %D\n", b, (int64)n);
- }
if(CollectStats) {
runtime_xadd64(&gcstats.nbytes, n);
@@ -862,6 +801,9 @@ scanblock(Workbuf *wbuf, bool keepworking)
}
if(ti != 0 && false) {
+ if(Debug > 1) {
+ runtime_printf("scanblock %p %D ti %p\n", b, (int64)n, ti);
+ }
pc = (uintptr*)(ti & ~(uintptr)PC_BITS);
precise_type = (ti & PRECISE);
stack_top.elemsize = pc[0];
@@ -918,15 +860,23 @@ scanblock(Workbuf *wbuf, bool keepworking)
pc = chanProg;
break;
default:
+ if(Debug > 1)
+ runtime_printf("scanblock %p %D type %p %S\n", b, (int64)n, type, *t->string);
runtime_throw("scanblock: invalid type");
return;
}
+ if(Debug > 1)
+ runtime_printf("scanblock %p %D type %p %S pc=%p\n", b, (int64)n, type, *t->string, pc);
} else {
pc = defaultProg;
+ if(Debug > 1)
+ runtime_printf("scanblock %p %D unknown type\n", b, (int64)n);
}
#endif
} else {
pc = defaultProg;
+ if(Debug > 1)
+ runtime_printf("scanblock %p %D no span types\n", b, (int64)n);
}
if(IgnorePreciseGC)
@@ -934,7 +884,6 @@ scanblock(Workbuf *wbuf, bool keepworking)
pc++;
stack_top.b = (uintptr)b;
-
end_b = (uintptr)b + n - PtrSize;
for(;;) {
@@ -947,6 +896,8 @@ scanblock(Workbuf *wbuf, bool keepworking)
case GC_PTR:
obj = *(void**)(stack_top.b + pc[1]);
objti = pc[2];
+ if(Debug > 2)
+ runtime_printf("gc_ptr @%p: %p ti=%p\n", stack_top.b+pc[1], obj, objti);
pc += 3;
if(Debug)
checkptr(obj, objti);
@@ -954,6 +905,8 @@ scanblock(Workbuf *wbuf, bool keepworking)
case GC_SLICE:
sliceptr = (Slice*)(stack_top.b + pc[1]);
+ if(Debug > 2)
+ runtime_printf("gc_slice @%p: %p/%D/%D\n", sliceptr, sliceptr->array, (int64)sliceptr->__count, (int64)sliceptr->cap);
if(sliceptr->cap != 0) {
obj = sliceptr->array;
// Can't use slice element type for scanning,
@@ -967,18 +920,25 @@ scanblock(Workbuf *wbuf, bool keepworking)
case GC_APTR:
obj = *(void**)(stack_top.b + pc[1]);
+ if(Debug > 2)
+ runtime_printf("gc_aptr @%p: %p\n", stack_top.b+pc[1], obj);
pc += 2;
break;
case GC_STRING:
- obj = *(void**)(stack_top.b + pc[1]);
- markonly(obj);
+ stringptr = (String*)(stack_top.b + pc[1]);
+ if(Debug > 2)
+ runtime_printf("gc_string @%p: %p/%D\n", stack_top.b+pc[1], stringptr->str, (int64)stringptr->len);
+ if(stringptr->len != 0)
+ markonly(stringptr->str);
pc += 2;
continue;
case GC_EFACE:
eface = (Eface*)(stack_top.b + pc[1]);
pc += 2;
+ if(Debug > 2)
+ runtime_printf("gc_eface @%p: %p %p\n", stack_top.b+pc[1], eface->__type_descriptor, eface->__object);
if(eface->__type_descriptor == nil)
continue;
@@ -999,9 +959,15 @@ scanblock(Workbuf *wbuf, bool keepworking)
continue;
obj = eface->__object;
- if((t->__code & ~KindNoPointers) == KindPtr)
- // objti = (uintptr)((PtrType*)t)->elem->gc;
- objti = 0;
+ if((t->__code & ~KindNoPointers) == KindPtr) {
+ // Only use type information if it is a pointer-containing type.
+ // This matches the GC programs written by cmd/gc/reflect.c's
+ // dgcsym1 in case TPTR32/case TPTR64. See rationale there.
+ et = ((const PtrType*)t)->elem;
+ if(!(et->__code & KindNoPointers))
+ // objti = (uintptr)((const PtrType*)t)->elem->gc;
+ objti = 0;
+ }
} else {
obj = eface->__object;
// objti = (uintptr)t->gc;
@@ -1013,6 +979,8 @@ scanblock(Workbuf *wbuf, bool keepworking)
case GC_IFACE:
iface = (Iface*)(stack_top.b + pc[1]);
pc += 2;
+ if(Debug > 2)
+ runtime_printf("gc_iface @%p: %p/%p %p\n", stack_top.b+pc[1], iface->__methods[0], nil, iface->__object);
if(iface->tab == nil)
continue;
@@ -1032,9 +1000,15 @@ scanblock(Workbuf *wbuf, bool keepworking)
continue;
obj = iface->__object;
- if((t->__code & ~KindNoPointers) == KindPtr)
- // objti = (uintptr)((const PtrType*)t)->elem->gc;
- objti = 0;
+ if((t->__code & ~KindNoPointers) == KindPtr) {
+ // Only use type information if it is a pointer-containing type.
+ // This matches the GC programs written by cmd/gc/reflect.c's
+ // dgcsym1 in case TPTR32/case TPTR64. See rationale there.
+ et = ((const PtrType*)t)->elem;
+ if(!(et->__code & KindNoPointers))
+ // objti = (uintptr)((const PtrType*)t)->elem->gc;
+ objti = 0;
+ }
} else {
obj = iface->__object;
// objti = (uintptr)t->gc;
@@ -1046,6 +1020,8 @@ scanblock(Workbuf *wbuf, bool keepworking)
case GC_DEFAULT_PTR:
while(stack_top.b <= end_b) {
obj = *(byte**)stack_top.b;
+ if(Debug > 2)
+ runtime_printf("gc_default_ptr @%p: %p\n", stack_top.b, obj);
stack_top.b += PtrSize;
if((byte*)obj >= arena_start && (byte*)obj < arena_used) {
*sbuf.ptr.pos++ = (PtrTarget){obj, 0};
@@ -1125,6 +1101,8 @@ scanblock(Workbuf *wbuf, bool keepworking)
objti = pc[3];
pc += 4;
+ if(Debug > 2)
+ runtime_printf("gc_region @%p: %D %p\n", stack_top.b+pc[1], (int64)size, objti);
*sbuf.obj.pos++ = (Obj){obj, size, objti};
if(sbuf.obj.pos == sbuf.obj.end)
flushobjbuf(&sbuf);
@@ -1133,6 +1111,8 @@ scanblock(Workbuf *wbuf, bool keepworking)
#if 0
case GC_CHAN_PTR:
chan = *(Hchan**)(stack_top.b + pc[1]);
+ if(Debug > 2 && chan != nil)
+ runtime_printf("gc_chan_ptr @%p: %p/%D/%D %p\n", stack_top.b+pc[1], chan, (int64)chan->qcount, (int64)chan->dataqsiz, pc[2]);
if(chan == nil) {
pc += 3;
continue;
@@ -1308,6 +1288,7 @@ markroot(ParFor *desc, uint32 i)
USED(&desc);
wbuf = getempty(nil);
+ // Note: if you add a case here, please also update heapdump.c:dumproots.
switch(i) {
case RootData:
// For gccgo this is both data and bss.
@@ -1597,7 +1578,7 @@ runtime_queuefinalizer(void *p, FuncVal *fn, const FuncType *ft, const PtrType *
FinBlock *block;
Finalizer *f;
- runtime_lock(&gclock);
+ runtime_lock(&finlock);
if(finq == nil || finq->cnt == finq->cap) {
if(finc == nil) {
finc = runtime_persistentalloc(FinBlockSize, 0, &mstats.gc_sys);
@@ -1616,25 +1597,45 @@ runtime_queuefinalizer(void *p, FuncVal *fn, const FuncType *ft, const PtrType *
f->ft = ft;
f->ot = ot;
f->arg = p;
- runtime_unlock(&gclock);
+ runtime_fingwake = true;
+ runtime_unlock(&finlock);
+}
+
+void
+runtime_iterate_finq(void (*callback)(FuncVal*, void*, const FuncType*, const PtrType*))
+{
+ FinBlock *fb;
+ Finalizer *f;
+ int32 i;
+
+ for(fb = allfin; fb; fb = fb->alllink) {
+ for(i = 0; i < fb->cnt; i++) {
+ f = &fb->fin[i];
+ callback(f->fn, f->arg, f->ft, f->ot);
+ }
+ }
}
void
runtime_MSpan_EnsureSwept(MSpan *s)
{
M *m = runtime_m();
+ G *g = runtime_g();
uint32 sg;
+ // Caller must disable preemption.
+ // Otherwise when this function returns the span can become unswept again
+ // (if GC is triggered on another goroutine).
+ if(m->locks == 0 && m->mallocing == 0 && g != m->g0)
+ runtime_throw("MSpan_EnsureSwept: m is not locked");
+
sg = runtime_mheap.sweepgen;
if(runtime_atomicload(&s->sweepgen) == sg)
return;
- m->locks++;
if(runtime_cas(&s->sweepgen, sg-2, sg-1)) {
runtime_MSpan_Sweep(s);
- m->locks--;
return;
}
- m->locks--;
// unfortunate condition, and we don't have efficient means to wait
while(runtime_atomicload(&s->sweepgen) != sg)
runtime_osyield();
@@ -1699,7 +1700,7 @@ runtime_MSpan_Sweep(MSpan *s)
shift = off % wordsPerBitmapWord;
*bitp |= bitMarked<<shift;
}
-
+
// Unlink & free special records for any objects we're about to free.
specialp = &s->specials;
special = *specialp;
@@ -1757,8 +1758,11 @@ runtime_MSpan_Sweep(MSpan *s)
continue;
}
- // Clear mark, scan, and special bits.
- *bitp &= ~((bitScan|bitMarked|bitSpecial)<<shift);
+ if(runtime_debug.allocfreetrace)
+ runtime_tracefree(p, size);
+
+ // Clear mark and scan bits.
+ *bitp &= ~((bitScan|bitMarked)<<shift);
if(cl == 0) {
// Free large span.
@@ -1767,8 +1771,9 @@ runtime_MSpan_Sweep(MSpan *s)
// important to set sweepgen before returning it to heap
runtime_atomicstore(&s->sweepgen, sweepgen);
sweepgenset = true;
+ // See note about SysFault vs SysFree in malloc.goc.
if(runtime_debug.efence)
- runtime_SysFree(p, size, &mstats.gc_sys);
+ runtime_SysFault(p, size);
else
runtime_MHeap_Free(&runtime_mheap, s, 1);
c->local_nlargefree++;
@@ -1796,7 +1801,13 @@ runtime_MSpan_Sweep(MSpan *s)
}
}
- if(!sweepgenset) {
+ // We need to set s->sweepgen = h->sweepgen only when all blocks are swept,
+ // because of the potential for a concurrent free/SetFinalizer.
+ // But we need to set it before we make the span available for allocation
+ // (return it to heap or mcentral), because allocation code assumes that a
+ // span is already swept if available for allocation.
+
+ if(!sweepgenset && nfree == 0) {
// The span must be in our exclusive ownership until we update sweepgen,
// check for potential races.
if(s->state != MSpanInUse || s->sweepgen != sweepgen-1) {
@@ -1806,11 +1817,12 @@ runtime_MSpan_Sweep(MSpan *s)
}
runtime_atomicstore(&s->sweepgen, sweepgen);
}
- if(nfree) {
+ if(nfree > 0) {
c->local_nsmallfree[cl] += nfree;
c->local_cachealloc -= nfree * size;
runtime_xadd64(&mstats.next_gc, -(uint64)(nfree * size * (gcpercent + 100)/100));
res = runtime_MCentral_FreeSpan(&runtime_mheap.central[cl], s, nfree, head.next, end);
+ // MCentral_FreeSpan updates sweepgen.
}
return res;
}
@@ -1838,17 +1850,16 @@ bgsweep(void* dummy __attribute__ ((unused)))
runtime_gosched();
}
runtime_lock(&gclock);
- if(finq != nil) {
- // kick off or wake up goroutine to run queued finalizers
- if(fing == nil)
- fing = __go_go(runfinq, nil);
- else if(fingwait) {
- fingwait = 0;
- runtime_ready(fing);
- }
+ if(!runtime_mheap.sweepdone) {
+ // This can happen if a GC ran between sweepone returning -1
+ // and our taking gclock.
+ runtime_unlock(&gclock);
+ continue;
}
sweep.parked = true;
+ runtime_g()->isbackground = true;
runtime_parkunlock(&gclock, "GC sweep wait");
+ runtime_g()->isbackground = false;
}
}
@@ -1880,6 +1891,8 @@ runtime_sweepone(void)
}
if(s->sweepgen != sg-2 || !runtime_cas(&s->sweepgen, sg-2, sg-1))
continue;
+ if(s->incache)
+ runtime_throw("sweep of incache span");
npages = s->npages;
if(!runtime_MSpan_Sweep(s))
npages = 0;
@@ -1896,7 +1909,7 @@ dumpspan(uint32 idx)
byte *p;
byte *arena_start;
MSpan *s;
- bool allocated, special;
+ bool allocated;
s = runtime_mheap.allspans[idx];
if(s->state != MSpanInUse)
@@ -1923,7 +1936,6 @@ dumpspan(uint32 idx)
bits = *bitp>>shift;
allocated = ((bits & bitAllocated) != 0);
- special = ((bits & bitSpecial) != 0);
for(i=0; (uint32)i<size; i+=sizeof(void*)) {
if(column == 0) {
@@ -1931,7 +1943,6 @@ dumpspan(uint32 idx)
}
if(i == 0) {
runtime_printf(allocated ? "(" : "[");
- runtime_printf(special ? "@" : "");
runtime_printf("%p: ", p+i);
} else {
runtime_printf(" ");
@@ -1969,6 +1980,7 @@ runtime_gchelper(void)
{
uint32 nproc;
+ runtime_m()->traceback = 2;
gchelperstart();
// parallel mark for over gc roots
@@ -1981,6 +1993,7 @@ runtime_gchelper(void)
nproc = work.nproc; // work.nproc can change right after we increment work.ndone
if(runtime_xadd(&work.ndone, +1) == nproc-1)
runtime_notewakeup(&work.alldone);
+ runtime_m()->traceback = 0;
}
static void
@@ -2012,8 +2025,8 @@ flushallmcaches(void)
}
}
-static void
-updatememstats(GCStats *stats)
+void
+runtime_updatememstats(GCStats *stats)
{
M *mp;
MSpan *s;
@@ -2099,6 +2112,7 @@ updatememstats(GCStats *stats)
struct gc_args
{
int64 start_time; // start time of GC in ns (just before stoptheworld)
+ bool eagersweep;
};
static void gc(struct gc_args *args);
@@ -2117,6 +2131,8 @@ readgogc(void)
return runtime_atoi(p);
}
+// force = 1 - do GC regardless of current heap usage
+// force = 2 - do GC and eager sweep
void
runtime_gc(int32 force)
{
@@ -2159,7 +2175,7 @@ runtime_gc(int32 force)
return;
runtime_semacquire(&runtime_worldsema, false);
- if(!force && mstats.heap_alloc < mstats.next_gc) {
+ if(force==0 && mstats.heap_alloc < mstats.next_gc) {
// typically threads which lost the race to grab
// worldsema exit here when gc is done.
runtime_semrelease(&runtime_worldsema);
@@ -2168,12 +2184,10 @@ runtime_gc(int32 force)
// Ok, we're doing it! Stop everybody else
a.start_time = runtime_nanotime();
+ a.eagersweep = force >= 2;
m->gcing = 1;
runtime_stoptheworld();
- if(runtime_debug.allocfreetrace)
- runtime_MProf_TraceGC();
-
clearpools();
// Run gc on the g0 stack. We do this so that the g stack
@@ -2182,14 +2196,14 @@ runtime_gc(int32 force)
// we don't need to scan gc's internal state). Also an
// enabler for copyable stacks.
for(i = 0; i < (runtime_debug.gctrace > 1 ? 2 : 1); i++) {
+ if(i > 0)
+ a.start_time = runtime_nanotime();
// switch to g0, call gc(&a), then switch back
g = runtime_g();
g->param = &a;
g->status = Gwaiting;
g->waitreason = "garbage collection";
runtime_mcall(mgc);
- // record a new start time in case we're going around again
- a.start_time = runtime_nanotime();
}
// all done
@@ -2201,17 +2215,6 @@ runtime_gc(int32 force)
// now that gc is done, kick off finalizer thread if needed
if(!ConcurrentSweep) {
- if(finq != nil) {
- runtime_lock(&gclock);
- // kick off or wake up goroutine to run queued finalizers
- if(fing == nil)
- fing = __go_go(runfinq, nil);
- else if(fingwait) {
- fingwait = 0;
- runtime_ready(fing);
- }
- runtime_unlock(&gclock);
- }
// give the queued finalizers, if any, a chance to run
runtime_gosched();
} else {
@@ -2236,21 +2239,21 @@ gc(struct gc_args *args)
int64 t0, t1, t2, t3, t4;
uint64 heap0, heap1, obj, ninstr;
GCStats stats;
- M *mp;
uint32 i;
// Eface eface;
m = runtime_m();
+ if(runtime_debug.allocfreetrace)
+ runtime_tracegc();
+
+ m->traceback = 2;
t0 = args->start_time;
work.tstart = args->start_time;
if(CollectStats)
runtime_memclr((byte*)&gcstats, sizeof(gcstats));
- for(mp=runtime_allm; mp; mp=mp->alllink)
- runtime_settype_flush(mp);
-
m->locks++; // disable gc during mallocs in parforalloc
if(work.markfor == nil)
work.markfor = runtime_parforalloc(MaxGcproc);
@@ -2262,7 +2265,9 @@ gc(struct gc_args *args)
// itabtype = ((PtrType*)eface.__type_descriptor)->elem;
}
- t1 = runtime_nanotime();
+ t1 = 0;
+ if(runtime_debug.gctrace)
+ t1 = runtime_nanotime();
// Sweep what is not sweeped by bgsweep.
while(runtime_sweepone() != (uintptr)-1)
@@ -2277,13 +2282,17 @@ gc(struct gc_args *args)
runtime_helpgc(work.nproc);
}
- t2 = runtime_nanotime();
+ t2 = 0;
+ if(runtime_debug.gctrace)
+ t2 = runtime_nanotime();
gchelperstart();
runtime_parfordo(work.markfor);
scanblock(nil, true);
- t3 = runtime_nanotime();
+ t3 = 0;
+ if(runtime_debug.gctrace)
+ t3 = runtime_nanotime();
bufferList[m->helpgc].busy = 0;
if(work.nproc > 1)
@@ -2298,7 +2307,7 @@ gc(struct gc_args *args)
mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*gcpercent/100;
t4 = runtime_nanotime();
- mstats.last_gc = t4;
+ mstats.last_gc = runtime_unixnanotime(); // must be Unix time to make sense to user
mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t4 - t0;
mstats.pause_total_ns += t4 - t0;
mstats.numgc++;
@@ -2306,18 +2315,22 @@ gc(struct gc_args *args)
runtime_printf("pause %D\n", t4-t0);
if(runtime_debug.gctrace) {
- updatememstats(&stats);
heap1 = mstats.heap_alloc;
+ runtime_updatememstats(&stats);
+ if(heap1 != mstats.heap_alloc) {
+ runtime_printf("runtime: mstats skew: heap=%D/%D\n", heap1, mstats.heap_alloc);
+ runtime_throw("mstats skew");
+ }
obj = mstats.nmalloc - mstats.nfree;
stats.nprocyield += work.markfor->nprocyield;
stats.nosyield += work.markfor->nosyield;
stats.nsleep += work.markfor->nsleep;
- runtime_printf("gc%d(%d): %D+%D+%D ms, %D -> %D MB, %D (%D-%D) objects,"
+ runtime_printf("gc%d(%d): %D+%D+%D+%D us, %D -> %D MB, %D (%D-%D) objects,"
" %d/%d/%d sweeps,"
" %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
- mstats.numgc, work.nproc, (t3-t2)/1000000, (t2-t1)/1000000, (t1-t0+t4-t3)/1000000,
+ mstats.numgc, work.nproc, (t1-t0)/1000, (t2-t1)/1000, (t3-t2)/1000, (t4-t3)/1000,
heap0>>20, heap1>>20, obj,
mstats.nmalloc, mstats.nfree,
sweep.nspan, gcstats.nbgsweep, gcstats.npausesweep,
@@ -2368,7 +2381,7 @@ gc(struct gc_args *args)
sweep.spanidx = 0;
 // Temporarily disable concurrent sweep, because we see failures on builders.
- if(ConcurrentSweep) {
+ if(ConcurrentSweep && !args->eagersweep) {
runtime_lock(&gclock);
if(sweep.g == nil)
sweep.g = __go_go(bgsweep, nil);
@@ -2384,6 +2397,7 @@ gc(struct gc_args *args)
}
runtime_MProf_GC();
+ m->traceback = 0;
}
extern uintptr runtime_sizeof_C_MStats
@@ -2405,7 +2419,7 @@ runtime_ReadMemStats(MStats *stats)
m = runtime_m();
m->gcing = 1;
runtime_stoptheworld();
- updatememstats(nil);
+ runtime_updatememstats(nil);
// Size of the trailing by_size array differs between Go and C,
// NumSizeClasses was changed, but we can not change Go struct because of backward compatibility.
runtime_memmove(stats, &mstats, runtime_sizeof_C_MStats);
@@ -2488,16 +2502,35 @@ runfinq(void* dummy __attribute__ ((unused)))
Eface ef;
Iface iface;
+ // This function blocks for long periods of time, and because it is written in C
+ // we have no liveness information. Zero everything so that uninitialized pointers
+ // do not cause memory leaks.
+ f = nil;
+ fb = nil;
+ next = nil;
+ i = 0;
+ ef.__type_descriptor = nil;
+ ef.__object = nil;
+
+ // force flush to memory
+ USED(&f);
+ USED(&fb);
+ USED(&next);
+ USED(&i);
+ USED(&ef);
+
for(;;) {
- runtime_lock(&gclock);
+ runtime_lock(&finlock);
fb = finq;
finq = nil;
if(fb == nil) {
- fingwait = 1;
- runtime_parkunlock(&gclock, "finalizer wait");
+ runtime_fingwait = true;
+ runtime_g()->isbackground = true;
+ runtime_parkunlock(&finlock, "finalizer wait");
+ runtime_g()->isbackground = false;
continue;
}
- runtime_unlock(&gclock);
+ runtime_unlock(&finlock);
if(raceenabled)
runtime_racefingo();
for(; fb; fb=next) {
@@ -2532,94 +2565,92 @@ runfinq(void* dummy __attribute__ ((unused)))
f->ot = nil;
}
fb->cnt = 0;
+ runtime_lock(&finlock);
fb->next = finc;
finc = fb;
+ runtime_unlock(&finlock);
}
+
+ // Zero everything that's dead, to avoid memory leaks.
+ // See comment at top of function.
+ f = nil;
+ fb = nil;
+ next = nil;
+ i = 0;
+ ef.__type_descriptor = nil;
+ ef.__object = nil;
runtime_gc(1); // trigger another gc to clean up the finalized objects, if possible
}
}
void
+runtime_createfing(void)
+{
+ if(fing != nil)
+ return;
+ // Here we use gclock instead of finlock,
+ // because newproc1 can allocate, which can cause on-demand span sweep,
+ // which can queue finalizers, which would deadlock.
+ runtime_lock(&gclock);
+ if(fing == nil)
+ fing = __go_go(runfinq, nil);
+ runtime_unlock(&gclock);
+}
+
+G*
+runtime_wakefing(void)
+{
+ G *res;
+
+ res = nil;
+ runtime_lock(&finlock);
+ if(runtime_fingwait && runtime_fingwake) {
+ runtime_fingwait = false;
+ runtime_fingwake = false;
+ res = fing;
+ }
+ runtime_unlock(&finlock);
+ return res;
+}
+
+void
runtime_marknogc(void *v)
{
- uintptr *b, obits, bits, off, shift;
+ uintptr *b, off, shift;
off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset
b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
shift = off % wordsPerBitmapWord;
-
- for(;;) {
- obits = *b;
- if((obits>>shift & bitMask) != bitAllocated)
- runtime_throw("bad initial state for marknogc");
- bits = (obits & ~(bitAllocated<<shift)) | bitBlockBoundary<<shift;
- if(runtime_gomaxprocs == 1) {
- *b = bits;
- break;
- } else {
- // more than one goroutine is potentially running: use atomic op
- if(runtime_casp((void**)b, (void*)obits, (void*)bits))
- break;
- }
- }
+ *b = (*b & ~(bitAllocated<<shift)) | bitBlockBoundary<<shift;
}
void
runtime_markscan(void *v)
{
- uintptr *b, obits, bits, off, shift;
+ uintptr *b, off, shift;
off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset
b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
shift = off % wordsPerBitmapWord;
-
- for(;;) {
- obits = *b;
- if((obits>>shift & bitMask) != bitAllocated)
- runtime_throw("bad initial state for markscan");
- bits = obits | bitScan<<shift;
- if(runtime_gomaxprocs == 1) {
- *b = bits;
- break;
- } else {
- // more than one goroutine is potentially running: use atomic op
- if(runtime_casp((void**)b, (void*)obits, (void*)bits))
- break;
- }
- }
+ *b |= bitScan<<shift;
}
-// mark the block at v of size n as freed.
+// mark the block at v as freed.
void
-runtime_markfreed(void *v, uintptr n)
+runtime_markfreed(void *v)
{
- uintptr *b, obits, bits, off, shift;
+ uintptr *b, off, shift;
if(0)
- runtime_printf("markfreed %p+%p\n", v, n);
+ runtime_printf("markfreed %p\n", v);
- if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
+ if((byte*)v > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
runtime_throw("markfreed: bad pointer");
off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset
b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
shift = off % wordsPerBitmapWord;
-
- for(;;) {
- obits = *b;
- // This could be a free of a gc-eligible object (bitAllocated + others) or
- // a FlagNoGC object (bitBlockBoundary set). In either case, we revert to
- // a simple no-scan allocated object because it is going on a free list.
- bits = (obits & ~(bitMask<<shift)) | (bitAllocated<<shift);
- if(runtime_gomaxprocs == 1) {
- *b = bits;
- break;
- } else {
- // more than one goroutine is potentially running: use atomic op
- if(runtime_casp((void**)b, (void*)obits, (void*)bits))
- break;
- }
- }
+ *b = (*b & ~(bitMask<<shift)) | (bitAllocated<<shift);
}
// check that the block at v of size n is marked freed.
@@ -2651,7 +2682,7 @@ runtime_checkfreed(void *v, uintptr n)
void
runtime_markspan(void *v, uintptr size, uintptr n, bool leftover)
{
- uintptr *b, off, shift, i;
+ uintptr *b, *b0, off, shift, i, x;
byte *p;
if((byte*)v+size*n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
@@ -2670,6 +2701,9 @@ runtime_markspan(void *v, uintptr size, uintptr n, bool leftover)
p = v;
if(leftover) // mark a boundary just past end of last block too
n++;
+
+ b0 = nil;
+ x = 0;
for(; n-- > 0; p += size) {
// Okay to use non-atomic ops here, because we control
// the entire span, and each bitmap word has bits for only
@@ -2678,8 +2712,15 @@ runtime_markspan(void *v, uintptr size, uintptr n, bool leftover)
off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start; // word offset
b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
shift = off % wordsPerBitmapWord;
- *b = (*b & ~(bitMask<<shift)) | (bitAllocated<<shift);
+ if(b0 != b) {
+ if(b0 != nil)
+ *b0 = x;
+ b0 = b;
+ x = 0;
+ }
+ x |= bitAllocated<<shift;
}
+ *b0 = x;
}
// unmark the span of memory at v of length n bytes.
@@ -2723,12 +2764,12 @@ runtime_MHeap_MapBits(MHeap *h)
n = (h->arena_used - h->arena_start) / wordsPerBitmapWord;
n = ROUND(n, bitmapChunk);
+ n = ROUND(n, PageSize);
+ page_size = getpagesize();
+ n = ROUND(n, page_size);
if(h->bitmap_mapped >= n)
return;
- page_size = getpagesize();
- n = (n+page_size-1) & ~(page_size-1);
-
- runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped, &mstats.gc_sys);
+ runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped, h->arena_reserved, &mstats.gc_sys);
h->bitmap_mapped = n;
}
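MapBits now rounds with the runtime's ROUND macro instead of the open-coded page math deleted above. Assuming the usual definition from runtime.h, the alignment must be a power of two:

	#define ROUND(x, n) (((x)+(n)-1)&~(uintptr)((n)-1))  // assumed definition

	// e.g. ROUND(5000, 4096) == 8192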
diff --git a/libgo/runtime/mgc0.h b/libgo/runtime/mgc0.h
index f8abe6c..16000d1 100644
--- a/libgo/runtime/mgc0.h
+++ b/libgo/runtime/mgc0.h
@@ -44,3 +44,44 @@ enum {
// - at most GC_STACK_CAPACITY allocations because of GC_ARRAY_START
GC_STACK_CAPACITY = 8,
};
+
+enum {
+ ScanStackByFrames = 1,
+ IgnorePreciseGC = 0,
+
+ // Four bits per word (see #defines below).
+ wordsPerBitmapWord = sizeof(void*)*8/4,
+ bitShift = sizeof(void*)*8/4,
+};
+
+// Bits in per-word bitmap.
+// #defines because enum might not be able to hold the values.
+//
+// Each word in the bitmap describes wordsPerBitmapWord words
+// of heap memory. There are 4 bitmap bits dedicated to each heap word,
+// so on a 64-bit system there is one bitmap word per 16 heap words.
+// The bits in the word are packed together by type first, then by
+// heap location, so each 64-bit bitmap word consists of, from top to bottom,
+// the 16 bitMarked bits for the corresponding heap words,
+// then the 16 bitScan/bitBlockBoundary bits, then the 16 bitAllocated bits.
+// This layout makes it easier to iterate over the bits of a given type.
+//
+// The bitmap starts at mheap.arena_start and extends *backward* from
+// there. On a 64-bit system the off'th word in the arena is tracked by
+// the off/16+1'th word before mheap.arena_start. (On a 32-bit system,
+// the only difference is that the divisor is 8.)
+//
+// To pull out the bits corresponding to a given pointer p, we use:
+//
+// off = p - (uintptr*)mheap.arena_start; // word offset
+// b = (uintptr*)mheap.arena_start - off/wordsPerBitmapWord - 1;
+// shift = off % wordsPerBitmapWord
+// bits = *b >> shift;
+// /* then test bits & bitAllocated, bits & bitMarked, etc. */
+//
+#define bitAllocated ((uintptr)1<<(bitShift*0)) /* block start; eligible for garbage collection */
+#define bitScan ((uintptr)1<<(bitShift*1)) /* when bitAllocated is set */
+#define bitMarked ((uintptr)1<<(bitShift*2)) /* when bitAllocated is set */
+#define bitBlockBoundary ((uintptr)1<<(bitShift*1)) /* when bitAllocated is NOT set - mark for FlagNoGC objects */
+
+#define bitMask (bitAllocated | bitScan | bitMarked)
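The lookup recipe from the comment block, written out as a function for reference (the runtime open-codes this at each use site):

	static uintptr
	heapbits(void *p)
	{
		uintptr off, shift, *b;

		off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start;  // word offset
		b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		return (*b >> shift) & bitMask;  // bitAllocated/bitScan/bitMarked
	}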
diff --git a/libgo/runtime/mheap.c b/libgo/runtime/mheap.c
index 3a5eb15..793915e 100644
--- a/libgo/runtime/mheap.c
+++ b/libgo/runtime/mheap.c
@@ -88,7 +88,7 @@ runtime_MHeap_MapSpans(MHeap *h)
n = ROUND(n, pagesize);
if(h->spans_mapped >= n)
return;
- runtime_SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, &mstats.other_sys);
+ runtime_SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, h->arena_reserved, &mstats.other_sys);
h->spans_mapped = n;
}
@@ -522,6 +522,7 @@ runtime_MHeap_Scavenger(void* dummy)
G *g;
MHeap *h;
uint64 tick, now, forcegc, limit;
+ int64 unixnow;
uint32 k;
Note note, *notep;
@@ -548,8 +549,8 @@ runtime_MHeap_Scavenger(void* dummy)
runtime_notetsleepg(&note, tick);
runtime_lock(h);
- now = runtime_nanotime();
- if(now - mstats.last_gc > forcegc) {
+ unixnow = runtime_unixnanotime();
+ if(unixnow - mstats.last_gc > forcegc) {
runtime_unlock(h);
// The scavenger can not block other goroutines,
// otherwise deadlock detector can fire spuriously.
@@ -561,8 +562,8 @@ runtime_MHeap_Scavenger(void* dummy)
if(runtime_debug.gctrace > 0)
runtime_printf("scvg%d: GC forced\n", k);
runtime_lock(h);
- now = runtime_nanotime();
}
+ now = runtime_nanotime();
scavenge(k, now, limit);
runtime_unlock(h);
}
@@ -573,7 +574,7 @@ void runtime_debug_freeOSMemory(void) __asm__("runtime_debug.freeOSMemory");
void
runtime_debug_freeOSMemory(void)
{
- runtime_gc(1);
+ runtime_gc(2); // force GC and do eager sweep
runtime_lock(&runtime_mheap);
scavenge(-1, ~(uintptr)0, 0);
runtime_unlock(&runtime_mheap);
@@ -590,6 +591,7 @@ runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages)
span->freelist = nil;
span->ref = 0;
span->sizeclass = 0;
+ span->incache = false;
span->elemsize = 0;
span->state = MSpanDead;
span->unusedsince = 0;
@@ -598,6 +600,7 @@ runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages)
span->specialLock.key = 0;
span->specials = nil;
span->needzero = 0;
+ span->freebuf = nil;
}
// Initialize an empty doubly-linked list.
@@ -672,6 +675,7 @@ addspecial(void *p, Special *s)
// Ensure that the span is swept.
// GC accesses specials list w/o locks. And it's just much safer.
+ runtime_m()->locks++;
runtime_MSpan_EnsureSwept(span);
offset = (uintptr)p - (span->start << PageShift);
@@ -684,6 +688,7 @@ addspecial(void *p, Special *s)
while((x = *t) != nil) {
if(offset == x->offset && kind == x->kind) {
runtime_unlock(&span->specialLock);
+ runtime_m()->locks--;
return false; // already exists
}
if(offset < x->offset || (offset == x->offset && kind < x->kind))
@@ -695,6 +700,7 @@ addspecial(void *p, Special *s)
s->next = x;
*t = s;
runtime_unlock(&span->specialLock);
+ runtime_m()->locks--;
return true;
}
@@ -714,6 +720,7 @@ removespecial(void *p, byte kind)
// Ensure that the span is swept.
// GC accesses specials list w/o locks. And it's just much safer.
+ runtime_m()->locks++;
runtime_MSpan_EnsureSwept(span);
offset = (uintptr)p - (span->start << PageShift);
@@ -726,11 +733,13 @@ removespecial(void *p, byte kind)
if(offset == s->offset && kind == s->kind) {
*t = s->next;
runtime_unlock(&span->specialLock);
+ runtime_m()->locks--;
return s;
}
t = &s->next;
}
runtime_unlock(&span->specialLock);
+ runtime_m()->locks--;
return nil;
}
@@ -805,7 +814,7 @@ runtime_freespecial(Special *s, void *p, uintptr size, bool freed)
return false; // don't free p until finalizer is done
case KindSpecialProfile:
sp = (SpecialProfile*)s;
- runtime_MProf_Free(sp->b, p, size, freed);
+ runtime_MProf_Free(sp->b, size, freed);
runtime_lock(&runtime_mheap.speciallock);
runtime_FixAlloc_Free(&runtime_mheap.specialprofilealloc, sp);
runtime_unlock(&runtime_mheap.speciallock);
@@ -823,6 +832,8 @@ runtime_freeallspecials(MSpan *span, void *p, uintptr size)
Special *s, **t, *list;
uintptr offset;
+ if(span->sweepgen != runtime_mheap.sweepgen)
+ runtime_throw("runtime: freeallspecials: unswept span");
// first, collect all specials into the list; then, free them
// this is required to not cause deadlock between span->specialLock and proflock
list = nil;
@@ -848,3 +859,92 @@ runtime_freeallspecials(MSpan *span, void *p, uintptr size)
runtime_throw("can't explicitly free an object with a finalizer");
}
}
+
+// Split an allocated span into two equal parts.
+void
+runtime_MHeap_SplitSpan(MHeap *h, MSpan *s)
+{
+ MSpan *t;
+ MCentral *c;
+ uintptr i;
+ uintptr npages;
+ PageID p;
+
+ if(s->state != MSpanInUse)
+ runtime_throw("MHeap_SplitSpan on a free span");
+ if(s->sizeclass != 0 && s->ref != 1)
+ runtime_throw("MHeap_SplitSpan doesn't have an allocated object");
+ npages = s->npages;
+
+ // remove the span from whatever list it is in now
+ if(s->sizeclass > 0) {
+ // must be in h->central[x].empty
+ c = &h->central[s->sizeclass];
+ runtime_lock(c);
+ runtime_MSpanList_Remove(s);
+ runtime_unlock(c);
+ runtime_lock(h);
+ } else {
+ // must be in h->busy/busylarge
+ runtime_lock(h);
+ runtime_MSpanList_Remove(s);
+ }
+ // heap is locked now
+
+ if(npages == 1) {
+ // convert span of 1 PageSize object to a span of 2 PageSize/2 objects.
+ s->ref = 2;
+ s->sizeclass = runtime_SizeToClass(PageSize/2);
+ s->elemsize = PageSize/2;
+ } else {
+ // convert span of n>1 pages into two spans of n/2 pages each.
+ if((s->npages & 1) != 0)
+ runtime_throw("MHeap_SplitSpan on an odd size span");
+
+ // compute position in h->spans
+ p = s->start;
+ p -= (uintptr)h->arena_start >> PageShift;
+
+ // Allocate a new span for the first half.
+ t = runtime_FixAlloc_Alloc(&h->spanalloc);
+ runtime_MSpan_Init(t, s->start, npages/2);
+ t->limit = (byte*)((t->start + npages/2) << PageShift);
+ t->state = MSpanInUse;
+ t->elemsize = npages << (PageShift - 1);
+ t->sweepgen = s->sweepgen;
+ if(t->elemsize <= MaxSmallSize) {
+ t->sizeclass = runtime_SizeToClass(t->elemsize);
+ t->ref = 1;
+ }
+
+ // the old span holds the second half.
+ s->start += npages/2;
+ s->npages = npages/2;
+ s->elemsize = npages << (PageShift - 1);
+ if(s->elemsize <= MaxSmallSize) {
+ s->sizeclass = runtime_SizeToClass(s->elemsize);
+ s->ref = 1;
+ }
+
+ // update span lookup table
+ for(i = p; i < p + npages/2; i++)
+ h->spans[i] = t;
+ }
+
+ // place the span into a new list
+ if(s->sizeclass > 0) {
+ runtime_unlock(h);
+ c = &h->central[s->sizeclass];
+ runtime_lock(c);
+ // swept spans are at the end of the list
+ runtime_MSpanList_InsertBack(&c->empty, s);
+ runtime_unlock(c);
+ } else {
+ // Swept spans are at the end of lists.
+ if(s->npages < nelem(h->free))
+ runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
+ else
+ runtime_MSpanList_InsertBack(&h->busylarge, s);
+ runtime_unlock(h);
+ }
+}
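The elemsize arithmetic relies on npages being even in the multi-page case: npages << (PageShift - 1) equals (npages * PageSize) / 2, so each half span holds one object of half the original byte count. A quick check of that identity (illustrative only):

	static void
	check_split_arith(uintptr npages)
	{
		if((npages << (PageShift - 1)) != (npages << PageShift) / 2)
			runtime_throw("MHeap_SplitSpan: size arithmetic wrong");
	}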
diff --git a/libgo/runtime/mprof.goc b/libgo/runtime/mprof.goc
index 24f8fe5..8bd56ba 100644
--- a/libgo/runtime/mprof.goc
+++ b/libgo/runtime/mprof.goc
@@ -178,68 +178,16 @@ runtime_MProf_GC(void)
runtime_unlock(&proflock);
}
-static const char*
-typeinfoname(int32 typeinfo)
-{
- if(typeinfo == TypeInfo_SingleObject)
- return "single object";
- else if(typeinfo == TypeInfo_Array)
- return "array";
- else if(typeinfo == TypeInfo_Chan)
- return "channel";
- // runtime_throw("typinfoname: unknown type info");
- return "unknown";
-}
-
-static void
-printstackframes(Location *stk, int32 nstk)
-{
- Location *loc;
- int32 frame;
-
- for(frame = 0; frame < nstk; frame++) {
- loc = &stk[frame];
- if (loc->function.len > 0) {
- runtime_printf("\t#%d %p %S %S:%d\n", frame, loc->pc, loc->function, loc->filename, (int32)loc->lineno);
- } else {
- runtime_printf("\t#%d %p\n", frame, loc->pc);
- }
- }
-}
-
-// Called by collector to report a gc in allocfreetrace mode.
-void
-runtime_MProf_TraceGC(void)
-{
- Location stk[32];
- int32 nstk;
-
- nstk = runtime_callers(1, stk, nelem(stk));
- runtime_printf("MProf_TraceGC\n");
- printstackframes(stk, nstk);
-}
-
// Called by malloc to record a profiled block.
void
-runtime_MProf_Malloc(void *p, uintptr size, uintptr typ)
+runtime_MProf_Malloc(void *p, uintptr size)
{
Location stk[32];
Bucket *b;
- Type *type;
- const char *name;
int32 nstk;
nstk = runtime_callers(1, stk, nelem(stk));
runtime_lock(&proflock);
- if(runtime_debug.allocfreetrace) {
- type = (Type*)(typ & ~3);
- name = typeinfoname(typ & 3);
- runtime_printf("MProf_Malloc(p=%p, size=%p, type=%p <%s", p, size, type, name);
- if(type != nil)
- runtime_printf(" of %S", *type->__reflection);
- runtime_printf(">)\n");
- printstackframes(stk, nstk);
- }
b = stkbucket(MProf, size, stk, nstk, true);
b->recent_allocs++;
b->recent_alloc_bytes += size;
@@ -254,7 +202,7 @@ runtime_MProf_Malloc(void *p, uintptr size, uintptr typ)
// Called when freeing a profiled block.
void
-runtime_MProf_Free(Bucket *b, void *p, uintptr size, bool freed)
+runtime_MProf_Free(Bucket *b, uintptr size, bool freed)
{
runtime_lock(&proflock);
if(freed) {
@@ -264,10 +212,6 @@ runtime_MProf_Free(Bucket *b, void *p, uintptr size, bool freed)
b->prev_frees++;
b->prev_free_bytes += size;
}
- if(runtime_debug.allocfreetrace) {
- runtime_printf("MProf_Free(p=%p, size=%p)\n", p, size);
- printstackframes(b->stk, b->nstk);
- }
runtime_unlock(&proflock);
}
@@ -384,6 +328,18 @@ runtime_MProf_Mark(struct Workbuf **wbufp, void (*enqueue1)(struct Workbuf**, Ob
enqueue1(wbufp, (Obj){(byte*)&bbuckets, sizeof bbuckets, 0});
}
+void
+runtime_iterate_memprof(void (*callback)(Bucket*, uintptr, Location*, uintptr, uintptr, uintptr))
+{
+ Bucket *b;
+
+ runtime_lock(&proflock);
+ for(b=mbuckets; b; b=b->allnext) {
+ callback(b, b->nstk, b->stk, b->size, b->allocs, b->frees);
+ }
+ runtime_unlock(&proflock);
+}
+
// Must match BlockProfileRecord in debug.go.
typedef struct BRecord BRecord;
struct BRecord {
@@ -536,3 +492,73 @@ func GoroutineProfile(b Slice) (n int, ok bool) {
runtime_starttheworld();
}
}
+
+// Tracing of alloc/free/gc.
+
+static Lock tracelock;
+
+static const char*
+typeinfoname(int32 typeinfo)
+{
+ if(typeinfo == TypeInfo_SingleObject)
+ return "single object";
+ else if(typeinfo == TypeInfo_Array)
+ return "array";
+ else if(typeinfo == TypeInfo_Chan)
+ return "channel";
+ runtime_throw("typinfoname: unknown type info");
+ return nil;
+}
+
+void
+runtime_tracealloc(void *p, uintptr size, uintptr typ)
+{
+ const char *name;
+ Type *type;
+
+ runtime_lock(&tracelock);
+ runtime_m()->traceback = 2;
+ type = (Type*)(typ & ~3);
+ name = typeinfoname(typ & 3);
+ if(type == nil)
+ runtime_printf("tracealloc(%p, %p, %s)\n", p, size, name);
+ else
+ runtime_printf("tracealloc(%p, %p, %s of %S)\n", p, size, name, *type->__reflection);
+ if(runtime_m()->curg == nil || runtime_g() == runtime_m()->curg) {
+ runtime_goroutineheader(runtime_g());
+ runtime_traceback();
+ } else {
+ runtime_goroutineheader(runtime_m()->curg);
+ runtime_traceback();
+ }
+ runtime_printf("\n");
+ runtime_m()->traceback = 0;
+ runtime_unlock(&tracelock);
+}
+
+void
+runtime_tracefree(void *p, uintptr size)
+{
+ runtime_lock(&tracelock);
+ runtime_m()->traceback = 2;
+ runtime_printf("tracefree(%p, %p)\n", p, size);
+ runtime_goroutineheader(runtime_g());
+ runtime_traceback();
+ runtime_printf("\n");
+ runtime_m()->traceback = 0;
+ runtime_unlock(&tracelock);
+}
+
+void
+runtime_tracegc(void)
+{
+ runtime_lock(&tracelock);
+ runtime_m()->traceback = 2;
+ runtime_printf("tracegc()\n");
+ // running on m->g0 stack; show all non-g0 goroutines
+ runtime_tracebackothers(runtime_g());
+ runtime_printf("end tracegc\n");
+ runtime_printf("\n");
+ runtime_m()->traceback = 0;
+ runtime_unlock(&tracelock);
+}
diff --git a/libgo/runtime/netpoll.goc b/libgo/runtime/netpoll.goc
index 15dd58c..5308e01 100644
--- a/libgo/runtime/netpoll.goc
+++ b/libgo/runtime/netpoll.goc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
+// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
package net
@@ -63,6 +63,7 @@ struct PollDesc
G* wg; // READY, WAIT, G waiting for write or nil
Timer wt; // write deadline timer
int64 wd; // write deadline
+ void* user; // user settable cookie
};
static struct
@@ -88,6 +89,11 @@ static FuncVal deadlineFn = {(void(*)(void))deadline};
static FuncVal readDeadlineFn = {(void(*)(void))readDeadline};
static FuncVal writeDeadlineFn = {(void(*)(void))writeDeadline};
+// runtimeNano returns the current value of the runtime clock in nanoseconds.
+func runtimeNano() (ns int64) {
+ ns = runtime_nanotime();
+}
+
func runtime_pollServerInit() {
runtime_netpollinit();
}
@@ -141,7 +147,7 @@ func runtime_pollWait(pd *PollDesc, mode int) (err int) {
if(err == 0) {
 // For now, only Solaris uses level-triggered IO.
if(Solaris)
- runtime_netpollarm(pd->fd, mode);
+ runtime_netpollarm(pd, mode);
while(!netpollblock(pd, mode, false)) {
err = checkerr(pd, mode);
if(err != 0)
@@ -256,6 +262,30 @@ runtime_netpollfd(PollDesc *pd)
return pd->fd;
}
+void**
+runtime_netpolluser(PollDesc *pd)
+{
+ return &pd->user;
+}
+
+bool
+runtime_netpollclosing(PollDesc *pd)
+{
+ return pd->closing;
+}
+
+void
+runtime_netpolllock(PollDesc *pd)
+{
+ runtime_lock(pd);
+}
+
+void
+runtime_netpollunlock(PollDesc *pd)
+{
+ runtime_unlock(pd);
+}
+
 // make pd ready; newly runnable goroutines (if any) are enqueued into the gpp list
void
runtime_netpollready(G **gpp, PollDesc *pd, int32 mode)
diff --git a/libgo/runtime/netpoll_epoll.c b/libgo/runtime/netpoll_epoll.c
index fe534c9..1281f45 100644
--- a/libgo/runtime/netpoll_epoll.c
+++ b/libgo/runtime/netpoll_epoll.c
@@ -117,9 +117,9 @@ runtime_netpollclose(uintptr fd)
}
void
-runtime_netpollarm(uintptr fd, int32 mode)
+runtime_netpollarm(PollDesc* pd, int32 mode)
{
- USED(fd);
+ USED(pd);
USED(mode);
runtime_throw("unused");
}
diff --git a/libgo/runtime/netpoll_kqueue.c b/libgo/runtime/netpoll_kqueue.c
index bc38644..5144a87 100644
--- a/libgo/runtime/netpoll_kqueue.c
+++ b/libgo/runtime/netpoll_kqueue.c
@@ -60,9 +60,9 @@ runtime_netpollclose(uintptr fd)
}
void
-runtime_netpollarm(uintptr fd, int32 mode)
+runtime_netpollarm(PollDesc* pd, int32 mode)
{
- USED(fd, mode);
+ USED(pd, mode);
runtime_throw("unused");
}
diff --git a/libgo/runtime/panic.c b/libgo/runtime/panic.c
index 78d4dd9..6a5d007 100644
--- a/libgo/runtime/panic.c
+++ b/libgo/runtime/panic.c
@@ -154,6 +154,30 @@ runtime_dopanic(int32 unused __attribute__ ((unused)))
runtime_exit(2);
}
+bool
+runtime_canpanic(G *gp)
+{
+ M *m = runtime_m();
+ byte g;
+
+ USED(&g); // don't use global g, it points to gsignal
+
+ // Is it okay for gp to panic instead of crashing the program?
+ // Yes, as long as it is running Go code, not runtime code,
+ // and not stuck in a system call.
+ if(gp == nil || gp != m->curg)
+ return false;
+ if(m->locks-m->softfloat != 0 || m->mallocing != 0 || m->throwing != 0 || m->gcing != 0 || m->dying != 0)
+ return false;
+ if(gp->status != Grunning)
+ return false;
+#ifdef GOOS_windows
+ if(m->libcallsp != 0)
+ return false;
+#endif
+ return true;
+}
+
void
runtime_throw(const char *s)
{
@@ -182,6 +206,10 @@ runtime_panicstring(const char *s)
runtime_printf("panic: %s\n", s);
runtime_throw("panic during gc");
}
+ if(runtime_m()->locks) {
+ runtime_printf("panic: %s\n", s);
+ runtime_throw("panic holding locks");
+ }
runtime_newErrorCString(s, &err);
runtime_panic(err);
}
@@ -194,3 +222,9 @@ runtime_Goexit(void)
rundefer();
runtime_goexit();
}
+
+void
+runtime_panicdivide(void)
+{
+ runtime_panicstring("integer divide by zero");
+}
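runtime_canpanic gates whether a fault may be converted into a recoverable panic rather than a crash. A sketch of an assumed call site in a signal handler (the helper and the panic message are assumptions, not part of the patch):

	static void
	on_fault(G *gp)
	{
		if(gp != nil && gp->paniconfault && runtime_canpanic(gp))
			runtime_panicstring("invalid memory address or nil pointer dereference");
		runtime_throw("unexpected fault");
	}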
diff --git a/libgo/runtime/print.c b/libgo/runtime/print.c
index f602e9a..ae7e740 100644
--- a/libgo/runtime/print.c
+++ b/libgo/runtime/print.c
@@ -91,6 +91,25 @@ runtime_printf(const char *s, ...)
va_end(va);
}
+int32
+runtime_snprintf(byte *buf, int32 n, const char *s, ...)
+{
+ G *g = runtime_g();
+ va_list va;
+ int32 m;
+
+ g->writebuf = buf;
+ g->writenbuf = n-1;
+ va_start(va, s);
+ go_vprintf(s, va);
+ va_end(va);
+ *g->writebuf = '\0';
+ m = g->writebuf - buf;
+ g->writenbuf = 0;
+ g->writebuf = nil;
+ return m;
+}
+
// Very simple printf. Only for debugging prints.
// Do not add to this without checking with Rob.
static void
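runtime_snprintf reuses the g->writebuf plumbing behind runtime_printf and reserves one byte for the terminating NUL. An assumed example call (buffer size and format are arbitrary):

	static void
	example(int32 k)
	{
		byte buf[64];
		int32 len;

		len = runtime_snprintf(buf, sizeof buf, "scvg%d: GC forced\n", k);
		USED(len);  // buf now holds a NUL-terminated string of len bytes
	}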
diff --git a/libgo/runtime/proc.c b/libgo/runtime/proc.c
index 8e32f78a2..363cc19 100644
--- a/libgo/runtime/proc.c
+++ b/libgo/runtime/proc.c
@@ -486,6 +486,7 @@ runtime_schedinit(void)
runtime_sched.maxmcount = 10000;
runtime_precisestack = 0;
+ // runtime_symtabinit();
runtime_mallocinit();
mcommoninit(m);
@@ -494,6 +495,10 @@ runtime_schedinit(void)
// in a fault during a garbage collection, it will not
// need to allocated memory.
runtime_newErrorCString(0, &i);
+
+ // Initialize the cached gotraceback value, since
+ // gotraceback calls getenv, which mallocs on Plan 9.
+ runtime_gotraceback(nil);
runtime_goargs();
runtime_goenvs();
@@ -526,6 +531,15 @@ initDone(void *arg __attribute__ ((unused))) {
};
// The main goroutine.
+// Note: C frames in general are not copyable during stack growth, for two reasons:
+// 1) We don't know where in a frame to find pointers to other stack locations.
+// 2) There's no guarantee that globals or heap values do not point into the frame.
+//
+// The C frame for runtime.main is copyable, because:
+// 1) There are no pointers to other stack locations in the frame
+// (d.fn points at a global, d.link is nil, d.argp is -1).
+// 2) The only pointer into this frame is from the defer chain,
+// which is explicitly handled during stack copying.
void
runtime_main(void* dummy __attribute__((unused)))
{
@@ -1094,6 +1108,22 @@ runtime_allocm(P *p, int32 stacksize, byte** ret_g0_stack, size_t* ret_g0_stacks
return mp;
}
+static G*
+allocg(void)
+{
+ G *gp;
+ // static Type *gtype;
+
+ // if(gtype == nil) {
+ // Eface e;
+ // runtime_gc_g_ptr(&e);
+ // gtype = ((PtrType*)e.__type_descriptor)->__element_type;
+ // }
+ // gp = runtime_cnew(gtype);
+ gp = runtime_malloc(sizeof(G));
+ return gp;
+}
+
static M* lockextra(bool nilokay);
static void unlockextra(M*);
@@ -1587,6 +1617,8 @@ top:
gcstopm();
goto top;
}
+ if(runtime_fingwait && runtime_fingwake && (gp = runtime_wakefing()) != nil)
+ runtime_ready(gp);
// local runq
gp = runqget(m->p);
if(gp)
@@ -1783,6 +1815,8 @@ top:
void
runtime_park(bool(*unlockf)(G*, void*), void *lock, const char *reason)
{
+ if(g->status != Grunning)
+ runtime_throw("bad g status");
m->waitlock = lock;
m->waitunlockf = unlockf;
g->waitreason = reason;
@@ -1834,6 +1868,8 @@ park0(G *gp)
void
runtime_gosched(void)
{
+ if(g->status != Grunning)
+ runtime_throw("bad g status");
runtime_mcall(runtime_gosched0);
}
@@ -1861,6 +1897,8 @@ runtime_gosched0(G *gp)
void
runtime_goexit(void)
{
+ if(g->status != Grunning)
+ runtime_throw("bad g status");
if(raceenabled)
runtime_racegoend();
runtime_mcall(goexit0);
@@ -1874,6 +1912,13 @@ goexit0(G *gp)
gp->entry = nil;
gp->m = nil;
gp->lockedm = nil;
+ gp->paniconfault = 0;
+ gp->defer = nil; // should be true already but just in case.
+ gp->panic = nil; // non-nil for Goexit during panic. points at stack-allocated data.
+ gp->writenbuf = 0;
+ gp->writebuf = nil;
+ gp->waitreason = nil;
+ gp->param = nil;
m->curg = nil;
m->lockedg = nil;
if(m->locked & ~LockExternal) {
@@ -2122,8 +2167,8 @@ syscall_runtime_BeforeFork(void)
{
// Fork can hang if preempted with signals frequently enough (see issue 5517).
// Ensure that we stay on the same M where we disable profiling.
- m->locks++;
- if(m->profilehz != 0)
+ runtime_m()->locks++;
+ if(runtime_m()->profilehz != 0)
runtime_resetcpuprofiler(0);
}
@@ -2138,7 +2183,7 @@ syscall_runtime_AfterFork(void)
hz = runtime_sched.profilehz;
if(hz != 0)
runtime_resetcpuprofiler(hz);
- m->locks--;
+ runtime_m()->locks--;
}
// Allocate a new g, with a stack big enough for stacksize bytes.
@@ -2147,7 +2192,7 @@ runtime_malg(int32 stacksize, byte** ret_stack, size_t* ret_stacksize)
{
G *newg;
- newg = runtime_malloc(sizeof(G));
+ newg = allocg();
if(stacksize >= 0) {
#if USING_SPLIT_STACK
int dont_block_signals = 0;
@@ -2204,6 +2249,10 @@ __go_go(void (*fn)(void*), void* arg)
P *p;
//runtime_printf("newproc1 %p %p narg=%d nret=%d\n", fn->fn, argp, narg, nret);
+ if(fn == nil) {
+ m->throwing = -1; // do not dump full stacks
+ runtime_throw("go of nil func value");
+ }
m->locks++; // disable preemption because it can be holding p in a local var
p = m->p;
@@ -2510,14 +2559,14 @@ runtime_sigprof()
if(mp == nil)
return;
+ // Profiling runs concurrently with GC, so it must not allocate.
+ mp->mallocing++;
+
traceback = true;
if(mp->mcache == nil)
traceback = false;
- // Profiling runs concurrently with GC, so it must not allocate.
- mp->mallocing++;
-
runtime_lock(&prof);
if(prof.fn == nil) {
runtime_unlock(&prof);
@@ -2765,7 +2814,7 @@ checkdead(void)
}
runtime_unlock(&allglock);
if(grunning == 0) // possible if main goroutine calls runtime_Goexit()
- runtime_exit(0);
+ runtime_throw("no goroutines (main called runtime.Goexit) - deadlock!");
m->throwing = -1; // do not dump full stacks
runtime_throw("all goroutines are asleep - deadlock!");
}
diff --git a/libgo/runtime/race.h b/libgo/runtime/race.h
index e84c5d4..0f4718a 100644
--- a/libgo/runtime/race.h
+++ b/libgo/runtime/race.h
@@ -17,7 +17,6 @@ void runtime_racefini(void);
void runtime_racemapshadow(void *addr, uintptr size);
void runtime_racemalloc(void *p, uintptr sz);
-void runtime_racefree(void *p);
uintptr runtime_racegostart(void *pc);
void runtime_racegoend(void);
void runtime_racewritepc(void *addr, void *callpc, void *pc);
diff --git a/libgo/runtime/rdebug.goc b/libgo/runtime/rdebug.goc
index 230e8fa..63eb4dd 100644
--- a/libgo/runtime/rdebug.goc
+++ b/libgo/runtime/rdebug.goc
@@ -19,3 +19,8 @@ func setGCPercent(in int) (out int) {
func setMaxThreads(in int) (out int) {
out = runtime_setmaxthreads(in);
}
+
+func SetPanicOnFault(enabled bool) (old bool) {
+ old = runtime_g()->paniconfault;
+ runtime_g()->paniconfault = enabled;
+}
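SetPanicOnFault only flips the per-goroutine paniconfault flag (added to struct G in the runtime.h hunk below) and returns the previous value, so callers can scope the setting. A save/restore sketch in the runtime's own terms (hypothetical call site):

	bool old;

	old = runtime_g()->paniconfault;
	runtime_g()->paniconfault = true;   // unexpected faults now panic
	// ... code that may touch unmapped memory ...
	runtime_g()->paniconfault = old;    // restore the caller's setting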
diff --git a/libgo/runtime/runtime.c b/libgo/runtime/runtime.c
index 1678a45..33fe1e7 100644
--- a/libgo/runtime/runtime.c
+++ b/libgo/runtime/runtime.c
@@ -8,8 +8,20 @@
#include "config.h"
#include "runtime.h"
+#include "arch.h"
#include "array.h"
+enum {
+ maxround = sizeof(uintptr),
+};
+
+// Keep a cached value to make gotraceback fast,
+// since we call it on every call to gentraceback.
+// The cached value is a uint32 in which the low bit
+// is the "crash" setting and the top 31 bits are the
+// gotraceback value.
+static uint32 traceback_cache = ~(uint32)0;
+
// The GOTRACEBACK environment variable controls the
// behavior of a Go program that is crashing and exiting.
// GOTRACEBACK=0 suppress all tracebacks
@@ -20,18 +32,28 @@ int32
runtime_gotraceback(bool *crash)
{
const byte *p;
+ uint32 x;
if(crash != nil)
*crash = false;
- p = runtime_getenv("GOTRACEBACK");
- if(p == nil || p[0] == '\0')
- return 1; // default is on
- if(runtime_strcmp((const char *)p, "crash") == 0) {
- if(crash != nil)
- *crash = true;
- return 2; // extra information
+ if(runtime_m()->traceback != 0)
+ return runtime_m()->traceback;
+ x = runtime_atomicload(&traceback_cache);
+ if(x == ~(uint32)0) {
+ p = runtime_getenv("GOTRACEBACK");
+ if(p == nil)
+ p = (const byte*)"";
+ if(p[0] == '\0')
+ x = 1<<1;
+ else if(runtime_strcmp((const char *)p, "crash") == 0)
+ x = (2<<1) | 1;
+ else
+ x = runtime_atoi(p)<<1;
+ runtime_atomicstore(&traceback_cache, x);
}
- return runtime_atoi(p);
+ if(crash != nil)
+ *crash = x&1;
+ return x>>1;
}
static int32 argc;
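The cache packs both GOTRACEBACK settings into one word: bit 0 is the crash flag, bits 1-31 hold the traceback level, and ~(uint32)0 marks "not yet read"; a per-M override (m->traceback) short-circuits the cache entirely. Worked encodings, following the code above:

	GOTRACEBACK unset  -> x = 1<<1       = 2  (level 1, no crash)
	GOTRACEBACK=2      -> x = 2<<1       = 4  (level 2, no crash)
	GOTRACEBACK=crash  -> x = (2<<1) | 1 = 5  (level 2, crash)

so x>>1 recovers the level and x&1 the crash bit.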
@@ -90,6 +112,8 @@ runtime_goenvs_unix(void)
syscall_Envs.__values = (void*)s;
syscall_Envs.__count = n;
syscall_Envs.__capacity = n;
+
+ traceback_cache = ~(uint32)0;
}
int32
@@ -275,6 +299,7 @@ static struct {
{"allocfreetrace", &runtime_debug.allocfreetrace},
{"efence", &runtime_debug.efence},
{"gctrace", &runtime_debug.gctrace},
+ {"gcdead", &runtime_debug.gcdead},
{"scheddetail", &runtime_debug.scheddetail},
{"schedtrace", &runtime_debug.schedtrace},
};
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index 6bd53a8..8fc10ff 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -49,6 +49,8 @@ typedef unsigned int uintptr __attribute__ ((mode (pointer)));
typedef intptr intgo; // Go's int
typedef uintptr uintgo; // Go's uint
+typedef uintptr uintreg;
+
/* Defined types. */
typedef uint8 bool;
@@ -216,6 +218,7 @@ struct G
bool ispanic;
bool issystem; // do not output in stack dump
bool isbackground; // ignore in deadlock detector
+ bool paniconfault; // panic (instead of crash) on unexpected fault address
M* m; // for debuggers, but offset not hard-coded
M* lockedm;
int32 sig;
@@ -251,6 +254,7 @@ struct M
int32 throwing;
int32 gcing;
int32 locks;
+ int32 softfloat;
int32 dying;
int32 profilehz;
int32 helpgc;
@@ -272,15 +276,11 @@ struct M
uint32 waitsemacount;
uint32 waitsemalock;
GCStats gcstats;
- bool racecall;
bool needextram;
bool dropextram; // for gccgo: drop after call is done.
+ uint8 traceback;
bool (*waitunlockf)(G*, void*);
void* waitlock;
-
- uintptr settype_buf[1024];
- uintptr settype_bufsize;
-
uintptr end[];
};
@@ -340,6 +340,7 @@ enum
SigDefault = 1<<4, // if the signal isn't explicitly requested, don't monitor it
SigHandling = 1<<5, // our signal handler is registered
SigIgnored = 1<<6, // the signal was ignored before we registered for it
+ SigGoExit = 1<<7, // cause all runtime procs to exit (only used on Plan 9).
};
// Layout of in-memory per-function information prepared by linker
@@ -352,6 +353,16 @@ struct Func
uintptr entry; // entry pc
};
+#ifdef GOOS_nacl
+enum {
+ NaCl = 1,
+};
+#else
+enum {
+ NaCl = 0,
+};
+#endif
+
#ifdef GOOS_windows
enum {
Windows = 1
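Like the existing GOOS_windows block just above, this turns a build-time property into an ordinary C constant, so code can branch with if(NaCl) instead of #ifdef and the compiler discards the dead arm. Hypothetical use:

	if(NaCl) {
		// NaCl-specific path; compiled away entirely when NaCl == 0
		runtime_throw("not supported on NaCl");
	}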
@@ -385,6 +396,8 @@ struct Timers
// Package time knows the layout of this structure.
// If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
+// For GOOS=nacl, package syscall knows the layout of this structure.
+// If this struct changes, adjust ../syscall/net_nacl.go:/runtimeTimer.
struct Timer
{
int32 i; // heap index
@@ -441,11 +454,13 @@ struct DebugVars
int32 allocfreetrace;
int32 efence;
int32 gctrace;
+ int32 gcdead;
int32 scheddetail;
int32 schedtrace;
};
extern bool runtime_precisestack;
+extern bool runtime_copystack;
/*
* defined macros
@@ -490,6 +505,7 @@ extern uint32 runtime_panicking;
extern int8* runtime_goos;
extern int32 runtime_ncpu;
extern void (*runtime_sysargs)(int32, uint8**);
+extern uint32 runtime_Hchansize;
extern DebugVars runtime_debug;
extern uintptr runtime_maxstacksize;
@@ -497,6 +513,7 @@ extern uintptr runtime_maxstacksize;
* common functions and data
*/
#define runtime_strcmp(s1, s2) __builtin_strcmp((s1), (s2))
+#define runtime_strncmp(s1, s2, n) __builtin_strncmp((s1), (s2), (n))
#define runtime_strstr(s1, s2) __builtin_strstr((s1), (s2))
intgo runtime_findnull(const byte*);
intgo runtime_findnullw(const uint16*);
@@ -511,8 +528,10 @@ void runtime_goenvs(void);
void runtime_goenvs_unix(void);
void runtime_throw(const char*) __attribute__ ((noreturn));
void runtime_panicstring(const char*) __attribute__ ((noreturn));
+bool runtime_canpanic(G*);
void runtime_prints(const char*);
void runtime_printf(const char*, ...);
+int32 runtime_snprintf(byte*, int32, const char*, ...);
#define runtime_mcmp(a, b, s) __builtin_memcmp((a), (b), (s))
#define runtime_memmove(a, b, s) __builtin_memmove((a), (b), (s))
void* runtime_mal(uintptr);
@@ -552,6 +571,7 @@ int32 runtime_gcount(void);
void runtime_mcall(void(*)(G*));
uint32 runtime_fastrand1(void);
int32 runtime_timediv(int64, int32, int32*);
+int32 runtime_round2(int32 x); // round x up to a power of 2.
// atomic operations
#define runtime_cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
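Only the runtime_round2 prototype is added here. An implementation consistent with its comment would look like this (a sketch, not the patch's code):

	int32
	runtime_round2(int32 x)
	{
		int32 s;

		s = 1;
		while(s < x)
			s <<= 1;
		return s;   // smallest power of 2 >= x
	}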
@@ -590,7 +610,8 @@ G* __go_go(void (*pfn)(void*), void*);
void siginit(void);
bool __go_sigsend(int32 sig);
int32 runtime_callers(int32, Location*, int32);
-int64 runtime_nanotime(void);
+int64 runtime_nanotime(void); // monotonic time
+int64 runtime_unixnanotime(void); // real time, can skip
void runtime_dopanic(int32) __attribute__ ((noreturn));
void runtime_startpanic(void);
void runtime_freezetheworld(void);
@@ -611,12 +632,18 @@ int32 runtime_netpollopen(uintptr, PollDesc*);
int32 runtime_netpollclose(uintptr);
void runtime_netpollready(G**, PollDesc*, int32);
uintptr runtime_netpollfd(PollDesc*);
-void runtime_netpollarm(uintptr, int32);
+void runtime_netpollarm(PollDesc*, int32);
+void** runtime_netpolluser(PollDesc*);
+bool runtime_netpollclosing(PollDesc*);
+void runtime_netpolllock(PollDesc*);
+void runtime_netpollunlock(PollDesc*);
void runtime_crash(void);
void runtime_parsedebugvars(void);
void _rt0_go(void);
void* runtime_funcdata(Func*, int32);
int32 runtime_setmaxthreads(int32);
+G* runtime_timejump(void);
+void runtime_iterate_finq(void (*callback)(FuncVal*, void*, const FuncType*, const PtrType*));
void runtime_stoptheworld(void);
void runtime_starttheworld(void);
@@ -814,3 +841,12 @@ bool runtime_gcwaiting(void);
void runtime_badsignal(int);
Defer* runtime_newdefer(void);
void runtime_freedefer(Defer*);
+
+struct time_now_ret
+{
+ int64_t sec;
+ int32_t nsec;
+};
+
+struct time_now_ret now() __asm__ (GOSYM_PREFIX "time.now")
+ __attribute__ ((no_split_stack));
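The __asm__ name binds this C declaration to the symbol time.now, and no_split_stack keeps it callable from contexts that must not grow the stack. The same pattern can bind any cross-package symbol into the C runtime, e.g. (hypothetical declaration, not in this patch):

	int64 time_runtimeNano(void)
	  __asm__ (GOSYM_PREFIX "time.runtimeNano");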
diff --git a/libgo/runtime/time.goc b/libgo/runtime/time.goc
index 13ce41f..220629b 100644
--- a/libgo/runtime/time.goc
+++ b/libgo/runtime/time.goc
@@ -6,6 +6,8 @@
package time
+#include <sys/time.h>
+
#include "runtime.h"
#include "defs.h"
#include "arch.h"
@@ -20,11 +22,19 @@ static Timers timers;
static void addtimer(Timer*);
static void dumptimers(const char*);
+// nacl fake time support.
+int64 runtime_timens;
+
// Package time APIs.
// Godoc uses the comments in package time, not these.
// time.now is implemented in assembly.
+// runtimeNano returns the current value of the runtime clock in nanoseconds.
+func runtimeNano() (ns int64) {
+ ns = runtime_nanotime();
+}
+
// Sleep puts the current goroutine to sleep for at least ns nanoseconds.
func Sleep(ns int64) {
runtime_tsleep(ns, "sleep");
@@ -45,6 +55,14 @@ func stopTimer(t *Timer) (stopped bool) {
// C runtime.
+int64 runtime_unixnanotime(void)
+{
+ struct time_now_ret r;
+
+ r = now();
+ return r.sec*1000000000 + r.nsec;
+}
+
static void timerproc(void*);
static void siftup(int32);
static void siftdown(int32);
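runtime_unixnanotime flattens the (sec, nsec) pair from time.now into a single Unix-epoch nanosecond count, complementing the monotonic runtime_nanotime that runtimeNano exposes above. A worked conversion:

	r.sec = 1405756432, r.nsec = 500000000
	-> 1405756432*1000000000 + 500000000 = 1405756432500000000

Because it tracks the wall clock, the result can jump when the clock is reset, which is why the runtime.h declaration marks it "can skip".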
@@ -187,6 +205,7 @@ timerproc(void* dummy __attribute__ ((unused)))
{
int64 delta, now;
Timer *t;
+ FuncVal *fv;
void (*f)(int64, Eface);
Eface arg;
@@ -214,12 +233,13 @@ timerproc(void* dummy __attribute__ ((unused)))
siftdown(0);
t->i = -1; // mark as removed
}
+ fv = t->fv;
f = (void*)t->fv->fn;
arg = t->arg;
runtime_unlock(&timers);
if(raceenabled)
runtime_raceacquire(t);
- __go_set_closure(t->fv);
+ __go_set_closure(fv);
f(now, arg);
// clear f and arg to avoid leak while sleeping for next timer
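Snapshotting fv while the timers lock is still held matters because, after runtime_unlock(&timers), another goroutine may stop or reuse the timer, so t->fv must not be re-read. Old versus new sequence, as a sketch:

	// before: re-reads t->fv after unlock (racy)
	runtime_unlock(&timers);
	__go_set_closure(t->fv);

	// after: snapshot under the lock, use the copy
	fv = t->fv;
	runtime_unlock(&timers);
	__go_set_closure(fv);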
@@ -234,7 +254,9 @@ timerproc(void* dummy __attribute__ ((unused)))
if(delta < 0) {
// No timers left - put goroutine to sleep.
timers.rescheduling = true;
+ runtime_g()->isbackground = true;
runtime_parkunlock(&timers, "timer goroutine (idle)");
+ runtime_g()->isbackground = false;
continue;
}
// At least one timer pending. Sleep until then.
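The isbackground flag ("ignore in deadlock detector", per its declaration in runtime.h above) is set only for the duration of the idle park and cleared on wakeup, so a program whose only remaining sleeper is the idle timer goroutine is still reported as deadlocked rather than being kept alive by runtime bookkeeping. The pattern for any long-lived runtime helper (hypothetical lock and reason):

	runtime_g()->isbackground = true;
	runtime_parkunlock(&somelock, "helper (idle)");
	runtime_g()->isbackground = false;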