about summary refs log tree commit diff
path: root/libgo/runtime
diff options
context:
space:
mode:
author    Ian Lance Taylor <ian@gcc.gnu.org>  2016-10-14 22:51:46 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>  2016-10-14 22:51:46 +0000
commit    1f0be9ee86f63bac9c4541a9cfaf52cb5ae5e89a (patch)
tree      584ab0cd64a2743fa7198ca34c7b13282c1c0ad7 /libgo/runtime
parent    2045acd902fd8028514a72c58c98dba11749b8ad (diff)
download  gcc-1f0be9ee86f63bac9c4541a9cfaf52cb5ae5e89a.zip
          gcc-1f0be9ee86f63bac9c4541a9cfaf52cb5ae5e89a.tar.gz
          gcc-1f0be9ee86f63bac9c4541a9cfaf52cb5ae5e89a.tar.bz2
runtime: copy mprof code from Go 1.7 runtime
Also create a gccgo version of some of the traceback code in traceback_gccgo.go, replacing some code currently in C. This required modifying the compiler so that when compiling the runtime package a slice expression does not cause a local array variable to escape to the heap.

Reviewed-on: https://go-review.googlesource.com/31230

From-SVN: r241189
Diffstat (limited to 'libgo/runtime')
-rw-r--r--  libgo/runtime/go-traceback.c  |  37
-rw-r--r--  libgo/runtime/malloc.h        |  34
-rw-r--r--  libgo/runtime/mgc0.c          |   1
-rw-r--r--  libgo/runtime/mprof.goc       | 564
-rw-r--r--  libgo/runtime/proc.c          |  92
-rw-r--r--  libgo/runtime/runtime.c       |  12
-rw-r--r--  libgo/runtime/runtime.h       |  18
7 files changed, 66 insertions, 692 deletions
diff --git a/libgo/runtime/go-traceback.c b/libgo/runtime/go-traceback.c
deleted file mode 100644
index 7b33cca..0000000
--- a/libgo/runtime/go-traceback.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/* go-traceback.c -- stack backtrace for Go.
-
- Copyright 2012 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "config.h"
-
-#include "runtime.h"
-
-/* Print a stack trace for the current goroutine. */
-
-void
-runtime_traceback ()
-{
- Location locbuf[100];
- int32 c;
-
- c = runtime_callers (1, locbuf, nelem (locbuf), false);
- runtime_printtrace (locbuf, c, true);
-}
-
-void
-runtime_printtrace (Location *locbuf, int32 c, bool current)
-{
- int32 i;
-
- for (i = 0; i < c; ++i)
- {
- if (runtime_showframe (locbuf[i].function, current))
- {
- runtime_printf ("%S\n", locbuf[i].function);
- runtime_printf ("\t%S:%D\n", locbuf[i].filename,
- (int64) locbuf[i].lineno);
- }
- }
-}
diff --git a/libgo/runtime/malloc.h b/libgo/runtime/malloc.h
index 12a25b57..011eaa9 100644
--- a/libgo/runtime/malloc.h
+++ b/libgo/runtime/malloc.h
@@ -303,7 +303,7 @@ struct SpecialFinalizer
};
// The described object is being heap profiled.
-typedef struct Bucket Bucket; // from mprof.goc
+typedef struct bucket Bucket; // from mprof.go
typedef struct SpecialProfile SpecialProfile;
struct SpecialProfile
{
@@ -414,7 +414,8 @@ void runtime_MHeap_Scavenger(void*);
void runtime_MHeap_SplitSpan(MHeap *h, MSpan *s);
void* runtime_mallocgc(uintptr size, uintptr typ, uint32 flag);
-void* runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat);
+void* runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat)
+ __asm__(GOSYM_PREFIX "runtime.persistentalloc");
int32 runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **s);
void runtime_gc(int32 force);
uintptr runtime_sweepone(void);
@@ -428,12 +429,15 @@ void runtime_markspan(void *v, uintptr size, uintptr n, bool leftover);
void runtime_unmarkspan(void *v, uintptr size);
void runtime_purgecachedstats(MCache*);
void* runtime_cnew(const Type*)
- __asm__(GOSYM_PREFIX "runtime.newobject");
+ __asm__(GOSYM_PREFIX "runtime.newobject");
void* runtime_cnewarray(const Type*, intgo)
- __asm__(GOSYM_PREFIX "runtime.newarray");
-void runtime_tracealloc(void*, uintptr, uintptr);
-void runtime_tracefree(void*, uintptr);
-void runtime_tracegc(void);
+ __asm__(GOSYM_PREFIX "runtime.newarray");
+void runtime_tracealloc(void*, uintptr, uintptr)
+ __asm__ (GOSYM_PREFIX "runtime.tracealloc");
+void runtime_tracefree(void*, uintptr)
+ __asm__ (GOSYM_PREFIX "runtime.tracefree");
+void runtime_tracegc(void)
+ __asm__ (GOSYM_PREFIX "runtime.tracegc");
uintptr runtime_gettype(void*);
@@ -455,10 +459,14 @@ struct Obj
uintptr ti; // type info
};
-void runtime_MProf_Malloc(void*, uintptr);
-void runtime_MProf_Free(Bucket*, uintptr, bool);
-void runtime_MProf_GC(void);
-void runtime_iterate_memprof(void (*callback)(Bucket*, uintptr, Location*, uintptr, uintptr, uintptr));
+void runtime_MProf_Malloc(void*, uintptr)
+ __asm__ (GOSYM_PREFIX "runtime.mProf_Malloc");
+void runtime_MProf_Free(Bucket*, uintptr, bool)
+ __asm__ (GOSYM_PREFIX "runtime.mProf_Free");
+void runtime_MProf_GC(void)
+ __asm__ (GOSYM_PREFIX "runtime.mProf_GC");
+void runtime_iterate_memprof(void (*callback)(Bucket*, uintptr, Location*, uintptr, uintptr, uintptr))
+ __asm__ (GOSYM_PREFIX "runtime.iterate_memprof");
int32 runtime_gcprocs(void);
void runtime_helpgc(int32 nproc);
void runtime_gchelper(void);
@@ -467,7 +475,8 @@ G* runtime_wakefing(void);
extern bool runtime_fingwait;
extern bool runtime_fingwake;
-void runtime_setprofilebucket(void *p, Bucket *b);
+void runtime_setprofilebucket(void *p, Bucket *b)
+ __asm__ (GOSYM_PREFIX "runtime.setprofilebucket");
struct __go_func_type;
struct __go_ptr_type;
@@ -533,7 +542,6 @@ int32 runtime_setgcpercent(int32);
#define PoisonStack ((uintptr)0x6868686868686868ULL)
struct Workbuf;
-void runtime_MProf_Mark(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void runtime_proc_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void runtime_time_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void runtime_netpoll_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
diff --git a/libgo/runtime/mgc0.c b/libgo/runtime/mgc0.c
index 4877678..2c8e5a8 100644
--- a/libgo/runtime/mgc0.c
+++ b/libgo/runtime/mgc0.c
@@ -1277,7 +1277,6 @@ markroot(ParFor *desc, uint32 i)
enqueue1(&wbuf, (Obj){(byte*)&runtime_allp, sizeof runtime_allp, 0});
enqueue1(&wbuf, (Obj){(byte*)&work, sizeof work, 0});
runtime_proc_scan(&wbuf, enqueue1);
- runtime_MProf_Mark(&wbuf, enqueue1);
runtime_time_scan(&wbuf, enqueue1);
runtime_netpoll_scan(&wbuf, enqueue1);
break;
diff --git a/libgo/runtime/mprof.goc b/libgo/runtime/mprof.goc
deleted file mode 100644
index c4966a4..0000000
--- a/libgo/runtime/mprof.goc
+++ /dev/null
@@ -1,564 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Malloc profiling.
-// Patterned after tcmalloc's algorithms; shorter code.
-
-package runtime
-#include "runtime.h"
-#include "arch.h"
-#include "malloc.h"
-#include "defs.h"
-#include "go-type.h"
-#include "go-string.h"
-
-// NOTE(rsc): Everything here could use cas if contention became an issue.
-static Lock proflock;
-
-// All memory allocations are local and do not escape outside of the profiler.
-// The profiler is forbidden from referring to garbage-collected memory.
-
-enum { MProf, BProf }; // profile types
-
-// Per-call-stack profiling information.
-// Lookup by hashing call stack into a linked-list hash table.
-struct Bucket
-{
- Bucket *next; // next in hash list
- Bucket *allnext; // next in list of all mbuckets/bbuckets
- int32 typ;
- // Generally unions can break precise GC,
- // this one is fine because it does not contain pointers.
- union
- {
- struct // typ == MProf
- {
- // The following complex 3-stage scheme of stats accumulation
- // is required to obtain a consistent picture of mallocs and frees
- // for some point in time.
- // The problem is that mallocs come in real time, while frees
- // come only after a GC during concurrent sweeping. So if we would
- // naively count them, we would get a skew toward mallocs.
- //
- // Mallocs are accounted in recent stats.
- // Explicit frees are accounted in recent stats.
- // GC frees are accounted in prev stats.
- // After GC prev stats are added to final stats and
- // recent stats are moved into prev stats.
- uintptr allocs;
- uintptr frees;
- uintptr alloc_bytes;
- uintptr free_bytes;
-
- uintptr prev_allocs; // since last but one till last gc
- uintptr prev_frees;
- uintptr prev_alloc_bytes;
- uintptr prev_free_bytes;
-
- uintptr recent_allocs; // since last gc till now
- uintptr recent_frees;
- uintptr recent_alloc_bytes;
- uintptr recent_free_bytes;
-
- };
- struct // typ == BProf
- {
- int64 count;
- int64 cycles;
- };
- };
- uintptr hash; // hash of size + stk
- uintptr size;
- uintptr nstk;
- Location stk[1];
-};
-enum {
- BuckHashSize = 179999,
-};
-static Bucket **buckhash;
-static Bucket *mbuckets; // memory profile buckets
-static Bucket *bbuckets; // blocking profile buckets
-static uintptr bucketmem;
-
-// Return the bucket for stk[0:nstk], allocating new bucket if needed.
-static Bucket*
-stkbucket(int32 typ, uintptr size, Location *stk, int32 nstk, bool alloc)
-{
- int32 i, j;
- uintptr h;
- Bucket *b;
-
- if(buckhash == nil) {
- buckhash = runtime_SysAlloc(BuckHashSize*sizeof buckhash[0], &mstats()->buckhash_sys);
- if(buckhash == nil)
- runtime_throw("runtime: cannot allocate memory");
- }
-
- // Hash stack.
- h = 0;
- for(i=0; i<nstk; i++) {
- h += stk[i].pc;
- h += h<<10;
- h ^= h>>6;
- }
- // hash in size
- h += size;
- h += h<<10;
- h ^= h>>6;
- // finalize
- h += h<<3;
- h ^= h>>11;
-
- i = h%BuckHashSize;
- for(b = buckhash[i]; b; b=b->next) {
- if(b->typ == typ && b->hash == h && b->size == size && b->nstk == (uintptr)nstk) {
- for(j = 0; j < nstk; j++) {
- if(b->stk[j].pc != stk[j].pc ||
- b->stk[j].lineno != stk[j].lineno ||
- !__go_strings_equal(b->stk[j].filename, stk[j].filename))
- break;
- }
- if (j == nstk)
- return b;
- }
- }
-
- if(!alloc)
- return nil;
-
- b = runtime_persistentalloc(sizeof *b + nstk*sizeof stk[0], 0, &mstats()->buckhash_sys);
- bucketmem += sizeof *b + nstk*sizeof stk[0];
- runtime_memmove(b->stk, stk, nstk*sizeof stk[0]);
- b->typ = typ;
- b->hash = h;
- b->size = size;
- b->nstk = nstk;
- b->next = buckhash[i];
- buckhash[i] = b;
- if(typ == MProf) {
- b->allnext = mbuckets;
- mbuckets = b;
- } else {
- b->allnext = bbuckets;
- bbuckets = b;
- }
- return b;
-}
-
-static void
-MProf_GC(void)
-{
- Bucket *b;
-
- for(b=mbuckets; b; b=b->allnext) {
- b->allocs += b->prev_allocs;
- b->frees += b->prev_frees;
- b->alloc_bytes += b->prev_alloc_bytes;
- b->free_bytes += b->prev_free_bytes;
-
- b->prev_allocs = b->recent_allocs;
- b->prev_frees = b->recent_frees;
- b->prev_alloc_bytes = b->recent_alloc_bytes;
- b->prev_free_bytes = b->recent_free_bytes;
-
- b->recent_allocs = 0;
- b->recent_frees = 0;
- b->recent_alloc_bytes = 0;
- b->recent_free_bytes = 0;
- }
-}
-
-// Record that a gc just happened: all the 'recent' statistics are now real.
-void
-runtime_MProf_GC(void)
-{
- runtime_lock(&proflock);
- MProf_GC();
- runtime_unlock(&proflock);
-}
-
-// Called by malloc to record a profiled block.
-void
-runtime_MProf_Malloc(void *p, uintptr size)
-{
- Location stk[32];
- Bucket *b;
- int32 nstk;
-
- nstk = runtime_callers(1, stk, nelem(stk), false);
- runtime_lock(&proflock);
- b = stkbucket(MProf, size, stk, nstk, true);
- b->recent_allocs++;
- b->recent_alloc_bytes += size;
- runtime_unlock(&proflock);
-
- // Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
- // This reduces potential contention and chances of deadlocks.
- // Since the object must be alive during call to MProf_Malloc,
- // it's fine to do this non-atomically.
- runtime_setprofilebucket(p, b);
-}
-
-// Called when freeing a profiled block.
-void
-runtime_MProf_Free(Bucket *b, uintptr size, bool freed)
-{
- runtime_lock(&proflock);
- if(freed) {
- b->recent_frees++;
- b->recent_free_bytes += size;
- } else {
- b->prev_frees++;
- b->prev_free_bytes += size;
- }
- runtime_unlock(&proflock);
-}
-
-int64 runtime_blockprofilerate; // in CPU ticks
-
-void runtime_SetBlockProfileRate(intgo) __asm__ (GOSYM_PREFIX "runtime.SetBlockProfileRate");
-
-void
-runtime_SetBlockProfileRate(intgo rate)
-{
- int64 r;
-
- if(rate <= 0)
- r = 0; // disable profiling
- else {
- // convert ns to cycles, use float64 to prevent overflow during multiplication
- r = (float64)rate*runtime_tickspersecond()/(1000*1000*1000);
- if(r == 0)
- r = 1;
- }
- runtime_atomicstore64((uint64*)&runtime_blockprofilerate, r);
-}
-
-void
-runtime_blockevent(int64 cycles, int32 skip)
-{
- int32 nstk;
- int64 rate;
- Location stk[32];
- Bucket *b;
-
- if(cycles <= 0)
- return;
- rate = runtime_atomicload64((uint64*)&runtime_blockprofilerate);
- if(rate <= 0 || (rate > cycles && runtime_fastrand1()%rate > cycles))
- return;
-
- nstk = runtime_callers(skip, stk, nelem(stk), false);
- runtime_lock(&proflock);
- b = stkbucket(BProf, 0, stk, nstk, true);
- b->count++;
- b->cycles += cycles;
- runtime_unlock(&proflock);
-}
-
-// Go interface to profile data. (Declared in debug.go)
-
-// Must match MemProfileRecord in debug.go.
-typedef struct Record Record;
-struct Record {
- int64 alloc_bytes, free_bytes;
- int64 alloc_objects, free_objects;
- uintptr stk[32];
-};
-
-// Write b's data to r.
-static void
-record(Record *r, Bucket *b)
-{
- uint32 i;
-
- r->alloc_bytes = b->alloc_bytes;
- r->free_bytes = b->free_bytes;
- r->alloc_objects = b->allocs;
- r->free_objects = b->frees;
- for(i=0; i<b->nstk && i<nelem(r->stk); i++)
- r->stk[i] = b->stk[i].pc;
- for(; i<nelem(r->stk); i++)
- r->stk[i] = 0;
-}
-
-func MemProfile(p Slice, include_inuse_zero bool) (n int, ok bool) {
- Bucket *b;
- Record *r;
- bool clear;
-
- runtime_lock(&proflock);
- n = 0;
- clear = true;
- for(b=mbuckets; b; b=b->allnext) {
- if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
- n++;
- if(b->allocs != 0 || b->frees != 0)
- clear = false;
- }
- if(clear) {
- // Absolutely no data, suggesting that a garbage collection
- // has not yet happened. In order to allow profiling when
- // garbage collection is disabled from the beginning of execution,
- // accumulate stats as if a GC just happened, and recount buckets.
- MProf_GC();
- MProf_GC();
- n = 0;
- for(b=mbuckets; b; b=b->allnext)
- if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
- n++;
- }
- ok = false;
- if(n <= p.__count) {
- ok = true;
- r = (Record*)p.__values;
- for(b=mbuckets; b; b=b->allnext)
- if(include_inuse_zero || b->alloc_bytes != b->free_bytes)
- record(r++, b);
- }
- runtime_unlock(&proflock);
-}
-
-void
-runtime_MProf_Mark(struct Workbuf **wbufp, void (*enqueue1)(struct Workbuf**, Obj))
-{
- // buckhash is not allocated via mallocgc.
- enqueue1(wbufp, (Obj){(byte*)&mbuckets, sizeof mbuckets, 0});
- enqueue1(wbufp, (Obj){(byte*)&bbuckets, sizeof bbuckets, 0});
-}
-
-void
-runtime_iterate_memprof(void (*callback)(Bucket*, uintptr, Location*, uintptr, uintptr, uintptr))
-{
- Bucket *b;
-
- runtime_lock(&proflock);
- for(b=mbuckets; b; b=b->allnext) {
- callback(b, b->nstk, b->stk, b->size, b->allocs, b->frees);
- }
- runtime_unlock(&proflock);
-}
-
-// Must match BlockProfileRecord in debug.go.
-typedef struct BRecord BRecord;
-struct BRecord {
- int64 count;
- int64 cycles;
- uintptr stk[32];
-};
-
-func BlockProfile(p Slice) (n int, ok bool) {
- Bucket *b;
- BRecord *r;
- int32 i;
-
- runtime_lock(&proflock);
- n = 0;
- for(b=bbuckets; b; b=b->allnext)
- n++;
- ok = false;
- if(n <= p.__count) {
- ok = true;
- r = (BRecord*)p.__values;
- for(b=bbuckets; b; b=b->allnext, r++) {
- r->count = b->count;
- r->cycles = b->cycles;
- for(i=0; (uintptr)i<b->nstk && (uintptr)i<nelem(r->stk); i++)
- r->stk[i] = b->stk[i].pc;
- for(; (uintptr)i<nelem(r->stk); i++)
- r->stk[i] = 0;
- }
- }
- runtime_unlock(&proflock);
-}
-
-// Must match StackRecord in debug.go.
-typedef struct TRecord TRecord;
-struct TRecord {
- uintptr stk[32];
-};
-
-func ThreadCreateProfile(p Slice) (n int, ok bool) {
- TRecord *r;
- M *first, *mp;
- int32 i;
-
- first = runtime_atomicloadp(&runtime_allm);
- n = 0;
- for(mp=first; mp; mp=mp->alllink)
- n++;
- ok = false;
- if(n <= p.__count) {
- ok = true;
- r = (TRecord*)p.__values;
- for(mp=first; mp; mp=mp->alllink) {
- for(i = 0; (uintptr)i < nelem(r->stk); i++) {
- r->stk[i] = mp->createstack[i].pc;
- }
- r++;
- }
- }
-}
-
-func Stack(b Slice, all bool) (n int) {
- byte *pc;
- bool enablegc = false;
-
- pc = (byte*)(uintptr)runtime_getcallerpc(&b);
-
- if(all) {
- runtime_acquireWorldsema();
- runtime_m()->gcing = 1;
- runtime_stopTheWorldWithSema();
- enablegc = mstats()->enablegc;
- mstats()->enablegc = false;
- }
-
- if(b.__count == 0)
- n = 0;
- else{
- G* g = runtime_g();
- g->writebuf.__values = b.__values;
- g->writebuf.__count = 0;
- g->writebuf.__capacity = b.__count;
- USED(pc);
- runtime_goroutineheader(g);
- runtime_traceback();
- runtime_printcreatedby(g);
- if(all)
- runtime_tracebackothers(g);
- n = g->writebuf.__count;
- g->writebuf.__values = nil;
- g->writebuf.__count = 0;
- g->writebuf.__capacity = 0;
- }
-
- if(all) {
- runtime_m()->gcing = 0;
- mstats()->enablegc = enablegc;
- runtime_releaseWorldsema();
- runtime_startTheWorldWithSema();
- }
-}
-
-static void
-saveg(G *gp, TRecord *r)
-{
- int32 n, i;
- Location locstk[nelem(r->stk)];
-
- if(gp == runtime_g()) {
- n = runtime_callers(0, locstk, nelem(r->stk), false);
- for(i = 0; i < n; i++)
- r->stk[i] = locstk[i].pc;
- }
- else {
- // FIXME: Not implemented.
- n = 0;
- }
- if((size_t)n < nelem(r->stk))
- r->stk[n] = 0;
-}
-
-func GoroutineProfile(b Slice) (n int, ok bool) {
- uintptr i;
- TRecord *r;
- G *gp;
-
- ok = false;
- n = runtime_gcount();
- if(n <= b.__count) {
- runtime_acquireWorldsema();
- runtime_m()->gcing = 1;
- runtime_stopTheWorldWithSema();
-
- n = runtime_gcount();
- if(n <= b.__count) {
- G* g = runtime_g();
- ok = true;
- r = (TRecord*)b.__values;
- saveg(g, r++);
- for(i = 0; i < runtime_allglen; i++) {
- gp = runtime_allg[i];
- if(gp == g || gp->atomicstatus == _Gdead)
- continue;
- saveg(gp, r++);
- }
- }
-
- runtime_m()->gcing = 0;
- runtime_releaseWorldsema();
- runtime_startTheWorldWithSema();
- }
-}
-
-// Tracing of alloc/free/gc.
-
-static Lock tracelock;
-
-static const char*
-typeinfoname(int32 typeinfo)
-{
- if(typeinfo == TypeInfo_SingleObject)
- return "single object";
- else if(typeinfo == TypeInfo_Array)
- return "array";
- else if(typeinfo == TypeInfo_Chan)
- return "channel";
- runtime_throw("typinfoname: unknown type info");
- return nil;
-}
-
-void
-runtime_tracealloc(void *p, uintptr size, uintptr typ)
-{
- const char *name;
- Type *type;
-
- runtime_lock(&tracelock);
- runtime_m()->traceback = 2;
- type = (Type*)(typ & ~3);
- name = typeinfoname(typ & 3);
- if(type == nil)
- runtime_printf("tracealloc(%p, %p, %s)\n", p, size, name);
- else
- runtime_printf("tracealloc(%p, %p, %s of %S)\n", p, size, name, *type->__reflection);
- if(runtime_m()->curg == nil || runtime_g() == runtime_m()->curg) {
- runtime_goroutineheader(runtime_g());
- runtime_traceback();
- } else {
- runtime_goroutineheader(runtime_m()->curg);
- runtime_traceback();
- }
- runtime_printf("\n");
- runtime_m()->traceback = 0;
- runtime_unlock(&tracelock);
-}
-
-void
-runtime_tracefree(void *p, uintptr size)
-{
- runtime_lock(&tracelock);
- runtime_m()->traceback = 2;
- runtime_printf("tracefree(%p, %p)\n", p, size);
- runtime_goroutineheader(runtime_g());
- runtime_traceback();
- runtime_printf("\n");
- runtime_m()->traceback = 0;
- runtime_unlock(&tracelock);
-}
-
-void
-runtime_tracegc(void)
-{
- runtime_lock(&tracelock);
- runtime_m()->traceback = 2;
- runtime_printf("tracegc()\n");
- // running on m->g0 stack; show all non-g0 goroutines
- runtime_tracebackothers(runtime_g());
- runtime_printf("end tracegc\n");
- runtime_printf("\n");
- runtime_m()->traceback = 0;
- runtime_unlock(&tracelock);
-}
diff --git a/libgo/runtime/proc.c b/libgo/runtime/proc.c
index 246ab7d..7d65c4b 100644
--- a/libgo/runtime/proc.c
+++ b/libgo/runtime/proc.c
@@ -658,67 +658,12 @@ runtime_main(void* dummy __attribute__((unused)))
}
void
-runtime_goroutineheader(G *gp)
-{
- String status;
- int64 waitfor;
-
- switch(gp->atomicstatus) {
- case _Gidle:
- status = runtime_gostringnocopy((const byte*)"idle");
- break;
- case _Grunnable:
- status = runtime_gostringnocopy((const byte*)"runnable");
- break;
- case _Grunning:
- status = runtime_gostringnocopy((const byte*)"running");
- break;
- case _Gsyscall:
- status = runtime_gostringnocopy((const byte*)"syscall");
- break;
- case _Gwaiting:
- if(gp->waitreason.len > 0)
- status = gp->waitreason;
- else
- status = runtime_gostringnocopy((const byte*)"waiting");
- break;
- default:
- status = runtime_gostringnocopy((const byte*)"???");
- break;
- }
-
- // approx time the G is blocked, in minutes
- waitfor = 0;
- if((gp->atomicstatus == _Gwaiting || gp->atomicstatus == _Gsyscall) && gp->waitsince != 0)
- waitfor = (runtime_nanotime() - gp->waitsince) / (60LL*1000*1000*1000);
-
- if(waitfor < 1)
- runtime_printf("goroutine %D [%S]:\n", gp->goid, status);
- else
- runtime_printf("goroutine %D [%S, %D minutes]:\n", gp->goid, status, waitfor);
-}
-
-void
-runtime_printcreatedby(G *g)
-{
- if(g != nil && g->gopc != 0 && g->goid != 1) {
- String fn;
- String file;
- intgo line;
-
- if(__go_file_line(g->gopc - 1, -1, &fn, &file, &line)) {
- runtime_printf("created by %S\n", fn);
- runtime_printf("\t%S:%D\n", file, (int64) line);
- }
- }
-}
-
-void
runtime_tracebackothers(G * volatile me)
{
G * volatile gp;
Traceback tb;
int32 traceback;
+ Slice slice;
volatile uintptr i;
tb.gp = me;
@@ -739,7 +684,10 @@ runtime_tracebackothers(G * volatile me)
runtime_gogo(gp);
}
- runtime_printtrace(tb.locbuf, tb.c, false);
+ slice.__values = &tb.locbuf[0];
+ slice.__count = tb.c;
+ slice.__capacity = tb.c;
+ runtime_printtrace(slice, nil);
runtime_printcreatedby(gp);
}
@@ -780,7 +728,10 @@ runtime_tracebackothers(G * volatile me)
runtime_gogo(gp);
}
- runtime_printtrace(tb.locbuf, tb.c, false);
+ slice.__values = &tb.locbuf[0];
+ slice.__count = tb.c;
+ slice.__capacity = tb.c;
+ runtime_printtrace(slice, nil);
runtime_printcreatedby(gp);
}
}
@@ -3597,3 +3548,28 @@ sync_runtime_doSpin()
{
runtime_procyield(ACTIVE_SPIN_CNT);
}
+
+// For Go code to look at variables, until we port proc.go.
+
+extern M** runtime_go_allm(void)
+ __asm__ (GOSYM_PREFIX "runtime.allm");
+
+M**
+runtime_go_allm()
+{
+ return &runtime_allm;
+}
+
+extern Slice runtime_go_allgs(void)
+ __asm__ (GOSYM_PREFIX "runtime.allgs");
+
+Slice
+runtime_go_allgs()
+{
+ Slice s;
+
+ s.__values = runtime_allg;
+ s.__count = runtime_allglen;
+ s.__capacity = allgcap;
+ return s;
+}
diff --git a/libgo/runtime/runtime.c b/libgo/runtime/runtime.c
index 8e6f1f5..70331f4 100644
--- a/libgo/runtime/runtime.c
+++ b/libgo/runtime/runtime.c
@@ -90,18 +90,6 @@ runtime_cputicks(void)
#endif
}
-bool
-runtime_showframe(String s, bool current)
-{
- static int32 traceback = -1;
-
- if(current && runtime_m()->throwing > 0)
- return 1;
- if(traceback < 0)
- traceback = runtime_gotraceback(nil);
- return traceback > 1 || (__builtin_memchr(s.str, '.', s.len) != nil && __builtin_memcmp(s.str, "runtime.", 7) != 0);
-}
-
// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
void
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index 27b0649..be19e95 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -89,7 +89,7 @@ typedef struct __go_interface_type InterfaceType;
typedef struct __go_map_type MapType;
typedef struct __go_channel_type ChanType;
-typedef struct traceback Traceback;
+typedef struct tracebackg Traceback;
typedef struct location Location;
@@ -261,8 +261,10 @@ enum {
};
void runtime_hashinit(void);
-void runtime_traceback(void);
-void runtime_tracebackothers(G*);
+void runtime_traceback(void)
+ __asm__ (GOSYM_PREFIX "runtime.traceback");
+void runtime_tracebackothers(G*)
+ __asm__ (GOSYM_PREFIX "runtime.tracebackothers");
enum
{
// The maximum number of frames we print for a traceback
@@ -325,8 +327,10 @@ void runtime_sigenable(uint32 sig);
void runtime_sigdisable(uint32 sig);
void runtime_sigignore(uint32 sig);
int32 runtime_gotraceback(bool *crash);
-void runtime_goroutineheader(G*);
-void runtime_printtrace(Location*, int32, bool);
+void runtime_goroutineheader(G*)
+ __asm__ (GOSYM_PREFIX "runtime.goroutineheader");
+void runtime_printtrace(Slice, G*)
+ __asm__ (GOSYM_PREFIX "runtime.printtrace");
#define runtime_open(p, f, m) open((p), (f), (m))
#define runtime_read(d, v, n) read((d), (v), (n))
#define runtime_write(d, v, n) write((d), (v), (n))
@@ -561,8 +565,8 @@ void runtime_lockOSThread(void);
void runtime_unlockOSThread(void);
bool runtime_lockedOSThread(void);
-bool runtime_showframe(String, bool);
-void runtime_printcreatedby(G*);
+void runtime_printcreatedby(G*)
+ __asm__(GOSYM_PREFIX "runtime.printcreatedby");
uintptr runtime_memlimit(void);