Diffstat (limited to 'libgo/runtime/runtime.h')
-rw-r--r-- | libgo/runtime/runtime.h | 255
1 file changed, 175 insertions(+), 80 deletions(-)
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index 959220d..5b2a64f 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -54,9 +54,11 @@ typedef uint8 bool;
 typedef uint8 byte;
 typedef struct Func Func;
 typedef struct G G;
-typedef union Lock Lock;
+typedef struct Lock Lock;
 typedef struct M M;
-typedef union Note Note;
+typedef struct P P;
+typedef struct Note Note;
+typedef struct String String;
 typedef struct FuncVal FuncVal;
 typedef struct SigTab SigTab;
 typedef struct MCache MCache;
@@ -64,14 +66,14 @@
 typedef struct FixAlloc FixAlloc;
 typedef struct Hchan Hchan;
 typedef struct Timers Timers;
 typedef struct Timer Timer;
-typedef struct GCStats GCStats;
-typedef struct LFNode LFNode;
-typedef struct ParFor ParFor;
-typedef struct ParForThread ParForThread;
-typedef struct CgoMal CgoMal;
+typedef struct GCStats GCStats;
+typedef struct LFNode LFNode;
+typedef struct ParFor ParFor;
+typedef struct ParForThread ParForThread;
+typedef struct CgoMal CgoMal;
+typedef struct PollDesc PollDesc;
 typedef struct __go_open_array Slice;
-typedef struct String String;
 typedef struct __go_interface Iface;
 typedef struct __go_empty_interface Eface;
 typedef struct __go_type_descriptor Type;
@@ -81,6 +83,7 @@ typedef struct __go_panic_stack Panic;
 typedef struct __go_ptr_type PtrType;
 typedef struct __go_func_type FuncType;
 typedef struct __go_map_type MapType;
+typedef struct __go_channel_type ChanType;
 
 typedef struct Traceback Traceback;
 
@@ -110,11 +113,20 @@ enum
 	Grunning,
 	Gsyscall,
 	Gwaiting,
-	Gmoribund,
+	Gmoribund_unused,  // currently unused, but hardcoded in gdb scripts
 	Gdead,
 };
 enum
 {
+	// P status
+	Pidle,
+	Prunning,
+	Psyscall,
+	Pgcstop,
+	Pdead,
+};
+enum
+{
 	true = 1,
 	false = 0,
 };
@@ -129,19 +141,22 @@ enum
 	// Global <-> per-M stack segment cache transfer batch size.
 	StackCacheBatch = 16,
 };
-
 /*
 * structures
 */
-union Lock
+struct Lock
 {
-	uint32 key; // futex-based impl
-	M* waitm; // linked list of waiting M's (sema-based impl)
+	// Futex-based impl treats it as uint32 key,
+	// while sema-based impl as M* waitm.
+	// Used to be a union, but unions break precise GC.
+	uintptr key;
 };
-union Note
+struct Note
 {
-	uint32 key; // futex-based impl
-	M* waitm; // waiting M (sema-based impl)
+	// Futex-based impl treats it as uint32 key,
+	// while sema-based impl as M* waitm.
+	// Used to be a union, but unions break precise GC.
+	uintptr key;
 };
 struct String
 {
@@ -194,13 +209,12 @@
 	uint32 selgen; // valid sudog pointer
 	const char* waitreason; // if status==Gwaiting
 	G* schedlink;
-	bool readyonstop;
 	bool ispanic;
-	bool issystem;
-	int8 raceignore; // ignore race detection events
+	bool issystem; // do not output in stack dump
+	bool isbackground; // ignore in deadlock detector
+	bool blockingsyscall; // hint that the next syscall will block
 	M* m; // for debuggers, but offset not hard-coded
 	M* lockedm;
-	M* idlem;
 	int32 sig;
 	int32 writenbuf;
 	byte* writebuf;
@@ -224,34 +238,44 @@
 {
 	G* g0; // goroutine with scheduling stack
 	G* gsignal; // signal-handling G
+	byte* gsignalstack;
+	size_t gsignalstacksize;
+	void (*mstartfn)(void);
 	G* curg; // current running goroutine
+	P* p; // attached P for executing Go code (nil if not executing Go code)
+	P* nextp;
 	int32 id;
 	int32 mallocing;
 	int32 throwing;
 	int32 gcing;
 	int32 locks;
 	int32 nomemprof;
-	int32 waitnextg;
 	int32 dying;
 	int32 profilehz;
 	int32 helpgc;
+	bool blockingsyscall;
+	bool spinning;
 	uint32 fastrand;
 	uint64 ncgocall; // number of cgo calls in total
-	Note havenextg;
-	G* nextg;
+	int32 ncgo; // number of cgo calls currently in progress
+	CgoMal* cgomal;
+	Note park;
 	M* alllink; // on allm
 	M* schedlink;
 	MCache *mcache;
 	G* lockedg;
-	G* idleg;
 	Location createstack[32]; // Stack that created this thread.
+	uint32 locked; // tracking for LockOSThread
 	M* nextwaitm; // next M waiting for lock
 	uintptr waitsema; // semaphore for parking on locks
 	uint32 waitsemacount;
 	uint32 waitsemalock;
 	GCStats gcstats;
 	bool racecall;
+	bool needextram;
 	void* racepc;
+	void (*waitunlockf)(Lock*);
+	void* waitlock;
 	uintptr settype_buf[1024];
 	uintptr settype_bufsize;
@@ -259,6 +283,38 @@
 	uintptr end[];
 };
 
+struct P
+{
+	Lock;
+
+	uint32 status; // one of Pidle/Prunning/...
+	P* link;
+	uint32 tick; // incremented on every scheduler or system call
+	M* m; // back-link to associated M (nil if idle)
+	MCache* mcache;
+
+	// Queue of runnable goroutines.
+	G** runq;
+	int32 runqhead;
+	int32 runqtail;
+	int32 runqsize;
+
+	// Available G's (status == Gdead)
+	G* gfree;
+	int32 gfreecnt;
+
+	byte pad[64];
+};
+
+// The m->locked word holds a single bit saying whether
+// external calls to LockOSThread are in effect, and then a counter
+// of the internal nesting depth of lockOSThread / unlockOSThread.
+enum
+{
+	LockExternal = 1,
+	LockInternal = 2,
+};
+
 struct SigTab
 {
 	int32 sig;
@@ -271,6 +327,8 @@ enum
 	SigThrow = 1<<2, // if signal.Notify doesn't take it, exit loudly
 	SigPanic = 1<<3, // if the signal is from the kernel, panic
 	SigDefault = 1<<4, // if the signal isn't explicitly requested, don't monitor it
+	SigHandling = 1<<5, // our signal handler is registered
+	SigIgnored = 1<<6, // the signal was ignored before we registered for it
 };
 
 #ifndef NSIG
@@ -343,6 +401,7 @@ struct ParFor
 	bool wait; // if true, wait while all threads finish processing,
 	           // otherwise parfor may return while other threads are still working
 	ParForThread *thr; // array of thread descriptors
+	uint32 pad; // to align ParForThread.pos for 64-bit atomic operations
 	// stats
 	uint64 nsteal;
 	uint64 nstealcnt;
@@ -356,7 +415,7 @@
 struct CgoMal
 {
 	CgoMal *next;
-	byte *alloc;
+	void *alloc;
 };
 
 /*
 *
@@ -369,6 +428,19 @@
 #define USED(v) ((void) v)
 #define ROUND(x, n) (((x)+(n)-1)&~((n)-1)) /* all-caps to mark as macro: it evaluates n twice */
 
+byte* runtime_startup_random_data;
+uint32 runtime_startup_random_data_len;
+void runtime_get_random_data(byte**, int32*);
+
+enum {
+	// hashinit wants this many random bytes
+	HashRandomBytes = 32
+};
+void runtime_hashinit(void);
+
+void runtime_traceback();
+void runtime_tracebackothers(G*);
+
 /*
 * external data
 */
@@ -376,21 +448,27 @@ extern uintptr runtime_zerobase;
 extern G* runtime_allg;
 extern G* runtime_lastg;
 extern M* runtime_allm;
+extern P** runtime_allp;
 extern int32 runtime_gomaxprocs;
 extern bool runtime_singleproc;
 extern uint32 runtime_panicking;
-extern int32 runtime_gcwaiting; // gc is waiting to run
+extern uint32 runtime_gcwaiting; // gc is waiting to run
+extern int8* runtime_goos;
 extern int32 runtime_ncpu;
+extern void (*runtime_sysargs)(int32, uint8**);
 
 /*
 * common functions and data
 */
+#define runtime_strcmp(s1, s2) __builtin_strcmp((s1), (s2))
+#define runtime_strstr(s1, s2) __builtin_strstr((s1), (s2))
 intgo runtime_findnull(const byte*);
 void runtime_dump(byte*, int32);
 
 /*
 * very low level c-called
 */
+struct __go_func_type;
 void runtime_args(int32, byte**);
 void runtime_osinit();
 void runtime_goargs(void);
@@ -400,42 +478,98 @@
 void runtime_throw(const char*) __attribute__ ((noreturn));
 void runtime_panicstring(const char*) __attribute__ ((noreturn));
 void runtime_prints(const char*);
 void runtime_printf(const char*, ...);
+#define runtime_mcmp(a, b, s) __builtin_memcmp((a), (b), (s))
+#define runtime_memmove(a, b, s) __builtin_memmove((a), (b), (s))
 void* runtime_mal(uintptr);
+String runtime_gostring(const byte*);
+String runtime_gostringnocopy(const byte*);
 void runtime_schedinit(void);
 void runtime_initsig(void);
 void runtime_sigenable(uint32 sig);
-int32 runtime_gotraceback(void);
+void runtime_sigdisable(uint32 sig);
+int32 runtime_gotraceback(bool *crash);
 void runtime_goroutineheader(G*);
 void runtime_goroutinetrailer(G*);
-void runtime_traceback();
-void runtime_tracebackothers(G*);
 void runtime_printtrace(Location*, int32, bool);
-String runtime_gostring(const byte*);
-String runtime_gostringnocopy(const byte*);
+#define runtime_open(p, f, m) open((p), (f), (m))
+#define runtime_read(d, v, n) read((d), (v), (n))
+#define runtime_write(d, v, n) write((d), (v), (n))
+#define runtime_close(d) close(d)
+#define runtime_cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
+#define runtime_cas64(pval, pold, new) __atomic_compare_exchange_n (pval, pold, new, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+#define runtime_casp(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
+// Don't confuse with XADD x86 instruction,
+// this one is actually 'addx', that is, add-and-fetch.
+#define runtime_xadd(p, v) __sync_add_and_fetch (p, v)
+#define runtime_xadd64(p, v) __sync_add_and_fetch (p, v)
+#define runtime_xchg(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
+#define runtime_xchg64(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
+#define runtime_atomicload(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
+#define runtime_atomicstore(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
+#define runtime_atomicstore64(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
+#define runtime_atomicload64(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
+#define runtime_atomicloadp(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
+#define runtime_atomicstorep(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
+void runtime_ready(G*);
+const byte* runtime_getenv(const char*);
+int32 runtime_atoi(const byte*);
 void* runtime_mstart(void*);
 G* runtime_malg(int32, byte**, size_t*);
+void runtime_mpreinit(M*);
 void runtime_minit(void);
+void runtime_unminit(void);
+void runtime_signalstack(byte*, int32);
+MCache* runtime_allocmcache(void);
+void runtime_freemcache(MCache*);
 void runtime_mallocinit(void);
+void runtime_mprofinit(void);
+#define runtime_malloc(s) __go_alloc(s)
+#define runtime_free(p) __go_free(p)
+bool runtime_addfinalizer(void*, FuncVal *fn, const struct __go_func_type *);
+#define runtime_getcallersp(p) __builtin_frame_address(1)
+int32 runtime_mcount(void);
+int32 runtime_gcount(void);
+uint32 runtime_fastrand1(void);
+
+void runtime_setmg(M*, G*);
+void runtime_newextram(void);
+#define runtime_exit(s) exit(s)
+#define runtime_breakpoint() __builtin_trap()
 void runtime_gosched(void);
 void runtime_park(void(*)(Lock*), Lock*, const char*);
 void runtime_tsleep(int64, const char*);
 M* runtime_newm(void);
 void runtime_goexit(void);
 void runtime_entersyscall(void) __asm__ (GOSYM_PREFIX "syscall.Entersyscall");
+void runtime_entersyscallblock(void);
 void runtime_exitsyscall(void) __asm__ (GOSYM_PREFIX "syscall.Exitsyscall");
+G* __go_go(void (*pfn)(void*), void*);
 void siginit(void);
 bool __go_sigsend(int32 sig);
 int32 runtime_callers(int32, Location*, int32);
 int64 runtime_nanotime(void);
+void runtime_dopanic(int32) __attribute__ ((noreturn));
+void runtime_startpanic(void);
+void runtime_sigprof();
+void runtime_resetcpuprofiler(int32);
+void runtime_setcpuprofilerate(void(*)(uintptr*, int32), int32);
+void runtime_usleep(uint32);
 int64 runtime_cputicks(void);
 int64 runtime_tickspersecond(void);
 void runtime_blockevent(int64, int32);
 extern int64 runtime_blockprofilerate;
+void runtime_addtimer(Timer*);
+bool runtime_deltimer(Timer*);
+G* runtime_netpoll(bool);
+void runtime_netpollinit(void);
+int32 runtime_netpollopen(int32, PollDesc*);
+int32 runtime_netpollclose(int32);
+void runtime_netpollready(G**, PollDesc*, int32);
+void runtime_crash(void);
 void runtime_stoptheworld(void);
 void runtime_starttheworld(void);
 extern uint32 runtime_worldsema;
-G* __go_go(void (*pfn)(void*), void*);
 
 /*
 * mutual exclusion locks.  in the uncontended case,
@@ -533,6 +667,7 @@ void __wrap_rtems_task_variable_add(void **);
 * runtime go-called
 */
 void runtime_printbool(_Bool);
+void runtime_printbyte(int8);
 void runtime_printfloat(double);
 void runtime_printint(int64);
 void runtime_printiface(Iface);
@@ -544,53 +679,10 @@
 void runtime_printuint(uint64);
 void runtime_printhex(uint64);
 void runtime_printslice(Slice);
 void runtime_printcomplex(__complex double);
-
-struct __go_func_type;
 void reflect_call(const struct __go_func_type *, FuncVal *, _Bool, _Bool, void **, void **) __asm__ (GOSYM_PREFIX "reflect.call");
-
-/* Functions. */
 #define runtime_panic __go_panic
-#define runtime_write(d, v, n) write((d), (v), (n))
-#define runtime_malloc(s) __go_alloc(s)
-#define runtime_free(p) __go_free(p)
-#define runtime_strcmp(s1, s2) __builtin_strcmp((s1), (s2))
-#define runtime_mcmp(a, b, s) __builtin_memcmp((a), (b), (s))
-#define runtime_memmove(a, b, s) __builtin_memmove((a), (b), (s))
-#define runtime_exit(s) exit(s)
-MCache* runtime_allocmcache(void);
-void free(void *v);
-#define runtime_cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
-#define runtime_casp(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
-#define runtime_cas64(pval, pold, new) __atomic_compare_exchange_n (pval, pold, new, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
-#define runtime_xadd(p, v) __sync_add_and_fetch (p, v)
-#define runtime_xadd64(p, v) __sync_add_and_fetch (p, v)
-#define runtime_xchg(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
-#define runtime_atomicload(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
-#define runtime_atomicstore(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
-#define runtime_atomicloadp(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
-#define runtime_atomicstorep(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
-#define runtime_atomicload64(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
-#define runtime_atomicstore64(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
-#define PREFETCH(p) __builtin_prefetch(p)
-
-struct __go_func_type;
-bool runtime_addfinalizer(void*, FuncVal *fn, const struct __go_func_type *);
-#define runtime_getcallersp(p) __builtin_frame_address(1)
-int32 runtime_mcount(void);
-int32 runtime_gcount(void);
-void runtime_dopanic(int32) __attribute__ ((noreturn));
-void runtime_startpanic(void);
-void runtime_ready(G*);
-const byte* runtime_getenv(const char*);
-int32 runtime_atoi(const byte*);
-uint32 runtime_fastrand1(void);
-
-void runtime_sigprof();
-void runtime_resetcpuprofiler(int32);
-void runtime_setcpuprofilerate(void(*)(uintptr*, int32), int32);
-void runtime_usleep(uint32);
 
 /*
 * runtime c-called (but written in Go)
@@ -605,14 +697,13 @@ void runtime_newErrorString(String, Eface*)
 
 /*
 * wrapped for go users
*/
-#define ISNAN(f) __builtin_isnan(f)
 void runtime_semacquire(uint32 volatile *);
 void runtime_semrelease(uint32 volatile *);
 int32 runtime_gomaxprocsfunc(int32 n);
 void runtime_procyield(uint32);
 void runtime_osyield(void);
-void runtime_LockOSThread(void) __asm__ (GOSYM_PREFIX "runtime.LockOSThread");
-void runtime_UnlockOSThread(void) __asm__ (GOSYM_PREFIX "runtime.UnlockOSThread");
+void runtime_lockOSThread(void);
+void runtime_unlockOSThread(void);
 
 bool runtime_showframe(String, bool);
@@ -628,12 +719,13 @@ uintptr runtime_memlimit(void);
 // This is a no-op on other systems.
 void runtime_setprof(bool);
 
+#define ISNAN(f) __builtin_isnan(f)
+
 enum
 {
-	UseSpanType = 1,
+	UseSpanType = 0,
 };
 
-void runtime_setsig(int32, bool, bool);
 #define runtime_setitimer setitimer
 
 void runtime_check(void);
@@ -658,5 +750,8 @@ struct backtrace_state;
 extern struct backtrace_state *__go_get_backtrace_state(void);
 extern _Bool __go_file_line(uintptr, String*, String*, intgo *);
 extern byte* runtime_progname();
+extern void runtime_main(void*);
 
 int32 getproccount(void);
+
+#define PREFETCH(p) __builtin_prefetch(p)
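Note on the Lock/Note change above: the union became a struct holding a single uintptr because, as the new comment says, unions break precise GC, and each lock implementation now reinterprets that one word. A minimal illustrative sketch of that idea (not part of the patch; the helper names are hypothetical), assuming the futex-based implementation wants a 32-bit state word and the semaphore-based one the head of a waiting-M list:

#include <stdint.h>

typedef struct M M;                          /* opaque thread descriptor */
typedef struct Lock { uintptr_t key; } Lock;

/* futex-based impl: treat the word as a uint32 state */
static inline uint32_t *futex_key(Lock *l) { return (uint32_t *)&l->key; }

/* sema-based impl: treat the word as the head of the waiting-M list */
static inline M **sema_waitm(Lock *l) { return (M **)&l->key; }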
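The atomic wrappers added by this patch expand to GCC builtins, and the comment above runtime_xadd stresses that it is add-and-fetch rather than x86 XADD, i.e. it yields the updated value. A small stand-alone check of that semantic, using the same builtin the macro expands to:

#include <stdint.h>
#include <stdio.h>

#define runtime_xadd(p, v) __sync_add_and_fetch(p, v)   /* as in the patch */

int main(void) {
	uint32_t n = 40;
	uint32_t got = runtime_xadd(&n, 2);   /* returns the new value, not the old one */
	printf("%u %u\n", got, n);            /* prints: 42 42 */
	return 0;
}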
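The new LockExternal/LockInternal constants encode m->locked as the comment describes: one bit recording whether an external LockOSThread call is in effect, plus a counter of internal lockOSThread nesting. A hedged sketch of that encoding (illustrative only, not copied from the scheduler sources):

#include <stdint.h>

enum {
	LockExternal = 1,   /* user called runtime.LockOSThread */
	LockInternal = 2,   /* internal nesting, counted in units of 2 */
};

static void lock_external(uint32_t *locked)   { *locked |= LockExternal; }
static void unlock_external(uint32_t *locked) { *locked &= ~(uint32_t)LockExternal; }
static void lock_internal(uint32_t *locked)   { *locked += LockInternal; }
static void unlock_internal(uint32_t *locked) { *locked -= LockInternal; }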