path: root/libgo/runtime
author     Ian Lance Taylor <ian@gcc.gnu.org>    2016-10-15 00:29:06 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>    2016-10-15 00:29:06 +0000
commit     35d942444418606e75f2e65aa7708616c5233035 (patch)
tree       156c9d476a537c240533dedcf79341b37bcdf5d9 /libgo/runtime
parent     2a0b23da0560f4d321e28ae3673cd130ebf0e8f8 (diff)
runtime: copy runtime package time code from Go 1.7
Fix handling of function values for -fgo-c-header to generate FuncVal*,
not simply FuncVal.

While we're here, change runtime.nanotime to use clock_gettime with
CLOCK_MONOTONIC rather than gettimeofday. This is what the gc library
does. It provides nanosecond precision and a monotonic clock.

Reviewed-on: https://go-review.googlesource.com/31232

From-SVN: r241197
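(A minimal standalone sketch, not part of the patch, of the clock swap the
message describes: gettimeofday reports wall-clock time with microsecond
resolution and can jump when the system clock is stepped, while
clock_gettime with CLOCK_MONOTONIC yields nanosecond readings that only
move forward. Function names below are illustrative.)

#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>
#include <time.h>

/* Wall clock: microsecond resolution, may move backwards when the
   system time is adjusted.  */
static int64_t
wallclock_nanos (void)
{
  struct timeval tv;

  gettimeofday (&tv, NULL);
  return (int64_t) tv.tv_sec * 1000000000 + (int64_t) tv.tv_usec * 1000;
}

/* Monotonic clock: nanosecond resolution, never moves backwards,
   which is what a runtime timer wants.  */
static int64_t
monotonic_nanos (void)
{
  struct timespec ts;

  clock_gettime (CLOCK_MONOTONIC, &ts);
  return (int64_t) ts.tv_sec * 1000000000 + (int64_t) ts.tv_nsec;
}

int
main (void)
{
  printf ("wall      %lld ns\n", (long long) wallclock_nanos ());
  printf ("monotonic %lld ns\n", (long long) monotonic_nanos ());
  return 0;
}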
Diffstat (limited to 'libgo/runtime')
-rw-r--r--  libgo/runtime/go-nanotime.c      6
-rw-r--r--  libgo/runtime/malloc.h           1
-rw-r--r--  libgo/runtime/mgc0.c             1
-rw-r--r--  libgo/runtime/netpoll.goc       35
-rw-r--r--  libgo/runtime/runtime.h         42
-rw-r--r--  libgo/runtime/time.goc         353
6 files changed, 25 insertions, 413 deletions
diff --git a/libgo/runtime/go-nanotime.c b/libgo/runtime/go-nanotime.c
index 7e5e3e0..d221847 100644
--- a/libgo/runtime/go-nanotime.c
+++ b/libgo/runtime/go-nanotime.c
@@ -14,8 +14,8 @@ int64 runtime_nanotime (void)
int64
runtime_nanotime (void)
{
- struct timeval tv;
+ struct timespec ts;
- gettimeofday (&tv, NULL);
- return (int64) tv.tv_sec * 1000000000 + (int64) tv.tv_usec * 1000;
+ clock_gettime (CLOCK_MONOTONIC, &ts);
+ return (int64) ts.tv_sec * 1000000000 + (int64) ts.tv_nsec;
}
diff --git a/libgo/runtime/malloc.h b/libgo/runtime/malloc.h
index 011eaa9..e674c88 100644
--- a/libgo/runtime/malloc.h
+++ b/libgo/runtime/malloc.h
@@ -543,5 +543,4 @@ int32 runtime_setgcpercent(int32);
struct Workbuf;
void runtime_proc_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
-void runtime_time_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void runtime_netpoll_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
diff --git a/libgo/runtime/mgc0.c b/libgo/runtime/mgc0.c
index 2c8e5a8..0b96696 100644
--- a/libgo/runtime/mgc0.c
+++ b/libgo/runtime/mgc0.c
@@ -1277,7 +1277,6 @@ markroot(ParFor *desc, uint32 i)
enqueue1(&wbuf, (Obj){(byte*)&runtime_allp, sizeof runtime_allp, 0});
enqueue1(&wbuf, (Obj){(byte*)&work, sizeof work, 0});
runtime_proc_scan(&wbuf, enqueue1);
- runtime_time_scan(&wbuf, enqueue1);
runtime_netpoll_scan(&wbuf, enqueue1);
break;
diff --git a/libgo/runtime/netpoll.goc b/libgo/runtime/netpoll.goc
index ecd426f..9467c02 100644
--- a/libgo/runtime/netpoll.goc
+++ b/libgo/runtime/netpoll.goc
@@ -89,11 +89,6 @@ static FuncVal deadlineFn = {(void(*)(void))deadline};
static FuncVal readDeadlineFn = {(void(*)(void))readDeadline};
static FuncVal writeDeadlineFn = {(void(*)(void))writeDeadline};
-// runtimeNano returns the current value of the runtime clock in nanoseconds.
-func runtimeNano() (ns int64) {
- ns = runtime_nanotime();
-}
-
func runtime_pollServerInit() {
runtime_netpollinit();
}
@@ -176,13 +171,13 @@ func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
}
pd->seq++; // invalidate current timers
// Reset current timers.
- if(pd->rt.fv) {
+ if(pd->rt.f) {
runtime_deltimer(&pd->rt);
- pd->rt.fv = nil;
+ pd->rt.f = nil;
}
- if(pd->wt.fv) {
+ if(pd->wt.f) {
runtime_deltimer(&pd->wt);
- pd->wt.fv = nil;
+ pd->wt.f = nil;
}
// Setup new timers.
if(d != 0 && d <= runtime_nanotime())
@@ -192,7 +187,7 @@ func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
if(mode == 'w' || mode == 'r'+'w')
pd->wd = d;
if(pd->rd > 0 && pd->rd == pd->wd) {
- pd->rt.fv = &deadlineFn;
+ pd->rt.f = &deadlineFn;
pd->rt.when = pd->rd;
// Copy current seq into the timer arg.
// Timer func will check the seq against current descriptor seq,
@@ -203,7 +198,7 @@ func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
runtime_addtimer(&pd->rt);
} else {
if(pd->rd > 0) {
- pd->rt.fv = &readDeadlineFn;
+ pd->rt.f = &readDeadlineFn;
pd->rt.when = pd->rd;
pd->rt.arg.type = nil; // should be *pollDesc type descriptor.
pd->rt.arg.data = pd;
@@ -211,7 +206,7 @@ func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
runtime_addtimer(&pd->rt);
}
if(pd->wd > 0) {
- pd->wt.fv = &writeDeadlineFn;
+ pd->wt.f = &writeDeadlineFn;
pd->wt.when = pd->wd;
pd->wt.arg.type = nil; // should be *pollDesc type descriptor.
pd->wt.arg.data = pd;
@@ -244,13 +239,13 @@ func runtime_pollUnblock(pd *PollDesc) {
runtime_atomicstorep(&rg, nil); // full memory barrier between store to closing and read of rg/wg in netpollunblock
rg = netpollunblock(pd, 'r', false);
wg = netpollunblock(pd, 'w', false);
- if(pd->rt.fv) {
+ if(pd->rt.f) {
runtime_deltimer(&pd->rt);
- pd->rt.fv = nil;
+ pd->rt.f = nil;
}
- if(pd->wt.fv) {
+ if(pd->wt.f) {
runtime_deltimer(&pd->wt);
- pd->wt.fv = nil;
+ pd->wt.f = nil;
}
runtime_unlock(pd);
if(rg)
@@ -408,17 +403,17 @@ deadlineimpl(Eface arg, uintptr seq, bool read, bool write)
return;
}
if(read) {
- if(pd->rd <= 0 || pd->rt.fv == nil)
+ if(pd->rd <= 0 || pd->rt.f == nil)
runtime_throw("deadlineimpl: inconsistent read deadline");
pd->rd = -1;
- runtime_atomicstorep(&pd->rt.fv, nil); // full memory barrier between store to rd and load of rg in netpollunblock
+ runtime_atomicstorep(&pd->rt.f, nil); // full memory barrier between store to rd and load of rg in netpollunblock
rg = netpollunblock(pd, 'r', false);
}
if(write) {
- if(pd->wd <= 0 || (pd->wt.fv == nil && !read))
+ if(pd->wd <= 0 || (pd->wt.f == nil && !read))
runtime_throw("deadlineimpl: inconsistent write deadline");
pd->wd = -1;
- runtime_atomicstorep(&pd->wt.fv, nil); // full memory barrier between store to wd and load of wg in netpollunblock
+ runtime_atomicstorep(&pd->wt.f, nil); // full memory barrier between store to wd and load of wg in netpollunblock
wg = netpollunblock(pd, 'w', false);
}
runtime_unlock(pd);
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index be19e95..f73d745 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -66,8 +66,7 @@ typedef struct SigTab SigTab;
typedef struct mcache MCache;
typedef struct FixAlloc FixAlloc;
typedef struct hchan Hchan;
-typedef struct Timers Timers;
-typedef struct Timer Timer;
+typedef struct timer Timer;
typedef struct gcstats GCStats;
typedef struct LFNode LFNode;
typedef struct ParFor ParFor;
@@ -181,36 +180,6 @@ enum {
};
#endif
-struct Timers
-{
- Lock;
- G *timerproc;
- bool sleeping;
- bool rescheduling;
- Note waitnote;
- Timer **t;
- int32 len;
- int32 cap;
-};
-
-// Package time knows the layout of this structure.
-// If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
-// For GOOS=nacl, package syscall knows the layout of this structure.
-// If this struct changes, adjust ../syscall/net_nacl.go:/runtimeTimer.
-struct Timer
-{
- intgo i; // heap index
-
- // Timer wakes up at when, and then at when+period, ... (period > 0 only)
- // each time calling f(now, arg) in the timer goroutine, so f must be
- // a well-behaved function and not block.
- int64 when;
- int64 period;
- FuncVal *fv;
- Eface arg;
- uintptr seq;
-};
-
// Lock-free stack node.
struct LFNode
{
@@ -403,7 +372,8 @@ bool __go_sigsend(int32 sig);
int32 runtime_callers(int32, Location*, int32, bool keep_callers);
int64 runtime_nanotime(void) // monotonic time
__asm__(GOSYM_PREFIX "runtime.nanotime");
-int64 runtime_unixnanotime(void); // real time, can skip
+int64 runtime_unixnanotime(void) // real time, can skip
+ __asm__ (GOSYM_PREFIX "runtime.unixnanotime");
void runtime_dopanic(int32) __attribute__ ((noreturn));
void runtime_startpanic(void);
void runtime_freezetheworld(void);
@@ -422,8 +392,10 @@ int64 runtime_tickspersecond(void)
__asm__ (GOSYM_PREFIX "runtime.tickspersecond");
void runtime_blockevent(int64, int32);
extern int64 runtime_blockprofilerate;
-void runtime_addtimer(Timer*);
-bool runtime_deltimer(Timer*);
+void runtime_addtimer(Timer*)
+ __asm__ (GOSYM_PREFIX "runtime.addtimer");
+bool runtime_deltimer(Timer*)
+ __asm__ (GOSYM_PREFIX "runtime.deltimer");
G* runtime_netpoll(bool);
void runtime_netpollinit(void);
int32 runtime_netpollopen(uintptr, PollDesc*);
diff --git a/libgo/runtime/time.goc b/libgo/runtime/time.goc
deleted file mode 100644
index b77ad33..0000000
--- a/libgo/runtime/time.goc
+++ /dev/null
@@ -1,353 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Time-related runtime and pieces of package time.
-
-package time
-
-#include <sys/time.h>
-
-#include "runtime.h"
-#include "defs.h"
-#include "arch.h"
-#include "malloc.h"
-
-enum {
- debug = 0,
-};
-
-static Timers timers;
-static void addtimer(Timer*);
-static void dumptimers(const char*);
-
-// nacl fake time support.
-int64 runtime_timens;
-
-// Package time APIs.
-// Godoc uses the comments in package time, not these.
-
-// time.now is implemented in assembly.
-
-// runtimeNano returns the current value of the runtime clock in nanoseconds.
-func runtimeNano() (ns int64) {
- ns = runtime_nanotime();
-}
-
-// Sleep puts the current goroutine to sleep for at least ns nanoseconds.
-func Sleep(ns int64) {
- runtime_tsleep(ns, "sleep");
-}
-
-// startTimer adds t to the timer heap.
-func startTimer(t *Timer) {
- runtime_addtimer(t);
-}
-
-// stopTimer removes t from the timer heap if it is there.
-// It returns true if t was removed, false if t wasn't even there.
-func stopTimer(t *Timer) (stopped bool) {
- stopped = runtime_deltimer(t);
-}
-
-// C runtime.
-
-int64 runtime_unixnanotime(void)
-{
- struct time_now_ret r;
-
- r = now();
- return r.sec*1000000000 + r.nsec;
-}
-
-static void timerproc(void*);
-static void siftup(int32);
-static void siftdown(int32);
-
-// Ready the goroutine e.data.
-static void
-ready(Eface e, uintptr seq)
-{
- USED(seq);
-
- runtime_ready(e.__object);
-}
-
-static FuncVal readyv = {(void(*)(void))ready};
-
-// Put the current goroutine to sleep for ns nanoseconds.
-void
-runtime_tsleep(int64 ns, const char *reason)
-{
- G* g;
- Timer t;
-
- g = runtime_g();
-
- if(ns <= 0)
- return;
-
- t.when = runtime_nanotime() + ns;
- t.period = 0;
- t.fv = &readyv;
- t.arg.__object = g;
- t.seq = 0;
- runtime_lock(&timers);
- addtimer(&t);
- runtime_parkunlock(&timers, reason);
-}
-
-void
-runtime_addtimer(Timer *t)
-{
- runtime_lock(&timers);
- addtimer(t);
- runtime_unlock(&timers);
-}
-
-// Add a timer to the heap and start or kick the timer proc
-// if the new timer is earlier than any of the others.
-static void
-addtimer(Timer *t)
-{
- int32 n;
- Timer **nt;
-
- // when must never be negative; otherwise timerproc will overflow
- // during its delta calculation and never expire other timers.
- if(t->when < 0)
- t->when = (int64)((1ULL<<63)-1);
-
- if(timers.len >= timers.cap) {
- // Grow slice.
- n = 16;
- if(n <= timers.cap)
- n = timers.cap*3 / 2;
- nt = runtime_malloc(n*sizeof nt[0]);
- runtime_memmove(nt, timers.t, timers.len*sizeof nt[0]);
- runtime_free(timers.t);
- timers.t = nt;
- timers.cap = n;
- }
- t->i = timers.len++;
- timers.t[t->i] = t;
- siftup(t->i);
- if(t->i == 0) {
- // siftup moved to top: new earliest deadline.
- if(timers.sleeping) {
- timers.sleeping = false;
- runtime_notewakeup(&timers.waitnote);
- }
- if(timers.rescheduling) {
- timers.rescheduling = false;
- runtime_ready(timers.timerproc);
- }
- }
- if(timers.timerproc == nil) {
- timers.timerproc = __go_go(timerproc, nil);
- timers.timerproc->issystem = true;
- }
- if(debug)
- dumptimers("addtimer");
-}
-
-// Used to force a dereference before the lock is acquired.
-static int32 gi;
-
-// Delete timer t from the heap.
-// Do not need to update the timerproc:
-// if it wakes up early, no big deal.
-bool
-runtime_deltimer(Timer *t)
-{
- int32 i;
-
- // Dereference t so that any panic happens before the lock is held.
- // Discard result, because t might be moving in the heap.
- i = t->i;
- gi = i;
-
- runtime_lock(&timers);
-
- // t may not be registered anymore and may have
- // a bogus i (typically 0, if generated by Go).
- // Verify it before proceeding.
- i = t->i;
- if(i < 0 || i >= timers.len || timers.t[i] != t) {
- runtime_unlock(&timers);
- return false;
- }
-
- timers.len--;
- if(i == timers.len) {
- timers.t[i] = nil;
- } else {
- timers.t[i] = timers.t[timers.len];
- timers.t[timers.len] = nil;
- timers.t[i]->i = i;
- siftup(i);
- siftdown(i);
- }
- if(debug)
- dumptimers("deltimer");
- runtime_unlock(&timers);
- return true;
-}
-
-// Timerproc runs the time-driven events.
-// It sleeps until the next event in the timers heap.
-// If addtimer inserts a new earlier event, addtimer
-// wakes timerproc early.
-static void
-timerproc(void* dummy __attribute__ ((unused)))
-{
- int64 delta, now;
- Timer *t;
- FuncVal *fv;
- void (*f)(Eface, uintptr);
- Eface arg;
- uintptr seq;
-
- for(;;) {
- runtime_lock(&timers);
- timers.sleeping = false;
- now = runtime_nanotime();
- for(;;) {
- if(timers.len == 0) {
- delta = -1;
- break;
- }
- t = timers.t[0];
- delta = t->when - now;
- if(delta > 0)
- break;
- if(t->period > 0) {
- // leave in heap but adjust next time to fire
- t->when += t->period * (1 + -delta/t->period);
- siftdown(0);
- } else {
- // remove from heap
- timers.t[0] = timers.t[--timers.len];
- timers.t[0]->i = 0;
- siftdown(0);
- t->i = -1; // mark as removed
- }
- fv = t->fv;
- f = (void*)t->fv->fn;
- arg = t->arg;
- seq = t->seq;
- runtime_unlock(&timers);
- __builtin_call_with_static_chain(f(arg, seq), fv);
-
- // clear f and arg to avoid leak while sleeping for next timer
- f = nil;
- USED(f);
- arg.__type_descriptor = nil;
- arg.__object = nil;
- USED(&arg);
-
- runtime_lock(&timers);
- }
- if(delta < 0) {
- // No timers left - put goroutine to sleep.
- timers.rescheduling = true;
- runtime_g()->isbackground = true;
- runtime_parkunlock(&timers, "timer goroutine (idle)");
- runtime_g()->isbackground = false;
- continue;
- }
- // At least one timer pending. Sleep until then.
- timers.sleeping = true;
- runtime_noteclear(&timers.waitnote);
- runtime_unlock(&timers);
- runtime_notetsleepg(&timers.waitnote, delta);
- }
-}
-
-// heap maintenance algorithms.
-
-static void
-siftup(int32 i)
-{
- int32 p;
- int64 when;
- Timer **t, *tmp;
-
- t = timers.t;
- when = t[i]->when;
- tmp = t[i];
- while(i > 0) {
- p = (i-1)/4; // parent
- if(when >= t[p]->when)
- break;
- t[i] = t[p];
- t[i]->i = i;
- t[p] = tmp;
- tmp->i = p;
- i = p;
- }
-}
-
-static void
-siftdown(int32 i)
-{
- int32 c, c3, len;
- int64 when, w, w3;
- Timer **t, *tmp;
-
- t = timers.t;
- len = timers.len;
- when = t[i]->when;
- tmp = t[i];
- for(;;) {
- c = i*4 + 1; // left child
- c3 = c + 2; // mid child
- if(c >= len) {
- break;
- }
- w = t[c]->when;
- if(c+1 < len && t[c+1]->when < w) {
- w = t[c+1]->when;
- c++;
- }
- if(c3 < len) {
- w3 = t[c3]->when;
- if(c3+1 < len && t[c3+1]->when < w3) {
- w3 = t[c3+1]->when;
- c3++;
- }
- if(w3 < w) {
- w = w3;
- c = c3;
- }
- }
- if(w >= when)
- break;
- t[i] = t[c];
- t[i]->i = i;
- t[c] = tmp;
- tmp->i = c;
- i = c;
- }
-}
-
-static void
-dumptimers(const char *msg)
-{
- Timer *t;
- int32 i;
-
- runtime_printf("timers: %s\n", msg);
- for(i = 0; i < timers.len; i++) {
- t = timers.t[i];
- runtime_printf("\t%d\t%p:\ti %d when %D period %D fn %p\n",
- i, t, t->i, t->when, t->period, t->fv->fn);
- }
- runtime_printf("\n");
-}
-
-void
-runtime_time_scan(struct Workbuf** wbufp, void (*enqueue1)(struct Workbuf**, Obj))
-{
- enqueue1(wbufp, (Obj){(byte*)&timers, sizeof timers, 0});
-}
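(For reference, a minimal sketch, not part of the patch, of the 4-ary
min-heap index arithmetic the deleted siftup above relies on: parent(i)
is (i-1)/4 and the children of i are i*4+1 .. i*4+4. The function name
and the plain int64_t deadline array are illustrative only.)

#include <stdint.h>

static void
example_siftup (int64_t *when, int32_t i)
{
  int64_t tmp = when[i];	/* deadline being sifted up */

  while (i > 0)
    {
      int32_t p = (i - 1) / 4;	/* parent of node i in a 4-ary heap */
      if (tmp >= when[p])
	break;			/* heap order already holds */
      when[i] = when[p];	/* pull the later deadline down */
      i = p;
    }
  when[i] = tmp;		/* drop the new deadline into place */
}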