author     Ian Lance Taylor <ian@gcc.gnu.org>  2018-10-08 14:21:30 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>  2018-10-08 14:21:30 +0000
commit     3cbb7cbb096134746588d08a469778b11ae6ac73
tree       eff67e0dc5d748e398ea1a7ffa36f6a3bbda072b /libgo/go/runtime
parent     a3368b8ea1bda5edd41900096b10514bcf7c6de7
libgo: update to Go 1.11.1 release
Reviewed-on: https://go-review.googlesource.com/c/140277
From-SVN: r264932
Diffstat (limited to 'libgo/go/runtime')

 libgo/go/runtime/chan.go             | 23
 libgo/go/runtime/mbarrier.go         | 13
 libgo/go/runtime/os_darwin.go        | 11
 libgo/go/runtime/os_netbsd.go        | 31
 libgo/go/runtime/select.go           |  2
 libgo/go/runtime/trace/annotation.go |  8

 6 files changed, 58 insertions(+), 30 deletions(-)
diff --git a/libgo/go/runtime/chan.go b/libgo/go/runtime/chan.go
index 88a8944..cb369ef 100644
--- a/libgo/go/runtime/chan.go
+++ b/libgo/go/runtime/chan.go
@@ -102,7 +102,7 @@ func makechan(t *chantype, size int) *hchan {
 		// Queue or element size is zero.
 		c = (*hchan)(mallocgc(hchanSize, nil, true))
 		// Race detector uses this location for synchronization.
-		c.buf = unsafe.Pointer(c)
+		c.buf = c.raceaddr()
 	case elem.kind&kindNoPointers != 0:
 		// Elements do not contain pointers.
 		// Allocate hchan and buf in one call.
@@ -166,7 +166,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
 	}
 
 	if raceenabled {
-		racereadpc(unsafe.Pointer(c), callerpc, funcPC(chansend))
+		racereadpc(c.raceaddr(), callerpc, funcPC(chansend))
 	}
 
 	// Fast path: check for failed non-blocking operation without acquiring the lock.
@@ -352,8 +352,8 @@ func closechan(c *hchan) {
 
 	if raceenabled {
 		callerpc := getcallerpc()
-		racewritepc(unsafe.Pointer(c), callerpc, funcPC(closechan))
-		racerelease(unsafe.Pointer(c))
+		racewritepc(c.raceaddr(), callerpc, funcPC(closechan))
+		racerelease(c.raceaddr())
 	}
 
 	c.closed = 1
@@ -376,7 +376,7 @@ func closechan(c *hchan) {
 		gp := sg.g
 		gp.param = nil
 		if raceenabled {
-			raceacquireg(gp, unsafe.Pointer(c))
+			raceacquireg(gp, c.raceaddr())
 		}
 		gp.schedlink.set(glist)
 		glist = gp
@@ -395,7 +395,7 @@ func closechan(c *hchan) {
 		gp := sg.g
 		gp.param = nil
 		if raceenabled {
-			raceacquireg(gp, unsafe.Pointer(c))
+			raceacquireg(gp, c.raceaddr())
 		}
 		gp.schedlink.set(glist)
 		glist = gp
@@ -477,7 +477,7 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool)
 
 	if c.closed != 0 && c.qcount == 0 {
 		if raceenabled {
-			raceacquire(unsafe.Pointer(c))
+			raceacquire(c.raceaddr())
 		}
 		unlock(&c.lock)
 		if ep != nil {
@@ -755,6 +755,15 @@ func (q *waitq) dequeue() *sudog {
 	}
 }
 
+func (c *hchan) raceaddr() unsafe.Pointer {
+	// Treat read-like and write-like operations on the channel to
+	// happen at this address. Avoid using the address of qcount
+	// or dataqsiz, because the len() and cap() builtins read
+	// those addresses, and we don't want them racing with
+	// operations like close().
+	return unsafe.Pointer(&c.buf)
+}
+
 func racesync(c *hchan, sg *sudog) {
 	racerelease(chanbuf(c, 0))
 	raceacquireg(sg.g, chanbuf(c, 0))
diff --git a/libgo/go/runtime/mbarrier.go b/libgo/go/runtime/mbarrier.go
index 24e5865..4871315 100644
--- a/libgo/go/runtime/mbarrier.go
+++ b/libgo/go/runtime/mbarrier.go
@@ -309,6 +309,19 @@ func typedmemclr(typ *_type, ptr unsafe.Pointer) {
 	memclrNoHeapPointers(ptr, typ.size)
 }
 
+//go:linkname reflect_typedmemclr reflect.typedmemclr
+func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) {
+	typedmemclr(typ, ptr)
+}
+
+//go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
+func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
+	if typ.kind&kindNoPointers == 0 {
+		bulkBarrierPreWrite(uintptr(ptr), 0, size)
+	}
+	memclrNoHeapPointers(ptr, size)
+}
+
 // memclrHasPointers clears n bytes of typed memory starting at ptr.
 // The caller must ensure that the type of the object at ptr has
 // pointers, usually by checking typ.kind&kindNoPointers. However, ptr
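A note on the raceaddr helper added above: in this runtime, qcount is the first field of hchan, so the old synchronization address unsafe.Pointer(c) coincided with the address the race detector sees the len builtin read. Concurrent len(ch) and close(ch) are legal on a channel, but with the old address the detector could flag that pair as a data race. A minimal sketch of the access pattern the new comment describes (illustrative only, not taken from the patch; whether a report actually fires depends on timing), to be run with `go run -race`:

	package main

	import "time"

	func main() {
		ch := make(chan int, 8)
		done := make(chan struct{})
		go func() {
			defer close(done)
			for i := 0; i < 1000000; i++ {
				_ = len(ch) // instrumented read of the qcount field
			}
		}()
		time.Sleep(time.Millisecond)
		// close used to issue its race-detector write at the hchan base
		// address, which aliases qcount; with &c.buf as the race address
		// it can no longer appear to conflict with len or cap.
		close(ch)
		<-done
	}
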
diff --git a/libgo/go/runtime/os_darwin.go b/libgo/go/runtime/os_darwin.go
index 9597633..235f794 100644
--- a/libgo/go/runtime/os_darwin.go
+++ b/libgo/go/runtime/os_darwin.go
@@ -34,6 +34,10 @@ func semacreate(mp *m) {
 
 //go:nosplit
 func semasleep(ns int64) int32 {
+	var start int64
+	if ns >= 0 {
+		start = nanotime()
+	}
 	mp := getg().m
 	pthread_mutex_lock(&mp.mutex)
 	for {
@@ -43,8 +47,13 @@ func semasleep(ns int64) int32 {
 			return 0
 		}
 		if ns >= 0 {
+			spent := nanotime() - start
+			if spent >= ns {
+				pthread_mutex_unlock(&mp.mutex)
+				return -1
+			}
 			var t timespec
-			t.set_nsec(ns)
+			t.set_nsec(ns - spent)
 			err := pthread_cond_timedwait_relative_np(&mp.cond, &mp.mutex, &t)
 			if err == _ETIMEDOUT {
 				pthread_mutex_unlock(&mp.mutex)
diff --git a/libgo/go/runtime/os_netbsd.go b/libgo/go/runtime/os_netbsd.go
index ea47e5c..16a1192 100644
--- a/libgo/go/runtime/os_netbsd.go
+++ b/libgo/go/runtime/os_netbsd.go
@@ -29,15 +29,9 @@ func semacreate(mp *m) {
 //go:nosplit
 func semasleep(ns int64) int32 {
 	_g_ := getg()
-
-	// Compute sleep deadline.
-	var tsp *timespec
-	var ts timespec
+	var deadline int64
 	if ns >= 0 {
-		var nsec int32
-		ts.set_sec(int64(timediv(ns, 1000000000, &nsec)))
-		ts.set_nsec(nsec)
-		tsp = &ts
+		deadline = nanotime() + ns
 	}
 
 	for {
@@ -50,18 +44,21 @@ func semasleep(ns int64) int32 {
 		}
 
 		// Sleep until unparked by semawakeup or timeout.
+		var tsp *timespec
+		var ts timespec
+		if ns >= 0 {
+			wait := deadline - nanotime()
+			if wait <= 0 {
+				return -1
+			}
+			var nsec int32
+			ts.set_sec(timediv(wait, 1000000000, &nsec))
+			ts.set_nsec(nsec)
+			tsp = &ts
+		}
 		ret := lwp_park(_CLOCK_MONOTONIC, _TIMER_RELTIME, tsp, 0, unsafe.Pointer(&_g_.m.waitsemacount), nil)
 		if ret == _ETIMEDOUT {
 			return -1
-		} else if ret == _EINTR && ns >= 0 {
-			// Avoid sleeping forever if we keep getting
-			// interrupted (for example by the profiling
-			// timer). It would be nicer if tsp upon return had the
-			// remaining time to sleep, but this is good enough.
-			var nsec int32
-			ns /= 2
-			ts.set_sec(timediv(ns, 1000000000, &nsec))
-			ts.set_nsec(nsec)
 		}
 	}
 }
diff --git a/libgo/go/runtime/select.go b/libgo/go/runtime/select.go
index 39c12da..fb8373f 100644
--- a/libgo/go/runtime/select.go
+++ b/libgo/go/runtime/select.go
@@ -426,7 +426,7 @@ rclose:
 		typedmemclr(c.elemtype, cas.elem)
 	}
 	if raceenabled {
-		raceacquire(unsafe.Pointer(c))
+		raceacquire(c.raceaddr())
 	}
 	goto retc
 
diff --git a/libgo/go/runtime/trace/annotation.go b/libgo/go/runtime/trace/annotation.go
index 3545ef3..d5a7d00 100644
--- a/libgo/go/runtime/trace/annotation.go
+++ b/libgo/go/runtime/trace/annotation.go
@@ -24,13 +24,13 @@ type traceContextKey struct{}
 // If the end function is called multiple times, only the first
 // call is used in the latency measurement.
 //
-//   ctx, task := trace.NewTask(ctx, "awesome task")
-//   trace.WithRegion(ctx, prepWork)
+//   ctx, task := trace.NewTask(ctx, "awesomeTask")
+//   trace.WithRegion(ctx, "preparation", prepWork)
 //   // preparation of the task
 //   go func() {  // continue processing the task in a separate goroutine.
 //       defer task.End()
-//       trace.WithRegion(ctx, remainingWork)
-//   }
+//       trace.WithRegion(ctx, "remainingWork", remainingWork)
+//   }()
 func NewTask(pctx context.Context, taskType string) (ctx context.Context, task *Task) {
 	pid := fromContext(pctx).id
 	id := newID()
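The two semasleep changes implement the same fix: pthread_cond_timedwait_relative_np and lwp_park can return early (EINTR, spurious wakeups), and re-arming the wait with the full original timeout on every pass through the loop could oversleep without bound; the old NetBSD code even halved ns as a workaround. The patch instead fixes a deadline once and recomputes only the remaining time on each retry. A self-contained sketch of that pattern with hypothetical stand-ins (waitOnce simulates one interruptible park; none of these names come from the runtime):

	package main

	import (
		"fmt"
		"time"
	)

	// waitOnce simulates one park/wakeup. It reports whether the wait
	// timed out; a receive on wake stands in for EINTR or a spurious
	// wakeup. Hypothetical helper, not the runtime's API.
	func waitOnce(wake <-chan struct{}, d time.Duration) (timedOut bool) {
		select {
		case <-wake:
			return false
		case <-time.After(d):
			return true
		}
	}

	// sleepAtMost mirrors the patched semasleep logic: fix a deadline
	// up front and, after every early wakeup, wait only for the time
	// actually left instead of restarting the full timeout.
	func sleepAtMost(wake <-chan struct{}, total time.Duration) {
		deadline := time.Now().Add(total)
		for {
			remaining := time.Until(deadline)
			if remaining <= 0 {
				return // deadline already passed
			}
			if waitOnce(wake, remaining) {
				return // timed out cleanly
			}
			// Interrupted early: loop and recompute the remainder.
		}
	}

	func main() {
		wake := make(chan struct{})
		go func() { // interruptions, e.g. a profiling timer
			for i := 0; i < 5; i++ {
				time.Sleep(10 * time.Millisecond)
				wake <- struct{}{}
			}
		}()
		start := time.Now()
		sleepAtMost(wake, 100*time.Millisecond)
		fmt.Println("slept for", time.Since(start)) // ~100ms
	}

With the pre-patch behavior (restarting the full timeout after each interruption), the total sleep in this scenario would grow with every wakeup instead of staying close to the requested 100ms.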