author    Ian Lance Taylor <ian@gcc.gnu.org>    2016-10-18 14:38:29 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>    2016-10-18 14:38:29 +0000
commit    812ba636c7b12f2c503e34aaf9e2da50d5777b82 (patch)
tree      c81659977f9a532c5d4b53872301ba5e67c4166f /libgo/go
parent    f5de494c59532fdad30097af4185b2ce74700984 (diff)
runtime: copy netpoll code from Go 1.7 runtime
Reviewed-on: https://go-review.googlesource.com/31325

From-SVN: r241307
Diffstat (limited to 'libgo/go')
-rw-r--r--  libgo/go/runtime/netpoll.go          | 452
-rw-r--r--  libgo/go/runtime/netpoll_epoll.go    | 116
-rw-r--r--  libgo/go/runtime/netpoll_kqueue.go   | 110
-rw-r--r--  libgo/go/runtime/netpoll_nacl.go     |  26
-rw-r--r--  libgo/go/runtime/netpoll_solaris.go  | 225
-rw-r--r--  libgo/go/runtime/netpoll_stub.go     |  19
-rw-r--r--  libgo/go/runtime/netpoll_windows.go  | 145
-rw-r--r--  libgo/go/runtime/stubs.go            |   8
-rw-r--r--  libgo/go/runtime/time.go             |   4
9 files changed, 1100 insertions(+), 5 deletions(-)
diff --git a/libgo/go/runtime/netpoll.go b/libgo/go/runtime/netpoll.go
new file mode 100644
index 0000000..729b597
--- /dev/null
+++ b/libgo/go/runtime/netpoll.go
@@ -0,0 +1,452 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// Export temporarily for gccgo's C code to call:
+//go:linkname netpoll runtime.netpoll
+
+// Integrated network poller (platform-independent part).
+// A particular implementation (epoll/kqueue) must define the following functions:
+// func netpollinit() // to initialize the poller
+// func netpollopen(fd uintptr, pd *pollDesc) int32 // to arm edge-triggered notifications
+// and associate fd with pd.
+// An implementation must call the following function to denote that the pd is ready.
+// func netpollready(gpp *guintptr, pd *pollDesc, mode int32)
+
+// pollDesc contains 2 binary semaphores, rg and wg, to park reader and writer
+// goroutines respectively. The semaphore can be in the following states:
+// pdReady - io readiness notification is pending;
+//           a goroutine consumes the notification by changing the state to nil.
+// pdWait - a goroutine prepares to park on the semaphore, but not yet parked;
+//          the goroutine commits to park by changing the state to G pointer,
+//          or, alternatively, concurrent io notification changes the state to pdReady,
+//          or, alternatively, concurrent timeout/close changes the state to nil.
+// G pointer - the goroutine is blocked on the semaphore;
+//             io notification or timeout/close changes the state to pdReady or nil
+//             respectively and unparks the goroutine.
+// nil - nothing of the above.
+const (
+ pdReady uintptr = 1
+ pdWait uintptr = 2
+)
+
+const pollBlockSize = 4 * 1024
+
+// Network poller descriptor.
+type pollDesc struct {
+ link *pollDesc // in pollcache, protected by pollcache.lock
+
+ // The lock protects pollOpen, pollSetDeadline, pollUnblock and deadlineimpl operations.
+ // This fully covers seq, rt and wt variables. fd is constant throughout the PollDesc lifetime.
+ // pollReset, pollWait, pollWaitCanceled and runtime·netpollready (IO readiness notification)
+ // proceed w/o taking the lock. So closing, rg, rd, wg and wd are manipulated
+ // in a lock-free way by all operations.
+ // NOTE(dvyukov): the following code uses uintptr to store *g (rg/wg),
+	// which will blow up when the GC starts moving objects.
+ lock mutex // protects the following fields
+ fd uintptr
+ closing bool
+ seq uintptr // protects from stale timers and ready notifications
+ rg uintptr // pdReady, pdWait, G waiting for read or nil
+ rt timer // read deadline timer (set if rt.f != nil)
+ rd int64 // read deadline
+ wg uintptr // pdReady, pdWait, G waiting for write or nil
+ wt timer // write deadline timer
+ wd int64 // write deadline
+ user uint32 // user settable cookie
+}
+
+type pollCache struct {
+ lock mutex
+ first *pollDesc
+	// pollDesc objects must be type-stable,
+	// because we can get a ready notification from epoll/kqueue
+	// after the descriptor is closed or reused.
+	// Stale notifications are detected using the seq variable,
+	// which is incremented when deadlines are changed or a descriptor is reused.
+}
+
+var (
+ netpollInited uint32
+ pollcache pollCache
+)
+
+//go:linkname net_runtime_pollServerInit net.runtime_pollServerInit
+func net_runtime_pollServerInit() {
+ netpollinit()
+ atomic.Store(&netpollInited, 1)
+}
+
+func netpollinited() bool {
+ return atomic.Load(&netpollInited) != 0
+}
+
+//go:linkname net_runtime_pollOpen net.runtime_pollOpen
+func net_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
+ pd := pollcache.alloc()
+ lock(&pd.lock)
+ if pd.wg != 0 && pd.wg != pdReady {
+ throw("netpollOpen: blocked write on free descriptor")
+ }
+ if pd.rg != 0 && pd.rg != pdReady {
+ throw("netpollOpen: blocked read on free descriptor")
+ }
+ pd.fd = fd
+ pd.closing = false
+ pd.seq++
+ pd.rg = 0
+ pd.rd = 0
+ pd.wg = 0
+ pd.wd = 0
+ unlock(&pd.lock)
+
+ var errno int32
+ errno = netpollopen(fd, pd)
+ return pd, int(errno)
+}
+
+//go:linkname net_runtime_pollClose net.runtime_pollClose
+func net_runtime_pollClose(pd *pollDesc) {
+ if !pd.closing {
+ throw("netpollClose: close w/o unblock")
+ }
+ if pd.wg != 0 && pd.wg != pdReady {
+ throw("netpollClose: blocked write on closing descriptor")
+ }
+ if pd.rg != 0 && pd.rg != pdReady {
+ throw("netpollClose: blocked read on closing descriptor")
+ }
+ netpollclose(pd.fd)
+ pollcache.free(pd)
+}
+
+func (c *pollCache) free(pd *pollDesc) {
+ lock(&c.lock)
+ pd.link = c.first
+ c.first = pd
+ unlock(&c.lock)
+}
+
+//go:linkname net_runtime_pollReset net.runtime_pollReset
+func net_runtime_pollReset(pd *pollDesc, mode int) int {
+ err := netpollcheckerr(pd, int32(mode))
+ if err != 0 {
+ return err
+ }
+ if mode == 'r' {
+ pd.rg = 0
+ } else if mode == 'w' {
+ pd.wg = 0
+ }
+ return 0
+}
+
+//go:linkname net_runtime_pollWait net.runtime_pollWait
+func net_runtime_pollWait(pd *pollDesc, mode int) int {
+ err := netpollcheckerr(pd, int32(mode))
+ if err != 0 {
+ return err
+ }
+	// For now, only Solaris uses level-triggered IO.
+ if GOOS == "solaris" {
+ netpollarm(pd, mode)
+ }
+ for !netpollblock(pd, int32(mode), false) {
+ err = netpollcheckerr(pd, int32(mode))
+ if err != 0 {
+ return err
+ }
+		// This can happen if the timeout fired and unblocked us,
+		// but the timeout was then reset before we had a chance
+		// to run. Pretend it did not happen and retry.
+ }
+ return 0
+}
+
+//go:linkname net_runtime_pollWaitCanceled net.runtime_pollWaitCanceled
+func net_runtime_pollWaitCanceled(pd *pollDesc, mode int) {
+	// This function is used only on Windows after a failed attempt to cancel
+	// a pending async IO operation. Wait for ioready; ignore closing or timeouts.
+ for !netpollblock(pd, int32(mode), true) {
+ }
+}
+
+//go:linkname net_runtime_pollSetDeadline net.runtime_pollSetDeadline
+func net_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
+ lock(&pd.lock)
+ if pd.closing {
+ unlock(&pd.lock)
+ return
+ }
+ pd.seq++ // invalidate current timers
+ // Reset current timers.
+ if pd.rt.f != nil {
+ deltimer(&pd.rt)
+ pd.rt.f = nil
+ }
+ if pd.wt.f != nil {
+ deltimer(&pd.wt)
+ pd.wt.f = nil
+ }
+ // Setup new timers.
+ if d != 0 && d <= nanotime() {
+ d = -1
+ }
+ if mode == 'r' || mode == 'r'+'w' {
+ pd.rd = d
+ }
+ if mode == 'w' || mode == 'r'+'w' {
+ pd.wd = d
+ }
+ if pd.rd > 0 && pd.rd == pd.wd {
+ pd.rt.f = netpollDeadline
+ pd.rt.when = pd.rd
+ // Copy current seq into the timer arg.
+ // Timer func will check the seq against current descriptor seq,
+ // if they differ the descriptor was reused or timers were reset.
+ pd.rt.arg = pd
+ pd.rt.seq = pd.seq
+ addtimer(&pd.rt)
+ } else {
+ if pd.rd > 0 {
+ pd.rt.f = netpollReadDeadline
+ pd.rt.when = pd.rd
+ pd.rt.arg = pd
+ pd.rt.seq = pd.seq
+ addtimer(&pd.rt)
+ }
+ if pd.wd > 0 {
+ pd.wt.f = netpollWriteDeadline
+ pd.wt.when = pd.wd
+ pd.wt.arg = pd
+ pd.wt.seq = pd.seq
+ addtimer(&pd.wt)
+ }
+ }
+ // If we set the new deadline in the past, unblock currently pending IO if any.
+ var rg, wg *g
+ atomicstorep(unsafe.Pointer(&wg), nil) // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock
+ if pd.rd < 0 {
+ rg = netpollunblock(pd, 'r', false)
+ }
+ if pd.wd < 0 {
+ wg = netpollunblock(pd, 'w', false)
+ }
+ unlock(&pd.lock)
+ if rg != nil {
+ goready(rg, 3)
+ }
+ if wg != nil {
+ goready(wg, 3)
+ }
+}
+
+//go:linkname net_runtime_pollUnblock net.runtime_pollUnblock
+func net_runtime_pollUnblock(pd *pollDesc) {
+ lock(&pd.lock)
+ if pd.closing {
+ throw("netpollUnblock: already closing")
+ }
+ pd.closing = true
+ pd.seq++
+ var rg, wg *g
+ atomicstorep(unsafe.Pointer(&rg), nil) // full memory barrier between store to closing and read of rg/wg in netpollunblock
+ rg = netpollunblock(pd, 'r', false)
+ wg = netpollunblock(pd, 'w', false)
+ if pd.rt.f != nil {
+ deltimer(&pd.rt)
+ pd.rt.f = nil
+ }
+ if pd.wt.f != nil {
+ deltimer(&pd.wt)
+ pd.wt.f = nil
+ }
+ unlock(&pd.lock)
+ if rg != nil {
+ goready(rg, 3)
+ }
+ if wg != nil {
+ goready(wg, 3)
+ }
+}
+
+// Make pd ready; newly runnable goroutines (if any) are added to the
+// list headed by *gpp.
+// May run during STW, so write barriers are not allowed.
+//go:nowritebarrier
+func netpollready(gpp *guintptr, pd *pollDesc, mode int32) {
+ var rg, wg guintptr
+ if mode == 'r' || mode == 'r'+'w' {
+ rg.set(netpollunblock(pd, 'r', true))
+ }
+ if mode == 'w' || mode == 'r'+'w' {
+ wg.set(netpollunblock(pd, 'w', true))
+ }
+ if rg != 0 {
+ rg.ptr().schedlink = *gpp
+ *gpp = rg
+ }
+ if wg != 0 {
+ wg.ptr().schedlink = *gpp
+ *gpp = wg
+ }
+}
+
+func netpollcheckerr(pd *pollDesc, mode int32) int {
+ if pd.closing {
+ return 1 // errClosing
+ }
+ if (mode == 'r' && pd.rd < 0) || (mode == 'w' && pd.wd < 0) {
+ return 2 // errTimeout
+ }
+ return 0
+}
+
+func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool {
+ return atomic.Casuintptr((*uintptr)(gpp), pdWait, uintptr(unsafe.Pointer(gp)))
+}
+
+// returns true if IO is ready, or false if timed out or closed
+// waitio - wait only for completed IO, ignore errors
+func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
+ gpp := &pd.rg
+ if mode == 'w' {
+ gpp = &pd.wg
+ }
+
+	// set the gpp semaphore to pdWait
+ for {
+ old := *gpp
+ if old == pdReady {
+ *gpp = 0
+ return true
+ }
+ if old != 0 {
+ throw("netpollblock: double wait")
+ }
+ if atomic.Casuintptr(gpp, 0, pdWait) {
+ break
+ }
+ }
+
+	// We need to recheck error states after setting gpp to pdWait.
+	// This is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
+	// do the opposite: store to closing/rd/wd, membarrier, then load rg/wg.
+ if waitio || netpollcheckerr(pd, mode) == 0 {
+ gopark(netpollblockcommit, unsafe.Pointer(gpp), "IO wait", traceEvGoBlockNet, 5)
+ }
+	// be careful not to lose a concurrent pdReady notification
+ old := atomic.Xchguintptr(gpp, 0)
+ if old > pdWait {
+ throw("netpollblock: corrupted state")
+ }
+ return old == pdReady
+}
+
+func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
+ gpp := &pd.rg
+ if mode == 'w' {
+ gpp = &pd.wg
+ }
+
+ for {
+ old := *gpp
+ if old == pdReady {
+ return nil
+ }
+ if old == 0 && !ioready {
+ // Only set READY for ioready. runtime_pollWait
+ // will check for timeout/cancel before waiting.
+ return nil
+ }
+ var new uintptr
+ if ioready {
+ new = pdReady
+ }
+ if atomic.Casuintptr(gpp, old, new) {
+ if old == pdReady || old == pdWait {
+ old = 0
+ }
+ return (*g)(unsafe.Pointer(old))
+ }
+ }
+}
+
+func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
+ lock(&pd.lock)
+ // Seq arg is seq when the timer was set.
+ // If it's stale, ignore the timer event.
+ if seq != pd.seq {
+ // The descriptor was reused or timers were reset.
+ unlock(&pd.lock)
+ return
+ }
+ var rg *g
+ if read {
+ if pd.rd <= 0 || pd.rt.f == nil {
+ throw("netpolldeadlineimpl: inconsistent read deadline")
+ }
+ pd.rd = -1
+ atomicstorep(unsafe.Pointer(&pd.rt.f), nil) // full memory barrier between store to rd and load of rg in netpollunblock
+ rg = netpollunblock(pd, 'r', false)
+ }
+ var wg *g
+ if write {
+ if pd.wd <= 0 || pd.wt.f == nil && !read {
+ throw("netpolldeadlineimpl: inconsistent write deadline")
+ }
+ pd.wd = -1
+ atomicstorep(unsafe.Pointer(&pd.wt.f), nil) // full memory barrier between store to wd and load of wg in netpollunblock
+ wg = netpollunblock(pd, 'w', false)
+ }
+ unlock(&pd.lock)
+ if rg != nil {
+ goready(rg, 0)
+ }
+ if wg != nil {
+ goready(wg, 0)
+ }
+}
+
+func netpollDeadline(arg interface{}, seq uintptr) {
+ netpolldeadlineimpl(arg.(*pollDesc), seq, true, true)
+}
+
+func netpollReadDeadline(arg interface{}, seq uintptr) {
+ netpolldeadlineimpl(arg.(*pollDesc), seq, true, false)
+}
+
+func netpollWriteDeadline(arg interface{}, seq uintptr) {
+ netpolldeadlineimpl(arg.(*pollDesc), seq, false, true)
+}
+
+func (c *pollCache) alloc() *pollDesc {
+ lock(&c.lock)
+ if c.first == nil {
+ const pdSize = unsafe.Sizeof(pollDesc{})
+ n := pollBlockSize / pdSize
+ if n == 0 {
+ n = 1
+ }
+ // Must be in non-GC memory because can be referenced
+ // only from epoll/kqueue internals.
+ mem := persistentalloc(n*pdSize, 0, &memstats.other_sys)
+ for i := uintptr(0); i < n; i++ {
+ pd := (*pollDesc)(add(mem, i*pdSize))
+ pd.link = c.first
+ c.first = pd
+ }
+ }
+ pd := c.first
+ c.first = pd.link
+ unlock(&c.lock)
+ return pd
+}
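
The rg/wg discipline documented at the top of netpoll.go is easiest to see in isolation. Below is a minimal standalone sketch of that state machine, not runtime code: the names sem, waiter, block and unblock are invented here, a channel stands in for gopark/goready, and the uintptr-as-pointer trick is deliberately the same hack the NOTE(dvyukov) comment warns about.

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

const (
	semReady uintptr = 1 // plays the role of pdReady
	semWait  uintptr = 2 // plays the role of pdWait
)

// waiter stands in for a *g; done stands in for park/unpark.
type waiter struct{ done chan struct{} }

// block mirrors netpollblock: consume a pending semReady, or move
// nil -> semWait -> waiter pointer and park.
func block(sem *uintptr, w *waiter) bool {
	for {
		old := atomic.LoadUintptr(sem)
		if old == semReady {
			if atomic.CompareAndSwapUintptr(sem, semReady, 0) {
				return true // consumed the pending notification
			}
			continue
		}
		if old != 0 {
			panic("double wait")
		}
		if atomic.CompareAndSwapUintptr(sem, 0, semWait) {
			break
		}
	}
	// Commit to parking (netpollblockcommit's CAS). Storing a pointer
	// in a uintptr is only tolerable in this toy; a moving GC would
	// break it, as the runtime comment notes.
	if atomic.CompareAndSwapUintptr(sem, semWait, uintptr(unsafe.Pointer(w))) {
		<-w.done // "parked" until unblock closes the channel
		return true
	}
	// A concurrent notification beat the commit; collect it.
	return atomic.SwapUintptr(sem, 0) == semReady
}

// unblock mirrors netpollunblock with ioready=true: set semReady and
// wake the committed waiter, if any.
func unblock(sem *uintptr) {
	for {
		old := atomic.LoadUintptr(sem)
		if old == semReady {
			return
		}
		if atomic.CompareAndSwapUintptr(sem, old, semReady) {
			if old != 0 && old != semWait {
				close((*waiter)(unsafe.Pointer(old)).done) // unpark
			}
			return
		}
	}
}

func main() {
	var sem uintptr
	w := &waiter{done: make(chan struct{})}
	go unblock(&sem)
	fmt.Println("io ready:", block(&sem, w))
}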
diff --git a/libgo/go/runtime/netpoll_epoll.go b/libgo/go/runtime/netpoll_epoll.go
new file mode 100644
index 0000000..777150e
--- /dev/null
+++ b/libgo/go/runtime/netpoll_epoll.go
@@ -0,0 +1,116 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package runtime
+
+import "unsafe"
+
+//extern epoll_create
+func epollcreate(size int32) int32
+
+//extern epoll_create1
+func epollcreate1(flags int32) int32
+
+//go:noescape
+//extern epoll_ctl
+func epollctl(epfd, op, fd int32, ev *epollevent) int32
+
+//go:noescape
+//extern epoll_wait
+func epollwait(epfd int32, ev *epollevent, nev, timeout int32) int32
+
+//extern __go_fcntl_uintptr
+func fcntlUintptr(fd, cmd, arg uintptr) (uintptr, uintptr)
+
+func closeonexec(fd int32) {
+ fcntlUintptr(uintptr(fd), _F_SETFD, _FD_CLOEXEC)
+}
+
+var (
+ epfd int32 = -1 // epoll descriptor
+)
+
+func netpollinit() {
+ epfd = epollcreate1(_EPOLL_CLOEXEC)
+ if epfd >= 0 {
+ return
+ }
+ epfd = epollcreate(1024)
+ if epfd >= 0 {
+ closeonexec(epfd)
+ return
+ }
+ println("netpollinit: failed to create epoll descriptor", errno())
+ throw("netpollinit: failed to create descriptor")
+}
+
+func netpollopen(fd uintptr, pd *pollDesc) int32 {
+ var ev epollevent
+ ev.events = _EPOLLIN | _EPOLLOUT | _EPOLLRDHUP | _EPOLLET
+ *(**pollDesc)(unsafe.Pointer(&ev.data)) = pd
+ if epollctl(epfd, _EPOLL_CTL_ADD, int32(fd), &ev) < 0 {
+ return int32(errno())
+ }
+ return 0
+}
+
+func netpollclose(fd uintptr) int32 {
+ var ev epollevent
+ if epollctl(epfd, _EPOLL_CTL_DEL, int32(fd), &ev) < 0 {
+ return int32(errno())
+ }
+ return 0
+}
+
+func netpollarm(pd *pollDesc, mode int) {
+ throw("unused")
+}
+
+// polls for ready network connections
+// returns list of goroutines that become runnable
+func netpoll(block bool) *g {
+ if epfd == -1 {
+ return nil
+ }
+ waitms := int32(-1)
+ if !block {
+ waitms = 0
+ }
+ var events [128]epollevent
+retry:
+ n := epollwait(epfd, &events[0], int32(len(events)), waitms)
+ if n < 0 {
+ e := errno()
+ if e != _EINTR {
+ println("runtime: epollwait on fd", epfd, "failed with", e)
+ throw("epollwait failed")
+ }
+ goto retry
+ }
+ var gp guintptr
+ for i := int32(0); i < n; i++ {
+ ev := &events[i]
+ if ev.events == 0 {
+ continue
+ }
+ var mode int32
+ if ev.events&(_EPOLLIN|_EPOLLRDHUP|_EPOLLHUP|_EPOLLERR) != 0 {
+ mode += 'r'
+ }
+ if ev.events&(_EPOLLOUT|_EPOLLHUP|_EPOLLERR) != 0 {
+ mode += 'w'
+ }
+ if mode != 0 {
+ pd := *(**pollDesc)(unsafe.Pointer(&ev.data))
+
+ netpollready(&gp, pd, mode)
+ }
+ }
+ if block && gp == 0 {
+ goto retry
+ }
+ return gp.ptr()
+}
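
For readers who have not used the epoll interface that the //extern declarations above bind to, the same create/register/wait sequence can be sketched with the standard syscall package. This is a hedged, Linux-only illustration rather than the runtime's code: it registers plain level-triggered interest on a pipe to keep the example small, whereas netpollopen also sets _EPOLLET and _EPOLLRDHUP.

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// netpollinit: create the epoll instance.
	epfd, err := syscall.EpollCreate1(syscall.EPOLL_CLOEXEC)
	if err != nil {
		panic(err)
	}
	defer syscall.Close(epfd)

	// netpollopen: register a descriptor. A pipe stands in for a socket.
	var p [2]int
	if err := syscall.Pipe(p[:]); err != nil {
		panic(err)
	}
	ev := syscall.EpollEvent{Events: syscall.EPOLLIN, Fd: int32(p[0])}
	if err := syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, p[0], &ev); err != nil {
		panic(err)
	}

	syscall.Write(p[1], []byte("x")) // make p[0] readable

	// netpoll: wait for readiness. -1 blocks, 0 would poll,
	// matching the block/!block split above.
	events := make([]syscall.EpollEvent, 128)
	n, err := syscall.EpollWait(epfd, events, -1)
	if err != nil {
		panic(err)
	}
	for i := 0; i < n; i++ {
		if events[i].Events&syscall.EPOLLIN != 0 {
			fmt.Println("fd", events[i].Fd, "is readable")
		}
	}
}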
diff --git a/libgo/go/runtime/netpoll_kqueue.go b/libgo/go/runtime/netpoll_kqueue.go
new file mode 100644
index 0000000..eae4f21
--- /dev/null
+++ b/libgo/go/runtime/netpoll_kqueue.go
@@ -0,0 +1,110 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package runtime
+
+// Integrated network poller (kqueue-based implementation).
+
+import "unsafe"
+
+//extern kqueue
+func kqueue() int32
+
+//go:noescape
+//extern kevent
+func kevent(kq int32, ch *keventt, nch uintptr, ev *keventt, nev uintptr, ts *timespec) int32
+
+//extern __go_fcntl_uintptr
+func fcntlUintptr(fd, cmd, arg uintptr) (uintptr, uintptr)
+
+func closeonexec(fd int32) {
+ fcntlUintptr(uintptr(fd), _F_SETFD, _FD_CLOEXEC)
+}
+
+var (
+ kq int32 = -1
+)
+
+func netpollinit() {
+ kq = kqueue()
+ if kq < 0 {
+ println("netpollinit: kqueue failed with", errno())
+ throw("netpollinit: kqueue failed")
+ }
+ closeonexec(kq)
+}
+
+func netpollopen(fd uintptr, pd *pollDesc) int32 {
+ // Arm both EVFILT_READ and EVFILT_WRITE in edge-triggered mode (EV_CLEAR)
+ // for the whole fd lifetime. The notifications are automatically unregistered
+ // when fd is closed.
+ var ev [2]keventt
+ *(*uintptr)(unsafe.Pointer(&ev[0].ident)) = fd
+ ev[0].filter = _EVFILT_READ
+ ev[0].flags = _EV_ADD | _EV_CLEAR
+ ev[0].fflags = 0
+ ev[0].data = 0
+ ev[0].udata = (*byte)(unsafe.Pointer(pd))
+ ev[1] = ev[0]
+ ev[1].filter = _EVFILT_WRITE
+ n := kevent(kq, &ev[0], 2, nil, 0, nil)
+ if n < 0 {
+ return int32(errno())
+ }
+ return 0
+}
+
+func netpollclose(fd uintptr) int32 {
+ // Don't need to unregister because calling close()
+ // on fd will remove any kevents that reference the descriptor.
+ return 0
+}
+
+func netpollarm(pd *pollDesc, mode int) {
+ throw("unused")
+}
+
+// Polls for ready network connections.
+// Returns list of goroutines that become runnable.
+func netpoll(block bool) *g {
+ if kq == -1 {
+ return nil
+ }
+ var tp *timespec
+ var ts timespec
+ if !block {
+ tp = &ts
+ }
+ var events [64]keventt
+retry:
+ n := kevent(kq, nil, 0, &events[0], uintptr(len(events)), tp)
+ if n < 0 {
+ e := errno()
+ if e != _EINTR {
+ println("runtime: kevent on fd", kq, "failed with", e)
+ throw("kevent failed")
+ }
+ goto retry
+ }
+ var gp guintptr
+ for i := 0; i < int(n); i++ {
+ ev := &events[i]
+ var mode int32
+ if ev.filter == _EVFILT_READ {
+ mode += 'r'
+ }
+ if ev.filter == _EVFILT_WRITE {
+ mode += 'w'
+ }
+ if mode != 0 {
+ netpollready(&gp, (*pollDesc)(unsafe.Pointer(ev.udata)), mode)
+ }
+ }
+ if block && gp == 0 {
+ goto retry
+ }
+ return gp.ptr()
+}
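
The kqueue flavor of the same sequence, again as a hedged sketch against the standard syscall package rather than the //extern bindings above (BSD/darwin only). As in netpollopen, EV_CLEAR requests edge-triggered delivery.

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// netpollinit: create the kernel queue.
	kq, err := syscall.Kqueue()
	if err != nil {
		panic(err)
	}
	defer syscall.Close(kq)

	var p [2]int
	if err := syscall.Pipe(p[:]); err != nil {
		panic(err)
	}

	// netpollopen: arm EVFILT_READ with EV_ADD|EV_CLEAR, as above.
	var change syscall.Kevent_t
	syscall.SetKevent(&change, p[0], syscall.EVFILT_READ, syscall.EV_ADD|syscall.EV_CLEAR)
	if _, err := syscall.Kevent(kq, []syscall.Kevent_t{change}, nil, nil); err != nil {
		panic(err)
	}

	syscall.Write(p[1], []byte("x")) // make p[0] readable

	// netpoll: a nil timeout blocks, a zero timespec would poll.
	events := make([]syscall.Kevent_t, 64)
	n, err := syscall.Kevent(kq, nil, events, nil)
	if err != nil {
		panic(err)
	}
	for i := 0; i < n; i++ {
		if events[i].Filter == syscall.EVFILT_READ {
			fmt.Println("fd", events[i].Ident, "is readable")
		}
	}
}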
diff --git a/libgo/go/runtime/netpoll_nacl.go b/libgo/go/runtime/netpoll_nacl.go
new file mode 100644
index 0000000..5cbc300
--- /dev/null
+++ b/libgo/go/runtime/netpoll_nacl.go
@@ -0,0 +1,26 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Fake network poller for NaCl.
+// Should never be used, because NaCl network connections do not honor "SetNonblock".
+
+package runtime
+
+func netpollinit() {
+}
+
+func netpollopen(fd uintptr, pd *pollDesc) int32 {
+ return 0
+}
+
+func netpollclose(fd uintptr) int32 {
+ return 0
+}
+
+func netpollarm(pd *pollDesc, mode int) {
+}
+
+func netpoll(block bool) *g {
+ return nil
+}
diff --git a/libgo/go/runtime/netpoll_solaris.go b/libgo/go/runtime/netpoll_solaris.go
new file mode 100644
index 0000000..cc6754c
--- /dev/null
+++ b/libgo/go/runtime/netpoll_solaris.go
@@ -0,0 +1,225 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// Solaris runtime-integrated network poller.
+//
+// Solaris uses event ports for scalable network I/O. Event
+// ports are level-triggered, unlike epoll and kqueue which
+// can be configured in both level-triggered and edge-triggered
+// mode. Level triggering means we have to keep track of a few things
+// ourselves. After we receive an event for a file descriptor,
+// it's our responsibility to ask again to be notified for future
+// events for that descriptor. When doing this we must keep track of
+// what kind of events the goroutines are currently interested in,
+// for example a fd may be open both for reading and writing.
+//
+// A description of the high level operation of this code
+// follows. Networking code will get a file descriptor by some means
+// and will register it with the netpolling mechanism by a code path
+// that eventually calls runtime·netpollopen. runtime·netpollopen
+// calls port_associate with an empty event set. That means that we
+// will not receive any events at this point. The association needs
+// to be done at this early point because we need to process the I/O
+// readiness notification at some point in the future. If I/O becomes
+// ready while nobody is listening, then when we finally do care about
+// it, nobody will tell us anymore.
+//
+// Besides calling runtime·netpollopen, the networking code paths
+// will call runtime·netpollarm each time goroutines are interested
+// in doing network I/O. Because now we know what kind of I/O we
+// are interested in (reading/writing), we can call port_associate
+// passing the correct type of event set (POLLIN/POLLOUT). As we made
+// sure to have already associated the file descriptor with the port,
+// when we now call port_associate, we will unblock the main poller
+// loop (in runtime·netpoll) right away if the socket is actually
+// ready for I/O.
+//
+// The main poller loop runs in its own thread waiting for events
+// using port_getn. When an event happens, it will tell the scheduler
+// about it using runtime·netpollready. Besides doing this, it must
+// also re-associate the events that were not part of this current
+// notification with the file descriptor. Failing to do this would
+// mean each notification would prevent concurrent code from using
+// the same file descriptor in parallel.
+//
+// The logic dealing with re-associations is encapsulated in
+// runtime·netpollupdate. This function takes care to associate the
+// descriptor only with the subset of events that were previously
+// part of the association, except the one that just happened. We
+// can't re-associate with that right away, because event ports
+// are level triggered so it would cause a busy loop. Instead, that
+// association is effected only by the runtime·netpollarm code path,
+// when Go code actually asks for I/O.
+//
+// The open and arming mechanisms are serialized using the lock
+// inside PollDesc. This is required because the netpoll loop runs
+// asynchronously with respect to other Go code and by the time we get
+// to call port_associate to update the association in the loop, the
+// file descriptor might have been closed and reopened already. The
+// lock allows runtime·netpollupdate to be called synchronously from
+// the loop thread while preventing other threads operating on the
+// same PollDesc, so once we unblock in the main loop, until we loop
+// again we know for sure we are always talking about the same file
+// descriptor and can safely access the data we want (the event set).
+
+//extern __go_fcntl_uintptr
+func fcntlUintptr(fd, cmd, arg uintptr) (uintptr, uintptr)
+
+func fcntl(fd, cmd int32, arg uintptr) int32 {
+ r, _ := fcntlUintptr(uintptr(fd), uintptr(cmd), arg)
+ return int32(r)
+}
+
+//extern port_create
+func port_create() int32
+
+//extern port_associate
+func port_associate(port, source int32, object uintptr, events uint32, user uintptr) int32
+
+//extern port_dissociate
+func port_dissociate(port, source int32, object uintptr) int32
+
+//extern port_getn
+func port_getn(port int32, evs *portevent, max uint32, nget *uint32, timeout *timespec) int32
+
+var portfd int32 = -1
+
+func netpollinit() {
+ portfd = port_create()
+ if portfd >= 0 {
+ fcntl(portfd, _F_SETFD, _FD_CLOEXEC)
+ return
+ }
+
+ print("netpollinit: failed to create port (", errno(), ")\n")
+ throw("netpollinit: failed to create port")
+}
+
+func netpollopen(fd uintptr, pd *pollDesc) int32 {
+ lock(&pd.lock)
+ // We don't register for any specific type of events yet, that's
+ // netpollarm's job. We merely ensure we call port_associate before
+ // asynchronous connect/accept completes, so when we actually want
+ // to do any I/O, the call to port_associate (from netpollarm,
+ // with the interested event set) will unblock port_getn right away
+ // because of the I/O readiness notification.
+ pd.user = 0
+ r := port_associate(portfd, _PORT_SOURCE_FD, fd, 0, uintptr(unsafe.Pointer(pd)))
+ unlock(&pd.lock)
+ if r < 0 {
+ return int32(errno())
+ }
+ return 0
+}
+
+func netpollclose(fd uintptr) int32 {
+ if port_dissociate(portfd, _PORT_SOURCE_FD, fd) < 0 {
+ return int32(errno())
+ }
+ return 0
+}
+
+// Updates the association with a new set of interested events. After
+// this call, port_getn will return one and only one event for that
+// particular descriptor, so this function needs to be called again.
+func netpollupdate(pd *pollDesc, set, clear uint32) {
+ if pd.closing {
+ return
+ }
+
+ old := pd.user
+ events := (old & ^clear) | set
+ if old == events {
+ return
+ }
+
+ if events != 0 && port_associate(portfd, _PORT_SOURCE_FD, pd.fd, events, uintptr(unsafe.Pointer(pd))) != 0 {
+ print("netpollupdate: failed to associate (", errno(), ")\n")
+ throw("netpollupdate: failed to associate")
+ }
+ pd.user = events
+}
+
+// subscribe the fd to the port such that port_getn will return one event.
+func netpollarm(pd *pollDesc, mode int) {
+ lock(&pd.lock)
+ switch mode {
+ case 'r':
+ netpollupdate(pd, _POLLIN, 0)
+ case 'w':
+ netpollupdate(pd, _POLLOUT, 0)
+ default:
+ throw("netpollarm: bad mode")
+ }
+ unlock(&pd.lock)
+}
+
+// polls for ready network connections
+// returns list of goroutines that become runnable
+func netpoll(block bool) *g {
+ if portfd == -1 {
+ return nil
+ }
+
+ var wait *timespec
+ var zero timespec
+ if !block {
+ wait = &zero
+ }
+
+ var events [128]portevent
+retry:
+ var n uint32 = 1
+ if port_getn(portfd, &events[0], uint32(len(events)), &n, wait) < 0 {
+ if e := errno(); e != _EINTR {
+ print("runtime: port_getn on fd ", portfd, " failed with ", e, "\n")
+ throw("port_getn failed")
+ }
+ goto retry
+ }
+
+ var gp guintptr
+ for i := 0; i < int(n); i++ {
+ ev := &events[i]
+
+ if ev.portev_events == 0 {
+ continue
+ }
+ pd := (*pollDesc)(unsafe.Pointer(ev.portev_user))
+
+ var mode, clear int32
+ if (ev.portev_events & (_POLLIN | _POLLHUP | _POLLERR)) != 0 {
+ mode += 'r'
+ clear |= _POLLIN
+ }
+ if (ev.portev_events & (_POLLOUT | _POLLHUP | _POLLERR)) != 0 {
+ mode += 'w'
+ clear |= _POLLOUT
+ }
+ // To effect edge-triggered events, we need to be sure to
+ // update our association with whatever events were not
+ // set with the event. For example if we are registered
+ // for POLLIN|POLLOUT, and we get POLLIN, besides waking
+ // the goroutine interested in POLLIN we have to not forget
+ // about the one interested in POLLOUT.
+ if clear != 0 {
+ lock(&pd.lock)
+ netpollupdate(pd, 0, uint32(clear))
+ unlock(&pd.lock)
+ }
+
+ if mode != 0 {
+ netpollready(&gp, pd, mode)
+ }
+ }
+
+ if block && gp == 0 {
+ goto retry
+ }
+ return gp.ptr()
+}
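
Event ports are not exposed by Go's syscall package, so the re-association bookkeeping that netpollupdate performs can only be sketched against a hypothetical portAssociate stub. The interest-set arithmetic below mirrors netpollupdate and netpollarm; everything else (the names desc, update, portAssociate, the constants) is illustrative.

package main

import "fmt"

const (
	pollIn  uint32 = 0x1 // stand-ins for _POLLIN / _POLLOUT
	pollOut uint32 = 0x4
)

type desc struct {
	fd       uintptr
	interest uint32 // mirrors pollDesc.user: the currently armed events
}

// portAssociate is hypothetical; the runtime calls the real
// port_associate via //extern.
func portAssociate(fd uintptr, events uint32) {
	fmt.Printf("port_associate(fd=%d, events=%#x)\n", fd, events)
}

// update mirrors netpollupdate: arm `set`, drop `clear`, and only
// call into the kernel when the interest set actually changes.
func update(d *desc, set, clear uint32) {
	events := (d.interest &^ clear) | set
	if events == d.interest {
		return
	}
	if events != 0 {
		portAssociate(d.fd, events)
	}
	d.interest = events
}

func main() {
	d := &desc{fd: 3}
	update(d, pollIn|pollOut, 0) // netpollarm: interested in both
	// A POLLIN event fires; the port auto-dissociates on delivery.
	// The poller loop re-arms everything except what just fired:
	update(d, 0, pollIn) // re-associates with POLLOUT only
}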
diff --git a/libgo/go/runtime/netpoll_stub.go b/libgo/go/runtime/netpoll_stub.go
new file mode 100644
index 0000000..09f64ad
--- /dev/null
+++ b/libgo/go/runtime/netpoll_stub.go
@@ -0,0 +1,19 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build plan9
+
+package runtime
+
+// Polls for ready network connections.
+// Returns list of goroutines that become runnable.
+func netpoll(block bool) (gp *g) {
+	// Implementation for platforms that do not support
+	// an integrated network poller.
+ return
+}
+
+func netpollinited() bool {
+ return false
+}
diff --git a/libgo/go/runtime/netpoll_windows.go b/libgo/go/runtime/netpoll_windows.go
new file mode 100644
index 0000000..7ad1158
--- /dev/null
+++ b/libgo/go/runtime/netpoll_windows.go
@@ -0,0 +1,145 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "unsafe"
+)
+
+const _DWORD_MAX = 0xffffffff
+
+const _INVALID_HANDLE_VALUE = ^uintptr(0)
+
+// net_op must match the beginning of net.operation. Keep these in sync.
+type net_op struct {
+ // used by windows
+ o overlapped
+ // used by netpoll
+ pd *pollDesc
+ mode int32
+ errno int32
+ qty uint32
+}
+
+type overlappedEntry struct {
+ key uintptr
+ op *net_op // In reality it's *overlapped, but we cast it to *net_op anyway.
+ internal uintptr
+ qty uint32
+}
+
+var iocphandle uintptr = _INVALID_HANDLE_VALUE // completion port io handle
+
+func netpollinit() {
+ iocphandle = stdcall4(_CreateIoCompletionPort, _INVALID_HANDLE_VALUE, 0, 0, _DWORD_MAX)
+ if iocphandle == 0 {
+ println("netpoll: failed to create iocp handle (errno=", getlasterror(), ")")
+ throw("netpoll: failed to create iocp handle")
+ }
+}
+
+func netpollopen(fd uintptr, pd *pollDesc) int32 {
+ if stdcall4(_CreateIoCompletionPort, fd, iocphandle, 0, 0) == 0 {
+ return -int32(getlasterror())
+ }
+ return 0
+}
+
+func netpollclose(fd uintptr) int32 {
+ // nothing to do
+ return 0
+}
+
+func netpollarm(pd *pollDesc, mode int) {
+ throw("unused")
+}
+
+// Polls for completed network IO.
+// Returns list of goroutines that become runnable.
+func netpoll(block bool) *g {
+ var entries [64]overlappedEntry
+ var wait, qty, key, flags, n, i uint32
+ var errno int32
+ var op *net_op
+ var gp guintptr
+
+ mp := getg().m
+
+ if iocphandle == _INVALID_HANDLE_VALUE {
+ return nil
+ }
+ wait = 0
+ if block {
+ wait = _INFINITE
+ }
+retry:
+ if _GetQueuedCompletionStatusEx != nil {
+ n = uint32(len(entries) / int(gomaxprocs))
+ if n < 8 {
+ n = 8
+ }
+ if block {
+ mp.blocked = true
+ }
+ if stdcall6(_GetQueuedCompletionStatusEx, iocphandle, uintptr(unsafe.Pointer(&entries[0])), uintptr(n), uintptr(unsafe.Pointer(&n)), uintptr(wait), 0) == 0 {
+ mp.blocked = false
+ errno = int32(getlasterror())
+ if !block && errno == _WAIT_TIMEOUT {
+ return nil
+ }
+ println("netpoll: GetQueuedCompletionStatusEx failed (errno=", errno, ")")
+ throw("netpoll: GetQueuedCompletionStatusEx failed")
+ }
+ mp.blocked = false
+ for i = 0; i < n; i++ {
+ op = entries[i].op
+ errno = 0
+ qty = 0
+ if stdcall5(_WSAGetOverlappedResult, op.pd.fd, uintptr(unsafe.Pointer(op)), uintptr(unsafe.Pointer(&qty)), 0, uintptr(unsafe.Pointer(&flags))) == 0 {
+ errno = int32(getlasterror())
+ }
+ handlecompletion(&gp, op, errno, qty)
+ }
+ } else {
+ op = nil
+ errno = 0
+ qty = 0
+ if block {
+ mp.blocked = true
+ }
+ if stdcall5(_GetQueuedCompletionStatus, iocphandle, uintptr(unsafe.Pointer(&qty)), uintptr(unsafe.Pointer(&key)), uintptr(unsafe.Pointer(&op)), uintptr(wait)) == 0 {
+ mp.blocked = false
+ errno = int32(getlasterror())
+ if !block && errno == _WAIT_TIMEOUT {
+ return nil
+ }
+ if op == nil {
+ println("netpoll: GetQueuedCompletionStatus failed (errno=", errno, ")")
+ throw("netpoll: GetQueuedCompletionStatus failed")
+ }
+ // dequeued failed IO packet, so report that
+ }
+ mp.blocked = false
+ handlecompletion(&gp, op, errno, qty)
+ }
+ if block && gp == 0 {
+ goto retry
+ }
+ return gp.ptr()
+}
+
+func handlecompletion(gpp *guintptr, op *net_op, errno int32, qty uint32) {
+ if op == nil {
+ throw("netpoll: GetQueuedCompletionStatus returned op == nil")
+ }
+ mode := op.mode
+ if mode != 'r' && mode != 'w' {
+ println("netpoll: GetQueuedCompletionStatus returned invalid mode=", mode)
+ throw("netpoll: GetQueuedCompletionStatus returned invalid mode")
+ }
+ op.errno = errno
+ op.qty = qty
+ netpollready(gpp, op.pd, mode)
+}
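
The IOCP create/dequeue cycle wrapped by netpollinit and netpoll can be sketched with the standard syscall package (Windows-only). This is an illustration, not the runtime's path: PostQueuedCompletionStatus stands in for the completion packet the kernel would post when an overlapped network operation finishes, and the qty/key values are arbitrary.

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// netpollinit: create a fresh completion port.
	iocp, err := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
	if err != nil {
		panic(err)
	}
	defer syscall.CloseHandle(iocp)

	// Simulate a completion packet; real code associates sockets with
	// the port and the kernel posts these when async IO finishes.
	var posted syscall.Overlapped
	if err := syscall.PostQueuedCompletionStatus(iocp, 42, 1, &posted); err != nil {
		panic(err)
	}

	var qty, key uint32
	var ov *syscall.Overlapped
	// INFINITE blocks, like netpoll(true); 0 would poll, like netpoll(false).
	if err := syscall.GetQueuedCompletionStatus(iocp, &qty, &key, &ov, syscall.INFINITE); err != nil {
		panic(err)
	}
	fmt.Println("dequeued: qty =", qty, "key =", key)
}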
diff --git a/libgo/go/runtime/stubs.go b/libgo/go/runtime/stubs.go
index 3db8fea..083710d 100644
--- a/libgo/go/runtime/stubs.go
+++ b/libgo/go/runtime/stubs.go
@@ -296,7 +296,7 @@ func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
func lock(l *mutex)
func unlock(l *mutex)
-// Here for gccgo for Solaris.
+// Here for gccgo for netpoll and Solaris.
func errno() int
// Temporary for gccgo until we port proc.go.
@@ -460,3 +460,9 @@ func setmaxthreads(int) int
func setMaxThreads(in int) (out int) {
return setmaxthreads(in)
}
+
+// Temporary for gccgo until we port atomic_pointer.go.
+//go:nosplit
+func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
+ atomic.StorepNoWB(noescape(ptr), new)
+}
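
The dummy atomicstorep added here exists only for its full-barrier effect: pollSetDeadline and pollUnblock store to rd/wd/closing, issue the barrier, then load rg/wg, while netpollblock does the mirror image, so one side always observes the other. A rough portable model of that ordering, using sync/atomic and invented names (deadline, barrier, waiter), might look like this; note that sync/atomic does not formally promise the same barrier the runtime-internal primitive provides.

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

var (
	deadline int64          // plays the role of pd.rd / pd.wd
	barrier  unsafe.Pointer // target of the dummy full-barrier store
	waiter   uintptr        // plays the role of pd.rg / pd.wg
)

// setDeadlineInPast mirrors the tail of net_runtime_pollSetDeadline:
// store the deadline, then a barrier, then load the waiter. The
// blocking side does the mirror image (CAS the waiter, then recheck
// the deadline), so a goroutine can never park and be missed.
func setDeadlineInPast() {
	deadline = -1
	// Barrier between the store to deadline above and the load of
	// waiter below, as in the runtime's dummy atomicstorep.
	atomic.StorePointer(&barrier, nil)
	if atomic.LoadUintptr(&waiter) != 0 {
		fmt.Println("would unblock the parked goroutine")
	} else {
		fmt.Println("no goroutine parked; nothing to wake")
	}
}

func main() {
	setDeadlineInPast()
}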
diff --git a/libgo/go/runtime/time.go b/libgo/go/runtime/time.go
index d9a1d59..8df185d 100644
--- a/libgo/go/runtime/time.go
+++ b/libgo/go/runtime/time.go
@@ -8,10 +8,6 @@ package runtime
import "unsafe"
-// Export temporarily for gccgo's C code to call:
-//go:linkname addtimer runtime.addtimer
-//go:linkname deltimer runtime.deltimer
-
// Package time knows the layout of this structure.
// If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
// For GOOS=nacl, package syscall knows the layout of this structure.