about summary refs log tree commit diff
path: root/libgo/go/runtime/netpoll_kqueue.go
diff options
context:
space:
mode:
Diffstat (limited to 'libgo/go/runtime/netpoll_kqueue.go')
-rw-r--r--  libgo/go/runtime/netpoll_kqueue.go  32
1 files changed, 20 insertions, 12 deletions
diff --git a/libgo/go/runtime/netpoll_kqueue.go b/libgo/go/runtime/netpoll_kqueue.go
index 9450461..c41a7d0 100644
--- a/libgo/go/runtime/netpoll_kqueue.go
+++ b/libgo/go/runtime/netpoll_kqueue.go
@@ -8,7 +8,10 @@ package runtime
// Integrated network poller (kqueue-based implementation).
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
//extern kqueue
func kqueue() int32
@@ -21,6 +24,8 @@ var (
kq int32 = -1
netpollBreakRd, netpollBreakWr uintptr // for netpollBreak
+
+ netpollWakeSig uint32 // used to avoid duplicate calls of netpollBreak
)
func netpollinit() {
@@ -83,19 +88,21 @@ func netpollarm(pd *pollDesc, mode int) {
throw("runtime: unused")
}
-// netpollBreak interrupts an epollwait.
+// netpollBreak interrupts a kevent.
func netpollBreak() {
- for {
- var b byte
- n := write(netpollBreakWr, unsafe.Pointer(&b), 1)
- if n == 1 || n == -_EAGAIN {
- break
- }
- if n == -_EINTR {
- continue
+ if atomic.Cas(&netpollWakeSig, 0, 1) {
+ for {
+ var b byte
+ n := write(netpollBreakWr, unsafe.Pointer(&b), 1)
+ if n == 1 || n == -_EAGAIN {
+ break
+ }
+ if n == -_EINTR {
+ continue
+ }
+ println("runtime: netpollBreak write failed with", -n)
+ throw("runtime: netpollBreak write failed")
}
- println("runtime: netpollBreak write failed with", -n)
- throw("runtime: netpollBreak write failed")
}
}
@@ -153,6 +160,7 @@ retry:
// if blocking.
var tmp [16]byte
read(int32(netpollBreakRd), noescape(unsafe.Pointer(&tmp[0])), int32(len(tmp)))
+ atomic.Store(&netpollWakeSig, 0)
}
continue
}