author    Ian Lance Taylor <ian@gcc.gnu.org>  2016-12-19 18:00:35 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>  2016-12-19 18:00:35 +0000
commit    0d3dd8fb65050363f1f82b5f048799fd9a0a0f5a (patch)
tree      2ebab7c43a3260f883a2cf83ca162d10a8850870 /libgo/go
parent    4daecdb62396a1571f3cba861a0068ab539f8e28 (diff)
runtime: copy cgo support from Go 1.7 runtime

Remove support for _cgo_allocate. It was removed from the gc toolchain
in Go 1.5, so it is unlikely that anybody is trying to use it.

Reviewed-on: https://go-review.googlesource.com/34557

From-SVN: r243805
Diffstat (limited to 'libgo/go')
-rw-r--r--  libgo/go/runtime/cgo_gccgo.go          110
-rw-r--r--  libgo/go/runtime/cgo_mmap.go            43
-rw-r--r--  libgo/go/runtime/os_gccgo.go            38
-rw-r--r--  libgo/go/runtime/proc.go               330
-rw-r--r--  libgo/go/runtime/runtime2.go            10
-rw-r--r--  libgo/go/runtime/signal1_unix.go         2
-rw-r--r--  libgo/go/runtime/signal_gccgo.go        35
-rw-r--r--  libgo/go/runtime/signal_sighandler.go    4
-rw-r--r--  libgo/go/runtime/stubs.go               31
9 files changed, 534 insertions, 69 deletions
diff --git a/libgo/go/runtime/cgo_gccgo.go b/libgo/go/runtime/cgo_gccgo.go
new file mode 100644
index 0000000..a55fb43
--- /dev/null
+++ b/libgo/go/runtime/cgo_gccgo.go
@@ -0,0 +1,110 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ _ "unsafe"
+)
+
+// For historical reasons these functions are called as though they
+// were in the syscall package.
+//go:linkname Cgocall syscall.Cgocall
+//go:linkname CgocallDone syscall.CgocallDone
+//go:linkname CgocallBack syscall.CgocallBack
+//go:linkname CgocallBackDone syscall.CgocallBackDone
+
+// A routine that may be called by SWIG.
+//go:linkname _cgo_panic _cgo_panic
+
+// iscgo is set to true if the cgo tool sets the C variable runtime_iscgo
+// to true.
+var iscgo bool
+
+// cgoHasExtraM is set on startup when an extra M is created for cgo.
+// The extra M must be created before any C/C++ code calls cgocallback.
+var cgoHasExtraM bool
+
+// Cgocall prepares to call from code written in Go to code written in
+// C/C++. This takes the current goroutine out of the Go scheduler, as
+// though it were making a system call. Otherwise the program can
+// lock up if the C code blocks. The idea is to call this function,
+// then immediately call the C/C++ function. After the C/C++ function
+// returns, call CgocallDone. The usual Go code would look like
+// syscall.Cgocall()
+// defer syscall.CgocallDone()
+// cfunction()
+func Cgocall() {
+ lockOSThread()
+ mp := getg().m
+ mp.ncgocall++
+ mp.ncgo++
+ entersyscall(0)
+}
+
+// CgocallDone prepares to return to Go code from C/C++ code.
+func CgocallDone() {
+ gp := getg()
+ if gp == nil {
+ throw("no g in CgocallDone")
+ }
+ gp.m.ncgo--
+
+ // If we are invoked because the C function called _cgo_panic,
+ // then _cgo_panic will already have exited syscall mode.
+ if gp.atomicstatus == _Gsyscall {
+ exitsyscall(0)
+ }
+
+ unlockOSThread()
+}
+
+// CgocallBack is used when calling from C/C++ code into Go code.
+// The usual approach is
+// syscall.CgocallBack()
+// defer syscall.CgocallBackDone()
+// gofunction()
+//go:nosplit
+func CgocallBack() {
+ if getg() == nil || getg().m == nil {
+ needm(0)
+ mp := getg().m
+ mp.dropextram = true
+ }
+
+ exitsyscall(0)
+
+ if getg().m.ncgo == 0 {
+ // The C call to Go came from a thread not currently running
+ // any Go. In the case of -buildmode=c-archive or c-shared,
+ // this call may be coming in before package initialization
+ // is complete. Wait until it is.
+ <-main_init_done
+ }
+
+ mp := getg().m
+ if mp.needextram || atomic.Load(&extraMWaiters) > 0 {
+ mp.needextram = false
+ newextram()
+ }
+}
+
+// CgocallBackDone prepares to return to C/C++ code that has called
+// into Go code.
+func CgocallBackDone() {
+ entersyscall(0)
+ mp := getg().m
+ if mp.dropextram && mp.ncgo == 0 {
+ mp.dropextram = false
+ dropm()
+ }
+}
+
+// _cgo_panic may be called by SWIG code to panic.
+func _cgo_panic(p *byte) {
+ exitsyscall(0)
+ panic(gostringnocopy(p))
+}
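
To make the calling convention above concrete, here is a minimal cgo
program; under gccgo the compiled form of the C.sqrt call is bracketed
by the syscall.Cgocall / syscall.CgocallDone pair documented above, and
a callback from C into Go gets the CgocallBack / CgocallBackDone pair.
This is a sketch for orientation, not part of the patch:

    package main

    // #include <math.h>
    import "C"

    import "fmt"

    func main() {
        // Compiled roughly as:
        //   syscall.Cgocall()
        //   defer syscall.CgocallDone()
        //   ...call the C sqrt...
        // so this goroutine leaves the Go scheduler while C runs.
        fmt.Println(C.sqrt(2)) // 1.4142135623730951
    }
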
diff --git a/libgo/go/runtime/cgo_mmap.go b/libgo/go/runtime/cgo_mmap.go
deleted file mode 100644
index bcdd6cd..0000000
--- a/libgo/go/runtime/cgo_mmap.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Support for memory sanitizer. See runtime/cgo/mmap.go.
-
-// +build linux,amd64
-
-package runtime
-
-import "unsafe"
-
-// _cgo_mmap is filled in by runtime/cgo when it is linked into the
-// program, so it is only non-nil when using cgo.
-//go:linkname _cgo_mmap _cgo_mmap
-var _cgo_mmap unsafe.Pointer
-
-func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer {
- if _cgo_mmap != nil {
- // Make ret a uintptr so that writing to it in the
- // function literal does not trigger a write barrier.
- // A write barrier here could break because of the way
- // that mmap uses the same value both as a pointer and
- // an errno value.
- // TODO: Fix mmap to return two values.
- var ret uintptr
- systemstack(func() {
- ret = callCgoMmap(addr, n, prot, flags, fd, off)
- })
- return unsafe.Pointer(ret)
- }
- return sysMmap(addr, n, prot, flags, fd, off)
-}
-
-// sysMmap calls the mmap system call. It is implemented in assembly.
-func sysMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
-
-// callCgoMmap calls the mmap function in the runtime/cgo package
-// using the GCC calling convention. It is implemented in assembly.
-func callCgoMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) uintptr
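
The deleted file used a common dispatch idiom: call through an optional
hook when one has been installed (_cgo_mmap, filled in by runtime/cgo),
otherwise fall back to the direct implementation (sysMmap). A
standalone sketch of the same idiom, with a hypothetical allocHook
standing in for _cgo_mmap:

    package main

    import "fmt"

    // allocHook is a hypothetical stand-in for _cgo_mmap: it stays nil
    // unless some other component (e.g. a sanitizer shim) installs it.
    var allocHook func(n int) []byte

    func alloc(n int) []byte {
        if allocHook != nil {
            return allocHook(n) // interposed path, like callCgoMmap
        }
        return make([]byte, n) // default path, like sysMmap
    }

    func main() {
        fmt.Println(len(alloc(16))) // 16, default path
        allocHook = func(n int) []byte { return make([]byte, 2*n) }
        fmt.Println(len(alloc(16))) // 32, hooked path
    }
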
diff --git a/libgo/go/runtime/os_gccgo.go b/libgo/go/runtime/os_gccgo.go
index 4609432..f45ab25 100644
--- a/libgo/go/runtime/os_gccgo.go
+++ b/libgo/go/runtime/os_gccgo.go
@@ -8,6 +8,44 @@ import (
"unsafe"
)
+// Temporary for C code to call:
+//go:linkname minit runtime.minit
+
+// minit is called to initialize a new m (including the bootstrap m).
+// Called on the new thread, cannot allocate memory.
+func minit() {
+ // Initialize signal handling.
+ _g_ := getg()
+
+ var st _stack_t
+ sigaltstack(nil, &st)
+ if st.ss_flags&_SS_DISABLE != 0 {
+ signalstack(_g_.m.gsignalstack, _g_.m.gsignalstacksize)
+ _g_.m.newSigstack = true
+ } else {
+ _g_.m.newSigstack = false
+ }
+
+ // FIXME: We should set _g_.m.procid here.
+
+ // restore signal mask from m.sigmask and unblock essential signals
+ nmask := _g_.m.sigmask
+ for i := range sigtable {
+ if sigtable[i].flags&_SigUnblock != 0 {
+ sigdelset(&nmask, int32(i))
+ }
+ }
+ sigprocmask(_SIG_SETMASK, &nmask, nil)
+}
+
+// Called from dropm to undo the effect of an minit.
+//go:nosplit
+func unminit() {
+ if getg().m.newSigstack {
+ signalstack(nil, 0)
+ }
+}
+
var urandom_dev = []byte("/dev/urandom\x00")
func getRandomData(r []byte) {
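
The loop in minit above starts from the thread's saved signal mask and
deletes every signal marked _SigUnblock, so signals the runtime must
receive become deliverable again. A toy, self-contained model of that
mask trimming (the four-entry sigtable and bitmask here are invented
for illustration):

    package main

    import "fmt"

    const _SigUnblock = 1 << 0

    type sigTabT struct{ flags int }

    // A toy signal table: entries 1 and 3 must never stay blocked.
    var sigtable = [4]sigTabT{{0}, {_SigUnblock}, {0}, {_SigUnblock}}

    func main() {
        mask := uint32(0b1111) // pretend the thread arrived with everything blocked
        for i := range sigtable {
            if sigtable[i].flags&_SigUnblock != 0 {
                mask &^= 1 << uint(i) // the sigdelset analogue
            }
        }
        fmt.Printf("%04b\n", mask) // 0101: the essential signals are unblocked
    }
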
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go
new file mode 100644
index 0000000..fa90a28
--- /dev/null
+++ b/libgo/go/runtime/proc.go
@@ -0,0 +1,330 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// Functions temporarily called by C code.
+//go:linkname newextram runtime.newextram
+
+// Functions temporarily in C that have not yet been ported.
+func allocm(*p, bool, *unsafe.Pointer, *uintptr) *m
+func malg(bool, bool, *unsafe.Pointer, *uintptr) *g
+func allgadd(*g)
+
+// C functions for ucontext management.
+func setGContext()
+func makeGContext(*g, unsafe.Pointer, uintptr)
+
+// main_init_done is a signal used by cgocallbackg that initialization
+// has been completed. It is made before _cgo_notify_runtime_init_done,
+// so all cgo calls can rely on it existing. When main_init is complete,
+// it is closed, meaning cgocallbackg can reliably receive from it.
+var main_init_done chan bool
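
main_init_done relies on the close-to-broadcast property of channels:
any number of cgo callbacks can block on a receive, and closing the
channel releases all of them at once. A self-contained sketch of the
idiom (the names here are illustrative):

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    func main() {
        initDone := make(chan bool) // plays the role of main_init_done
        var wg sync.WaitGroup
        for i := 0; i < 3; i++ {
            wg.Add(1)
            go func(id int) {
                defer wg.Done()
                <-initDone // every early "callback" parks here
                fmt.Println("callback", id, "running after init")
            }(i)
        }
        time.Sleep(10 * time.Millisecond) // stand-in for package initialization
        close(initDone)                   // releases every waiter at once
        wg.Wait()
    }
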
+
+// If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
+// and casfrom_Gscanstatus instead.
+// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
+// put it in the Gscan state is finished.
+//go:nosplit
+func casgstatus(gp *g, oldval, newval uint32) {
+ if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
+ systemstack(func() {
+ print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
+ throw("casgstatus: bad incoming values")
+ })
+ }
+
+ if oldval == _Grunning && gp.gcscanvalid {
+ // If oldval == _Grunning, then the actual status must be
+ // _Grunning or _Grunning|_Gscan; either way,
+ // we own gp.gcscanvalid, so it's safe to read.
+ // gp.gcscanvalid must not be true when we are running.
+ print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
+ throw("casgstatus")
+ }
+
+ // See http://golang.org/cl/21503 for justification of the yield delay.
+ const yieldDelay = 5 * 1000
+ var nextYield int64
+
+ // loop if gp->atomicstatus is in a scan state giving
+ // GC time to finish and change the state to oldval.
+ for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
+ if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
+ systemstack(func() {
+ throw("casgstatus: waiting for Gwaiting but is Grunnable")
+ })
+ }
+ // Help GC if needed.
+ // if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
+ // gp.preemptscan = false
+ // systemstack(func() {
+ // gcphasework(gp)
+ // })
+ // }
+ // But meanwhile just yield.
+ if i == 0 {
+ nextYield = nanotime() + yieldDelay
+ }
+ if nanotime() < nextYield {
+ for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
+ procyield(1)
+ }
+ } else {
+ osyield()
+ nextYield = nanotime() + yieldDelay/2
+ }
+ }
+ if newval == _Grunning && gp.gcscanvalid {
+ // Run queueRescan on the system stack so it has more space.
+ systemstack(func() { queueRescan(gp) })
+ }
+}
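
The retry loop above is a spin-then-yield pattern: retry the CAS,
busy-wait briefly (procyield) while the status is expected to change
soon, and fall back to an OS yield (osyield) with a refreshed deadline
otherwise. A user-space sketch using sync/atomic and runtime.Gosched in
place of the runtime-internal primitives:

    package main

    import (
        "fmt"
        "runtime"
        "sync/atomic"
        "time"
    )

    func casWithBackoff(p *uint32, oldval, newval uint32) {
        const yieldDelay = 5 * time.Microsecond
        var nextYield time.Time
        for i := 0; !atomic.CompareAndSwapUint32(p, oldval, newval); i++ {
            if i == 0 {
                nextYield = time.Now().Add(yieldDelay)
            }
            if time.Now().Before(nextYield) {
                for x := 0; x < 10 && atomic.LoadUint32(p) != oldval; x++ {
                    // brief busy wait, the procyield(1) analogue
                }
            } else {
                runtime.Gosched() // user-space stand-in for osyield
                nextYield = time.Now().Add(yieldDelay / 2)
            }
        }
    }

    func main() {
        status := uint32(1)
        go func() {
            time.Sleep(time.Millisecond)
            atomic.StoreUint32(&status, 2) // the state change being waited for
        }()
        casWithBackoff(&status, 2, 3)           // loops until status reaches 2, then swaps in 3
        fmt.Println(atomic.LoadUint32(&status)) // 3
    }
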
+
+// needm is called when a cgo callback happens on a
+// thread without an m (a thread not created by Go).
+// In this case, needm is expected to find an m to use
+// and return with m, g initialized correctly.
+// Since m and g are not set now (likely nil, but see below)
+// needm is limited in what routines it can call. In particular
+// it can only call nosplit functions (textflag 7) and cannot
+// do any scheduling that requires an m.
+//
+// In order to avoid needing heavy lifting here, we adopt
+// the following strategy: there is a stack of available m's
+// that can be stolen. Using compare-and-swap
+// to pop from the stack has ABA races, so we simulate
+// a lock by doing an exchange (via casp) to steal the stack
+// head and replace the top pointer with MLOCKED (1).
+// This serves as a simple spin lock that we can use even
+// without an m. The thread that locks the stack in this way
+// unlocks the stack by storing a valid stack head pointer.
+//
+// In order to make sure that there is always an m structure
+// available to be stolen, we maintain the invariant that there
+// is always one more than needed. At the beginning of the
+// program (if cgo is in use) the list is seeded with a single m.
+// If needm finds that it has taken the last m off the list, its job
+// is - once it has installed its own m so that it can do things like
+// allocate memory - to create a spare m and put it on the list.
+//
+// Each of these extra m's also has a g0 and a curg that are
+// pressed into service as the scheduling stack and current
+// goroutine for the duration of the cgo callback.
+//
+// When the callback is done with the m, it calls dropm to
+// put the m back on the list.
+//go:nosplit
+func needm(x byte) {
+ if iscgo && !cgoHasExtraM {
+ // Can happen if C/C++ code calls Go from a global ctor.
+ // Can not throw, because scheduler is not initialized yet.
+ write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
+ exit(1)
+ }
+
+ // Lock extra list, take head, unlock popped list.
+ // nilokay=false is safe here because of the invariant above,
+ // that the extra list always contains or will soon contain
+ // at least one m.
+ mp := lockextra(false)
+
+ // Set needextram when we've just emptied the list,
+ // so that the eventual call into cgocallbackg will
+ // allocate a new m for the extra list. We delay the
+ // allocation until then so that it can be done
+ // after exitsyscall makes sure it is okay to be
+ // running at all (that is, there's no garbage collection
+ // running right now).
+ mp.needextram = mp.schedlink == 0
+ unlockextra(mp.schedlink.ptr())
+
+ // Save and block signals before installing g.
+ // Once g is installed, any incoming signals will try to execute,
+ // but we won't have the sigaltstack settings and other data
+ // set up appropriately until the end of minit, which will
+ // unblock the signals. This is the same dance as when
+ // starting a new m to run Go code via newosproc.
+ msigsave(mp)
+ sigblock()
+
+ // Install g (= m->curg).
+ setg(mp.curg)
+ atomic.Store(&mp.curg.atomicstatus, _Gsyscall)
+ setGContext()
+
+ // Initialize this thread to use the m.
+ minit()
+}
+
+var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
+
+// newextram allocates m's and puts them on the extra list.
+// It is called with a working local m, so that it can do things
+// like call schedlock and allocate.
+func newextram() {
+ c := atomic.Xchg(&extraMWaiters, 0)
+ if c > 0 {
+ for i := uint32(0); i < c; i++ {
+ oneNewExtraM()
+ }
+ } else {
+ // Make sure there is at least one extra M.
+ mp := lockextra(true)
+ unlockextra(mp)
+ if mp == nil {
+ oneNewExtraM()
+ }
+ }
+}
+
+// oneNewExtraM allocates an m and puts it on the extra list.
+func oneNewExtraM() {
+ // Create extra goroutine locked to extra m.
+ // The goroutine is the context in which the cgo callback will run.
+ // The sched.pc will never be returned to, but setting it to
+ // goexit makes clear to the traceback routines where
+ // the goroutine stack ends.
+ var g0SP unsafe.Pointer
+ var g0SPSize uintptr
+ mp := allocm(nil, true, &g0SP, &g0SPSize)
+ gp := malg(true, false, nil, nil)
+ gp.gcscanvalid = true // fresh G, so no dequeueRescan necessary
+ gp.gcRescan = -1
+
+ // malg returns status as Gidle, change to Gdead before adding to allg
+ // where GC will see it.
+ // gccgo uses Gdead here, not Gsyscall, because the split
+ // stack context is not initialized.
+ casgstatus(gp, _Gidle, _Gdead)
+ gp.m = mp
+ mp.curg = gp
+ mp.locked = _LockInternal
+ mp.lockedg = gp
+ gp.lockedm = mp
+ gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
+ if raceenabled {
+ gp.racectx = racegostart(funcPC(newextram))
+ }
+ // put on allg for garbage collector
+ allgadd(gp)
+
+ // The context for gp will be set up in needm.
+ // Here we need to set the context for g0.
+ makeGContext(mp.g0, g0SP, g0SPSize)
+
+ // Add m to the extra list.
+ mnext := lockextra(true)
+ mp.schedlink.set(mnext)
+ unlockextra(mp)
+}
+
+// dropm is called when a cgo callback has called needm but is now
+// done with the callback and returning back into the non-Go thread.
+// It puts the current m back onto the extra list.
+//
+// The main expense here is the call to signalstack to release the
+// m's signal stack, and then the call to needm on the next callback
+// from this thread. It is tempting to try to save the m for next time,
+// which would eliminate both these costs, but there might not be
+// a next time: the current thread (which Go does not control) might exit.
+// If we saved the m for that thread, there would be an m leak each time
+// such a thread exited. Instead, we acquire and release an m on each
+// call. These should typically not be scheduling operations, just a few
+// atomics, so the cost should be small.
+//
+// TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
+// variable using pthread_key_create. Unlike the pthread keys we already use
+// on OS X, this dummy key would never be read by Go code. It would exist
+// only so that we could register a thread-exit-time destructor.
+// That destructor would put the m back onto the extra list.
+// This is purely a performance optimization. The current version,
+// in which dropm happens on each cgo call, is still correct too.
+// We may have to keep the current version on systems with cgo
+// but without pthreads, like Windows.
+func dropm() {
+ // Clear m and g, and return m to the extra list.
+ // After the call to setg we can only call nosplit functions
+ // with no pointer manipulation.
+ mp := getg().m
+
+ // Block signals before unminit.
+ // Unminit unregisters the signal handling stack (but needs g on some systems).
+ // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
+ // It's important not to try to handle a signal between those two steps.
+ sigmask := mp.sigmask
+ sigblock()
+ unminit()
+
+ // gccgo sets the stack to Gdead here, because the splitstack
+ // context is not initialized.
+ mp.curg.atomicstatus = _Gdead
+ mp.curg.gcstack = nil
+ mp.curg.gcnextsp = nil
+
+ mnext := lockextra(true)
+ mp.schedlink.set(mnext)
+
+ setg(nil)
+
+ // Commit the release of mp.
+ unlockextra(mp)
+
+ msigrestore(sigmask)
+}
+
+// A helper function for EnsureDropM.
+func getm() uintptr {
+ return uintptr(unsafe.Pointer(getg().m))
+}
+
+var extram uintptr
+var extraMWaiters uint32
+
+// lockextra locks the extra list and returns the list head.
+// The caller must unlock the list by storing a new list head
+// to extram. If nilokay is true, then lockextra will
+// return a nil list head if that's what it finds. If nilokay is false,
+// lockextra will keep waiting until the list head is no longer nil.
+//go:nosplit
+func lockextra(nilokay bool) *m {
+ const locked = 1
+
+ incr := false
+ for {
+ old := atomic.Loaduintptr(&extram)
+ if old == locked {
+ yield := osyield
+ yield()
+ continue
+ }
+ if old == 0 && !nilokay {
+ if !incr {
+ // Add 1 to the number of threads
+ // waiting for an M.
+ // This is cleared by newextram.
+ atomic.Xadd(&extraMWaiters, 1)
+ incr = true
+ }
+ usleep(1)
+ continue
+ }
+ if atomic.Casuintptr(&extram, old, locked) {
+ return (*m)(unsafe.Pointer(old))
+ }
+ yield := osyield
+ yield()
+ continue
+ }
+}
+
+//go:nosplit
+func unlockextra(mp *m) {
+ atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
+}
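
lockextra implements the scheme described in needm's comment: the
extra-M list head doubles as a lock word, with the sentinel value 1
meaning locked, so even a thread with no m can take the list safely. A
standalone sketch of that exchange-lock idiom, with plain integers
standing in for *m pointers:

    package main

    import (
        "fmt"
        "runtime"
        "sync"
        "sync/atomic"
    )

    const locked = 1 // sentinel: the list head is currently held

    var head uintptr // 0 = empty list, 1 = locked, otherwise a node value

    // lock spins until it exchanges the current head for the sentinel;
    // the caller then owns the list until it stores a new head.
    func lock() uintptr {
        for {
            old := atomic.LoadUintptr(&head)
            if old == locked {
                runtime.Gosched() // someone else holds it; yield and retry
                continue
            }
            if atomic.CompareAndSwapUintptr(&head, old, locked) {
                return old
            }
        }
    }

    // unlock publishes a new head, releasing the lock.
    func unlock(newHead uintptr) {
        atomic.StoreUintptr(&head, newHead)
    }

    func main() {
        unlock(100) // seed the "list" with one node, as cgo seeds one extra m
        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                h := lock()   // pop the head under the exchange lock...
                unlock(h + 1) // ...and publish a replacement, as needm/newextram do
            }()
        }
        wg.Wait()
        fmt.Println(atomic.LoadUintptr(&head)) // 104 after four rounds
    }
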
diff --git a/libgo/go/runtime/runtime2.go b/libgo/go/runtime/runtime2.go
index 4712318..978a317 100644
--- a/libgo/go/runtime/runtime2.go
+++ b/libgo/go/runtime/runtime2.go
@@ -479,8 +479,6 @@ type m struct {
dropextram bool // drop after call is done
gcing int32
-
- cgomal *cgoMal // allocations via _cgo_allocate
}
type p struct {
@@ -801,14 +799,6 @@ var (
// array.
type g_ucontext_t [(_sizeof_ucontext_t + 15) / unsafe.Sizeof(unsafe.Pointer(nil))]unsafe.Pointer
-// cgoMal tracks allocations made by _cgo_allocate
-// FIXME: _cgo_allocate has been removed from gc and can probably be
-// removed from gccgo too.
-type cgoMal struct {
- next *cgoMal
- alloc unsafe.Pointer
-}
-
// sigset is the Go version of the C type sigset_t.
// _sigset_t is defined by the Makefile from <signal.h>.
type sigset _sigset_t
diff --git a/libgo/go/runtime/signal1_unix.go b/libgo/go/runtime/signal1_unix.go
index 48c5491..181aebe 100644
--- a/libgo/go/runtime/signal1_unix.go
+++ b/libgo/go/runtime/signal1_unix.go
@@ -327,7 +327,7 @@ func ensureSigM() {
//go:norace
//go:nowritebarrierrec
func badsignal(sig uintptr, c *sigctxt) {
- needm()
+ needm(0)
if !sigsend(uint32(sig)) {
// A foreign thread received the signal sig, and the
// Go code does not want to handle it.
diff --git a/libgo/go/runtime/signal_gccgo.go b/libgo/go/runtime/signal_gccgo.go
index 321c619..4e5044f 100644
--- a/libgo/go/runtime/signal_gccgo.go
+++ b/libgo/go/runtime/signal_gccgo.go
@@ -17,18 +17,19 @@ import (
func sigaction(signum int32, act *_sigaction, oact *_sigaction) int32
//extern sigprocmask
-func sigprocmask(how int32, set *_sigset_t, oldset *_sigset_t) int32
+func sigprocmask(how int32, set *sigset, oldset *sigset) int32
-// The argument should be simply *_sigset_t, but that fails on GNU/Linux
-// which sometimes uses _sigset_t and sometimes uses ___sigset_t.
//extern sigfillset
-func sigfillset(set unsafe.Pointer) int32
+func sigfillset(set *sigset) int32
//extern sigemptyset
-func sigemptyset(set *_sigset_t) int32
+func sigemptyset(set *sigset) int32
//extern sigaddset
-func sigaddset(set *_sigset_t, signum int32) int32
+func sigaddset(set *sigset, signum int32) int32
+
+//extern sigdelset
+func sigdelset(set *sigset, signum int32) int32
//extern sigaltstack
func sigaltstack(ss *_stack_t, oss *_stack_t) int32
@@ -57,9 +58,19 @@ func (c *sigctxt) sigcode() uint64 {
}
//go:nosplit
+func msigsave(mp *m) {
+ sigprocmask(_SIG_SETMASK, nil, &mp.sigmask)
+}
+
+//go:nosplit
+func msigrestore(sigmask sigset) {
+ sigprocmask(_SIG_SETMASK, &sigmask, nil)
+}
+
+//go:nosplit
func sigblock() {
- var set _sigset_t
- sigfillset(unsafe.Pointer(&set))
+ var set sigset
+ sigfillset(&set)
sigprocmask(_SIG_SETMASK, &set, nil)
}
@@ -81,7 +92,7 @@ func setsig(i int32, fn uintptr, restart bool) {
if restart {
sa.sa_flags |= _SA_RESTART
}
- sigfillset(unsafe.Pointer(&sa.sa_mask))
+ sigfillset((*sigset)(unsafe.Pointer(&sa.sa_mask)))
setSigactionHandler(&sa, fn)
sigaction(i, &sa, nil)
}
@@ -117,10 +128,12 @@ func getsig(i int32) uintptr {
return getSigactionHandler(&sa)
}
+func signalstack(p unsafe.Pointer, n uintptr)
+
//go:nosplit
//go:nowritebarrierrec
func updatesigmask(m sigmask) {
- var mask _sigset_t
+ var mask sigset
sigemptyset(&mask)
for i := int32(0); i < _NSIG; i++ {
if m[(i-1)/32]&(1<<((uint(i)-1)&31)) != 0 {
@@ -131,7 +144,7 @@ func updatesigmask(m sigmask) {
}
func unblocksig(sig int32) {
- var mask _sigset_t
+ var mask sigset
sigemptyset(&mask)
sigaddset(&mask, sig)
sigprocmask(_SIG_UNBLOCK, &mask, nil)
diff --git a/libgo/go/runtime/signal_sighandler.go b/libgo/go/runtime/signal_sighandler.go
index 3cbec66..766bb7d 100644
--- a/libgo/go/runtime/signal_sighandler.go
+++ b/libgo/go/runtime/signal_sighandler.go
@@ -52,8 +52,8 @@ func sighandler(sig uint32, info *_siginfo_t, ctxt unsafe.Pointer, gp *g) {
// All signals were blocked due to the sigaction mask;
// unblock them.
- var set _sigset_t
- sigfillset(unsafe.Pointer(&set))
+ var set sigset
+ sigfillset(&set)
sigprocmask(_SIG_UNBLOCK, &set, nil)
sigpanic()
diff --git a/libgo/go/runtime/stubs.go b/libgo/go/runtime/stubs.go
index b4fee6b..dde9ebd 100644
--- a/libgo/go/runtime/stubs.go
+++ b/libgo/go/runtime/stubs.go
@@ -249,6 +249,24 @@ func funcPC(f interface{}) uintptr {
}
// For gccgo, to communicate from the C code to the Go code.
+//go:linkname setIsCgo runtime.setIsCgo
+func setIsCgo() {
+ iscgo = true
+}
+
+// Temporary for gccgo until we port proc.go.
+//go:linkname makeMainInitDone runtime.makeMainInitDone
+func makeMainInitDone() {
+ main_init_done = make(chan bool)
+}
+
+// Temporary for gccgo until we port proc.go.
+//go:linkname closeMainInitDone runtime.closeMainInitDone
+func closeMainInitDone() {
+ close(main_init_done)
+}
+
+// For gccgo, to communicate from the C code to the Go code.
//go:linkname setCpuidECX runtime.setCpuidECX
func setCpuidECX(v uint32) {
cpuid_ecx = v
@@ -301,6 +319,9 @@ var writeBarrier struct {
alignme uint64 // guarantee alignment so that compiler can use a 32 or 64-bit load
}
+// queueRescan is a no-op stub for gccgo until the GC rescan queue
+// from mgc.go is ported.
+func queueRescan(*g) {
+}
+
// Here for gccgo until we port atomic_pointer.go and mgc.go.
//go:nosplit
func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
@@ -446,6 +467,8 @@ func cpuprofAdd(stk []uintptr) {
func Breakpoint()
func LockOSThread()
func UnlockOSThread()
+func lockOSThread()
+func unlockOSThread()
func allm() *m
func allgs() []*g
@@ -499,8 +522,6 @@ func getZerobase() *uintptr {
}
// Temporary for gccgo until we port proc.go.
-func needm()
-func dropm()
func sigprof()
func mcount() int32
func gcount() int32
@@ -529,6 +550,12 @@ func getsched() *schedt {
return &sched
}
+// Temporary for gccgo until we port proc.go.
+//go:linkname getCgoHasExtraM runtime.getCgoHasExtraM
+func getCgoHasExtraM() *bool {
+ return &cgoHasExtraM
+}
+
// Throw and rethrow an exception.
func throwException()
func rethrowException()