path: root/libgo/go/runtime
author     Ian Lance Taylor <iant@golang.org>   2022-02-18 13:10:34 -0800
committer  Ian Lance Taylor <iant@golang.org>   2022-02-18 13:12:08 -0800
commit     20a33efdf32bf0aedcb0c9813ddc7572bb1ab8c7 (patch)
tree       94aec72c2092a11fa49f0b45da8e036f13416209 /libgo/go/runtime
parent     1931cbad498e625b1e24452dcfffe02539b12224 (diff)
libgo: update to Go1.18rc1 release
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/386594
Diffstat (limited to 'libgo/go/runtime')
-rw-r--r--  libgo/go/runtime/crash_cgo_test.go                       |  26
-rw-r--r--  libgo/go/runtime/crash_unix_test.go                      |   2
-rw-r--r--  libgo/go/runtime/debug/mod.go                            | 157
-rw-r--r--  libgo/go/runtime/debug/mod_test.go                       |  75
-rw-r--r--  libgo/go/runtime/export_test.go                          |  20
-rw-r--r--  libgo/go/runtime/histogram.go                            |  52
-rw-r--r--  libgo/go/runtime/histogram_test.go                       |  40
-rw-r--r--  libgo/go/runtime/internal/syscall/errno.c                |  27
-rw-r--r--  libgo/go/runtime/internal/syscall/syscall_linux.go       |  23
-rw-r--r--  libgo/go/runtime/mbarrier.go                             |   2
-rw-r--r--  libgo/go/runtime/mbitmap.go                              |  12
-rw-r--r--  libgo/go/runtime/mgcmark.go                              |   2
-rw-r--r--  libgo/go/runtime/mgcpacer.go                             |  72
-rw-r--r--  libgo/go/runtime/mgcpacer_test.go                        |  45
-rw-r--r--  libgo/go/runtime/mgcscavenge.go                          |  58
-rw-r--r--  libgo/go/runtime/pprof/pprof_test.go                     |  67
-rw-r--r--  libgo/go/runtime/proc.go                                 | 143
-rw-r--r--  libgo/go/runtime/runtime2.go                             |  19
-rw-r--r--  libgo/go/runtime/sigqueue.go                             |  48
-rw-r--r--  libgo/go/runtime/slice.go                                |   2
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/aprof.go           |   7
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/pprof_callback.go  |  89
-rw-r--r--  libgo/go/runtime/trace.go                                |  10
23 files changed, 718 insertions, 280 deletions
diff --git a/libgo/go/runtime/crash_cgo_test.go b/libgo/go/runtime/crash_cgo_test.go
index 615892d..7344a93 100644
--- a/libgo/go/runtime/crash_cgo_test.go
+++ b/libgo/go/runtime/crash_cgo_test.go
@@ -216,6 +216,19 @@ func TestCgoCCodeSIGPROF(t *testing.T) {
}
}
+func TestCgoPprofCallback(t *testing.T) {
+ t.Parallel()
+ switch runtime.GOOS {
+ case "windows", "plan9":
+ t.Skipf("skipping cgo pprof callback test on %s", runtime.GOOS)
+ }
+ got := runTestProg(t, "testprogcgo", "CgoPprofCallback")
+ want := "OK\n"
+ if got != want {
+ t.Errorf("expected %q got %v", want, got)
+ }
+}
+
func TestCgoCrashTraceback(t *testing.T) {
t.Parallel()
switch platform := runtime.GOOS + "/" + runtime.GOARCH; platform {
@@ -614,6 +627,9 @@ func TestSegv(t *testing.T) {
t.Log(got)
want := "SIGSEGV"
if !strings.Contains(got, want) {
+ if runtime.GOOS == "darwin" && runtime.GOARCH == "amd64" && strings.Contains(got, "fatal: morestack on g0") {
+ testenv.SkipFlaky(t, 39457)
+ }
t.Errorf("did not see %q in output", want)
}
@@ -628,13 +644,11 @@ func TestSegv(t *testing.T) {
// a VDSO call via asmcgocall.
testenv.SkipFlaky(t, 50504)
}
- if testenv.Builder() == "linux-mips64le-mengzhuo" && strings.Contains(got, "runtime: unknown pc") {
- // Runtime sometimes throw "unknown pc" when generating the traceback.
- // Curiously, that doesn't seem to happen on the linux-mips64le-rtrk
- // builder.
- testenv.SkipFlaky(t, 50605)
- }
}
+ if test == "SegvInCgo" && strings.Contains(got, "runtime: unknown pc") {
+ testenv.SkipFlaky(t, 50979)
+ }
+
nowant := "runtime: "
if strings.Contains(got, nowant) {
t.Errorf("unexpectedly saw %q in output", nowant)
diff --git a/libgo/go/runtime/crash_unix_test.go b/libgo/go/runtime/crash_unix_test.go
index b7ca811..ab94aff 100644
--- a/libgo/go/runtime/crash_unix_test.go
+++ b/libgo/go/runtime/crash_unix_test.go
@@ -132,7 +132,7 @@ func TestCrashDumpsAllThreads(t *testing.T) {
out := outbuf.Bytes()
n := bytes.Count(out, []byte("main.crashDumpsAllThreadsLoop"))
if n != 4 {
- t.Errorf("found %d instances of main.loop; expected 4", n)
+ t.Errorf("found %d instances of main.crashDumpsAllThreadsLoop; expected 4", n)
t.Logf("%s", out)
}
}
diff --git a/libgo/go/runtime/debug/mod.go b/libgo/go/runtime/debug/mod.go
index 61b15ad..5184a02 100644
--- a/libgo/go/runtime/debug/mod.go
+++ b/libgo/go/runtime/debug/mod.go
@@ -5,9 +5,9 @@
package debug
import (
- "bytes"
"fmt"
"runtime"
+ "strconv"
"strings"
_ "unsafe" // for go:linkname
)
@@ -24,8 +24,8 @@ func ReadBuildInfo() (info *BuildInfo, ok bool) {
return nil, false
}
data = data[16 : len(data)-16]
- bi := &BuildInfo{}
- if err := bi.UnmarshalText([]byte(data)); err != nil {
+ bi, err := ParseBuildInfo(data)
+ if err != nil {
return nil, false
}
@@ -64,8 +64,18 @@ type BuildSetting struct {
Key, Value string
}
-func (bi *BuildInfo) MarshalText() ([]byte, error) {
- buf := &bytes.Buffer{}
+// quoteKey reports whether key is required to be quoted.
+func quoteKey(key string) bool {
+ return len(key) == 0 || strings.ContainsAny(key, "= \t\r\n\"`")
+}
+
+// quoteValue reports whether value is required to be quoted.
+func quoteValue(value string) bool {
+ return strings.ContainsAny(value, " \t\r\n\"`")
+}
+
+func (bi *BuildInfo) String() string {
+ buf := new(strings.Builder)
if bi.GoVersion != "" {
fmt.Fprintf(buf, "go\t%s\n", bi.GoVersion)
}
@@ -77,12 +87,8 @@ func (bi *BuildInfo) MarshalText() ([]byte, error) {
buf.WriteString(word)
buf.WriteByte('\t')
buf.WriteString(m.Path)
- mv := m.Version
- if mv == "" {
- mv = "(devel)"
- }
buf.WriteByte('\t')
- buf.WriteString(mv)
+ buf.WriteString(m.Version)
if m.Replace == nil {
buf.WriteByte('\t')
buf.WriteString(m.Sum)
@@ -92,27 +98,28 @@ func (bi *BuildInfo) MarshalText() ([]byte, error) {
}
buf.WriteByte('\n')
}
- if bi.Main.Path != "" {
+ if bi.Main != (Module{}) {
formatMod("mod", bi.Main)
}
for _, dep := range bi.Deps {
formatMod("dep", *dep)
}
for _, s := range bi.Settings {
- if strings.ContainsAny(s.Key, "= \t\n") {
- return nil, fmt.Errorf("invalid build setting key %q", s.Key)
+ key := s.Key
+ if quoteKey(key) {
+ key = strconv.Quote(key)
}
- if strings.Contains(s.Value, "\n") {
- return nil, fmt.Errorf("invalid build setting value for key %q: contains newline", s.Value)
+ value := s.Value
+ if quoteValue(value) {
+ value = strconv.Quote(value)
}
- fmt.Fprintf(buf, "build\t%s=%s\n", s.Key, s.Value)
+ fmt.Fprintf(buf, "build\t%s=%s\n", key, value)
}
- return buf.Bytes(), nil
+ return buf.String()
}
-func (bi *BuildInfo) UnmarshalText(data []byte) (err error) {
- *bi = BuildInfo{}
+func ParseBuildInfo(data string) (bi *BuildInfo, err error) {
lineNum := 1
defer func() {
if err != nil {
@@ -121,67 +128,69 @@ func (bi *BuildInfo) UnmarshalText(data []byte) (err error) {
}()
var (
- pathLine = []byte("path\t")
- modLine = []byte("mod\t")
- depLine = []byte("dep\t")
- repLine = []byte("=>\t")
- buildLine = []byte("build\t")
- newline = []byte("\n")
- tab = []byte("\t")
+ pathLine = "path\t"
+ modLine = "mod\t"
+ depLine = "dep\t"
+ repLine = "=>\t"
+ buildLine = "build\t"
+ newline = "\n"
+ tab = "\t"
)
- readModuleLine := func(elem [][]byte) (Module, error) {
+ readModuleLine := func(elem []string) (Module, error) {
if len(elem) != 2 && len(elem) != 3 {
return Module{}, fmt.Errorf("expected 2 or 3 columns; got %d", len(elem))
}
+ version := elem[1]
sum := ""
if len(elem) == 3 {
- sum = string(elem[2])
+ sum = elem[2]
}
return Module{
- Path: string(elem[0]),
- Version: string(elem[1]),
+ Path: elem[0],
+ Version: version,
Sum: sum,
}, nil
}
+ bi = new(BuildInfo)
var (
last *Module
- line []byte
+ line string
ok bool
)
// Reverse of BuildInfo.String(), except for go version.
for len(data) > 0 {
- line, data, ok = bytes.Cut(data, newline)
+ line, data, ok = strings.Cut(data, newline)
if !ok {
break
}
switch {
- case bytes.HasPrefix(line, pathLine):
+ case strings.HasPrefix(line, pathLine):
elem := line[len(pathLine):]
bi.Path = string(elem)
- case bytes.HasPrefix(line, modLine):
- elem := bytes.Split(line[len(modLine):], tab)
+ case strings.HasPrefix(line, modLine):
+ elem := strings.Split(line[len(modLine):], tab)
last = &bi.Main
*last, err = readModuleLine(elem)
if err != nil {
- return err
+ return nil, err
}
- case bytes.HasPrefix(line, depLine):
- elem := bytes.Split(line[len(depLine):], tab)
+ case strings.HasPrefix(line, depLine):
+ elem := strings.Split(line[len(depLine):], tab)
last = new(Module)
bi.Deps = append(bi.Deps, last)
*last, err = readModuleLine(elem)
if err != nil {
- return err
+ return nil, err
}
- case bytes.HasPrefix(line, repLine):
- elem := bytes.Split(line[len(repLine):], tab)
+ case strings.HasPrefix(line, repLine):
+ elem := strings.Split(line[len(repLine):], tab)
if len(elem) != 3 {
- return fmt.Errorf("expected 3 columns for replacement; got %d", len(elem))
+ return nil, fmt.Errorf("expected 3 columns for replacement; got %d", len(elem))
}
if last == nil {
- return fmt.Errorf("replacement with no module on previous line")
+ return nil, fmt.Errorf("replacement with no module on previous line")
}
last.Replace = &Module{
Path: string(elem[0]),
@@ -189,17 +198,63 @@ func (bi *BuildInfo) UnmarshalText(data []byte) (err error) {
Sum: string(elem[2]),
}
last = nil
- case bytes.HasPrefix(line, buildLine):
- key, val, ok := strings.Cut(string(line[len(buildLine):]), "=")
- if !ok {
- return fmt.Errorf("invalid build line")
+ case strings.HasPrefix(line, buildLine):
+ kv := line[len(buildLine):]
+ if len(kv) < 1 {
+ return nil, fmt.Errorf("build line missing '='")
+ }
+
+ var key, rawValue string
+ switch kv[0] {
+ case '=':
+ return nil, fmt.Errorf("build line with missing key")
+
+ case '`', '"':
+ rawKey, err := strconv.QuotedPrefix(kv)
+ if err != nil {
+ return nil, fmt.Errorf("invalid quoted key in build line")
+ }
+ if len(kv) == len(rawKey) {
+ return nil, fmt.Errorf("build line missing '=' after quoted key")
+ }
+ if c := kv[len(rawKey)]; c != '=' {
+ return nil, fmt.Errorf("unexpected character after quoted key: %q", c)
+ }
+ key, _ = strconv.Unquote(rawKey)
+ rawValue = kv[len(rawKey)+1:]
+
+ default:
+ var ok bool
+ key, rawValue, ok = strings.Cut(kv, "=")
+ if !ok {
+ return nil, fmt.Errorf("build line missing '=' after key")
+ }
+ if quoteKey(key) {
+ return nil, fmt.Errorf("unquoted key %q must be quoted", key)
+ }
}
- if key == "" {
- return fmt.Errorf("empty key")
+
+ var value string
+ if len(rawValue) > 0 {
+ switch rawValue[0] {
+ case '`', '"':
+ var err error
+ value, err = strconv.Unquote(rawValue)
+ if err != nil {
+ return nil, fmt.Errorf("invalid quoted value in build line")
+ }
+
+ default:
+ value = rawValue
+ if quoteValue(value) {
+ return nil, fmt.Errorf("unquoted value %q must be quoted", value)
+ }
+ }
}
- bi.Settings = append(bi.Settings, BuildSetting{Key: key, Value: val})
+
+ bi.Settings = append(bi.Settings, BuildSetting{Key: key, Value: value})
}
lineNum++
}
- return nil
+ return bi, nil
}
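
For reference, a minimal sketch of how the reworked runtime/debug API fits together (illustrative, not part of the patch): ReadBuildInfo still returns a *BuildInfo, whose new String method replaces MarshalText, and whose output round-trips through the new top-level ParseBuildInfo, which replaces UnmarshalText.

    package main

    import (
        "fmt"
        "runtime/debug"
    )

    func main() {
        bi, ok := debug.ReadBuildInfo()
        if !ok {
            fmt.Println("binary built without module support")
            return
        }
        text := bi.String()                    // formerly MarshalText
        bi2, err := debug.ParseBuildInfo(text) // formerly UnmarshalText
        if err != nil {
            fmt.Println("parse error:", err)
            return
        }
        fmt.Println(bi2.Path, bi2.GoVersion)
    }
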
diff --git a/libgo/go/runtime/debug/mod_test.go b/libgo/go/runtime/debug/mod_test.go
new file mode 100644
index 0000000..b291769
--- /dev/null
+++ b/libgo/go/runtime/debug/mod_test.go
@@ -0,0 +1,75 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug_test
+
+import (
+ "reflect"
+ "runtime/debug"
+ "strings"
+ "testing"
+)
+
+// strip removes two leading tabs after each newline of s.
+func strip(s string) string {
+ replaced := strings.ReplaceAll(s, "\n\t\t", "\n")
+ if len(replaced) > 0 && replaced[0] == '\n' {
+ replaced = replaced[1:]
+ }
+ return replaced
+}
+
+func FuzzParseBuildInfoRoundTrip(f *testing.F) {
+ // Package built from outside a module, missing some fields.
+ f.Add(strip(`
+ path rsc.io/fortune
+ mod rsc.io/fortune v1.0.0
+ `))
+
+ // Package built from the standard library, missing some fields.
+ f.Add(`path cmd/test2json`)
+
+ // Package built from inside a module.
+ f.Add(strip(`
+ go 1.18
+ path example.com/m
+ mod example.com/m (devel)
+ build -compiler=gc
+ `))
+
+ // Package built in GOPATH mode.
+ f.Add(strip(`
+ go 1.18
+ path example.com/m
+ build -compiler=gc
+ `))
+
+ // Escaped build info.
+ f.Add(strip(`
+ go 1.18
+ path example.com/m
+ build CRAZY_ENV="requires\nescaping"
+ `))
+
+ f.Fuzz(func(t *testing.T, s string) {
+ bi, err := debug.ParseBuildInfo(s)
+ if err != nil {
+ // Not a round-trippable BuildInfo string.
+ t.Log(err)
+ return
+ }
+
+ // s2 could have different escaping from s.
+ // However, it should parse to exactly the same contents.
+ s2 := bi.String()
+ bi2, err := debug.ParseBuildInfo(s2)
+ if err != nil {
+ t.Fatalf("%v:\n%s", err, s2)
+ }
+
+ if !reflect.DeepEqual(bi2, bi) {
+ t.Fatalf("Parsed representation differs.\ninput:\n%s\noutput:\n%s", s, s2)
+ }
+ })
+}
diff --git a/libgo/go/runtime/export_test.go b/libgo/go/runtime/export_test.go
index 468e3a1..85d6f61 100644
--- a/libgo/go/runtime/export_test.go
+++ b/libgo/go/runtime/export_test.go
@@ -1202,6 +1202,8 @@ func (th *TimeHistogram) Record(duration int64) {
var Pusestackmaps = &usestackmaps
+var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
+
func FinalizerGAsleep() bool {
lock(&finlock)
result := fingwait
@@ -1318,3 +1320,21 @@ func Releasem() {
}
var Timediv = timediv
+
+type PIController struct {
+ piController
+}
+
+func NewPIController(kp, ti, tt, min, max float64) *PIController {
+ return &PIController{piController{
+ kp: kp,
+ ti: ti,
+ tt: tt,
+ min: min,
+ max: max,
+ }}
+}
+
+func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
+ return c.piController.next(input, setpoint, period)
+}
diff --git a/libgo/go/runtime/histogram.go b/libgo/go/runtime/histogram.go
index 0cccbcc..cd7e29a 100644
--- a/libgo/go/runtime/histogram.go
+++ b/libgo/go/runtime/histogram.go
@@ -47,7 +47,7 @@ const (
// │ └---- Next 4 bits -> sub-bucket 1
// └------- Bit 5 set -> super-bucket 2
//
- // Following this pattern, bucket 45 will have the bit 48 set. We don't
+ // Following this pattern, super-bucket 44 will have the bit 47 set. We don't
// have any buckets for higher values, so the highest sub-bucket will
// contain values of 2^48-1 nanoseconds or approx. 3 days. This range is
// more than enough to handle durations produced by the runtime.
@@ -139,36 +139,30 @@ func float64NegInf() float64 {
func timeHistogramMetricsBuckets() []float64 {
b := make([]float64, timeHistTotalBuckets+1)
b[0] = float64NegInf()
- for i := 0; i < timeHistNumSuperBuckets; i++ {
- superBucketMin := uint64(0)
- // The (inclusive) minimum for the first non-negative bucket is 0.
- if i > 0 {
- // The minimum for the second bucket will be
- // 1 << timeHistSubBucketBits, indicating that all
- // sub-buckets are represented by the next timeHistSubBucketBits
- // bits.
- // Thereafter, we shift up by 1 each time, so we can represent
- // this pattern as (i-1)+timeHistSubBucketBits.
- superBucketMin = uint64(1) << uint(i-1+timeHistSubBucketBits)
- }
- // subBucketShift is the amount that we need to shift the sub-bucket
- // index to combine it with the bucketMin.
- subBucketShift := uint(0)
- if i > 1 {
- // The first two super buckets are exact with respect to integers,
- // so we'll never have to shift the sub-bucket index. Thereafter,
- // we shift up by 1 with each subsequent bucket.
- subBucketShift = uint(i - 2)
- }
+ // Super-bucket 0 has no bits above timeHistSubBucketBits
+ // set, so just iterate over each bucket and assign the
+ // incrementing bucket.
+ for i := 0; i < timeHistNumSubBuckets; i++ {
+ bucketNanos := uint64(i)
+ b[i+1] = float64(bucketNanos) / 1e9
+ }
+ // Generate the rest of the super-buckets. It's easier to reason
+ // about if we cut out the 0'th bucket, so subtract one since
+ // we just handled that bucket.
+ for i := 0; i < timeHistNumSuperBuckets-1; i++ {
for j := 0; j < timeHistNumSubBuckets; j++ {
- // j is the sub-bucket index. By shifting the index into position to
- // combine with the bucket minimum, we obtain the minimum value for that
- // sub-bucket.
- subBucketMin := superBucketMin + (uint64(j) << subBucketShift)
-
- // Convert the subBucketMin which is in nanoseconds to a float64 seconds value.
+ // Set the super-bucket bit.
+ bucketNanos := uint64(1) << (i + timeHistSubBucketBits)
+ // Set the sub-bucket bits.
+ bucketNanos |= uint64(j) << i
+ // The index for this bucket is going to be the (i+1)'th super bucket
+ // (note that we're starting from zero, but handled the first super-bucket
+ // earlier, so we need to compensate), and the j'th sub bucket.
+ // Add 1 because we left space for -Inf.
+ bucketIndex := (i+1)*timeHistNumSubBuckets + j + 1
+ // Convert nanoseconds to seconds via a division.
// These values will all be exactly representable by a float64.
- b[i*timeHistNumSubBuckets+j+1] = float64(subBucketMin) / 1e9
+ b[bucketIndex] = float64(bucketNanos) / 1e9
}
}
b[len(b)-1] = float64Inf()
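
The rewritten loop builds each boundary by setting the super-bucket bit and OR-ing in the shifted sub-bucket index. A standalone sketch of that computation, with the runtime's bucket constants copied in for illustration (treat the exact values, subBucketBits = 4 and 45 super-buckets, as assumptions of this sketch):

    package main

    import "fmt"

    const (
        subBucketBits   = 4
        numSubBuckets   = 1 << subBucketBits // 16
        numSuperBuckets = 45
    )

    func main() {
        // Super-bucket 0 has no bits above subBucketBits set: boundaries 0..15 ns.
        for i := 0; i < numSubBuckets; i++ {
            fmt.Printf("%d ns\n", i)
        }
        // Remaining super-buckets: set the super-bucket bit, then OR in the
        // sub-bucket index shifted into place, as timeHistogramMetricsBuckets now does.
        for i := 0; i < numSuperBuckets-1; i++ {
            for j := 0; j < numSubBuckets; j++ {
                nanos := uint64(1)<<(i+subBucketBits) | uint64(j)<<i
                fmt.Printf("%d ns\n", nanos)
            }
        }
    }
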
diff --git a/libgo/go/runtime/histogram_test.go b/libgo/go/runtime/histogram_test.go
index dbc64fa..b12b65a 100644
--- a/libgo/go/runtime/histogram_test.go
+++ b/libgo/go/runtime/histogram_test.go
@@ -68,3 +68,43 @@ func TestTimeHistogram(t *testing.T) {
dummyTimeHistogram = TimeHistogram{}
}
+
+func TestTimeHistogramMetricsBuckets(t *testing.T) {
+ buckets := TimeHistogramMetricsBuckets()
+
+ nonInfBucketsLen := TimeHistNumSubBuckets * TimeHistNumSuperBuckets
+ expBucketsLen := nonInfBucketsLen + 2 // Count -Inf and +Inf.
+ if len(buckets) != expBucketsLen {
+ t.Fatalf("unexpected length of buckets: got %d, want %d", len(buckets), expBucketsLen)
+ }
+ // Check the first non-Inf 2*TimeHistNumSubBuckets buckets in order, skipping the
+ // first bucket which should be -Inf (checked later).
+ //
+ // Because of the way this scheme works, the bottom TimeHistNumSubBuckets
+ // buckets are fully populated, and then the next TimeHistNumSubBuckets
+ // have the TimeHistSubBucketBits'th bit set, while the bottom are once
+ // again fully populated.
+ for i := 1; i <= 2*TimeHistNumSubBuckets+1; i++ {
+ if got, want := buckets[i], float64(i-1)/1e9; got != want {
+ t.Errorf("expected bucket %d to have value %e, got %e", i, want, got)
+ }
+ }
+ // Check some values.
+ idxToBucket := map[int]float64{
+ 0: math.Inf(-1),
+ 33: float64(0x10<<1) / 1e9,
+ 34: float64(0x11<<1) / 1e9,
+ 49: float64(0x10<<2) / 1e9,
+ 58: float64(0x19<<2) / 1e9,
+ 65: float64(0x10<<3) / 1e9,
+ 513: float64(0x10<<31) / 1e9,
+ 519: float64(0x16<<31) / 1e9,
+ expBucketsLen - 2: float64(0x1f<<43) / 1e9,
+ expBucketsLen - 1: math.Inf(1),
+ }
+ for idx, bucket := range idxToBucket {
+ if got, want := buckets[idx], bucket; got != want {
+ t.Errorf("expected bucket %d to have value %e, got %e", idx, want, got)
+ }
+ }
+}
diff --git a/libgo/go/runtime/internal/syscall/errno.c b/libgo/go/runtime/internal/syscall/errno.c
new file mode 100644
index 0000000..2bc5900
--- /dev/null
+++ b/libgo/go/runtime/internal/syscall/errno.c
@@ -0,0 +1,27 @@
+/* errno.c -- functions for getting and setting errno
+
+ Copyright 2022 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file. */
+#include <errno.h>
+#include <stdint.h>
+
+#include "runtime.h"
+
+/* errno is typically a macro. These functions set and get errno
+ specific to the libc being used. */
+
+uintptr_t getErrno(void) __asm__ (GOSYM_PREFIX "runtime_1internal_1syscall.getErrno");
+void setErrno(uintptr_t) __asm__ (GOSYM_PREFIX "runtime_1internal_1syscall.setErrno");
+
+uintptr_t
+getErrno(void)
+{
+ return (uintptr_t) errno;
+}
+
+void
+setErrno(uintptr_t value)
+{
+ errno = (int) value;
+}
diff --git a/libgo/go/runtime/internal/syscall/syscall_linux.go b/libgo/go/runtime/internal/syscall/syscall_linux.go
new file mode 100644
index 0000000..a4e9358
--- /dev/null
+++ b/libgo/go/runtime/internal/syscall/syscall_linux.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package syscall provides the syscall primitives required for the runtime.
+package syscall
+
+// TODO(https://go.dev/issue/51087): This package is incomplete and currently
+// only contains very minimal support for Linux.
+
+//extern __go_syscall6
+func syscall6(num uintptr, a1, a2, a3, a4, a5, a6 uintptr) uintptr
+
+func getErrno() uintptr
+func setErrno(uintptr)
+
+// Syscall6 calls system call number 'num' with arguments a1-6.
+func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr) {
+ setErrno(0)
+ r := syscall6(num, a1, a2, a3, a4, a5, a6)
+ errno = getErrno()
+ return r, 0, errno
+}
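
The new internal package mirrors the calling shape of the long-standing public syscall package: pass a syscall number plus six arguments, get back a result and an errno. For comparison only, a hypothetical snippet using the public package and getpid(2) on Linux (not the new runtime/internal/syscall package, which is importable only by the runtime):

    //go:build linux

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        r1, _, errno := syscall.Syscall6(syscall.SYS_GETPID, 0, 0, 0, 0, 0, 0)
        if errno != 0 {
            fmt.Println("errno:", errno)
            return
        }
        fmt.Println("pid:", r1)
    }
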
diff --git a/libgo/go/runtime/mbarrier.go b/libgo/go/runtime/mbarrier.go
index 4982fd4..94eebc9 100644
--- a/libgo/go/runtime/mbarrier.go
+++ b/libgo/go/runtime/mbarrier.go
@@ -204,7 +204,7 @@ func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
// typedmemmovepartial is like typedmemmove but assumes that
// dst and src point off bytes into the value and only copies size bytes.
-// off must be a multiple of sys.PtrSize.
+// off must be a multiple of goarch.PtrSize.
//go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
if writeBarrier.needed && typ.ptrdata > off && size >= goarch.PtrSize {
diff --git a/libgo/go/runtime/mbitmap.go b/libgo/go/runtime/mbitmap.go
index 019ca34..33cfa72 100644
--- a/libgo/go/runtime/mbitmap.go
+++ b/libgo/go/runtime/mbitmap.go
@@ -862,7 +862,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// size is sizeof(_defer{}) (at least 6 words) and dataSize may be
// arbitrarily larger.
//
- // The checks for size == sys.PtrSize and size == 2*sys.PtrSize can therefore
+ // The checks for size == goarch.PtrSize and size == 2*goarch.PtrSize can therefore
// assume that dataSize == size without checking it explicitly.
if goarch.PtrSize == 8 && size == goarch.PtrSize {
@@ -912,7 +912,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
}
return
}
- // Otherwise typ.size must be 2*sys.PtrSize,
+ // Otherwise typ.size must be 2*goarch.PtrSize,
// and typ.kind&kindGCProg == 0.
if doubleCheck {
if typ.size != 2*goarch.PtrSize || typ.kind&kindGCProg != 0 {
@@ -1114,8 +1114,8 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
// Replicate ptrmask to fill entire pbits uintptr.
// Doubling and truncating is fewer steps than
// iterating by nb each time. (nb could be 1.)
- // Since we loaded typ.ptrdata/sys.PtrSize bits
- // but are pretending to have typ.size/sys.PtrSize,
+ // Since we loaded typ.ptrdata/goarch.PtrSize bits
+ // but are pretending to have typ.size/goarch.PtrSize,
// there might be no replication necessary/possible.
pbits = b
endnb = nb
@@ -1583,7 +1583,7 @@ func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize u
// progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
// size the size of the region described by prog, in bytes.
-// The resulting bitvector will have no more than size/sys.PtrSize bits.
+// The resulting bitvector will have no more than size/goarch.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector {
n := (size/goarch.PtrSize + 7) / 8
x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
@@ -1716,7 +1716,7 @@ Run:
// into a register and use that register for the entire loop
// instead of repeatedly reading from memory.
// Handling fewer than 8 bits here makes the general loop simpler.
- // The cutoff is sys.PtrSize*8 - 7 to guarantee that when we add
+ // The cutoff is goarch.PtrSize*8 - 7 to guarantee that when we add
// the pattern to a bit buffer holding at most 7 bits (a partial byte)
// it will not overflow.
src := dst
diff --git a/libgo/go/runtime/mgcmark.go b/libgo/go/runtime/mgcmark.go
index 6bc7094..3b568dd 100644
--- a/libgo/go/runtime/mgcmark.go
+++ b/libgo/go/runtime/mgcmark.go
@@ -338,7 +338,7 @@ func markrootSpans(gcw *gcWork, shard int) {
}
// gcAssistAlloc performs GC work to make gp's assist debt positive.
-// gp must be the calling user gorountine.
+// gp must be the calling user goroutine.
//
// This must be called with preemption enabled.
func gcAssistAlloc(gp *g) {
diff --git a/libgo/go/runtime/mgcpacer.go b/libgo/go/runtime/mgcpacer.go
index 5e940b0..0fff13c 100644
--- a/libgo/go/runtime/mgcpacer.go
+++ b/libgo/go/runtime/mgcpacer.go
@@ -154,6 +154,8 @@ type gcControllerState struct {
// For goexperiment.PacerRedesign.
consMarkController piController
+ _ uint32 // Padding for atomics on 32-bit platforms.
+
// heapGoal is the goal heapLive for when next GC ends.
// Set to ^uint64(0) if disabled.
//
@@ -670,10 +672,31 @@ func (c *gcControllerState) endCycle(now int64, procs int, userForced bool) floa
currentConsMark := (float64(c.heapLive-c.trigger) * (utilization + idleUtilization)) /
(float64(scanWork) * (1 - utilization))
- // Update cons/mark controller.
- // Period for this is 1 GC cycle.
+ // Update cons/mark controller. The time period for this is 1 GC cycle.
+ //
+ // This use of a PI controller might seem strange. So, here's an explanation:
+ //
+ // currentConsMark represents the consMark we *should've* had to be perfectly
+ // on-target for this cycle. Given that we assume the next GC will be like this
+ // one in the steady-state, it stands to reason that we should just pick that
+ // as our next consMark. In practice, however, currentConsMark is too noisy:
+ // we're going to be wildly off-target in each GC cycle if we do that.
+ //
+ // What we do instead is make a long-term assumption: there is some steady-state
+ // consMark value, but it's obscured by noise. By constantly shooting for this
+ // noisy-but-perfect consMark value, the controller will bounce around a bit,
+ // but its average behavior, in aggregate, should be less noisy and closer to
+ // the true long-term consMark value, provided it's tuned to be slightly overdamped.
+ var ok bool
oldConsMark := c.consMark
- c.consMark = c.consMarkController.next(c.consMark, currentConsMark, 1.0)
+ c.consMark, ok = c.consMarkController.next(c.consMark, currentConsMark, 1.0)
+ if !ok {
+ // The error spiraled out of control. This is incredibly unlikely seeing
+ // as this controller is essentially just a smoothing function, but it might
+ // mean that something went very wrong with how currentConsMark was calculated.
+ // Just reset consMark and keep going.
+ c.consMark = 0
+ }
if debug.gcpacertrace > 0 {
printlock()
@@ -681,6 +704,9 @@ func (c *gcControllerState) endCycle(now int64, procs int, userForced bool) floa
print("pacer: ", int(utilization*100), "% CPU (", int(goal), " exp.) for ")
print(c.heapScanWork.Load(), "+", c.stackScanWork.Load(), "+", c.globalsScanWork.Load(), " B work (", c.lastHeapScan+c.stackScan+c.globalsScan, " B exp.) ")
print("in ", c.trigger, " B -> ", c.heapLive, " B (∆goal ", int64(c.heapLive)-int64(c.heapGoal), ", cons/mark ", oldConsMark, ")")
+ if !ok {
+ print("[controller reset]")
+ }
println()
printunlock()
}
@@ -1263,15 +1289,38 @@ type piController struct {
// PI controller state.
errIntegral float64 // Integral of the error from t=0 to now.
+
+ // Error flags.
+ errOverflow bool // Set if errIntegral ever overflowed.
+ inputOverflow bool // Set if an operation with the input overflowed.
}
-func (c *piController) next(input, setpoint, period float64) float64 {
+// next provides a new sample to the controller.
+//
+// input is the sample, setpoint is the desired point, and period is how much
+// time (in whatever unit makes the most sense) has passed since the last sample.
+//
+// Returns a new value for the variable it's controlling, and whether the operation
+// completed successfully. One reason this might fail is if error has been growing
+// in an unbounded manner, to the point of overflow.
+//
+// In the specific case where an error overflow occurs, the errOverflow field will be
+// set and the rest of the controller's internal state will be fully reset.
+func (c *piController) next(input, setpoint, period float64) (float64, bool) {
// Compute the raw output value.
prop := c.kp * (setpoint - input)
rawOutput := prop + c.errIntegral
// Clamp rawOutput into output.
output := rawOutput
+ if isInf(output) || isNaN(output) {
+ // The input had a large enough magnitude that either it was already
+ // overflowed, or some operation with it overflowed.
+ // Set a flag and reset. That's the safest thing to do.
+ c.reset()
+ c.inputOverflow = true
+ return c.min, false
+ }
if output < c.min {
output = c.min
} else if output > c.max {
@@ -1281,6 +1330,19 @@ func (c *piController) next(input, setpoint, period float64) float64 {
// Update the controller's state.
if c.ti != 0 && c.tt != 0 {
c.errIntegral += (c.kp*period/c.ti)*(setpoint-input) + (period/c.tt)*(output-rawOutput)
+ if isInf(c.errIntegral) || isNaN(c.errIntegral) {
+ // So much error has accumulated that we managed to overflow.
+ // The assumptions around the controller have likely broken down.
+ // Set a flag and reset. That's the safest thing to do.
+ c.reset()
+ c.errOverflow = true
+ return c.min, false
+ }
}
- return output
+ return output, true
+}
+
+// reset resets the controller state, except for controller error flags.
+func (c *piController) reset() {
+ c.errIntegral = 0
}
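
A self-contained sketch of the guarded controller step described above, assuming field names that mirror the runtime's piController: the new (value, ok) return reports when an overflowed input or an overflowed error integral forced a reset.

    package main

    import (
        "fmt"
        "math"
    )

    type piController struct {
        kp, ti, tt  float64
        min, max    float64
        errIntegral float64
    }

    func (c *piController) next(input, setpoint, period float64) (float64, bool) {
        prop := c.kp * (setpoint - input)
        rawOutput := prop + c.errIntegral
        output := rawOutput
        if math.IsInf(output, 0) || math.IsNaN(output) {
            c.errIntegral = 0
            return c.min, false // input overflowed; reset and report failure
        }
        // Clamp into the allowed range.
        if output < c.min {
            output = c.min
        } else if output > c.max {
            output = c.max
        }
        if c.ti != 0 && c.tt != 0 {
            c.errIntegral += (c.kp*period/c.ti)*(setpoint-input) + (period/c.tt)*(output-rawOutput)
            if math.IsInf(c.errIntegral, 0) || math.IsNaN(c.errIntegral) {
                c.errIntegral = 0
                return c.min, false // accumulated error overflowed; reset
            }
        }
        return output, true
    }

    func main() {
        c := &piController{kp: 0.9, ti: 4.0, tt: 1000.0, min: -1000.0, max: 1000.0}
        out, ok := c.next(0.5, 0.84, 1.0)
        fmt.Println(out, ok)
    }
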
diff --git a/libgo/go/runtime/mgcpacer_test.go b/libgo/go/runtime/mgcpacer_test.go
index 9ec0e51..10a8ca2 100644
--- a/libgo/go/runtime/mgcpacer_test.go
+++ b/libgo/go/runtime/mgcpacer_test.go
@@ -715,3 +715,48 @@ func (f float64Stream) limit(min, max float64) float64Stream {
return v
}
}
+
+func FuzzPIController(f *testing.F) {
+ isNormal := func(x float64) bool {
+ return !math.IsInf(x, 0) && !math.IsNaN(x)
+ }
+ isPositive := func(x float64) bool {
+ return isNormal(x) && x > 0
+ }
+ // Seed with constants from controllers in the runtime.
+ // It's not critical that we keep these in sync, they're just
+ // reasonable seed inputs.
+ f.Add(0.3375, 3.2e6, 1e9, 0.001, 1000.0, 0.01)
+ f.Add(0.9, 4.0, 1000.0, -1000.0, 1000.0, 0.84)
+ f.Fuzz(func(t *testing.T, kp, ti, tt, min, max, setPoint float64) {
+ // Ignore uninteresting invalid parameters. These parameters
+ // are constant, so in practice surprising values will be documented
+ // or will be otherwise immediately visible.
+ //
+ // We just want to make sure that given a non-Inf, non-NaN input,
+ // we always get a non-Inf, non-NaN output.
+ if !isPositive(kp) || !isPositive(ti) || !isPositive(tt) {
+ return
+ }
+ if !isNormal(min) || !isNormal(max) || min > max {
+ return
+ }
+ // Use a random source, but make it deterministic.
+ rs := rand.New(rand.NewSource(800))
+ randFloat64 := func() float64 {
+ return math.Float64frombits(rs.Uint64())
+ }
+ p := NewPIController(kp, ti, tt, min, max)
+ state := float64(0)
+ for i := 0; i < 100; i++ {
+ input := randFloat64()
+ // Ignore the "ok" parameter. We're just trying to break it.
+ // state is intentionally completely uncorrelated with the input.
+ var ok bool
+ state, ok = p.Next(input, setPoint, 1.0)
+ if !isNormal(state) {
+ t.Fatalf("got NaN or Inf result from controller: %f %v", state, ok)
+ }
+ }
+ })
+}
diff --git a/libgo/go/runtime/mgcscavenge.go b/libgo/go/runtime/mgcscavenge.go
index adf2b05..9d6e0bd 100644
--- a/libgo/go/runtime/mgcscavenge.go
+++ b/libgo/go/runtime/mgcscavenge.go
@@ -165,11 +165,12 @@ func gcPaceScavenger(heapGoal, lastHeapGoal uint64) {
// Sleep/wait state of the background scavenger.
var scavenge struct {
- lock mutex
- g *g
- parked bool
- timer *timer
- sysmonWake uint32 // Set atomically.
+ lock mutex
+ g *g
+ parked bool
+ timer *timer
+ sysmonWake uint32 // Set atomically.
+ printControllerReset bool // Whether the scavenger is in cooldown.
}
// readyForScavenger signals sysmon to wake the scavenger because
@@ -297,8 +298,14 @@ func bgscavenge(c chan int) {
max: 1000.0, // 1000:1
}
// It doesn't really matter what value we start at, but we can't be zero, because
- // that'll cause divide-by-zero issues.
- critSleepRatio := 0.001
+ // that'll cause divide-by-zero issues. Pick something conservative which we'll
+ // also use as a fallback.
+ const startingCritSleepRatio = 0.001
+ critSleepRatio := startingCritSleepRatio
+ // Duration left in nanoseconds during which we avoid using the controller and
+ // we hold critSleepRatio at a conservative value. Used if the controller's
+ // assumptions fail to hold.
+ controllerCooldown := int64(0)
for {
released := uintptr(0)
crit := float64(0)
@@ -385,9 +392,22 @@ func bgscavenge(c chan int) {
// because of the additional overheads of using scavenged memory.
crit *= 1 + scavengeCostRatio
- // Go to sleep for our current sleepNS.
+ // Go to sleep based on how much time we spent doing work.
slept := scavengeSleep(int64(crit / critSleepRatio))
+ // Stop here if we're cooling down from the controller.
+ if controllerCooldown > 0 {
+ // crit and slept aren't exact measures of time, but it's OK to be a bit
+ // sloppy here. We're just hoping we're avoiding some transient bad behavior.
+ t := slept + int64(crit)
+ if t > controllerCooldown {
+ controllerCooldown = 0
+ } else {
+ controllerCooldown -= t
+ }
+ continue
+ }
+
// Calculate the CPU time spent.
//
// This may be slightly inaccurate with respect to GOMAXPROCS, but we're
@@ -397,7 +417,20 @@ func bgscavenge(c chan int) {
cpuFraction := float64(crit) / ((float64(slept) + crit) * float64(gomaxprocs))
// Update the critSleepRatio, adjusting until we reach our ideal fraction.
- critSleepRatio = critSleepController.next(cpuFraction, idealFraction, float64(slept)+crit)
+ var ok bool
+ critSleepRatio, ok = critSleepController.next(cpuFraction, idealFraction, float64(slept)+crit)
+ if !ok {
+ // The core assumption of the controller, that we can get a proportional
+ // response, broke down. This may be transient, so temporarily switch to
+ // sleeping a fixed, conservative amount.
+ critSleepRatio = startingCritSleepRatio
+ controllerCooldown = 5e9 // 5 seconds.
+
+ // Signal the scav trace printer to output this.
+ lock(&scavenge.lock)
+ scavenge.printControllerReset = true
+ unlock(&scavenge.lock)
+ }
}
}
@@ -436,7 +469,11 @@ func (p *pageAlloc) scavenge(nbytes uintptr) uintptr {
// released should be the amount of memory released since the last time this
// was called, and forced indicates whether the scavenge was forced by the
// application.
+//
+// scavenge.lock must be held.
func printScavTrace(gen uint32, released uintptr, forced bool) {
+ assertLockHeld(&scavenge.lock)
+
printlock()
print("scav ", gen, " ",
released>>10, " KiB work, ",
@@ -445,6 +482,9 @@ func printScavTrace(gen uint32, released uintptr, forced bool) {
)
if forced {
print(" (forced)")
+ } else if scavenge.printControllerReset {
+ print(" [controller reset]")
+ scavenge.printControllerReset = false
}
println()
printunlock()
diff --git a/libgo/go/runtime/pprof/pprof_test.go b/libgo/go/runtime/pprof/pprof_test.go
index def49c1..983d4d4 100644
--- a/libgo/go/runtime/pprof/pprof_test.go
+++ b/libgo/go/runtime/pprof/pprof_test.go
@@ -798,7 +798,7 @@ func TestBlockProfile(t *testing.T) {
t.Skip("lots of details are different for gccgo; FIXME")
type TestCase struct {
name string
- f func()
+ f func(*testing.T)
stk []string
re string
}
@@ -907,7 +907,7 @@ func TestBlockProfile(t *testing.T) {
runtime.SetBlockProfileRate(1)
defer runtime.SetBlockProfileRate(0)
for _, test := range tests {
- test.f()
+ test.f(t)
}
t.Run("debug=1", func(t *testing.T) {
@@ -983,42 +983,73 @@ func containsStack(got [][]string, want []string) bool {
return false
}
-const blockDelay = 10 * time.Millisecond
+// awaitBlockedGoroutine spins on runtime.Gosched until a runtime stack dump
+// shows a goroutine in the given state with a stack frame in
+// runtime/pprof.<fName>.
+func awaitBlockedGoroutine(t *testing.T, state, fName string) {
+ re := fmt.Sprintf(`(?m)^goroutine \d+ \[%s\]:\n(?:.+\n\t.+\n)*runtime/pprof\.%s`, regexp.QuoteMeta(state), fName)
+ r := regexp.MustCompile(re)
-func blockChanRecv() {
+ if deadline, ok := t.Deadline(); ok {
+ if d := time.Until(deadline); d > 1*time.Second {
+ timer := time.AfterFunc(d-1*time.Second, func() {
+ debug.SetTraceback("all")
+ panic(fmt.Sprintf("timed out waiting for %#q", re))
+ })
+ defer timer.Stop()
+ }
+ }
+
+ buf := make([]byte, 64<<10)
+ for {
+ runtime.Gosched()
+ n := runtime.Stack(buf, true)
+ if n == len(buf) {
+ // Buffer wasn't large enough for a full goroutine dump.
+ // Resize it and try again.
+ buf = make([]byte, 2*len(buf))
+ continue
+ }
+ if r.Match(buf[:n]) {
+ return
+ }
+ }
+}
+
+func blockChanRecv(t *testing.T) {
c := make(chan bool)
go func() {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "chan receive", "blockChanRecv")
c <- true
}()
<-c
}
-func blockChanSend() {
+func blockChanSend(t *testing.T) {
c := make(chan bool)
go func() {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "chan send", "blockChanSend")
<-c
}()
c <- true
}
-func blockChanClose() {
+func blockChanClose(t *testing.T) {
c := make(chan bool)
go func() {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "chan receive", "blockChanClose")
close(c)
}()
<-c
}
-func blockSelectRecvAsync() {
+func blockSelectRecvAsync(t *testing.T) {
const numTries = 3
c := make(chan bool, 1)
c2 := make(chan bool, 1)
go func() {
for i := 0; i < numTries; i++ {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "select", "blockSelectRecvAsync")
c <- true
}
}()
@@ -1030,11 +1061,11 @@ func blockSelectRecvAsync() {
}
}
-func blockSelectSendSync() {
+func blockSelectSendSync(t *testing.T) {
c := make(chan bool)
c2 := make(chan bool)
go func() {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "select", "blockSelectSendSync")
<-c
}()
select {
@@ -1043,11 +1074,11 @@ func blockSelectSendSync() {
}
}
-func blockMutex() {
+func blockMutex(t *testing.T) {
var mu sync.Mutex
mu.Lock()
go func() {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "semacquire", "blockMutex")
mu.Unlock()
}()
// Note: Unlock releases mu before recording the mutex event,
@@ -1057,12 +1088,12 @@ func blockMutex() {
mu.Lock()
}
-func blockCond() {
+func blockCond(t *testing.T) {
var mu sync.Mutex
c := sync.NewCond(&mu)
mu.Lock()
go func() {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "sync.Cond.Wait", "blockCond")
mu.Lock()
c.Signal()
mu.Unlock()
@@ -1148,7 +1179,7 @@ func TestMutexProfile(t *testing.T) {
t.Fatalf("need MutexProfileRate 0, got %d", old)
}
- blockMutex()
+ blockMutex(t)
t.Run("debug=1", func(t *testing.T) {
var w bytes.Buffer
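
The fixed blockDelay sleep is replaced by polling the runtime until a goroutine is visibly blocked in the expected state. A self-contained sketch of the same polling idea outside the test harness (waitForState is a hypothetical helper name; awaitBlockedGoroutine above additionally matches the blocking function's frame):

    package main

    import (
        "fmt"
        "regexp"
        "runtime"
    )

    func waitForState(state string) {
        re := regexp.MustCompile(`(?m)^goroutine \d+ \[` + regexp.QuoteMeta(state) + `\]:`)
        buf := make([]byte, 64<<10)
        for {
            runtime.Gosched()
            n := runtime.Stack(buf, true)
            if n == len(buf) {
                // Buffer wasn't large enough for a full dump; grow and retry.
                buf = make([]byte, 2*len(buf))
                continue
            }
            if re.Match(buf[:n]) {
                return
            }
        }
    }

    func main() {
        c := make(chan bool)
        go func() { <-c }() // this goroutine blocks in "chan receive"
        waitForState("chan receive")
        fmt.Println("observed blocked goroutine")
        close(c)
    }
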
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go
index db1e2b4..881793b 100644
--- a/libgo/go/runtime/proc.go
+++ b/libgo/go/runtime/proc.go
@@ -212,10 +212,6 @@ func main(unsafe.Pointer) {
mainStarted = true
if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
- // For runtime_syscall_doAllThreadsSyscall, we
- // register sysmon is not ready for the world to be
- // stopped.
- atomic.Store(&sched.sysmonStarting, 1)
systemstack(func() {
newm(sysmon, nil, -1)
})
@@ -232,7 +228,6 @@ func main(unsafe.Pointer) {
if g.m != &m0 {
throw("runtime.main not on m0")
}
- m0.doesPark = true
// Record when the world started.
// Must be before doInit for tracing init.
@@ -801,7 +796,13 @@ func mcommoninit(mp *m, id int64) {
if lo|hi == 0 {
hi = 1
}
- mp.fastrand = uint64(hi)<<32 | uint64(lo)
+ // Same behavior as for 1.17.
+ // TODO: Simplify this.
+ if goarch.BigEndian {
+ mp.fastrand = uint64(lo)<<32 | uint64(hi)
+ } else {
+ mp.fastrand = uint64(hi)<<32 | uint64(lo)
+ }
mpreinit(mp)
@@ -1363,22 +1364,12 @@ func mstartm0() {
initsig(false)
}
-// mPark causes a thread to park itself - temporarily waking for
-// fixups but otherwise waiting to be fully woken. This is the
-// only way that m's should park themselves.
+// mPark causes a thread to park itself, returning once woken.
//go:nosplit
func mPark() {
- g := getg()
- for {
- notesleep(&g.m.park)
- // Note, because of signal handling by this parked m,
- // a preemptive mDoFixup() may actually occur via
- // mDoFixupAndOSYield(). (See golang.org/issue/44193)
- noteclear(&g.m.park)
- if !mDoFixup() {
- return
- }
- }
+ gp := getg()
+ notesleep(&gp.m.park)
+ noteclear(&gp.m.park)
}
// mexit tears down and exits the current thread.
@@ -1623,8 +1614,14 @@ func runSafePointFn() {
//
//go:yeswritebarrierrec
func allocm(_p_ *p, fn func(), id int64, allocatestack bool) (mp *m, g0Stack unsafe.Pointer, g0StackSize uintptr) {
+ allocmLock.rlock()
+
+ // The caller owns _p_, but we may borrow (i.e., acquirep) it. We must
+ // disable preemption to ensure it is not stolen, which would make the
+ // caller lose ownership.
+ acquirem()
+
_g_ := getg()
- acquirem() // disable GC because it can be called from sysmon
if _g_.m.p == 0 {
acquirep(_p_) // temporarily borrow p for mallocs in this function
}
@@ -1664,8 +1661,9 @@ func allocm(_p_ *p, fn func(), id int64, allocatestack bool) (mp *m, g0Stack uns
if _p_ == _g_.m.p.ptr() {
releasep()
}
- releasem(_g_.m)
+ releasem(_g_.m)
+ allocmLock.runlock()
return mp, g0Stack, g0StackSize
}
@@ -1940,9 +1938,17 @@ func unlockextra(mp *m) {
atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
}
-// execLock serializes exec and clone to avoid bugs or unspecified behaviour
-// around exec'ing while creating/destroying threads. See issue #19546.
-var execLock rwmutex
+var (
+ // allocmLock is locked for read when creating new Ms in allocm and their
+ // addition to allm. Thus acquiring this lock for write blocks the
+ // creation of new Ms.
+ allocmLock rwmutex
+
+ // execLock serializes exec and clone to avoid bugs or unspecified
+ // behaviour around exec'ing while creating/destroying threads. See
+ // issue #19546.
+ execLock rwmutex
+)
// newmHandoff contains a list of m structures that need new OS threads.
// This is used by newm in situations where newm itself can't safely
@@ -1972,8 +1978,19 @@ var newmHandoff struct {
// id is optional pre-allocated m ID. Omit by passing -1.
//go:nowritebarrierrec
func newm(fn func(), _p_ *p, id int64) {
+ // allocm adds a new M to allm, but they do not start until created by
+ // the OS in newm1 or the template thread.
+ //
+ // doAllThreadsSyscall requires that every M in allm will eventually
+ // start and be signal-able, even with a STW.
+ //
+ // Disable preemption here until we start the thread to ensure that
+ // newm is not preempted between allocm and starting the new thread,
+ // ensuring that anything added to allm is guaranteed to eventually
+ // start.
+ acquirem()
+
mp, _, _ := allocm(_p_, fn, id, false)
- mp.doesPark = (_p_ != nil)
mp.nextp.set(_p_)
mp.sigmask = initSigmask
if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
@@ -1999,9 +2016,14 @@ func newm(fn func(), _p_ *p, id int64) {
notewakeup(&newmHandoff.wake)
}
unlock(&newmHandoff.lock)
+ // The M has not started yet, but the template thread does not
+ // participate in STW, so it will always process queued Ms and
+ // it is safe to releasem.
+ releasem(getg().m)
return
}
newm1(mp)
+ releasem(getg().m)
}
func newm1(mp *m) {
@@ -2030,67 +2052,6 @@ func startTemplateThread() {
releasem(mp)
}
-// mFixupRace is used to temporarily borrow the race context from the
-// coordinating m during a syscall_runtime_doAllThreadsSyscall and
-// loan it out to each of the m's of the runtime so they can execute a
-// mFixup.fn in that context.
-var mFixupRace struct {
- lock mutex
- ctx uintptr
-}
-
-// mDoFixup runs any outstanding fixup function for the running m.
-// Returns true if a fixup was outstanding and actually executed.
-//
-// Note: to avoid deadlocks, and the need for the fixup function
-// itself to be async safe, signals are blocked for the working m
-// while it holds the mFixup lock. (See golang.org/issue/44193)
-//
-//go:nosplit
-func mDoFixup() bool {
- _g_ := getg()
- if used := atomic.Load(&_g_.m.mFixup.used); used == 0 {
- return false
- }
-
- // slow path - if fixup fn is used, block signals and lock.
- var sigmask sigset
- sigsave(&sigmask)
- sigblock(false)
- lock(&_g_.m.mFixup.lock)
- fn := _g_.m.mFixup.fn
- if fn != nil {
- if gcphase != _GCoff {
- // We can't have a write barrier in this
- // context since we may not have a P, but we
- // clear fn to signal that we've executed the
- // fixup. As long as fn is kept alive
- // elsewhere, technically we should have no
- // issues with the GC, but fn is likely
- // generated in a different package altogether
- // that may change independently. Just assert
- // the GC is off so this lack of write barrier
- // is more obviously safe.
- throw("GC must be disabled to protect validity of fn value")
- }
- *(*uintptr)(unsafe.Pointer(&_g_.m.mFixup.fn)) = 0
- fn(false)
- }
- unlock(&_g_.m.mFixup.lock)
- msigrestore(sigmask)
- return fn != nil
-}
-
-// mDoFixupAndOSYield is called when an m is unable to send a signal
-// because the allThreadsSyscall mechanism is in progress. That is, an
-// mPark() has been interrupted with this signal handler so we need to
-// ensure the fixup is executed from this context.
-//go:nosplit
-func mDoFixupAndOSYield() {
- mDoFixup()
- osyield()
-}
-
// templateThread is a thread in a known-good state that exists solely
// to start new threads in known-good states when the calling thread
// may not be in a good state.
@@ -2127,7 +2088,6 @@ func templateThread() {
noteclear(&newmHandoff.wake)
unlock(&newmHandoff.lock)
notesleep(&newmHandoff.wake)
- mDoFixup()
}
}
@@ -4826,10 +4786,6 @@ func sysmon() {
checkdead()
unlock(&sched.lock)
- // For syscall_runtime_doAllThreadsSyscall, sysmon is
- // sufficiently up to participate in fixups.
- atomic.Store(&sched.sysmonStarting, 0)
-
lasttrace := int64(0)
idle := 0 // how many cycles in succession we had not wokeup somebody
delay := uint32(0)
@@ -4844,7 +4800,6 @@ func sysmon() {
delay = 10 * 1000
}
usleep(delay)
- mDoFixup()
// sysmon should not enter deep sleep if schedtrace is enabled so that
// it can print that information at the right time.
@@ -4881,7 +4836,6 @@ func sysmon() {
osRelax(true)
}
syscallWake = notetsleep(&sched.sysmonnote, sleep)
- mDoFixup()
if shouldRelax {
osRelax(false)
}
@@ -4924,7 +4878,6 @@ func sysmon() {
incidlelocked(1)
}
}
- mDoFixup()
if GOOS == "netbsd" && needSysmonWorkaround {
// netpoll is responsible for waiting for timer
// expiration, so we typically don't have to worry
diff --git a/libgo/go/runtime/runtime2.go b/libgo/go/runtime/runtime2.go
index 3c0348f..7885fbb 100644
--- a/libgo/go/runtime/runtime2.go
+++ b/libgo/go/runtime/runtime2.go
@@ -593,7 +593,6 @@ type m struct {
ncgo int32 // number of cgo calls currently in progress
// Not for gccgo: cgoCallersUse uint32 // if non-zero, cgoCallers in use temporarily
// Not for gccgo: cgoCallers *cgoCallers // cgo traceback if crashing in cgo call
- doesPark bool // non-P running threads: sysmon and newmHandoff never use .park
park note
alllink *m // on allm
schedlink muintptr
@@ -610,16 +609,6 @@ type m struct {
syscalltick uint32
freelink *m // on sched.freem
- // mFixup is used to synchronize OS related m state
- // (credentials etc) use mutex to access. To avoid deadlocks
- // an atomic.Load() of used being zero in mDoFixupFn()
- // guarantees fn is nil.
- mFixup struct {
- lock mutex
- used uint32
- fn func(bool) bool
- }
-
// these are here because they are too large to be on the stack
// of low-level NOSPLIT functions.
// Not for gccgo: libcall libcall
@@ -869,10 +858,6 @@ type schedt struct {
sysmonwait uint32
sysmonnote note
- // While true, sysmon not ready for mFixup calls.
- // Accessed atomically.
- sysmonStarting uint32
-
// safepointFn should be called on each P at the next GC
// safepoint if p.runSafePointFn is set.
safePointFn func(*p)
@@ -890,6 +875,8 @@ type schedt struct {
// with the rest of the runtime.
sysmonlock mutex
+ _ uint32 // for alignment
+
// timeToRun is a distribution of scheduling latencies, defined
// as the sum of time a G spends in the _Grunnable state before
// it transitions to _Grunning.
@@ -906,7 +893,7 @@ const (
_SigPanic // if the signal is from the kernel, panic
_SigDefault // if the signal isn't explicitly requested, don't monitor it
_SigGoExit // cause all runtime procs to exit (only used on Plan 9).
- _SigSetStack // add SA_ONSTACK to libc handler
+ _SigSetStack // Don't explicitly install handler, but add SA_ONSTACK to existing libc handler
_SigUnblock // always unblock; see blockableSig
_SigIgn // _SIG_DFL action is to ignore the signal
)
diff --git a/libgo/go/runtime/sigqueue.go b/libgo/go/runtime/sigqueue.go
index da2491d..49754f5 100644
--- a/libgo/go/runtime/sigqueue.go
+++ b/libgo/go/runtime/sigqueue.go
@@ -11,18 +11,18 @@
//
// sigsend is called by the signal handler to queue a new signal.
// signal_recv is called by the Go program to receive a newly queued signal.
+//
// Synchronization between sigsend and signal_recv is based on the sig.state
-// variable. It can be in 4 states: sigIdle, sigReceiving, sigSending and sigFixup.
-// sigReceiving means that signal_recv is blocked on sig.Note and there are no
-// new pending signals.
-// sigSending means that sig.mask *may* contain new pending signals,
-// signal_recv can't be blocked in this state.
-// sigIdle means that there are no new pending signals and signal_recv is not blocked.
-// sigFixup is a transient state that can only exist as a short
-// transition from sigReceiving and then on to sigIdle: it is
-// used to ensure the AllThreadsSyscall()'s mDoFixup() operation
-// occurs on the sleeping m, waiting to receive a signal.
+// variable. It can be in three states:
+// * sigReceiving means that signal_recv is blocked on sig.Note and there are
+// no new pending signals.
+// * sigSending means that sig.mask *may* contain new pending signals,
+// signal_recv can't be blocked in this state.
+// * sigIdle means that there are no new pending signals and signal_recv is not
+// blocked.
+//
// Transitions between states are done atomically with CAS.
+//
// When signal_recv is unblocked, it resets sig.Note and rechecks sig.mask.
// If several sigsends and signal_recv execute concurrently, it can lead to
// unnecessary rechecks of sig.mask, but it cannot lead to missed signals
@@ -63,7 +63,6 @@ const (
sigIdle = iota
sigReceiving
sigSending
- sigFixup
)
// sigsend delivers a signal from sighandler to the internal signal delivery queue.
@@ -117,9 +116,6 @@ Send:
notewakeup(&sig.note)
break Send
}
- case sigFixup:
- // nothing to do - we need to wait for sigIdle.
- mDoFixupAndOSYield()
}
}
@@ -127,19 +123,6 @@ Send:
return true
}
-// sigRecvPrepareForFixup is used to temporarily wake up the
-// signal_recv() running thread while it is blocked waiting for the
-// arrival of a signal. If it causes the thread to wake up, the
-// sig.state travels through this sequence: sigReceiving -> sigFixup
-// -> sigIdle -> sigReceiving and resumes. (This is only called while
-// GC is disabled.)
-//go:nosplit
-func sigRecvPrepareForFixup() {
- if atomic.Cas(&sig.state, sigReceiving, sigFixup) {
- notewakeup(&sig.note)
- }
-}
-
// Called to receive the next queued signal.
// Must only be called from a single goroutine at a time.
//go:linkname signal_recv os_1signal.signal__recv
@@ -167,16 +150,7 @@ func signal_recv() uint32 {
}
notetsleepg(&sig.note, -1)
noteclear(&sig.note)
- if !atomic.Cas(&sig.state, sigFixup, sigIdle) {
- break Receive
- }
- // Getting here, the code will
- // loop around again to sleep
- // in state sigReceiving. This
- // path is taken when
- // sigRecvPrepareForFixup()
- // has been called by another
- // thread.
+ break Receive
}
case sigSending:
if atomic.Cas(&sig.state, sigSending, sigIdle) {
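
With sigFixup removed, the sender/receiver handoff is back to a three-state CAS machine as described in the updated comment. A minimal sketch of those transitions, using sync/atomic in place of the runtime's internal atomics:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    const (
        sigIdle uint32 = iota
        sigReceiving
        sigSending
    )

    func main() {
        var state uint32 = sigIdle
        // Sender side (sigsend): mark that sig.mask may contain pending signals.
        if atomic.CompareAndSwapUint32(&state, sigIdle, sigSending) {
            fmt.Println("sender: queued work while receiver was idle")
        }
        // Receiver side (signal_recv): consume the pending flag, or park if idle.
        switch {
        case atomic.CompareAndSwapUint32(&state, sigSending, sigIdle):
            fmt.Println("receiver: found pending work, back to idle")
        case atomic.CompareAndSwapUint32(&state, sigIdle, sigReceiving):
            fmt.Println("receiver: nothing pending, would block on sig.note")
        }
    }
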
diff --git a/libgo/go/runtime/slice.go b/libgo/go/runtime/slice.go
index 7c8b458..8c85712 100644
--- a/libgo/go/runtime/slice.go
+++ b/libgo/go/runtime/slice.go
@@ -228,7 +228,7 @@ func growslice(et *_type, oldarray unsafe.Pointer, oldlen, oldcap, cap int) slic
var lenmem, newlenmem, capmem uintptr
// Specialize for common values of et.size.
// For 1 we don't need any division/multiplication.
- // For sys.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
+ // For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
// For powers of 2, use a variable shift.
switch {
case et.size == 1:
diff --git a/libgo/go/runtime/testdata/testprogcgo/aprof.go b/libgo/go/runtime/testdata/testprogcgo/aprof.go
index 44a15b0..c70d633 100644
--- a/libgo/go/runtime/testdata/testprogcgo/aprof.go
+++ b/libgo/go/runtime/testdata/testprogcgo/aprof.go
@@ -7,8 +7,11 @@ package main
// Test that SIGPROF received in C code does not crash the process
// looking for the C code's func pointer.
-// The test fails when the function is the first C function.
-// The exported functions are the first C functions, so we use that.
+// This is a regression test for issue 14599, where profiling fails when the
+// function is the first C function. Exported functions are the first C
+// functions, so we use an exported function. Exported functions are created in
+// lexicographical order of source files, so this file is named aprof.go to
+// ensure its function is first.
// extern void CallGoNop();
import "C"
diff --git a/libgo/go/runtime/testdata/testprogcgo/pprof_callback.go b/libgo/go/runtime/testdata/testprogcgo/pprof_callback.go
new file mode 100644
index 0000000..e345643
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/pprof_callback.go
@@ -0,0 +1,89 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !plan9 && !windows
+
+package main
+
+// Make many C-to-Go callbacks while collecting a CPU profile.
+//
+// This is a regression test for issue 50936.
+
+/*
+#include <unistd.h>
+
+void goCallbackPprof();
+
+static void callGo() {
+ // Spend >20us in C so this thread is eligible for sysmon to retake its
+ // P.
+ usleep(50);
+ goCallbackPprof();
+}
+*/
+import "C"
+
+import (
+ "fmt"
+ "os"
+ "runtime/pprof"
+ "runtime"
+ "time"
+)
+
+func init() {
+ register("CgoPprofCallback", CgoPprofCallback)
+}
+
+//export goCallbackPprof
+func goCallbackPprof() {
+ // No-op. We want to stress the cgocall and cgocallback internals,
+ // landing as many pprof signals there as possible.
+}
+
+func CgoPprofCallback() {
+ // Issue 50936 was a crash in the SIGPROF handler when the signal
+ // arrived during the exitsyscall following a cgocall(back) in dropg or
+ // execute, when updating mp.curg.
+ //
+ // These are reachable only when exitsyscall finds no P available. Thus
+ // we make C calls from significantly more Gs than there are available
+ // Ps. Lots of runnable work combined with >20us spent in callGo makes
+ // it possible for sysmon to retake Ps, forcing C calls to go down the
+ // desired exitsyscall path.
+ //
+ // High GOMAXPROCS is used to increase opportunities for failure on
+ // high CPU machines.
+ const (
+ P = 16
+ G = 64
+ )
+ runtime.GOMAXPROCS(P)
+
+ f, err := os.CreateTemp("", "prof")
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+ defer f.Close()
+
+ if err := pprof.StartCPUProfile(f); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+
+ for i := 0; i < G; i++ {
+ go func() {
+ for {
+ C.callGo()
+ }
+ }()
+ }
+
+ time.Sleep(time.Second)
+
+ pprof.StopCPUProfile()
+
+ fmt.Println("OK")
+}
diff --git a/libgo/go/runtime/trace.go b/libgo/go/runtime/trace.go
index 5d7eb75..9678144 100644
--- a/libgo/go/runtime/trace.go
+++ b/libgo/go/runtime/trace.go
@@ -230,7 +230,7 @@ func StartTrace() error {
gp.traceseq = 0
gp.tracelastp = getg().m.p
// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
- id := trace.stackTab.put([]location{location{pc: gp.startpc + sys.PCQuantum}})
+ id := trace.stackTab.put([]location{location{pc: startPCforTrace(gp.startpc) + sys.PCQuantum}})
traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
}
if status == _Gwaiting {
@@ -1066,7 +1066,7 @@ func traceGoCreate(newg *g, pc uintptr) {
newg.traceseq = 0
newg.tracelastp = getg().m.p
// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
- id := trace.stackTab.put([]location{location{pc: pc + sys.PCQuantum}})
+ id := trace.stackTab.put([]location{location{pc: startPCforTrace(pc) + sys.PCQuantum}})
traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}
@@ -1239,3 +1239,9 @@ func trace_userLog(id uint64, category, message string) {
traceReleaseBuffer(pid)
}
+
+// startPCforTrace returns the start PC of a goroutine for tracing purposes. If pc is a wrapper,
+// it returns the PC of the wrapped function. Otherwise it returns pc.
+func startPCforTrace(pc uintptr) uintptr {
+ return pc
+}