path: root/libgo/go/runtime
author    Ian Lance Taylor <iant@golang.org>    2021-01-27 17:55:50 -0800
committer Ian Lance Taylor <iant@golang.org>    2021-01-29 11:04:55 -0800
commit    726b7aa004d6885388a76521222602b8552a41ee (patch)
tree      5179037ef840a43dcea0f3be4e07dbcbcfcb2c4a /libgo/go/runtime
parent    91a95ad2ae0e0f2fa953fafe55ff2ec32c8277d5 (diff)
libgo: update to Go1.16rc1
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/287493
Diffstat (limited to 'libgo/go/runtime')
-rw-r--r--  libgo/go/runtime/crash_test.go                    |  12
-rw-r--r--  libgo/go/runtime/defer_test.go                    |  28
-rw-r--r--  libgo/go/runtime/export_test.go                   |   4
-rw-r--r--  libgo/go/runtime/histogram.go                     |  60
-rw-r--r--  libgo/go/runtime/histogram_test.go                |  22
-rw-r--r--  libgo/go/runtime/metrics.go                       |  53
-rw-r--r--  libgo/go/runtime/metrics/description.go           |  16
-rw-r--r--  libgo/go/runtime/metrics/doc.go                   |   4
-rw-r--r--  libgo/go/runtime/metrics/example_test.go          |  96
-rw-r--r--  libgo/go/runtime/metrics/histogram.go             |  29
-rw-r--r--  libgo/go/runtime/metrics/value.go                 |   2
-rw-r--r--  libgo/go/runtime/metrics_test.go                  |  38
-rw-r--r--  libgo/go/runtime/mgcmark.go                       |   4
-rw-r--r--  libgo/go/runtime/mgcscavenge.go                   |   2
-rw-r--r--  libgo/go/runtime/msan0.go                         |   9
-rw-r--r--  libgo/go/runtime/os_freebsd.go                    |   2
-rw-r--r--  libgo/go/runtime/os_gccgo.go                      |   5
-rw-r--r--  libgo/go/runtime/os_js.go                         |   7
-rw-r--r--  libgo/go/runtime/os_openbsd.go                    |   1
-rw-r--r--  libgo/go/runtime/proc.go                          |  79
-rw-r--r--  libgo/go/runtime/runtime2.go                      |   1
-rw-r--r--  libgo/go/runtime/signal_unix.go                   |   9
-rw-r--r--  libgo/go/runtime/signal_windows_test.go           |  64
-rw-r--r--  libgo/go/runtime/sigqueue.go                      |  34
-rw-r--r--  libgo/go/runtime/stubs2.go                        |   2
-rw-r--r--  libgo/go/runtime/testdata/testprog/deadlock.go    |  39
-rw-r--r--  libgo/go/runtime/testdata/testwinsignal/main.go   |  19
-rw-r--r--  libgo/go/runtime/time.go                          |   6
-rw-r--r--  libgo/go/runtime/timestub2.go                     |   4
29 files changed, 556 insertions, 95 deletions
diff --git a/libgo/go/runtime/crash_test.go b/libgo/go/runtime/crash_test.go
index ab6f381..102b1a5 100644
--- a/libgo/go/runtime/crash_test.go
+++ b/libgo/go/runtime/crash_test.go
@@ -297,6 +297,18 @@ func TestRecursivePanic4(t *testing.T) {
}
+func TestRecursivePanic5(t *testing.T) {
+ output := runTestProg(t, "testprog", "RecursivePanic5")
+ want := `first panic
+second panic
+panic: third panic
+`
+ if !strings.HasPrefix(output, want) {
+ t.Fatalf("output does not start with %q:\n%s", want, output)
+ }
+
+}
+
func TestGoexitCrash(t *testing.T) {
// External linking brings in cgo, causing deadlock detection not working.
testenv.MustInternalLink(t)
diff --git a/libgo/go/runtime/defer_test.go b/libgo/go/runtime/defer_test.go
index 5ac0814..9a40ea1 100644
--- a/libgo/go/runtime/defer_test.go
+++ b/libgo/go/runtime/defer_test.go
@@ -410,3 +410,31 @@ func rec1(max int) {
rec1(max - 1)
}
}
+
+func TestIssue43921(t *testing.T) {
+ defer func() {
+ expect(t, 1, recover())
+ }()
+ func() {
+ // Prevent open-coded defers
+ for {
+ defer func() {}()
+ break
+ }
+
+ defer func() {
+ defer func() {
+ expect(t, 4, recover())
+ }()
+ panic(4)
+ }()
+ panic(1)
+
+ }()
+}
+
+func expect(t *testing.T, n int, err interface{}) {
+ if n != err {
+ t.Fatalf("have %v, want %v", err, n)
+ }
+}
diff --git a/libgo/go/runtime/export_test.go b/libgo/go/runtime/export_test.go
index 8dd3050..1455c22 100644
--- a/libgo/go/runtime/export_test.go
+++ b/libgo/go/runtime/export_test.go
@@ -1195,12 +1195,12 @@ type TimeHistogram timeHistogram
// Counts returns the counts for the given bucket, subBucket indices.
// Returns true if the bucket was valid, otherwise returns the counts
-// for the overflow bucket and false.
+// for the underflow bucket and false.
func (th *TimeHistogram) Count(bucket, subBucket uint) (uint64, bool) {
t := (*timeHistogram)(th)
i := bucket*TimeHistNumSubBuckets + subBucket
if i >= uint(len(t.counts)) {
- return t.overflow, false
+ return t.underflow, false
}
return t.counts[i], true
}
diff --git a/libgo/go/runtime/histogram.go b/libgo/go/runtime/histogram.go
index 4020969..42baa6c 100644
--- a/libgo/go/runtime/histogram.go
+++ b/libgo/go/runtime/histogram.go
@@ -7,6 +7,7 @@ package runtime
import (
"runtime/internal/atomic"
"runtime/internal/sys"
+ "unsafe"
)
const (
@@ -69,17 +70,21 @@ const (
// for concurrent use. It is also safe to read all the values
// atomically.
type timeHistogram struct {
- counts [timeHistNumSuperBuckets * timeHistNumSubBuckets]uint64
- overflow uint64
+ counts [timeHistNumSuperBuckets * timeHistNumSubBuckets]uint64
+
+ // underflow counts all the times we got a negative duration
+ // sample. Because of how time works on some platforms, it's
+ // possible to measure negative durations. We could ignore them,
+ // but we record them anyway because it's better to have some
+ // signal that it's happening than just missing samples.
+ underflow uint64
}
// record adds the given duration to the distribution.
-//
-// Although the duration is an int64 to facilitate ease-of-use
-// with e.g. nanotime, the duration must be non-negative.
func (h *timeHistogram) record(duration int64) {
if duration < 0 {
- throw("timeHistogram encountered negative duration")
+ atomic.Xadd64(&h.underflow, 1)
+ return
}
// The index of the exponential bucket is just the index
// of the highest set bit adjusted for how many bits we
@@ -92,29 +97,47 @@ func (h *timeHistogram) record(duration int64) {
superBucket = uint(sys.Len64(uint64(duration))) - timeHistSubBucketBits
if superBucket*timeHistNumSubBuckets >= uint(len(h.counts)) {
// The bucket index we got is larger than what we support, so
- // add into the special overflow bucket.
- atomic.Xadd64(&h.overflow, 1)
- return
+ // include this count in the highest bucket, which extends to
+ // infinity.
+ superBucket = timeHistNumSuperBuckets - 1
+ subBucket = timeHistNumSubBuckets - 1
+ } else {
+ // The linear subbucket index is just the timeHistSubBucketsBits
+ // bits after the top bit. To extract that value, shift down
+ // the duration such that we leave the top bit and the next bits
+ // intact, then extract the index.
+ subBucket = uint((duration >> (superBucket - 1)) % timeHistNumSubBuckets)
}
- // The linear subbucket index is just the timeHistSubBucketsBits
- // bits after the top bit. To extract that value, shift down
- // the duration such that we leave the top bit and the next bits
- // intact, then extract the index.
- subBucket = uint((duration >> (superBucket - 1)) % timeHistNumSubBuckets)
} else {
subBucket = uint(duration)
}
atomic.Xadd64(&h.counts[superBucket*timeHistNumSubBuckets+subBucket], 1)
}
+const (
+ fInf = 0x7FF0000000000000
+ fNegInf = 0xFFF0000000000000
+)
+
+func float64Inf() float64 {
+ inf := uint64(fInf)
+ return *(*float64)(unsafe.Pointer(&inf))
+}
+
+func float64NegInf() float64 {
+ inf := uint64(fNegInf)
+ return *(*float64)(unsafe.Pointer(&inf))
+}
+
// timeHistogramMetricsBuckets generates a slice of boundaries for
// the timeHistogram. These boundaries are represented in seconds,
// not nanoseconds like the timeHistogram represents durations.
func timeHistogramMetricsBuckets() []float64 {
- b := make([]float64, timeHistTotalBuckets-1)
+ b := make([]float64, timeHistTotalBuckets+1)
+ b[0] = float64NegInf()
for i := 0; i < timeHistNumSuperBuckets; i++ {
superBucketMin := uint64(0)
- // The (inclusive) minimum for the first bucket is 0.
+ // The (inclusive) minimum for the first non-negative bucket is 0.
if i > 0 {
// The minimum for the second bucket will be
// 1 << timeHistSubBucketBits, indicating that all
@@ -128,7 +151,7 @@ func timeHistogramMetricsBuckets() []float64 {
// index to combine it with the bucketMin.
subBucketShift := uint(0)
if i > 1 {
- // The first two buckets are exact with respect to integers,
+ // The first two super buckets are exact with respect to integers,
// so we'll never have to shift the sub-bucket index. Thereafter,
// we shift up by 1 with each subsequent bucket.
subBucketShift = uint(i - 2)
@@ -141,8 +164,9 @@ func timeHistogramMetricsBuckets() []float64 {
// Convert the subBucketMin which is in nanoseconds to a float64 seconds value.
// These values will all be exactly representable by a float64.
- b[i*timeHistNumSubBuckets+j] = float64(subBucketMin) / 1e9
+ b[i*timeHistNumSubBuckets+j+1] = float64(subBucketMin) / 1e9
}
}
+ b[len(b)-1] = float64Inf()
return b
}
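The new bucketing scheme in this hunk is easiest to see outside the runtime. Below is a minimal, self-contained sketch of the index math that record now performs, using math/bits in place of the runtime-internal sys.Len64; the constants are illustrative stand-ins for timeHistSubBucketBits and friends (the super-bucket count is an assumption for the sketch, not taken from the patch):

```go
package main

import (
	"fmt"
	"math/bits"
)

// Illustrative stand-ins for the runtime's timeHist* constants.
const (
	subBucketBits   = 4
	numSubBuckets   = 1 << subBucketBits // 16 sub-buckets per super-bucket
	numSuperBuckets = 45                 // assumed value, for the sketch only
)

// bucketFor mirrors timeHistogram.record: negative durations land in the
// underflow counter, the super-bucket is derived from the position of the
// highest set bit, and the sub-bucket is the next subBucketBits bits.
// Out-of-range durations now saturate into the highest bucket instead of
// incrementing a separate overflow counter.
func bucketFor(duration int64) (super, sub uint, underflow bool) {
	if duration < 0 {
		return 0, 0, true
	}
	if l := uint(bits.Len64(uint64(duration))); l > subBucketBits {
		super = l - subBucketBits
		if super >= numSuperBuckets {
			// The highest bucket extends to infinity.
			return numSuperBuckets - 1, numSubBuckets - 1, false
		}
		// Keep the top bit plus the next subBucketBits bits, then
		// drop the top bit with the modulus.
		sub = uint((duration >> (super - 1)) % numSubBuckets)
	} else {
		// Durations that fit in subBucketBits bits index super-bucket 0
		// directly, which is why the first two super-buckets are exact.
		sub = uint(duration)
	}
	return super, sub, false
}

func main() {
	for _, d := range []int64{-1, 7, 100, 1_000_000_000} {
		super, sub, uf := bucketFor(d)
		fmt.Printf("%d ns -> super=%d sub=%d underflow=%v\n", d, super, sub, uf)
	}
}
```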
diff --git a/libgo/go/runtime/histogram_test.go b/libgo/go/runtime/histogram_test.go
index 5f5b28f..dbc64fa 100644
--- a/libgo/go/runtime/histogram_test.go
+++ b/libgo/go/runtime/histogram_test.go
@@ -5,6 +5,7 @@
package runtime_test
import (
+ "math"
. "runtime"
"testing"
)
@@ -32,8 +33,8 @@ func TestTimeHistogram(t *testing.T) {
h.Record(base + v)
}
}
- // Hit the overflow bucket.
- h.Record(int64(^uint64(0) >> 1))
+ // Hit the underflow bucket.
+ h.Record(int64(-1))
// Check to make sure there's exactly one count in each
// bucket.
@@ -41,7 +42,7 @@ func TestTimeHistogram(t *testing.T) {
for j := uint(0); j < TimeHistNumSubBuckets; j++ {
c, ok := h.Count(i, j)
if !ok {
- t.Errorf("hit overflow bucket unexpectedly: (%d, %d)", i, j)
+ t.Errorf("hit underflow bucket unexpectedly: (%d, %d)", i, j)
} else if c != 1 {
t.Errorf("bucket (%d, %d) has count that is not 1: %d", i, j, c)
}
@@ -49,10 +50,21 @@ func TestTimeHistogram(t *testing.T) {
}
c, ok := h.Count(TimeHistNumSuperBuckets, 0)
if ok {
- t.Errorf("expected to hit overflow bucket: (%d, %d)", TimeHistNumSuperBuckets, 0)
+ t.Errorf("expected to hit underflow bucket: (%d, %d)", TimeHistNumSuperBuckets, 0)
}
if c != 1 {
- t.Errorf("overflow bucket has count that is not 1: %d", c)
+ t.Errorf("underflow bucket has count that is not 1: %d", c)
}
+
+ // Check overflow behavior.
+ // By hitting a high value, we should just be adding into the highest bucket.
+ h.Record(math.MaxInt64)
+ c, ok = h.Count(TimeHistNumSuperBuckets-1, TimeHistNumSubBuckets-1)
+ if !ok {
+ t.Error("hit underflow bucket in highest bucket unexpectedly")
+ } else if c != 2 {
+ t.Errorf("highest has count that is not 2: %d", c)
+ }
+
dummyTimeHistogram = TimeHistogram{}
}
diff --git a/libgo/go/runtime/metrics.go b/libgo/go/runtime/metrics.go
index 5f09a88..e1f1db2 100644
--- a/libgo/go/runtime/metrics.go
+++ b/libgo/go/runtime/metrics.go
@@ -41,10 +41,28 @@ func initMetrics() {
if metricsInit {
return
}
- sizeClassBuckets = make([]float64, _NumSizeClasses)
- for i := range sizeClassBuckets {
- sizeClassBuckets[i] = float64(class_to_size[i])
+
+ sizeClassBuckets = make([]float64, _NumSizeClasses, _NumSizeClasses+1)
+ // Skip size class 0 which is a stand-in for large objects, but large
+ // objects are tracked separately (and they actually get placed in
+ // the last bucket, not the first).
+ sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
+ for i := 1; i < _NumSizeClasses; i++ {
+ // Size classes have an inclusive upper-bound
+ // and exclusive lower bound (e.g. 48-byte size class is
+ // (32, 48]) whereas we want an inclusive lower-bound
+ // and exclusive upper-bound (e.g. 48-byte size class is
+ // [33, 49). We can achieve this by shifting all bucket
+ // boundaries up by 1.
+ //
+ // Also, a float64 can precisely represent integers with
+ // value up to 2^53 and size classes are relatively small
+ // (nowhere near 2^48 even) so this will give us exact
+ // boundaries.
+ sizeClassBuckets[i] = float64(class_to_size[i] + 1)
}
+ sizeClassBuckets = append(sizeClassBuckets, float64Inf())
+
timeHistBuckets = timeHistogramMetricsBuckets()
metrics = map[string]metricData{
"/gc/cycles/automatic:gc-cycles": {
@@ -68,23 +86,27 @@ func initMetrics() {
out.scalar = in.sysStats.gcCyclesDone
},
},
- "/gc/heap/allocs-by-size:objects": {
+ "/gc/heap/allocs-by-size:bytes": {
deps: makeStatDepSet(heapStatsDep),
compute: func(in *statAggregate, out *metricValue) {
hist := out.float64HistOrInit(sizeClassBuckets)
hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeAllocCount)
- for i := range hist.buckets {
- hist.counts[i] = uint64(in.heapStats.smallAllocCount[i])
+ // Cut off the first index which is ostensibly for size class 0,
+ // but large objects are tracked separately so it's actually unused.
+ for i, count := range in.heapStats.smallAllocCount[1:] {
+ hist.counts[i] = uint64(count)
}
},
},
- "/gc/heap/frees-by-size:objects": {
+ "/gc/heap/frees-by-size:bytes": {
deps: makeStatDepSet(heapStatsDep),
compute: func(in *statAggregate, out *metricValue) {
hist := out.float64HistOrInit(sizeClassBuckets)
hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeFreeCount)
- for i := range hist.buckets {
- hist.counts[i] = uint64(in.heapStats.smallFreeCount[i])
+ // Cut off the first index which is ostensibly for size class 0,
+ // but large objects are tracked separately so it's actually unused.
+ for i, count := range in.heapStats.smallFreeCount[1:] {
+ hist.counts[i] = uint64(count)
}
},
},
@@ -105,9 +127,12 @@ func initMetrics() {
"/gc/pauses:seconds": {
compute: func(_ *statAggregate, out *metricValue) {
hist := out.float64HistOrInit(timeHistBuckets)
- hist.counts[len(hist.counts)-1] = atomic.Load64(&memstats.gcPauseDist.overflow)
- for i := range hist.buckets {
- hist.counts[i] = atomic.Load64(&memstats.gcPauseDist.counts[i])
+ // The bottom-most bucket, containing negative values, is tracked
+ // separately as underflow, so fill that in manually and then
+ // iterate over the rest.
+ hist.counts[0] = atomic.Load64(&memstats.gcPauseDist.underflow)
+ for i := range memstats.gcPauseDist.counts {
+ hist.counts[i+1] = atomic.Load64(&memstats.gcPauseDist.counts[i])
}
},
},
@@ -426,8 +451,8 @@ func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogr
v.pointer = unsafe.Pointer(hist)
}
hist.buckets = buckets
- if len(hist.counts) != len(hist.buckets)+1 {
- hist.counts = make([]uint64, len(buckets)+1)
+ if len(hist.counts) != len(hist.buckets)-1 {
+ hist.counts = make([]uint64, len(buckets)-1)
}
return hist
}
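The boundary shift described in the size-class comments above is worth seeing concretely. Here is a small sketch assuming a toy size-class table rather than the runtime's real class_to_size array; math.Inf stands in for the float64Inf helper, which the runtime defines itself in histogram.go because it cannot import math:

```go
package main

import (
	"fmt"
	"math"
)

// A few illustrative size-class upper bounds (bytes); the runtime's real
// table, class_to_size, has far more entries.
var classToSize = []uint32{0, 8, 16, 24, 32, 48, 64}

// sizeClassBuckets mirrors initMetrics: each bucket's inclusive lower bound
// is the class's upper bound shifted up by 1, so the 48-byte size class
// (32, 48] becomes the histogram bucket [33, 49).
func sizeClassBuckets() []float64 {
	b := make([]float64, len(classToSize), len(classToSize)+1)
	b[0] = 1 // the smallest possible allocation is 1 byte
	for i := 1; i < len(classToSize); i++ {
		b[i] = float64(classToSize[i] + 1)
	}
	// Large objects are tracked separately and land in the final,
	// unbounded bucket.
	return append(b, math.Inf(1))
}

func main() {
	fmt.Println(sizeClassBuckets()) // [1 9 17 25 33 49 65 +Inf]
}
```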
diff --git a/libgo/go/runtime/metrics/description.go b/libgo/go/runtime/metrics/description.go
index 32af5d1..1175156 100644
--- a/libgo/go/runtime/metrics/description.go
+++ b/libgo/go/runtime/metrics/description.go
@@ -23,6 +23,11 @@ type Description struct {
// Examples of units might be "seconds", "bytes", "bytes/second", "cpu-seconds",
// "byte*cpu-seconds", and "bytes/second/second".
//
+ // For histograms, multiple units may apply. For instance, the units of the buckets and
+ // the count. By convention, for histograms, the units of the count are always "samples"
+ // with the type of sample evident by the metric's name, while the unit in the name
+ // specifies the buckets' unit.
+ //
// A complete name might look like "/memory/heap/free:bytes".
Name string
@@ -41,10 +46,6 @@ type Description struct {
//
// This flag thus indicates whether or not it's useful to compute a rate from this value.
Cumulative bool
-
- // StopTheWorld is whether or not the metric requires a stop-the-world
- // event in order to collect it.
- StopTheWorld bool
}
// The English language descriptions below must be kept in sync with the
@@ -69,14 +70,16 @@ var allDesc = []Description{
Cumulative: true,
},
{
- Name: "/gc/heap/allocs-by-size:objects",
+ Name: "/gc/heap/allocs-by-size:bytes",
Description: "Distribution of all objects allocated by approximate size.",
Kind: KindFloat64Histogram,
+ Cumulative: true,
},
{
- Name: "/gc/heap/frees-by-size:objects",
+ Name: "/gc/heap/frees-by-size:bytes",
Description: "Distribution of all objects freed by approximate size.",
Kind: KindFloat64Histogram,
+ Cumulative: true,
},
{
Name: "/gc/heap/goal:bytes",
@@ -92,6 +95,7 @@ var allDesc = []Description{
Name: "/gc/pauses:seconds",
Description: "Distribution individual GC-related stop-the-world pause latencies.",
Kind: KindFloat64Histogram,
+ Cumulative: true,
},
{
Name: "/memory/classes/heap/free:bytes",
diff --git a/libgo/go/runtime/metrics/doc.go b/libgo/go/runtime/metrics/doc.go
index a68184e..021a0bd 100644
--- a/libgo/go/runtime/metrics/doc.go
+++ b/libgo/go/runtime/metrics/doc.go
@@ -61,10 +61,10 @@ Below is the full list of supported metrics, ordered lexicographically.
/gc/cycles/total:gc-cycles
Count of all completed GC cycles.
- /gc/heap/allocs-by-size:objects
+ /gc/heap/allocs-by-size:bytes
Distribution of all objects allocated by approximate size.
- /gc/heap/frees-by-size:objects
+ /gc/heap/frees-by-size:bytes
Distribution of all objects freed by approximate size.
/gc/heap/goal:bytes
diff --git a/libgo/go/runtime/metrics/example_test.go b/libgo/go/runtime/metrics/example_test.go
new file mode 100644
index 0000000..cade0c3
--- /dev/null
+++ b/libgo/go/runtime/metrics/example_test.go
@@ -0,0 +1,96 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package metrics_test
+
+import (
+ "fmt"
+ "runtime/metrics"
+)
+
+func ExampleRead_readingOneMetric() {
+ // Name of the metric we want to read.
+ const myMetric = "/memory/classes/heap/free:bytes"
+
+ // Create a sample for the metric.
+ sample := make([]metrics.Sample, 1)
+ sample[0].Name = myMetric
+
+ // Sample the metric.
+ metrics.Read(sample)
+
+ // Check if the metric is actually supported.
+ // If it's not, the resulting value will always have
+ // kind KindBad.
+ if sample[0].Value.Kind() == metrics.KindBad {
+ panic(fmt.Sprintf("metric %q no longer supported", myMetric))
+ }
+
+ // Handle the result.
+ //
+ // It's OK to assume a particular Kind for a metric;
+ // they're guaranteed not to change.
+ freeBytes := sample[0].Value.Uint64()
+
+ fmt.Printf("free but not released memory: %d\n", freeBytes)
+}
+
+func ExampleRead_readingAllMetrics() {
+ // Get descriptions for all supported metrics.
+ descs := metrics.All()
+
+ // Create a sample for each metric.
+ samples := make([]metrics.Sample, len(descs))
+ for i := range samples {
+ samples[i].Name = descs[i].Name
+ }
+
+ // Sample the metrics. Re-use the samples slice if you can!
+ metrics.Read(samples)
+
+ // Iterate over all results.
+ for _, sample := range samples {
+ // Pull out the name and value.
+ name, value := sample.Name, sample.Value
+
+ // Handle each sample.
+ switch value.Kind() {
+ case metrics.KindUint64:
+ fmt.Printf("%s: %d\n", name, value.Uint64())
+ case metrics.KindFloat64:
+ fmt.Printf("%s: %f\n", name, value.Float64())
+ case metrics.KindFloat64Histogram:
+ // The histogram may be quite large, so let's just pull out
+ // a crude estimate for the median for the sake of this example.
+ fmt.Printf("%s: %f\n", name, medianBucket(value.Float64Histogram()))
+ case metrics.KindBad:
+ // This should never happen because all metrics are supported
+ // by construction.
+ panic("bug in runtime/metrics package!")
+ default:
+ // This may happen as new metrics get added.
+ //
+ // The safest thing to do here is to simply log it somewhere
+ // as something to look into, but ignore it for now.
+ // In the worst case, you might temporarily miss out on a new metric.
+ fmt.Printf("%s: unexpected metric Kind: %v\n", name, value.Kind())
+ }
+ }
+}
+
+func medianBucket(h *metrics.Float64Histogram) float64 {
+ total := uint64(0)
+ for _, count := range h.Counts {
+ total += count
+ }
+ thresh := total / 2
+ total = 0
+ for i, count := range h.Counts {
+ total += count
+ if total > thresh {
+ return h.Buckets[i]
+ }
+ }
+ panic("should not happen")
+}
diff --git a/libgo/go/runtime/metrics/histogram.go b/libgo/go/runtime/metrics/histogram.go
index e1364e1..956422b 100644
--- a/libgo/go/runtime/metrics/histogram.go
+++ b/libgo/go/runtime/metrics/histogram.go
@@ -6,25 +6,28 @@ package metrics
// Float64Histogram represents a distribution of float64 values.
type Float64Histogram struct {
- // Counts contains the weights for each histogram bucket. The length of
- // Counts is equal to the length of Buckets (in the metric description)
- // plus one to account for the implicit minimum bucket.
+ // Counts contains the weights for each histogram bucket.
//
- // Given N buckets, the following is the mathematical relationship between
- // Counts and Buckets.
- // count[0] is the weight of the range (-inf, bucket[0])
- // count[n] is the weight of the range [bucket[n], bucket[n+1]), for 0 < n < N-1
- // count[N-1] is the weight of the range [bucket[N-1], inf)
+ // Given N buckets, Count[n] is the weight of the range
+ // [bucket[n], bucket[n+1]), for 0 <= n < N.
Counts []uint64
- // Buckets contains the boundaries between histogram buckets, in increasing order.
+ // Buckets contains the boundaries of the histogram buckets, in increasing order.
//
- // Because this slice contains boundaries, there are len(Buckets)+1 counts:
- // a count for all values less than the first boundary, a count covering each
- // [slice[i], slice[i+1]) interval, and a count for all values greater than or
- // equal to the last boundary.
+ // Buckets[0] is the inclusive lower bound of the minimum bucket while
+ // Buckets[len(Buckets)-1] is the exclusive upper bound of the maximum bucket.
+ // Hence, there are len(Buckets)-1 counts. Furthermore, len(Buckets) != 1, always,
+ // since at least two boundaries are required to describe one bucket (and 0
+ // boundaries are used to describe 0 buckets).
+ //
+ // Buckets[0] is permitted to have value -Inf and Buckets[len(Buckets)-1] is
+ // permitted to have value Inf.
//
// For a given metric name, the value of Buckets is guaranteed not to change
// between calls until program exit.
+ //
+ // This slice value is permitted to alias with other Float64Histograms' Buckets
+ // fields, so the values within should only ever be read. If they need to be
+ // modified, the user must make a copy.
Buckets []float64
}
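Under these new semantics there is exactly one count per pair of adjacent boundaries. A minimal sketch of how a consumer might locate the bucket for a value follows; bucketOf is a hypothetical helper name, not part of the runtime/metrics package:

```go
package main

import (
	"fmt"
	"runtime/metrics"
)

// bucketOf returns the Counts index whose half-open range
// [Buckets[i], Buckets[i+1]) contains v, or -1 if v is out of range,
// which can only happen when the boundaries don't span -Inf to +Inf.
func bucketOf(h *metrics.Float64Histogram, v float64) int {
	for i := 0; i+1 < len(h.Buckets); i++ {
		if v >= h.Buckets[i] && v < h.Buckets[i+1] {
			return i
		}
	}
	return -1
}

func main() {
	// Two counts require three boundaries: len(Counts) == len(Buckets)-1.
	h := &metrics.Float64Histogram{
		Counts:  []uint64{3, 1},
		Buckets: []float64{1, 9, 17},
	}
	fmt.Println(bucketOf(h, 8))  // 0: 8 falls in [1, 9)
	fmt.Println(bucketOf(h, 32)) // -1: beyond the last boundary
}
```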
diff --git a/libgo/go/runtime/metrics/value.go b/libgo/go/runtime/metrics/value.go
index 0b056b4..61e8a19 100644
--- a/libgo/go/runtime/metrics/value.go
+++ b/libgo/go/runtime/metrics/value.go
@@ -63,7 +63,7 @@ func (v Value) Float64() float64 {
// If v.Kind() != KindFloat64Histogram, this method panics.
func (v Value) Float64Histogram() *Float64Histogram {
if v.kind != KindFloat64Histogram {
- panic("called Float64 on non-float64 metric value")
+ panic("called Float64Histogram on non-Float64Histogram metric value")
}
return (*Float64Histogram)(v.pointer)
}
diff --git a/libgo/go/runtime/metrics_test.go b/libgo/go/runtime/metrics_test.go
index 167edd5..8a3cf01 100644
--- a/libgo/go/runtime/metrics_test.go
+++ b/libgo/go/runtime/metrics_test.go
@@ -70,6 +70,34 @@ func TestReadMetrics(t *testing.T) {
checkUint64(t, name, samples[i].Value.Uint64(), mstats.BuckHashSys)
case "/memory/classes/total:bytes":
checkUint64(t, name, samples[i].Value.Uint64(), mstats.Sys)
+ case "/gc/heap/allocs-by-size:bytes":
+ hist := samples[i].Value.Float64Histogram()
+ // Skip size class 0 in BySize, because it's always empty and not represented
+ // in the histogram.
+ for i, sc := range mstats.BySize[1:] {
+ if b, s := hist.Buckets[i+1], float64(sc.Size+1); b != s {
+ t.Errorf("bucket does not match size class: got %f, want %f", b, s)
+ // The rest of the checks aren't expected to work anyway.
+ continue
+ }
+ if c, m := hist.Counts[i], sc.Mallocs; c != m {
+ t.Errorf("histogram counts do not much BySize for class %d: got %d, want %d", i, c, m)
+ }
+ }
+ case "/gc/heap/frees-by-size:bytes":
+ hist := samples[i].Value.Float64Histogram()
+ // Skip size class 0 in BySize, because it's always empty and not represented
+ // in the histogram.
+ for i, sc := range mstats.BySize[1:] {
+ if b, s := hist.Buckets[i+1], float64(sc.Size+1); b != s {
+ t.Errorf("bucket does not match size class: got %f, want %f", b, s)
+ // The rest of the checks aren't expected to work anyway.
+ continue
+ }
+ if c, f := hist.Counts[i], sc.Frees; c != f {
+ t.Errorf("histogram counts do not much BySize for class %d: got %d, want %d", i, c, f)
+ }
+ }
case "/gc/heap/objects:objects":
checkUint64(t, name, samples[i].Value.Uint64(), mstats.HeapObjects)
case "/gc/heap/goal:bytes":
@@ -133,9 +161,9 @@ func TestReadMetricsConsistency(t *testing.T) {
totalVirtual.got = samples[i].Value.Uint64()
case "/gc/heap/objects:objects":
objects.total = samples[i].Value.Uint64()
- case "/gc/heap/allocs-by-size:objects":
+ case "/gc/heap/allocs-by-size:bytes":
objects.alloc = samples[i].Value.Float64Histogram()
- case "/gc/heap/frees-by-size:objects":
+ case "/gc/heap/frees-by-size:bytes":
objects.free = samples[i].Value.Float64Histogram()
case "/gc/cycles:gc-cycles":
gc.numGC = samples[i].Value.Uint64()
@@ -154,6 +182,12 @@ func TestReadMetricsConsistency(t *testing.T) {
if totalVirtual.got != totalVirtual.want {
t.Errorf(`"/memory/classes/total:bytes" does not match sum of /memory/classes/**: got %d, want %d`, totalVirtual.got, totalVirtual.want)
}
+ if b, c := len(objects.alloc.Buckets), len(objects.alloc.Counts); b != c+1 {
+ t.Errorf("allocs-by-size has wrong bucket or counts length: %d buckets, %d counts", b, c)
+ }
+ if b, c := len(objects.free.Buckets), len(objects.free.Counts); b != c+1 {
+ t.Errorf("frees-by-size has wrong bucket or counts length: %d buckets, %d counts", b, c)
+ }
if len(objects.alloc.Buckets) != len(objects.free.Buckets) {
t.Error("allocs-by-size and frees-by-size buckets don't match in length")
} else if len(objects.alloc.Counts) != len(objects.free.Counts) {
diff --git a/libgo/go/runtime/mgcmark.go b/libgo/go/runtime/mgcmark.go
index ed2a8c1..e558125 100644
--- a/libgo/go/runtime/mgcmark.go
+++ b/libgo/go/runtime/mgcmark.go
@@ -87,8 +87,7 @@ func gcMarkRootPrepare() {
// Gs may be created after this point, but it's okay that we
// ignore them because they begin life without any roots, so
// there's nothing to scan, and any roots they create during
- // the concurrent phase will be scanned during mark
- // termination.
+ // the concurrent phase will be caught by the write barrier.
work.nStackRoots = int(atomic.Loaduintptr(&allglen))
work.markrootNext = 0
@@ -119,7 +118,6 @@ fail:
println("gp", gp, "goid", gp.goid,
"status", readgstatus(gp),
"gcscandone", gp.gcscandone)
- unlock(&allglock) // Avoid self-deadlock with traceback.
throw("scan missed a g")
}
diff --git a/libgo/go/runtime/mgcscavenge.go b/libgo/go/runtime/mgcscavenge.go
index 8b44ac8..da5be70 100644
--- a/libgo/go/runtime/mgcscavenge.go
+++ b/libgo/go/runtime/mgcscavenge.go
@@ -564,7 +564,7 @@ func (p *pageAlloc) scavengeUnreserve(r addrRange, gen uint32) {
func (p *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (uintptr, addrRange) {
assertLockHeld(p.mheapLock)
- // Defensively check if we've recieved an empty address range.
+ // Defensively check if we've received an empty address range.
// If so, just return.
if work.size() == 0 {
// Nothing to do.
diff --git a/libgo/go/runtime/msan0.go b/libgo/go/runtime/msan0.go
index 117c5e5..374d13f 100644
--- a/libgo/go/runtime/msan0.go
+++ b/libgo/go/runtime/msan0.go
@@ -16,7 +16,8 @@ const msanenabled = false
// Because msanenabled is false, none of these functions should be called.
-func msanread(addr unsafe.Pointer, sz uintptr) { throw("msan") }
-func msanwrite(addr unsafe.Pointer, sz uintptr) { throw("msan") }
-func msanmalloc(addr unsafe.Pointer, sz uintptr) { throw("msan") }
-func msanfree(addr unsafe.Pointer, sz uintptr) { throw("msan") }
+func msanread(addr unsafe.Pointer, sz uintptr) { throw("msan") }
+func msanwrite(addr unsafe.Pointer, sz uintptr) { throw("msan") }
+func msanmalloc(addr unsafe.Pointer, sz uintptr) { throw("msan") }
+func msanfree(addr unsafe.Pointer, sz uintptr) { throw("msan") }
+func msanmove(dst, src unsafe.Pointer, sz uintptr) { throw("msan") }
diff --git a/libgo/go/runtime/os_freebsd.go b/libgo/go/runtime/os_freebsd.go
index 611a8cd..9c68366 100644
--- a/libgo/go/runtime/os_freebsd.go
+++ b/libgo/go/runtime/os_freebsd.go
@@ -143,7 +143,7 @@ func futexsleep1(addr *uint32, val uint32, ns int64) {
utp = &ut
}
ret := sys_umtx_op(addr, _UMTX_OP_WAIT_UINT_PRIVATE, val, unsafe.Sizeof(*utp), utp)
- if ret >= 0 || ret == -_EINTR {
+ if ret >= 0 || ret == -_EINTR || ret == -_ETIMEDOUT {
return
}
print("umtx_wait addr=", addr, " val=", val, " ret=", ret, "\n")
diff --git a/libgo/go/runtime/os_gccgo.go b/libgo/go/runtime/os_gccgo.go
index 79331c5..65d7aef 100644
--- a/libgo/go/runtime/os_gccgo.go
+++ b/libgo/go/runtime/os_gccgo.go
@@ -37,6 +37,11 @@ func unminit() {
unminitSignals()
}
+// Called from exitm, but not from drop, to undo the effect of thread-owned
+// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
+func mdestroy(mp *m) {
+}
+
var urandom_dev = []byte("/dev/urandom\x00")
func getRandomData(r []byte) {
diff --git a/libgo/go/runtime/os_js.go b/libgo/go/runtime/os_js.go
index 94983b3..24261e8 100644
--- a/libgo/go/runtime/os_js.go
+++ b/libgo/go/runtime/os_js.go
@@ -72,7 +72,7 @@ func clearSignalHandlers() {
}
//go:nosplit
-func sigblock() {
+func sigblock(exiting bool) {
}
// Called to initialize a new m (including the bootstrap m).
@@ -84,6 +84,11 @@ func minit() {
func unminit() {
}
+// Called from exitm, but not from drop, to undo the effect of thread-owned
+// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
+func mdestroy(mp *m) {
+}
+
func osinit() {
ncpu = 1
getg().m.procid = 2
diff --git a/libgo/go/runtime/os_openbsd.go b/libgo/go/runtime/os_openbsd.go
index 9cfaa94..50f0480 100644
--- a/libgo/go/runtime/os_openbsd.go
+++ b/libgo/go/runtime/os_openbsd.go
@@ -6,7 +6,6 @@ package runtime
import (
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go
index 1696a1b..eec44db 100644
--- a/libgo/go/runtime/proc.go
+++ b/libgo/go/runtime/proc.go
@@ -505,8 +505,29 @@ func lockedOSThread() bool {
}
var (
- allgs []*g
+ // allgs contains all Gs ever created (including dead Gs), and thus
+ // never shrinks.
+ //
+ // Access via the slice is protected by allglock or stop-the-world.
+ // Readers that cannot take the lock may (carefully!) use the atomic
+ // variables below.
allglock mutex
+ allgs []*g
+
+ // allglen and allgptr are atomic variables that contain len(allg) and
+ // &allg[0] respectively. Proper ordering depends on totally-ordered
+ // loads and stores. Writes are protected by allglock.
+ //
+ // allgptr is updated before allglen. Readers should read allglen
+ // before allgptr to ensure that allglen is always <= len(allgptr). New
+ // Gs appended during the race can be missed. For a consistent view of
+ // all Gs, allglock must be held.
+ //
+ // allgptr copies should always be stored as a concrete type or
+ // unsafe.Pointer, not uintptr, to ensure that GC can still reach it
+ // even if it points to a stale array.
+ allglen uintptr
+ allgptr **g
)
func allgadd(gp *g) {
@@ -516,10 +537,25 @@ func allgadd(gp *g) {
lock(&allglock)
allgs = append(allgs, gp)
- allglen = uintptr(len(allgs))
+ if &allgs[0] != allgptr {
+ atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
+ }
+ atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
unlock(&allglock)
}
+// atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex.
+func atomicAllG() (**g, uintptr) {
+ length := atomic.Loaduintptr(&allglen)
+ ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
+ return ptr, length
+}
+
+// atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG.
+func atomicAllGIndex(ptr **g, i uintptr) *g {
+ return *(**g)(add(unsafe.Pointer(ptr), i*sys.PtrSize))
+}
+
const (
// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
// 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.
@@ -1293,7 +1329,7 @@ func mexit(osStack bool) {
throw("locked m0 woke up")
}
- sigblock()
+ sigblock(true)
unminit()
// Free the gsignal stack.
@@ -1351,6 +1387,10 @@ found:
}
}
+ // Destroy all allocated resources. After this is called, we may no
+ // longer take any locks.
+ mdestroy(m)
+
if osStack {
// Return from mstart and let the system thread
// library free the g0 stack and terminate the thread.
@@ -1596,7 +1636,7 @@ func needm() {
// starting a new m to run Go code via newosproc.
var sigmask sigset
sigsave(&sigmask)
- sigblock()
+ sigblock(false)
// Lock extra list, take head, unlock popped list.
// nilokay=false is safe here because of the invariant above,
@@ -1735,7 +1775,7 @@ func dropm() {
// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
// It's important not to try to handle a signal between those two steps.
sigmask := mp.sigmask
- sigblock()
+ sigblock(false)
unminit()
// gccgo sets the stack to Gdead here, because the splitstack
@@ -2692,7 +2732,9 @@ func wakeNetPoller(when int64) {
} else {
// There are no threads in the network poller, try to get
// one there so it can handle new timers.
- wakep()
+ if GOOS != "plan9" { // Temporary workaround - see issue #42303.
+ wakep()
+ }
}
}
@@ -3526,7 +3568,7 @@ func beforefork() {
// group. See issue #18600.
gp.m.locks++
sigsave(&gp.m.sigmask)
- sigblock()
+ sigblock(false)
}
// Called from syscall package before fork.
@@ -3936,7 +3978,7 @@ func badunlockosthread() {
}
func gcount() int32 {
- n := int32(allglen) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
+ n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
for _, _p_ := range allp {
n -= _p_.gFree.n
}
@@ -4596,7 +4638,6 @@ func checkdead() {
case _Grunnable,
_Grunning,
_Gsyscall:
- unlock(&allglock)
print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
throw("checkdead: runnable g")
}
@@ -4757,6 +4798,26 @@ func sysmon() {
}
}
mDoFixup()
+ if GOOS == "netbsd" {
+ // netpoll is responsible for waiting for timer
+ // expiration, so we typically don't have to worry
+ // about starting an M to service timers. (Note that
+ // sleep for timeSleepUntil above simply ensures sysmon
+ // starts running again when that timer expiration may
+ // cause Go code to run again).
+ //
+ // However, netbsd has a kernel bug that sometimes
+ // misses netpollBreak wake-ups, which can lead to
+ // unbounded delays servicing timers. If we detect this
+ // overrun, then startm to get something to handle the
+ // timer.
+ //
+ // See issue 42515 and
+ // https://gnats.netbsd.org/cgi-bin/query-pr-single.pl?number=50094.
+ if next, _ := timeSleepUntil(); next < now {
+ startm(nil, false)
+ }
+ }
if atomic.Load(&scavenge.sysmonWake) != 0 {
// Kick the scavenger awake if someone requested it.
wakeScavenger()
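The allgs change is the heart of this proc.go diff: the slice is grow-only, so a reader that loads the length before the pointer can safely index a snapshot without taking allglock. Below is a user-space sketch of the same publication scheme, assuming sync/atomic's operations are totally ordered here the way the runtime's internal atomics are; the intList type and its method names are invented for the illustration:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"unsafe"
)

// intList mimics allgs/allgptr/allglen: a grow-only slice whose base
// pointer and length are published atomically for lock-free readers.
type intList struct {
	mu   sync.Mutex
	list []*int
	n    uintptr        // atomic; always <= len of the array behind p
	p    unsafe.Pointer // atomic; points at the backing array's first element
}

// add mirrors allgadd: append under the lock, publish the (possibly new)
// base pointer first, then the length, so a racing reader never observes
// a length larger than the array it can reach.
func (l *intList) add(v *int) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.list = append(l.list, v)
	if unsafe.Pointer(&l.list[0]) != atomic.LoadPointer(&l.p) {
		atomic.StorePointer(&l.p, unsafe.Pointer(&l.list[0]))
	}
	atomic.StoreUintptr(&l.n, uintptr(len(l.list)))
}

// snapshot mirrors atomicAllG: read the length before the pointer.
// Elements appended concurrently may be missed; hold the lock for a
// fully consistent view, as the runtime comment notes.
func (l *intList) snapshot() (**int, uintptr) {
	n := atomic.LoadUintptr(&l.n)
	p := (**int)(atomic.LoadPointer(&l.p))
	return p, n
}

// at mirrors atomicAllGIndex: index off the snapshotted base pointer.
func at(p **int, i uintptr) *int {
	return *(**int)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + i*unsafe.Sizeof(p)))
}

func main() {
	var l intList
	for i := 0; i < 3; i++ {
		v := i
		l.add(&v)
	}
	p, n := l.snapshot()
	for i := uintptr(0); i < n; i++ {
		fmt.Println(*at(p, i)) // 0, 1, 2
	}
}
```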
diff --git a/libgo/go/runtime/runtime2.go b/libgo/go/runtime/runtime2.go
index 023dac3..4b13cfc 100644
--- a/libgo/go/runtime/runtime2.go
+++ b/libgo/go/runtime/runtime2.go
@@ -1070,7 +1070,6 @@ func (w waitReason) String() string {
}
var (
- allglen uintptr
allm *m
gomaxprocs int32
ncpu int32
diff --git a/libgo/go/runtime/signal_unix.go b/libgo/go/runtime/signal_unix.go
index 1c040f7..e85136a 100644
--- a/libgo/go/runtime/signal_unix.go
+++ b/libgo/go/runtime/signal_unix.go
@@ -988,15 +988,16 @@ func msigrestore(sigmask sigset) {
sigprocmask(_SIG_SETMASK, &sigmask, nil)
}
-// sigblock blocks all signals in the current thread's signal mask.
+// sigblock blocks signals in the current thread's signal mask.
// This is used to block signals while setting up and tearing down g
-// when a non-Go thread calls a Go function.
-// The OS-specific code is expected to define sigset_all.
+// when a non-Go thread calls a Go function. When a thread is exiting
+// we use the sigsetAllExiting value, otherwise the OS specific
+// definition of sigset_all is used.
// This is nosplit and nowritebarrierrec because it is called by needm
// which may be called on a non-Go thread with no g available.
//go:nosplit
//go:nowritebarrierrec
-func sigblock() {
+func sigblock(exiting bool) {
var set sigset
sigfillset(&set)
sigprocmask(_SIG_SETMASK, &set, nil)
diff --git a/libgo/go/runtime/signal_windows_test.go b/libgo/go/runtime/signal_windows_test.go
index a5a885c..33a9b92 100644
--- a/libgo/go/runtime/signal_windows_test.go
+++ b/libgo/go/runtime/signal_windows_test.go
@@ -11,6 +11,7 @@ import (
"os/exec"
"path/filepath"
"runtime"
+ "strconv"
"strings"
"syscall"
"testing"
@@ -79,6 +80,69 @@ func sendCtrlBreak(pid int) error {
return nil
}
+// TestCtrlHandler tests that Go can gracefully handle closing the console window.
+// See https://golang.org/issues/41884.
+func TestCtrlHandler(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ t.Parallel()
+
+ // build go program
+ exe := filepath.Join(t.TempDir(), "test.exe")
+ cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", exe, "testdata/testwinsignal/main.go")
+ out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
+ if err != nil {
+ t.Fatalf("failed to build go exe: %v\n%s", err, out)
+ }
+
+ // run test program
+ cmd = exec.Command(exe)
+ var stderr bytes.Buffer
+ cmd.Stderr = &stderr
+ outPipe, err := cmd.StdoutPipe()
+ if err != nil {
+ t.Fatalf("Failed to create stdout pipe: %v", err)
+ }
+ outReader := bufio.NewReader(outPipe)
+
+ // in a new command window
+ const _CREATE_NEW_CONSOLE = 0x00000010
+ cmd.SysProcAttr = &syscall.SysProcAttr{
+ CreationFlags: _CREATE_NEW_CONSOLE,
+ HideWindow: true,
+ }
+ if err := cmd.Start(); err != nil {
+ t.Fatalf("Start failed: %v", err)
+ }
+ defer func() {
+ cmd.Process.Kill()
+ cmd.Wait()
+ }()
+
+ // wait for child to be ready to receive signals
+ if line, err := outReader.ReadString('\n'); err != nil {
+ t.Fatalf("could not read stdout: %v", err)
+ } else if strings.TrimSpace(line) != "ready" {
+ t.Fatalf("unexpected message: %s", line)
+ }
+
+ // gracefully kill pid, this closes the command window
+ if err := exec.Command("taskkill.exe", "/pid", strconv.Itoa(cmd.Process.Pid)).Run(); err != nil {
+ t.Fatalf("failed to kill: %v", err)
+ }
+
+ // check child received, handled SIGTERM
+ if line, err := outReader.ReadString('\n'); err != nil {
+ t.Fatalf("could not read stdout: %v", err)
+ } else if expected, got := syscall.SIGTERM.String(), strings.TrimSpace(line); expected != got {
+ t.Fatalf("Expected '%s' got: %s", expected, got)
+ }
+
+ // check child exited gracefully, did not timeout
+ if err := cmd.Wait(); err != nil {
+ t.Fatalf("Program exited with error: %v\n%s", err, &stderr)
+ }
+}
+
// TestLibraryCtrlHandler tests that Go DLL allows calling program to handle console control events.
// See https://golang.org/issues/35965.
func TestLibraryCtrlHandler(t *testing.T) {
diff --git a/libgo/go/runtime/sigqueue.go b/libgo/go/runtime/sigqueue.go
index 7f9badd..ca41b05 100644
--- a/libgo/go/runtime/sigqueue.go
+++ b/libgo/go/runtime/sigqueue.go
@@ -12,12 +12,16 @@
// sigsend is called by the signal handler to queue a new signal.
// signal_recv is called by the Go program to receive a newly queued signal.
// Synchronization between sigsend and signal_recv is based on the sig.state
-// variable. It can be in 3 states: sigIdle, sigReceiving and sigSending.
+// variable. It can be in 4 states: sigIdle, sigReceiving, sigSending and sigFixup.
// sigReceiving means that signal_recv is blocked on sig.Note and there are no
// new pending signals.
// sigSending means that sig.mask *may* contain new pending signals,
// signal_recv can't be blocked in this state.
// sigIdle means that there are no new pending signals and signal_recv is not blocked.
+// sigFixup is a transient state that can only exist as a short
+// transition from sigReceiving and then on to sigIdle: it is
+// used to ensure the AllThreadsSyscall()'s mDoFixup() operation
+// occurs on the sleeping m, waiting to receive a signal.
// Transitions between states are done atomically with CAS.
// When signal_recv is unblocked, it resets sig.Note and rechecks sig.mask.
// If several sigsends and signal_recv execute concurrently, it can lead to
@@ -59,6 +63,7 @@ const (
sigIdle = iota
sigReceiving
sigSending
+ sigFixup
)
// sigsend delivers a signal from sighandler to the internal signal delivery queue.
@@ -112,6 +117,9 @@ Send:
notewakeup(&sig.note)
break Send
}
+ case sigFixup:
+ // nothing to do - we need to wait for sigIdle.
+ osyield()
}
}
@@ -119,6 +127,19 @@ Send:
return true
}
+// sigRecvPrepareForFixup is used to temporarily wake up the
+// signal_recv() running thread while it is blocked waiting for the
+// arrival of a signal. If it causes the thread to wake up, the
+// sig.state travels through this sequence: sigReceiving -> sigFixup
+// -> sigIdle -> sigReceiving and resumes. (This is only called while
+// GC is disabled.)
+//go:nosplit
+func sigRecvPrepareForFixup() {
+ if atomic.Cas(&sig.state, sigReceiving, sigFixup) {
+ notewakeup(&sig.note)
+ }
+}
+
// Called to receive the next queued signal.
// Must only be called from a single goroutine at a time.
//go:linkname signal_recv os_1signal.signal__recv
@@ -146,7 +167,16 @@ func signal_recv() uint32 {
}
notetsleepg(&sig.note, -1)
noteclear(&sig.note)
- break Receive
+ if !atomic.Cas(&sig.state, sigFixup, sigIdle) {
+ break Receive
+ }
+ // Getting here, the code will
+ // loop around again to sleep
+ // in state sigReceiving. This
+ // path is taken when
+ // sigRecvPrepareForFixup()
+ // has been called by another
+ // thread.
}
case sigSending:
if atomic.Cas(&sig.state, sigSending, sigIdle) {
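The new sigFixup state is easiest to follow as a small CAS state machine. The sketch below is a standalone model of the protocol the comments above describe, not the runtime code itself; notewakeup/notetsleepg are elided and only the state transitions are shown:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// Standalone copies of the four states from sigqueue.go.
const (
	sigIdle = iota
	sigReceiving
	sigSending
	sigFixup
)

// Pretend the receiver is currently parked in notetsleepg.
var state uint32 = sigReceiving

// prepareForFixup models sigRecvPrepareForFixup: it only fires when the
// receiver is parked, nudging it through sigFixup so the wake-up is
// distinguishable from a real signal delivery.
func prepareForFixup() bool {
	return atomic.CompareAndSwapUint32(&state, sigReceiving, sigFixup)
}

// onWake models the receiver's path after its sleep returns: a successful
// sigFixup -> sigIdle transition means "run mDoFixup and park again",
// anything else means a signal is actually ready.
func onWake() string {
	if atomic.CompareAndSwapUint32(&state, sigFixup, sigIdle) {
		return "fixup requested: run mDoFixup, then sleep again"
	}
	return "real signal: deliver it"
}

func main() {
	fmt.Println(prepareForFixup()) // true: the receiver was parked
	fmt.Println(onWake())          // takes the fixup path
}
```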
diff --git a/libgo/go/runtime/stubs2.go b/libgo/go/runtime/stubs2.go
index 0aaed29..93ff566 100644
--- a/libgo/go/runtime/stubs2.go
+++ b/libgo/go/runtime/stubs2.go
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build !js
// +build !plan9
// +build !windows
-// +build !js
package runtime
diff --git a/libgo/go/runtime/testdata/testprog/deadlock.go b/libgo/go/runtime/testdata/testprog/deadlock.go
index 105d6a5..781acbd 100644
--- a/libgo/go/runtime/testdata/testprog/deadlock.go
+++ b/libgo/go/runtime/testdata/testprog/deadlock.go
@@ -25,6 +25,7 @@ func init() {
register("RecursivePanic2", RecursivePanic2)
register("RecursivePanic3", RecursivePanic3)
register("RecursivePanic4", RecursivePanic4)
+ register("RecursivePanic5", RecursivePanic5)
register("GoexitExit", GoexitExit)
register("GoNil", GoNil)
register("MainGoroutineID", MainGoroutineID)
@@ -160,6 +161,44 @@ func RecursivePanic4() {
panic("first panic")
}
+// Test case where we have an open-coded defer higher up the stack (in two), and
+// in the current function (three) we recover in a defer while we still have
+// another defer to be processed.
+func RecursivePanic5() {
+ one()
+ panic("third panic")
+}
+
+//go:noinline
+func one() {
+ two()
+}
+
+//go:noinline
+func two() {
+ defer func() {
+ }()
+
+ three()
+}
+
+//go:noinline
+func three() {
+ defer func() {
+ }()
+
+ defer func() {
+ fmt.Println(recover())
+ }()
+
+ defer func() {
+ fmt.Println(recover())
+ panic("second panic")
+ }()
+
+ panic("first panic")
+}
+
func GoexitExit() {
println("t1")
go func() {
diff --git a/libgo/go/runtime/testdata/testwinsignal/main.go b/libgo/go/runtime/testdata/testwinsignal/main.go
new file mode 100644
index 0000000..1e7c947
--- /dev/null
+++ b/libgo/go/runtime/testdata/testwinsignal/main.go
@@ -0,0 +1,19 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "time"
+)
+
+func main() {
+ c := make(chan os.Signal, 1)
+ signal.Notify(c)
+
+ fmt.Println("ready")
+ sig := <-c
+
+ time.Sleep(time.Second)
+ fmt.Println(sig)
+}
diff --git a/libgo/go/runtime/time.go b/libgo/go/runtime/time.go
index 65a1ae0..a69db99 100644
--- a/libgo/go/runtime/time.go
+++ b/libgo/go/runtime/time.go
@@ -608,8 +608,14 @@ func moveTimers(pp *p, timers []*timer) {
for {
switch s := atomic.Load(&t.status); s {
case timerWaiting:
+ if !atomic.Cas(&t.status, s, timerMoving) {
+ continue
+ }
t.pp = 0
doaddtimer(pp, t)
+ if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
+ badTimer()
+ }
break loop
case timerModifiedEarlier, timerModifiedLater:
if !atomic.Cas(&t.status, s, timerMoving) {
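The time.go fix gives the timerWaiting case the same claim-then-release discipline the other cases in moveTimers already had: a timer must be transitioned to timerMoving by CAS before it is mutated. A reduced sketch of that pattern with invented names (claimAndMove is not a runtime function):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// Timer states relevant to the moveTimers fix; values are illustrative.
const (
	timerWaiting uint32 = iota
	timerMoving
)

// claimAndMove shows the pattern the fix introduces: CAS the status from
// timerWaiting to timerMoving before touching the timer, so a concurrent
// modtimer cannot race with the move, then restore timerWaiting.
func claimAndMove(status *uint32, move func()) bool {
	if !atomic.CompareAndSwapUint32(status, timerWaiting, timerMoving) {
		return false // someone else owns the timer; the caller retries
	}
	move() // safe: we own the timer while its status is timerMoving
	if !atomic.CompareAndSwapUint32(status, timerMoving, timerWaiting) {
		panic("timer state corrupted") // the runtime calls badTimer() here
	}
	return true
}

func main() {
	status := timerWaiting
	ok := claimAndMove(&status, func() { /* t.pp = 0; doaddtimer(pp, t) */ })
	fmt.Println(ok, status == timerWaiting) // true true
}
```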
diff --git a/libgo/go/runtime/timestub2.go b/libgo/go/runtime/timestub2.go
index 38446fb..f691388 100644
--- a/libgo/go/runtime/timestub2.go
+++ b/libgo/go/runtime/timestub2.go
@@ -2,10 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build !darwin
-// +build !windows
-// +build !freebsd
-
package runtime
func walltime1() (sec int64, nsec int32)