path: root/libgo/go/testing
Diffstat (limited to 'libgo/go/testing')
-rw-r--r--  libgo/go/testing/benchmark.go               | 232
-rw-r--r--  libgo/go/testing/benchmark_test.go          | 122
-rw-r--r--  libgo/go/testing/cover.go                   |   3
-rw-r--r--  libgo/go/testing/example.go                 |  92
-rw-r--r--  libgo/go/testing/export_test.go             |   5
-rw-r--r--  libgo/go/testing/internal/testdeps/deps.go  |   1
-rw-r--r--  libgo/go/testing/quick/quick.go             |   3
-rw-r--r--  libgo/go/testing/quick/quick_test.go        |   2
-rw-r--r--  libgo/go/testing/run_example.go             |  64
-rw-r--r--  libgo/go/testing/run_example_js.go          |  74
-rw-r--r--  libgo/go/testing/sub_test.go                |   5
-rw-r--r--  libgo/go/testing/testing.go                 | 126
12 files changed, 504 insertions, 225 deletions
diff --git a/libgo/go/testing/benchmark.go b/libgo/go/testing/benchmark.go
index 8dd8cbc..0e348be 100644
--- a/libgo/go/testing/benchmark.go
+++ b/libgo/go/testing/benchmark.go
@@ -8,23 +8,32 @@ import (
"flag"
"fmt"
"internal/race"
+ "io"
+ "math"
"os"
"runtime"
+ "sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
+ "unicode"
)
-var matchBenchmarks = flag.String("test.bench", "", "run only benchmarks matching `regexp`")
-var benchTime = benchTimeFlag{d: 1 * time.Second}
-var benchmarkMemory = flag.Bool("test.benchmem", false, "print memory allocations for benchmarks")
-
-func init() {
+func initBenchmarkFlags() {
+ matchBenchmarks = flag.String("test.bench", "", "run only benchmarks matching `regexp`")
+ benchmarkMemory = flag.Bool("test.benchmem", false, "print memory allocations for benchmarks")
flag.Var(&benchTime, "test.benchtime", "run each benchmark for duration `d`")
}
+var (
+ matchBenchmarks *string
+ benchmarkMemory *bool
+
+ benchTime = benchTimeFlag{d: 1 * time.Second} // changed during test of testing package
+)
+
type benchTimeFlag struct {
d time.Duration
n int
@@ -101,10 +110,12 @@ type B struct {
// The net total of this test after being run.
netAllocs uint64
netBytes uint64
+ // Extra metrics collected by ReportMetric.
+ extra map[string]float64
}
// StartTimer starts timing a test. This function is called automatically
-// before a benchmark starts, but it can also used to resume timing after
+// before a benchmark starts, but it can also be used to resume timing after
// a call to StopTimer.
func (b *B) StartTimer() {
if !b.timerOn {
@@ -129,9 +140,19 @@ func (b *B) StopTimer() {
}
}
-// ResetTimer zeros the elapsed benchmark time and memory allocation counters.
+// ResetTimer zeroes the elapsed benchmark time and memory allocation counters
+// and deletes user-reported metrics.
// It does not affect whether the timer is running.
func (b *B) ResetTimer() {
+ if b.extra == nil {
+ // Allocate the extra map before reading memory stats.
+ // Pre-size it to make more allocation unlikely.
+ b.extra = make(map[string]float64, 16)
+ } else {
+ for k := range b.extra {
+ delete(b.extra, k)
+ }
+ }
if b.timerOn {
runtime.ReadMemStats(&memStats)
b.startAllocs = memStats.Mallocs
@@ -154,13 +175,6 @@ func (b *B) ReportAllocs() {
b.showAllocResult = true
}
-func (b *B) nsPerOp() int64 {
- if b.N <= 0 {
- return 0
- }
- return b.duration.Nanoseconds() / int64(b.N)
-}
-
// runN runs a single benchmark for the specified number of iterations.
func (b *B) runN(n int) {
benchmarkLock.Lock()
@@ -183,53 +197,20 @@ func (b *B) runN(n int) {
}
}
-func min(x, y int) int {
+func min(x, y int64) int64 {
if x > y {
return y
}
return x
}
-func max(x, y int) int {
+func max(x, y int64) int64 {
if x < y {
return y
}
return x
}
-// roundDown10 rounds a number down to the nearest power of 10.
-func roundDown10(n int) int {
- var tens = 0
- // tens = floor(log_10(n))
- for n >= 10 {
- n = n / 10
- tens++
- }
- // result = 10^tens
- result := 1
- for i := 0; i < tens; i++ {
- result *= 10
- }
- return result
-}
-
-// roundUp rounds x up to a number of the form [1eX, 2eX, 3eX, 5eX].
-func roundUp(n int) int {
- base := roundDown10(n)
- switch {
- case n <= base:
- return base
- case n <= (2 * base):
- return 2 * base
- case n <= (3 * base):
- return 3 * base
- case n <= (5 * base):
- return 5 * base
- default:
- return 10 * base
- }
-}
-
// run1 runs the first iteration of benchFunc. It reports whether more
// iterations of this benchmarks should be run.
func (b *B) run1() bool {
@@ -312,23 +293,53 @@ func (b *B) launch() {
b.runN(b.benchTime.n)
} else {
d := b.benchTime.d
- for n := 1; !b.failed && b.duration < d && n < 1e9; {
+ for n := int64(1); !b.failed && b.duration < d && n < 1e9; {
last := n
// Predict required iterations.
- n = int(d.Nanoseconds())
- if nsop := b.nsPerOp(); nsop != 0 {
- n /= int(nsop)
+ goalns := d.Nanoseconds()
+ prevIters := int64(b.N)
+ prevns := b.duration.Nanoseconds()
+ if prevns <= 0 {
+ // Round up, to avoid div by zero.
+ prevns = 1
}
+ // Order of operations matters.
+ // For very fast benchmarks, prevIters ~= prevns.
+ // If you divide first, you get 0 or 1,
+ // which can hide an order of magnitude in execution time.
+ // So multiply first, then divide.
+ n = goalns * prevIters / prevns
// Run more iterations than we think we'll need (1.2x).
+ n += n / 5
// Don't grow too fast in case we had timing errors previously.
+ n = min(n, 100*last)
// Be sure to run at least one more than last time.
- n = max(min(n+n/5, 100*last), last+1)
- // Round up to something easy to read.
- n = roundUp(n)
- b.runN(n)
+ n = max(n, last+1)
+ // Don't run more than 1e9 times. (This also keeps n in int range on 32 bit platforms.)
+ n = min(n, 1e9)
+ b.runN(int(n))
}
}
- b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes}
+ b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes, b.extra}
+}
+
+// ReportMetric adds "n unit" to the reported benchmark results.
+// If the metric is per-iteration, the caller should divide by b.N,
+// and by convention units should end in "/op".
+// ReportMetric overrides any previously reported value for the same unit.
+// ReportMetric panics if unit is the empty string or if unit contains
+// any whitespace.
+// If unit is a unit normally reported by the benchmark framework itself
+// (such as "allocs/op"), ReportMetric will override that metric.
+// Setting "ns/op" to 0 will suppress that built-in metric.
+func (b *B) ReportMetric(n float64, unit string) {
+ if unit == "" {
+ panic("metric unit must not be empty")
+ }
+ if strings.IndexFunc(unit, unicode.IsSpace) >= 0 {
+ panic("metric unit must not contain whitespace")
+ }
+ b.extra[unit] = n
}
// The results of a benchmark run.
@@ -338,56 +349,122 @@ type BenchmarkResult struct {
Bytes int64 // Bytes processed in one iteration.
MemAllocs uint64 // The total number of memory allocations.
MemBytes uint64 // The total number of bytes allocated.
+
+ // Extra records additional metrics reported by ReportMetric.
+ Extra map[string]float64
}
+// NsPerOp returns the "ns/op" metric.
func (r BenchmarkResult) NsPerOp() int64 {
+ if v, ok := r.Extra["ns/op"]; ok {
+ return int64(v)
+ }
if r.N <= 0 {
return 0
}
return r.T.Nanoseconds() / int64(r.N)
}
+// mbPerSec returns the "MB/s" metric.
func (r BenchmarkResult) mbPerSec() float64 {
+ if v, ok := r.Extra["MB/s"]; ok {
+ return v
+ }
if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 {
return 0
}
return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
}
-// AllocsPerOp returns r.MemAllocs / r.N.
+// AllocsPerOp returns the "allocs/op" metric,
+// which is calculated as r.MemAllocs / r.N.
func (r BenchmarkResult) AllocsPerOp() int64 {
+ if v, ok := r.Extra["allocs/op"]; ok {
+ return int64(v)
+ }
if r.N <= 0 {
return 0
}
return int64(r.MemAllocs) / int64(r.N)
}
-// AllocedBytesPerOp returns r.MemBytes / r.N.
+// AllocedBytesPerOp returns the "B/op" metric,
+// which is calculated as r.MemBytes / r.N.
func (r BenchmarkResult) AllocedBytesPerOp() int64 {
+ if v, ok := r.Extra["B/op"]; ok {
+ return int64(v)
+ }
if r.N <= 0 {
return 0
}
return int64(r.MemBytes) / int64(r.N)
}
+// String returns a summary of the benchmark results.
+// It follows the benchmark result line format from
+// https://golang.org/design/14313-benchmark-format, not including the
+// benchmark name.
+// Extra metrics override built-in metrics of the same name.
+// String does not include allocs/op or B/op, since those are reported
+// by MemString.
func (r BenchmarkResult) String() string {
- mbs := r.mbPerSec()
- mb := ""
- if mbs != 0 {
- mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
- }
- nsop := r.NsPerOp()
- ns := fmt.Sprintf("%10d ns/op", nsop)
- if r.N > 0 && nsop < 100 {
- // The format specifiers here make sure that
- // the ones digits line up for all three possible formats.
- if nsop < 10 {
- ns = fmt.Sprintf("%13.2f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
- } else {
- ns = fmt.Sprintf("%12.1f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
+ buf := new(strings.Builder)
+ fmt.Fprintf(buf, "%8d", r.N)
+
+ // Get ns/op as a float.
+ ns, ok := r.Extra["ns/op"]
+ if !ok {
+ ns = float64(r.T.Nanoseconds()) / float64(r.N)
+ }
+ if ns != 0 {
+ buf.WriteByte('\t')
+ prettyPrint(buf, ns, "ns/op")
+ }
+
+ if mbs := r.mbPerSec(); mbs != 0 {
+ fmt.Fprintf(buf, "\t%7.2f MB/s", mbs)
+ }
+
+ // Print extra metrics that aren't represented in the standard
+ // metrics.
+ var extraKeys []string
+ for k := range r.Extra {
+ switch k {
+ case "ns/op", "MB/s", "B/op", "allocs/op":
+ // Built-in metrics reported elsewhere.
+ continue
}
+ extraKeys = append(extraKeys, k)
+ }
+ sort.Strings(extraKeys)
+ for _, k := range extraKeys {
+ buf.WriteByte('\t')
+ prettyPrint(buf, r.Extra[k], k)
+ }
+ return buf.String()
+}
+
+func prettyPrint(w io.Writer, x float64, unit string) {
+ // Print all numbers with 10 places before the decimal point
+ // and small numbers with three sig figs.
+ var format string
+ switch y := math.Abs(x); {
+ case y == 0 || y >= 99.95:
+ format = "%10.0f %s"
+ case y >= 9.995:
+ format = "%12.1f %s"
+ case y >= 0.9995:
+ format = "%13.2f %s"
+ case y >= 0.09995:
+ format = "%14.3f %s"
+ case y >= 0.009995:
+ format = "%15.4f %s"
+ case y >= 0.0009995:
+ format = "%16.5f %s"
+ default:
+ format = "%17.6f %s"
}
- return fmt.Sprintf("%8d\t%s%s", r.N, ns, mb)
+ fmt.Fprintf(w, format, x, unit)
}
// MemString returns r.AllocedBytesPerOp and r.AllocsPerOp in the same format as 'go test'.
@@ -680,9 +757,12 @@ func (b *B) SetParallelism(p int) {
}
}
-// Benchmark benchmarks a single function. Useful for creating
+// Benchmark benchmarks a single function. It is useful for creating
// custom benchmarks that do not use the "go test" command.
//
+// If f depends on testing flags, then Init must be used to register
+// those flags before calling Benchmark and before calling flag.Parse.
+//
// If f calls Run, the result will be an estimate of running all its
// subbenchmarks that don't call Run in sequence in a single benchmark.
func Benchmark(f func(b *B)) BenchmarkResult {
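
For illustration only, here is a small standalone Go sketch (not part of the patch; the helper names minInt64, maxInt64 and predictN and the sample numbers are invented) of the arithmetic the reworked launch loop above performs, in particular why it multiplies before dividing so that very fast benchmarks do not round down to 0 or 1 iterations:

package main

import "fmt"

func minInt64(x, y int64) int64 {
	if x > y {
		return y
	}
	return x
}

func maxInt64(x, y int64) int64 {
	if x < y {
		return y
	}
	return x
}

// predictN mirrors the iteration predictor in the patched (*B).launch:
// multiply first, then divide, so an order of magnitude is not lost.
func predictN(goalns, prevIters, prevns, last int64) int64 {
	if prevns <= 0 {
		prevns = 1 // round up to avoid division by zero
	}
	n := goalns * prevIters / prevns // multiply before dividing
	n += n / 5                       // run ~1.2x the predicted need
	n = minInt64(n, 100*last)        // don't grow too fast after timing errors
	n = maxInt64(n, last+1)          // always run at least one more than last time
	return minInt64(n, 1e9)          // cap at 1e9 (keeps n in int range on 32-bit)
}

func main() {
	// Suppose the previous round ran 100 iterations in 2500ns and the goal is 1s:
	// the raw prediction is 4.8e7, but growth is capped at 100*last.
	fmt.Println(predictN(1e9, 100, 2500, 100)) // 10000
}
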
diff --git a/libgo/go/testing/benchmark_test.go b/libgo/go/testing/benchmark_test.go
index 431bb53..1434c26 100644
--- a/libgo/go/testing/benchmark_test.go
+++ b/libgo/go/testing/benchmark_test.go
@@ -7,63 +7,70 @@ package testing_test
import (
"bytes"
"runtime"
+ "sort"
+ "strings"
"sync/atomic"
"testing"
"text/template"
+ "time"
)
-var roundDownTests = []struct {
- v, expected int
+var prettyPrintTests = []struct {
+ v float64
+ expected string
}{
- {1, 1},
- {9, 1},
- {10, 10},
- {11, 10},
- {100, 100},
- {101, 100},
- {999, 100},
- {1000, 1000},
- {1001, 1000},
+ {0, " 0 x"},
+ {1234.1, " 1234 x"},
+ {-1234.1, " -1234 x"},
+ {99.950001, " 100 x"},
+ {99.949999, " 99.9 x"},
+ {9.9950001, " 10.0 x"},
+ {9.9949999, " 9.99 x"},
+ {-9.9949999, " -9.99 x"},
+ {0.0099950001, " 0.0100 x"},
+ {0.0099949999, " 0.00999 x"},
}
-func TestRoundDown10(t *testing.T) {
- for _, tt := range roundDownTests {
- actual := testing.RoundDown10(tt.v)
- if tt.expected != actual {
- t.Errorf("roundDown10(%d): expected %d, actual %d", tt.v, tt.expected, actual)
+func TestPrettyPrint(t *testing.T) {
+ for _, tt := range prettyPrintTests {
+ buf := new(strings.Builder)
+ testing.PrettyPrint(buf, tt.v, "x")
+ if tt.expected != buf.String() {
+ t.Errorf("prettyPrint(%v): expected %q, actual %q", tt.v, tt.expected, buf.String())
}
}
}
-var roundUpTests = []struct {
- v, expected int
-}{
- {0, 1},
- {1, 1},
- {2, 2},
- {3, 3},
- {5, 5},
- {9, 10},
- {999, 1000},
- {1000, 1000},
- {1400, 2000},
- {1700, 2000},
- {2700, 3000},
- {4999, 5000},
- {5000, 5000},
- {5001, 10000},
-}
+func TestResultString(t *testing.T) {
+ // Test fractional ns/op handling
+ r := testing.BenchmarkResult{
+ N: 100,
+ T: 240 * time.Nanosecond,
+ }
+ if r.NsPerOp() != 2 {
+ t.Errorf("NsPerOp: expected 2, actual %v", r.NsPerOp())
+ }
+ if want, got := " 100\t 2.40 ns/op", r.String(); want != got {
+ t.Errorf("String: expected %q, actual %q", want, got)
+ }
-func TestRoundUp(t *testing.T) {
- for _, tt := range roundUpTests {
- actual := testing.RoundUp(tt.v)
- if tt.expected != actual {
- t.Errorf("roundUp(%d): expected %d, actual %d", tt.v, tt.expected, actual)
- }
+ // Test sub-1 ns/op (issue #31005)
+ r.T = 40 * time.Nanosecond
+ if want, got := " 100\t 0.400 ns/op", r.String(); want != got {
+ t.Errorf("String: expected %q, actual %q", want, got)
+ }
+
+ // Test 0 ns/op
+ r.T = 0
+ if want, got := " 100", r.String(); want != got {
+ t.Errorf("String: expected %q, actual %q", want, got)
}
}
func TestRunParallel(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
testing.Benchmark(func(b *testing.B) {
procs := uint32(0)
iters := uint64(0)
@@ -111,3 +118,38 @@ func ExampleB_RunParallel() {
})
})
}
+
+func TestReportMetric(t *testing.T) {
+ res := testing.Benchmark(func(b *testing.B) {
+ b.ReportMetric(12345, "ns/op")
+ b.ReportMetric(0.2, "frobs/op")
+ })
+ // Test built-in overriding.
+ if res.NsPerOp() != 12345 {
+ t.Errorf("NsPerOp: expected %v, actual %v", 12345, res.NsPerOp())
+ }
+ // Test stringing.
+ res.N = 1 // Make the output stable
+ want := " 1\t 12345 ns/op\t 0.200 frobs/op"
+ if want != res.String() {
+ t.Errorf("expected %q, actual %q", want, res.String())
+ }
+}
+
+func ExampleB_ReportMetric() {
+ // This reports a custom benchmark metric relevant to a
+ // specific algorithm (in this case, sorting).
+ testing.Benchmark(func(b *testing.B) {
+ var compares int64
+ for i := 0; i < b.N; i++ {
+ s := []int{5, 4, 3, 2, 1}
+ sort.Slice(s, func(i, j int) bool {
+ compares++
+ return s[i] < s[j]
+ })
+ }
+ // This metric is per-operation, so divide by b.N and
+ // report it as a "/op" unit.
+ b.ReportMetric(float64(compares)/float64(b.N), "compares/op")
+ })
+}
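
As a rough usage sketch (not part of the patch, and assuming a toolchain that already contains this change, i.e. Go 1.13 or the matching gccgo), the new Extra map interacts with BenchmarkResult's accessors like this:

package main

import (
	"fmt"
	"testing"
	"time"
)

func main() {
	r := testing.BenchmarkResult{
		N: 100,
		T: 240 * time.Nanosecond,
		Extra: map[string]float64{
			"frobs/op": 0.2,
		},
	}
	fmt.Println(r.NsPerOp()) // 2, computed from T/N since "ns/op" is not in Extra
	fmt.Println(r.String())  // e.g. "     100\t         2.40 ns/op\t         0.200 frobs/op"

	// Reporting "ns/op" explicitly overrides the computed built-in metric.
	r.Extra["ns/op"] = 12345
	fmt.Println(r.NsPerOp()) // 12345
}
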
diff --git a/libgo/go/testing/cover.go b/libgo/go/testing/cover.go
index 17c03f5..62ee5ac 100644
--- a/libgo/go/testing/cover.go
+++ b/libgo/go/testing/cover.go
@@ -109,7 +109,8 @@ func coverReport() {
}
}
if total == 0 {
- total = 1
+ fmt.Println("coverage: [no statements]")
+ return
}
fmt.Printf("coverage: %.1f%% of statements%s\n", 100*float64(active)/float64(total), cover.CoveredPackages)
}
diff --git a/libgo/go/testing/example.go b/libgo/go/testing/example.go
index f4beb76..c122121 100644
--- a/libgo/go/testing/example.go
+++ b/libgo/go/testing/example.go
@@ -6,7 +6,6 @@ package testing
import (
"fmt"
- "io"
"os"
"sort"
"strings"
@@ -56,68 +55,39 @@ func sortLines(output string) string {
return strings.Join(lines, "\n")
}
-func runExample(eg InternalExample) (ok bool) {
- if *chatty {
- fmt.Printf("=== RUN %s\n", eg.Name)
- }
-
- // Capture stdout.
- stdout := os.Stdout
- r, w, err := os.Pipe()
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
- os.Stdout = w
- outC := make(chan string)
- go func() {
- var buf strings.Builder
- _, err := io.Copy(&buf, r)
- r.Close()
- if err != nil {
- fmt.Fprintf(os.Stderr, "testing: copying pipe: %v\n", err)
- os.Exit(1)
+// processRunResult computes a summary and status of the result of running an example test.
+// stdout is the captured output from stdout of the test.
+// recovered is the result of invoking recover after running the test, in case it panicked.
+//
+// If stdout doesn't match the expected output or if recovered is non-nil, it'll print the cause of failure to stdout.
+// If the test is chatty/verbose, it'll print a success message to stdout.
+// If recovered is non-nil, it'll panic with that value.
+func (eg *InternalExample) processRunResult(stdout string, timeSpent time.Duration, recovered interface{}) (passed bool) {
+ passed = true
+
+ dstr := fmtDuration(timeSpent)
+ var fail string
+ got := strings.TrimSpace(stdout)
+ want := strings.TrimSpace(eg.Output)
+ if eg.Unordered {
+ if sortLines(got) != sortLines(want) && recovered == nil {
+ fail = fmt.Sprintf("got:\n%s\nwant (unordered):\n%s\n", stdout, eg.Output)
}
- outC <- buf.String()
- }()
-
- start := time.Now()
- ok = true
-
- // Clean up in a deferred call so we can recover if the example panics.
- defer func() {
- dstr := fmtDuration(time.Since(start))
-
- // Close pipe, restore stdout, get output.
- w.Close()
- os.Stdout = stdout
- out := <-outC
-
- var fail string
- err := recover()
- got := strings.TrimSpace(out)
- want := strings.TrimSpace(eg.Output)
- if eg.Unordered {
- if sortLines(got) != sortLines(want) && err == nil {
- fail = fmt.Sprintf("got:\n%s\nwant (unordered):\n%s\n", out, eg.Output)
- }
- } else {
- if got != want && err == nil {
- fail = fmt.Sprintf("got:\n%s\nwant:\n%s\n", got, want)
- }
- }
- if fail != "" || err != nil {
- fmt.Printf("--- FAIL: %s (%s)\n%s", eg.Name, dstr, fail)
- ok = false
- } else if *chatty {
- fmt.Printf("--- PASS: %s (%s)\n", eg.Name, dstr)
- }
- if err != nil {
- panic(err)
+ } else {
+ if got != want && recovered == nil {
+ fail = fmt.Sprintf("got:\n%s\nwant:\n%s\n", got, want)
}
- }()
+ }
+ if fail != "" || recovered != nil {
+ fmt.Printf("--- FAIL: %s (%s)\n%s", eg.Name, dstr, fail)
+ passed = false
+ } else if *chatty {
+ fmt.Printf("--- PASS: %s (%s)\n", eg.Name, dstr)
+ }
+ if recovered != nil {
+ // Propagate the previously recovered result, by panicking.
+ panic(recovered)
+ }
- // Run example.
- eg.F()
return
}
diff --git a/libgo/go/testing/export_test.go b/libgo/go/testing/export_test.go
index 89781b4..0022491 100644
--- a/libgo/go/testing/export_test.go
+++ b/libgo/go/testing/export_test.go
@@ -4,7 +4,4 @@
package testing
-var (
- RoundDown10 = roundDown10
- RoundUp = roundUp
-)
+var PrettyPrint = prettyPrint
diff --git a/libgo/go/testing/internal/testdeps/deps.go b/libgo/go/testing/internal/testdeps/deps.go
index 14512e9..af08dd7 100644
--- a/libgo/go/testing/internal/testdeps/deps.go
+++ b/libgo/go/testing/internal/testdeps/deps.go
@@ -98,7 +98,6 @@ func (l *testLog) add(op, name string) {
}
var log testLog
-var didSetLogger bool
func (TestDeps) StartTestLog(w io.Writer) {
log.mu.Lock()
diff --git a/libgo/go/testing/quick/quick.go b/libgo/go/testing/quick/quick.go
index 0457fc7..c01647e 100644
--- a/libgo/go/testing/quick/quick.go
+++ b/libgo/go/testing/quick/quick.go
@@ -180,7 +180,8 @@ type Config struct {
MaxCount int
// MaxCountScale is a non-negative scale factor applied to the
// default maximum.
- // If zero, the default is unchanged.
+ // A count of zero implies the default, which is usually 100
+ // but can be set by the -quickchecks flag.
MaxCountScale float64
// Rand specifies a source of random numbers.
// If nil, a default pseudo-random source will be used.
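
A minimal sketch of how the documented MaxCountScale field is typically used (this example is not part of the patch; the property function is invented):

package main

import (
	"fmt"
	"testing/quick"
)

func main() {
	// Property: x + x == 2*x for every uint8.
	double := func(x uint8) bool { return int(x)+int(x) == 2*int(x) }

	// Run half the default number of cases (the default is usually 100,
	// adjustable via the -quickchecks flag).
	cfg := &quick.Config{MaxCountScale: 0.5}
	if err := quick.Check(double, cfg); err != nil {
		fmt.Println("property failed:", err)
	}
}
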
diff --git a/libgo/go/testing/quick/quick_test.go b/libgo/go/testing/quick/quick_test.go
index 4246cd1..9df6dd4 100644
--- a/libgo/go/testing/quick/quick_test.go
+++ b/libgo/go/testing/quick/quick_test.go
@@ -319,7 +319,7 @@ func TestInt64(t *testing.T) {
}
return true
}
- cfg := &Config{MaxCount: 100000}
+ cfg := &Config{MaxCount: 10000}
Check(f, cfg)
if uint64(lo)>>62 == 0 || uint64(hi)>>62 == 0 {
t.Errorf("int64 returned range %#016x,%#016x; does not look like full range", lo, hi)
diff --git a/libgo/go/testing/run_example.go b/libgo/go/testing/run_example.go
new file mode 100644
index 0000000..10bde49
--- /dev/null
+++ b/libgo/go/testing/run_example.go
@@ -0,0 +1,64 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !js
+
+// TODO(@musiol, @odeke-em): re-unify this entire file back into
+// example.go when js/wasm gets an os.Pipe implementation
+// and no longer needs this separation.
+
+package testing
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+)
+
+func runExample(eg InternalExample) (ok bool) {
+ if *chatty {
+ fmt.Printf("=== RUN %s\n", eg.Name)
+ }
+
+ // Capture stdout.
+ stdout := os.Stdout
+ r, w, err := os.Pipe()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ os.Stdout = w
+ outC := make(chan string)
+ go func() {
+ var buf strings.Builder
+ _, err := io.Copy(&buf, r)
+ r.Close()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: copying pipe: %v\n", err)
+ os.Exit(1)
+ }
+ outC <- buf.String()
+ }()
+
+ start := time.Now()
+
+ // Clean up in a deferred call so we can recover if the example panics.
+ defer func() {
+ timeSpent := time.Since(start)
+
+ // Close pipe, restore stdout, get output.
+ w.Close()
+ os.Stdout = stdout
+ out := <-outC
+
+ err := recover()
+ ok = eg.processRunResult(out, timeSpent, err)
+ }()
+
+ // Run example.
+ eg.F()
+ return
+}
diff --git a/libgo/go/testing/run_example_js.go b/libgo/go/testing/run_example_js.go
new file mode 100644
index 0000000..472e0c5
--- /dev/null
+++ b/libgo/go/testing/run_example_js.go
@@ -0,0 +1,74 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build js
+
+package testing
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+)
+
+// TODO(@musiol, @odeke-em): unify this code back into
+// example.go when js/wasm gets an os.Pipe implementation.
+func runExample(eg InternalExample) (ok bool) {
+ if *chatty {
+ fmt.Printf("=== RUN %s\n", eg.Name)
+ }
+
+ // Capture stdout to temporary file. We're not using
+ // os.Pipe because it is not supported on js/wasm.
+ stdout := os.Stdout
+ f := createTempFile(eg.Name)
+ os.Stdout = f
+ start := time.Now()
+
+ // Clean up in a deferred call so we can recover if the example panics.
+ defer func() {
+ timeSpent := time.Since(start)
+
+ // Restore stdout, get output and remove temporary file.
+ os.Stdout = stdout
+ var buf strings.Builder
+ _, seekErr := f.Seek(0, os.SEEK_SET)
+ _, readErr := io.Copy(&buf, f)
+ out := buf.String()
+ f.Close()
+ os.Remove(f.Name())
+ if seekErr != nil {
+ fmt.Fprintf(os.Stderr, "testing: seek temp file: %v\n", seekErr)
+ os.Exit(1)
+ }
+ if readErr != nil {
+ fmt.Fprintf(os.Stderr, "testing: read temp file: %v\n", readErr)
+ os.Exit(1)
+ }
+
+ err := recover()
+ ok = eg.processRunResult(out, timeSpent, err)
+ }()
+
+ // Run example.
+ eg.F()
+ return
+}
+
+func createTempFile(exampleName string) *os.File {
+ for i := 0; ; i++ {
+ name := fmt.Sprintf("%s/go-example-stdout-%s-%d.txt", os.TempDir(), exampleName, i)
+ f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+ if err != nil {
+ if os.IsExist(err) {
+ continue
+ }
+ fmt.Fprintf(os.Stderr, "testing: open temp file: %v\n", err)
+ os.Exit(1)
+ }
+ return f
+ }
+}
diff --git a/libgo/go/testing/sub_test.go b/libgo/go/testing/sub_test.go
index 5a6d51b..cc5dd2f 100644
--- a/libgo/go/testing/sub_test.go
+++ b/libgo/go/testing/sub_test.go
@@ -16,7 +16,7 @@ import (
)
func init() {
- // Make benchmark tests run 10* faster.
+ // Make benchmark tests run 10x faster.
benchTime.d = 100 * time.Millisecond
}
@@ -756,6 +756,9 @@ func TestLogAfterComplete(t *T) {
}
func TestBenchmark(t *T) {
+ if Short() {
+ t.Skip("skipping in short mode")
+ }
res := Benchmark(func(b *B) {
for i := 0; i < 5; i++ {
b.Run("", func(b *B) {
diff --git a/libgo/go/testing/testing.go b/libgo/go/testing/testing.go
index 3068630..339df13 100644
--- a/libgo/go/testing/testing.go
+++ b/libgo/go/testing/testing.go
@@ -249,7 +249,18 @@ import (
"time"
)
-var (
+var initRan bool
+
+// Init registers testing flags. These flags are automatically registered by
+// the "go test" command before running test functions, so Init is only needed
+// when calling functions such as Benchmark without using "go test".
+//
+// Init has no effect if it was already called.
+func Init() {
+ if initRan {
+ return
+ }
+ initRan = true
// The short flag requests that tests run more quickly, but its functionality
// is provided by test writers themselves. The testing package is just its
// home. The all.bash installation script sets it to make installation more
@@ -265,25 +276,50 @@ var (
// this flag lets "go test" tell the binary to write the files in the directory where
// the "go test" command is run.
outputDir = flag.String("test.outputdir", "", "write profiles to `dir`")
-
// Report as tests are run; default is silent for success.
- chatty = flag.Bool("test.v", false, "verbose: print additional output")
- count = flag.Uint("test.count", 1, "run tests and benchmarks `n` times")
- coverProfile = flag.String("test.coverprofile", "", "write a coverage profile to `file`")
- matchList = flag.String("test.list", "", "list tests, examples, and benchmarks matching `regexp` then exit")
- match = flag.String("test.run", "", "run only tests and examples matching `regexp`")
- memProfile = flag.String("test.memprofile", "", "write an allocation profile to `file`")
- memProfileRate = flag.Int("test.memprofilerate", 0, "set memory allocation profiling `rate` (see runtime.MemProfileRate)")
- cpuProfile = flag.String("test.cpuprofile", "", "write a cpu profile to `file`")
- blockProfile = flag.String("test.blockprofile", "", "write a goroutine blocking profile to `file`")
- blockProfileRate = flag.Int("test.blockprofilerate", 1, "set blocking profile `rate` (see runtime.SetBlockProfileRate)")
- mutexProfile = flag.String("test.mutexprofile", "", "write a mutex contention profile to the named file after execution")
+ chatty = flag.Bool("test.v", false, "verbose: print additional output")
+ count = flag.Uint("test.count", 1, "run tests and benchmarks `n` times")
+ coverProfile = flag.String("test.coverprofile", "", "write a coverage profile to `file`")
+ matchList = flag.String("test.list", "", "list tests, examples, and benchmarks matching `regexp` then exit")
+ match = flag.String("test.run", "", "run only tests and examples matching `regexp`")
+ memProfile = flag.String("test.memprofile", "", "write an allocation profile to `file`")
+ memProfileRate = flag.Int("test.memprofilerate", 0, "set memory allocation profiling `rate` (see runtime.MemProfileRate)")
+ cpuProfile = flag.String("test.cpuprofile", "", "write a cpu profile to `file`")
+ blockProfile = flag.String("test.blockprofile", "", "write a goroutine blocking profile to `file`")
+ blockProfileRate = flag.Int("test.blockprofilerate", 1, "set blocking profile `rate` (see runtime.SetBlockProfileRate)")
+ mutexProfile = flag.String("test.mutexprofile", "", "write a mutex contention profile to the named file after execution")
mutexProfileFraction = flag.Int("test.mutexprofilefraction", 1, "if >= 0, calls runtime.SetMutexProfileFraction()")
- traceFile = flag.String("test.trace", "", "write an execution trace to `file`")
- timeout = flag.Duration("test.timeout", 0, "panic test binary after duration `d` (default 0, timeout disabled)")
- cpuListStr = flag.String("test.cpu", "", "comma-separated `list` of cpu counts to run each test with")
- parallel = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "run at most `n` tests in parallel")
- testlog = flag.String("test.testlogfile", "", "write test action log to `file` (for use only by cmd/go)")
+ traceFile = flag.String("test.trace", "", "write an execution trace to `file`")
+ timeout = flag.Duration("test.timeout", 0, "panic test binary after duration `d` (default 0, timeout disabled)")
+ cpuListStr = flag.String("test.cpu", "", "comma-separated `list` of cpu counts to run each test with")
+ parallel = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "run at most `n` tests in parallel")
+ testlog = flag.String("test.testlogfile", "", "write test action log to `file` (for use only by cmd/go)")
+
+ initBenchmarkFlags()
+}
+
+var (
+ // Flags, registered during Init.
+ short *bool
+ failFast *bool
+ outputDir *string
+ chatty *bool
+ count *uint
+ coverProfile *string
+ matchList *string
+ match *string
+ memProfile *string
+ memProfileRate *int
+ cpuProfile *string
+ blockProfile *string
+ blockProfileRate *int
+ mutexProfile *string
+ mutexProfileFraction *int
+ traceFile *string
+ timeout *time.Duration
+ cpuListStr *string
+ parallel *int
+ testlog *string
haveExamples bool // are there examples?
@@ -328,11 +364,12 @@ type common struct {
// Short reports whether the -test.short flag is set.
func Short() bool {
- // Catch code that calls this from TestMain without first
- // calling flag.Parse. This shouldn't really be a panic
+ if short == nil {
+ panic("testing: Short called before Init")
+ }
+ // Catch code that calls this from TestMain without first calling flag.Parse.
if !flag.Parsed() {
- fmt.Fprintf(os.Stderr, "testing: testing.Short called before flag.Parse\n")
- os.Exit(2)
+ panic("testing: Short called before Parse")
}
return *short
@@ -347,6 +384,13 @@ func CoverMode() string {
// Verbose reports whether the -test.v flag is set.
func Verbose() bool {
+ // Same as in Short.
+ if chatty == nil {
+ panic("testing: Verbose called before Init")
+ }
+ if !flag.Parsed() {
+ panic("testing: Verbose called before Parse")
+ }
return *chatty
}
@@ -614,7 +658,7 @@ func (c *common) log(s string) {
c.logDepth(s, 3) // logDepth + log + public function
}
-// logDepth generates the output. At an arbitary stack depth
+// logDepth generates the output at an arbitrary stack depth.
func (c *common) logDepth(s string, depth int) {
c.mu.Lock()
defer c.mu.Unlock()
@@ -727,7 +771,7 @@ func (c *common) Helper() {
// for the caller after skip frames (where 0 means the current function).
func callerName(skip int) string {
// Make room for the skip PC.
- var pc [2]uintptr
+ var pc [1]uintptr
n := runtime.Callers(skip+2, pc[:]) // skip + runtime.Callers + callerName
if n == 0 {
panic("testing: zero callers found")
@@ -1031,6 +1075,12 @@ type testDeps interface {
// It is not meant to be called directly and is not subject to the Go 1 compatibility document.
// It may change signature from release to release.
func MainStart(deps testDeps, tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) *M {
+ // In most cases, Init has already been called by the testinginit code
+ // that 'go test' injects into test packages.
+ // Call it again here to handle cases such as:
+ // - test packages that don't import "testing" (such as example-only packages)
+ // - direct use of MainStart (though that isn't well-supported)
+ Init()
return &M{
deps: deps,
tests: tests,
@@ -1287,7 +1337,7 @@ func (m *M) writeProfiles() {
os.Exit(2)
}
if err = m.deps.WriteProfileTo("mutex", f, 0); err != nil {
- fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *blockProfile, err)
+ fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *mutexProfile, err)
os.Exit(2)
}
f.Close()
@@ -1303,20 +1353,18 @@ func toOutputDir(path string) string {
if *outputDir == "" || path == "" {
return path
}
- if runtime.GOOS == "windows" {
- // On Windows, it's clumsy, but we can be almost always correct
- // by just looking for a drive letter and a colon.
- // Absolute paths always have a drive letter (ignoring UNC).
- // Problem: if path == "C:A" and outputdir == "C:\Go" it's unclear
- // what to do, but even then path/filepath doesn't help.
- // TODO: Worth doing better? Probably not, because we're here only
- // under the management of go test.
- if len(path) >= 2 {
- letter, colon := path[0], path[1]
- if ('a' <= letter && letter <= 'z' || 'A' <= letter && letter <= 'Z') && colon == ':' {
- // If path starts with a drive letter we're stuck with it regardless.
- return path
- }
+ // On Windows, it's clumsy, but we can be almost always correct
+ // by just looking for a drive letter and a colon.
+ // Absolute paths always have a drive letter (ignoring UNC).
+ // Problem: if path == "C:A" and outputdir == "C:\Go" it's unclear
+ // what to do, but even then path/filepath doesn't help.
+ // TODO: Worth doing better? Probably not, because we're here only
+ // under the management of go test.
+ if runtime.GOOS == "windows" && len(path) >= 2 {
+ letter, colon := path[0], path[1]
+ if ('a' <= letter && letter <= 'z' || 'A' <= letter && letter <= 'Z') && colon == ':' {
+ // If path starts with a drive letter we're stuck with it regardless.
+ return path
}
}
if os.IsPathSeparator(path[0]) {
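
To close, a standalone sketch (not part of the patch) of the pattern the new Init documentation describes: a program that calls Benchmark outside "go test" registers the testing flags with Init before flag.Parse, so flags such as -test.benchtime exist and Short/Verbose do not panic. The benchmarked body here is an arbitrary placeholder.

package main

import (
	"flag"
	"fmt"
	"testing"
)

func main() {
	testing.Init() // register the test.* flags, including the benchmark flags
	flag.Parse()   // must come after Init; Short()/Verbose() panic if Init was skipped

	res := testing.Benchmark(func(b *testing.B) {
		s := ""
		for i := 0; i < b.N; i++ {
			s += "x"
		}
		_ = s
	})
	fmt.Println(res.String())
	fmt.Println(res.MemString())
}
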