diff options
Diffstat (limited to 'libgo/go/testing/sub_test.go')
-rw-r--r-- | libgo/go/testing/sub_test.go | 93 |
1 file changed, 91 insertions, 2 deletions
diff --git a/libgo/go/testing/sub_test.go b/libgo/go/testing/sub_test.go index bb7b3e0..acf5dea 100644 --- a/libgo/go/testing/sub_test.go +++ b/libgo/go/testing/sub_test.go @@ -8,11 +8,18 @@ import ( "bytes" "fmt" "regexp" + "runtime" "strings" + "sync" "sync/atomic" "time" ) +func init() { + // Make benchmark tests run 10* faster. + *benchTime = 100 * time.Millisecond +} + func TestTestContext(t *T) { const ( add1 = 0 @@ -455,8 +462,14 @@ func TestBRun(t *T) { _ = append([]byte(nil), buf[:]...) } } - b.Run("", func(b *B) { alloc(b) }) - b.Run("", func(b *B) { alloc(b) }) + b.Run("", func(b *B) { + alloc(b) + b.ReportAllocs() + }) + b.Run("", func(b *B) { + alloc(b) + b.ReportAllocs() + }) // runtime.MemStats sometimes reports more allocations than the // benchmark is responsible for. Luckily the point of this test is // to ensure that the results are not underreported, so we can @@ -517,6 +530,26 @@ func TestBenchmarkOutput(t *T) { Benchmark(func(b *B) {}) } +func TestBenchmarkStartsFrom1(t *T) { + var first = true + Benchmark(func(b *B) { + if first && b.N != 1 { + panic(fmt.Sprintf("Benchmark() first N=%v; want 1", b.N)) + } + first = false + }) +} + +func TestBenchmarkReadMemStatsBeforeFirstRun(t *T) { + var first = true + Benchmark(func(b *B) { + if first && (b.startAllocs == 0 || b.startBytes == 0) { + panic(fmt.Sprintf("ReadMemStats not called before first run")) + } + first = false + }) +} + func TestParallelSub(t *T) { c := make(chan int) block := make(chan int) @@ -532,3 +565,59 @@ func TestParallelSub(t *T) { <-c } } + +type funcWriter func([]byte) (int, error) + +func (fw funcWriter) Write(b []byte) (int, error) { return fw(b) } + +func TestRacyOutput(t *T) { + var runs int32 // The number of running Writes + var races int32 // Incremented for each race detected + raceDetector := func(b []byte) (int, error) { + // Check if some other goroutine is concurrently calling Write. 
+ if atomic.LoadInt32(&runs) > 0 { + atomic.AddInt32(&races, 1) // Race detected! + } + atomic.AddInt32(&runs, 1) + defer atomic.AddInt32(&runs, -1) + runtime.Gosched() // Increase probability of a race + return len(b), nil + } + + var wg sync.WaitGroup + root := &T{ + common: common{w: funcWriter(raceDetector), chatty: true}, + context: newTestContext(1, newMatcher(regexp.MatchString, "", "")), + } + root.Run("", func(t *T) { + for i := 0; i < 100; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + t.Run(fmt.Sprint(i), func(t *T) { + t.Logf("testing run %d", i) + }) + }(i) + } + }) + wg.Wait() + + if races > 0 { + t.Errorf("detected %d racy Writes", races) + } +} + +func TestBenchmark(t *T) { + res := Benchmark(func(b *B) { + for i := 0; i < 5; i++ { + b.Run("", func(b *B) { + for i := 0; i < b.N; i++ { + time.Sleep(time.Millisecond) + } + }) + } + }) + if res.NsPerOp() < 4000000 { + t.Errorf("want >5ms; got %v", time.Duration(res.NsPerOp())) + } +} |