Diffstat (limited to 'libgo/go/testing/benchmark_test.go')
-rw-r--r--  libgo/go/testing/benchmark_test.go  122
1 file changed, 82 insertions(+), 40 deletions(-)
diff --git a/libgo/go/testing/benchmark_test.go b/libgo/go/testing/benchmark_test.go
index 431bb53..1434c26 100644
--- a/libgo/go/testing/benchmark_test.go
+++ b/libgo/go/testing/benchmark_test.go
@@ -7,63 +7,70 @@ package testing_test
import (
"bytes"
"runtime"
+ "sort"
+ "strings"
"sync/atomic"
"testing"
"text/template"
+ "time"
)
-var roundDownTests = []struct {
- v, expected int
+var prettyPrintTests = []struct {
+ v float64
+ expected string
}{
- {1, 1},
- {9, 1},
- {10, 10},
- {11, 10},
- {100, 100},
- {101, 100},
- {999, 100},
- {1000, 1000},
- {1001, 1000},
+ {0, " 0 x"},
+ {1234.1, " 1234 x"},
+ {-1234.1, " -1234 x"},
+ {99.950001, " 100 x"},
+ {99.949999, " 99.9 x"},
+ {9.9950001, " 10.0 x"},
+ {9.9949999, " 9.99 x"},
+ {-9.9949999, " -9.99 x"},
+ {0.0099950001, " 0.0100 x"},
+ {0.0099949999, " 0.00999 x"},
}
-func TestRoundDown10(t *testing.T) {
- for _, tt := range roundDownTests {
- actual := testing.RoundDown10(tt.v)
- if tt.expected != actual {
- t.Errorf("roundDown10(%d): expected %d, actual %d", tt.v, tt.expected, actual)
+func TestPrettyPrint(t *testing.T) {
+ for _, tt := range prettyPrintTests {
+ buf := new(strings.Builder)
+ testing.PrettyPrint(buf, tt.v, "x")
+ if tt.expected != buf.String() {
+ t.Errorf("prettyPrint(%v): expected %q, actual %q", tt.v, tt.expected, buf.String())
}
}
}
-var roundUpTests = []struct {
- v, expected int
-}{
- {0, 1},
- {1, 1},
- {2, 2},
- {3, 3},
- {5, 5},
- {9, 10},
- {999, 1000},
- {1000, 1000},
- {1400, 2000},
- {1700, 2000},
- {2700, 3000},
- {4999, 5000},
- {5000, 5000},
- {5001, 10000},
-}
+func TestResultString(t *testing.T) {
+ // Test fractional ns/op handling
+ r := testing.BenchmarkResult{
+ N: 100,
+ T: 240 * time.Nanosecond,
+ }
+ if r.NsPerOp() != 2 {
+ t.Errorf("NsPerOp: expected 2, actual %v", r.NsPerOp())
+ }
+ if want, got := " 100\t 2.40 ns/op", r.String(); want != got {
+ t.Errorf("String: expected %q, actual %q", want, got)
+ }
-func TestRoundUp(t *testing.T) {
- for _, tt := range roundUpTests {
- actual := testing.RoundUp(tt.v)
- if tt.expected != actual {
- t.Errorf("roundUp(%d): expected %d, actual %d", tt.v, tt.expected, actual)
- }
+ // Test sub-1 ns/op (issue #31005)
+ r.T = 40 * time.Nanosecond
+ if want, got := " 100\t 0.400 ns/op", r.String(); want != got {
+ t.Errorf("String: expected %q, actual %q", want, got)
+ }
+
+ // Test 0 ns/op
+ r.T = 0
+ if want, got := " 100", r.String(); want != got {
+ t.Errorf("String: expected %q, actual %q", want, got)
}
}
func TestRunParallel(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
testing.Benchmark(func(b *testing.B) {
procs := uint32(0)
iters := uint64(0)
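
Aside: the new TestResultString in the hunk above only uses exported API, so its behavior is easy to try outside the test harness. The following is a minimal sketch, not part of the patch; it assumes a toolchain that already carries this change (fractional ns/op formatting), and the exact column widths in String's output vary by Go release.

package main

import (
	"fmt"
	"testing"
	"time"
)

func main() {
	// 100 iterations taking 240ns in total: 2.4ns per operation.
	r := testing.BenchmarkResult{N: 100, T: 240 * time.Nanosecond}

	// NsPerOp truncates to a whole number of nanoseconds.
	fmt.Println(r.NsPerOp()) // 2

	// String keeps the fraction (e.g. "... 2.40 ns/op") on toolchains
	// with this change; older releases print only the truncated integer.
	fmt.Println(r.String())
}
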
@@ -111,3 +118,38 @@ func ExampleB_RunParallel() {
})
})
}
+
+func TestReportMetric(t *testing.T) {
+ res := testing.Benchmark(func(b *testing.B) {
+ b.ReportMetric(12345, "ns/op")
+ b.ReportMetric(0.2, "frobs/op")
+ })
+ // Test built-in overriding.
+ if res.NsPerOp() != 12345 {
+ t.Errorf("NsPerOp: expected %v, actual %v", 12345, res.NsPerOp())
+ }
+ // Test stringing.
+ res.N = 1 // Make the output stable
+ want := " 1\t 12345 ns/op\t 0.200 frobs/op"
+ if want != res.String() {
+ t.Errorf("expected %q, actual %q", want, res.String())
+ }
+}
+
+func ExampleB_ReportMetric() {
+ // This reports a custom benchmark metric relevant to a
+ // specific algorithm (in this case, sorting).
+ testing.Benchmark(func(b *testing.B) {
+ var compares int64
+ for i := 0; i < b.N; i++ {
+ s := []int{5, 4, 3, 2, 1}
+ sort.Slice(s, func(i, j int) bool {
+ compares++
+ return s[i] < s[j]
+ })
+ }
+ // This metric is per-operation, so divide by b.N and
+ // report it as a "/op" unit.
+ b.ReportMetric(float64(compares)/float64(b.N), "compares/op")
+ })
+}