author     Ian Lance Taylor <ian@gcc.gnu.org>    2018-07-31 20:32:08 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>    2018-07-31 20:32:08 +0000
commit     414925ab0cb8d0aea39cb3383b18f72f3ce887a0 (patch)
tree       2f08571e507f4d5a958e113cb0fde621c42c138e /libgo/go/runtime
parent     ce311a8cae4489058f8601bebdf511b7fb5fce26 (diff)
parent     8810325ff666643de80110c5c6b4ce1cef921e1b (diff)
Merge from trunk revision 263114.
From-SVN: r263179
Diffstat (limited to 'libgo/go/runtime')
-rw-r--r--  libgo/go/runtime/ffi.go               48
-rw-r--r--  libgo/go/runtime/pprof/pprof_test.go  51
-rw-r--r--  libgo/go/runtime/proc.go              30
3 files changed, 115 insertions, 14 deletions
diff --git a/libgo/go/runtime/ffi.go b/libgo/go/runtime/ffi.go
index 164e177..00858f1 100644
--- a/libgo/go/runtime/ffi.go
+++ b/libgo/go/runtime/ffi.go
@@ -225,11 +225,40 @@ func structToFFI(typ *structtype) *__ffi_type {
return emptyStructToFFI()
}
- fields := make([]*__ffi_type, c+1)
+ fields := make([]*__ffi_type, 0, c+1)
+ checkPad := false
for i, v := range typ.fields {
- fields[i] = typeToFFI(v.typ)
+ // Skip zero-sized fields; they confuse libffi,
+ // and there is no value to pass in any case.
+ // We do have to check whether the alignment of the
+ // zero-sized field introduces any padding for the
+ // next field.
+ if v.typ.size == 0 {
+ checkPad = true
+ continue
+ }
+
+ if checkPad {
+ off := uintptr(0)
+ for j := i - 1; j >= 0; j-- {
+ if typ.fields[j].typ.size > 0 {
+ off = typ.fields[j].offset() + typ.fields[j].typ.size
+ break
+ }
+ }
+ off += uintptr(v.typ.align) - 1
+ off &^= uintptr(v.typ.align) - 1
+ if off != v.offset() {
+ fields = append(fields, padFFI(v.offset()-off))
+ }
+ checkPad = false
+ }
+
+ fields = append(fields, typeToFFI(v.typ))
}
- fields[c] = nil
+
+ fields = append(fields, nil)
+
return &__ffi_type{
_type: _FFI_TYPE_STRUCT,
elements: &fields[0],
@@ -305,6 +334,19 @@ func emptyStructToFFI() *__ffi_type {
}
}
+// padFFI returns a padding field of the given size
+func padFFI(size uintptr) *__ffi_type {
+ elements := make([]*__ffi_type, size+1)
+ for i := uintptr(0); i < size; i++ {
+ elements[i] = ffi_type_uint8()
+ }
+ elements[size] = nil
+ return &__ffi_type{
+ _type: _FFI_TYPE_STRUCT,
+ elements: &elements[0],
+ }
+}
+
//go:linkname makeCIF reflect.makeCIF
// makeCIF is used by the reflect package to allocate a CIF.
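The structToFFI change above skips zero-sized struct fields, which libffi cannot describe, but still has to account for any padding the alignment of such a field forces before the next real field; that padding is emitted through the new padFFI helper. A minimal standalone sketch of the scenario, separate from libgo and using a made-up struct, is:

package main

import (
	"fmt"
	"unsafe"
)

// demo is a hypothetical struct, not taken from the commit: the blank
// zero-sized field has the alignment of int64, so on a 64-bit target
// it pushes b well past the end of a.
type demo struct {
	a uint8    // offset 0, size 1
	_ [0]int64 // size 0, align 8
	b uint8    // offset 8: seven bytes of padding precede it
}

func main() {
	var d demo
	// structToFFI would skip the zero-sized field, notice that b's real
	// offset (8) differs from the end of a (1) rounded up to b's own
	// alignment, and insert a padFFI element of 7 one-byte members.
	fmt.Println("offset of b:", unsafe.Offsetof(d.b))
	fmt.Println("size of demo:", unsafe.Sizeof(d))
}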
diff --git a/libgo/go/runtime/pprof/pprof_test.go b/libgo/go/runtime/pprof/pprof_test.go
index 07a7946..02d99f5 100644
--- a/libgo/go/runtime/pprof/pprof_test.go
+++ b/libgo/go/runtime/pprof/pprof_test.go
@@ -72,15 +72,24 @@ func cpuHog2(x int) int {
return foo
}
+// Return a list of functions that we don't want to ever appear in CPU
+// profiles. For gccgo, that list includes the sigprof handler itself.
+func avoidFunctions() []string {
+ if runtime.Compiler == "gccgo" {
+ return []string{"runtime.sigprof"}
+ }
+ return nil
+}
+
func TestCPUProfile(t *testing.T) {
- testCPUProfile(t, []string{"pprof.cpuHog1"}, func(dur time.Duration) {
+ testCPUProfile(t, []string{"pprof.cpuHog1"}, avoidFunctions(), func(dur time.Duration) {
cpuHogger(cpuHog1, &salt1, dur)
})
}
func TestCPUProfileMultithreaded(t *testing.T) {
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
- testCPUProfile(t, []string{"pprof.cpuHog1", "pprof.cpuHog2"}, func(dur time.Duration) {
+ testCPUProfile(t, []string{"pprof.cpuHog1", "pprof.cpuHog2"}, avoidFunctions(), func(dur time.Duration) {
c := make(chan int)
go func() {
cpuHogger(cpuHog1, &salt1, dur)
@@ -92,7 +101,7 @@ func TestCPUProfileMultithreaded(t *testing.T) {
}
func TestCPUProfileInlining(t *testing.T) {
- testCPUProfile(t, []string{"pprof.inlinedCallee", "pprof.inlinedCaller"}, func(dur time.Duration) {
+ testCPUProfile(t, []string{"pprof.inlinedCallee", "pprof.inlinedCaller"}, avoidFunctions(), func(dur time.Duration) {
cpuHogger(inlinedCaller, &salt1, dur)
})
}
@@ -130,7 +139,7 @@ func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Loca
}
}
-func testCPUProfile(t *testing.T, need []string, f func(dur time.Duration)) {
+func testCPUProfile(t *testing.T, need []string, avoid []string, f func(dur time.Duration)) {
switch runtime.GOOS {
case "darwin":
switch runtime.GOARCH {
@@ -169,7 +178,7 @@ func testCPUProfile(t *testing.T, need []string, f func(dur time.Duration)) {
f(duration)
StopCPUProfile()
- if profileOk(t, need, prof, duration) {
+ if profileOk(t, need, avoid, prof, duration) {
return
}
@@ -202,11 +211,13 @@ func contains(slice []string, s string) bool {
return false
}
-func profileOk(t *testing.T, need []string, prof bytes.Buffer, duration time.Duration) (ok bool) {
+func profileOk(t *testing.T, need []string, avoid []string, prof bytes.Buffer, duration time.Duration) (ok bool) {
ok = true
- // Check that profile is well formed and contains need.
+ // Check that profile is well formed, contains 'need', and does not contain
+ // anything from 'avoid'.
have := make([]uintptr, len(need))
+ avoidSamples := make([]uintptr, len(avoid))
var samples uintptr
var buf bytes.Buffer
parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
@@ -229,6 +240,15 @@ func profileOk(t *testing.T, need []string, prof bytes.Buffer, duration time.Dur
}
}
}
+ for i, name := range avoid {
+ for _, loc := range stk {
+ for _, line := range loc.Line {
+ if strings.Contains(line.Function.Name, name) {
+ avoidSamples[i] += count
+ }
+ }
+ }
+ }
fmt.Fprintf(&buf, "\n")
})
t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())
@@ -251,6 +271,14 @@ func profileOk(t *testing.T, need []string, prof bytes.Buffer, duration time.Dur
ok = false
}
+ for i, name := range avoid {
+ bad := avoidSamples[i]
+ if bad != 0 {
+ t.Logf("found %d samples in avoid-function %s\n", bad, name)
+ ok = false
+ }
+ }
+
if len(need) == 0 {
return ok
}
@@ -318,6 +346,9 @@ func TestCPUProfileWithFork(t *testing.T) {
// If it did, it would see inconsistent state and would either record an incorrect stack
// or crash because the stack was malformed.
func TestGoroutineSwitch(t *testing.T) {
+ if runtime.Compiler == "gccgo" {
+ t.Skip("not applicable for gccgo")
+ }
// How much to try. These defaults take about 1 seconds
// on a 2012 MacBook Pro. The ones in short mode take
// about 0.1 seconds.
@@ -377,7 +408,7 @@ func fprintStack(w io.Writer, stk []*profile.Location) {
// Test that profiling of division operations is okay, especially on ARM. See issue 6681.
func TestMathBigDivide(t *testing.T) {
- testCPUProfile(t, nil, func(duration time.Duration) {
+ testCPUProfile(t, nil, nil, func(duration time.Duration) {
t := time.After(duration)
pi := new(big.Int)
for {
@@ -851,7 +882,7 @@ func TestEmptyCallStack(t *testing.T) {
}
func TestCPUProfileLabel(t *testing.T) {
- testCPUProfile(t, []string{"pprof.cpuHogger;key=value"}, func(dur time.Duration) {
+ testCPUProfile(t, []string{"pprof.cpuHogger;key=value"}, avoidFunctions(), func(dur time.Duration) {
Do(context.Background(), Labels("key", "value"), func(context.Context) {
cpuHogger(cpuHog1, &salt1, dur)
})
@@ -862,7 +893,7 @@ func TestLabelRace(t *testing.T) {
// Test the race detector annotations for synchronization
// between settings labels and consuming them from the
// profile.
- testCPUProfile(t, []string{"pprof.cpuHogger;key=value"}, func(dur time.Duration) {
+ testCPUProfile(t, []string{"pprof.cpuHogger;key=value"}, avoidFunctions(), func(dur time.Duration) {
start := time.Now()
var wg sync.WaitGroup
for time.Since(start) < dur {
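The test changes above thread a new "avoid" list through testCPUProfile and profileOk so that, under gccgo, any sample whose stack mentions runtime.sigprof counts as bad and fails the check. A small standalone sketch of that accounting, with invented sample data and a hypothetical helper name (nothing below is part of pprof_test.go), might look like:

package main

import (
	"fmt"
	"strings"
)

type sample struct {
	count uintptr
	stack []string // function names, innermost first
}

// avoidSampleCounts mirrors the avoid-list bookkeeping in profileOk:
// every sample whose stack contains an avoided function name adds its
// count to that entry; any nonzero total marks the profile as bad.
func avoidSampleCounts(avoid []string, samples []sample) []uintptr {
	counts := make([]uintptr, len(avoid))
	for _, s := range samples {
		for i, name := range avoid {
			for _, fn := range s.stack {
				if strings.Contains(fn, name) {
					counts[i] += s.count
				}
			}
		}
	}
	return counts
}

func main() {
	avoid := []string{"runtime.sigprof"}
	samples := []sample{
		{3, []string{"pprof.cpuHog1", "main.main"}},
		{1, []string{"runtime.sigprof", "runtime.sighandler"}},
	}
	fmt.Println(avoidSampleCounts(avoid, samples)) // [1]: one offending sample
}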
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go
index a6746c9..5826958 100644
--- a/libgo/go/runtime/proc.go
+++ b/libgo/go/runtime/proc.go
@@ -3418,8 +3418,36 @@ func sigprof(pc uintptr, gp *g, mp *m) {
var stklocs [maxCPUProfStack]location
n = callers(0, stklocs[:])
+ // Issue 26595: the stack trace we've just collected is going
+ // to include frames that we don't want to report in the CPU
+ // profile, including signal handler frames. Here is what we
+ // might typically see at the point of "callers" above for a
+ // signal delivered to the application routine "interesting"
+ // called by "main".
+ //
+ // 0: runtime.sigprof
+ // 1: runtime.sighandler
+ // 2: runtime.sigtrampgo
+ // 3: runtime.sigtramp
+ // 4: <signal handler called>
+ // 5: main.interesting_routine
+ // 6: main.main
+ //
+ // To ensure a sane profile, walk through the frames in
+ // "stklocs" until we find the "runtime.sigtramp" frame, then
+ // report only those frames below the frame one down from
+ // that. If for some reason "runtime.sigtramp" is not present,
+ // don't make any changes.
+ framesToDiscard := 0
+ for i := 0; i < n; i++ {
+ if stklocs[i].function == "runtime.sigtramp" && i+2 < n {
+ framesToDiscard = i + 2
+ n -= framesToDiscard
+ break
+ }
+ }
for i := 0; i < n; i++ {
- stk[i] = stklocs[i].pc
+ stk[i] = stklocs[i+framesToDiscard].pc
}
}
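The sigprof change above trims the signal-handler prefix from the collected stack: everything up to and including the frame that follows runtime.sigtramp (the "<signal handler called>" marker) is discarded, and the stack is left untouched when sigtramp is not present. A standalone sketch of that rule on plain strings (the helper name and sample stack are invented for illustration, outside the runtime):

package main

import "fmt"

// trimSignalFrames drops the leading signal-delivery frames the same
// way sigprof now does: find "runtime.sigtramp", skip it plus the
// marker frame after it, and keep everything below; if sigtramp is
// absent, return the input unchanged.
func trimSignalFrames(frames []string) []string {
	for i, f := range frames {
		if f == "runtime.sigtramp" && i+2 < len(frames) {
			return frames[i+2:]
		}
	}
	return frames
}

func main() {
	stk := []string{
		"runtime.sigprof",
		"runtime.sighandler",
		"runtime.sigtrampgo",
		"runtime.sigtramp",
		"<signal handler called>",
		"main.interesting_routine",
		"main.main",
	}
	// Prints only the application frames:
	// [main.interesting_routine main.main]
	fmt.Println(trimSignalFrames(stk))
}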