author | Ian Lance Taylor <ian@gcc.gnu.org> | 2019-05-14 14:59:42 +0000
---|---|---
committer | Ian Lance Taylor <ian@gcc.gnu.org> | 2019-05-14 14:59:42 +0000
commit | 1ac09ef2c611d3113665ec8c74e38b125217edb3 | (patch)
tree | 0bed1e11d205c99ef1f13dd4b7aece761779c360 | /libgo/runtime/go-callers.c
parent | ce9f305e44ff0353ee9e6cb07599240354ae9ed2 | (diff)
libgo: reduce overhead for memory/block/mutex profiling
Revise the gccgo version of memory/block/mutex profiling to reduce
runtime overhead. The main change is to collect raw stack traces while
the profile is active, then post-process the stacks only at the point
where the final profile is about to be consumed. Memory profiling (at
a very low sampling rate) is enabled by default, so the symbolization
and DWARF-reading overhead of backtrace_full on every sample was
slowing things down relative to the main Go runtime.
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/171497
From-SVN: r271172
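
To make the deferred-symbolization idea above concrete, here is a minimal
standalone sketch of the same record-now / symbolize-later pattern, written
against the public libbacktrace API (the library behind backtrace_simple and
backtrace_full). It is only an illustration, not the runtime's code: names
such as `raw_trace`, `collect_pc`, and `print_frame` are invented for the
example, and it assumes the program is built with `-g` and `-lbacktrace` so
DWARF data is available at report time.

```c
/* Sketch: record cheap raw PCs at sample time, defer the costly
   DWARF symbolization until the profile is read.  Build with:
   gcc -g example.c -lbacktrace  */
#include <backtrace.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PCS 32

struct raw_trace
{
  uintptr_t pcs[MAX_PCS];
  int n;
};

static void
error_cb (void *data, const char *msg, int errnum)
{
  fprintf (stderr, "libbacktrace: %s (%d)\n", msg, errnum);
}

/* Cheap path, run at every profiling sample: just store the pc.  */
static int
collect_pc (void *data, uintptr_t pc)
{
  struct raw_trace *t = (struct raw_trace *) data;
  if (t->n >= MAX_PCS)
    return 1;			/* non-zero stops the unwind */
  t->pcs[t->n++] = pc;
  return 0;			/* zero keeps unwinding */
}

/* Expensive path, run once when the profile is read: DWARF lookup.  */
static int
print_frame (void *data, uintptr_t pc, const char *filename, int lineno,
	     const char *function)
{
  printf ("%#lx %s %s:%d\n", (unsigned long) pc,
	  function ? function : "?", filename ? filename : "?", lineno);
  return 0;
}

int
main (void)
{
  struct backtrace_state *state
    = backtrace_create_state (NULL, 0, error_cb, NULL);
  struct raw_trace t = { .n = 0 };

  /* Sample time: raw unwind only, no file I/O.  */
  backtrace_simple (state, 0, collect_pc, error_cb, &t);

  /* Report time: symbolize the saved pcs.  */
  for (int i = 0; i < t.n; i++)
    backtrace_pcinfo (state, t.pcs[i], print_frame, error_cb, NULL);
  return 0;
}
```

The point of the split is visible in the two callbacks: `collect_pc` touches
no debug info at all, while `print_frame` triggers the DWARF reading that
backtrace_full would otherwise perform on every sample.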
Diffstat (limited to 'libgo/runtime/go-callers.c')
-rw-r--r-- | libgo/runtime/go-callers.c | 63 |
1 file changed, 62 insertions(+), 1 deletion(-)
```diff
diff --git a/libgo/runtime/go-callers.c b/libgo/runtime/go-callers.c
index 31ff474..4a9c1a7 100644
--- a/libgo/runtime/go-callers.c
+++ b/libgo/runtime/go-callers.c
@@ -63,7 +63,9 @@ callback (void *data, uintptr_t pc, const char *filename, int lineno,
 
   /* Skip thunks and recover functions.  There is no equivalent to
      these functions in the gc toolchain, so returning them here means
-     significantly different results for runtime.Caller(N).  */
+     significantly different results for runtime.Caller(N).  See also
+     similar code in runtime/mprof.go that strips out such functions
+     for block/mutex/memory profiles.  */
   if (function != NULL && !arg->keep_thunks)
     {
       const char *p;
@@ -262,3 +264,62 @@ Callers (intgo skip, struct __go_open_array pc)
 
   return ret;
 }
+
+struct callersRaw_data
+{
+  uintptr* pcbuf;
+  int skip;
+  int index;
+  int max;
+};
+
+// Callback function for backtrace_simple.  Just collect pc's.
+// Return zero to continue, non-zero to stop.
+
+static int callback_raw (void *data, uintptr_t pc)
+{
+  struct callersRaw_data *arg = (struct callersRaw_data *) data;
+
+  if (arg->skip > 0)
+    {
+      --arg->skip;
+      return 0;
+    }
+
+  /* On the call to backtrace_simple the pc value was most likely
+     decremented if there was a normal call, since the pc referred to
+     the instruction where the call returned and not the call itself.
+     This was done so that the line number referred to the call
+     instruction.  To make sure the actual pc from the call stack is
+     used, it is incremented here.
+
+     In the case of a signal, the pc was not decremented by
+     backtrace_full but still incremented here.  That doesn't really
+     hurt anything since the line number is right and the pc refers to
+     the same instruction.  */
+
+  arg->pcbuf[arg->index] = pc + 1;
+  arg->index++;
+  return arg->index >= arg->max;
+}
+
+/* runtime_callersRaw is similar to runtime_callers() above, but
+   it returns raw PC values as opposed to file/func/line locations. */
+int32
+runtime_callersRaw (int32 skip, uintptr *pcbuf, int32 m)
+{
+  struct callersRaw_data data;
+  struct backtrace_state* state;
+
+  data.pcbuf = pcbuf;
+  data.skip = skip + 1;
+  data.index = 0;
+  data.max = m;
+  runtime_xadd (&__go_runtime_in_callers, 1);
+  state = __go_get_backtrace_state ();
+  backtrace_simple (state, 0, callback_raw, error_callback, &data);
+  runtime_xadd (&__go_runtime_in_callers, -1);
+
+  return data.index;
+}
```
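
The comment in `callback_raw` is the subtle part of the new code: the
unwinder hands the callback a pc already decremented past the return
address so that line lookups land on the call instruction, and storing
`pc + 1` restores the raw return-address-style value that later
post-processing expects. A toy illustration of that convention (the
address is invented; this is not runtime code):

```c
/* Illustrates the pc round-trip used by callback_raw: the unwinder
   delivers return-address - 1, the profiler stores that + 1, and the
   symbolization pass subtracts 1 again before the DWARF lookup.  */
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uintptr_t callback_pc = 0x401233;    /* as delivered by the unwinder */
  uintptr_t stored = callback_pc + 1;  /* what callback_raw records */
  uintptr_t lookup = stored - 1;       /* what symbolization uses later */

  printf ("callback %#lx -> stored %#lx -> lookup %#lx\n",
	  (unsigned long) callback_pc, (unsigned long) stored,
	  (unsigned long) lookup);
  return 0;
}
```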