Diffstat (limited to 'libgo/runtime/proc.c')
-rw-r--r--  libgo/runtime/proc.c  58
1 file changed, 32 insertions(+), 26 deletions(-)
diff --git a/libgo/runtime/proc.c b/libgo/runtime/proc.c
index d8a26fd..246ab7d 100644
--- a/libgo/runtime/proc.c
+++ b/libgo/runtime/proc.c
@@ -2686,11 +2686,8 @@ runtime_mcount(void)
}
static struct {
- Lock;
- void (*fn)(uintptr*, int32);
+ uint32 lock;
int32 hz;
- uintptr pcbuf[TracebackMaxFrames];
- Location locbuf[TracebackMaxFrames];
} prof;
static void System(void) {}
@@ -2703,8 +2700,11 @@ runtime_sigprof()
M *mp = g->m;
int32 n, i;
bool traceback;
+ uintptr pcbuf[TracebackMaxFrames];
+ Location locbuf[TracebackMaxFrames];
+ Slice stk;
- if(prof.fn == nil || prof.hz == 0)
+ if(prof.hz == 0)
return;
if(mp == nil)
@@ -2718,12 +2718,6 @@ runtime_sigprof()
if(mp->mcache == nil)
traceback = false;
- runtime_lock(&prof);
- if(prof.fn == nil) {
- runtime_unlock(&prof);
- mp->mallocing--;
- return;
- }
n = 0;
if(runtime_atomicload(&runtime_in_callers) > 0) {
@@ -2735,34 +2729,44 @@ runtime_sigprof()
}
if(traceback) {
- n = runtime_callers(0, prof.locbuf, nelem(prof.locbuf), false);
+ n = runtime_callers(0, locbuf, nelem(locbuf), false);
for(i = 0; i < n; i++)
- prof.pcbuf[i] = prof.locbuf[i].pc;
+ pcbuf[i] = locbuf[i].pc;
}
if(!traceback || n <= 0) {
n = 2;
- prof.pcbuf[0] = (uintptr)runtime_getcallerpc(&n);
+ pcbuf[0] = (uintptr)runtime_getcallerpc(&n);
if(mp->gcing || mp->helpgc)
- prof.pcbuf[1] = (uintptr)GC;
+ pcbuf[1] = (uintptr)GC;
else
- prof.pcbuf[1] = (uintptr)System;
+ pcbuf[1] = (uintptr)System;
+ }
+
+ if (prof.hz != 0) {
+ stk.__values = &pcbuf[0];
+ stk.__count = n;
+ stk.__capacity = n;
+
+ // Simple cas-lock to coordinate with setcpuprofilerate.
+ while (!runtime_cas(&prof.lock, 0, 1)) {
+ runtime_osyield();
+ }
+ if (prof.hz != 0) {
+ runtime_cpuprofAdd(stk);
+ }
+ runtime_atomicstore(&prof.lock, 0);
}
- prof.fn(prof.pcbuf, n);
- runtime_unlock(&prof);
+
mp->mallocing--;
}
// Arrange to call fn with a traceback hz times a second.
void
-runtime_setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
+runtime_setcpuprofilerate_m(int32 hz)
{
// Force sane arguments.
if(hz < 0)
hz = 0;
- if(hz == 0)
- fn = nil;
- if(fn == nil)
- hz = 0;
// Disable preemption, otherwise we can be rescheduled to another thread
// that has profiling enabled.
@@ -2773,10 +2777,12 @@ runtime_setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
// it would deadlock.
runtime_resetcpuprofiler(0);
- runtime_lock(&prof);
- prof.fn = fn;
+ while (!runtime_cas(&prof.lock, 0, 1)) {
+ runtime_osyield();
+ }
prof.hz = hz;
- runtime_unlock(&prof);
+ runtime_atomicstore(&prof.lock, 0);
+
runtime_lock(&runtime_sched);
runtime_sched.profilehz = hz;
runtime_unlock(&runtime_sched);
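
For context, the patch drops the profiler's Lock and callback pointer in favor of a word-sized spin lock: both the SIGPROF handler (runtime_sigprof) and runtime_setcpuprofilerate_m acquire it with runtime_cas, re-check prof.hz while holding it, and release it with runtime_atomicstore, yielding with runtime_osyield between failed attempts. What follows is a minimal standalone sketch of that cas-lock pattern, assuming C11 atomics and POSIX sched_yield in place of the runtime's own helpers; every name in it (prof_lock, prof_hz, profile_sample, set_profile_rate) is illustrative and not part of libgo.

    /*
     * Sketch of a cas-based spin lock coordinating a signal-handler
     * sampler with the routine that changes the profiling rate.
     */
    #include <stdatomic.h>
    #include <sched.h>
    #include <stdint.h>

    static atomic_uint prof_lock;      /* 0 = unlocked, 1 = locked */
    static volatile int32_t prof_hz;   /* sampling rate; 0 means profiling off */

    static void prof_acquire(void)
    {
        unsigned expected = 0;
        /* Spin until the lock word goes 0 -> 1; yield between attempts. */
        while (!atomic_compare_exchange_weak(&prof_lock, &expected, 1)) {
            expected = 0;
            sched_yield();
        }
    }

    static void prof_release(void)
    {
        atomic_store(&prof_lock, 0);
    }

    /* Called from the SIGPROF handler with a captured stack of n PCs. */
    void profile_sample(uintptr_t *pcbuf, int n)
    {
        if (prof_hz == 0)
            return;              /* cheap unsynchronized fast path */
        prof_acquire();
        if (prof_hz != 0) {      /* re-check under the lock */
            /* hand pcbuf[0..n) to the profile accumulator here */
            (void)pcbuf;
            (void)n;
        }
        prof_release();
    }

    /* Called when profiling is turned on or off. */
    void set_profile_rate(int32_t hz)
    {
        if (hz < 0)
            hz = 0;
        prof_acquire();
        prof_hz = hz;
        prof_release();
    }

The double check of prof_hz (once before taking the lock, once under it) mirrors the patched runtime_sigprof: the unlocked read avoids spinning in the common case where profiling is off, and the locked re-read ensures a sample is never recorded after set_profile_rate has cleared the rate.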