path: root/libgo/runtime
author     Ian Lance Taylor <ian@gcc.gnu.org>    2012-03-02 16:38:43 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>    2012-03-02 16:38:43 +0000
commit     cbb6491d76c7aa81cdf5d3b3a81386129c5e2fce (patch)
tree       efa0c55763b34cbc633bc494c2743d1b5d9aaff3 /libgo/runtime
parent     ff2f581b00ac6759f6366c16ef902c935163aa13 (diff)
libgo: Update to weekly.2012-02-14 release.
From-SVN: r184798
Diffstat (limited to 'libgo/runtime')
-rw-r--r--  libgo/runtime/chan.c                    4
-rw-r--r--  libgo/runtime/go-assert-interface.c    11
-rw-r--r--  libgo/runtime/go-callers.c             57
-rw-r--r--  libgo/runtime/go-check-interface.c     15
-rw-r--r--  libgo/runtime/go-convert-interface.c   24
-rw-r--r--  libgo/runtime/go-main.c                 2
-rw-r--r--  libgo/runtime/go-panic.c                2
-rw-r--r--  libgo/runtime/go-panic.h               19
-rw-r--r--  libgo/runtime/go-reflect.c            192
-rw-r--r--  libgo/runtime/go-signal.c             216
-rw-r--r--  libgo/runtime/go-unreflect.c           34
-rw-r--r--  libgo/runtime/go-unsafe-new.c           7
-rw-r--r--  libgo/runtime/go-unsafe-newarray.c      8
-rw-r--r--  libgo/runtime/malloc.goc               20
-rw-r--r--  libgo/runtime/mem.c                    48
-rw-r--r--  libgo/runtime/mprof.goc                27
-rw-r--r--  libgo/runtime/proc.c                   16
-rw-r--r--  libgo/runtime/reflect.goc               4
-rw-r--r--  libgo/runtime/runtime.h                77
-rw-r--r--  libgo/runtime/signal_unix.c            64
-rw-r--r--  libgo/runtime/sigqueue.goc            120
21 files changed, 449 insertions, 518 deletions
diff --git a/libgo/runtime/chan.c b/libgo/runtime/chan.c
index 24be950..db91d2c4 100644
--- a/libgo/runtime/chan.c
+++ b/libgo/runtime/chan.c
@@ -662,6 +662,10 @@ newselect(int32 size, Select **selp)
if(size > 1)
n = size-1;
+ // allocate all the memory we need in a single allocation
+ // start with Select with size cases
+ // then lockorder with size entries
+ // then pollorder with size entries
sel = runtime_mal(sizeof(*sel) +
n*sizeof(sel->scase[0]) +
size*sizeof(sel->lockorder[0]) +
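Note: the comment added above documents why newselect makes a single runtime_mal call sized for the Select header, its size cases, the lockorder array, and the pollorder array. A minimal standalone sketch of that size arithmetic follows; the struct layouts here are simplified stand-ins for the real definitions in chan.c, not the actual ones.

/* Sketch only: simplified stand-ins for the Select/Scase layouts in
   chan.c; field names and types here are illustrative. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef struct Scase { void *chan; void *elem; int kind; } Scase;

typedef struct Select {
  int ncase;                /* number of cases */
  void **lockorder;         /* points into the same allocation */
  uint16_t *pollorder;      /* likewise */
  Scase scase[1];           /* tail holds the remaining size-1 cases */
} Select;

int main(void)
{
  int size = 4;                         /* a select statement with 4 cases */
  int n = size > 1 ? size - 1 : 0;      /* scase[1] already covers one case */
  size_t bytes = sizeof(Select)
               + n * sizeof(((Select *)0)->scase[0])
               + size * sizeof(((Select *)0)->lockorder[0])
               + size * sizeof(((Select *)0)->pollorder[0]);
  Select *sel = malloc(bytes);          /* one allocation for all four pieces */
  printf("%d cases -> one %zu-byte block\n", size, bytes);
  free(sel);
  return 0;
}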
diff --git a/libgo/runtime/go-assert-interface.c b/libgo/runtime/go-assert-interface.c
index 57a092d..94bdaee 100644
--- a/libgo/runtime/go-assert-interface.c
+++ b/libgo/runtime/go-assert-interface.c
@@ -8,6 +8,7 @@
#include "go-assert.h"
#include "go-panic.h"
#include "interface.h"
+#include "runtime.h"
/* This is called by the compiler to implement a type assertion from
one interface type to another. This returns the value that should
@@ -26,14 +27,8 @@ __go_assert_interface (const struct __go_type_descriptor *lhs_descriptor,
/* A type assertion is not permitted with a nil interface. */
- newTypeAssertionError (NULL,
- NULL,
- lhs_descriptor,
- NULL,
- NULL,
- lhs_descriptor->__reflection,
- NULL,
- &panic_arg);
+ runtime_newTypeAssertionError (NULL, NULL, lhs_descriptor->__reflection,
+ NULL, &panic_arg);
__go_panic (panic_arg);
}
diff --git a/libgo/runtime/go-callers.c b/libgo/runtime/go-callers.c
new file mode 100644
index 0000000..65babbe
--- /dev/null
+++ b/libgo/runtime/go-callers.c
@@ -0,0 +1,57 @@
+/* go-callers.c -- get callers for Go.
+
+ Copyright 2012 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file. */
+
+#include "config.h"
+
+#include "unwind.h"
+
+#include "runtime.h"
+
+/* Argument passed to backtrace function. */
+
+struct callers_data
+{
+ int skip;
+ uintptr *pcbuf;
+ int index;
+ int max;
+};
+
+static _Unwind_Reason_Code
+backtrace (struct _Unwind_Context *context, void *varg)
+{
+ struct callers_data *arg = (struct callers_data *) varg;
+ uintptr pc;
+
+ pc = _Unwind_GetIP (context);
+
+ /* FIXME: If PC is in the __morestack routine, we should ignore
+ it. */
+
+ if (arg->skip > 0)
+ --arg->skip;
+ else if (arg->index >= arg->max)
+ return _URC_END_OF_STACK;
+ else
+ {
+ arg->pcbuf[arg->index] = pc;
+ ++arg->index;
+ }
+ return _URC_NO_REASON;
+}
+
+int32
+runtime_callers (int32 skip, uintptr *pcbuf, int32 m)
+{
+ struct callers_data arg;
+
+ arg.skip = skip;
+ arg.pcbuf = pcbuf;
+ arg.index = 0;
+ arg.max = m;
+ _Unwind_Backtrace (backtrace, &arg);
+ return arg.index;
+}
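Note: runtime_callers above walks the stack with libgcc's unwind interface, skipping a requested number of frames and copying program counters into a caller-supplied buffer (proc.c below uses it to fill m->createstack). A self-contained sketch of the same _Unwind_Backtrace pattern, independent of the Go runtime:

/* Standalone sketch of the go-callers.c pattern: collect caller PCs with
   libgcc's unwind interface.  Build with gcc; no Go runtime involved. */
#include <stdio.h>
#include <stdint.h>
#include <unwind.h>

struct trace { uintptr_t pc[16]; int n; };

static _Unwind_Reason_Code
collect (struct _Unwind_Context *ctx, void *varg)
{
  struct trace *t = (struct trace *) varg;
  if (t->n >= (int) (sizeof t->pc / sizeof t->pc[0]))
    return _URC_END_OF_STACK;         /* buffer full: stop walking */
  t->pc[t->n++] = _Unwind_GetIP (ctx);
  return _URC_NO_REASON;              /* keep unwinding */
}

int
main (void)
{
  struct trace t = { {0}, 0 };
  _Unwind_Backtrace (collect, &t);    /* fills t.pc with return addresses */
  for (int i = 0; i < t.n; i++)
    printf ("frame %d: pc %#lx\n", i, (unsigned long) t.pc[i]);
  return 0;
}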
diff --git a/libgo/runtime/go-check-interface.c b/libgo/runtime/go-check-interface.c
index d2258a8..963559d 100644
--- a/libgo/runtime/go-check-interface.c
+++ b/libgo/runtime/go-check-interface.c
@@ -6,6 +6,7 @@
#include "go-panic.h"
#include "interface.h"
+#include "runtime.h"
/* Check that an interface type matches for a conversion to a
non-interface type. This panics if the types are bad. The actual
@@ -21,8 +22,8 @@ __go_check_interface_type (
{
struct __go_empty_interface panic_arg;
- newTypeAssertionError(NULL, NULL, lhs_descriptor, NULL, NULL,
- lhs_descriptor->__reflection, NULL, &panic_arg);
+ runtime_newTypeAssertionError(NULL, NULL, lhs_descriptor->__reflection,
+ NULL, &panic_arg);
__go_panic(panic_arg);
}
@@ -35,12 +36,10 @@ __go_check_interface_type (
{
struct __go_empty_interface panic_arg;
- newTypeAssertionError(rhs_inter_descriptor, rhs_descriptor,
- lhs_descriptor,
- rhs_inter_descriptor->__reflection,
- rhs_descriptor->__reflection,
- lhs_descriptor->__reflection,
- NULL, &panic_arg);
+ runtime_newTypeAssertionError(rhs_inter_descriptor->__reflection,
+ rhs_descriptor->__reflection,
+ lhs_descriptor->__reflection,
+ NULL, &panic_arg);
__go_panic(panic_arg);
}
}
diff --git a/libgo/runtime/go-convert-interface.c b/libgo/runtime/go-convert-interface.c
index 259456c..8ce82ea 100644
--- a/libgo/runtime/go-convert-interface.c
+++ b/libgo/runtime/go-convert-interface.c
@@ -8,6 +8,7 @@
#include "go-assert.h"
#include "go-panic.h"
#include "interface.h"
+#include "runtime.h"
/* This is called when converting one interface type into another
interface type. LHS_DESCRIPTOR is the type descriptor of the
@@ -55,14 +56,10 @@ __go_convert_interface_2 (const struct __go_type_descriptor *lhs_descriptor,
if (may_fail)
return NULL;
- newTypeAssertionError (NULL,
- rhs_descriptor,
- lhs_descriptor,
- NULL,
- rhs_descriptor->__reflection,
- lhs_descriptor->__reflection,
- lhs_methods[0].__name,
- &panic_arg);
+ runtime_newTypeAssertionError (NULL, rhs_descriptor->__reflection,
+ lhs_descriptor->__reflection,
+ lhs_methods[0].__name,
+ &panic_arg);
__go_panic (panic_arg);
}
@@ -100,14 +97,9 @@ __go_convert_interface_2 (const struct __go_type_descriptor *lhs_descriptor,
if (may_fail)
return NULL;
- newTypeAssertionError (NULL,
- rhs_descriptor,
- lhs_descriptor,
- NULL,
- rhs_descriptor->__reflection,
- lhs_descriptor->__reflection,
- p_lhs_method->__name,
- &panic_arg);
+ runtime_newTypeAssertionError (NULL, rhs_descriptor->__reflection,
+ lhs_descriptor->__reflection,
+ p_lhs_method->__name, &panic_arg);
__go_panic (panic_arg);
}
diff --git a/libgo/runtime/go-main.c b/libgo/runtime/go-main.c
index 45467ed..5871981 100644
--- a/libgo/runtime/go-main.c
+++ b/libgo/runtime/go-main.c
@@ -40,7 +40,7 @@ static void mainstart (void *);
int
main (int argc, char **argv)
{
- runtime_initsig (0);
+ runtime_initsig ();
runtime_args (argc, (byte **) argv);
runtime_osinit ();
runtime_schedinit ();
diff --git a/libgo/runtime/go-panic.c b/libgo/runtime/go-panic.c
index 24dc74b..7ba426f5 100644
--- a/libgo/runtime/go-panic.c
+++ b/libgo/runtime/go-panic.c
@@ -27,7 +27,7 @@ __printpanics (struct __go_panic_stack *p)
fprintf (stderr, "\t");
}
fprintf (stderr, "panic: ");
- printany (p->__arg);
+ runtime_printany (p->__arg);
if (p->__was_recovered)
fprintf (stderr, " [recovered]");
fputc ('\n', stderr);
diff --git a/libgo/runtime/go-panic.h b/libgo/runtime/go-panic.h
index 41996e4..7641149 100644
--- a/libgo/runtime/go-panic.h
+++ b/libgo/runtime/go-panic.h
@@ -40,23 +40,4 @@ extern struct __go_empty_interface __go_recover (void);
extern void __go_unwind_stack (void);
-/* Functions defined in libgo/go/runtime/error.go. */
-
-extern void newTypeAssertionError(const struct __go_type_descriptor *pt1,
- const struct __go_type_descriptor *pt2,
- const struct __go_type_descriptor *pt3,
- const struct __go_string *ps1,
- const struct __go_string *ps2,
- const struct __go_string *ps3,
- const struct __go_string *pmeth,
- struct __go_empty_interface *ret)
- __asm__ ("libgo_runtime.runtime.NewTypeAssertionError");
-
-extern void runtime_newErrorString(struct __go_string,
- struct __go_empty_interface *)
- __asm__ ("libgo_runtime.runtime.NewErrorString");
-
-extern void printany(struct __go_empty_interface)
- __asm__ ("libgo_runtime.runtime.Printany");
-
#endif /* !defined(LIBGO_GO_PANIC_H) */
diff --git a/libgo/runtime/go-reflect.c b/libgo/runtime/go-reflect.c
deleted file mode 100644
index d14a580..0000000
--- a/libgo/runtime/go-reflect.c
+++ /dev/null
@@ -1,192 +0,0 @@
-/* go-reflect.c -- implement unsafe.Reflect and unsafe.Typeof for Go.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <stdlib.h>
-#include <stdint.h>
-
-#include "runtime.h"
-#include "interface.h"
-#include "go-alloc.h"
-#include "go-string.h"
-#include "go-type.h"
-
-/* For field alignment. */
-
-struct field_align
-{
- char c;
- struct __go_type_descriptor *p;
-};
-
-/* The type descriptors in the runtime package. */
-
-extern const struct __go_type_descriptor ptr_bool_descriptor
- asm ("__go_td_pN30_libgo_runtime.runtime.BoolType");
-extern const struct __go_type_descriptor ptr_float_descriptor
- asm ("__go_td_pN31_libgo_runtime.runtime.FloatType");
-extern const struct __go_type_descriptor ptr_complex_descriptor
- asm ("__go_td_pN33_libgo_runtime.runtime.ComplexType");
-extern const struct __go_type_descriptor ptr_int_descriptor
- asm ("__go_td_pN29_libgo_runtime.runtime.IntType");
-extern const struct __go_type_descriptor ptr_uint_descriptor
- asm ("__go_td_pN30_libgo_runtime.runtime.UintType");
-extern const struct __go_type_descriptor ptr_string_descriptor
- asm ("__go_td_pN32_libgo_runtime.runtime.StringType");
-extern const struct __go_type_descriptor ptr_unsafe_pointer_decriptor
- asm ("__go_td_pN39_libgo_runtime.runtime.UnsafePointerType");
-extern const struct __go_type_descriptor ptr_array_descriptor
- asm ("__go_td_pN31_libgo_runtime.runtime.ArrayType");
-extern const struct __go_type_descriptor ptr_slice_descriptor
- asm ("__go_td_pN31_libgo_runtime.runtime.SliceType");
-extern const struct __go_type_descriptor ptr_chan_descriptor
- asm ("__go_td_pN30_libgo_runtime.runtime.ChanType");
-extern const struct __go_type_descriptor ptr_func_descriptor
- asm ("__go_td_pN30_libgo_runtime.runtime.FuncType");
-extern const struct __go_type_descriptor ptr_interface_descriptor
- asm ("__go_td_pN35_libgo_runtime.runtime.InterfaceType");
-extern const struct __go_type_descriptor ptr_map_descriptor
- asm ("__go_td_pN29_libgo_runtime.runtime.MapType");
-extern const struct __go_type_descriptor ptr_ptr_descriptor
- asm ("__go_td_pN29_libgo_runtime.runtime.PtrType");
-extern const struct __go_type_descriptor ptr_struct_descriptor
- asm ("__go_td_pN32_libgo_runtime.runtime.StructType");
-
-const struct __go_type_descriptor *
-get_descriptor (int code)
-{
- switch (code & GO_CODE_MASK)
- {
- case GO_BOOL:
- return &ptr_bool_descriptor;
- case GO_FLOAT32:
- case GO_FLOAT64:
- return &ptr_float_descriptor;
- case GO_COMPLEX64:
- case GO_COMPLEX128:
- return &ptr_complex_descriptor;
- case GO_INT16:
- case GO_INT32:
- case GO_INT64:
- case GO_INT8:
- case GO_INT:
- return &ptr_int_descriptor;
- case GO_UINT16:
- case GO_UINT32:
- case GO_UINT64:
- case GO_UINT8:
- case GO_UINTPTR:
- case GO_UINT:
- return &ptr_uint_descriptor;
- case GO_STRING:
- return &ptr_string_descriptor;
- case GO_UNSAFE_POINTER:
- return &ptr_unsafe_pointer_decriptor;
- case GO_ARRAY:
- return &ptr_array_descriptor;
- case GO_SLICE:
- return &ptr_slice_descriptor;
- case GO_CHAN:
- return &ptr_chan_descriptor;
- case GO_FUNC:
- return &ptr_func_descriptor;
- case GO_INTERFACE:
- return &ptr_interface_descriptor;
- case GO_MAP:
- return &ptr_map_descriptor;
- case GO_PTR:
- return &ptr_ptr_descriptor;
- case GO_STRUCT:
- return &ptr_struct_descriptor;
- default:
- abort ();
- }
-}
-
-/* Implement unsafe.Reflect. */
-
-struct reflect_ret
-{
- struct __go_empty_interface rettype;
- void *addr;
-};
-
-struct reflect_ret Reflect (struct __go_empty_interface)
- asm ("libgo_unsafe.unsafe.Reflect");
-
-struct reflect_ret
-Reflect (struct __go_empty_interface e)
-{
- struct reflect_ret ret;
-
- if (((uintptr_t) e.__type_descriptor & reflectFlags) != 0)
- runtime_panicstring ("invalid interface value");
-
- if (e.__type_descriptor == NULL)
- {
- ret.rettype.__type_descriptor = NULL;
- ret.rettype.__object = NULL;
- ret.addr = NULL;
- }
- else
- {
- size_t size;
-
- ret.rettype.__type_descriptor =
- get_descriptor (e.__type_descriptor->__code);
-
- /* This memcpy is really just an assignment of a const pointer
- to a non-const pointer. FIXME: We should canonicalize this
- pointer, so that for a given type we always return the same
- pointer. */
- __builtin_memcpy (&ret.rettype.__object, &e.__type_descriptor,
- sizeof (void *));
-
- /* Make a copy of the value. */
- size = e.__type_descriptor->__size;
- if (size <= sizeof (uint64_t))
- ret.addr = __go_alloc (sizeof (uint64_t));
- else
- ret.addr = __go_alloc (size);
- if (__go_is_pointer_type (e.__type_descriptor))
- *(void **) ret.addr = e.__object;
- else
- __builtin_memcpy (ret.addr, e.__object, size);
- }
-
- return ret;
-}
-
-/* Implement unsafe.Typeof. */
-
-struct __go_empty_interface Typeof (struct __go_empty_interface)
- asm ("libgo_unsafe.unsafe.Typeof");
-
-struct __go_empty_interface
-Typeof (const struct __go_empty_interface e)
-{
- struct __go_empty_interface ret;
-
- if (((uintptr_t) e.__type_descriptor & reflectFlags) != 0)
- runtime_panicstring ("invalid interface value");
-
- if (e.__type_descriptor == NULL)
- {
- ret.__type_descriptor = NULL;
- ret.__object = NULL;
- }
- else
- {
- ret.__type_descriptor = get_descriptor (e.__type_descriptor->__code);
-
- /* This memcpy is really just an assignment of a const pointer
- to a non-const pointer. FIXME: We should canonicalize this
- pointer, so that for a given type we always return the same
- pointer. */
- __builtin_memcpy (&ret.__object, &e.__type_descriptor, sizeof (void *));
- }
-
- return ret;
-}
diff --git a/libgo/runtime/go-signal.c b/libgo/runtime/go-signal.c
index fd48f4b..b698ae2 100644
--- a/libgo/runtime/go-signal.c
+++ b/libgo/runtime/go-signal.c
@@ -25,11 +25,11 @@ extern void __splitstack_setcontext(void *context[10]);
#endif
-#define C SigCatch
-#define I SigIgnore
-#define R SigRestart
-#define Q SigQueue
+#define N SigNotify
+#define K SigKill
+#define T SigThrow
#define P SigPanic
+#define D SigDefault
/* Signal actions. This collects the sigtab tables for several
different targets from the master library. SIGKILL, SIGCONT, and
@@ -38,105 +38,105 @@ extern void __splitstack_setcontext(void *context[10]);
SigTab runtime_sigtab[] = {
#ifdef SIGHUP
- { SIGHUP, Q + R },
+ { SIGHUP, N + K },
#endif
#ifdef SIGINT
- { SIGINT, Q + R },
+ { SIGINT, N + K },
#endif
#ifdef SIGQUIT
- { SIGQUIT, C },
+ { SIGQUIT, N + T },
#endif
#ifdef SIGILL
- { SIGILL, C },
+ { SIGILL, T },
#endif
#ifdef SIGTRAP
- { SIGTRAP, C },
+ { SIGTRAP, T },
#endif
#ifdef SIGABRT
- { SIGABRT, C },
+ { SIGABRT, N + T },
#endif
#ifdef SIGBUS
- { SIGBUS, C + P },
+ { SIGBUS, P },
#endif
#ifdef SIGFPE
- { SIGFPE, C + P },
+ { SIGFPE, P },
#endif
#ifdef SIGUSR1
- { SIGUSR1, Q + I + R },
+ { SIGUSR1, N },
#endif
#ifdef SIGSEGV
- { SIGSEGV, C + P },
+ { SIGSEGV, P },
#endif
#ifdef SIGUSR2
- { SIGUSR2, Q + I + R },
+ { SIGUSR2, N },
#endif
#ifdef SIGPIPE
- { SIGPIPE, I },
+ { SIGPIPE, N },
#endif
#ifdef SIGALRM
- { SIGALRM, Q + I + R },
+ { SIGALRM, N },
#endif
#ifdef SIGTERM
- { SIGTERM, Q + R },
+ { SIGTERM, N + K },
#endif
#ifdef SIGSTKFLT
- { SIGSTKFLT, C },
+ { SIGSTKFLT, T },
#endif
#ifdef SIGCHLD
- { SIGCHLD, Q + I + R },
+ { SIGCHLD, N },
#endif
#ifdef SIGTSTP
- { SIGTSTP, Q + I + R },
+ { SIGTSTP, N + D },
#endif
#ifdef SIGTTIN
- { SIGTTIN, Q + I + R },
+ { SIGTTIN, N + D },
#endif
#ifdef SIGTTOU
- { SIGTTOU, Q + I + R },
+ { SIGTTOU, N + D },
#endif
#ifdef SIGURG
- { SIGURG, Q + I + R },
+ { SIGURG, N },
#endif
#ifdef SIGXCPU
- { SIGXCPU, Q + I + R },
+ { SIGXCPU, N },
#endif
#ifdef SIGXFSZ
- { SIGXFSZ, Q + I + R },
+ { SIGXFSZ, N },
#endif
#ifdef SIGVTALRM
- { SIGVTALRM, Q + I + R },
+ { SIGVTALRM, N },
#endif
#ifdef SIGPROF
- { SIGPROF, Q + I + R },
+ { SIGPROF, N },
#endif
#ifdef SIGWINCH
- { SIGWINCH, Q + I + R },
+ { SIGWINCH, N },
#endif
#ifdef SIGIO
- { SIGIO, Q + I + R },
+ { SIGIO, N },
#endif
#ifdef SIGPWR
- { SIGPWR, Q + I + R },
+ { SIGPWR, N },
#endif
#ifdef SIGSYS
- { SIGSYS, C },
+ { SIGSYS, N },
#endif
#ifdef SIGEMT
- { SIGEMT, C },
+ { SIGEMT, T },
#endif
#ifdef SIGINFO
- { SIGINFO, Q + I + R },
+ { SIGINFO, N },
#endif
#ifdef SIGTHR
- { SIGTHR, Q + I + R },
+ { SIGTHR, N },
#endif
{ -1, 0 }
};
-#undef C
-#undef I
-#undef R
-#undef Q
+#undef N
+#undef K
+#undef T
#undef P
+#undef D
/* Handle a signal, for cases where we don't panic. We can split the
stack here. */
@@ -158,21 +158,24 @@ sig_handler (int sig)
for (i = 0; runtime_sigtab[i].sig != -1; ++i)
{
struct sigaction sa;
+ SigTab *t;
- if (runtime_sigtab[i].sig != sig)
+ t = &runtime_sigtab[i];
+
+ if (t->sig != sig)
continue;
- if ((runtime_sigtab[i].flags & SigQueue) != 0)
+ if ((t->flags & SigNotify) != 0)
{
- if (__go_sigsend (sig)
- || (runtime_sigtab[sig].flags & SigIgnore) != 0)
+ if (__go_sigsend (sig))
return;
- runtime_exit (2); // SIGINT, SIGTERM, etc
}
-
- if (runtime_panicking)
+ if ((t->flags & SigKill) != 0)
runtime_exit (2);
- runtime_panicking = 1;
+ if ((t->flags & SigThrow) == 0)
+ return;
+
+ runtime_startpanic ();
/* We should do a stack backtrace here. Until we can do that,
we reraise the signal in order to get a slightly better
@@ -227,7 +230,7 @@ static void
sig_panic_info_handler (int sig, siginfo_t *info,
void *context __attribute__ ((unused)))
{
- if (runtime_g () == NULL)
+ if (runtime_g () == NULL || info->si_code == SI_USER)
{
sig_handler (sig);
return;
@@ -316,16 +319,6 @@ sig_panic_handler (int sig)
#endif /* !defined (SA_SIGINFO) */
-/* Ignore a signal. This is called on the alternate signal stack so
- it may not split the stack. */
-
-static void sig_ignore (int) __attribute__ ((no_split_stack));
-
-static void
-sig_ignore (int sig __attribute__ ((unused)))
-{
-}
-
/* A signal handler used for signals which are not going to panic.
This is called on the alternate signal stack so it may not split
the stack. */
@@ -376,100 +369,41 @@ sig_tramp (int sig)
}
}
-/* Initialize signal handling for Go. This is called when the program
- starts. */
-
void
-runtime_initsig (int32 queue)
+runtime_setsig (int32 i, bool def __attribute__ ((unused)), bool restart)
{
struct sigaction sa;
- int i;
-
- siginit ();
+ int r;
+ SigTab *t;
memset (&sa, 0, sizeof sa);
- i = sigfillset (&sa.sa_mask);
- __go_assert (i == 0);
-
- for (i = 0; runtime_sigtab[i].sig != -1; ++i)
- {
- if (runtime_sigtab[i].flags == 0)
- continue;
- if ((runtime_sigtab[i].flags & SigQueue) != queue)
- continue;
-
- if ((runtime_sigtab[i].flags & (SigCatch | SigQueue)) != 0)
- {
- if ((runtime_sigtab[i].flags & SigPanic) == 0)
- {
- sa.sa_flags = SA_ONSTACK;
- sa.sa_handler = sig_tramp;
- }
- else
- {
-#ifdef SA_SIGINFO
- sa.sa_flags = SA_SIGINFO;
- sa.sa_sigaction = sig_panic_info_handler;
-#else
- sa.sa_flags = 0;
- sa.sa_handler = sig_panic_handler;
-#endif
- }
- }
- else
- {
- sa.sa_flags = SA_ONSTACK;
- sa.sa_handler = sig_ignore;
- }
+ r = sigfillset (&sa.sa_mask);
+ __go_assert (r == 0);
- if ((runtime_sigtab[i].flags & SigRestart) != 0)
- sa.sa_flags |= SA_RESTART;
+ t = &runtime_sigtab[i];
- if (sigaction (runtime_sigtab[i].sig, &sa, NULL) != 0)
- __go_assert (0);
- }
-}
-
-void
-runtime_resetcpuprofiler(int32 hz)
-{
-#ifdef SIGPROF
- struct itimerval it;
- struct sigaction sa;
- int i;
-
- memset (&it, 0, sizeof it);
-
- memset (&sa, 0, sizeof sa);
- i = sigfillset (&sa.sa_mask);
- __go_assert (i == 0);
-
- if (hz == 0)
+ if ((t->flags & SigPanic) == 0)
{
- i = setitimer (ITIMER_PROF, &it, NULL);
- __go_assert (i == 0);
-
- sa.sa_handler = SIG_IGN;
- i = sigaction (SIGPROF, &sa, NULL);
- __go_assert (i == 0);
+ sa.sa_flags = SA_ONSTACK;
+ sa.sa_handler = sig_tramp;
}
else
{
- sa.sa_handler = sig_handler;
- sa.sa_flags = SA_RESTART;
- i = sigaction (SIGPROF, &sa, NULL);
- __go_assert (i == 0);
-
- it.it_interval.tv_sec = 0;
- it.it_interval.tv_usec = 1000000 / hz;
- it.it_value = it.it_interval;
- i = setitimer (ITIMER_PROF, &it, NULL);
- __go_assert (i == 0);
- }
+#ifdef SA_SIGINFO
+ sa.sa_flags = SA_SIGINFO;
+ sa.sa_sigaction = sig_panic_info_handler;
+#else
+ sa.sa_flags = 0;
+ sa.sa_handler = sig_panic_handler;
#endif
+ }
+
+ if (restart)
+ sa.sa_flags |= SA_RESTART;
- runtime_m()->profilehz = hz;
+ if (sigaction (t->sig, &sa, NULL) != 0)
+ __go_assert (0);
}
/* Used by the os package to raise SIGPIPE. */
@@ -494,3 +428,9 @@ os_sigpipe (void)
raise (SIGPIPE);
}
+
+void
+runtime_setprof(bool on)
+{
+ USED(on);
+}
diff --git a/libgo/runtime/go-unreflect.c b/libgo/runtime/go-unreflect.c
deleted file mode 100644
index 6f1ea73..0000000
--- a/libgo/runtime/go-unreflect.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/* go-unreflect.c -- implement unsafe.Unreflect for Go.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-alloc.h"
-#include "go-type.h"
-#include "interface.h"
-
-/* Implement unsafe.Unreflect. */
-
-struct __go_empty_interface Unreflect (struct __go_empty_interface type,
- void *object)
- asm ("libgo_unsafe.unsafe.Unreflect");
-
-struct __go_empty_interface
-Unreflect (struct __go_empty_interface type, void *object)
-{
- struct __go_empty_interface ret;
-
- if (((uintptr_t) type.__type_descriptor & reflectFlags) != 0)
- runtime_panicstring ("invalid interface value");
-
- /* FIXME: We should check __type_descriptor to verify that this is
- really a type descriptor. */
- ret.__type_descriptor = type.__object;
- if (__go_is_pointer_type (ret.__type_descriptor))
- ret.__object = *(void **) object;
- else
- ret.__object = object;
- return ret;
-}
diff --git a/libgo/runtime/go-unsafe-new.c b/libgo/runtime/go-unsafe-new.c
index c60e05b..2b9e044 100644
--- a/libgo/runtime/go-unsafe-new.c
+++ b/libgo/runtime/go-unsafe-new.c
@@ -9,15 +9,16 @@
#include "go-type.h"
#include "interface.h"
-/* Implement unsafe.New. */
+/* Implement unsafe_New, called from the reflect package. */
-void *New (struct __go_empty_interface type) asm ("libgo_unsafe.unsafe.New");
+void *unsafe_New (struct __go_empty_interface type)
+ asm ("libgo_reflect.reflect.unsafe_New");
/* The dynamic type of the argument will be a pointer to a type
descriptor. */
void *
-New (struct __go_empty_interface type)
+unsafe_New (struct __go_empty_interface type)
{
const struct __go_type_descriptor *descriptor;
diff --git a/libgo/runtime/go-unsafe-newarray.c b/libgo/runtime/go-unsafe-newarray.c
index 470ed2d..f4c5595 100644
--- a/libgo/runtime/go-unsafe-newarray.c
+++ b/libgo/runtime/go-unsafe-newarray.c
@@ -9,16 +9,16 @@
#include "go-type.h"
#include "interface.h"
-/* Implement unsafe.NewArray. */
+/* Implement unsafe_NewArray, called from the reflect package. */
-void *NewArray (struct __go_empty_interface type, int n)
- asm ("libgo_unsafe.unsafe.NewArray");
+void *unsafe_NewArray (struct __go_empty_interface type, int n)
+ asm ("libgo_reflect.reflect.unsafe_NewArray");
/* The dynamic type of the argument will be a pointer to a type
descriptor. */
void *
-NewArray (struct __go_empty_interface type, int n)
+unsafe_NewArray (struct __go_empty_interface type, int n)
{
const struct __go_type_descriptor *descriptor;
diff --git a/libgo/runtime/malloc.goc b/libgo/runtime/malloc.goc
index 9ad9eda..8b48c86 100644
--- a/libgo/runtime/malloc.goc
+++ b/libgo/runtime/malloc.goc
@@ -279,6 +279,15 @@ runtime_mallocinit(void)
runtime_sizeof_C_MStats = sizeof(MStats);
+ p = nil;
+ arena_size = 0;
+ bitmap_size = 0;
+
+ // for 64-bit build
+ USED(p);
+ USED(arena_size);
+ USED(bitmap_size);
+
runtime_InitSizes();
// Set up the allocation arena, a contiguous area of memory where
@@ -307,12 +316,13 @@ runtime_mallocinit(void)
// Actually we reserve 17 GB (because the bitmap ends up being 1 GB)
// but it hardly matters: fc is not valid UTF-8 either, and we have to
// allocate 15 GB before we get that far.
+ //
+ // If this fails we fall back to the 32 bit memory mechanism
arena_size = (uintptr)(16LL<<30);
bitmap_size = arena_size / (sizeof(void*)*8/4);
p = runtime_SysReserve((void*)(0x00f8ULL<<32), bitmap_size + arena_size);
- if(p == nil)
- runtime_throw("runtime: cannot reserve arena virtual address space");
- } else {
+ }
+ if (p == nil) {
// On a 32-bit machine, we can't typically get away
// with a giant virtual address space reservation.
// Instead we map the memory information bitmap
@@ -379,8 +389,8 @@ runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
return p;
}
- // On 64-bit, our reservation is all we have.
- if(sizeof(void*) == 8)
+ // If using 64-bit, our reservation is all we have.
+ if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU)
return nil;
// On 32-bit, once the reservation is gone we can
diff --git a/libgo/runtime/mem.c b/libgo/runtime/mem.c
index a1c5eaa..05f071a 100644
--- a/libgo/runtime/mem.c
+++ b/libgo/runtime/mem.c
@@ -39,6 +39,22 @@ addrspace_free(void *v __attribute__ ((unused)), uintptr n __attribute__ ((unuse
return 1;
}
+static void *
+mmap_fixed(byte *v, uintptr n, int32 prot, int32 flags, int32 fd, uint32 offset)
+{
+ void *p;
+
+ p = runtime_mmap(v, n, prot, flags, fd, offset);
+ if(p != v && addrspace_free(v, n)) {
+ // On some systems, mmap ignores v without
+ // MAP_FIXED, so retry if the address space is free.
+ if(p != MAP_FAILED)
+ runtime_munmap(p, n);
+ p = runtime_mmap(v, n, prot, flags|MAP_FIXED, fd, offset);
+ }
+ return p;
+}
+
void*
runtime_SysAlloc(uintptr n)
{
@@ -91,12 +107,6 @@ runtime_SysReserve(void *v, uintptr n)
int fd = -1;
void *p;
- // On 64-bit, people with ulimit -v set complain if we reserve too
- // much address space. Instead, assume that the reservation is okay
- // and check the assumption in SysMap.
- if(sizeof(void*) == 8)
- return v;
-
#ifdef USE_DEV_ZERO
if (dev_zero == -1) {
dev_zero = open("/dev/zero", O_RDONLY);
@@ -108,10 +118,21 @@ runtime_SysReserve(void *v, uintptr n)
fd = dev_zero;
#endif
+ // On 64-bit, people with ulimit -v set complain if we reserve too
+ // much address space. Instead, assume that the reservation is okay
+ // if we can reserve at least 64K and check the assumption in SysMap.
+ // Only user-mode Linux (UML) rejects these requests.
+ if(sizeof(void*) == 8 && (uintptr)v >= 0xffffffffU) {
+ p = mmap_fixed(v, 64<<10, PROT_NONE, MAP_ANON|MAP_PRIVATE, fd, 0);
+ if (p != v)
+ return nil;
+ runtime_munmap(p, 64<<10);
+ return v;
+ }
+
p = runtime_mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, fd, 0);
- if((uintptr)p < 4096 || -(uintptr)p < 4096) {
+ if(p == MAP_FAILED)
return nil;
- }
return p;
}
@@ -135,13 +156,10 @@ runtime_SysMap(void *v, uintptr n)
#endif
// On 64-bit, we don't actually have v reserved, so tread carefully.
- if(sizeof(void*) == 8) {
- p = runtime_mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, fd, 0);
- if(p != v && addrspace_free(v, n)) {
- // On some systems, mmap ignores v without
- // MAP_FIXED, so retry if the address space is free.
- p = runtime_mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_FIXED|MAP_PRIVATE, fd, 0);
- }
+ if(sizeof(void*) == 8 && (uintptr)v >= 0xffffffffU) {
+ p = mmap_fixed(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, fd, 0);
+ if(p == MAP_FAILED && errno == ENOMEM)
+ runtime_throw("runtime: out of memory");
if(p != v) {
runtime_printf("runtime: address space conflict: map(%p) = %p\n", v, p);
runtime_throw("runtime: address space conflict");
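Note: the new mmap_fixed helper captures a pattern used twice in mem.c: ask mmap for a hint address, and if the kernel ignored the hint while the range still looks free, drop the stray mapping and retry with MAP_FIXED. A standalone sketch of that retry, with the addrspace_free check omitted and MAP_ANONYMOUS in place of MAP_ANON:

/* Standalone sketch of the mmap_fixed retry; the real helper also checks
   addrspace_free before forcing the address with MAP_FIXED. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
  void *hint = (void *) 0x10000000000UL;   /* arbitrary 64-bit hint address */
  size_t len = 64 << 10;

  void *p = mmap(hint, len, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (p != hint) {
    /* Kernel ignored the hint (or failed); clean up and insist. */
    if (p != MAP_FAILED)
      munmap(p, len);
    p = mmap(hint, len, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
  }
  printf("asked for %p, got %p\n", hint, p);
  if (p != MAP_FAILED)
    munmap(p, len);
  return 0;
}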
diff --git a/libgo/runtime/mprof.goc b/libgo/runtime/mprof.goc
index d143d19..95e05fa 100644
--- a/libgo/runtime/mprof.goc
+++ b/libgo/runtime/mprof.goc
@@ -240,7 +240,7 @@ runtime_MProf_Free(void *p, uintptr size)
// Go interface to profile data. (Declared in extern.go)
// Assumes Go sizeof(int) == sizeof(int32)
-// Must match MemProfileRecord in extern.go.
+// Must match MemProfileRecord in debug.go.
typedef struct Record Record;
struct Record {
int64 alloc_bytes, free_bytes;
@@ -292,3 +292,28 @@ runtime_MProf_Mark(void (*scan)(byte *, int64))
scan((byte*)&addrhash, sizeof addrhash);
scan((byte*)&addrfree, sizeof addrfree);
}
+
+// Must match ThreadProfileRecord in debug.go.
+typedef struct TRecord TRecord;
+struct TRecord {
+ uintptr stk[32];
+};
+
+func ThreadProfile(p Slice) (n int32, ok bool) {
+ TRecord *r;
+ M *first, *m;
+
+ first = runtime_atomicloadp(&runtime_allm);
+ n = 0;
+ for(m=first; m; m=m->alllink)
+ n++;
+ ok = false;
+ if(n <= p.__count) {
+ ok = true;
+ r = (TRecord*)p.__values;
+ for(m=first; m; m=m->alllink) {
+ runtime_memmove(r->stk, m->createstack, sizeof r->stk);
+ r++;
+ }
+ }
+}
diff --git a/libgo/runtime/proc.c b/libgo/runtime/proc.c
index d7e1e5f..a4e4588 100644
--- a/libgo/runtime/proc.c
+++ b/libgo/runtime/proc.c
@@ -536,18 +536,20 @@ runtime_idlegoroutine(void)
static void
mcommoninit(M *m)
{
- // Add to runtime_allm so garbage collector doesn't free m
- // when it is just in a register or thread-local storage.
- m->alllink = runtime_allm;
- // runtime_Cgocalls() iterates over allm w/o schedlock,
- // so we need to publish it safely.
- runtime_atomicstorep((void**)&runtime_allm, m);
-
m->id = runtime_sched.mcount++;
m->fastrand = 0x49f6428aUL + m->id + runtime_cputicks();
if(m->mcache == nil)
m->mcache = runtime_allocmcache();
+
+ runtime_callers(1, m->createstack, nelem(m->createstack));
+
+ // Add to runtime_allm so garbage collector doesn't free m
+ // when it is just in a register or thread-local storage.
+ m->alllink = runtime_allm;
+ // runtime_Cgocalls() iterates over allm w/o schedlock,
+ // so we need to publish it safely.
+ runtime_atomicstorep(&runtime_allm, m);
}
// Try to increment mcpu. Report whether succeeded.
diff --git a/libgo/runtime/reflect.goc b/libgo/runtime/reflect.goc
index d3cde7c..447b786 100644
--- a/libgo/runtime/reflect.goc
+++ b/libgo/runtime/reflect.goc
@@ -17,10 +17,10 @@ func ifaceE2I(inter *Type, e Eface, ret *Iface) {
t = e.__type_descriptor;
if(t == nil) {
// explicit conversions require non-nil interface value.
- newTypeAssertionError(nil, nil, inter,
+ runtime_newTypeAssertionError(
nil, nil, inter->__reflection,
nil, &err);
- __go_panic(err);
+ runtime_panic(err);
}
ret->__object = e.__object;
ret->__methods = __go_convert_interface(inter, t);
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index 5a64605..713af17 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -174,6 +174,7 @@ struct M
MCache *mcache;
G* lockedg;
G* idleg;
+ uintptr createstack[32]; // Stack that created this thread.
M* nextwaitm; // next M waiting for lock
uintptr waitsema; // semaphore for parking on locks
uint32 waitsemacount;
@@ -187,13 +188,17 @@ struct SigTab
};
enum
{
- SigCatch = 1<<0,
- SigIgnore = 1<<1,
- SigRestart = 1<<2,
- SigQueue = 1<<3,
- SigPanic = 1<<4,
+ SigNotify = 1<<0, // let signal.Notify have signal, even if from kernel
+ SigKill = 1<<1, // if signal.Notify doesn't take it, exit quietly
+ SigThrow = 1<<2, // if signal.Notify doesn't take it, exit loudly
+ SigPanic = 1<<3, // if the signal is from the kernel, panic
+ SigDefault = 1<<4, // if the signal isn't explicitly requested, don't monitor it
};
+#ifndef NSIG
+#define NSIG 32
+#endif
+
/* Macros. */
#ifdef GOOS_windows
@@ -271,7 +276,7 @@ void runtime_throw(const char*) __attribute__ ((noreturn));
void runtime_panicstring(const char*) __attribute__ ((noreturn));
void* runtime_mal(uintptr);
void runtime_schedinit(void);
-void runtime_initsig(int32);
+void runtime_initsig(void);
String runtime_gostringnocopy(const byte*);
void* runtime_mstart(void*);
G* runtime_malg(int32, byte**, size_t*);
@@ -285,6 +290,7 @@ void runtime_entersyscall(void) __asm__("libgo_syscall.syscall.entersyscall");
void runtime_exitsyscall(void) __asm__("libgo_syscall.syscall.exitsyscall");
void siginit(void);
bool __go_sigsend(int32 sig);
+int32 runtime_callers(int32, uintptr*, int32);
int64 runtime_nanotime(void);
int64 runtime_cputicks(void);
@@ -336,9 +342,25 @@ void runtime_futexsleep(uint32*, uint32, int64);
void runtime_futexwakeup(uint32*, uint32);
/*
+ * low level C-called
+ */
+#define runtime_mmap mmap
+#define runtime_munmap munmap
+#define runtime_madvise madvise
+#define runtime_memclr(buf, size) __builtin_memset((buf), 0, (size))
+
+#ifdef __rtems__
+void __wrap_rtems_task_variable_add(void **);
+#endif
+
+/*
* runtime go-called
*/
void runtime_panic(Eface);
+struct __go_func_type;
+void reflect_call(const struct __go_func_type *, const void *, _Bool, _Bool,
+ void **, void **)
+ asm ("libgo_reflect.reflect.call");
/* Functions. */
#define runtime_panic __go_panic
@@ -374,29 +396,40 @@ void runtime_resetcpuprofiler(int32);
void runtime_setcpuprofilerate(void(*)(uintptr*, int32), int32);
void runtime_usleep(uint32);
+/*
+ * runtime c-called (but written in Go)
+ */
+void runtime_newError(String, Eface*);
+void runtime_printany(Eface)
+ __asm__("libgo_runtime.runtime.Printany");
+void runtime_newTypeAssertionError(const String*, const String*, const String*, const String*, Eface*)
+ __asm__("libgo_runtime.runtime.NewTypeAssertionError");
+void runtime_newErrorString(String, Eface*)
+ __asm__("libgo_runtime.runtime.NewErrorString");
+
+/*
+ * wrapped for go users
+ */
void runtime_semacquire(uint32 volatile *);
void runtime_semrelease(uint32 volatile *);
+String runtime_signame(int32 sig);
int32 runtime_gomaxprocsfunc(int32 n);
void runtime_procyield(uint32);
void runtime_osyield(void);
void runtime_LockOSThread(void) __asm__("libgo_runtime.runtime.LockOSThread");
void runtime_UnlockOSThread(void) __asm__("libgo_runtime.runtime.UnlockOSThread");
-/*
- * low level C-called
- */
-#define runtime_mmap mmap
-#define runtime_munmap munmap
-#define runtime_madvise madvise
-#define runtime_memclr(buf, size) __builtin_memset((buf), 0, (size))
-
-struct __go_func_type;
-void reflect_call(const struct __go_func_type *, const void *, _Bool, _Bool,
- void **, void **)
- asm ("libgo_reflect.reflect.call");
-
-#ifdef __rtems__
-void __wrap_rtems_task_variable_add(void **);
-#endif
+// If appropriate, ask the operating system to control whether this
+// thread should receive profiling signals. This is only necessary on OS X.
+// An operating system should not deliver a profiling signal to a
+// thread that is not actually executing (what good is that?), but that's
+// what OS X prefers to do. When profiling is turned on, we mask
+// away the profiling signal when threads go to sleep, so that OS X
+// is forced to deliver the signal to a thread that's actually running.
+// This is a no-op on other systems.
+void runtime_setprof(bool);
void runtime_time_scan(void (*)(byte*, int64));
+
+void runtime_setsig(int32, bool, bool);
+#define runtime_setitimer setitimer
diff --git a/libgo/runtime/signal_unix.c b/libgo/runtime/signal_unix.c
new file mode 100644
index 0000000..3b8f439
--- /dev/null
+++ b/libgo/runtime/signal_unix.c
@@ -0,0 +1,64 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd linux openbsd netbsd
+
+#include <sys/time.h>
+
+#include "runtime.h"
+#include "defs.h"
+
+extern SigTab runtime_sigtab[];
+
+void
+runtime_initsig(void)
+{
+ int32 i;
+ SigTab *t;
+
+ // First call: basic setup.
+ for(i = 0; runtime_sigtab[i].sig != -1; i++) {
+ t = &runtime_sigtab[i];
+ if((t->flags == 0) || (t->flags & SigDefault))
+ continue;
+ runtime_setsig(i, false, true);
+ }
+}
+
+void
+runtime_sigenable(uint32 sig)
+{
+ int32 i;
+ SigTab *t;
+
+ for(i = 0; runtime_sigtab[i].sig != -1; i++) {
+ // ~0 means all signals.
+ if(~sig == 0 || runtime_sigtab[i].sig == (int32)sig) {
+ t = &runtime_sigtab[i];
+ if(t->flags & SigDefault) {
+ runtime_setsig(i, false, true);
+ t->flags &= ~SigDefault; // make this idempotent
+ }
+ }
+ }
+}
+
+void
+runtime_resetcpuprofiler(int32 hz)
+{
+ struct itimerval it;
+
+ runtime_memclr((byte*)&it, sizeof it);
+ if(hz == 0) {
+ runtime_setitimer(ITIMER_PROF, &it, nil);
+ runtime_setprof(false);
+ } else {
+ it.it_interval.tv_sec = 0;
+ it.it_interval.tv_usec = 1000000 / hz;
+ it.it_value = it.it_interval;
+ runtime_setitimer(ITIMER_PROF, &it, nil);
+ runtime_setprof(true);
+ }
+ runtime_m()->profilehz = hz;
+}
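Note: runtime_resetcpuprofiler above arms ITIMER_PROF so that SIGPROF arrives hz times per second, i.e. every 1000000/hz microseconds, and disarms the timer when hz is zero. A standalone sketch of that setitimer arithmetic (SIGPROF is simply ignored here rather than handled):

/* Sketch of the profiling-timer arithmetic: hz interrupts per second means
   an interval of 1000000/hz microseconds on ITIMER_PROF. */
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <sys/time.h>

static void set_prof_rate(int hz)
{
  struct itimerval it;

  memset(&it, 0, sizeof it);           /* hz == 0: leave the timer disarmed */
  if (hz > 0) {
    it.it_interval.tv_sec = 0;
    it.it_interval.tv_usec = 1000000 / hz;
    it.it_value = it.it_interval;
  }
  setitimer(ITIMER_PROF, &it, NULL);   /* SIGPROF will fire at that rate */
}

int main(void)
{
  signal(SIGPROF, SIG_IGN);            /* the runtime installs a real handler */
  set_prof_rate(100);                  /* 100 Hz -> 10000 microseconds apart */
  printf("profiling timer armed at 100 Hz\n");
  set_prof_rate(0);                    /* disarm again */
  return 0;
}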
diff --git a/libgo/runtime/sigqueue.goc b/libgo/runtime/sigqueue.goc
index e9157190..c550a4e 100644
--- a/libgo/runtime/sigqueue.goc
+++ b/libgo/runtime/sigqueue.goc
@@ -11,7 +11,7 @@
//
// Ownership for sig.Note passes back and forth between
// the signal handler and the signal goroutine in rounds.
-// The initial state is that sig.note is cleared (setup by siginit).
+// The initial state is that sig.note is cleared (setup by signal_enable).
// At the beginning of each round, mask == 0.
// The round goes through three stages:
//
@@ -36,7 +36,7 @@
// ownership by returning from notesleep (caused by the notewakeup)
// and gives up ownership by clearing mask.
-package runtime
+package signal
#include "config.h"
#include "runtime.h"
#include "arch.h"
@@ -45,33 +45,29 @@ package runtime
static struct {
Note;
- uint32 mask;
+ uint32 mask[(NSIG+31)/32];
+ uint32 wanted[(NSIG+31)/32];
+ uint32 kick;
bool inuse;
} sig;
-void
-siginit(void)
-{
- runtime_noteclear(&sig);
-}
-
// Called from sighandler to send a signal back out of the signal handling thread.
bool
__go_sigsend(int32 s)
{
uint32 bit, mask;
- if(!sig.inuse)
+ if(!sig.inuse || s < 0 || (size_t)s >= 32*nelem(sig.wanted) || !(sig.wanted[s/32]&(1U<<(s&31))))
return false;
- bit = 1 << s;
+ bit = 1 << (s&31);
for(;;) {
- mask = sig.mask;
+ mask = sig.mask[s/32];
if(mask & bit)
break; // signal already in queue
- if(runtime_cas(&sig.mask, mask, mask|bit)) {
+ if(runtime_cas(&sig.mask[s/32], mask, mask|bit)) {
// Added to queue.
- // Only send a wakeup for the first signal in each round.
- if(mask == 0)
+ // Only send a wakeup if the receiver needs a kick.
+ if(runtime_cas(&sig.kick, 1, 0))
runtime_notewakeup(&sig);
break;
}
@@ -79,37 +75,77 @@ __go_sigsend(int32 s)
return true;
}
-// Called to receive a bitmask of queued signals.
-func Sigrecv() (m uint32) {
- runtime_entersyscall();
- runtime_notesleep(&sig);
- runtime_exitsyscall();
- runtime_noteclear(&sig);
+// Called to receive the next queued signal.
+// Must only be called from a single goroutine at a time.
+func signal_recv() (m uint32) {
+ static uint32 recv[nelem(sig.mask)];
+ int32 i, more;
+
for(;;) {
- m = sig.mask;
- if(runtime_cas(&sig.mask, m, 0))
- break;
+ // Serve from local copy if there are bits left.
+ for(i=0; i<NSIG; i++) {
+ if(recv[i/32]&(1U<<(i&31))) {
+ recv[i/32] ^= 1U<<(i&31);
+ m = i;
+ goto done;
+ }
+ }
+
+ // Get a new local copy.
+ // Ask for a kick if more signals come in
+ // during or after our check (before the sleep).
+ if(sig.kick == 0) {
+ runtime_noteclear(&sig);
+ runtime_cas(&sig.kick, 0, 1);
+ }
+
+ more = 0;
+ for(i=0; (size_t)i<nelem(sig.mask); i++) {
+ for(;;) {
+ m = sig.mask[i];
+ if(runtime_cas(&sig.mask[i], m, 0))
+ break;
+ }
+ recv[i] = m;
+ if(m != 0)
+ more = 1;
+ }
+ if(more)
+ continue;
+
+ // Sleep waiting for more.
+ runtime_entersyscall();
+ runtime_notesleep(&sig);
+ runtime_exitsyscall();
}
+
+done:;
+ // goc requires that we fall off the end of functions
+ // that return values instead of using our own return
+ // statements.
}
-func Signame(sig int32) (name String) {
- const char* s = NULL;
- char buf[100];
-#if defined(HAVE_STRSIGNAL)
- s = strsignal(sig);
-#endif
- if (s == NULL) {
- snprintf(buf, sizeof buf, "signal %d", sig);
- s = buf;
+// Must only be called from a single goroutine at a time.
+func signal_enable(s uint32) {
+ int32 i;
+
+ if(!sig.inuse) {
+ // The first call to signal_enable is for us
+ // to use for initialization. It does not pass
+ // signal information in m.
+ sig.inuse = true; // enable reception of signals; cannot disable
+ runtime_noteclear(&sig);
+ return;
+ }
+
+ if(~s == 0) {
+ // Special case: want everything.
+ for(i=0; (size_t)i<nelem(sig.wanted); i++)
+ sig.wanted[i] = ~(uint32)0;
+ return;
}
- int32 len = __builtin_strlen(s);
- unsigned char *data = runtime_mallocgc(len, FlagNoPointers, 0, 0);
- __builtin_memcpy(data, s, len);
- name.__data = data;
- name.__length = len;
-}
-func Siginit() {
- runtime_initsig(SigQueue);
- sig.inuse = true; // enable reception of signals; cannot disable
+ if(s >= nelem(sig.wanted)*32)
+ return;
+ sig.wanted[s/32] |= 1U<<(s&31);
}
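Note: sigqueue.goc now tracks pending and wanted signals in arrays of 32-bit words, (NSIG+31)/32 of them, using word s/32 and bit 1<<(s&31) for signal s; the real code updates the words with runtime_cas so the signal handler and the receiving goroutine can race safely. A plain, single-threaded sketch of just the word/bit arithmetic:

/* Single-threaded sketch of the per-word signal bitmask; sigqueue.goc does
   the same indexing but updates the words with runtime_cas. */
#include <stdio.h>
#include <stdint.h>

#define NSIG 64                              /* runtime.h defaults this to 32 */

static uint32_t mask[(NSIG + 31) / 32];      /* one bit per signal number */

static void set_pending(int s) { mask[s / 32] |= 1U << (s & 31); }
static int  is_pending(int s)  { return (mask[s / 32] >> (s & 31)) & 1; }

int main(void)
{
  set_pending(2);     /* SIGINT on most systems: word 0, bit 2 */
  set_pending(35);    /* lands in word 1, bit 3 */
  printf("2: %d  35: %d  3: %d\n", is_pending(2), is_pending(35), is_pending(3));
  return 0;
}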