author    Ian Lance Taylor <ian@gcc.gnu.org>  2017-02-23 00:10:22 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>  2017-02-23 00:10:22 +0000
commit    869c22c50b4d254bbfcc7ae47ba51386c229860e (patch)
tree      80aaae8eb6d466fd88ebd8a096b6c30c0ec02582
parent    1d05ab3690b26285c5b4801c18caaabae7acb333 (diff)
backport: trunk

Merge in changes from gofrontend repository that are not in GCC trunk.
This includes changes up to e387439bfd24d5e142874b8e68e7039f74c744d7.

From-SVN: r245668
-rw-r--r--  gcc/go/go-backend.c                        2
-rw-r--r--  gcc/go/go-gcc.cc                          11
-rw-r--r--  gcc/go/gofrontend/backend.h                5
-rw-r--r--  gcc/go/gofrontend/export.cc                8
-rw-r--r--  gcc/go/gofrontend/export.h                 6
-rw-r--r--  gcc/go/gofrontend/expressions.cc          30
-rw-r--r--  gcc/go/gofrontend/expressions.h            5
-rw-r--r--  gcc/go/gofrontend/gogo.cc                  2
-rw-r--r--  gcc/go/gofrontend/statements.cc            4
-rw-r--r--  gcc/go/gofrontend/types.cc                 7
-rw-r--r--  libgo/Makefile.am                          2
-rw-r--r--  libgo/Makefile.in                         35
-rw-r--r--  libgo/go/runtime/debug.go                  4
-rw-r--r--  libgo/go/runtime/os_gccgo.go               4
-rw-r--r--  libgo/go/runtime/panic.go                  5
-rw-r--r--  libgo/go/runtime/pprof/mprof_test.go       6
-rw-r--r--  libgo/go/runtime/proc.go                1147
-rw-r--r--  libgo/go/runtime/runtime2.go              10
-rw-r--r--  libgo/go/runtime/signal_sighandler.go      6
-rw-r--r--  libgo/go/runtime/signal_unix.go            2
-rw-r--r--  libgo/go/runtime/stubs.go                 86
-rw-r--r--  libgo/go/runtime/symtab.go                 6
-rw-r--r--  libgo/runtime/go-breakpoint.c             17
-rw-r--r--  libgo/runtime/go-callers.c                20
-rw-r--r--  libgo/runtime/go-libmain.c                 1
-rw-r--r--  libgo/runtime/go-main.c                    1
-rw-r--r--  libgo/runtime/malloc.h                     4
-rw-r--r--  libgo/runtime/mgc0.c                      16
-rw-r--r--  libgo/runtime/mheap.c                     70
-rw-r--r--  libgo/runtime/proc.c                     999
-rw-r--r--  libgo/runtime/runtime.h                   65
-rw-r--r--  libgo/runtime/runtime_c.c                 37
-rw-r--r--  libgo/runtime/thread-linux.c               8
-rw-r--r--  libgo/runtime/thread-sema.c                9
34 files changed, 1419 insertions, 1221 deletions
diff --git a/gcc/go/go-backend.c b/gcc/go/go-backend.c
index 1705b84..8f915a0 100644
--- a/gcc/go/go-backend.c
+++ b/gcc/go/go-backend.c
@@ -30,7 +30,7 @@ along with GCC; see the file COPYING3. If not see
#include "intl.h"
#include "output.h" /* for assemble_string */
#include "common/common-target.h"
-
+#include "go-c.h"
/* The segment name we pass to simple_object_start_read to find Go
export data. */
diff --git a/gcc/go/go-gcc.cc b/gcc/go/go-gcc.cc
index ed6fc2c..25de526 100644
--- a/gcc/go/go-gcc.cc
+++ b/gcc/go/go-gcc.cc
@@ -506,6 +506,10 @@ class Gcc_backend : public Backend
const std::vector<Bfunction*>&,
const std::vector<Bvariable*>&);
+ void
+ write_export_data(const char* bytes, unsigned int size);
+
+
private:
// Make a Bexpression from a tree.
Bexpression*
@@ -3211,6 +3215,13 @@ Gcc_backend::write_global_definitions(
delete[] defs;
}
+void
+Gcc_backend::write_export_data(const char* bytes, unsigned int size)
+{
+ go_write_export_data(bytes, size);
+}
+
+
// Define a builtin function. BCODE is the builtin function code
// defined by builtins.def. NAME is the name of the builtin function.
// LIBNAME is the name of the corresponding library function, and is
diff --git a/gcc/go/gofrontend/backend.h b/gcc/go/gofrontend/backend.h
index 93835d9..e51efe4ef 100644
--- a/gcc/go/gofrontend/backend.h
+++ b/gcc/go/gofrontend/backend.h
@@ -750,6 +750,11 @@ class Backend
const std::vector<Bexpression*>& constant_decls,
const std::vector<Bfunction*>& function_decls,
const std::vector<Bvariable*>& variable_decls) = 0;
+
+ // Write SIZE bytes of export data from BYTES to the proper
+ // section in the output object file.
+ virtual void
+ write_export_data(const char* bytes, unsigned int size) = 0;
};
#endif // !defined(GO_BACKEND_H)
diff --git a/gcc/go/gofrontend/export.cc b/gcc/go/gofrontend/export.cc
index 6e08599..2ea543a 100644
--- a/gcc/go/gofrontend/export.cc
+++ b/gcc/go/gofrontend/export.cc
@@ -14,6 +14,9 @@
#include "statements.h"
#include "export.h"
+#include "go-linemap.h"
+#include "backend.h"
+
// This file handles exporting global declarations.
// Class Export.
@@ -727,7 +730,8 @@ Export::Stream::write_checksum(const std::string& s)
// Class Stream_to_section.
-Stream_to_section::Stream_to_section()
+Stream_to_section::Stream_to_section(Backend* backend)
+ : backend_(backend)
{
}
@@ -736,5 +740,5 @@ Stream_to_section::Stream_to_section()
void
Stream_to_section::do_write(const char* bytes, size_t length)
{
- go_write_export_data (bytes, length);
+ this->backend_->write_export_data (bytes, length);
}
diff --git a/gcc/go/gofrontend/export.h b/gcc/go/gofrontend/export.h
index fec73fb..1365677 100644
--- a/gcc/go/gofrontend/export.h
+++ b/gcc/go/gofrontend/export.h
@@ -16,6 +16,7 @@ class Bindings;
class Type;
class Package;
class Import_init_set;
+class Backend;
// Codes used for the builtin types. These are all negative to make
// them easily distinct from the codes assigned by Export::write_type.
@@ -236,11 +237,14 @@ class Export : public String_dump
class Stream_to_section : public Export::Stream
{
public:
- Stream_to_section();
+ Stream_to_section(Backend*);
protected:
void
do_write(const char*, size_t);
+
+ private:
+ Backend* backend_;
};
#endif // !defined(GO_EXPORT_H)
diff --git a/gcc/go/gofrontend/expressions.cc b/gcc/go/gofrontend/expressions.cc
index 8006888..79e9cd1 100644
--- a/gcc/go/gofrontend/expressions.cc
+++ b/gcc/go/gofrontend/expressions.cc
@@ -10231,16 +10231,13 @@ Call_expression::do_get_backend(Translate_context* context)
if (this->results_ != NULL)
{
- go_assert(this->call_temp_ != NULL);
- Expression* call_ref =
- Expression::make_temporary_reference(this->call_temp_, location);
- Bexpression* bcall_ref = call_ref->get_backend(context);
+ Bexpression* bcall_ref = this->call_result_ref(context);
Bfunction* bfunction = context->function()->func_value()->get_decl();
Bstatement* assn_stmt =
gogo->backend()->assignment_statement(bfunction,
bcall_ref, call, location);
- this->call_ = this->set_results(context, bcall_ref);
+ this->call_ = this->set_results(context);
Bexpression* set_and_call =
gogo->backend()->compound_expression(assn_stmt, this->call_,
@@ -10252,16 +10249,32 @@ Call_expression::do_get_backend(Translate_context* context)
return this->call_;
}
+// Return the backend representation of a reference to the struct used
+// to capture the result of a multiple-output call.
+
+Bexpression*
+Call_expression::call_result_ref(Translate_context* context)
+{
+ go_assert(this->call_temp_ != NULL);
+ Location location = this->location();
+ Expression* call_ref =
+ Expression::make_temporary_reference(this->call_temp_, location);
+ Bexpression* bcall_ref = call_ref->get_backend(context);
+ return bcall_ref;
+}
+
// Set the result variables if this call returns multiple results.
Bexpression*
-Call_expression::set_results(Translate_context* context, Bexpression* call)
+Call_expression::set_results(Translate_context* context)
{
Gogo* gogo = context->gogo();
Bexpression* results = NULL;
Location loc = this->location();
+ go_assert(this->call_temp_ != NULL);
+
size_t rc = this->result_count();
for (size_t i = 0; i < rc; ++i)
{
@@ -10277,12 +10290,15 @@ Call_expression::set_results(Translate_context* context, Bexpression* call)
Bfunction* bfunction = context->function()->func_value()->get_decl();
Bexpression* result_ref = ref->get_backend(context);
+ Bexpression* bcall_ref = this->call_result_ref(context);
Bexpression* call_result =
- gogo->backend()->struct_field_expression(call, i, loc);
+ gogo->backend()->struct_field_expression(bcall_ref, i, loc);
Bstatement* assn_stmt =
gogo->backend()->assignment_statement(bfunction,
result_ref, call_result, loc);
+ bcall_ref = this->call_result_ref(context);
+ call_result = gogo->backend()->struct_field_expression(bcall_ref, i, loc);
Bexpression* result =
gogo->backend()->compound_expression(assn_stmt, call_result, loc);
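
[Note, not part of the patch: the expressions.cc change above reworks how the frontend copies the results of a multiple-value call out of the temporary holding the call's result struct, re-evaluating call_result_ref for each field. For reference, a minimal Go program with the kind of call this lowering applies to; the names and the comment about the generated form are illustrative only.]

package main

import "fmt"

// divmod returns two results; the call below is lowered through a
// compiler-generated temporary struct whose fields feed q and r.
func divmod(a, b int) (int, int) {
	return a / b, a % b
}

func main() {
	q, r := divmod(17, 5)
	fmt.Println(q, r) // 3 2
}
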
diff --git a/gcc/go/gofrontend/expressions.h b/gcc/go/gofrontend/expressions.h
index e088100..51d27c4 100644
--- a/gcc/go/gofrontend/expressions.h
+++ b/gcc/go/gofrontend/expressions.h
@@ -2267,7 +2267,10 @@ class Call_expression : public Expression
Expression**);
Bexpression*
- set_results(Translate_context*, Bexpression*);
+ set_results(Translate_context*);
+
+ Bexpression*
+ call_result_ref(Translate_context* context);
// The function to call.
Expression* fn_;
diff --git a/gcc/go/gofrontend/gogo.cc b/gcc/go/gofrontend/gogo.cc
index c5ce5d9..28aaecf 100644
--- a/gcc/go/gofrontend/gogo.cc
+++ b/gcc/go/gofrontend/gogo.cc
@@ -4497,7 +4497,7 @@ Gogo::do_exports()
{
// For now we always stream to a section. Later we may want to
// support streaming to a separate file.
- Stream_to_section stream;
+ Stream_to_section stream(this->backend());
// Write out either the prefix or pkgpath depending on how we were
// invoked.
diff --git a/gcc/go/gofrontend/statements.cc b/gcc/go/gofrontend/statements.cc
index d6ab4cc..00367ef 100644
--- a/gcc/go/gofrontend/statements.cc
+++ b/gcc/go/gofrontend/statements.cc
@@ -510,6 +510,10 @@ Temporary_statement::do_get_backend(Translate_context* context)
binit = init->get_backend(context);
}
+ if (binit != NULL)
+ binit = context->backend()->convert_expression(btype, binit,
+ this->location());
+
Bstatement* statement;
this->bvariable_ =
context->backend()->temporary_variable(bfunction, context->bblock(),
diff --git a/gcc/go/gofrontend/types.cc b/gcc/go/gofrontend/types.cc
index 9423ef6..a471e52 100644
--- a/gcc/go/gofrontend/types.cc
+++ b/gcc/go/gofrontend/types.cc
@@ -1177,7 +1177,12 @@ Type::type_descriptor_pointer(Gogo* gogo, Location location)
Bexpression* var_expr =
gogo->backend()->var_expression(t->type_descriptor_var_,
VE_rvalue, location);
- return gogo->backend()->address_expression(var_expr, location);
+ Bexpression* var_addr =
+ gogo->backend()->address_expression(var_expr, location);
+ Type* td_type = Type::make_type_descriptor_type();
+ Btype* td_btype = td_type->get_backend(gogo);
+ Btype* ptd_btype = gogo->backend()->pointer_type(td_btype);
+ return gogo->backend()->convert_expression(ptd_btype, var_addr, location);
}
// A mapping from unnamed types to type descriptor variables.
diff --git a/libgo/Makefile.am b/libgo/Makefile.am
index 515b61b..3f8ff72 100644
--- a/libgo/Makefile.am
+++ b/libgo/Makefile.am
@@ -430,7 +430,6 @@ endif
runtime_files = \
runtime/aeshash.c \
runtime/go-assert.c \
- runtime/go-breakpoint.c \
runtime/go-caller.c \
runtime/go-callers.c \
runtime/go-cdiv.c \
@@ -1296,6 +1295,7 @@ TEST_PACKAGES = \
runtime/internal/sys/check \
runtime/pprof/check \
runtime/pprof/internal/protopprof/check \
+ runtime/trace/check \
sync/atomic/check \
text/scanner/check \
text/tabwriter/check \
diff --git a/libgo/Makefile.in b/libgo/Makefile.in
index d6e3eac..76b31ee 100644
--- a/libgo/Makefile.in
+++ b/libgo/Makefile.in
@@ -191,19 +191,18 @@ libgo_llgo_la_DEPENDENCIES = $(am__DEPENDENCIES_4)
@LIBGO_IS_DARWIN_TRUE@@LIBGO_IS_LINUX_FALSE@am__objects_4 = \
@LIBGO_IS_DARWIN_TRUE@@LIBGO_IS_LINUX_FALSE@ getncpu-bsd.lo
@LIBGO_IS_LINUX_TRUE@am__objects_4 = getncpu-linux.lo
-am__objects_5 = aeshash.lo go-assert.lo go-breakpoint.lo go-caller.lo \
- go-callers.lo go-cdiv.lo go-cgo.lo go-construct-map.lo \
- go-ffi.lo go-fieldtrack.lo go-matherr.lo go-memclr.lo \
- go-memcmp.lo go-memequal.lo go-memmove.lo go-nanotime.lo \
- go-now.lo go-new.lo go-nosys.lo go-reflect-call.lo \
- go-runtime-error.lo go-setenv.lo go-signal.lo go-strslice.lo \
- go-typedesc-equal.lo go-unsafe-new.lo go-unsafe-newarray.lo \
- go-unsafe-pointer.lo go-unsetenv.lo go-unwind.lo go-varargs.lo \
- env_posix.lo heapdump.lo mcache.lo mcentral.lo \
- $(am__objects_1) mfixalloc.lo mgc0.lo mheap.lo msize.lo \
- panic.lo parfor.lo print.lo proc.lo runtime_c.lo thread.lo \
- $(am__objects_2) yield.lo $(am__objects_3) malloc.lo \
- $(am__objects_4)
+am__objects_5 = aeshash.lo go-assert.lo go-caller.lo go-callers.lo \
+ go-cdiv.lo go-cgo.lo go-construct-map.lo go-ffi.lo \
+ go-fieldtrack.lo go-matherr.lo go-memclr.lo go-memcmp.lo \
+ go-memequal.lo go-memmove.lo go-nanotime.lo go-now.lo \
+ go-new.lo go-nosys.lo go-reflect-call.lo go-runtime-error.lo \
+ go-setenv.lo go-signal.lo go-strslice.lo go-typedesc-equal.lo \
+ go-unsafe-new.lo go-unsafe-newarray.lo go-unsafe-pointer.lo \
+ go-unsetenv.lo go-unwind.lo go-varargs.lo env_posix.lo \
+ heapdump.lo mcache.lo mcentral.lo $(am__objects_1) \
+ mfixalloc.lo mgc0.lo mheap.lo msize.lo panic.lo parfor.lo \
+ print.lo proc.lo runtime_c.lo thread.lo $(am__objects_2) \
+ yield.lo $(am__objects_3) malloc.lo $(am__objects_4)
am_libgo_llgo_la_OBJECTS = $(am__objects_5)
libgo_llgo_la_OBJECTS = $(am_libgo_llgo_la_OBJECTS)
libgo_llgo_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
@@ -777,7 +776,6 @@ toolexeclibgounicode_DATA = \
runtime_files = \
runtime/aeshash.c \
runtime/go-assert.c \
- runtime/go-breakpoint.c \
runtime/go-caller.c \
runtime/go-callers.c \
runtime/go-cdiv.c \
@@ -1329,6 +1327,7 @@ TEST_PACKAGES = \
runtime/internal/sys/check \
runtime/pprof/check \
runtime/pprof/internal/protopprof/check \
+ runtime/trace/check \
sync/atomic/check \
text/scanner/check \
text/tabwriter/check \
@@ -1488,7 +1487,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/getncpu-none.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/getncpu-solaris.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-assert.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-breakpoint.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-caller.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-callers.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-cdiv.Plo@am__quote@
@@ -1618,13 +1616,6 @@ go-assert.lo: runtime/go-assert.c
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-assert.lo `test -f 'runtime/go-assert.c' || echo '$(srcdir)/'`runtime/go-assert.c
-go-breakpoint.lo: runtime/go-breakpoint.c
-@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-breakpoint.lo -MD -MP -MF $(DEPDIR)/go-breakpoint.Tpo -c -o go-breakpoint.lo `test -f 'runtime/go-breakpoint.c' || echo '$(srcdir)/'`runtime/go-breakpoint.c
-@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-breakpoint.Tpo $(DEPDIR)/go-breakpoint.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/go-breakpoint.c' object='go-breakpoint.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-breakpoint.lo `test -f 'runtime/go-breakpoint.c' || echo '$(srcdir)/'`runtime/go-breakpoint.c
-
go-caller.lo: runtime/go-caller.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-caller.lo -MD -MP -MF $(DEPDIR)/go-caller.Tpo -c -o go-caller.lo `test -f 'runtime/go-caller.c' || echo '$(srcdir)/'`runtime/go-caller.c
@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/go-caller.Tpo $(DEPDIR)/go-caller.Plo
diff --git a/libgo/go/runtime/debug.go b/libgo/go/runtime/debug.go
index a8827f2..6a9efcd 100644
--- a/libgo/go/runtime/debug.go
+++ b/libgo/go/runtime/debug.go
@@ -39,7 +39,9 @@ func GOMAXPROCS(n int) int {
// The set of available CPUs is checked by querying the operating system
// at process startup. Changes to operating system CPU allocation after
// process startup are not reflected.
-func NumCPU() int
+func NumCPU() int {
+ return int(ncpu)
+}
// NumCgoCall returns the number of cgo calls made by the current process.
func NumCgoCall() int64 {
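
[Note, not part of the patch: NumCPU is now implemented directly in Go as int(ncpu); the exported API is unchanged. A trivial usage sketch:]

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// NumCPU reports the number of logical CPUs usable by the process,
	// as determined at startup.
	fmt.Println("NumCPU:", runtime.NumCPU())
}
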
diff --git a/libgo/go/runtime/os_gccgo.go b/libgo/go/runtime/os_gccgo.go
index a8f05a4..358a38b 100644
--- a/libgo/go/runtime/os_gccgo.go
+++ b/libgo/go/runtime/os_gccgo.go
@@ -11,6 +11,10 @@ import (
// Temporary for C code to call:
//go:linkname minit runtime.minit
+func goenvs() {
+ goenvs_unix()
+}
+
// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
diff --git a/libgo/go/runtime/panic.go b/libgo/go/runtime/panic.go
index b76bb21..aa196ae 100644
--- a/libgo/go/runtime/panic.go
+++ b/libgo/go/runtime/panic.go
@@ -97,7 +97,6 @@ func deferproc(frame *bool, pfn uintptr, arg unsafe.Pointer) {
n.arg = arg
n.retaddr = 0
n.makefunccanrecover = false
- n.special = false
}
// Allocate a Defer, usually using per-P pool.
@@ -141,10 +140,6 @@ func newdefer() *_defer {
//
//go:nosplit
func freedefer(d *_defer) {
- if d.special {
- return
- }
-
// When C code calls a Go function on a non-Go thread, the
// deferred call to cgocallBackDone will set g to nil.
// Don't crash trying to put d on the free list; just let it
diff --git a/libgo/go/runtime/pprof/mprof_test.go b/libgo/go/runtime/pprof/mprof_test.go
index 079af15..5ebd46b 100644
--- a/libgo/go/runtime/pprof/mprof_test.go
+++ b/libgo/go/runtime/pprof/mprof_test.go
@@ -103,9 +103,11 @@ func TestMemoryProfiler(t *testing.T) {
# 0x[0-9,a-f]+ runtime_pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:74
`, memoryProfilerRun, (2<<20)*memoryProfilerRun, memoryProfilerRun, (2<<20)*memoryProfilerRun),
- fmt.Sprintf(`0: 0 \[%v: %v\] @( 0x[0-9,a-f]+)+
+ // This should start with "0: 0" but gccgo's imprecise
+ // GC means that sometimes the value is not collected.
+ fmt.Sprintf(`(0|%v): (0|%v) \[%v: %v\] @( 0x[0-9,a-f]+)+
# 0x[0-9,a-f]+ pprof_test\.allocateReflectTransient\+0x[0-9,a-f]+ .*/mprof_test.go:49
-`, memoryProfilerRun, (2<<20)*memoryProfilerRun),
+`, memoryProfilerRun, (2<<20)*memoryProfilerRun, memoryProfilerRun, (2<<20)*memoryProfilerRun),
}
for _, test := range tests {
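
[Note, not part of the patch: the relaxed pattern above accepts either a collected record ("0: 0") or the still-live values, since gccgo's imprecise GC may keep the transient allocation alive. A standalone sketch of the same alternation, with made-up counts, showing both forms match:]

package main

import (
	"fmt"
	"regexp"
)

func main() {
	runs := 10                // stand-in for memoryProfilerRun
	bytes := (2 << 20) * runs // stand-in for the allocated byte count
	// Same shape as the relaxed test pattern: the leading counts may be
	// either zero (collected) or the live values (not yet collected).
	re := regexp.MustCompile(fmt.Sprintf(`(0|%v): (0|%v) \[%v: %v\]`, runs, bytes, runs, bytes))
	fmt.Println(re.MatchString("0: 0 [10: 20971520]"))         // true
	fmt.Println(re.MatchString("10: 20971520 [10: 20971520]")) // true
}
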
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go
index ea7f84e..9c6a0ed 100644
--- a/libgo/go/runtime/proc.go
+++ b/libgo/go/runtime/proc.go
@@ -6,61 +6,137 @@ package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
-// Functions temporarily called by C code.
+// Functions called by C code.
+//go:linkname main runtime.main
+//go:linkname goparkunlock runtime.goparkunlock
//go:linkname newextram runtime.newextram
//go:linkname acquirep runtime.acquirep
//go:linkname releasep runtime.releasep
//go:linkname incidlelocked runtime.incidlelocked
-//go:linkname checkdead runtime.checkdead
-//go:linkname sysmon runtime.sysmon
-//go:linkname schedtrace runtime.schedtrace
-//go:linkname allgadd runtime.allgadd
-//go:linkname mcommoninit runtime.mcommoninit
+//go:linkname schedinit runtime.schedinit
//go:linkname ready runtime.ready
//go:linkname gcprocs runtime.gcprocs
-//go:linkname needaddgcproc runtime.needaddgcproc
//go:linkname stopm runtime.stopm
//go:linkname handoffp runtime.handoffp
//go:linkname wakep runtime.wakep
//go:linkname stoplockedm runtime.stoplockedm
//go:linkname schedule runtime.schedule
//go:linkname execute runtime.execute
-//go:linkname gfput runtime.gfput
+//go:linkname goexit1 runtime.goexit1
+//go:linkname reentersyscall runtime.reentersyscall
+//go:linkname reentersyscallblock runtime.reentersyscallblock
+//go:linkname exitsyscall runtime.exitsyscall
//go:linkname gfget runtime.gfget
-//go:linkname lockOSThread runtime.lockOSThread
-//go:linkname unlockOSThread runtime.unlockOSThread
-//go:linkname procresize runtime.procresize
//go:linkname helpgc runtime.helpgc
//go:linkname stopTheWorldWithSema runtime.stopTheWorldWithSema
//go:linkname startTheWorldWithSema runtime.startTheWorldWithSema
-//go:linkname mput runtime.mput
-//go:linkname mget runtime.mget
+//go:linkname mstart runtime.mstart
+//go:linkname mstart1 runtime.mstart1
//go:linkname globrunqput runtime.globrunqput
//go:linkname pidleget runtime.pidleget
-//go:linkname runqempty runtime.runqempty
-//go:linkname runqput runtime.runqput
// Function called by misc/cgo/test.
//go:linkname lockedOSThread runtime.lockedOSThread
// Functions temporarily in C that have not yet been ported.
-func allocm(*p, bool, *unsafe.Pointer, *uintptr) *m
-func malg(bool, bool, *unsafe.Pointer, *uintptr) *g
-func startm(*p, bool)
-func newm(unsafe.Pointer, *p)
func gchelper()
func getfingwait() bool
func getfingwake() bool
func wakefing() *g
+func mallocinit()
-// C functions for ucontext management.
+// C functions for thread and context management.
+func newosproc(*m)
+func malg(bool, bool, *unsafe.Pointer, *uintptr) *g
+func resetNewG(*g, *unsafe.Pointer, *uintptr)
func gogo(*g)
func setGContext()
func makeGContext(*g, unsafe.Pointer, uintptr)
+func mstartInitContext(*g, unsafe.Pointer)
func getTraceback(me, gp *g)
+func _cgo_notify_runtime_init_done()
+func alreadyInCallers() bool
+
+// Functions created by the compiler.
+//extern __go_init_main
+func main_init()
+
+//extern main.main
+func main_main()
+
+var buildVersion = sys.TheVersion
+
+// Goroutine scheduler
+// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
+//
+// The main concepts are:
+// G - goroutine.
+// M - worker thread, or machine.
+// P - processor, a resource that is required to execute Go code.
+// M must have an associated P to execute Go code, however it can be
+// blocked or in a syscall w/o an associated P.
+//
+// Design doc at https://golang.org/s/go11sched.
+
+// Worker thread parking/unparking.
+// We need to balance between keeping enough running worker threads to utilize
+// available hardware parallelism and parking excessive running worker threads
+// to conserve CPU resources and power. This is not simple for two reasons:
+// (1) scheduler state is intentionally distributed (in particular, per-P work
+// queues), so it is not possible to compute global predicates on fast paths;
+// (2) for optimal thread management we would need to know the future (don't park
+// a worker thread when a new goroutine will be readied in near future).
+//
+// Three rejected approaches that would work badly:
+// 1. Centralize all scheduler state (would inhibit scalability).
+// 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
+// is a spare P, unpark a thread and handoff it the thread and the goroutine.
+// This would lead to thread state thrashing, as the thread that readied the
+// goroutine can be out of work the very next moment, we will need to park it.
+// Also, it would destroy locality of computation as we want to preserve
+// dependent goroutines on the same thread; and introduce additional latency.
+// 3. Unpark an additional thread whenever we ready a goroutine and there is an
+// idle P, but don't do handoff. This would lead to excessive thread parking/
+// unparking as the additional threads will instantly park without discovering
+// any work to do.
+//
+// The current approach:
+// We unpark an additional thread when we ready a goroutine if (1) there is an
+// idle P and there are no "spinning" worker threads. A worker thread is considered
+// spinning if it is out of local work and did not find work in global run queue/
+// netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
+// Threads unparked this way are also considered spinning; we don't do goroutine
+// handoff so such threads are out of work initially. Spinning threads do some
+// spinning looking for work in per-P run queues before parking. If a spinning
+// thread finds work it takes itself out of the spinning state and proceeds to
+// execution. If it does not find work it takes itself out of the spinning state
+// and then parks.
+// If there is at least one spinning thread (sched.nmspinning>1), we don't unpark
+// new threads when readying goroutines. To compensate for that, if the last spinning
+// thread finds work and stops spinning, it must unpark a new spinning thread.
+// This approach smooths out unjustified spikes of thread unparking,
+// but at the same time guarantees eventual maximal CPU parallelism utilization.
+//
+// The main implementation complication is that we need to be very careful during
+// spinning->non-spinning thread transition. This transition can race with submission
+// of a new goroutine, and either one part or another needs to unpark another worker
+// thread. If they both fail to do that, we can end up with semi-persistent CPU
+// underutilization. The general pattern for goroutine readying is: submit a goroutine
+// to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning.
+// The general pattern for spinning->non-spinning transition is: decrement nmspinning,
+// #StoreLoad-style memory barrier, check all per-P work queues for new work.
+// Note that all this complexity does not apply to global run queue as we are not
+// sloppy about thread unparking when submitting to global queue. Also see comments
+// for nmspinning manipulation.
+
+var (
+ m0 m
+ g0 g
+)
// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
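
[Note, not part of the patch: the parking/unparking comment above describes the "submit work, then check sched.nmspinning" protocol. The toy model below, with all names local to the example and no claim to match the runtime's real code, only illustrates the ordering of those two steps.]

package main

import (
	"fmt"
	"sync/atomic"
)

var (
	nmspinning int32                  // count of spinning workers, as in sched.nmspinning
	runq       = make(chan int, 128) // stand-in for a run queue
)

// ready models readying a goroutine: publish the work first, then check
// whether any worker is spinning, and only "unpark" a new one if not.
// A real scheduler needs a #StoreLoad-style barrier between the two
// steps; this sketch ignores memory-ordering details.
func ready(work int) {
	runq <- work
	if atomic.LoadInt32(&nmspinning) == 0 {
		fmt.Println("no spinning workers: would unpark one for item", work)
	}
}

func main() {
	atomic.AddInt32(&nmspinning, 1) // one worker already spinning
	ready(1)                        // no unpark needed
	atomic.AddInt32(&nmspinning, -1)
	ready(2) // no spinners left, so an unpark would happen here
	fmt.Println(len(runq), "items queued")
}
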
@@ -68,6 +144,159 @@ func getTraceback(me, gp *g)
// it is closed, meaning cgocallbackg can reliably receive from it.
var main_init_done chan bool
+// runtimeInitTime is the nanotime() at which the runtime started.
+var runtimeInitTime int64
+
+// Value to use for signal mask for newly created M's.
+var initSigmask sigset
+
+// The main goroutine.
+func main() {
+ g := getg()
+
+ // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
+ // Using decimal instead of binary GB and MB because
+ // they look nicer in the stack overflow failure message.
+ if sys.PtrSize == 8 {
+ maxstacksize = 1000000000
+ } else {
+ maxstacksize = 250000000
+ }
+
+ // Record when the world started.
+ runtimeInitTime = nanotime()
+
+ systemstack(func() {
+ newm(sysmon, nil)
+ })
+
+ // Lock the main goroutine onto this, the main OS thread,
+ // during initialization. Most programs won't care, but a few
+ // do require certain calls to be made by the main thread.
+ // Those can arrange for main.main to run in the main thread
+ // by calling runtime.LockOSThread during initialization
+ // to preserve the lock.
+ lockOSThread()
+
+ if g.m != &m0 {
+ throw("runtime.main not on m0")
+ }
+
+ // Defer unlock so that runtime.Goexit during init does the unlock too.
+ needUnlock := true
+ defer func() {
+ if needUnlock {
+ unlockOSThread()
+ }
+ }()
+
+ main_init_done = make(chan bool)
+ if iscgo {
+ _cgo_notify_runtime_init_done()
+ }
+
+ fn := main_init // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
+ fn()
+ close(main_init_done)
+
+ needUnlock = false
+ unlockOSThread()
+
+ // For gccgo we have to wait until after main is initialized
+ // to enable GC, because initializing main registers the GC roots.
+ gcenable()
+
+ if isarchive || islibrary {
+ // A program compiled with -buildmode=c-archive or c-shared
+ // has a main, but it is not executed.
+ return
+ }
+ fn = main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
+ fn()
+ if raceenabled {
+ racefini()
+ }
+
+ // Make racy client program work: if panicking on
+ // another goroutine at the same time as main returns,
+ // let the other goroutine finish printing the panic trace.
+ // Once it does, it will exit. See issue 3934.
+ if panicking != 0 {
+ gopark(nil, nil, "panicwait", traceEvGoStop, 1)
+ }
+
+ exit(0)
+ for {
+ var x *int32
+ *x = 0
+ }
+}
+
+// os_beforeExit is called from os.Exit(0).
+//go:linkname os_beforeExit os.runtime_beforeExit
+func os_beforeExit() {
+ if raceenabled {
+ racefini()
+ }
+}
+
+// start forcegc helper goroutine
+func init() {
+ go forcegchelper()
+}
+
+func forcegchelper() {
+ forcegc.g = getg()
+ for {
+ lock(&forcegc.lock)
+ if forcegc.idle != 0 {
+ throw("forcegc: phase error")
+ }
+ atomic.Store(&forcegc.idle, 1)
+ goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1)
+ // this goroutine is explicitly resumed by sysmon
+ if debug.gctrace > 0 {
+ println("GC forced")
+ }
+ gcStart(gcBackgroundMode, true)
+ }
+}
+
+//go:nosplit
+
+// Gosched yields the processor, allowing other goroutines to run. It does not
+// suspend the current goroutine, so execution resumes automatically.
+func Gosched() {
+ mcall(gosched_m)
+}
+
+// Puts the current goroutine into a waiting state and calls unlockf.
+// If unlockf returns false, the goroutine is resumed.
+// unlockf must not access this G's stack, as it may be moved between
+// the call to gopark and the call to unlockf.
+func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) {
+ mp := acquirem()
+ gp := mp.curg
+ status := readgstatus(gp)
+ if status != _Grunning && status != _Gscanrunning {
+ throw("gopark: bad g status")
+ }
+ mp.waitlock = lock
+ mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
+ gp.waitreason = reason
+ mp.waittraceev = traceEv
+ mp.waittraceskip = traceskip
+ releasem(mp)
+ // can't do anything that might move the G between Ms here.
+ mcall(park_m)
+}
+
+// Puts the current goroutine into a waiting state and unlocks the lock.
+// The goroutine can be made runnable again by calling goready(gp).
+func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) {
+ gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
+}
+
func goready(gp *g, traceskip int) {
systemstack(func() {
ready(gp, traceskip, true)
@@ -164,12 +393,11 @@ func releaseSudog(s *sudog) {
// funcPC returns the entry PC of the function f.
// It assumes that f is a func value. Otherwise the behavior is undefined.
-// For gccgo here unless and until we port proc.go.
-// Note that this differs from the gc implementation; the gc implementation
-// adds sys.PtrSize to the address of the interface value, but GCC's
-// alias analysis decides that that can not be a reference to the second
-// field of the interface, and in some cases it drops the initialization
-// of the second field as a dead store.
+// For gccgo note that this differs from the gc implementation; the gc
+// implementation adds sys.PtrSize to the address of the interface
+// value, but GCC's alias analysis decides that that can not be a
+// reference to the second field of the interface, and in some cases
+// it drops the initialization of the second field as a dead store.
//go:nosplit
func funcPC(f interface{}) uintptr {
i := (*iface)(unsafe.Pointer(&f))
@@ -207,6 +435,61 @@ func allgadd(gp *g) {
unlock(&allglock)
}
+const (
+ // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
+ // 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.
+ _GoidCacheBatch = 16
+)
+
+// The bootstrap sequence is:
+//
+// call osinit
+// call schedinit
+// make & queue new G
+// call runtime·mstart
+//
+// The new G calls runtime·main.
+func schedinit() {
+ _m_ := &m0
+ _g_ := &g0
+ _m_.g0 = _g_
+ _m_.curg = _g_
+ _g_.m = _m_
+ setg(_g_)
+
+ sched.maxmcount = 10000
+
+ mallocinit()
+ mcommoninit(_g_.m)
+ alginit() // maps must not be used before this call
+
+ msigsave(_g_.m)
+ initSigmask = _g_.m.sigmask
+
+ goargs()
+ goenvs()
+ parsedebugvars()
+ gcinit()
+
+ sched.lastpoll = uint64(nanotime())
+ procs := ncpu
+ if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
+ procs = n
+ }
+ if procs > _MaxGomaxprocs {
+ procs = _MaxGomaxprocs
+ }
+ if procresize(procs) != nil {
+ throw("unknown runnable goroutine during bootstrap")
+ }
+
+ if buildVersion == "" {
+ // Condition should never trigger. This code just serves
+ // to ensure runtime·buildVersion is kept in the resulting binary.
+ buildVersion = "unknown"
+ }
+}
+
func dumpgstatus(gp *g) {
_g_ := getg()
print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
@@ -684,11 +967,66 @@ func startTheWorldWithSema() {
// coordinate. This lazy approach works out in practice:
// we don't mind if the first couple gc rounds don't have quite
// the maximum number of procs.
- newm(unsafe.Pointer(funcPC(mhelpgc)), nil)
+ newm(mhelpgc, nil)
}
_g_.m.locks--
}
+// Called to start an M.
+// For gccgo this is called directly by pthread_create.
+//go:nosplit
+func mstart(mpu unsafe.Pointer) unsafe.Pointer {
+ mp := (*m)(mpu)
+ _g_ := mp.g0
+ _g_.m = mp
+ setg(_g_)
+
+ _g_.entry = 0
+ _g_.param = nil
+
+ mstartInitContext(_g_, unsafe.Pointer(&mp))
+
+ // This is never reached, but is required because pthread_create
+ // expects a function that returns a pointer.
+ return nil
+}
+
+// This is called from mstartInitContext.
+func mstart1() {
+ _g_ := getg()
+
+ if _g_ != _g_.m.g0 {
+ throw("bad runtime·mstart")
+ }
+
+ asminit()
+ minit()
+
+ // Install signal handlers; after minit so that minit can
+ // prepare the thread to be able to handle the signals.
+ if _g_.m == &m0 {
+ // Create an extra M for callbacks on threads not created by Go.
+ if iscgo && !cgoHasExtraM {
+ cgoHasExtraM = true
+ newextram()
+ }
+ initsig(false)
+ }
+
+ if fn := _g_.m.mstartfn; fn != nil {
+ fn()
+ }
+
+ if _g_.m.helpgc != 0 {
+ _g_.m.helpgc = 0
+ stopm()
+ } else if _g_.m != &m0 {
+ acquirep(_g_.m.nextp.ptr())
+ _g_.m.nextp = 0
+ }
+ schedule()
+}
+
// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
@@ -811,6 +1149,35 @@ func runSafePointFn() {
unlock(&sched.lock)
}
+// Allocate a new m unassociated with any thread.
+// Can use p for allocation context if needed.
+// fn is recorded as the new m's m.mstartfn.
+//
+// This function is allowed to have write barriers even if the caller
+// isn't because it borrows _p_.
+//
+//go:yeswritebarrierrec
+func allocm(_p_ *p, fn func(), allocatestack bool) (mp *m, g0Stack unsafe.Pointer, g0StackSize uintptr) {
+ _g_ := getg()
+ _g_.m.locks++ // disable GC because it can be called from sysmon
+ if _g_.m.p == 0 {
+ acquirep(_p_) // temporarily borrow p for mallocs in this function
+ }
+ mp = new(m)
+ mp.mstartfn = fn
+ mcommoninit(mp)
+
+ mp.g0 = malg(allocatestack, false, &g0Stack, &g0StackSize)
+ mp.g0.m = mp
+
+ if _p_ == _g_.m.p.ptr() {
+ releasep()
+ }
+ _g_.m.locks--
+
+ return mp, g0Stack, g0StackSize
+}
+
// needm is called when a cgo callback happens on a
// thread without an m (a thread not created by Go).
// In this case, needm is expected to find an m to use
@@ -884,6 +1251,7 @@ func needm(x byte) {
setGContext()
// Initialize this thread to use the m.
+ asminit()
minit()
}
@@ -915,9 +1283,7 @@ func oneNewExtraM() {
// The sched.pc will never be returned to, but setting it to
// goexit makes clear to the traceback routines where
// the goroutine stack ends.
- var g0SP unsafe.Pointer
- var g0SPSize uintptr
- mp := allocm(nil, true, &g0SP, &g0SPSize)
+ mp, g0SP, g0SPSize := allocm(nil, nil, true)
gp := malg(true, false, nil, nil)
gp.gcscanvalid = true // fresh G, so no dequeueRescan necessary
gp.gcscandone = true
@@ -1051,6 +1417,17 @@ func unlockextra(mp *m) {
atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
}
+// Create a new m. It will start off with a call to fn, or else the scheduler.
+// fn needs to be static and not a heap allocated closure.
+// May run with m.p==nil, so write barriers are not allowed.
+//go:nowritebarrierrec
+func newm(fn func(), _p_ *p) {
+ mp, _, _ := allocm(_p_, fn, false)
+ mp.nextp.set(_p_)
+ mp.sigmask = initSigmask
+ newosproc(mp)
+}
+
// Stops execution of the current m until new work is available.
// Returns with acquired P.
func stopm() {
@@ -1083,6 +1460,59 @@ retry:
_g_.m.nextp = 0
}
+func mspinning() {
+ // startm's caller incremented nmspinning. Set the new M's spinning.
+ getg().m.spinning = true
+}
+
+// Schedules some M to run the p (creates an M if necessary).
+// If p==nil, tries to get an idle P, if no idle P's does nothing.
+// May run with m.p==nil, so write barriers are not allowed.
+// If spinning is set, the caller has incremented nmspinning and startm will
+// either decrement nmspinning or set m.spinning in the newly started M.
+//go:nowritebarrierrec
+func startm(_p_ *p, spinning bool) {
+ lock(&sched.lock)
+ if _p_ == nil {
+ _p_ = pidleget()
+ if _p_ == nil {
+ unlock(&sched.lock)
+ if spinning {
+ // The caller incremented nmspinning, but there are no idle Ps,
+ // so it's okay to just undo the increment and give up.
+ if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
+ throw("startm: negative nmspinning")
+ }
+ }
+ return
+ }
+ }
+ mp := mget()
+ unlock(&sched.lock)
+ if mp == nil {
+ var fn func()
+ if spinning {
+ // The caller incremented nmspinning, so set m.spinning in the new M.
+ fn = mspinning
+ }
+ newm(fn, _p_)
+ return
+ }
+ if mp.spinning {
+ throw("startm: m is spinning")
+ }
+ if mp.nextp != 0 {
+ throw("startm: m has p")
+ }
+ if spinning && !runqempty(_p_) {
+ throw("startm: p has runnable gs")
+ }
+ // The caller incremented nmspinning, so set m.spinning in the new M.
+ mp.spinning = spinning
+ mp.nextp.set(_p_)
+ notewakeup(&mp.park)
+}
+
// Hands off P from syscall or locked M.
// Always runs without a P, so write barriers are not allowed.
//go:nowritebarrierrec
@@ -1636,6 +2066,435 @@ func dropg() {
setGNoWB(&_g_.m.curg, nil)
}
+func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
+ unlock((*mutex)(lock))
+ return true
+}
+
+// park continuation on g0.
+func park_m(gp *g) {
+ _g_ := getg()
+
+ if trace.enabled {
+ traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip, gp)
+ }
+
+ casgstatus(gp, _Grunning, _Gwaiting)
+ dropg()
+
+ if _g_.m.waitunlockf != nil {
+ fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf))
+ ok := fn(gp, _g_.m.waitlock)
+ _g_.m.waitunlockf = nil
+ _g_.m.waitlock = nil
+ if !ok {
+ if trace.enabled {
+ traceGoUnpark(gp, 2)
+ }
+ casgstatus(gp, _Gwaiting, _Grunnable)
+ execute(gp, true) // Schedule it back, never returns.
+ }
+ }
+ schedule()
+}
+
+func goschedImpl(gp *g) {
+ status := readgstatus(gp)
+ if status&^_Gscan != _Grunning {
+ dumpgstatus(gp)
+ throw("bad g status")
+ }
+ casgstatus(gp, _Grunning, _Grunnable)
+ dropg()
+ lock(&sched.lock)
+ globrunqput(gp)
+ unlock(&sched.lock)
+
+ schedule()
+}
+
+// Gosched continuation on g0.
+func gosched_m(gp *g) {
+ if trace.enabled {
+ traceGoSched()
+ }
+ goschedImpl(gp)
+}
+
+// Finishes execution of the current goroutine.
+func goexit1() {
+ if trace.enabled {
+ traceGoEnd()
+ }
+ mcall(goexit0)
+}
+
+// goexit continuation on g0.
+func goexit0(gp *g) {
+ _g_ := getg()
+
+ casgstatus(gp, _Grunning, _Gdead)
+ if isSystemGoroutine(gp) {
+ atomic.Xadd(&sched.ngsys, -1)
+ }
+ gp.m = nil
+ gp.lockedm = nil
+ _g_.m.lockedg = nil
+ gp.entry = 0
+ gp.paniconfault = false
+ gp._defer = nil // should be true already but just in case.
+ gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
+ gp.writebuf = nil
+ gp.waitreason = ""
+ gp.param = nil
+
+ // Note that gp's stack scan is now "valid" because it has no
+ // stack. We could dequeueRescan, but that takes a lock and
+ // isn't really necessary.
+ gp.gcscanvalid = true
+ dropg()
+
+ if _g_.m.locked&^_LockExternal != 0 {
+ print("invalid m->locked = ", _g_.m.locked, "\n")
+ throw("internal lockOSThread error")
+ }
+ _g_.m.locked = 0
+ gfput(_g_.m.p.ptr(), gp)
+ schedule()
+}
+
+// The goroutine g is about to enter a system call.
+// Record that it's not using the cpu anymore.
+// This is called only from the go syscall library and cgocall,
+// not from the low-level system calls used by the runtime.
+//
+// The entersyscall function is written in C, so that it can save the
+// current register context so that the GC will see them.
+// It calls reentersyscall.
+//
+// Syscall tracing:
+// At the start of a syscall we emit traceGoSysCall to capture the stack trace.
+// If the syscall does not block, that is it, we do not emit any other events.
+// If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
+// when syscall returns we emit traceGoSysExit and when the goroutine starts running
+// (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
+// To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
+// we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
+// whoever emits traceGoSysBlock increments p.syscalltick afterwards;
+// and we wait for the increment before emitting traceGoSysExit.
+// Note that the increment is done even if tracing is not enabled,
+// because tracing can be enabled in the middle of syscall. We don't want the wait to hang.
+//
+//go:nosplit
+//go:noinline
+func reentersyscall(pc, sp uintptr) {
+ _g_ := getg()
+
+ // Disable preemption because during this function g is in Gsyscall status,
+ // but can have inconsistent g->sched, do not let GC observe it.
+ _g_.m.locks++
+
+ _g_.syscallsp = sp
+ _g_.syscallpc = pc
+ casgstatus(_g_, _Grunning, _Gsyscall)
+
+ if trace.enabled {
+ systemstack(traceGoSysCall)
+ }
+
+ if atomic.Load(&sched.sysmonwait) != 0 {
+ systemstack(entersyscall_sysmon)
+ }
+
+ if _g_.m.p.ptr().runSafePointFn != 0 {
+ // runSafePointFn may stack split if run on this stack
+ systemstack(runSafePointFn)
+ }
+
+ _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
+ _g_.sysblocktraced = true
+ _g_.m.mcache = nil
+ _g_.m.p.ptr().m = 0
+ atomic.Store(&_g_.m.p.ptr().status, _Psyscall)
+ if sched.gcwaiting != 0 {
+ systemstack(entersyscall_gcwait)
+ }
+
+ _g_.m.locks--
+}
+
+func entersyscall_sysmon() {
+ lock(&sched.lock)
+ if atomic.Load(&sched.sysmonwait) != 0 {
+ atomic.Store(&sched.sysmonwait, 0)
+ notewakeup(&sched.sysmonnote)
+ }
+ unlock(&sched.lock)
+}
+
+func entersyscall_gcwait() {
+ _g_ := getg()
+ _p_ := _g_.m.p.ptr()
+
+ lock(&sched.lock)
+ if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
+ if trace.enabled {
+ traceGoSysBlock(_p_)
+ traceProcStop(_p_)
+ }
+ _p_.syscalltick++
+ if sched.stopwait--; sched.stopwait == 0 {
+ notewakeup(&sched.stopnote)
+ }
+ }
+ unlock(&sched.lock)
+}
+
+// The same as reentersyscall(), but with a hint that the syscall is blocking.
+//go:nosplit
+func reentersyscallblock(pc, sp uintptr) {
+ _g_ := getg()
+
+ _g_.m.locks++ // see comment in entersyscall
+ _g_.throwsplit = true
+ _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
+ _g_.sysblocktraced = true
+ _g_.m.p.ptr().syscalltick++
+
+ // Leave SP around for GC and traceback.
+ _g_.syscallsp = sp
+ _g_.syscallpc = pc
+ casgstatus(_g_, _Grunning, _Gsyscall)
+ systemstack(entersyscallblock_handoff)
+
+ _g_.m.locks--
+}
+
+func entersyscallblock_handoff() {
+ if trace.enabled {
+ traceGoSysCall()
+ traceGoSysBlock(getg().m.p.ptr())
+ }
+ handoffp(releasep())
+}
+
+// The goroutine g exited its system call.
+// Arrange for it to run on a cpu again.
+// This is called only from the go syscall library, not
+// from the low-level system calls used by the runtime.
+//
+// Write barriers are not allowed because our P may have been stolen.
+//
+//go:nosplit
+//go:nowritebarrierrec
+func exitsyscall(dummy int32) {
+ _g_ := getg()
+
+ _g_.m.locks++ // see comment in entersyscall
+
+ _g_.waitsince = 0
+ oldp := _g_.m.p.ptr()
+ if exitsyscallfast() {
+ if _g_.m.mcache == nil {
+ throw("lost mcache")
+ }
+ if trace.enabled {
+ if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
+ systemstack(traceGoStart)
+ }
+ }
+ // There's a cpu for us, so we can run.
+ _g_.m.p.ptr().syscalltick++
+ // We need to cas the status and scan before resuming...
+ casgstatus(_g_, _Gsyscall, _Grunning)
+
+ exitsyscallclear(_g_)
+ _g_.m.locks--
+ _g_.throwsplit = false
+ return
+ }
+
+ _g_.sysexitticks = 0
+ if trace.enabled {
+ // Wait till traceGoSysBlock event is emitted.
+ // This ensures consistency of the trace (the goroutine is started after it is blocked).
+ for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
+ osyield()
+ }
+ // We can't trace syscall exit right now because we don't have a P.
+ // Tracing code can invoke write barriers that cannot run without a P.
+ // So instead we remember the syscall exit time and emit the event
+ // in execute when we have a P.
+ _g_.sysexitticks = cputicks()
+ }
+
+ _g_.m.locks--
+
+ // Call the scheduler.
+ mcall(exitsyscall0)
+
+ if _g_.m.mcache == nil {
+ throw("lost mcache")
+ }
+
+ // Scheduler returned, so we're allowed to run now.
+ // Delete the syscallsp information that we left for
+ // the garbage collector during the system call.
+ // Must wait until now because until gosched returns
+ // we don't know for sure that the garbage collector
+ // is not running.
+ exitsyscallclear(_g_)
+
+ _g_.m.p.ptr().syscalltick++
+ _g_.throwsplit = false
+}
+
+//go:nosplit
+func exitsyscallfast() bool {
+ _g_ := getg()
+
+ // Freezetheworld sets stopwait but does not retake P's.
+ if sched.stopwait == freezeStopWait {
+ _g_.m.mcache = nil
+ _g_.m.p = 0
+ return false
+ }
+
+ // Try to re-acquire the last P.
+ if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) {
+ // There's a cpu for us, so we can run.
+ exitsyscallfast_reacquired()
+ return true
+ }
+
+ // Try to get any other idle P.
+ oldp := _g_.m.p.ptr()
+ _g_.m.mcache = nil
+ _g_.m.p = 0
+ if sched.pidle != 0 {
+ var ok bool
+ systemstack(func() {
+ ok = exitsyscallfast_pidle()
+ if ok && trace.enabled {
+ if oldp != nil {
+ // Wait till traceGoSysBlock event is emitted.
+ // This ensures consistency of the trace (the goroutine is started after it is blocked).
+ for oldp.syscalltick == _g_.m.syscalltick {
+ osyield()
+ }
+ }
+ traceGoSysExit(0)
+ }
+ })
+ if ok {
+ return true
+ }
+ }
+ return false
+}
+
+// exitsyscallfast_reacquired is the exitsyscall path on which this G
+// has successfully reacquired the P it was running on before the
+// syscall.
+//
+// This function is allowed to have write barriers because exitsyscall
+// has acquired a P at this point.
+//
+//go:yeswritebarrierrec
+//go:nosplit
+func exitsyscallfast_reacquired() {
+ _g_ := getg()
+ _g_.m.mcache = _g_.m.p.ptr().mcache
+ _g_.m.p.ptr().m.set(_g_.m)
+ if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
+ if trace.enabled {
+ // The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed).
+ // traceGoSysBlock for this syscall was already emitted,
+ // but here we effectively retake the p from the new syscall running on the same p.
+ systemstack(func() {
+ // Denote blocking of the new syscall.
+ traceGoSysBlock(_g_.m.p.ptr())
+ // Denote completion of the current syscall.
+ traceGoSysExit(0)
+ })
+ }
+ _g_.m.p.ptr().syscalltick++
+ }
+}
+
+func exitsyscallfast_pidle() bool {
+ lock(&sched.lock)
+ _p_ := pidleget()
+ if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
+ atomic.Store(&sched.sysmonwait, 0)
+ notewakeup(&sched.sysmonnote)
+ }
+ unlock(&sched.lock)
+ if _p_ != nil {
+ acquirep(_p_)
+ return true
+ }
+ return false
+}
+
+// exitsyscall slow path on g0.
+// Failed to acquire P, enqueue gp as runnable.
+//
+//go:nowritebarrierrec
+func exitsyscall0(gp *g) {
+ _g_ := getg()
+
+ casgstatus(gp, _Gsyscall, _Grunnable)
+ dropg()
+ lock(&sched.lock)
+ _p_ := pidleget()
+ if _p_ == nil {
+ globrunqput(gp)
+ } else if atomic.Load(&sched.sysmonwait) != 0 {
+ atomic.Store(&sched.sysmonwait, 0)
+ notewakeup(&sched.sysmonnote)
+ }
+ unlock(&sched.lock)
+ if _p_ != nil {
+ acquirep(_p_)
+ execute(gp, false) // Never returns.
+ }
+ if _g_.m.lockedg != nil {
+ // Wait until another thread schedules gp and so m again.
+ stoplockedm()
+ execute(gp, false) // Never returns.
+ }
+ stopm()
+ schedule() // Never returns.
+}
+
+// exitsyscallclear clears GC-related information that we only track
+// during a syscall.
+func exitsyscallclear(gp *g) {
+ // Garbage collector isn't running (since we are), so okay to
+ // clear syscallsp.
+ gp.syscallsp = 0
+
+ gp.gcstack = nil
+ gp.gcnextsp = nil
+ memclrNoHeapPointers(unsafe.Pointer(&gp.gcregs), unsafe.Sizeof(gp.gcregs))
+}
+
+// Code generated by cgo, and some library code, calls syscall.Entersyscall
+// and syscall.Exitsyscall.
+
+//go:linkname syscall_entersyscall syscall.Entersyscall
+//go:nosplit
+func syscall_entersyscall() {
+ entersyscall(0)
+}
+
+//go:linkname syscall_exitsyscall syscall.Exitsyscall
+//go:nosplit
+func syscall_exitsyscall() {
+ exitsyscall(0)
+}
+
func beforefork() {
gp := getg().m.curg
@@ -1671,6 +2530,83 @@ func syscall_runtime_AfterFork() {
systemstack(afterfork)
}
+// Create a new g running fn passing arg as the single argument.
+// Put it on the queue of g's waiting to run.
+// The compiler turns a go statement into a call to this.
+//go:linkname newproc __go_go
+func newproc(fn uintptr, arg unsafe.Pointer) *g {
+ _g_ := getg()
+
+ if fn == 0 {
+ _g_.m.throwing = -1 // do not dump full stacks
+ throw("go of nil func value")
+ }
+ _g_.m.locks++ // disable preemption because it can be holding p in a local var
+
+ _p_ := _g_.m.p.ptr()
+ newg := gfget(_p_)
+ var (
+ sp unsafe.Pointer
+ spsize uintptr
+ )
+ if newg == nil {
+ newg = malg(true, false, &sp, &spsize)
+ casgstatus(newg, _Gidle, _Gdead)
+ newg.gcRescan = -1
+ allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
+ } else {
+ resetNewG(newg, &sp, &spsize)
+ }
+ newg.traceback = nil
+
+ if readgstatus(newg) != _Gdead {
+ throw("newproc1: new g is not Gdead")
+ }
+
+ newg.entry = fn
+ newg.param = arg
+ newg.gopc = getcallerpc(unsafe.Pointer(&fn))
+ if isSystemGoroutine(newg) {
+ atomic.Xadd(&sched.ngsys, +1)
+ }
+ // The stack is dirty from the argument frame, so queue it for
+ // scanning. Do this before setting it to runnable so we still
+ // own the G. If we're recycling a G, it may already be on the
+ // rescan list.
+ if newg.gcRescan == -1 {
+ queueRescan(newg)
+ } else {
+ // The recycled G is already on the rescan list. Just
+ // mark the stack dirty.
+ newg.gcscanvalid = false
+ }
+ casgstatus(newg, _Gdead, _Grunnable)
+
+ if _p_.goidcache == _p_.goidcacheend {
+ // Sched.goidgen is the last allocated id,
+ // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
+ // At startup sched.goidgen=0, so main goroutine receives goid=1.
+ _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
+ _p_.goidcache -= _GoidCacheBatch - 1
+ _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
+ }
+ newg.goid = int64(_p_.goidcache)
+ _p_.goidcache++
+ if trace.enabled {
+ traceGoCreate(newg, newg.startpc)
+ }
+
+ makeGContext(newg, sp, spsize)
+
+ runqput(_p_, newg, true)
+
+ if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && runtimeInitTime != 0 {
+ wakep()
+ }
+ _g_.m.locks--
+ return newg
+}
+
// Put on gfree list.
// If local list is too long, transfer a batch to the global list.
func gfput(_p_ *p, gp *g) {
@@ -1738,6 +2674,11 @@ func gfpurge(_p_ *p) {
unlock(&sched.gflock)
}
+// Breakpoint executes a breakpoint trap.
+func Breakpoint() {
+ breakpoint()
+}
+
// dolockOSThread is called by LockOSThread and lockOSThread below
// after they modify m.locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.
@@ -1822,6 +2763,152 @@ func mcount() int32 {
return sched.mcount
}
+var prof struct {
+ lock uint32
+ hz int32
+}
+
+func _System() { _System() }
+func _ExternalCode() { _ExternalCode() }
+func _GC() { _GC() }
+
+var _SystemPC = funcPC(_System)
+var _ExternalCodePC = funcPC(_ExternalCode)
+var _GCPC = funcPC(_GC)
+
+// Called if we receive a SIGPROF signal.
+// Called by the signal handler, may run during STW.
+//go:nowritebarrierrec
+func sigprof(pc uintptr, gp *g, mp *m) {
+ if prof.hz == 0 {
+ return
+ }
+
+ // Profiling runs concurrently with GC, so it must not allocate.
+ // Set a trap in case the code does allocate.
+ // Note that on windows, one thread takes profiles of all the
+ // other threads, so mp is usually not getg().m.
+ // In fact mp may not even be stopped.
+ // See golang.org/issue/17165.
+ getg().m.mallocing++
+
+ traceback := true
+
+ // If SIGPROF arrived while already fetching runtime callers
+ // we can have trouble on older systems because the unwind
+ // library calls dl_iterate_phdr which was not reentrant in
+ // the past. alreadyInCallers checks for that.
+ if gp == nil || alreadyInCallers() {
+ traceback = false
+ }
+
+ var stk [maxCPUProfStack]uintptr
+ n := 0
+ if traceback {
+ var stklocs [maxCPUProfStack]location
+ n = callers(0, stklocs[:])
+
+ for i := 0; i < n; i++ {
+ stk[i] = stklocs[i].pc
+ }
+ }
+
+ if n <= 0 {
+ // Normal traceback is impossible or has failed.
+ // Account it against abstract "System" or "GC".
+ n = 2
+ stk[0] = pc
+ if mp.preemptoff != "" || mp.helpgc != 0 {
+ stk[1] = _GCPC + sys.PCQuantum
+ } else {
+ stk[1] = _SystemPC + sys.PCQuantum
+ }
+ }
+
+ if prof.hz != 0 {
+ // Simple cas-lock to coordinate with setcpuprofilerate.
+ for !atomic.Cas(&prof.lock, 0, 1) {
+ osyield()
+ }
+ if prof.hz != 0 {
+ cpuprof.add(stk[:n])
+ }
+ atomic.Store(&prof.lock, 0)
+ }
+ getg().m.mallocing--
+}
+
+// Use global arrays rather than using up lots of stack space in the
+// signal handler. This is safe since while we are executing a SIGPROF
+// signal other SIGPROF signals are blocked.
+var nonprofGoStklocs [maxCPUProfStack]location
+var nonprofGoStk [maxCPUProfStack]uintptr
+
+// sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
+// and the signal handler collected a stack trace in sigprofCallers.
+// When this is called, sigprofCallersUse will be non-zero.
+// g is nil, and what we can do is very limited.
+//go:nosplit
+//go:nowritebarrierrec
+func sigprofNonGo(pc uintptr) {
+ if prof.hz != 0 {
+ n := callers(0, nonprofGoStklocs[:])
+
+ for i := 0; i < n; i++ {
+ nonprofGoStk[i] = nonprofGoStklocs[i].pc
+ }
+
+ if n <= 0 {
+ n = 2
+ nonprofGoStk[0] = pc
+ nonprofGoStk[1] = _ExternalCodePC + sys.PCQuantum
+ }
+
+ // Simple cas-lock to coordinate with setcpuprofilerate.
+ for !atomic.Cas(&prof.lock, 0, 1) {
+ osyield()
+ }
+ if prof.hz != 0 {
+ cpuprof.addNonGo(nonprofGoStk[:n])
+ }
+ atomic.Store(&prof.lock, 0)
+ }
+}
+
+// Arrange to call fn with a traceback hz times a second.
+func setcpuprofilerate_m(hz int32) {
+ // Force sane arguments.
+ if hz < 0 {
+ hz = 0
+ }
+
+ // Disable preemption, otherwise we can be rescheduled to another thread
+ // that has profiling enabled.
+ _g_ := getg()
+ _g_.m.locks++
+
+ // Stop profiler on this thread so that it is safe to lock prof.
+ // if a profiling signal came in while we had prof locked,
+ // it would deadlock.
+ resetcpuprofiler(0)
+
+ for !atomic.Cas(&prof.lock, 0, 1) {
+ osyield()
+ }
+ prof.hz = hz
+ atomic.Store(&prof.lock, 0)
+
+ lock(&sched.lock)
+ sched.profilehz = hz
+ unlock(&sched.lock)
+
+ if hz != 0 {
+ resetcpuprofiler(hz)
+ }
+
+ _g_.m.locks--
+}
+
// Change number of processors. The world is stopped, sched is locked.
// gcworkbufs are not being modified by either the GC or
// the write barrier code.
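
[Note, not part of the patch: with proc.go now carrying the scheduler entry points itself (Gosched, gopark/goparkunlock, lockOSThread, Breakpoint), the surface seen by user code is unchanged. A small, purely illustrative user-level sketch of the yield and thread-lock calls that end up in the code above:]

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// runtime.main locks the main goroutine to the main OS thread during
	// init; user code can do the same around thread-sensitive work.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	done := make(chan struct{})
	go func() {
		fmt.Println("worker ran")
		close(done)
	}()

	runtime.Gosched() // yield, giving the worker a chance to run
	<-done
}
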
diff --git a/libgo/go/runtime/runtime2.go b/libgo/go/runtime/runtime2.go
index 195d65b..f5599ee 100644
--- a/libgo/go/runtime/runtime2.go
+++ b/libgo/go/runtime/runtime2.go
@@ -409,8 +409,8 @@ type g struct {
gcinitialsp unsafe.Pointer
gcregs g_ucontext_t
- entry unsafe.Pointer // goroutine entry point
- fromgogo bool // whether entered from gogo function
+ entry uintptr // goroutine entry point
+ fromgogo bool // whether entered from gogo function
issystem bool // do not output in stack dump
isbackground bool // ignore in deadlock detector
@@ -431,7 +431,7 @@ type m struct {
gsignal *g // signal-handling g
sigmask sigset // storage for saved signal mask
// Not for gccgo: tls [6]uintptr // thread-local storage (for x86 extern register)
- mstartfn uintptr
+ mstartfn func()
curg *g // current running goroutine
caughtsig guintptr // goroutine running during fatal signal
p puintptr // attached p for executing go code (nil if not executing go code)
@@ -714,10 +714,6 @@ type _defer struct {
// function function will be somewhere in libffi, so __retaddr
// is not useful.
makefunccanrecover bool
-
- // Set to true if this defer stack entry is not part of the
- // defer pool.
- special bool
}
// panics
diff --git a/libgo/go/runtime/signal_sighandler.go b/libgo/go/runtime/signal_sighandler.go
index 279001b..f3ec7d4 100644
--- a/libgo/go/runtime/signal_sighandler.go
+++ b/libgo/go/runtime/signal_sighandler.go
@@ -29,13 +29,13 @@ func sighandler(sig uint32, info *_siginfo_t, ctxt unsafe.Pointer, gp *g) {
_g_ := getg()
c := sigctxt{info, ctxt}
+ sigfault, sigpc := getSiginfo(info, ctxt)
+
if sig == _SIGPROF {
- sigprof()
+ sigprof(sigpc, gp, _g_.m)
return
}
- sigfault, sigpc := getSiginfo(info, ctxt)
-
flags := int32(_SigThrow)
if sig < uint32(len(sigtable)) {
flags = sigtable[sig].flags
diff --git a/libgo/go/runtime/signal_unix.go b/libgo/go/runtime/signal_unix.go
index 13b7930..ce09225 100644
--- a/libgo/go/runtime/signal_unix.go
+++ b/libgo/go/runtime/signal_unix.go
@@ -216,7 +216,7 @@ func sigtrampgo(sig uint32, info *_siginfo_t, ctx unsafe.Pointer) {
c := sigctxt{info, ctx}
if sig == _SIGPROF {
_, pc := getSiginfo(info, ctx)
- sigprofNonGoPC(pc)
+ sigprofNonGo(pc)
return
}
badsignal(uintptr(sig), &c)
diff --git a/libgo/go/runtime/stubs.go b/libgo/go/runtime/stubs.go
index bf9f62e..d891686 100644
--- a/libgo/go/runtime/stubs.go
+++ b/libgo/go/runtime/stubs.go
@@ -125,10 +125,13 @@ func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32
//go:noescape
func jmpdefer(fv *funcval, argp uintptr)
func exit1(code int32)
-func asminit()
func setg(gg *g)
+
+//extern __builtin_trap
func breakpoint()
+func asminit() {}
+
// reflectcall calls fn with a copy of the n argument bytes pointed at by arg.
// After fn returns, reflectcall copies n-retoffset result bytes
// back into arg+retoffset before returning. If copying result bytes back,
@@ -266,18 +269,6 @@ func setIsCgo() {
iscgo = true
}
-// Temporary for gccgo until we port proc.go.
-//go:linkname makeMainInitDone runtime.makeMainInitDone
-func makeMainInitDone() {
- main_init_done = make(chan bool)
-}
-
-// Temporary for gccgo until we port proc.go.
-//go:linkname closeMainInitDone runtime.closeMainInitDone
-func closeMainInitDone() {
- close(main_init_done)
-}
-
// For gccgo, to communicate from the C code to the Go code.
//go:linkname setCpuidECX runtime.setCpuidECX
func setCpuidECX(v uint32) {
@@ -363,7 +354,8 @@ var writeBarrier struct {
alignme uint64 // guarantee alignment so that compiler can use a 32 or 64-bit load
}
-func queueRescan(*g) {
+func queueRescan(gp *g) {
+ gp.gcscanvalid = false
}
// Here for gccgo until we port atomic_pointer.go and mgc.go.
@@ -385,9 +377,6 @@ func errno() int
// Temporary for gccgo until we port proc.go.
func entersyscall(int32)
func entersyscallblock(int32)
-func exitsyscall(int32)
-func gopark(func(*g, unsafe.Pointer) bool, unsafe.Pointer, string, byte, int)
-func goparkunlock(*mutex, string, byte, int)
// Temporary hack for gccgo until we port the garbage collector.
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {}
@@ -417,27 +406,10 @@ func getMstats() *mstats {
return &memstats
}
-// Temporary for gccgo until we port proc.go.
-func setcpuprofilerate_m(hz int32)
-
// Temporary for gccgo until we port mem_GOOS.go.
func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer
func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64)
-// Temporary for gccgo until we port proc.go, so that the C signal
-// handler can call into cpuprof.
-//go:linkname cpuprofAdd runtime.cpuprofAdd
-func cpuprofAdd(stk []uintptr) {
- cpuprof.add(stk)
-}
-
-// For gccgo until we port proc.go.
-func Breakpoint()
-func LockOSThread()
-func UnlockOSThread()
-func lockOSThread()
-func unlockOSThread()
-
// Temporary for gccgo until we port malloc.go
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer
@@ -471,10 +443,6 @@ func getZerobase() *uintptr {
return &zerobase
}
-// Temporary for gccgo until we port proc.go.
-func sigprof()
-func goexit1()
-
// Get signal trampoline, written in C.
func getSigtramp() uintptr
@@ -608,7 +576,9 @@ type mheap struct {
var mheap_ mheap
// Temporary for gccgo until we port mheap.go.
+func scavenge(int32, uint64, uint64)
func (h *mheap) scavenge(k int32, now, limit uint64) {
+ scavenge(k, now, limit)
}
// Temporary for gccgo until we initialize ncpu in Go.
@@ -640,3 +610,43 @@ var gcMarkWorkerModeStrings = [...]string{
"GC (fractional)",
"GC (idle)",
}
+
+// Temporary for gccgo until we port mgc.go.
+func gcenable() {
+ memstats.enablegc = true
+}
+
+// Temporary for gccgo until we port mgc.go.
+func gcinit() {
+}
+
+// Temporary for gccgo until we port mgc.go.
+//go:linkname runtime_m0 runtime.runtime_m0
+func runtime_m0() *m {
+ return &m0
+}
+
+// Temporary for gccgo until we port mgc.go.
+//go:linkname runtime_g0 runtime.runtime_g0
+func runtime_g0() *g {
+ return &g0
+}
+
+// Temporary for gccgo until we port mgc.go.
+type gcMode int
+
+const (
+ gcBackgroundMode gcMode = iota // concurrent GC and sweep
+ gcForceMode // stop-the-world GC now, concurrent sweep
+ gcForceBlockMode // stop-the-world GC now and STW sweep (forced by user)
+)
+
+// Temporary for gccgo until we port mgc.go.
+func gc(int32)
+func gcStart(mode gcMode, forceTrigger bool) {
+ var force int32
+ if forceTrigger {
+ force = 1
+ }
+ gc(force)
+}
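
The stubs.go additions above follow one bridging style: keep the Go-level signature (gcStart takes a gcMode and a bool) and translate it into the int32 force flag that the C collector still expects. A hedged sketch of that style, with gcImpl standing in for the C runtime_gc that the real code reaches through an assembler name:

    package main

    import "fmt"

    type gcMode int

    const (
        gcBackgroundMode gcMode = iota // concurrent GC and sweep
        gcForceMode                    // stop-the-world GC now, concurrent sweep
        gcForceBlockMode               // stop-the-world GC now and STW sweep
    )

    // gcImpl is a placeholder for the C-side collector entry point.
    func gcImpl(force int32) { fmt.Println("C gc called with force =", force) }

    // gcStart converts the Go-style arguments into the C calling convention.
    func gcStart(mode gcMode, forceTrigger bool) {
        var force int32
        if forceTrigger {
            force = 1
        }
        gcImpl(force)
    }

    func main() {
        gcStart(gcForceMode, true)
    }
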
diff --git a/libgo/go/runtime/symtab.go b/libgo/go/runtime/symtab.go
index 52e2d03..bad0347 100644
--- a/libgo/go/runtime/symtab.go
+++ b/libgo/go/runtime/symtab.go
@@ -115,11 +115,17 @@ func FuncForPC(pc uintptr) *Func {
// Name returns the name of the function.
func (f *Func) Name() string {
+ if f == nil {
+ return ""
+ }
return f.name
}
// Entry returns the entry address of the function.
func (f *Func) Entry() uintptr {
+ if f == nil {
+ return 0
+ }
return f.entry
}
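
The symtab.go hunk above adds nil-receiver guards so that a nil *Func (for example, the result of looking up a PC the symbol table does not know) no longer crashes the caller. A standalone sketch of the same pattern:

    package main

    import "fmt"

    // Func stands in for runtime.Func; only the guarded accessors matter here.
    type Func struct {
        name  string
        entry uintptr
    }

    // Name returns "" instead of dereferencing a nil receiver.
    func (f *Func) Name() string {
        if f == nil {
            return ""
        }
        return f.name
    }

    // Entry returns 0 instead of dereferencing a nil receiver.
    func (f *Func) Entry() uintptr {
        if f == nil {
            return 0
        }
        return f.entry
    }

    func main() {
        var f *Func // e.g. a lookup that found nothing
        fmt.Printf("name=%q entry=%#x\n", f.Name(), f.Entry())
    }
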
diff --git a/libgo/runtime/go-breakpoint.c b/libgo/runtime/go-breakpoint.c
deleted file mode 100644
index e403a2a..0000000
--- a/libgo/runtime/go-breakpoint.c
+++ /dev/null
@@ -1,17 +0,0 @@
-/* go-breakpoint.c -- the runtime.Breakpoint function.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include <sched.h>
-
-#include "runtime.h"
-
-void Breakpoint (void) __asm__ (GOSYM_PREFIX "runtime.Breakpoint");
-
-void
-Breakpoint (void)
-{
- __builtin_trap ();
-}
diff --git a/libgo/runtime/go-callers.c b/libgo/runtime/go-callers.c
index 0c1c603..b5379f7 100644
--- a/libgo/runtime/go-callers.c
+++ b/libgo/runtime/go-callers.c
@@ -16,7 +16,7 @@
older versions of glibc when a SIGPROF signal arrives while
collecting a backtrace. */
-uint32 runtime_in_callers;
+static uint32 runtime_in_callers;
/* Argument passed to callback function. */
@@ -128,8 +128,8 @@ callback (void *data, uintptr_t pc, const char *filename, int lineno,
if (__builtin_strcmp (p, "/proc.c") == 0)
{
if (__builtin_strcmp (function, "kickoff") == 0
- || __builtin_strcmp (function, "runtime_mstart") == 0
- || __builtin_strcmp (function, "runtime_main") == 0)
+ || __builtin_strcmp (function, "runtime.mstart") == 0
+ || __builtin_strcmp (function, "runtime.main") == 0)
return 1;
}
}
@@ -154,6 +154,20 @@ error_callback (void *data __attribute__ ((unused)),
runtime_throw (msg);
}
+/* Return whether we are already collecting a stack trace. This is
+ called from the signal handler. */
+
+bool alreadyInCallers(void)
+ __attribute__ ((no_split_stack));
+bool alreadyInCallers(void)
+ __asm__ (GOSYM_PREFIX "runtime.alreadyInCallers");
+
+bool
+alreadyInCallers()
+{
+ return runtime_atomicload(&runtime_in_callers) > 0;
+}
+
/* Gather caller PC's. */
int32
diff --git a/libgo/runtime/go-libmain.c b/libgo/runtime/go-libmain.c
index 8e07e90..71020de 100644
--- a/libgo/runtime/go-libmain.c
+++ b/libgo/runtime/go-libmain.c
@@ -100,6 +100,7 @@ gostart (void *arg)
runtime_check ();
runtime_args (a->argc, (byte **) a->argv);
runtime_osinit ();
+ runtime_sched = runtime_getsched();
runtime_schedinit ();
__go_go (runtime_main, NULL);
runtime_mstart (runtime_m ());
diff --git a/libgo/runtime/go-main.c b/libgo/runtime/go-main.c
index dba8085..04d5f42 100644
--- a/libgo/runtime/go-main.c
+++ b/libgo/runtime/go-main.c
@@ -53,6 +53,7 @@ main (int argc, char **argv)
runtime_check ();
runtime_args (argc, (byte **) argv);
runtime_osinit ();
+ runtime_sched = runtime_getsched();
runtime_schedinit ();
__go_go (runtime_main, NULL);
runtime_mstart (runtime_m ());
diff --git a/libgo/runtime/malloc.h b/libgo/runtime/malloc.h
index 00e4166..829464f 100644
--- a/libgo/runtime/malloc.h
+++ b/libgo/runtime/malloc.h
@@ -405,14 +405,14 @@ void runtime_MGetSizeClassInfo(int32 sizeclass, uintptr *size, int32 *npages, in
void* runtime_MHeap_SysAlloc(MHeap *h, uintptr n);
void runtime_MHeap_MapBits(MHeap *h);
void runtime_MHeap_MapSpans(MHeap *h);
-void runtime_MHeap_Scavenger(void*);
void runtime_MHeap_SplitSpan(MHeap *h, MSpan *s);
void* runtime_mallocgc(uintptr size, uintptr typ, uint32 flag);
void* runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat)
__asm__(GOSYM_PREFIX "runtime.persistentalloc");
int32 runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **s);
-void runtime_gc(int32 force);
+void runtime_gc(int32 force)
+ __asm__(GOSYM_PREFIX "runtime.gc");
uintptr runtime_sweepone(void);
void runtime_markscan(void *v);
void runtime_marknogc(void *v);
diff --git a/libgo/runtime/mgc0.c b/libgo/runtime/mgc0.c
index fc54241..e1a648a 100644
--- a/libgo/runtime/mgc0.c
+++ b/libgo/runtime/mgc0.c
@@ -132,7 +132,7 @@ clearpools(void)
poolcleanup);
}
- for(pp=runtime_allp; (p=*pp) != nil; pp++) {
+ for(pp=runtime_getAllP(); (p=*pp) != nil; pp++) {
// clear tinyalloc pool
c = p->mcache;
if(c != nil) {
@@ -1277,9 +1277,9 @@ markroot(ParFor *desc, uint32 i)
case RootBss:
// For gccgo we use this for all the other global roots.
- enqueue1(&wbuf, (Obj){(byte*)&runtime_m0, sizeof runtime_m0, 0});
- enqueue1(&wbuf, (Obj){(byte*)&runtime_g0, sizeof runtime_g0, 0});
- enqueue1(&wbuf, (Obj){(byte*)&runtime_allp, sizeof runtime_allp, 0});
+ enqueue1(&wbuf, (Obj){(byte*)runtime_m0(), sizeof(M), 0});
+ enqueue1(&wbuf, (Obj){(byte*)runtime_g0(), sizeof(G), 0});
+ enqueue1(&wbuf, (Obj){(byte*)runtime_getAllP(), _MaxGomaxprocs * sizeof(P*), 0});
enqueue1(&wbuf, (Obj){(byte*)&work, sizeof work, 0});
break;
@@ -1821,7 +1821,7 @@ bgsweep(void* dummy __attribute__ ((unused)))
}
sweep.parked = true;
runtime_g()->isbackground = true;
- runtime_parkunlock(&gclock, "GC sweep wait");
+ runtime_goparkunlock(&gclock, runtime_gostringnocopy((const byte*)"GC sweep wait"), traceEvGoBlock, 1);
runtime_g()->isbackground = false;
}
}
@@ -1965,7 +1965,7 @@ cachestats(void)
MCache *c;
P *p, **pp;
- for(pp=runtime_allp; (p=*pp) != nil; pp++) {
+ for(pp=runtime_getAllP(); (p=*pp) != nil; pp++) {
c = p->mcache;
if(c==nil)
continue;
@@ -1980,7 +1980,7 @@ flushallmcaches(void)
MCache *c;
// Flush MCache's to MCentral.
- for(pp=runtime_allp; (p=*pp) != nil; pp++) {
+ for(pp=runtime_getAllP(); (p=*pp) != nil; pp++) {
c = p->mcache;
if(c==nil)
continue;
@@ -2469,7 +2469,7 @@ runfinq(void* dummy __attribute__ ((unused)))
if(fb == nil) {
runtime_fingwait = true;
runtime_g()->isbackground = true;
- runtime_parkunlock(&finlock, "finalizer wait");
+ runtime_goparkunlock(&finlock, runtime_gostringnocopy((const byte*)"finalizer wait"), traceEvGoBlock, 1);
runtime_g()->isbackground = false;
continue;
}
diff --git a/libgo/runtime/mheap.c b/libgo/runtime/mheap.c
index c167bdc..80b9c8f 100644
--- a/libgo/runtime/mheap.c
+++ b/libgo/runtime/mheap.c
@@ -457,15 +457,6 @@ MHeap_FreeLocked(MHeap *h, MSpan *s)
runtime_MSpanList_Insert(&h->freelarge, s);
}
-static void
-forcegchelper(void *vnote)
-{
- Note *note = (Note*)vnote;
-
- runtime_gc(1);
- runtime_notewakeup(note);
-}
-
static uintptr
scavengelist(MSpan *list, uint64 now, uint64 limit)
{
@@ -498,7 +489,10 @@ scavengelist(MSpan *list, uint64 now, uint64 limit)
return sumreleased;
}
-static void
+void scavenge(int32, uint64, uint64)
+ __asm__ (GOSYM_PREFIX "runtime.scavenge");
+
+void
scavenge(int32 k, uint64 now, uint64 limit)
{
uint32 i;
@@ -520,62 +514,6 @@ scavenge(int32 k, uint64 now, uint64 limit)
}
}
-// Release (part of) unused memory to OS.
-// Goroutine created at startup.
-// Loop forever.
-void
-runtime_MHeap_Scavenger(void* dummy)
-{
- G *g;
- MHeap *h;
- uint64 tick, now, forcegc, limit;
- int64 unixnow;
- uint32 k;
- Note note, *notep;
-
- USED(dummy);
-
- g = runtime_g();
- g->issystem = true;
- g->isbackground = true;
-
- // If we go two minutes without a garbage collection, force one to run.
- forcegc = 2*60*1e9;
- // If a span goes unused for 5 minutes after a garbage collection,
- // we hand it back to the operating system.
- limit = 5*60*1e9;
- // Make wake-up period small enough for the sampling to be correct.
- if(forcegc < limit)
- tick = forcegc/2;
- else
- tick = limit/2;
-
- h = &runtime_mheap;
- for(k=0;; k++) {
- runtime_noteclear(&note);
- runtime_notetsleepg(&note, tick);
-
- runtime_lock(h);
- unixnow = runtime_unixnanotime();
- if(unixnow - mstats()->last_gc > forcegc) {
- runtime_unlock(h);
- // The scavenger can not block other goroutines,
- // otherwise deadlock detector can fire spuriously.
- // GC blocks other goroutines via the runtime_worldsema.
- runtime_noteclear(&note);
- notep = &note;
- __go_go(forcegchelper, (void*)notep);
- runtime_notetsleepg(&note, -1);
- if(runtime_debug.gctrace > 0)
- runtime_printf("scvg%d: GC forced\n", k);
- runtime_lock(h);
- }
- now = runtime_nanotime();
- scavenge(k, now, limit);
- runtime_unlock(h);
- }
-}
-
void runtime_debug_freeOSMemory(void) __asm__("runtime_debug.freeOSMemory");
void
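
The C scavenger deleted above encoded its policy directly in code: force a collection after two minutes without one, return spans unused for five minutes to the OS, and wake up at half the smaller interval so both thresholds are sampled often enough. That loop now lives on the Go side of the runtime; the sketch below reproduces only the tick computation from the removed code.

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        forcegc := 2 * time.Minute // force a GC after this much idle time
        limit := 5 * time.Minute   // release spans unused for this long

        // Make the wake-up period small enough for the sampling to be correct.
        tick := limit / 2
        if forcegc < limit {
            tick = forcegc / 2
        }
        fmt.Println("wake-up period:", tick) // 1m0s
    }
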
diff --git a/libgo/runtime/proc.c b/libgo/runtime/proc.c
index 06a9c2a..fd0837b 100644
--- a/libgo/runtime/proc.c
+++ b/libgo/runtime/proc.c
@@ -210,8 +210,11 @@ runtime_setg(G* gp)
g = gp;
}
+void runtime_newosproc(M *)
+ __asm__(GOSYM_PREFIX "runtime.newosproc");
+
// Start a new thread.
-static void
+void
runtime_newosproc(M *mp)
{
pthread_attr_t attr;
@@ -265,7 +268,7 @@ kickoff(void)
fn = (void (*)(void*))(g->entry);
param = g->param;
- g->entry = nil;
+ g->entry = 0;
g->param = nil;
fn(param);
runtime_goexit1();
@@ -338,7 +341,7 @@ runtime_mcall(void (*pfn)(G*))
#ifdef USING_SPLIT_STACK
__splitstack_setcontext(&mp->g0->stackcontext[0]);
#endif
- mp->g0->entry = (byte*)pfn;
+ mp->g0->entry = (uintptr)pfn;
mp->g0->param = gp;
// It's OK to set g directly here because this case
@@ -352,6 +355,16 @@ runtime_mcall(void (*pfn)(G*))
}
}
+// mcall called from Go code.
+void gomcall(FuncVal *)
+ __asm__ (GOSYM_PREFIX "runtime.mcall");
+
+void
+gomcall(FuncVal *fv)
+{
+ runtime_mcall((void*)fv->fn);
+}
+
// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
@@ -364,52 +377,18 @@ runtime_mcall(void (*pfn)(G*))
//
// Design doc at http://golang.org/s/go11sched.
-enum
-{
- // Number of goroutine ids to grab from runtime_sched->goidgen to local per-P cache at once.
- // 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.
- GoidCacheBatch = 16,
-};
-
-extern Sched* runtime_getsched() __asm__ (GOSYM_PREFIX "runtime.getsched");
extern bool* runtime_getCgoHasExtraM()
__asm__ (GOSYM_PREFIX "runtime.getCgoHasExtraM");
-extern P** runtime_getAllP()
- __asm__ (GOSYM_PREFIX "runtime.getAllP");
extern G* allocg(void)
__asm__ (GOSYM_PREFIX "runtime.allocg");
-extern bool needaddgcproc(void)
- __asm__ (GOSYM_PREFIX "runtime.needaddgcproc");
-extern void startm(P*, bool)
- __asm__(GOSYM_PREFIX "runtime.startm");
-extern void newm(void(*)(void), P*)
- __asm__(GOSYM_PREFIX "runtime.newm");
Sched* runtime_sched;
-M runtime_m0;
-G runtime_g0; // idle goroutine for m0
-G* runtime_lastg;
-P** runtime_allp;
-int8* runtime_goos;
int32 runtime_ncpu;
-bool runtime_precisestack;
bool runtime_isarchive;
-void* runtime_mstart(void*);
-static void exitsyscall0(G*);
-static void park0(G*);
-static void goexit0(G*);
-static bool exitsyscallfast(void);
-
-extern void setncpu(int32)
- __asm__(GOSYM_PREFIX "runtime.setncpu");
-extern void setpagesize(uintptr_t)
- __asm__(GOSYM_PREFIX "runtime.setpagesize");
-extern void allgadd(G*)
- __asm__(GOSYM_PREFIX "runtime.allgadd");
-extern void mcommoninit(M*)
- __asm__(GOSYM_PREFIX "runtime.mcommoninit");
+extern void mstart1(void)
+ __asm__(GOSYM_PREFIX "runtime.mstart1");
extern void stopm(void)
__asm__(GOSYM_PREFIX "runtime.stopm");
extern void handoffp(P*)
@@ -422,107 +401,25 @@ extern void schedule(void)
__asm__(GOSYM_PREFIX "runtime.schedule");
extern void execute(G*, bool)
__asm__(GOSYM_PREFIX "runtime.execute");
-extern void gfput(P*, G*)
- __asm__(GOSYM_PREFIX "runtime.gfput");
+extern void reentersyscall(uintptr, uintptr)
+ __asm__(GOSYM_PREFIX "runtime.reentersyscall");
+extern void reentersyscallblock(uintptr, uintptr)
+ __asm__(GOSYM_PREFIX "runtime.reentersyscallblock");
extern G* gfget(P*)
__asm__(GOSYM_PREFIX "runtime.gfget");
-extern void procresize(int32)
- __asm__(GOSYM_PREFIX "runtime.procresize");
extern void acquirep(P*)
__asm__(GOSYM_PREFIX "runtime.acquirep");
extern P* releasep(void)
__asm__(GOSYM_PREFIX "runtime.releasep");
extern void incidlelocked(int32)
__asm__(GOSYM_PREFIX "runtime.incidlelocked");
-extern void checkdead(void)
- __asm__(GOSYM_PREFIX "runtime.checkdead");
-extern void sysmon(void)
- __asm__(GOSYM_PREFIX "runtime.sysmon");
-extern void mput(M*)
- __asm__(GOSYM_PREFIX "runtime.mput");
-extern M* mget(void)
- __asm__(GOSYM_PREFIX "runtime.mget");
extern void globrunqput(G*)
__asm__(GOSYM_PREFIX "runtime.globrunqput");
extern P* pidleget(void)
__asm__(GOSYM_PREFIX "runtime.pidleget");
-extern bool runqempty(P*)
- __asm__(GOSYM_PREFIX "runtime.runqempty");
-extern void runqput(P*, G*, bool)
- __asm__(GOSYM_PREFIX "runtime.runqput");
bool runtime_isstarted;
-// The bootstrap sequence is:
-//
-// call osinit
-// call schedinit
-// make & queue new G
-// call runtime_mstart
-//
-// The new G calls runtime_main.
-void
-runtime_schedinit(void)
-{
- M *m;
- int32 n, procs;
- String s;
- const byte *p;
- Eface i;
-
- setncpu(runtime_ncpu);
- setpagesize(getpagesize());
- runtime_sched = runtime_getsched();
-
- m = &runtime_m0;
- g = &runtime_g0;
- m->g0 = g;
- m->curg = g;
- g->m = m;
-
- initcontext();
-
- runtime_sched->maxmcount = 10000;
- runtime_precisestack = 0;
-
- // runtime_symtabinit();
- runtime_mallocinit();
- mcommoninit(m);
- runtime_alginit(); // maps must not be used before this call
-
- // Initialize the itable value for newErrorCString,
- // so that the next time it gets called, possibly
- // in a fault during a garbage collection, it will not
- // need to allocated memory.
- runtime_newErrorCString(0, &i);
-
- // Initialize the cached gotraceback value, since
- // gotraceback calls getenv, which mallocs on Plan 9.
- runtime_gotraceback(nil);
-
- runtime_goargs();
- runtime_goenvs();
- runtime_parsedebugvars();
-
- runtime_sched->lastpoll = runtime_nanotime();
- procs = 1;
- s = runtime_getenv("GOMAXPROCS");
- p = s.str;
- if(p != nil && (n = runtime_atoi(p, s.len)) > 0) {
- if(n > _MaxGomaxprocs)
- n = _MaxGomaxprocs;
- procs = n;
- }
- runtime_allp = runtime_getAllP();
- procresize(procs);
-
- // Can not enable GC until all roots are registered.
- // mstats()->enablegc = 1;
-}
-
-extern void main_init(void) __asm__ (GOSYM_PREFIX "__go_init_main");
-extern void main_main(void) __asm__ (GOSYM_PREFIX "main.main");
-
// Used to determine the field alignment.
struct field_align
@@ -531,92 +428,6 @@ struct field_align
Hchan *p;
};
-static void
-initDone(void *arg __attribute__ ((unused))) {
- runtime_unlockOSThread();
-};
-
-// The main goroutine.
-// Note: C frames in general are not copyable during stack growth, for two reasons:
-// 1) We don't know where in a frame to find pointers to other stack locations.
-// 2) There's no guarantee that globals or heap values do not point into the frame.
-//
-// The C frame for runtime.main is copyable, because:
-// 1) There are no pointers to other stack locations in the frame
-// (d.fn points at a global, d.link is nil, d.argp is -1).
-// 2) The only pointer into this frame is from the defer chain,
-// which is explicitly handled during stack copying.
-void
-runtime_main(void* dummy __attribute__((unused)))
-{
- Defer d;
- _Bool frame;
-
- newm(sysmon, nil);
-
- // Lock the main goroutine onto this, the main OS thread,
- // during initialization. Most programs won't care, but a few
- // do require certain calls to be made by the main thread.
- // Those can arrange for main.main to run in the main thread
- // by calling runtime.LockOSThread during initialization
- // to preserve the lock.
- runtime_lockOSThread();
-
- // Defer unlock so that runtime.Goexit during init does the unlock too.
- d.pfn = (uintptr)(void*)initDone;
- d.link = g->_defer;
- d.arg = (void*)-1;
- d._panic = g->_panic;
- d.retaddr = 0;
- d.makefunccanrecover = 0;
- d.frame = &frame;
- d.special = true;
- g->_defer = &d;
-
- if(g->m != &runtime_m0)
- runtime_throw("runtime_main not on m0");
- __go_go(runtime_MHeap_Scavenger, nil);
-
- makeMainInitDone();
-
- _cgo_notify_runtime_init_done();
-
- main_init();
-
- closeMainInitDone();
-
- if(g->_defer != &d || (void*)d.pfn != initDone)
- runtime_throw("runtime: bad defer entry after init");
- g->_defer = d.link;
- runtime_unlockOSThread();
-
- // For gccgo we have to wait until after main is initialized
- // to enable GC, because initializing main registers the GC
- // roots.
- mstats()->enablegc = 1;
-
- if(runtime_isarchive) {
- // This is not a complete program, but is instead a
- // library built using -buildmode=c-archive or
- // c-shared. Now that we are initialized, there is
- // nothing further to do.
- return;
- }
-
- main_main();
-
- // Make racy client program work: if panicking on
- // another goroutine at the same time as main returns,
- // let the other goroutine finish printing the panic trace.
- // Once it does, it will exit. See issue 3934.
- if(runtime_panicking())
- runtime_park(nil, nil, "panicwait");
-
- runtime_exit(0);
- for(;;)
- *(int32*)0 = 0;
-}
-
void getTraceback(G*, G*) __asm__(GOSYM_PREFIX "runtime.getTraceback");
// getTraceback stores a traceback of gp in the g's traceback field
@@ -648,7 +459,7 @@ gtraceback(G* gp)
traceback = gp->traceback;
gp->traceback = nil;
- if(gp->m != nil)
+ if(gp->m != nil && gp->m != g->m)
runtime_throw("gtraceback: m is not nil");
gp->m = traceback->gp->m;
traceback->c = runtime_callers(1, traceback->locbuf,
@@ -657,50 +468,51 @@ gtraceback(G* gp)
runtime_gogo(traceback->gp);
}
-// Called to start an M.
-void*
-runtime_mstart(void* mp)
-{
- M *m;
- G *gp;
+// Called to set up the context information for a new M.
- m = (M*)mp;
- g = m->g0;
- g->m = m;
- gp = g;
+void mstartInitContext(G*, void*)
+ __asm__(GOSYM_PREFIX "runtime.mstartInitContext");
+void
+mstartInitContext(G *gp, void *stack __attribute__ ((unused)))
+{
initcontext();
- gp->entry = nil;
- gp->param = nil;
-
// Record top of stack for use by mcall.
// Once we call schedule we're never coming back,
// so other calls can reuse this stack space.
#ifdef USING_SPLIT_STACK
- __splitstack_getcontext(&g->stackcontext[0]);
+ __splitstack_getcontext(&gp->stackcontext[0]);
#else
- gp->gcinitialsp = &mp;
+ gp->gcinitialsp = stack;
// Setting gcstacksize to 0 is a marker meaning that gcinitialsp
// is the top of the stack, not the bottom.
gp->gcstacksize = 0;
- gp->gcnextsp = &mp;
+ gp->gcnextsp = stack;
#endif
+
+ // Save the currently active context. This will return
+ // multiple times via the setcontext call in mcall.
getcontext(ucontext_arg(&gp->context[0]));
- if(gp->traceback != nil)
+ if(gp->traceback != nil) {
+ // Got here from getTraceback.
+ // I'm not sure this ever actually happens--getTraceback
+ // may always go to the getcontext call in mcall.
gtraceback(gp);
+ }
- if(gp->entry != nil) {
+ if(gp->entry != 0) {
// Got here from mcall.
void (*pfn)(G*) = (void (*)(G*))gp->entry;
G* gp1 = (G*)gp->param;
- gp->entry = nil;
+ gp->entry = 0;
gp->param = nil;
pfn(gp1);
*(int*)0x21 = 0x21;
}
- runtime_minit();
+
+ // Initial call to getcontext--starting thread.
#ifdef USING_SPLIT_STACK
{
@@ -709,36 +521,7 @@ runtime_mstart(void* mp)
}
#endif
- // Install signal handlers; after minit so that minit can
- // prepare the thread to be able to handle the signals.
- if(m == &runtime_m0) {
- if(runtime_iscgo) {
- bool* cgoHasExtraM = runtime_getCgoHasExtraM();
- if(!*cgoHasExtraM) {
- *cgoHasExtraM = true;
- runtime_newextram();
- }
- }
- runtime_initsig(false);
- }
-
- if(m->mstartfn)
- ((void (*)(void))m->mstartfn)();
-
- if(m->helpgc) {
- m->helpgc = 0;
- stopm();
- } else if(m != &runtime_m0) {
- acquirep((P*)m->nextp);
- m->nextp = 0;
- }
- schedule();
-
- // TODO(brainman): This point is never reached, because scheduler
- // does not release os threads at the moment. But once this path
- // is enabled, we must remove our seh here.
-
- return nil;
+ mstart1();
}
typedef struct CgoThreadStart CgoThreadStart;
@@ -750,39 +533,6 @@ struct CgoThreadStart
void (*fn)(void);
};
-M* runtime_allocm(P*, bool, byte**, uintptr*)
- __asm__(GOSYM_PREFIX "runtime.allocm");
-
-// Allocate a new m unassociated with any thread.
-// Can use p for allocation context if needed.
-M*
-runtime_allocm(P *p, bool allocatestack, byte** ret_g0_stack, uintptr* ret_g0_stacksize)
-{
- M *mp;
-
- g->m->locks++; // disable GC because it can be called from sysmon
- if(g->m->p == 0)
- acquirep(p); // temporarily borrow p for mallocs in this function
-#if 0
- if(mtype == nil) {
- Eface e;
- runtime_gc_m_ptr(&e);
- mtype = ((const PtrType*)e.__type_descriptor)->__element_type;
- }
-#endif
-
- mp = runtime_mal(sizeof *mp);
- mcommoninit(mp);
- mp->g0 = runtime_malg(allocatestack, false, ret_g0_stack, ret_g0_stacksize);
- mp->g0->m = mp;
-
- if(p == (P*)g->m->p)
- releasep();
- g->m->locks--;
-
- return mp;
-}
-
void setGContext(void) __asm__ (GOSYM_PREFIX "runtime.setGContext");
// setGContext sets up a new goroutine context for the current g.
@@ -794,7 +544,7 @@ setGContext()
initcontext();
gp = g;
- gp->entry = nil;
+ gp->entry = 0;
gp->param = nil;
#ifdef USING_SPLIT_STACK
__splitstack_getcontext(&gp->stackcontext[0]);
@@ -808,11 +558,11 @@ setGContext()
#endif
getcontext(ucontext_arg(&gp->context[0]));
- if(gp->entry != nil) {
+ if(gp->entry != 0) {
// Got here from mcall.
void (*pfn)(G*) = (void (*)(G*))gp->entry;
G* gp1 = (G*)gp->param;
- gp->entry = nil;
+ gp->entry = 0;
gp->param = nil;
pfn(gp1);
*(int*)0x22 = 0x22;
@@ -834,224 +584,6 @@ makeGContext(G* gp, byte* sp, uintptr spsize) {
makecontext(uc, kickoff, 0);
}
-// Create a new m. It will start off with a call to fn, or else the scheduler.
-void
-newm(void(*fn)(void), P *p)
-{
- M *mp;
-
- mp = runtime_allocm(p, false, nil, nil);
- mp->nextp = (uintptr)p;
- mp->mstartfn = (uintptr)(void*)fn;
-
- runtime_newosproc(mp);
-}
-
-static void
-mspinning(void)
-{
- g->m->spinning = true;
-}
-
-// Schedules some M to run the p (creates an M if necessary).
-// If p==nil, tries to get an idle P, if no idle P's does nothing.
-void
-startm(P *p, bool spinning)
-{
- M *mp;
- void (*fn)(void);
-
- runtime_lock(&runtime_sched->lock);
- if(p == nil) {
- p = pidleget();
- if(p == nil) {
- runtime_unlock(&runtime_sched->lock);
- if(spinning)
- runtime_xadd(&runtime_sched->nmspinning, -1);
- return;
- }
- }
- mp = mget();
- runtime_unlock(&runtime_sched->lock);
- if(mp == nil) {
- fn = nil;
- if(spinning)
- fn = mspinning;
- newm(fn, p);
- return;
- }
- if(mp->spinning)
- runtime_throw("startm: m is spinning");
- if(mp->nextp)
- runtime_throw("startm: m has p");
- if(spinning && !runqempty(p)) {
- runtime_throw("startm: p has runnable gs");
- }
- mp->spinning = spinning;
- mp->nextp = (uintptr)p;
- runtime_notewakeup(&mp->park);
-}
-
-// Puts the current goroutine into a waiting state and calls unlockf.
-// If unlockf returns false, the goroutine is resumed.
-void
-runtime_park(bool(*unlockf)(G*, void*), void *lock, const char *reason)
-{
- if(g->atomicstatus != _Grunning)
- runtime_throw("bad g status");
- g->m->waitlock = lock;
- g->m->waitunlockf = unlockf;
- g->waitreason = runtime_gostringnocopy((const byte*)reason);
- runtime_mcall(park0);
-}
-
-void gopark(FuncVal *, void *, String, byte, int)
- __asm__ ("runtime.gopark");
-
-void
-gopark(FuncVal *unlockf, void *lock, String reason,
- byte traceEv __attribute__ ((unused)),
- int traceskip __attribute__ ((unused)))
-{
- if(g->atomicstatus != _Grunning)
- runtime_throw("bad g status");
- g->m->waitlock = lock;
- g->m->waitunlockf = unlockf == nil ? nil : (void*)unlockf->fn;
- g->waitreason = reason;
- runtime_mcall(park0);
-}
-
-static bool
-parkunlock(G *gp, void *lock)
-{
- USED(gp);
- runtime_unlock(lock);
- return true;
-}
-
-// Puts the current goroutine into a waiting state and unlocks the lock.
-// The goroutine can be made runnable again by calling runtime_ready(gp).
-void
-runtime_parkunlock(Lock *lock, const char *reason)
-{
- runtime_park(parkunlock, lock, reason);
-}
-
-void goparkunlock(Lock *, String, byte, int)
- __asm__ (GOSYM_PREFIX "runtime.goparkunlock");
-
-void
-goparkunlock(Lock *lock, String reason, byte traceEv __attribute__ ((unused)),
- int traceskip __attribute__ ((unused)))
-{
- if(g->atomicstatus != _Grunning)
- runtime_throw("bad g status");
- g->m->waitlock = lock;
- g->m->waitunlockf = parkunlock;
- g->waitreason = reason;
- runtime_mcall(park0);
-}
-
-// runtime_park continuation on g0.
-static void
-park0(G *gp)
-{
- M *m;
- bool ok;
-
- m = g->m;
- gp->atomicstatus = _Gwaiting;
- gp->m = nil;
- m->curg = nil;
- if(m->waitunlockf) {
- ok = ((bool (*)(G*, void*))m->waitunlockf)(gp, m->waitlock);
- m->waitunlockf = nil;
- m->waitlock = nil;
- if(!ok) {
- gp->atomicstatus = _Grunnable;
- execute(gp, true); // Schedule it back, never returns.
- }
- }
- if(m->lockedg) {
- stoplockedm();
- execute(gp, true); // Never returns.
- }
- schedule();
-}
-
-// Scheduler yield.
-void
-runtime_gosched(void)
-{
- if(g->atomicstatus != _Grunning)
- runtime_throw("bad g status");
- runtime_mcall(runtime_gosched0);
-}
-
-// runtime_gosched continuation on g0.
-void
-runtime_gosched0(G *gp)
-{
- M *m;
-
- m = g->m;
- gp->atomicstatus = _Grunnable;
- gp->m = nil;
- m->curg = nil;
- runtime_lock(&runtime_sched->lock);
- globrunqput(gp);
- runtime_unlock(&runtime_sched->lock);
- if(m->lockedg) {
- stoplockedm();
- execute(gp, true); // Never returns.
- }
- schedule();
-}
-
-// Finishes execution of the current goroutine.
-// Need to mark it as nosplit, because it runs with sp > stackbase (as runtime_lessstack).
-// Since it does not return it does not matter. But if it is preempted
-// at the split stack check, GC will complain about inconsistent sp.
-void runtime_goexit1(void) __attribute__ ((noinline));
-void
-runtime_goexit1(void)
-{
- if(g->atomicstatus != _Grunning)
- runtime_throw("bad g status");
- runtime_mcall(goexit0);
-}
-
-// runtime_goexit1 continuation on g0.
-static void
-goexit0(G *gp)
-{
- M *m;
-
- m = g->m;
- gp->atomicstatus = _Gdead;
- gp->entry = nil;
- gp->m = nil;
- gp->lockedm = nil;
- gp->paniconfault = 0;
- gp->_defer = nil; // should be true already but just in case.
- gp->_panic = nil; // non-nil for Goexit during panic. points at stack-allocated data.
- gp->writebuf.__values = nil;
- gp->writebuf.__count = 0;
- gp->writebuf.__capacity = 0;
- gp->waitreason = runtime_gostringnocopy(nil);
- gp->param = nil;
- m->curg->m = nil;
- m->curg = nil;
- m->lockedg = nil;
- if(m->locked & ~_LockExternal) {
- runtime_printf("invalid m->locked = %d\n", m->locked);
- runtime_throw("internal lockOSThread error");
- }
- m->locked = 0;
- gfput((P*)m->p, gp);
- schedule();
-}
-
// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
@@ -1072,11 +604,8 @@ runtime_entersyscall(int32 dummy __attribute__ ((unused)))
// held in registers will be seen by the garbage collector.
getcontext(ucontext_arg(&g->gcregs[0]));
- // Do the work in a separate function, so that this function
- // doesn't save any registers on its own stack. If this
- // function does save any registers, we might store the wrong
- // value in the call to getcontext.
- //
+ // Note that if this function does save any registers itself,
+ // we might store the wrong value in the call to getcontext.
// FIXME: This assumes that we do not need to save any
// callee-saved registers to access the TLS variable g. We
// don't want to put the ucontext_t on the stack because it is
@@ -1088,10 +617,6 @@ runtime_entersyscall(int32 dummy __attribute__ ((unused)))
static void
doentersyscall(uintptr pc, uintptr sp)
{
- // Disable preemption because during this function g is in _Gsyscall status,
- // but can have inconsistent g->sched, do not let GC observe it.
- g->m->locks++;
-
// Leave SP around for GC and traceback.
#ifdef USING_SPLIT_STACK
{
@@ -1109,43 +634,28 @@ doentersyscall(uintptr pc, uintptr sp)
}
#endif
- g->syscallsp = sp;
- g->syscallpc = pc;
-
- g->atomicstatus = _Gsyscall;
-
- if(runtime_atomicload(&runtime_sched->sysmonwait)) { // TODO: fast atomic
- runtime_lock(&runtime_sched->lock);
- if(runtime_atomicload(&runtime_sched->sysmonwait)) {
- runtime_atomicstore(&runtime_sched->sysmonwait, 0);
- runtime_notewakeup(&runtime_sched->sysmonnote);
- }
- runtime_unlock(&runtime_sched->lock);
- }
-
- g->m->mcache = nil;
- ((P*)(g->m->p))->m = 0;
- runtime_atomicstore(&((P*)g->m->p)->status, _Psyscall);
- if(runtime_atomicload(&runtime_sched->gcwaiting)) {
- runtime_lock(&runtime_sched->lock);
- if (runtime_sched->stopwait > 0 && runtime_cas(&((P*)g->m->p)->status, _Psyscall, _Pgcstop)) {
- if(--runtime_sched->stopwait == 0)
- runtime_notewakeup(&runtime_sched->stopnote);
- }
- runtime_unlock(&runtime_sched->lock);
- }
-
- g->m->locks--;
+ reentersyscall(pc, sp);
}
+static void doentersyscallblock(uintptr, uintptr)
+ __attribute__ ((no_split_stack, noinline));
+
// The same as runtime_entersyscall(), but with a hint that the syscall is blocking.
void
runtime_entersyscallblock(int32 dummy __attribute__ ((unused)))
{
- P *p;
+ // Save the registers in the g structure so that any pointers
+ // held in registers will be seen by the garbage collector.
+ getcontext(ucontext_arg(&g->gcregs[0]));
- g->m->locks++; // see comment in entersyscall
+ // See comment in runtime_entersyscall.
+ doentersyscallblock((uintptr)runtime_getcallerpc(&dummy),
+ (uintptr)runtime_getcallersp(&dummy));
+}
+static void
+doentersyscallblock(uintptr pc, uintptr sp)
+{
// Leave SP around for GC and traceback.
#ifdef USING_SPLIT_STACK
{
@@ -1156,175 +666,14 @@ runtime_entersyscallblock(int32 dummy __attribute__ ((unused)))
g->gcstacksize = (uintptr)gcstacksize;
}
#else
- g->gcnextsp = (byte *) &p;
-#endif
-
- // Save the registers in the g structure so that any pointers
- // held in registers will be seen by the garbage collector.
- getcontext(ucontext_arg(&g->gcregs[0]));
-
- g->syscallpc = (uintptr)runtime_getcallerpc(&dummy);
- g->syscallsp = (uintptr)runtime_getcallersp(&dummy);
-
- g->atomicstatus = _Gsyscall;
-
- p = releasep();
- handoffp(p);
- if(g->isbackground) // do not consider blocked scavenger for deadlock detection
- incidlelocked(1);
-
- g->m->locks--;
-}
-
-// The goroutine g exited its system call.
-// Arrange for it to run on a cpu again.
-// This is called only from the go syscall library, not
-// from the low-level system calls used by the runtime.
-void
-runtime_exitsyscall(int32 dummy __attribute__ ((unused)))
-{
- G *gp;
+ {
+ void *v;
- gp = g;
- gp->m->locks++; // see comment in entersyscall
-
- if(gp->isbackground) // do not consider blocked scavenger for deadlock detection
- incidlelocked(-1);
-
- gp->waitsince = 0;
- if(exitsyscallfast()) {
- // There's a cpu for us, so we can run.
- ((P*)gp->m->p)->syscalltick++;
- gp->atomicstatus = _Grunning;
- // Garbage collector isn't running (since we are),
- // so okay to clear gcstack and gcsp.
-#ifdef USING_SPLIT_STACK
- gp->gcstack = nil;
-#endif
- gp->gcnextsp = nil;
- runtime_memclr(&gp->gcregs[0], sizeof gp->gcregs);
- gp->syscallsp = 0;
- gp->m->locks--;
- return;
+ g->gcnextsp = (byte *) &v;
}
-
- gp->m->locks--;
-
- // Call the scheduler.
- runtime_mcall(exitsyscall0);
-
- // Scheduler returned, so we're allowed to run now.
- // Delete the gcstack information that we left for
- // the garbage collector during the system call.
- // Must wait until now because until gosched returns
- // we don't know for sure that the garbage collector
- // is not running.
-#ifdef USING_SPLIT_STACK
- gp->gcstack = nil;
#endif
- gp->gcnextsp = nil;
- runtime_memclr(&gp->gcregs[0], sizeof gp->gcregs);
-
- gp->syscallsp = 0;
-
- // Note that this gp->m might be different than the earlier
- // gp->m after returning from runtime_mcall.
- ((P*)gp->m->p)->syscalltick++;
-}
-
-static bool
-exitsyscallfast(void)
-{
- G *gp;
- P *p;
-
- gp = g;
-
- // Freezetheworld sets stopwait but does not retake P's.
- if(runtime_sched->stopwait) {
- gp->m->p = 0;
- return false;
- }
-
- // Try to re-acquire the last P.
- if(gp->m->p && ((P*)gp->m->p)->status == _Psyscall && runtime_cas(&((P*)gp->m->p)->status, _Psyscall, _Prunning)) {
- // There's a cpu for us, so we can run.
- gp->m->mcache = ((P*)gp->m->p)->mcache;
- ((P*)gp->m->p)->m = (uintptr)gp->m;
- return true;
- }
- // Try to get any other idle P.
- gp->m->p = 0;
- if(runtime_sched->pidle) {
- runtime_lock(&runtime_sched->lock);
- p = pidleget();
- if(p && runtime_atomicload(&runtime_sched->sysmonwait)) {
- runtime_atomicstore(&runtime_sched->sysmonwait, 0);
- runtime_notewakeup(&runtime_sched->sysmonnote);
- }
- runtime_unlock(&runtime_sched->lock);
- if(p) {
- acquirep(p);
- return true;
- }
- }
- return false;
-}
-
-// runtime_exitsyscall slow path on g0.
-// Failed to acquire P, enqueue gp as runnable.
-static void
-exitsyscall0(G *gp)
-{
- M *m;
- P *p;
-
- m = g->m;
- gp->atomicstatus = _Grunnable;
- gp->m = nil;
- m->curg = nil;
- runtime_lock(&runtime_sched->lock);
- p = pidleget();
- if(p == nil)
- globrunqput(gp);
- else if(runtime_atomicload(&runtime_sched->sysmonwait)) {
- runtime_atomicstore(&runtime_sched->sysmonwait, 0);
- runtime_notewakeup(&runtime_sched->sysmonnote);
- }
- runtime_unlock(&runtime_sched->lock);
- if(p) {
- acquirep(p);
- execute(gp, false); // Never returns.
- }
- if(m->lockedg) {
- // Wait until another thread schedules gp and so m again.
- stoplockedm();
- execute(gp, false); // Never returns.
- }
- stopm();
- schedule(); // Never returns.
-}
-
-void syscall_entersyscall(void)
- __asm__(GOSYM_PREFIX "syscall.Entersyscall");
-
-void syscall_entersyscall(void) __attribute__ ((no_split_stack));
-
-void
-syscall_entersyscall()
-{
- runtime_entersyscall(0);
-}
-
-void syscall_exitsyscall(void)
- __asm__(GOSYM_PREFIX "syscall.Exitsyscall");
-void syscall_exitsyscall(void) __attribute__ ((no_split_stack));
-
-void
-syscall_exitsyscall()
-{
- runtime_exitsyscall(0);
+ reentersyscallblock(pc, sp);
}
// Allocate a new g, with a stack big enough for stacksize bytes.
@@ -1388,187 +737,29 @@ runtime_malg(bool allocatestack, bool signalstack, byte** ret_stack, uintptr* re
return newg;
}
-G*
-__go_go(void (*fn)(void*), void* arg)
-{
- byte *sp;
- size_t spsize;
- G *newg;
- P *p;
-
-//runtime_printf("newproc1 %p %p narg=%d nret=%d\n", fn->fn, argp, narg, nret);
- if(fn == nil) {
- g->m->throwing = -1; // do not dump full stacks
- runtime_throw("go of nil func value");
- }
- g->m->locks++; // disable preemption because it can be holding p in a local var
+void resetNewG(G*, void **, uintptr*)
+ __asm__(GOSYM_PREFIX "runtime.resetNewG");
- p = (P*)g->m->p;
- if((newg = gfget(p)) != nil) {
+// Reset stack information for g pulled out of the cache to start a
+// new goroutine.
+void
+resetNewG(G *newg, void **sp, uintptr *spsize)
+{
#ifdef USING_SPLIT_STACK
- int dont_block_signals = 0;
+ int dont_block_signals = 0;
+ size_t ss_spsize;
- sp = __splitstack_resetcontext(&newg->stackcontext[0],
- &spsize);
- __splitstack_block_signals_context(&newg->stackcontext[0],
- &dont_block_signals, nil);
+ *sp = __splitstack_resetcontext(&newg->stackcontext[0], &ss_spsize);
+ *spsize = ss_spsize;
+ __splitstack_block_signals_context(&newg->stackcontext[0],
+ &dont_block_signals, nil);
#else
- sp = newg->gcinitialsp;
- spsize = newg->gcstacksize;
- if(spsize == 0)
- runtime_throw("bad spsize in __go_go");
- newg->gcnextsp = sp;
+ *sp = newg->gcinitialsp;
+ *spsize = newg->gcstacksize;
+ if(*spsize == 0)
+ runtime_throw("bad spsize in resetNewG");
+ newg->gcnextsp = *sp;
#endif
- newg->traceback = nil;
- } else {
- uintptr malsize;
-
- newg = runtime_malg(true, false, &sp, &malsize);
- spsize = (size_t)malsize;
- newg->atomicstatus = _Gdead;
- allgadd(newg);
- }
-
- newg->entry = (byte*)fn;
- newg->param = arg;
- newg->gopc = (uintptr)__builtin_return_address(0);
- newg->atomicstatus = _Grunnable;
- if(p->goidcache == p->goidcacheend) {
- p->goidcache = runtime_xadd64(&runtime_sched->goidgen, GoidCacheBatch);
- p->goidcacheend = p->goidcache + GoidCacheBatch;
- }
- newg->goid = p->goidcache++;
-
- makeGContext(newg, sp, (uintptr)spsize);
-
- runqput(p, newg, true);
-
- if(runtime_atomicload(&runtime_sched->npidle) != 0 && runtime_atomicload(&runtime_sched->nmspinning) == 0 && fn != runtime_main) // TODO: fast atomic
- wakep();
- g->m->locks--;
- return newg;
-}
-
-void
-runtime_Breakpoint(void)
-{
- runtime_breakpoint();
-}
-
-void runtime_Gosched (void) __asm__ (GOSYM_PREFIX "runtime.Gosched");
-
-void
-runtime_Gosched(void)
-{
- runtime_gosched();
-}
-
-static struct {
- uint32 lock;
- int32 hz;
-} prof;
-
-static void System(void) {}
-static void GC(void) {}
-
-// Called if we receive a SIGPROF signal.
-void
-runtime_sigprof()
-{
- M *mp = g->m;
- int32 n, i;
- bool traceback;
- uintptr pcbuf[TracebackMaxFrames];
- Location locbuf[TracebackMaxFrames];
- Slice stk;
-
- if(prof.hz == 0)
- return;
-
- if(mp == nil)
- return;
-
- // Profiling runs concurrently with GC, so it must not allocate.
- mp->mallocing++;
-
- traceback = true;
-
- if(mp->mcache == nil)
- traceback = false;
-
- n = 0;
-
- if(runtime_atomicload(&runtime_in_callers) > 0) {
- // If SIGPROF arrived while already fetching runtime
- // callers we can have trouble on older systems
- // because the unwind library calls dl_iterate_phdr
- // which was not recursive in the past.
- traceback = false;
- }
-
- if(traceback) {
- n = runtime_callers(0, locbuf, nelem(locbuf), false);
- for(i = 0; i < n; i++)
- pcbuf[i] = locbuf[i].pc;
- }
- if(!traceback || n <= 0) {
- n = 2;
- pcbuf[0] = (uintptr)runtime_getcallerpc(&n);
- if(mp->gcing || mp->helpgc)
- pcbuf[1] = (uintptr)GC;
- else
- pcbuf[1] = (uintptr)System;
- }
-
- if (prof.hz != 0) {
- stk.__values = &pcbuf[0];
- stk.__count = n;
- stk.__capacity = n;
-
- // Simple cas-lock to coordinate with setcpuprofilerate.
- while (!runtime_cas(&prof.lock, 0, 1)) {
- runtime_osyield();
- }
- if (prof.hz != 0) {
- runtime_cpuprofAdd(stk);
- }
- runtime_atomicstore(&prof.lock, 0);
- }
-
- mp->mallocing--;
-}
-
-// Arrange to call fn with a traceback hz times a second.
-void
-runtime_setcpuprofilerate_m(int32 hz)
-{
- // Force sane arguments.
- if(hz < 0)
- hz = 0;
-
- // Disable preemption, otherwise we can be rescheduled to another thread
- // that has profiling enabled.
- g->m->locks++;
-
- // Stop profiler on this thread so that it is safe to lock prof.
- // if a profiling signal came in while we had prof locked,
- // it would deadlock.
- runtime_resetcpuprofiler(0);
-
- while (!runtime_cas(&prof.lock, 0, 1)) {
- runtime_osyield();
- }
- prof.hz = hz;
- runtime_atomicstore(&prof.lock, 0);
-
- runtime_lock(&runtime_sched->lock);
- runtime_sched->profilehz = hz;
- runtime_unlock(&runtime_sched->lock);
-
- if(hz != 0)
- runtime_resetcpuprofiler(hz);
-
- g->m->locks--;
}
// Return whether we are waiting for a GC. This gc toolchain uses
@@ -1578,21 +769,3 @@ runtime_gcwaiting(void)
{
return runtime_sched->gcwaiting;
}
-
-// os_beforeExit is called from os.Exit(0).
-//go:linkname os_beforeExit os.runtime_beforeExit
-
-extern void os_beforeExit() __asm__ (GOSYM_PREFIX "os.runtime_beforeExit");
-
-void
-os_beforeExit()
-{
-}
-
-intgo NumCPU(void) __asm__ (GOSYM_PREFIX "runtime.NumCPU");
-
-intgo
-NumCPU()
-{
- return (intgo)(runtime_ncpu);
-}
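
Much of the C removed from proc.c above (runtime_mstart, __go_go, the park, gosched, and goexit paths) shared one convention that survives in kickoff, mcall, and the new mstartInitContext: the creator stashes the start function and its argument in the g, and the code that resumes the context clears both before calling the function so a stale pair can never be rerun. A simplified Go model of that handoff; the g struct here is illustrative, and the real code stores entry as a uintptr code address rather than a func value.

    package main

    import "fmt"

    // g carries only the two fields involved in the handoff.
    type g struct {
        entry func(interface{})
        param interface{}
    }

    // kickoff fetches entry and param, clears them, and then runs the body,
    // mirroring the order of operations in proc.c's kickoff.
    func kickoff(gp *g) {
        fn, arg := gp.entry, gp.param
        gp.entry, gp.param = nil, nil
        fn(arg)
    }

    func main() {
        gp := &g{
            entry: func(v interface{}) { fmt.Println("goroutine argument:", v) },
            param: 42,
        }
        kickoff(gp)
    }
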
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index 644fe92..93dd7bc 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -122,8 +122,10 @@ extern M* runtime_m(void);
extern G* runtime_g(void)
__asm__(GOSYM_PREFIX "runtime.getg");
-extern M runtime_m0;
-extern G runtime_g0;
+extern M* runtime_m0(void)
+ __asm__(GOSYM_PREFIX "runtime.runtime_m0");
+extern G* runtime_g0(void)
+ __asm__(GOSYM_PREFIX "runtime.runtime_g0");
enum
{
@@ -198,7 +200,6 @@ struct ParFor
uint64 nsleep;
};
-extern bool runtime_precisestack;
extern bool runtime_copystack;
/*
@@ -236,16 +237,12 @@ extern G* runtime_getallg(intgo)
__asm__(GOSYM_PREFIX "runtime.getallg");
extern uintptr runtime_getallglen(void)
__asm__(GOSYM_PREFIX "runtime.getallglen");
-extern G* runtime_lastg;
extern M* runtime_getallm(void)
__asm__(GOSYM_PREFIX "runtime.getallm");
-extern P** runtime_allp;
extern Sched* runtime_sched;
extern uint32 runtime_panicking(void)
__asm__ (GOSYM_PREFIX "runtime.getPanicking");
-extern int8* runtime_goos;
extern int32 runtime_ncpu;
-extern void (*runtime_sysargs)(int32, uint8**);
extern struct debugVars runtime_debug;
extern bool runtime_isstarted;
@@ -270,9 +267,6 @@ void runtime_alginit(void)
__asm__ (GOSYM_PREFIX "runtime.alginit");
void runtime_goargs(void)
__asm__ (GOSYM_PREFIX "runtime.goargs");
-void runtime_goenvs(void);
-void runtime_goenvs_unix(void)
- __asm__ (GOSYM_PREFIX "runtime.goenvs_unix");
void runtime_throw(const char*) __attribute__ ((noreturn));
void runtime_panicstring(const char*) __attribute__ ((noreturn));
bool runtime_canpanic(G*);
@@ -283,10 +277,10 @@ int32 runtime_snprintf(byte*, int32, const char*, ...);
void* runtime_mal(uintptr);
String runtime_gostringnocopy(const byte*)
__asm__ (GOSYM_PREFIX "runtime.gostringnocopy");
-void runtime_schedinit(void);
+void runtime_schedinit(void)
+ __asm__ (GOSYM_PREFIX "runtime.schedinit");
void runtime_initsig(bool)
__asm__ (GOSYM_PREFIX "runtime.initsig");
-int32 runtime_gotraceback(bool *crash);
void runtime_goroutineheader(G*)
__asm__ (GOSYM_PREFIX "runtime.goroutineheader");
void runtime_printtrace(Slice, G*)
@@ -299,7 +293,8 @@ void runtime_ready(G*, intgo, bool)
__asm__ (GOSYM_PREFIX "runtime.ready");
String runtime_getenv(const char*);
int32 runtime_atoi(const byte*, intgo);
-void* runtime_mstart(void*);
+void* runtime_mstart(void*)
+ __asm__(GOSYM_PREFIX "runtime.mstart");
G* runtime_malg(bool, bool, byte**, uintptr*)
__asm__(GOSYM_PREFIX "runtime.malg");
void runtime_minit(void)
@@ -310,7 +305,8 @@ MCache* runtime_allocmcache(void)
__asm__ (GOSYM_PREFIX "runtime.allocmcache");
void runtime_freemcache(MCache*)
__asm__ (GOSYM_PREFIX "runtime.freemcache");
-void runtime_mallocinit(void);
+void runtime_mallocinit(void)
+ __asm__ (GOSYM_PREFIX "runtime.mallocinit");
void runtime_mprofinit(void);
#define runtime_getcallersp(p) __builtin_frame_address(0)
void runtime_mcall(void(*)(G*));
@@ -342,15 +338,13 @@ void runtime_setg(G*)
void runtime_newextram(void)
__asm__ (GOSYM_PREFIX "runtime.newextram");
#define runtime_exit(s) exit(s)
-#define runtime_breakpoint() __builtin_trap()
-void runtime_gosched(void);
-void runtime_gosched0(G*);
+void runtime_gosched(void)
+ __asm__ (GOSYM_PREFIX "runtime.Gosched");
void runtime_schedtrace(bool)
__asm__ (GOSYM_PREFIX "runtime.schedtrace");
-void runtime_park(bool(*)(G*, void*), void*, const char*);
-void runtime_parkunlock(Lock*, const char*);
+void runtime_goparkunlock(Lock*, String, byte, intgo)
+ __asm__ (GOSYM_PREFIX "runtime.goparkunlock");
void runtime_tsleep(int64, const char*);
-M* runtime_newm(void);
void runtime_goexit1(void)
__asm__ (GOSYM_PREFIX "runtime.goexit1");
void runtime_entersyscall(int32)
@@ -369,14 +363,6 @@ void runtime_dopanic(int32) __attribute__ ((noreturn));
void runtime_startpanic(void)
__asm__ (GOSYM_PREFIX "runtime.startpanic");
void runtime_unwindstack(G*, byte*);
-void runtime_sigprof()
- __asm__ (GOSYM_PREFIX "runtime.sigprof");
-void runtime_resetcpuprofiler(int32)
- __asm__ (GOSYM_PREFIX "runtime.resetcpuprofiler");
-void runtime_setcpuprofilerate_m(int32)
- __asm__ (GOSYM_PREFIX "runtime.setcpuprofilerate_m");
-void runtime_cpuprofAdd(Slice)
- __asm__ (GOSYM_PREFIX "runtime.cpuprofAdd");
void runtime_usleep(uint32)
__asm__ (GOSYM_PREFIX "runtime.usleep");
int64 runtime_cputicks(void)
@@ -510,10 +496,6 @@ void runtime_procyield(uint32)
__asm__(GOSYM_PREFIX "runtime.procyield");
void runtime_osyield(void)
__asm__(GOSYM_PREFIX "runtime.osyield");
-void runtime_lockOSThread(void)
- __asm__(GOSYM_PREFIX "runtime.lockOSThread");
-void runtime_unlockOSThread(void)
- __asm__(GOSYM_PREFIX "runtime.unlockOSThread");
void runtime_printcreatedby(G*)
__asm__(GOSYM_PREFIX "runtime.printcreatedby");
@@ -551,8 +533,8 @@ extern uintptr runtime_stacks_sys;
struct backtrace_state;
extern struct backtrace_state *__go_get_backtrace_state(void);
extern _Bool __go_file_line(uintptr, int, String*, String*, intgo *);
-extern void runtime_main(void*);
-extern uint32 runtime_in_callers;
+extern void runtime_main(void*)
+ __asm__(GOSYM_PREFIX "runtime.main");
int32 getproccount(void);
@@ -573,7 +555,8 @@ struct time_now_ret now() __asm__ (GOSYM_PREFIX "time.now")
__attribute__ ((no_split_stack));
extern void _cgo_wait_runtime_init_done (void);
-extern void _cgo_notify_runtime_init_done (void);
+extern void _cgo_notify_runtime_init_done (void)
+ __asm__ (GOSYM_PREFIX "runtime._cgo_notify_runtime_init_done");
extern _Bool runtime_iscgo;
extern uintptr __go_end __attribute__ ((weak));
extern void *getitab(const struct __go_type_descriptor *,
@@ -588,9 +571,13 @@ extern void setCpuidECX(uint32)
__asm__ (GOSYM_PREFIX "runtime.setCpuidECX");
extern void setSupportAES(bool)
__asm__ (GOSYM_PREFIX "runtime.setSupportAES");
-extern void makeMainInitDone(void)
- __asm__ (GOSYM_PREFIX "runtime.makeMainInitDone");
-extern void closeMainInitDone(void)
- __asm__ (GOSYM_PREFIX "runtime.closeMainInitDone");
extern void typedmemmove(const Type *, void *, const void *)
__asm__ (GOSYM_PREFIX "runtime.typedmemmove");
+extern void setncpu(int32)
+ __asm__(GOSYM_PREFIX "runtime.setncpu");
+extern P** runtime_getAllP()
+ __asm__ (GOSYM_PREFIX "runtime.getAllP");
+extern Sched* runtime_getsched()
+ __asm__ (GOSYM_PREFIX "runtime.getsched");
+extern void setpagesize(uintptr_t)
+ __asm__(GOSYM_PREFIX "runtime.setpagesize");
diff --git a/libgo/runtime/runtime_c.c b/libgo/runtime/runtime_c.c
index 9883e0a..63eb141 100644
--- a/libgo/runtime/runtime_c.c
+++ b/libgo/runtime/runtime_c.c
@@ -16,34 +16,6 @@
#include "arch.h"
#include "array.h"
-enum {
- maxround = sizeof(uintptr),
-};
-
-extern volatile intgo runtime_MemProfileRate
- __asm__ (GOSYM_PREFIX "runtime.MemProfileRate");
-
-struct gotraceback_ret {
- int32 level;
- bool all;
- bool crash;
-};
-
-extern struct gotraceback_ret gotraceback(void)
- __asm__ (GOSYM_PREFIX "runtime.gotraceback");
-
-// runtime_gotraceback is the C interface to runtime.gotraceback.
-int32
-runtime_gotraceback(bool *crash)
-{
- struct gotraceback_ret r;
-
- r = gotraceback();
- if(crash != nil)
- *crash = r.crash;
- return r.level;
-}
-
int32
runtime_atoi(const byte *p, intgo len)
{
@@ -116,15 +88,6 @@ runtime_setdebug(struct debugVars* d) {
runtime_debug = *d;
}
-void memclrBytes(Slice)
- __asm__ (GOSYM_PREFIX "runtime.memclrBytes");
-
-void
-memclrBytes(Slice s)
-{
- runtime_memclr(s.__values, s.__count);
-}
-
int32 go_open(char *, int32, int32)
__asm__ (GOSYM_PREFIX "runtime.open");
diff --git a/libgo/runtime/thread-linux.c b/libgo/runtime/thread-linux.c
index 81ad0f9..f6d8be9 100644
--- a/libgo/runtime/thread-linux.c
+++ b/libgo/runtime/thread-linux.c
@@ -15,10 +15,6 @@ void
runtime_osinit(void)
{
runtime_ncpu = getproccount();
-}
-
-void
-runtime_goenvs(void)
-{
- runtime_goenvs_unix();
+ setncpu(runtime_ncpu);
+ setpagesize(getpagesize());
}
diff --git a/libgo/runtime/thread-sema.c b/libgo/runtime/thread-sema.c
index b74b1da..77d53c4 100644
--- a/libgo/runtime/thread-sema.c
+++ b/libgo/runtime/thread-sema.c
@@ -8,16 +8,13 @@
#include <errno.h>
#include <stdlib.h>
#include <time.h>
+#include <unistd.h>
#include <semaphore.h>
void
runtime_osinit (void)
{
runtime_ncpu = getproccount();
-}
-
-void
-runtime_goenvs (void)
-{
- runtime_goenvs_unix ();
+ setncpu(runtime_ncpu);
+ setpagesize(getpagesize());
}