author     Ian Lance Taylor <ian@gcc.gnu.org>   2019-07-03 23:13:09 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>   2019-07-03 23:13:09 +0000
commit     0baa9d1d59bf17177e80838ebe66df10a7a909c0 (patch)
tree       a1b956eacf43ba6ac1d052faad8a2df8f4f6ef5a /libgo
parent     133d3bd8362f0c438017ca18adb51afb7288f78b (diff)
parent     651c754cfbd1928abd8ac6b3121fc37c85907dcb (diff)
Merge from trunk revision 273026.
From-SVN: r273027
Diffstat (limited to 'libgo')
-rw-r--r--  libgo/Makefile.am                                  4
-rw-r--r--  libgo/Makefile.in                                 63
-rw-r--r--  libgo/aclocal.m4                                  20
-rwxr-xr-x  libgo/configure                                  152
-rw-r--r--  libgo/configure.ac                                 5
-rw-r--r--  libgo/go/cmd/go/internal/work/gccgo.go             9
-rw-r--r--  libgo/go/go/internal/gccgoimporter/parser.go      70
-rw-r--r--  libgo/go/reflect/type.go                         131
-rw-r--r--  libgo/go/runtime/alg.go                           11
-rw-r--r--  libgo/go/runtime/heapdump.go                      10
-rw-r--r--  libgo/go/runtime/iface.go                         49
-rw-r--r--  libgo/go/runtime/map_fast32.go                     9
-rw-r--r--  libgo/go/runtime/map_fast64.go                     9
-rw-r--r--  libgo/go/runtime/map_faststr.go                    8
-rw-r--r--  libgo/go/runtime/mgcmark.go                        2
-rw-r--r--  libgo/go/runtime/mprof.go                        349
-rw-r--r--  libgo/go/runtime/panic.go                          4
-rw-r--r--  libgo/go/runtime/proc.go                          18
-rw-r--r--  libgo/go/runtime/signal_gccgo.go                   5
-rw-r--r--  libgo/go/runtime/string.go                        28
-rw-r--r--  libgo/go/runtime/stubs.go                         76
-rw-r--r--  libgo/go/runtime/symtab.go                         8
-rw-r--r--  libgo/go/runtime/traceback_gccgo.go               16
-rw-r--r--  libgo/go/runtime/type.go                          81
-rw-r--r--  libgo/go/syscall/wait.c                            2
-rwxr-xr-x  libgo/mksysinfo.sh                                 4
-rw-r--r--  libgo/runtime/go-caller.c                         18
-rw-r--r--  libgo/runtime/go-callers.c                        55
-rw-r--r--  libgo/runtime/go-context.S                        69
-rw-r--r--  libgo/runtime/go-libmain.c                         2
-rw-r--r--  libgo/runtime/go-main.c                            2
-rw-r--r--  libgo/runtime/go-memclr.c                          3
-rw-r--r--  libgo/runtime/go-memcmp.c                         13
-rw-r--r--  libgo/runtime/go-memequal.c                        3
-rw-r--r--  libgo/runtime/go-runtime-error.c                  24
-rw-r--r--  libgo/runtime/go-strslice.c                       30
-rw-r--r--  libgo/runtime/go-type.h                          140
-rw-r--r--  libgo/runtime/go-typedesc-equal.c                 28
-rw-r--r--  libgo/runtime/proc.c                              46
-rw-r--r--  libgo/runtime/runtime.h                           37
-rw-r--r--  libgo/testsuite/Makefile.in                        3
-rwxr-xr-x  libgo/testsuite/gotest                             7
42 files changed, 961 insertions, 662 deletions
diff --git a/libgo/Makefile.am b/libgo/Makefile.am
index ebbdee8..88f885b 100644
--- a/libgo/Makefile.am
+++ b/libgo/Makefile.am
@@ -459,7 +459,6 @@ runtime_files = \
runtime/go-fieldtrack.c \
runtime/go-matherr.c \
runtime/go-memclr.c \
- runtime/go-memcmp.c \
runtime/go-memequal.c \
runtime/go-nanotime.c \
runtime/go-now.c \
@@ -468,8 +467,6 @@ runtime_files = \
runtime/go-runtime-error.c \
runtime/go-setenv.c \
runtime/go-signal.c \
- runtime/go-strslice.c \
- runtime/go-typedesc-equal.c \
runtime/go-unsafe-pointer.c \
runtime/go-unsetenv.c \
runtime/go-unwind.c \
@@ -481,6 +478,7 @@ runtime_files = \
runtime/runtime_c.c \
runtime/stack.c \
runtime/yield.c \
+ runtime/go-context.S \
$(rtems_task_variable_add_file) \
$(runtime_getncpu_file)
diff --git a/libgo/Makefile.in b/libgo/Makefile.in
index 737b01e..f53ca3f 100644
--- a/libgo/Makefile.in
+++ b/libgo/Makefile.in
@@ -244,16 +244,16 @@ am__objects_3 = runtime/aeshash.lo runtime/go-assert.lo \
runtime/go-cgo.lo runtime/go-construct-map.lo \
runtime/go-ffi.lo runtime/go-fieldtrack.lo \
runtime/go-matherr.lo runtime/go-memclr.lo \
- runtime/go-memcmp.lo runtime/go-memequal.lo \
- runtime/go-nanotime.lo runtime/go-now.lo runtime/go-nosys.lo \
+ runtime/go-memequal.lo runtime/go-nanotime.lo \
+ runtime/go-now.lo runtime/go-nosys.lo \
runtime/go-reflect-call.lo runtime/go-runtime-error.lo \
runtime/go-setenv.lo runtime/go-signal.lo \
- runtime/go-strslice.lo runtime/go-typedesc-equal.lo \
runtime/go-unsafe-pointer.lo runtime/go-unsetenv.lo \
runtime/go-unwind.lo runtime/go-varargs.lo \
runtime/env_posix.lo runtime/panic.lo runtime/print.lo \
runtime/proc.lo runtime/runtime_c.lo runtime/stack.lo \
- runtime/yield.lo $(am__objects_1) $(am__objects_2)
+ runtime/yield.lo runtime/go-context.lo $(am__objects_1) \
+ $(am__objects_2)
am_libgo_llgo_la_OBJECTS = $(am__objects_3)
libgo_llgo_la_OBJECTS = $(am_libgo_llgo_la_OBJECTS)
AM_V_lt = $(am__v_lt_@AM_V@)
@@ -287,6 +287,16 @@ DEFAULT_INCLUDES = -I.@am__isrc@
depcomp = $(SHELL) $(top_srcdir)/../depcomp
am__depfiles_maybe = depfiles
am__mv = mv -f
+CPPASCOMPILE = $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS)
+LTCPPASCOMPILE = $(LIBTOOL) $(AM_V_lt) $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) \
+ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+ $(AM_CCASFLAGS) $(CCASFLAGS)
+AM_V_CPPAS = $(am__v_CPPAS_@AM_V@)
+am__v_CPPAS_ = $(am__v_CPPAS_@AM_DEFAULT_V@)
+am__v_CPPAS_0 = @echo " CPPAS " $@;
+am__v_CPPAS_1 =
COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
@@ -380,6 +390,9 @@ AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
CCDEPMODE = @CCDEPMODE@
CC_FOR_BUILD = @CC_FOR_BUILD@
CFLAGS = @CFLAGS@
@@ -879,7 +892,6 @@ runtime_files = \
runtime/go-fieldtrack.c \
runtime/go-matherr.c \
runtime/go-memclr.c \
- runtime/go-memcmp.c \
runtime/go-memequal.c \
runtime/go-nanotime.c \
runtime/go-now.c \
@@ -888,8 +900,6 @@ runtime_files = \
runtime/go-runtime-error.c \
runtime/go-setenv.c \
runtime/go-signal.c \
- runtime/go-strslice.c \
- runtime/go-typedesc-equal.c \
runtime/go-unsafe-pointer.c \
runtime/go-unsetenv.c \
runtime/go-unwind.c \
@@ -901,6 +911,7 @@ runtime_files = \
runtime/runtime_c.c \
runtime/stack.c \
runtime/yield.c \
+ runtime/go-context.S \
$(rtems_task_variable_add_file) \
$(runtime_getncpu_file)
@@ -1158,7 +1169,7 @@ all: config.h
$(MAKE) $(AM_MAKEFLAGS) all-recursive
.SUFFIXES:
-.SUFFIXES: .c .go .gox .o .obj .lo .a
+.SUFFIXES: .c .go .gox .o .obj .lo .a .S
am--refresh: Makefile
@:
$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/../multilib.am $(am__configure_deps)
@@ -1331,8 +1342,6 @@ runtime/go-matherr.lo: runtime/$(am__dirstamp) \
runtime/$(DEPDIR)/$(am__dirstamp)
runtime/go-memclr.lo: runtime/$(am__dirstamp) \
runtime/$(DEPDIR)/$(am__dirstamp)
-runtime/go-memcmp.lo: runtime/$(am__dirstamp) \
- runtime/$(DEPDIR)/$(am__dirstamp)
runtime/go-memequal.lo: runtime/$(am__dirstamp) \
runtime/$(DEPDIR)/$(am__dirstamp)
runtime/go-nanotime.lo: runtime/$(am__dirstamp) \
@@ -1349,10 +1358,6 @@ runtime/go-setenv.lo: runtime/$(am__dirstamp) \
runtime/$(DEPDIR)/$(am__dirstamp)
runtime/go-signal.lo: runtime/$(am__dirstamp) \
runtime/$(DEPDIR)/$(am__dirstamp)
-runtime/go-strslice.lo: runtime/$(am__dirstamp) \
- runtime/$(DEPDIR)/$(am__dirstamp)
-runtime/go-typedesc-equal.lo: runtime/$(am__dirstamp) \
- runtime/$(DEPDIR)/$(am__dirstamp)
runtime/go-unsafe-pointer.lo: runtime/$(am__dirstamp) \
runtime/$(DEPDIR)/$(am__dirstamp)
runtime/go-unsetenv.lo: runtime/$(am__dirstamp) \
@@ -1375,6 +1380,8 @@ runtime/stack.lo: runtime/$(am__dirstamp) \
runtime/$(DEPDIR)/$(am__dirstamp)
runtime/yield.lo: runtime/$(am__dirstamp) \
runtime/$(DEPDIR)/$(am__dirstamp)
+runtime/go-context.lo: runtime/$(am__dirstamp) \
+ runtime/$(DEPDIR)/$(am__dirstamp)
runtime/rtems-task-variable-add.lo: runtime/$(am__dirstamp) \
runtime/$(DEPDIR)/$(am__dirstamp)
runtime/getncpu-none.lo: runtime/$(am__dirstamp) \
@@ -1421,11 +1428,11 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-cdiv.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-cgo.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-construct-map.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-context.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-ffi.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-fieldtrack.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-matherr.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-memclr.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-memcmp.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-memequal.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-nanotime.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-nosys.Plo@am__quote@
@@ -1434,8 +1441,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-runtime-error.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-setenv.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-signal.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-strslice.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-typedesc-equal.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-unsafe-pointer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-unsetenv.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/go-unwind.Plo@am__quote@
@@ -1451,6 +1456,30 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/stack.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@runtime/$(DEPDIR)/yield.Plo@am__quote@
+.S.o:
+@am__fastdepCCAS_TRUE@ $(AM_V_CPPAS)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\
+@am__fastdepCCAS_TRUE@ $(CPPASCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
+@am__fastdepCCAS_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ $(AM_V_CPPAS)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCCAS_FALSE@ $(AM_V_CPPAS@am__nodep@)$(CPPASCOMPILE) -c -o $@ $<
+
+.S.obj:
+@am__fastdepCCAS_TRUE@ $(AM_V_CPPAS)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\
+@am__fastdepCCAS_TRUE@ $(CPPASCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\
+@am__fastdepCCAS_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ $(AM_V_CPPAS)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCCAS_FALSE@ $(AM_V_CPPAS@am__nodep@)$(CPPASCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.S.lo:
+@am__fastdepCCAS_TRUE@ $(AM_V_CPPAS)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\
+@am__fastdepCCAS_TRUE@ $(LTCPPASCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
+@am__fastdepCCAS_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ $(AM_V_CPPAS)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCCAS_FALSE@ $(AM_V_CPPAS@am__nodep@)$(LTCPPASCOMPILE) -c -o $@ $<
+
.c.o:
@am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\
@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\
diff --git a/libgo/aclocal.m4 b/libgo/aclocal.m4
index b55bb82..951aed3 100644
--- a/libgo/aclocal.m4
+++ b/libgo/aclocal.m4
@@ -56,6 +56,26 @@ m4_ifndef([AC_AUTOCONF_VERSION],
[m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])
+# Figure out how to run the assembler. -*- Autoconf -*-
+
+# Copyright (C) 2001-2017 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# AM_PROG_AS
+# ----------
+AC_DEFUN([AM_PROG_AS],
+[# By default we simply use the C compiler to build assembly code.
+AC_REQUIRE([AC_PROG_CC])
+test "${CCAS+set}" = set || CCAS=$CC
+test "${CCASFLAGS+set}" = set || CCASFLAGS=$CFLAGS
+AC_ARG_VAR([CCAS], [assembler compiler command (defaults to CC)])
+AC_ARG_VAR([CCASFLAGS], [assembler compiler flags (defaults to CFLAGS)])
+_AM_IF_OPTION([no-dependencies],, [_AM_DEPENDENCIES([CCAS])])dnl
+])
+
# AM_AUX_DIR_EXPAND -*- Autoconf -*-
# Copyright (C) 2001-2017 Free Software Foundation, Inc.
diff --git a/libgo/configure b/libgo/configure
index 06b68b0..22ef79c 100755
--- a/libgo/configure
+++ b/libgo/configure
@@ -729,6 +729,11 @@ SED
MAINT
MAINTAINER_MODE_FALSE
MAINTAINER_MODE_TRUE
+am__fastdepCCAS_FALSE
+am__fastdepCCAS_TRUE
+CCASDEPMODE
+CCASFLAGS
+CCAS
GOFLAGS
GOC
am__fastdepCC_FALSE
@@ -1510,6 +1515,8 @@ Some influential environment variables:
you have headers in a nonstandard directory <include dir>
GOC Go compiler command
GOFLAGS Go compiler flags
+ CCAS assembler compiler command (defaults to CC)
+ CCASFLAGS assembler compiler flags (defaults to CFLAGS)
CPP C preprocessor
Use these variables to override the choices made by `configure' or to help
@@ -4556,6 +4563,139 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_c_compiler_gnu
+# By default we simply use the C compiler to build assembly code.
+
+test "${CCAS+set}" = set || CCAS=$CC
+test "${CCASFLAGS+set}" = set || CCASFLAGS=$CFLAGS
+
+
+
+depcc="$CCAS" am_compiler_list=
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
+$as_echo_n "checking dependency style of $depcc... " >&6; }
+if ${am_cv_CCAS_dependencies_compiler_type+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+ # We make a subdir and do the tests there. Otherwise we can end up
+ # making bogus files that we don't know about and never remove. For
+ # instance it was reported that on HP-UX the gcc test will end up
+ # making a dummy file named 'D' -- because '-MD' means "put the output
+ # in D".
+ rm -rf conftest.dir
+ mkdir conftest.dir
+ # Copy depcomp to subdir because otherwise we won't find it if we're
+ # using a relative directory.
+ cp "$am_depcomp" conftest.dir
+ cd conftest.dir
+ # We will build objects and dependencies in a subdirectory because
+ # it helps to detect inapplicable dependency modes. For instance
+ # both Tru64's cc and ICC support -MD to output dependencies as a
+ # side effect of compilation, but ICC will put the dependencies in
+ # the current directory while Tru64 will put them in the object
+ # directory.
+ mkdir sub
+
+ am_cv_CCAS_dependencies_compiler_type=none
+ if test "$am_compiler_list" = ""; then
+ am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+ fi
+ am__universal=false
+
+
+ for depmode in $am_compiler_list; do
+ # Setup a source with many dependencies, because some compilers
+ # like to wrap large dependency lists on column 80 (with \), and
+ # we should not choose a depcomp mode which is confused by this.
+ #
+ # We need to recreate these files for each test, as the compiler may
+ # overwrite some of them when testing with obscure command lines.
+ # This happens at least with the AIX C compiler.
+ : > sub/conftest.c
+ for i in 1 2 3 4 5 6; do
+ echo '#include "conftst'$i'.h"' >> sub/conftest.c
+ # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with
+ # Solaris 10 /bin/sh.
+ echo '/* dummy */' > sub/conftst$i.h
+ done
+ echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+ # We check with '-c' and '-o' for the sake of the "dashmstdout"
+ # mode. It turns out that the SunPro C++ compiler does not properly
+ # handle '-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs.
+ am__obj=sub/conftest.${OBJEXT-o}
+ am__minus_obj="-o $am__obj"
+ case $depmode in
+ gcc)
+ # This depmode causes a compiler race in universal mode.
+ test "$am__universal" = false || continue
+ ;;
+ nosideeffect)
+ # After this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested.
+ if test "x$enable_dependency_tracking" = xyes; then
+ continue
+ else
+ break
+ fi
+ ;;
+ msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+ # This compiler won't grok '-c -o', but also, the minuso test has
+ # not run yet. These depmodes are late enough in the game, and
+ # so weak that their functioning should not be impacted.
+ am__obj=conftest.${OBJEXT-o}
+ am__minus_obj=
+ ;;
+ none) break ;;
+ esac
+ if depmode=$depmode \
+ source=sub/conftest.c object=$am__obj \
+ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+ >/dev/null 2>conftest.err &&
+ grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+ grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+ ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+ # icc doesn't choke on unknown options, it will just issue warnings
+ # or remarks (even with -Werror). So we grep stderr for any message
+ # that says an option was ignored or not supported.
+ # When given -MP, icc 7.0 and 7.1 complain thusly:
+ # icc: Command line warning: ignoring option '-M'; no argument required
+ # The diagnosis changed in icc 8.0:
+ # icc: Command line remark: option '-MP' not supported
+ if (grep 'ignoring option' conftest.err ||
+ grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+ am_cv_CCAS_dependencies_compiler_type=$depmode
+ break
+ fi
+ fi
+ done
+
+ cd ..
+ rm -rf conftest.dir
+else
+ am_cv_CCAS_dependencies_compiler_type=none
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CCAS_dependencies_compiler_type" >&5
+$as_echo "$am_cv_CCAS_dependencies_compiler_type" >&6; }
+CCASDEPMODE=depmode=$am_cv_CCAS_dependencies_compiler_type
+
+ if
+ test "x$enable_dependency_tracking" != xno \
+ && test "$am_cv_CCAS_dependencies_compiler_type" = gcc3; then
+ am__fastdepCCAS_TRUE=
+ am__fastdepCCAS_FALSE='#'
+else
+ am__fastdepCCAS_TRUE='#'
+ am__fastdepCCAS_FALSE=
+fi
+
+
@@ -11344,7 +11484,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 11347 "configure"
+#line 11487 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -11450,7 +11590,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 11453 "configure"
+#line 11593 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -13712,7 +13852,7 @@ $as_echo "#define USE_LIBFFI 1" >>confdefs.h
fi
- if test "$with_liffi" != "no"; then
+ if test "$with_libffi" != "no"; then
USE_LIBFFI_TRUE=
USE_LIBFFI_FALSE='#'
else
@@ -14138,7 +14278,7 @@ case "$target" in
# msghdr in <sys/socket.h>.
OSCFLAGS="$OSCFLAGS -D_XOPEN_SOURCE=500"
;;
- *-*-solaris2.1[01])
+ *-*-solaris2.*)
# Solaris 10+ needs this so struct msghdr gets the msg_control
# etc. fields in <sys/socket.h> (_XPG4_2). _XOPEN_SOURCE=600 as
# above doesn't work with C99.
@@ -15838,6 +15978,10 @@ if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then
as_fn_error $? "conditional \"am__fastdepCC\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
+if test -z "${am__fastdepCCAS_TRUE}" && test -z "${am__fastdepCCAS_FALSE}"; then
+ as_fn_error $? "conditional \"am__fastdepCCAS\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
if test -z "${MAINTAINER_MODE_TRUE}" && test -z "${MAINTAINER_MODE_FALSE}"; then
as_fn_error $? "conditional \"MAINTAINER_MODE\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
diff --git a/libgo/configure.ac b/libgo/configure.ac
index 03c07fe..0ae8162 100644
--- a/libgo/configure.ac
+++ b/libgo/configure.ac
@@ -26,6 +26,7 @@ m4_rename([_AC_ARG_VAR_PRECIOUS],[glibgo_PRECIOUS])
m4_define([_AC_ARG_VAR_PRECIOUS],[])
AC_PROG_CC
AC_PROG_GO
+AM_PROG_AS
m4_rename_force([glibgo_PRECIOUS],[_AC_ARG_VAR_PRECIOUS])
AC_SUBST(CFLAGS)
@@ -128,7 +129,7 @@ if test "$with_libffi" != no; then
fi
AC_SUBST(LIBFFI)
AC_SUBST(LIBFFIINCS)
-AM_CONDITIONAL(USE_LIBFFI, test "$with_liffi" != "no")
+AM_CONDITIONAL(USE_LIBFFI, test "$with_libffi" != "no")
# See if the user wants to configure without libatomic. This is useful if we are
# on an architecture for which libgo does not need an atomic support library and
@@ -397,7 +398,7 @@ case "$target" in
# msghdr in <sys/socket.h>.
OSCFLAGS="$OSCFLAGS -D_XOPEN_SOURCE=500"
;;
- *-*-solaris2.1[[01]])
+ *-*-solaris2.*)
# Solaris 10+ needs this so struct msghdr gets the msg_control
# etc. fields in <sys/socket.h> (_XPG4_2). _XOPEN_SOURCE=600 as
# above doesn't work with C99.
diff --git a/libgo/go/cmd/go/internal/work/gccgo.go b/libgo/go/cmd/go/internal/work/gccgo.go
index a0eb2d3..3b97209 100644
--- a/libgo/go/cmd/go/internal/work/gccgo.go
+++ b/libgo/go/cmd/go/internal/work/gccgo.go
@@ -209,9 +209,16 @@ func (tools gccgoToolchain) pack(b *Builder, a *Action, afile string, ofiles []s
}
absAfile := mkAbs(objdir, afile)
// Try with D modifier first, then without if that fails.
- if b.run(a, p.Dir, p.ImportPath, nil, tools.ar(), arArgs, "rcD", absAfile, absOfiles) != nil {
+ output, err := b.runOut(p.Dir, nil, tools.ar(), arArgs, "rcD", absAfile, absOfiles)
+ if err != nil {
return b.run(a, p.Dir, p.ImportPath, nil, tools.ar(), arArgs, "rc", absAfile, absOfiles)
}
+
+ if len(output) > 0 {
+ // Show the output if there is any even without errors.
+ b.showOutput(a, p.Dir, p.ImportPath, b.processOutput(output))
+ }
+
return nil
}
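The hunk above changes gccgo's pack step to capture the archiver's output and fall back from "rcD" to plain "rc" when the D (deterministic) modifier is rejected. Below is a minimal standalone sketch of that try-then-retry pattern using os/exec, with illustrative file names rather than cmd/go's Builder machinery:

package main

import (
	"fmt"
	"os/exec"
)

// pack archives ofiles into afile, preferring deterministic mode (D) and
// retrying without it for ar implementations that reject the modifier.
func pack(afile string, ofiles []string) error {
	out, err := exec.Command("ar", append([]string{"rcD", afile}, ofiles...)...).CombinedOutput()
	if err != nil {
		return exec.Command("ar", append([]string{"rc", afile}, ofiles...)...).Run()
	}
	if len(out) > 0 {
		fmt.Printf("%s", out) // surface diagnostics even on success
	}
	return nil
}

func main() {
	if err := pack("libdemo.a", []string{"demo.o"}); err != nil {
		fmt.Println("pack failed:", err)
	}
}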
diff --git a/libgo/go/go/internal/gccgoimporter/parser.go b/libgo/go/go/internal/gccgoimporter/parser.go
index 42f43a1..5881d9c 100644
--- a/libgo/go/go/internal/gccgoimporter/parser.go
+++ b/libgo/go/go/internal/gccgoimporter/parser.go
@@ -261,6 +261,10 @@ func (p *parser) parseField(pkg *types.Package) (field *types.Var, tag string) {
// Param = Name ["..."] Type .
func (p *parser) parseParam(pkg *types.Package) (param *types.Var, isVariadic bool) {
name := p.parseName()
+ // Ignore names invented for inlinable functions.
+ if strings.HasPrefix(name, "p.") || strings.HasPrefix(name, "r.") || strings.HasPrefix(name, "$ret") {
+ name = ""
+ }
if p.tok == '<' && p.scanner.Peek() == 'e' {
// EscInfo = "<esc:" int ">" . (optional and ignored)
p.next()
@@ -286,7 +290,14 @@ func (p *parser) parseParam(pkg *types.Package) (param *types.Var, isVariadic bo
// Var = Name Type .
func (p *parser) parseVar(pkg *types.Package) *types.Var {
name := p.parseName()
- return types.NewVar(token.NoPos, pkg, name, p.parseType(pkg))
+ v := types.NewVar(token.NoPos, pkg, name, p.parseType(pkg))
+ if name[0] == '.' || name[0] == '<' {
+ // This is an unexported variable,
+ // or a variable defined in a different package.
+ // We only want to record exported variables.
+ return nil
+ }
+ return v
}
// Conversion = "convert" "(" Type "," ConstValue ")" .
@@ -539,10 +550,12 @@ func (p *parser) parseNamedType(nlist []int) types.Type {
for p.tok == scanner.Ident {
p.expectKeyword("func")
if p.tok == '/' {
- // Skip a /*nointerface*/ comment.
+ // Skip a /*nointerface*/ or /*asm ID */ comment.
p.expect('/')
p.expect('*')
- p.expect(scanner.Ident)
+ if p.expect(scanner.Ident) == "asm" {
+ p.parseUnquotedString()
+ }
p.expect('*')
p.expect('/')
}
@@ -727,15 +740,29 @@ func (p *parser) parseFunctionType(pkg *types.Package, nlist []int) *types.Signa
// Func = Name FunctionType [InlineBody] .
func (p *parser) parseFunc(pkg *types.Package) *types.Func {
- name := p.parseName()
- if strings.ContainsRune(name, '$') {
- // This is a Type$equal or Type$hash function, which we don't want to parse,
- // except for the types.
- p.discardDirectiveWhileParsingTypes(pkg)
- return nil
+ if p.tok == '/' {
+ // Skip an /*asm ID */ comment.
+ p.expect('/')
+ p.expect('*')
+ if p.expect(scanner.Ident) == "asm" {
+ p.parseUnquotedString()
+ }
+ p.expect('*')
+ p.expect('/')
}
+
+ name := p.parseName()
f := types.NewFunc(token.NoPos, pkg, name, p.parseFunctionType(pkg, nil))
p.skipInlineBody()
+
+ if name[0] == '.' || name[0] == '<' || strings.ContainsRune(name, '$') {
+ // This is an unexported function,
+ // or a function defined in a different package,
+ // or a type$equal or type$hash function.
+ // We only want to record exported functions.
+ return nil
+ }
+
return f
}
@@ -756,7 +783,9 @@ func (p *parser) parseInterfaceType(pkg *types.Package, nlist []int) types.Type
embeddeds = append(embeddeds, p.parseType(pkg))
} else {
method := p.parseFunc(pkg)
- methods = append(methods, method)
+ if method != nil {
+ methods = append(methods, method)
+ }
}
p.expect(';')
}
@@ -1037,23 +1066,6 @@ func (p *parser) parsePackageInit() PackageInit {
return PackageInit{Name: name, InitFunc: initfunc, Priority: priority}
}
-// Throw away tokens until we see a newline or ';'.
-// If we see a '<', attempt to parse as a type.
-func (p *parser) discardDirectiveWhileParsingTypes(pkg *types.Package) {
- for {
- switch p.tok {
- case '\n', ';':
- return
- case '<':
- p.parseType(pkg)
- case scanner.EOF:
- p.error("unexpected EOF")
- default:
- p.next()
- }
- }
-}
-
// Create the package if we have parsed both the package path and package name.
func (p *parser) maybeCreatePackage() {
if p.pkgname != "" && p.pkgpath != "" {
@@ -1191,7 +1203,9 @@ func (p *parser) parseDirective() {
case "var":
p.next()
v := p.parseVar(p.pkg)
- p.pkg.Scope().Insert(v)
+ if v != nil {
+ p.pkg.Scope().Insert(v)
+ }
p.expectEOL()
case "const":
diff --git a/libgo/go/reflect/type.go b/libgo/go/reflect/type.go
index fb2e5d4..8493d87 100644
--- a/libgo/go/reflect/type.go
+++ b/libgo/go/reflect/type.go
@@ -1105,15 +1105,14 @@ func (t *rtype) ptrTo() *rtype {
return &pi.(*ptrType).rtype
}
+ // Look in known types.
s := "*" + *t.string
-
- canonicalTypeLock.RLock()
- r, ok := canonicalType[s]
- canonicalTypeLock.RUnlock()
- if ok {
- p := (*ptrType)(unsafe.Pointer(r.(*rtype)))
- pi, _ := ptrMap.LoadOrStore(t, p)
- return &pi.(*ptrType).rtype
+ if tt := lookupType(s); tt != nil {
+ p := (*ptrType)(unsafe.Pointer(tt))
+ if p.elem == t {
+ pi, _ := ptrMap.LoadOrStore(t, p)
+ return &pi.(*ptrType).rtype
+ }
}
// Create a new ptrType starting with the description
@@ -1138,10 +1137,7 @@ func (t *rtype) ptrTo() *rtype {
pp.ptrToThis = nil
pp.elem = t
- q := canonicalize(&pp.rtype)
- p := (*ptrType)(unsafe.Pointer(q.(*rtype)))
-
- pi, _ := ptrMap.LoadOrStore(t, p)
+ pi, _ := ptrMap.LoadOrStore(t, &pp)
return &pi.(*ptrType).rtype
}
@@ -1447,6 +1443,13 @@ func ChanOf(dir ChanDir, t Type) Type {
case BothDir:
s = "chan " + *typ.string
}
+ if tt := lookupType(s); tt != nil {
+ ch := (*chanType)(unsafe.Pointer(tt))
+ if ch.elem == typ && ch.dir == uintptr(dir) {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
// Make a channel type.
var ichan interface{} = (chan unsafe.Pointer)(nil)
@@ -1472,10 +1475,8 @@ func ChanOf(dir ChanDir, t Type) Type {
ch.uncommonType = nil
ch.ptrToThis = nil
- // Canonicalize before storing in lookupCache
- ti := toType(&ch.rtype)
- lookupCache.Store(ckey, ti.(*rtype))
- return ti
+ ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
+ return ti.(Type)
}
func ismapkey(*rtype) bool // implemented in runtime
@@ -1502,6 +1503,13 @@ func MapOf(key, elem Type) Type {
// Look in known types.
s := "map[" + *ktyp.string + "]" + *etyp.string
+ if tt := lookupType(s); tt != nil {
+ mt := (*mapType)(unsafe.Pointer(tt))
+ if mt.key == ktyp && mt.elem == etyp {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
// Make a map type.
// Note: flag values must match those used in the TMAP case
@@ -1544,10 +1552,8 @@ func MapOf(key, elem Type) Type {
mt.flags |= 16
}
- // Canonicalize before storing in lookupCache
- ti := toType(&mt.rtype)
- lookupCache.Store(ckey, ti.(*rtype))
- return ti
+ ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
+ return ti.(Type)
}
// FuncOf returns the function type with the given argument and result types.
@@ -1625,15 +1631,17 @@ func FuncOf(in, out []Type, variadic bool) Type {
}
str := funcStr(ft)
+ if tt := lookupType(str); tt != nil {
+ if haveIdenticalUnderlyingType(&ft.rtype, tt, true) {
+ return addToCache(tt)
+ }
+ }
// Populate the remaining fields of ft and store in cache.
ft.string = &str
ft.uncommonType = nil
ft.ptrToThis = nil
-
- // Canonicalize before storing in funcLookupCache
- tc := toType(&ft.rtype)
- return addToCache(tc.(*rtype))
+ return addToCache(&ft.rtype)
}
// funcStr builds a string representation of a funcType.
@@ -1873,6 +1881,13 @@ func SliceOf(t Type) Type {
// Look in known types.
s := "[]" + *typ.string
+ if tt := lookupType(s); tt != nil {
+ slice := (*sliceType)(unsafe.Pointer(tt))
+ if slice.elem == typ {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
// Make a slice type.
var islice interface{} = ([]unsafe.Pointer)(nil)
@@ -1888,10 +1903,8 @@ func SliceOf(t Type) Type {
slice.uncommonType = nil
slice.ptrToThis = nil
- // Canonicalize before storing in lookupCache
- ti := toType(&slice.rtype)
- lookupCache.Store(ckey, ti.(*rtype))
- return ti
+ ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
+ return ti.(Type)
}
// The structLookupCache caches StructOf lookups.
@@ -2106,6 +2119,13 @@ func StructOf(fields []StructField) Type {
return t
}
+ // Look in known types.
+ if tt := lookupType(str); tt != nil {
+ if haveIdenticalUnderlyingType(&typ.rtype, tt, true) {
+ return addToCache(tt)
+ }
+ }
+
typ.string = &str
typ.hash = hash
typ.size = size
@@ -2214,10 +2234,7 @@ func StructOf(fields []StructField) Type {
typ.uncommonType = nil
typ.ptrToThis = nil
-
- // Canonicalize before storing in structLookupCache
- ti := toType(&typ.rtype)
- return addToCache(ti.(*rtype))
+ return addToCache(&typ.rtype)
}
func runtimeStructField(field StructField) structField {
@@ -2300,6 +2317,13 @@ func ArrayOf(count int, elem Type) Type {
// Look in known types.
s := "[" + strconv.Itoa(count) + "]" + *typ.string
+ if tt := lookupType(s); tt != nil {
+ array := (*arrayType)(unsafe.Pointer(tt))
+ if array.elem == typ {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
// Make an array type.
var iarray interface{} = [1]unsafe.Pointer{}
@@ -2451,10 +2475,8 @@ func ArrayOf(count int, elem Type) Type {
}
}
- // Canonicalize before storing in lookupCache
- ti := toType(&array.rtype)
- lookupCache.Store(ckey, ti.(*rtype))
- return ti
+ ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
+ return ti.(Type)
}
func appendVarint(x []byte, v uintptr) []byte {
@@ -2466,42 +2488,19 @@ func appendVarint(x []byte, v uintptr) []byte {
}
// toType converts from a *rtype to a Type that can be returned
-// to the client of package reflect. In gc, the only concern is that
-// a nil *rtype must be replaced by a nil Type, but in gccgo this
-// function takes care of ensuring that multiple *rtype for the same
-// type are coalesced into a single Type.
-var canonicalType = make(map[string]Type)
-
-var canonicalTypeLock sync.RWMutex
-
-func canonicalize(t Type) Type {
- if t == nil {
- return nil
- }
- s := t.rawString()
- canonicalTypeLock.RLock()
- if r, ok := canonicalType[s]; ok {
- canonicalTypeLock.RUnlock()
- return r
- }
- canonicalTypeLock.RUnlock()
- canonicalTypeLock.Lock()
- if r, ok := canonicalType[s]; ok {
- canonicalTypeLock.Unlock()
- return r
- }
- canonicalType[s] = t
- canonicalTypeLock.Unlock()
- return t
-}
-
+// to the client of package reflect. The only concern is that
+// a nil *rtype must be replaced by a nil Type.
func toType(p *rtype) Type {
if p == nil {
return nil
}
- return canonicalize(p)
+ return p
}
+// Look up a compiler-generated type descriptor.
+// Implemented in runtime.
+func lookupType(s string) *rtype
+
// ifaceIndir reports whether t is stored indirectly in an interface value.
func ifaceIndir(t *rtype) bool {
return t.kind&kindDirectIface == 0
diff --git a/libgo/go/runtime/alg.go b/libgo/go/runtime/alg.go
index c6bc6b6..a2bb5bb 100644
--- a/libgo/go/runtime/alg.go
+++ b/libgo/go/runtime/alg.go
@@ -44,7 +44,6 @@ import (
//go:linkname ifacevaleq runtime.ifacevaleq
//go:linkname ifaceefaceeq runtime.ifaceefaceeq
//go:linkname efacevaleq runtime.efacevaleq
-//go:linkname eqstring runtime.eqstring
//go:linkname cmpstring runtime.cmpstring
//
// Temporary to be called from C code.
@@ -205,7 +204,7 @@ func nilinterequal(p, q unsafe.Pointer) bool {
}
func efaceeq(x, y eface) bool {
t := x._type
- if !eqtype(t, y._type) {
+ if t != y._type {
return false
}
if t == nil {
@@ -229,7 +228,7 @@ func ifaceeq(x, y iface) bool {
return false
}
t := *(**_type)(xtab)
- if !eqtype(t, *(**_type)(y.tab)) {
+ if t != *(**_type)(y.tab) {
return false
}
eq := t.equalfn
@@ -247,7 +246,7 @@ func ifacevaleq(x iface, t *_type, p unsafe.Pointer) bool {
return false
}
xt := *(**_type)(x.tab)
- if !eqtype(xt, t) {
+ if xt != t {
return false
}
eq := t.equalfn
@@ -268,7 +267,7 @@ func ifaceefaceeq(x iface, y eface) bool {
return false
}
xt := *(**_type)(x.tab)
- if !eqtype(xt, y._type) {
+ if xt != y._type {
return false
}
eq := xt.equalfn
@@ -285,7 +284,7 @@ func efacevaleq(x eface, t *_type, p unsafe.Pointer) bool {
if x._type == nil {
return false
}
- if !eqtype(x._type, t) {
+ if x._type != t {
return false
}
eq := t.equalfn
diff --git a/libgo/go/runtime/heapdump.go b/libgo/go/runtime/heapdump.go
index 3aa9e8a..b0506a8 100644
--- a/libgo/go/runtime/heapdump.go
+++ b/libgo/go/runtime/heapdump.go
@@ -437,17 +437,15 @@ func dumpmemstats() {
dumpint(uint64(memstats.numgc))
}
-func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *location, size, allocs, frees uintptr) {
- stk := (*[100000]location)(unsafe.Pointer(pstk))
+func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
+ stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
dumpint(tagMemProf)
dumpint(uint64(uintptr(unsafe.Pointer(b))))
dumpint(uint64(size))
dumpint(uint64(nstk))
for i := uintptr(0); i < nstk; i++ {
- pc := stk[i].pc
- fn := stk[i].function
- file := stk[i].filename
- line := stk[i].lineno
+ pc := stk[i]
+ fn, file, line, _ := funcfileline(pc, -1)
if fn == "" {
var buf [64]byte
n := len(buf)
diff --git a/libgo/go/runtime/iface.go b/libgo/go/runtime/iface.go
index 1c3a5f3..d434f9e 100644
--- a/libgo/go/runtime/iface.go
+++ b/libgo/go/runtime/iface.go
@@ -15,10 +15,7 @@ import (
//
//go:linkname requireitab runtime.requireitab
//go:linkname assertitab runtime.assertitab
-//go:linkname assertI2T runtime.assertI2T
-//go:linkname ifacetypeeq runtime.ifacetypeeq
-//go:linkname efacetype runtime.efacetype
-//go:linkname ifacetype runtime.ifacetype
+//go:linkname panicdottype runtime.panicdottype
//go:linkname ifaceE2E2 runtime.ifaceE2E2
//go:linkname ifaceI2E2 runtime.ifaceI2E2
//go:linkname ifaceE2I2 runtime.ifaceE2I2
@@ -236,7 +233,7 @@ func (m *itab) init() string {
ri++
}
- if !eqtype(lhsMethod.typ, rhsMethod.mtyp) {
+ if lhsMethod.typ != rhsMethod.mtyp {
m.methods[1] = nil
return *lhsMethod.name
}
@@ -356,35 +353,9 @@ func assertitab(lhs, rhs *_type) unsafe.Pointer {
return getitab(lhs, rhs, false)
}
-// Check whether an interface type may be converted to a non-interface
-// type, panicing if not.
-func assertI2T(lhs, rhs, inter *_type) {
- if rhs == nil {
- panic(&TypeAssertionError{nil, nil, lhs, ""})
- }
- if !eqtype(lhs, rhs) {
- panic(&TypeAssertionError{inter, rhs, lhs, ""})
- }
-}
-
-// Compare two type descriptors for equality.
-func ifacetypeeq(a, b *_type) bool {
- return eqtype(a, b)
-}
-
-// Return the type descriptor of an empty interface.
-// FIXME: This should be inlined by the compiler.
-func efacetype(e eface) *_type {
- return e._type
-}
-
-// Return the type descriptor of a non-empty interface.
-// FIXME: This should be inlined by the compiler.
-func ifacetype(i iface) *_type {
- if i.tab == nil {
- return nil
- }
- return *(**_type)(i.tab)
+// panicdottype is called when doing an i.(T) conversion and the conversion fails.
+func panicdottype(lhs, rhs, inter *_type) {
+ panic(&TypeAssertionError{inter, rhs, lhs, ""})
}
// Convert an empty interface to an empty interface, for a comma-ok
@@ -435,7 +406,7 @@ func ifaceI2I2(inter *_type, i iface) (iface, bool) {
// Convert an empty interface to a pointer non-interface type.
func ifaceE2T2P(t *_type, e eface) (unsafe.Pointer, bool) {
- if !eqtype(t, e._type) {
+ if t != e._type {
return nil, false
} else {
return e.data, true
@@ -444,7 +415,7 @@ func ifaceE2T2P(t *_type, e eface) (unsafe.Pointer, bool) {
// Convert a non-empty interface to a pointer non-interface type.
func ifaceI2T2P(t *_type, i iface) (unsafe.Pointer, bool) {
- if i.tab == nil || !eqtype(t, *(**_type)(i.tab)) {
+ if i.tab == nil || t != *(**_type)(i.tab) {
return nil, false
} else {
return i.data, true
@@ -453,7 +424,7 @@ func ifaceI2T2P(t *_type, i iface) (unsafe.Pointer, bool) {
// Convert an empty interface to a non-pointer non-interface type.
func ifaceE2T2(t *_type, e eface, ret unsafe.Pointer) bool {
- if !eqtype(t, e._type) {
+ if t != e._type {
typedmemclr(t, ret)
return false
} else {
@@ -468,7 +439,7 @@ func ifaceE2T2(t *_type, e eface, ret unsafe.Pointer) bool {
// Convert a non-empty interface to a non-pointer non-interface type.
func ifaceI2T2(t *_type, i iface, ret unsafe.Pointer) bool {
- if i.tab == nil || !eqtype(t, *(**_type)(i.tab)) {
+ if i.tab == nil || t != *(**_type)(i.tab) {
typedmemclr(t, ret)
return false
} else {
@@ -514,7 +485,7 @@ func ifaceT2Ip(to, from *_type) bool {
ri++
}
- if !eqtype(fromMethod.mtyp, toMethod.typ) {
+ if fromMethod.mtyp != toMethod.typ {
return false
}
diff --git a/libgo/go/runtime/map_fast32.go b/libgo/go/runtime/map_fast32.go
index 1fa5cd9..07a35e1 100644
--- a/libgo/go/runtime/map_fast32.go
+++ b/libgo/go/runtime/map_fast32.go
@@ -9,6 +9,15 @@ import (
"unsafe"
)
+// For gccgo, use go:linkname to rename compiler-called functions to
+// themselves, so that the compiler will export them.
+//
+//go:linkname mapaccess1_fast32 runtime.mapaccess1_fast32
+//go:linkname mapaccess2_fast32 runtime.mapaccess2_fast32
+//go:linkname mapassign_fast32 runtime.mapassign_fast32
+//go:linkname mapassign_fast32ptr runtime.mapassign_fast32ptr
+//go:linkname mapdelete_fast32 runtime.mapdelete_fast32
+
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
diff --git a/libgo/go/runtime/map_fast64.go b/libgo/go/runtime/map_fast64.go
index d23ac23..d21bf06 100644
--- a/libgo/go/runtime/map_fast64.go
+++ b/libgo/go/runtime/map_fast64.go
@@ -9,6 +9,15 @@ import (
"unsafe"
)
+// For gccgo, use go:linkname to rename compiler-called functions to
+// themselves, so that the compiler will export them.
+//
+//go:linkname mapaccess1_fast64 runtime.mapaccess1_fast64
+//go:linkname mapaccess2_fast64 runtime.mapaccess2_fast64
+//go:linkname mapassign_fast64 runtime.mapassign_fast64
+//go:linkname mapassign_fast64ptr runtime.mapassign_fast64ptr
+//go:linkname mapdelete_fast64 runtime.mapdelete_fast64
+
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
diff --git a/libgo/go/runtime/map_faststr.go b/libgo/go/runtime/map_faststr.go
index eced15a..083980f 100644
--- a/libgo/go/runtime/map_faststr.go
+++ b/libgo/go/runtime/map_faststr.go
@@ -9,6 +9,14 @@ import (
"unsafe"
)
+// For gccgo, use go:linkname to rename compiler-called functions to
+// themselves, so that the compiler will export them.
+//
+//go:linkname mapaccess1_faststr runtime.mapaccess1_faststr
+//go:linkname mapaccess2_faststr runtime.mapaccess2_faststr
+//go:linkname mapassign_faststr runtime.mapassign_faststr
+//go:linkname mapdelete_faststr runtime.mapdelete_faststr
+
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
diff --git a/libgo/go/runtime/mgcmark.go b/libgo/go/runtime/mgcmark.go
index dc5e797..1b8a7a3 100644
--- a/libgo/go/runtime/mgcmark.go
+++ b/libgo/go/runtime/mgcmark.go
@@ -1085,7 +1085,7 @@ func scanstackblockwithmap(pc, b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
span != nil && span.state != mSpanManual &&
(obj < span.base() || obj >= span.limit || span.state != mSpanInUse) {
print("runtime: found in object at *(", hex(b), "+", hex(i), ") = ", hex(obj), ", pc=", hex(pc), "\n")
- name, file, line := funcfileline(pc, -1)
+ name, file, line, _ := funcfileline(pc, -1)
print(name, "\n", file, ":", line, "\n")
//gcDumpObject("object", b, i)
throw("found bad pointer in Go stack (incorrect use of unsafe or cgo?)")
diff --git a/libgo/go/runtime/mprof.go b/libgo/go/runtime/mprof.go
index ab97569..132c2ff 100644
--- a/libgo/go/runtime/mprof.go
+++ b/libgo/go/runtime/mprof.go
@@ -24,6 +24,10 @@ const (
blockProfile
mutexProfile
+ // a profile bucket from one of the categories above whose stack
+ // trace has been fixed up / pruned.
+ prunedProfile
+
// size of bucket hash table
buckHashSize = 179999
@@ -52,6 +56,7 @@ type bucket struct {
hash uintptr
size uintptr
nstk uintptr
+ skip int
}
// A memRecord is the bucket data for a bucket of type memProfile,
@@ -138,11 +143,13 @@ type blockRecord struct {
}
var (
- mbuckets *bucket // memory profile buckets
- bbuckets *bucket // blocking profile buckets
- xbuckets *bucket // mutex profile buckets
- buckhash *[179999]*bucket
- bucketmem uintptr
+ mbuckets *bucket // memory profile buckets
+ bbuckets *bucket // blocking profile buckets
+ xbuckets *bucket // mutex profile buckets
+ sbuckets *bucket // pre-symbolization profile buckets (stacks fixed up)
+ freebuckets *bucket // freelist of unused fixed up profile buckets
+ buckhash *[179999]*bucket
+ bucketmem uintptr
mProf struct {
// All fields in mProf are protected by proflock.
@@ -158,12 +165,35 @@ var (
const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)
+// payloadOffset() returns a pointer into the part of a bucket
+// containing the profile payload (skips past the bucket struct itself
+// and then the stack trace).
+func payloadOffset(typ bucketType, nstk uintptr) uintptr {
+ if typ == prunedProfile {
+ // To allow reuse of prunedProfile buckets between different
+ // collections, allocate them with the max stack size (the portion
+ // of the stack used will vary from trace to trace).
+ nstk = maxStack
+ }
+ return unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
+}
+
+func max(x, y uintptr) uintptr {
+ if x > y {
+ return x
+ }
+ return y
+}
+
// newBucket allocates a bucket with the given type and number of stack entries.
-func newBucket(typ bucketType, nstk int) *bucket {
- size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(location{})
+func newBucket(typ bucketType, nstk int, skipCount int) *bucket {
+ size := payloadOffset(typ, uintptr(nstk))
switch typ {
default:
throw("invalid profile bucket type")
+ case prunedProfile:
+ // stack-fixed buckets are large enough to accommodate any payload.
+ size += max(unsafe.Sizeof(memRecord{}), unsafe.Sizeof(blockRecord{}))
case memProfile:
size += unsafe.Sizeof(memRecord{})
case blockProfile, mutexProfile:
@@ -174,35 +204,34 @@ func newBucket(typ bucketType, nstk int) *bucket {
bucketmem += size
b.typ = typ
b.nstk = uintptr(nstk)
+ b.skip = skipCount
return b
}
// stk returns the slice in b holding the stack.
-func (b *bucket) stk() []location {
- stk := (*[maxStack]location)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
+func (b *bucket) stk() []uintptr {
+ stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
return stk[:b.nstk:b.nstk]
}
// mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord {
- if b.typ != memProfile {
+ if b.typ != memProfile && b.typ != prunedProfile {
throw("bad use of bucket.mp")
}
- data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(location{}))
- return (*memRecord)(data)
+ return (*memRecord)(add(unsafe.Pointer(b), payloadOffset(b.typ, b.nstk)))
}
// bp returns the blockRecord associated with the blockProfile bucket b.
func (b *bucket) bp() *blockRecord {
- if b.typ != blockProfile && b.typ != mutexProfile {
+ if b.typ != blockProfile && b.typ != mutexProfile && b.typ != prunedProfile {
throw("bad use of bucket.bp")
}
- data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(location{}))
- return (*blockRecord)(data)
+ return (*blockRecord)(add(unsafe.Pointer(b), payloadOffset(b.typ, b.nstk)))
}
// Return the bucket for stk[0:nstk], allocating new bucket if needed.
-func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket {
+func stkbucket(typ bucketType, size uintptr, skip int, stk []uintptr, alloc bool) *bucket {
if buckhash == nil {
buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
if buckhash == nil {
@@ -212,8 +241,8 @@ func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket
// Hash stack.
var h uintptr
- for _, loc := range stk {
- h += loc.pc
+ for _, pc := range stk {
+ h += pc
h += h << 10
h ^= h >> 6
}
@@ -237,7 +266,7 @@ func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket
}
// Create new bucket.
- b := newBucket(typ, len(stk))
+ b := newBucket(typ, len(stk), skip)
copy(b.stk(), stk)
b.hash = h
b.size = size
@@ -249,6 +278,9 @@ func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket
} else if typ == mutexProfile {
b.allnext = xbuckets
xbuckets = b
+ } else if typ == prunedProfile {
+ b.allnext = sbuckets
+ sbuckets = b
} else {
b.allnext = bbuckets
bbuckets = b
@@ -256,7 +288,7 @@ func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket
return b
}
-func eqslice(x, y []location) bool {
+func eqslice(x, y []uintptr) bool {
if len(x) != len(y) {
return false
}
@@ -338,10 +370,11 @@ func mProf_PostSweep() {
// Called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
- var stk [maxStack]location
- nstk := callers(4, stk[:])
+ var stk [maxStack]uintptr
+ nstk := callersRaw(stk[:])
lock(&proflock)
- b := stkbucket(memProfile, size, stk[:nstk], true)
+ skip := 1
+ b := stkbucket(memProfile, size, skip, stk[:nstk], true)
c := mProf.cycle
mp := b.mp()
mpc := &mp.future[(c+2)%uint32(len(mp.future))]
@@ -414,16 +447,16 @@ func blocksampled(cycles int64) bool {
func saveblockevent(cycles int64, skip int, which bucketType) {
gp := getg()
var nstk int
- var stk [maxStack]location
+ var stk [maxStack]uintptr
if gp.m.curg == nil || gp.m.curg == gp {
- nstk = callers(skip, stk[:])
+ nstk = callersRaw(stk[:])
} else {
// FIXME: This should get a traceback of gp.m.curg.
// nstk = gcallers(gp.m.curg, skip, stk[:])
- nstk = callers(skip, stk[:])
+ nstk = callersRaw(stk[:])
}
lock(&proflock)
- b := stkbucket(which, 0, stk[:nstk], true)
+ b := stkbucket(which, 0, skip, stk[:nstk], true)
b.bp().count++
b.bp().cycles += cycles
unlock(&proflock)
@@ -521,6 +554,163 @@ func (r *MemProfileRecord) Stack() []uintptr {
return r.Stack0[0:]
}
+// reusebucket tries to pick a prunedProfile bucket off
+// the freebuckets list, returning it if one is available or nil
+// if the free list is empty.
+func reusebucket(nstk int) *bucket {
+ var b *bucket
+ if freebuckets != nil {
+ b = freebuckets
+ freebuckets = freebuckets.allnext
+ b.typ = prunedProfile
+ b.nstk = uintptr(nstk)
+ mp := b.mp()
+ // Hack: rely on the fact that memprofile records are
+ // larger than blockprofile records when clearing.
+ *mp = memRecord{}
+ }
+ return b
+}
+
+// freebucket appends the specified prunedProfile bucket
+// onto the free list, and removes references to it from the hash.
+func freebucket(tofree *bucket) *bucket {
+ // Thread this bucket into the free list.
+ ret := tofree.allnext
+ tofree.allnext = freebuckets
+ freebuckets = tofree
+
+ // Clean up the hash. The hash may point directly to this bucket...
+ i := int(tofree.hash % buckHashSize)
+ if buckhash[i] == tofree {
+ buckhash[i] = tofree.next
+ } else {
+ // ... or when this bucket was inserted by stkbucket, it may have been
+ // chained off some other unrelated bucket.
+ for b := buckhash[i]; b != nil; b = b.next {
+ if b.next == tofree {
+ b.next = tofree.next
+ break
+ }
+ }
+ }
+ return ret
+}
+
+// fixupStack takes a 'raw' stack trace (stack of PCs generated by
+// callersRaw) and performs pre-symbolization fixup on it, returning
+// the results in 'canonStack'. For each frame we look at the
+// file/func/line information, then use that info to decide whether to
+// include the frame in the final symbolized stack (removing frames
+// corresponding to 'morestack' routines, for example). We also expand
+// frames if the PC values to which they refer correspond to inlined
+// functions to allow for expanded symbolic info to be filled in
+// later. Note: there is code in go-callers.c's backtrace_full callback()
+// function that performs very similar fixups; these two code paths
+// should be kept in sync.
+func fixupStack(stk []uintptr, skip int, canonStack *[maxStack]uintptr, size uintptr) int {
+ var cidx int
+ var termTrace bool
+ // Increase the skip count to take into account the frames corresponding
+ // to runtime.callersRaw and to the C routine that it invokes.
+ skip += 2
+ for _, pc := range stk {
+ // Subtract 1 from PC to undo the 1 we added in callback in
+ // go-callers.c.
+ function, file, _, frames := funcfileline(pc-1, -1)
+
+ // Skip split-stack functions (match by function name)
+ skipFrame := false
+ if hasPrefix(function, "_____morestack_") || hasPrefix(function, "__morestack_") {
+ skipFrame = true
+ }
+
+ // Skip split-stack functions (match by file)
+ if hasSuffix(file, "/morestack.S") {
+ skipFrame = true
+ }
+
+ // Skip thunks and recover functions. There is no equivalent to
+ // these functions in the gc toolchain.
+ fcn := function
+ if hasSuffix(fcn, "..r") {
+ skipFrame = true
+ } else {
+ for fcn != "" && (fcn[len(fcn)-1] >= '0' && fcn[len(fcn)-1] <= '9') {
+ fcn = fcn[:len(fcn)-1]
+ }
+ if hasSuffix(fcn, "..stub") || hasSuffix(fcn, "..thunk") {
+ skipFrame = true
+ }
+ }
+ if skipFrame {
+ continue
+ }
+
+ // Terminate the trace if we encounter a frame corresponding to
+ // runtime.main, runtime.kickoff, makecontext, etc. See the
+ // corresponding code in go-callers.c, callback function used
+ // with backtrace_full.
+ if function == "makecontext" {
+ termTrace = true
+ }
+ if hasSuffix(file, "/proc.c") && function == "runtime_mstart" {
+ termTrace = true
+ }
+ if hasSuffix(file, "/proc.go") &&
+ (function == "runtime.main" || function == "runtime.kickoff") {
+ termTrace = true
+ }
+
+ // Expand inline frames.
+ for i := 0; i < frames; i++ {
+ (*canonStack)[cidx] = pc
+ cidx++
+ if cidx >= maxStack {
+ termTrace = true
+ break
+ }
+ }
+ if termTrace {
+ break
+ }
+ }
+
+ // Apply skip count. Needs to be done after expanding inline frames.
+ if skip != 0 {
+ if skip >= cidx {
+ return 0
+ }
+ copy(canonStack[:cidx-skip], canonStack[skip:])
+ return cidx - skip
+ }
+
+ return cidx
+}
+
+// fixupBucket takes a raw memprofile bucket and creates a new bucket
+// in which the stack trace has been fixed up (inline frames expanded,
+// unwanted frames stripped out). Original bucket is left unmodified;
+// a new symbolizeProfile bucket may be generated as a side effect.
+// Payload information from the original bucket is incorporated into
+// the new bucket.
+func fixupBucket(b *bucket) {
+ var canonStack [maxStack]uintptr
+ frames := fixupStack(b.stk(), b.skip, &canonStack, b.size)
+ cb := stkbucket(prunedProfile, b.size, 0, canonStack[:frames], true)
+ switch b.typ {
+ default:
+ throw("invalid profile bucket type")
+ case memProfile:
+ rawrecord := b.mp()
+ cb.mp().active.add(&rawrecord.active)
+ case blockProfile, mutexProfile:
+ bpcount := b.bp().count
+ cb.bp().count += bpcount
+ cb.bp().cycles += bpcount
+ }
+}
+
// MemProfile returns a profile of memory allocated and freed per allocation
// site.
//
@@ -576,15 +766,31 @@ func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
}
}
if n <= len(p) {
- ok = true
- idx := 0
- for b := mbuckets; b != nil; b = b.allnext {
+ var bnext *bucket
+
+ // Post-process raw buckets to fix up their stack traces
+ for b := mbuckets; b != nil; b = bnext {
+ bnext = b.allnext
mp := b.mp()
if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
- record(&p[idx], b)
- idx++
+ fixupBucket(b)
}
}
+
+ // Record pruned/fixed-up buckets
+ ok = true
+ idx := 0
+ for b := sbuckets; b != nil; b = b.allnext {
+ record(&p[idx], b)
+ idx++
+ }
+ n = idx
+
+ // Free up pruned buckets for use in next round
+ for b := sbuckets; b != nil; b = bnext {
+ bnext = freebucket(b)
+ }
+ sbuckets = nil
}
unlock(&proflock)
return
@@ -597,18 +803,18 @@ func record(r *MemProfileRecord, b *bucket) {
r.FreeBytes = int64(mp.active.free_bytes)
r.AllocObjects = int64(mp.active.allocs)
r.FreeObjects = int64(mp.active.frees)
- for i, loc := range b.stk() {
+ for i, pc := range b.stk() {
if i >= len(r.Stack0) {
break
}
- r.Stack0[i] = loc.pc
+ r.Stack0[i] = pc
}
for i := int(b.nstk); i < len(r.Stack0); i++ {
r.Stack0[i] = 0
}
}
-func iterate_memprof(fn func(*bucket, uintptr, *location, uintptr, uintptr, uintptr)) {
+func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
lock(&proflock)
for b := mbuckets; b != nil; b = b.allnext {
mp := b.mp()
@@ -625,39 +831,59 @@ type BlockProfileRecord struct {
StackRecord
}
-// BlockProfile returns n, the number of records in the current blocking profile.
-// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
-// If len(p) < n, BlockProfile does not change p and returns n, false.
-//
-// Most clients should use the runtime/pprof package or
-// the testing package's -test.blockprofile flag instead
-// of calling BlockProfile directly.
-func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
- lock(&proflock)
- for b := bbuckets; b != nil; b = b.allnext {
+func harvestBlockMutexProfile(buckets *bucket, p []BlockProfileRecord) (n int, ok bool) {
+ for b := buckets; b != nil; b = b.allnext {
n++
}
if n <= len(p) {
+ var bnext *bucket
+
+ // Post-process raw buckets to create pruned/fixed-up buckets
+ for b := buckets; b != nil; b = bnext {
+ bnext = b.allnext
+ fixupBucket(b)
+ }
+
+ // Record
ok = true
- for b := bbuckets; b != nil; b = b.allnext {
+ for b := sbuckets; b != nil; b = b.allnext {
bp := b.bp()
r := &p[0]
r.Count = bp.count
r.Cycles = bp.cycles
i := 0
- var loc location
- for i, loc = range b.stk() {
+ var pc uintptr
+ for i, pc = range b.stk() {
if i >= len(r.Stack0) {
break
}
- r.Stack0[i] = loc.pc
+ r.Stack0[i] = pc
}
for ; i < len(r.Stack0); i++ {
r.Stack0[i] = 0
}
p = p[1:]
}
+
+ // Free up pruned buckets for use in next round.
+ for b := sbuckets; b != nil; b = bnext {
+ bnext = freebucket(b)
+ }
+ sbuckets = nil
}
+ return
+}
+
+// BlockProfile returns n, the number of records in the current blocking profile.
+// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
+// If len(p) < n, BlockProfile does not change p and returns n, false.
+//
+// Most clients should use the runtime/pprof package or
+// the testing package's -test.blockprofile flag instead
+// of calling BlockProfile directly.
+func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
+ lock(&proflock)
+ n, ok = harvestBlockMutexProfile(bbuckets, p)
unlock(&proflock)
return
}
@@ -670,30 +896,7 @@ func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
// instead of calling MutexProfile directly.
func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
lock(&proflock)
- for b := xbuckets; b != nil; b = b.allnext {
- n++
- }
- if n <= len(p) {
- ok = true
- for b := xbuckets; b != nil; b = b.allnext {
- bp := b.bp()
- r := &p[0]
- r.Count = int64(bp.count)
- r.Cycles = bp.cycles
- i := 0
- var loc location
- for i, loc = range b.stk() {
- if i >= len(r.Stack0) {
- break
- }
- r.Stack0[i] = loc.pc
- }
- for ; i < len(r.Stack0); i++ {
- r.Stack0[i] = 0
- }
- p = p[1:]
- }
- }
+ n, ok = harvestBlockMutexProfile(xbuckets, p)
unlock(&proflock)
return
}
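One detail of fixupStack above is that the skip count is applied only after inline frames have been expanded, by sliding the surviving tail of the fixed-size array to the front. A small self-contained sketch of that step, using made-up PC values and a reduced maxStack:

package main

import "fmt"

const maxStack = 32

// applySkip drops the first skip entries of canonStack[:cidx] in place and
// returns the new length, mirroring the tail-copy in fixupStack.
func applySkip(canonStack *[maxStack]uintptr, cidx, skip int) int {
	if skip == 0 {
		return cidx
	}
	if skip >= cidx {
		return 0
	}
	copy(canonStack[:cidx-skip], canonStack[skip:cidx])
	return cidx - skip
}

func main() {
	var stk [maxStack]uintptr
	for i := 0; i < 6; i++ {
		stk[i] = uintptr(0x1000 + i)
	}
	n := applySkip(&stk, 6, 2)
	fmt.Println(n, stk[:n]) // 4 [4098 4099 4100 4101]
}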
diff --git a/libgo/go/runtime/panic.go b/libgo/go/runtime/panic.go
index 9b8ffb9..264ad38 100644
--- a/libgo/go/runtime/panic.go
+++ b/libgo/go/runtime/panic.go
@@ -53,7 +53,7 @@ var indexError = error(errorString("index out of range"))
// entire runtime stack for easier debugging.
func panicindex() {
- name, _, _ := funcfileline(getcallerpc()-1, -1)
+ name, _, _, _ := funcfileline(getcallerpc()-1, -1)
if hasPrefix(name, "runtime.") {
throw(string(indexError.(errorString)))
}
@@ -64,7 +64,7 @@ func panicindex() {
var sliceError = error(errorString("slice bounds out of range"))
func panicslice() {
- name, _, _ := funcfileline(getcallerpc()-1, -1)
+ name, _, _, _ := funcfileline(getcallerpc()-1, -1)
if hasPrefix(name, "runtime.") {
throw(string(sliceError.(errorString)))
}
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go
index 8146c1d..b40198e 100644
--- a/libgo/go/runtime/proc.go
+++ b/libgo/go/runtime/proc.go
@@ -18,6 +18,7 @@ import (
//go:linkname acquirep runtime.acquirep
//go:linkname releasep runtime.releasep
//go:linkname incidlelocked runtime.incidlelocked
+//go:linkname ginit runtime.ginit
//go:linkname schedinit runtime.schedinit
//go:linkname ready runtime.ready
//go:linkname stopm runtime.stopm
@@ -515,6 +516,15 @@ func cpuinit() {
cpu.Initialize(env)
}
+func ginit() {
+ _m_ := &m0
+ _g_ := &g0
+ _m_.g0 = _g_
+ _m_.curg = _g_
+ _g_.m = _m_
+ setg(_g_)
+}
+
// The bootstrap sequence is:
//
// call osinit
@@ -524,13 +534,7 @@ func cpuinit() {
//
// The new G calls runtime·main.
func schedinit() {
- _m_ := &m0
- _g_ := &g0
- _m_.g0 = _g_
- _m_.curg = _g_
- _g_.m = _m_
- setg(_g_)
-
+ _g_ := getg()
sched.maxmcount = 10000
usestackmaps = probestackmaps()
diff --git a/libgo/go/runtime/signal_gccgo.go b/libgo/go/runtime/signal_gccgo.go
index b3c78f6..6f362fc 100644
--- a/libgo/go/runtime/signal_gccgo.go
+++ b/libgo/go/runtime/signal_gccgo.go
@@ -60,11 +60,6 @@ type sigctxt struct {
}
func (c *sigctxt) sigcode() uint64 {
- if c.info == nil {
- // This can happen on Solaris 10. We don't know the
- // code, just avoid a misleading value.
- return _SI_USER + 1
- }
return uint64(c.info.si_code)
}
diff --git a/libgo/go/runtime/string.go b/libgo/go/runtime/string.go
index 025ea7a..9bcfc996 100644
--- a/libgo/go/runtime/string.go
+++ b/libgo/go/runtime/string.go
@@ -13,10 +13,6 @@ import (
// themselves, so that the compiler will export them.
//
//go:linkname concatstrings runtime.concatstrings
-//go:linkname concatstring2 runtime.concatstring2
-//go:linkname concatstring3 runtime.concatstring3
-//go:linkname concatstring4 runtime.concatstring4
-//go:linkname concatstring5 runtime.concatstring5
//go:linkname slicebytetostring runtime.slicebytetostring
//go:linkname slicebytetostringtmp runtime.slicebytetostringtmp
//go:linkname stringtoslicebyte runtime.stringtoslicebyte
@@ -38,7 +34,9 @@ type tmpBuf [tmpStringBufSize]byte
// If buf != nil, the compiler has determined that the result does not
// escape the calling function, so the string data can be stored in buf
// if small enough.
-func concatstrings(buf *tmpBuf, a []string) string {
+func concatstrings(buf *tmpBuf, p *string, n int) string {
+ var a []string
+ *(*slice)(unsafe.Pointer(&a)) = slice{unsafe.Pointer(p), n, n}
// idx := 0
l := 0
count := 0
@@ -73,22 +71,6 @@ func concatstrings(buf *tmpBuf, a []string) string {
return s
}
-func concatstring2(buf *tmpBuf, a [2]string) string {
- return concatstrings(buf, a[:])
-}
-
-func concatstring3(buf *tmpBuf, a [3]string) string {
- return concatstrings(buf, a[:])
-}
-
-func concatstring4(buf *tmpBuf, a [4]string) string {
- return concatstrings(buf, a[:])
-}
-
-func concatstring5(buf *tmpBuf, a [5]string) string {
- return concatstrings(buf, a[:])
-}
-
// Buf is a fixed-size buffer for the result,
// it is not nil if the result does not escape.
func slicebytetostring(buf *tmpBuf, b []byte) (str string) {
@@ -360,6 +342,10 @@ func hasPrefix(s, prefix string) bool {
return len(s) >= len(prefix) && s[:len(prefix)] == prefix
}
+func hasSuffix(s, suffix string) bool {
+ return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
+}
+
const (
maxUint = ^uint(0)
maxInt = int(maxUint >> 1)
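
With this change concatstrings receives the backing array as a raw pointer and length and rebuilds the []string header in place through the runtime's internal slice struct. The same header-building trick can be sketched in ordinary Go with unsafe; sliceHeader and view below are hypothetical names used only for illustration (current Go would spell this unsafe.Slice(p, n)):

package main

import (
	"fmt"
	"unsafe"
)

// sliceHeader mirrors the runtime's internal slice layout: data, len, cap.
type sliceHeader struct {
	data unsafe.Pointer
	len  int
	cap  int
}

// view builds a []string over n strings starting at p, without copying.
func view(p *string, n int) []string {
	var a []string
	*(*sliceHeader)(unsafe.Pointer(&a)) = sliceHeader{unsafe.Pointer(p), n, n}
	return a
}

func main() {
	arr := [3]string{"a", "b", "c"}
	fmt.Println(view(&arr[0], len(arr))) // [a b c]
}
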
diff --git a/libgo/go/runtime/stubs.go b/libgo/go/runtime/stubs.go
index 435cdf7..e00d759 100644
--- a/libgo/go/runtime/stubs.go
+++ b/libgo/go/runtime/stubs.go
@@ -273,18 +273,6 @@ func checkASM() bool {
return true
}
-func eqstring(x, y string) bool {
- a := stringStructOf(&x)
- b := stringStructOf(&y)
- if a.len != b.len {
- return false
- }
- if a.str == b.str {
- return true
- }
- return memequal(a.str, b.str, uintptr(a.len))
-}
-
// For gccgo this is in the C code.
func osyield()
@@ -310,13 +298,6 @@ func errno() int
func entersyscall()
func entersyscallblock()
-// For gccgo to call from C code, so that the C code and the Go code
-// can share the memstats variable for now.
-//go:linkname getMstats runtime.getMstats
-func getMstats() *mstats {
- return &memstats
-}
-
// Get signal trampoline, written in C.
func getSigtramp() uintptr
@@ -338,48 +319,12 @@ func dumpregs(*_siginfo_t, unsafe.Pointer)
// Implemented in C for gccgo.
func setRandomNumber(uint32)
-// Temporary for gccgo until we port proc.go.
-//go:linkname getsched runtime.getsched
-func getsched() *schedt {
- return &sched
-}
-
-// Temporary for gccgo until we port proc.go.
-//go:linkname getCgoHasExtraM runtime.getCgoHasExtraM
-func getCgoHasExtraM() *bool {
- return &cgoHasExtraM
-}
-
-// Temporary for gccgo until we port proc.go.
-//go:linkname getAllP runtime.getAllP
-func getAllP() **p {
- return &allp[0]
-}
-
-// Temporary for gccgo until we port proc.go.
+// Called by gccgo's proc.c.
//go:linkname allocg runtime.allocg
func allocg() *g {
return new(g)
}
-// Temporary for gccgo until we port the garbage collector.
-//go:linkname getallglen runtime.getallglen
-func getallglen() uintptr {
- return allglen
-}
-
-// Temporary for gccgo until we port the garbage collector.
-//go:linkname getallg runtime.getallg
-func getallg(i int) *g {
- return allgs[i]
-}
-
-// Temporary for gccgo until we port the garbage collector.
-//go:linkname getallm runtime.getallm
-func getallm() *m {
- return allm
-}
-
// Throw and rethrow an exception.
func throwException()
func rethrowException()
@@ -388,13 +333,6 @@ func rethrowException()
// used by the stack unwinder.
func unwindExceptionSize() uintptr
-// Temporary for gccgo until C code no longer needs it.
-//go:nosplit
-//go:linkname getPanicking runtime.getPanicking
-func getPanicking() uint32 {
- return panicking
-}
-
// Called by C code to set the number of CPUs.
//go:linkname setncpu runtime.setncpu
func setncpu(n int32) {
@@ -409,18 +347,6 @@ func setpagesize(s uintptr) {
}
}
-// Called by C code during library initialization.
-//go:linkname runtime_m0 runtime.runtime_m0
-func runtime_m0() *m {
- return &m0
-}
-
-// Temporary for gccgo until we port mgc.go.
-//go:linkname runtime_g0 runtime.runtime_g0
-func runtime_g0() *g {
- return &g0
-}
-
const uintptrMask = 1<<(8*sys.PtrSize) - 1
type bitvector struct {
diff --git a/libgo/go/runtime/symtab.go b/libgo/go/runtime/symtab.go
index d7e8c18..8f3c843 100644
--- a/libgo/go/runtime/symtab.go
+++ b/libgo/go/runtime/symtab.go
@@ -79,7 +79,7 @@ func (ci *Frames) Next() (frame Frame, more bool) {
// Subtract 1 from PC to undo the 1 we added in callback in
// go-callers.c.
- function, file, line := funcfileline(pc-1, int32(i))
+ function, file, line, _ := funcfileline(pc-1, int32(i))
if function == "" && file == "" {
return Frame{}, more
}
@@ -158,7 +158,7 @@ const (
// a *Func describing the innermost function, but with an entry
// of the outermost function.
func FuncForPC(pc uintptr) *Func {
- name, _, _ := funcfileline(pc, -1)
+ name, _, _, _ := funcfileline(pc, -1)
if name == "" {
return nil
}
@@ -187,7 +187,7 @@ func (f *Func) Entry() uintptr {
// The result will not be accurate if pc is not a program
// counter within f.
func (f *Func) FileLine(pc uintptr) (file string, line int) {
- _, file, line = funcfileline(pc, -1)
+ _, file, line, _ = funcfileline(pc, -1)
return file, line
}
@@ -261,5 +261,5 @@ func demangleSymbol(s string) string {
}
// implemented in go-caller.c
-func funcfileline(uintptr, int32) (string, string, int)
+func funcfileline(uintptr, int32) (string, string, int, int)
func funcentry(uintptr) uintptr
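
funcfileline now returns a fourth value, the count of inlined frames for the PC, which the callers in this hunk simply discard. For reference, the exported way to resolve a PC into function, file and line is unchanged; a minimal sketch using the public API (not part of this patch):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Grab this frame's PC; runtime.Caller also returns file and line directly.
	pc, _, _, ok := runtime.Caller(0)
	if !ok {
		return
	}
	// FuncForPC resolves the innermost function containing pc.
	f := runtime.FuncForPC(pc)
	file, line := f.FileLine(pc)
	fmt.Printf("%s at %s:%d (entry %#x)\n", f.Name(), file, line, f.Entry())
}
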
diff --git a/libgo/go/runtime/traceback_gccgo.go b/libgo/go/runtime/traceback_gccgo.go
index 7581798..4134d28 100644
--- a/libgo/go/runtime/traceback_gccgo.go
+++ b/libgo/go/runtime/traceback_gccgo.go
@@ -20,7 +20,7 @@ func printcreatedby(gp *g) {
if entry != 0 && tracepc > entry {
tracepc -= sys.PCQuantum
}
- function, file, line := funcfileline(tracepc, -1)
+ function, file, line, _ := funcfileline(tracepc, -1)
if function != "" && showframe(function, gp, false) && gp.goid != 1 {
printcreatedby1(function, file, line, entry, pc)
}
@@ -61,6 +61,16 @@ func callers(skip int, locbuf []location) int {
return int(n)
}
+//go:noescape
+//extern runtime_callersRaw
+func c_callersRaw(pcs *uintptr, max int32) int32
+
+// callersRaw returns a raw (PCs only) stack trace of the current goroutine.
+func callersRaw(pcbuf []uintptr) int {
+ n := c_callersRaw(&pcbuf[0], int32(len(pcbuf)))
+ return int(n)
+}
+
// traceback prints a traceback of the current goroutine.
// This differs from the gc version, which is given pc, sp, lr and g and
// can print a traceback of any goroutine.
@@ -83,7 +93,7 @@ func traceback(skip int32) {
func printAncestorTraceback(ancestor ancestorInfo) {
print("[originating from goroutine ", ancestor.goid, "]:\n")
for fidx, pc := range ancestor.pcs {
- function, file, line := funcfileline(pc, -1)
+ function, file, line, _ := funcfileline(pc, -1)
if showfuncinfo(function, fidx == 0) {
printAncestorTracebackFuncInfo(function, file, line, pc)
}
@@ -92,7 +102,7 @@ func printAncestorTraceback(ancestor ancestorInfo) {
print("...additional frames elided...\n")
}
// Show what created goroutine, except main goroutine (goid 1).
- function, file, line := funcfileline(ancestor.gopc, -1)
+ function, file, line, _ := funcfileline(ancestor.gopc, -1)
if function != "" && showfuncinfo(function, false) && ancestor.goid != 1 {
printcreatedby1(function, file, line, funcentry(ancestor.gopc), ancestor.gopc)
}
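
callersRaw collects bare program counters and leaves symbolization for later, which is what the reworked memory profiler needs. The exported equivalent of that split is runtime.Callers followed by runtime.CallersFrames, sketched below (not part of this patch):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Collect raw return PCs for the current goroutine; skip=1 drops the
	// runtime.Callers frame itself.
	pcs := make([]uintptr, 32)
	n := runtime.Callers(1, pcs)

	// Symbolize later: CallersFrames expands inlined frames as it iterates.
	frames := runtime.CallersFrames(pcs[:n])
	for {
		frame, more := frames.Next()
		fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
		if !more {
			break
		}
	}
}
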
diff --git a/libgo/go/runtime/type.go b/libgo/go/runtime/type.go
index 5cafa38..8af6246 100644
--- a/libgo/go/runtime/type.go
+++ b/libgo/go/runtime/type.go
@@ -6,7 +6,11 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
type _type struct {
size uintptr
@@ -44,22 +48,6 @@ func (t *_type) pkgpath() string {
return ""
}
-// Return whether two type descriptors are equal.
-// This is gccgo-specific, as gccgo, unlike gc, permits multiple
-// independent descriptors for a single type.
-func eqtype(t1, t2 *_type) bool {
- switch {
- case t1 == t2:
- return true
- case t1 == nil || t2 == nil:
- return false
- case t1.kind != t2.kind || t1.hash != t2.hash:
- return false
- default:
- return t1.string() == t2.string()
- }
-}
-
type method struct {
name *string
pkgPath *string
@@ -164,3 +152,62 @@ type structtype struct {
typ _type
fields []structfield
}
+
+// typeDescriptorList holds a list of type descriptors generated
+// by the compiler, and is used by the compiler to register
+// type descriptors with the runtime.
+// The layout is known to the compiler.
+//go:notinheap
+type typeDescriptorList struct {
+ count int
+ types [1]uintptr // variable length
+}
+
+// typelist holds all type descriptors generated by the compiler.
+// This is for the reflect package to deduplicate type descriptors
+// when it creates a type that is also a compiler-generated type.
+var typelist struct {
+ initialized uint32
+ lists []*typeDescriptorList // one element per package
+ types map[string]uintptr // map from a type's string to *_type, lazily populated
+ // TODO: use a sorted array instead?
+}
+var typelistLock mutex
+
+// The compiler generates a call of this function in the main
+// package's init function, to register compiler-generated
+// type descriptors.
+// p points to a list of *typeDescriptorList, n is the length
+// of the list.
+//go:linkname registerTypeDescriptors runtime.registerTypeDescriptors
+func registerTypeDescriptors(n int, p unsafe.Pointer) {
+ *(*slice)(unsafe.Pointer(&typelist.lists)) = slice{p, n, n}
+}
+
+// The reflect package uses this function to look up a compiler-
+// generated type descriptor.
+//go:linkname reflect_lookupType reflect.lookupType
+func reflect_lookupType(s string) *_type {
+ // Lazy initialization. We don't need to do this if we never create
+ // types through reflection.
+ if atomic.Load(&typelist.initialized) == 0 {
+ lock(&typelistLock)
+ if atomic.Load(&typelist.initialized) == 0 {
+ n := 0
+ for _, list := range typelist.lists {
+ n += list.count
+ }
+ typelist.types = make(map[string]uintptr, n)
+ for _, list := range typelist.lists {
+ for i := 0; i < list.count; i++ {
+ typ := *(**_type)(add(unsafe.Pointer(&list.types), uintptr(i)*sys.PtrSize))
+ typelist.types[typ.string()] = uintptr(unsafe.Pointer(typ))
+ }
+ }
+ atomic.Store(&typelist.initialized, 1)
+ }
+ unlock(&typelistLock)
+ }
+
+ return (*_type)(unsafe.Pointer(typelist.types[s]))
+}
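
reflect_lookupType builds its name-to-descriptor map lazily with a double-checked pattern: an atomic flag is read first, the lock is taken only on the slow path, and the flag is re-checked under the lock before the map is built. A standalone sketch of the same pattern with hypothetical names, using sync/atomic and sync.Mutex in place of the runtime's internal lock (application code would normally reach for sync.Once instead):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

var (
	initialized uint32
	mu          sync.Mutex
	table       map[string]int
)

// lookup builds the table the first time it is needed, then reads it lock-free.
func lookup(key string) (int, bool) {
	if atomic.LoadUint32(&initialized) == 0 {
		mu.Lock()
		if atomic.LoadUint32(&initialized) == 0 {
			// Build the table once; later readers observe it via the flag.
			table = map[string]int{"int": 8, "string": 16}
			atomic.StoreUint32(&initialized, 1)
		}
		mu.Unlock()
	}
	v, ok := table[key]
	return v, ok
}

func main() {
	fmt.Println(lookup("int"))
}
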
diff --git a/libgo/go/syscall/wait.c b/libgo/go/syscall/wait.c
index 0b234d0..39bc035 100644
--- a/libgo/go/syscall/wait.c
+++ b/libgo/go/syscall/wait.c
@@ -51,7 +51,7 @@ extern _Bool Continued (uint32_t *w)
__asm__ (GOSYM_PREFIX "syscall.WaitStatus.Continued");
_Bool
-Continued (uint32_t *w)
+Continued (uint32_t *w __attribute__ ((unused)))
{
return WIFCONTINUED (*w) != 0;
}
diff --git a/libgo/mksysinfo.sh b/libgo/mksysinfo.sh
index c9dd8d4..5f7b5f0 100755
--- a/libgo/mksysinfo.sh
+++ b/libgo/mksysinfo.sh
@@ -735,13 +735,9 @@ if ! grep "const EAI_OVERFLOW " ${OUT} >/dev/null 2>&1; then
fi
# The passwd struct.
-# Force uid and gid from int32 to uint32 for consistency; they are
-# int32 on Solaris 10 but uint32 everywhere else including Solaris 11.
grep '^type _passwd ' gen-sysinfo.go | \
sed -e 's/_passwd/Passwd/' \
-e 's/ pw_/ Pw_/g' \
- -e 's/ Pw_uid int32/ Pw_uid uint32/' \
- -e 's/ Pw_gid int32/ Pw_gid uint32/' \
>> ${OUT}
# The group struct.
diff --git a/libgo/runtime/go-caller.c b/libgo/runtime/go-caller.c
index 2143446..5e31f91 100644
--- a/libgo/runtime/go-caller.c
+++ b/libgo/runtime/go-caller.c
@@ -26,11 +26,13 @@ struct caller
String file;
intgo line;
intgo index;
+ intgo frames;
};
/* Collect file/line information for a PC value. If this is called
- more than once, due to inlined functions, we use the last call, as
- that is usually the most useful one. */
+ more than once, due to inlined functions, we record the number of
+ inlined frames but return file/func/line for the last call, as
+ that is usually the most useful one. */
static int
callback (void *data, uintptr_t pc __attribute__ ((unused)),
@@ -38,6 +40,8 @@ callback (void *data, uintptr_t pc __attribute__ ((unused)),
{
struct caller *c = (struct caller *) data;
+ c->frames++;
+
/* The libbacktrace library says that these strings might disappear,
but with the current implementation they won't. We can't easily
allocate memory here, so for now assume that we can save a
@@ -125,18 +129,19 @@ __go_get_backtrace_state ()
return back_state;
}
-/* Return function/file/line information for PC. The index parameter
+/* Return function/file/line/nframes information for PC. The index parameter
is the entry on the stack of inlined functions; -1 means the last
- one. */
+ one, with *nframes set to the count of inlined frames for this PC. */
static _Bool
-__go_file_line (uintptr pc, int index, String *fn, String *file, intgo *line)
+__go_file_line (uintptr pc, int index, String *fn, String *file, intgo *line, intgo *nframes)
{
struct caller c;
struct backtrace_state *state;
runtime_memclr (&c, sizeof c);
c.index = index;
+ c.frames = 0;
runtime_xadd (&__go_runtime_in_callers, 1);
state = __go_get_backtrace_state ();
runtime_xadd (&__go_runtime_in_callers, -1);
@@ -144,6 +149,7 @@ __go_file_line (uintptr pc, int index, String *fn, String *file, intgo *line)
*fn = c.fn;
*file = c.file;
*line = c.line;
+ *nframes = c.frames;
// If backtrace_pcinfo didn't get the function name from the debug
// info, try to get it from the symbol table.
@@ -222,7 +228,7 @@ runtime_funcfileline (uintptr targetpc, int32 index)
struct funcfileline_return ret;
if (!__go_file_line (targetpc, index, &ret.retfn, &ret.retfile,
- &ret.retline))
+ &ret.retline, &ret.retframes))
runtime_memclr (&ret, sizeof ret);
return ret;
}
diff --git a/libgo/runtime/go-callers.c b/libgo/runtime/go-callers.c
index 31ff474..e7d53a3 100644
--- a/libgo/runtime/go-callers.c
+++ b/libgo/runtime/go-callers.c
@@ -63,7 +63,9 @@ callback (void *data, uintptr_t pc, const char *filename, int lineno,
/* Skip thunks and recover functions. There is no equivalent to
these functions in the gc toolchain, so returning them here means
- significantly different results for runtime.Caller(N). */
+ significantly different results for runtime.Caller(N). See also
+ similar code in runtime/mprof.go that strips out such functions
+ for block/mutex/memory profiles. */
if (function != NULL && !arg->keep_thunks)
{
const char *p;
@@ -262,3 +264,54 @@ Callers (intgo skip, struct __go_open_array pc)
return ret;
}
+
+struct callersRaw_data
+{
+ uintptr* pcbuf;
+ int index;
+ int max;
+};
+
+// Callback function for backtrace_simple. Just collect pc's.
+// Return zero to continue, non-zero to stop.
+
+static int callback_raw (void *data, uintptr_t pc)
+{
+ struct callersRaw_data *arg = (struct callersRaw_data *) data;
+
+ /* On the call to backtrace_simple the pc value was most likely
+ decremented if there was a normal call, since the pc referred to
+ the instruction where the call returned and not the call itself.
+ This was done so that the line number referred to the call
+ instruction. To make sure the actual pc from the call stack is
+ used, it is incremented here.
+
+ In the case of a signal, the pc was not decremented by
+ backtrace_full but still incremented here. That doesn't really
+ hurt anything since the line number is right and the pc refers to
+ the same instruction. */
+
+ arg->pcbuf[arg->index] = pc + 1;
+ arg->index++;
+ return arg->index >= arg->max;
+}
+
+/* runtime_callersRaw is similar to runtime_callers() above, but
+ it returns raw PC values as opposed to file/func/line locations. */
+int32
+runtime_callersRaw (uintptr *pcbuf, int32 m)
+{
+ struct callersRaw_data data;
+ struct backtrace_state* state;
+
+ data.pcbuf = pcbuf;
+ data.index = 0;
+ data.max = m;
+ runtime_xadd (&__go_runtime_in_callers, 1);
+ state = __go_get_backtrace_state ();
+ backtrace_simple (state, 0, callback_raw, error_callback, &data);
+ runtime_xadd (&__go_runtime_in_callers, -1);
+
+ return data.index;
+}
+
diff --git a/libgo/runtime/go-context.S b/libgo/runtime/go-context.S
new file mode 100644
index 0000000..8beeebf
--- /dev/null
+++ b/libgo/runtime/go-context.S
@@ -0,0 +1,69 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This provides a simplified version of getcontext and
+// setcontext. They are like the corresponding functions
+// in libc, but we only save/restore the callee-save
+// registers and PC, SP. Unlike the libc functions, we
+// don't save/restore the signal masks and floating point
+// environment.
+
+#if defined(__x86_64__) && defined(__linux__) && !defined(__CET__)
+
+#define RBP_OFF (0*8)
+#define RBX_OFF (1*8)
+#define R12_OFF (2*8)
+#define R13_OFF (3*8)
+#define R14_OFF (4*8)
+#define R15_OFF (5*8)
+#define SP_OFF (6*8)
+#define PC_OFF (7*8)
+
+.globl __go_getcontext
+.text
+__go_getcontext:
+ movq %rbx, RBX_OFF(%rdi)
+ movq %rbp, RBP_OFF(%rdi)
+ movq %r12, R12_OFF(%rdi)
+ movq %r13, R13_OFF(%rdi)
+ movq %r14, R14_OFF(%rdi)
+ movq %r15, R15_OFF(%rdi)
+
+ movq (%rsp), %rax // return PC
+ movq %rax, PC_OFF(%rdi)
+ leaq 8(%rsp), %rax // the SP before pushing return PC
+ movq %rax, SP_OFF(%rdi)
+
+ ret
+
+.globl __go_setcontext
+.text
+__go_setcontext:
+ movq RBX_OFF(%rdi), %rbx
+ movq RBP_OFF(%rdi), %rbp
+ movq R12_OFF(%rdi), %r12
+ movq R13_OFF(%rdi), %r13
+ movq R14_OFF(%rdi), %r14
+ movq R15_OFF(%rdi), %r15
+ movq SP_OFF(%rdi), %rsp
+ movq PC_OFF(%rdi), %rdx
+
+ jmp *%rdx
+
+.globl __go_makecontext
+.text
+__go_makecontext:
+ addq %rcx, %rdx
+
+ // Align the SP, and push a dummy return address.
+ andq $~0xf, %rdx
+ subq $8, %rdx
+ movq $0, (%rdx)
+
+ movq %rdx, SP_OFF(%rdi)
+ movq %rsi, PC_OFF(%rdi)
+
+ ret
+
+#endif
diff --git a/libgo/runtime/go-libmain.c b/libgo/runtime/go-libmain.c
index 00a8e6b..f379569 100644
--- a/libgo/runtime/go-libmain.c
+++ b/libgo/runtime/go-libmain.c
@@ -225,11 +225,11 @@ gostart (void *arg)
return NULL;
runtime_isstarted = true;
+ runtime_ginit ();
runtime_check ();
runtime_args (a->argc, (byte **) a->argv);
setncpu (getproccount ());
setpagesize (getpagesize ());
- runtime_sched = runtime_getsched();
runtime_schedinit ();
__go_go ((uintptr)(runtime_main), NULL);
runtime_mstart (runtime_m ());
diff --git a/libgo/runtime/go-main.c b/libgo/runtime/go-main.c
index 301ac4e..51ce15f 100644
--- a/libgo/runtime/go-main.c
+++ b/libgo/runtime/go-main.c
@@ -48,12 +48,12 @@ main (int argc, char **argv)
setIsCgo ();
__go_end = (uintptr)_end;
+ runtime_ginit ();
runtime_cpuinit ();
runtime_check ();
runtime_args (argc, (byte **) argv);
setncpu (getproccount ());
setpagesize (getpagesize ());
- runtime_sched = runtime_getsched();
runtime_schedinit ();
__go_go ((uintptr)(runtime_main), NULL);
runtime_mstart (runtime_m ());
diff --git a/libgo/runtime/go-memclr.c b/libgo/runtime/go-memclr.c
index e478b65..b5d4975 100644
--- a/libgo/runtime/go-memclr.c
+++ b/libgo/runtime/go-memclr.c
@@ -7,7 +7,8 @@
#include "runtime.h"
void memclrNoHeapPointers(void *, uintptr)
- __asm__ (GOSYM_PREFIX "runtime.memclrNoHeapPointers");
+ __asm__ (GOSYM_PREFIX "runtime.memclrNoHeapPointers")
+ __attribute__ ((no_split_stack));
void
memclrNoHeapPointers (void *p1, uintptr len)
diff --git a/libgo/runtime/go-memcmp.c b/libgo/runtime/go-memcmp.c
deleted file mode 100644
index 78a356b..0000000
--- a/libgo/runtime/go-memcmp.c
+++ /dev/null
@@ -1,13 +0,0 @@
-/* go-memcmp.c -- the go memory comparison function.
-
- Copyright 2012 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-
-intgo
-__go_memcmp (const void *p1, const void *p2, uintptr len)
-{
- return __builtin_memcmp (p1, p2, len);
-}
diff --git a/libgo/runtime/go-memequal.c b/libgo/runtime/go-memequal.c
index 5f514aa..9648cba 100644
--- a/libgo/runtime/go-memequal.c
+++ b/libgo/runtime/go-memequal.c
@@ -7,7 +7,8 @@
#include "runtime.h"
_Bool memequal (void *, void *, uintptr)
- __asm__ (GOSYM_PREFIX "runtime.memequal");
+ __asm__ (GOSYM_PREFIX "runtime.memequal")
+ __attribute__ ((no_split_stack));
_Bool
memequal (void *p1, void *p2, uintptr len)
diff --git a/libgo/runtime/go-runtime-error.c b/libgo/runtime/go-runtime-error.c
index 5db3555..c9ccf98 100644
--- a/libgo/runtime/go-runtime-error.c
+++ b/libgo/runtime/go-runtime-error.c
@@ -38,21 +38,24 @@ enum
memory locations. */
NIL_DEREFERENCE = 6,
- /* Slice length or capacity out of bounds in make: negative or
- overflow or length greater than capacity. */
- MAKE_SLICE_OUT_OF_BOUNDS = 7,
+ /* Slice length out of bounds in make: negative or overflow or length
+ greater than capacity. */
+ MAKE_SLICE_LEN_OUT_OF_BOUNDS = 7,
+
+ /* Slice capacity out of bounds in make: negative. */
+ MAKE_SLICE_CAP_OUT_OF_BOUNDS = 8,
/* Map capacity out of bounds in make: negative or overflow. */
- MAKE_MAP_OUT_OF_BOUNDS = 8,
+ MAKE_MAP_OUT_OF_BOUNDS = 9,
/* Channel capacity out of bounds in make: negative or overflow. */
- MAKE_CHAN_OUT_OF_BOUNDS = 9,
+ MAKE_CHAN_OUT_OF_BOUNDS = 10,
/* Integer division by zero. */
- DIVISION_BY_ZERO = 10,
+ DIVISION_BY_ZERO = 11,
/* Go statement with nil function. */
- GO_NIL = 11
+ GO_NIL = 12
};
extern void __go_runtime_error (int32) __attribute__ ((noreturn));
@@ -88,8 +91,11 @@ __go_runtime_error (int32 i)
case NIL_DEREFERENCE:
runtime_panicstring ("nil pointer dereference");
- case MAKE_SLICE_OUT_OF_BOUNDS:
- runtime_panicstring ("make slice len or cap out of range");
+ case MAKE_SLICE_LEN_OUT_OF_BOUNDS:
+ runtime_panicstring ("make slice len out of range");
+
+ case MAKE_SLICE_CAP_OUT_OF_BOUNDS:
+ runtime_panicstring ("make slice cap out of range");
case MAKE_MAP_OUT_OF_BOUNDS:
runtime_panicstring ("make map len out of range");
diff --git a/libgo/runtime/go-strslice.c b/libgo/runtime/go-strslice.c
deleted file mode 100644
index d51c249..0000000
--- a/libgo/runtime/go-strslice.c
+++ /dev/null
@@ -1,30 +0,0 @@
-/* go-strslice.c -- the go string slice function.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-
-String
-__go_string_slice (String s, intgo start, intgo end)
-{
- intgo len;
- String ret;
-
- len = s.len;
- if (end == -1)
- end = len;
- if (start > len || end < start || end > len)
- runtime_panicstring ("string index out of bounds");
- ret.len = end - start;
- // If the length of the new string is zero, the str field doesn't
- // matter, so just set it to nil. This avoids the problem of
- // s.str + start pointing just past the end of the string,
- // which may keep the next memory block alive unnecessarily.
- if (ret.len == 0)
- ret.str = nil;
- else
- ret.str = s.str + start;
- return ret;
-}
diff --git a/libgo/runtime/go-type.h b/libgo/runtime/go-type.h
index 03806f6..1935703 100644
--- a/libgo/runtime/go-type.h
+++ b/libgo/runtime/go-type.h
@@ -153,53 +153,6 @@ struct __go_uncommon_type
struct __go_open_array __methods;
};
-/* The type descriptor for a fixed array type. */
-
-struct __go_array_type
-{
- /* Starts like all type descriptors. */
- struct __go_type_descriptor __common;
-
- /* The element type. */
- struct __go_type_descriptor *__element_type;
-
- /* The type of a slice of the same element type. */
- struct __go_type_descriptor *__slice_type;
-
- /* The length of the array. */
- uintptr_t __len;
-};
-
-/* The type descriptor for a slice. */
-
-struct __go_slice_type
-{
- /* Starts like all other type descriptors. */
- struct __go_type_descriptor __common;
-
- /* The element type. */
- struct __go_type_descriptor *__element_type;
-};
-
-/* The direction of a channel. */
-#define CHANNEL_RECV_DIR 1
-#define CHANNEL_SEND_DIR 2
-#define CHANNEL_BOTH_DIR (CHANNEL_RECV_DIR | CHANNEL_SEND_DIR)
-
-/* The type descriptor for a channel. */
-
-struct __go_channel_type
-{
- /* Starts like all other type descriptors. */
- struct __go_type_descriptor __common;
-
- /* The element type. */
- const struct __go_type_descriptor *__element_type;
-
- /* The direction. */
- uintptr_t __dir;
-};
-
/* The type descriptor for a function. */
struct __go_func_type
@@ -221,34 +174,6 @@ struct __go_func_type
struct __go_open_array __out;
};
-/* A method on an interface type. */
-
-struct __go_interface_method
-{
- /* The name of the method. */
- const struct String *__name;
-
- /* This is NULL for an exported method, or the name of the package
- where it lives. */
- const struct String *__pkg_path;
-
- /* The real type of the method. */
- struct __go_type_descriptor *__type;
-};
-
-/* An interface type. */
-
-struct __go_interface_type
-{
- /* Starts like all other type descriptors. */
- struct __go_type_descriptor __common;
-
- /* Array of __go_interface_method . The methods are sorted in the
- same order that they appear in the definition of the
- interface. */
- struct __go_open_array __methods;
-};
-
/* A map type. */
struct __go_map_type
@@ -301,69 +226,4 @@ struct __go_ptr_type
const struct __go_type_descriptor *__element_type;
};
-/* A field in a structure. */
-
-struct __go_struct_field
-{
- /* The name of the field--NULL for an anonymous field. */
- const struct String *__name;
-
- /* This is NULL for an exported method, or the name of the package
- where it lives. */
- const struct String *__pkg_path;
-
- /* The type of the field. */
- const struct __go_type_descriptor *__type;
-
- /* The field tag, or NULL. */
- const struct String *__tag;
-
- /* The offset of the field in the struct. */
- uintptr_t __offset;
-};
-
-/* A struct type. */
-
-struct __go_struct_type
-{
- /* Starts like all other type descriptors. */
- struct __go_type_descriptor __common;
-
- /* An array of struct __go_struct_field. */
- struct __go_open_array __fields;
-};
-
-/* Whether a type descriptor is a pointer. */
-
-static inline _Bool
-__go_is_pointer_type (const struct __go_type_descriptor *td)
-{
- return ((td->__code & GO_CODE_MASK) == GO_PTR
- || (td->__code & GO_CODE_MASK) == GO_UNSAFE_POINTER);
-}
-
-/* Call a type hash function, given the __hashfn value. */
-
-static inline uintptr_t
-__go_call_hashfn (const FuncVal *hashfn, const void *p, uintptr_t seed,
- uintptr_t size)
-{
- uintptr_t (*h) (const void *, uintptr_t, uintptr_t) = (void *) hashfn->fn;
- return __builtin_call_with_static_chain (h (p, seed, size), hashfn);
-}
-
-/* Call a type equality function, given the __equalfn value. */
-
-static inline _Bool
-__go_call_equalfn (const FuncVal *equalfn, const void *p1, const void *p2,
- uintptr_t size)
-{
- _Bool (*e) (const void *, const void *, uintptr_t) = (void *) equalfn->fn;
- return __builtin_call_with_static_chain (e (p1, p2, size), equalfn);
-}
-
-extern _Bool
-__go_type_descriptors_equal(const struct __go_type_descriptor*,
- const struct __go_type_descriptor*);
-
#endif /* !defined(LIBGO_GO_TYPE_H) */
diff --git a/libgo/runtime/go-typedesc-equal.c b/libgo/runtime/go-typedesc-equal.c
deleted file mode 100644
index 90079f2..0000000
--- a/libgo/runtime/go-typedesc-equal.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/* go-typedesc-equal.c -- return whether two type descriptors are equal.
-
- Copyright 2009 The Go Authors. All rights reserved.
- Use of this source code is governed by a BSD-style
- license that can be found in the LICENSE file. */
-
-#include "runtime.h"
-#include "go-string.h"
-#include "go-type.h"
-
-/* Compare type descriptors for equality. This is necessary because
- types may have different descriptors in different shared libraries.
- Also, unnamed types may have multiple type descriptors even in a
- single shared library. */
-
-_Bool
-__go_type_descriptors_equal (const struct __go_type_descriptor *td1,
- const struct __go_type_descriptor *td2)
-{
- if (td1 == td2)
- return 1;
- /* In a type switch we can get a NULL descriptor. */
- if (td1 == NULL || td2 == NULL)
- return 0;
- if (td1->__code != td2->__code || td1->__hash != td2->__hash)
- return 0;
- return __go_ptr_strings_equal (td1->__reflection, td2->__reflection);
-}
diff --git a/libgo/runtime/proc.c b/libgo/runtime/proc.c
index 1569b5b..26125cc 100644
--- a/libgo/runtime/proc.c
+++ b/libgo/runtime/proc.c
@@ -75,7 +75,7 @@ initcontext(void)
}
static inline void
-fixcontext(ucontext_t *c __attribute__ ((unused)))
+fixcontext(__go_context_t *c __attribute__ ((unused)))
{
}
@@ -182,18 +182,18 @@ fixcontext(ucontext_t* c)
// Go, and Go has no simple way to align a field to such a boundary.
// So we make the field larger in runtime2.go and pick an appropriate
// offset within the field here.
-static ucontext_t*
+static __go_context_t*
ucontext_arg(uintptr_t* go_ucontext)
{
uintptr_t p = (uintptr_t)go_ucontext;
- size_t align = __alignof__(ucontext_t);
+ size_t align = __alignof__(__go_context_t);
if(align > 16) {
// We only ensured space for up to a 16 byte alignment
// in libgo/go/runtime/runtime2.go.
- runtime_throw("required alignment of ucontext_t too large");
+ runtime_throw("required alignment of __go_context_t too large");
}
p = (p + align - 1) &~ (uintptr_t)(align - 1);
- return (ucontext_t*)p;
+ return (__go_context_t*)p;
}
// We can not always refer to the TLS variables directly. The
@@ -289,7 +289,7 @@ runtime_gogo(G* newg)
g = newg;
newg->fromgogo = true;
fixcontext(ucontext_arg(&newg->context[0]));
- setcontext(ucontext_arg(&newg->context[0]));
+ __go_setcontext(ucontext_arg(&newg->context[0]));
runtime_throw("gogo setcontext returned");
}
@@ -328,7 +328,7 @@ runtime_mcall(FuncVal *fv)
gp->gcnextsp2 = (uintptr)(secondary_stack_pointer());
#endif
gp->fromgogo = false;
- getcontext(ucontext_arg(&gp->context[0]));
+ __go_getcontext(ucontext_arg(&gp->context[0]));
// When we return from getcontext, we may be running
// in a new thread. That means that g may have
@@ -358,7 +358,7 @@ runtime_mcall(FuncVal *fv)
g = mp->g0;
fixcontext(ucontext_arg(&mp->g0->context[0]));
- setcontext(ucontext_arg(&mp->g0->context[0]));
+ __go_setcontext(ucontext_arg(&mp->g0->context[0]));
runtime_throw("runtime: mcall function returned");
}
}
@@ -378,8 +378,6 @@ runtime_mcall(FuncVal *fv)
extern G* allocg(void)
__asm__ (GOSYM_PREFIX "runtime.allocg");
-Sched* runtime_sched;
-
bool runtime_isarchive;
extern void kickoff(void)
@@ -450,7 +448,7 @@ void getTraceback(G* me, G* gp)
#ifdef USING_SPLIT_STACK
__splitstack_getcontext((void*)(&me->stackcontext[0]));
#endif
- getcontext(ucontext_arg(&me->context[0]));
+ __go_getcontext(ucontext_arg(&me->context[0]));
if (gp->traceback != 0) {
runtime_gogo(gp);
@@ -493,7 +491,7 @@ doscanstackswitch(G* me, G* gp)
#ifdef USING_SPLIT_STACK
__splitstack_getcontext((void*)(&me->stackcontext[0]));
#endif
- getcontext(ucontext_arg(&me->context[0]));
+ __go_getcontext(ucontext_arg(&me->context[0]));
if(me->entry != nil) {
// Got here from mcall.
@@ -574,7 +572,7 @@ runtime_mstart(void *arg)
// Save the currently active context. This will return
// multiple times via the setcontext call in mcall.
- getcontext(ucontext_arg(&gp->context[0]));
+ __go_getcontext(ucontext_arg(&gp->context[0]));
if(gp->traceback != 0) {
// Got here from getTraceback.
@@ -652,7 +650,7 @@ setGContext(void)
gp->gcinitialsp2 = secondary_stack_pointer();
gp->gcnextsp2 = (uintptr)(gp->gcinitialsp2);
#endif
- getcontext(ucontext_arg(&gp->context[0]));
+ __go_getcontext(ucontext_arg(&gp->context[0]));
if(gp->entry != nil) {
// Got here from mcall.
@@ -672,13 +670,11 @@ void makeGContext(G*, byte*, uintptr)
// makeGContext makes a new context for a g.
void
makeGContext(G* gp, byte* sp, uintptr spsize) {
- ucontext_t *uc;
+ __go_context_t *uc;
uc = ucontext_arg(&gp->context[0]);
- getcontext(uc);
- uc->uc_stack.ss_sp = sp;
- uc->uc_stack.ss_size = (size_t)spsize;
- makecontext(uc, kickoff, 0);
+ __go_getcontext(uc);
+ __go_makecontext(uc, kickoff, sp, (size_t)spsize);
}
// The goroutine g is about to enter a system call.
@@ -700,7 +696,7 @@ runtime_entersyscall()
// Save the registers in the g structure so that any pointers
// held in registers will be seen by the garbage collector.
if (!runtime_usestackmaps)
- getcontext(ucontext_arg(&g->gcregs[0]));
+ __go_getcontext(ucontext_arg(&g->gcregs[0]));
// Note that if this function does save any registers itself,
// we might store the wrong value in the call to getcontext.
@@ -747,7 +743,7 @@ runtime_entersyscallblock()
// Save the registers in the g structure so that any pointers
// held in registers will be seen by the garbage collector.
if (!runtime_usestackmaps)
- getcontext(ucontext_arg(&g->gcregs[0]));
+ __go_getcontext(ucontext_arg(&g->gcregs[0]));
// See comment in runtime_entersyscall.
doentersyscallblock((uintptr)runtime_getcallerpc(),
@@ -890,11 +886,3 @@ resetNewG(G *newg, void **sp, uintptr *spsize)
newg->gcnextsp2 = (uintptr)(newg->gcinitialsp2);
#endif
}
-
-// Return whether we are waiting for a GC. This gc toolchain uses
-// preemption instead.
-bool
-runtime_gcwaiting(void)
-{
- return runtime_sched->gcwaiting;
-}
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index 97b1f11..6da7bdf 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -78,9 +78,7 @@ typedef struct _panic Panic;
typedef struct __go_ptr_type PtrType;
typedef struct __go_func_type FuncType;
-typedef struct __go_interface_type InterfaceType;
typedef struct __go_map_type MapType;
-typedef struct __go_channel_type ChanType;
typedef struct tracebackg Traceback;
@@ -117,11 +115,6 @@ extern M* runtime_m(void);
extern G* runtime_g(void)
__asm__(GOSYM_PREFIX "runtime.getg");
-extern M* runtime_m0(void)
- __asm__(GOSYM_PREFIX "runtime.runtime_m0");
-extern G* runtime_g0(void)
- __asm__(GOSYM_PREFIX "runtime.runtime_g0");
-
enum
{
true = 1,
@@ -198,15 +191,6 @@ void runtime_hashinit(void);
*/
extern uintptr* runtime_getZerobase(void)
__asm__(GOSYM_PREFIX "runtime.getZerobase");
-extern G* runtime_getallg(intgo)
- __asm__(GOSYM_PREFIX "runtime.getallg");
-extern uintptr runtime_getallglen(void)
- __asm__(GOSYM_PREFIX "runtime.getallglen");
-extern M* runtime_getallm(void)
- __asm__(GOSYM_PREFIX "runtime.getallm");
-extern Sched* runtime_sched;
-extern uint32 runtime_panicking(void)
- __asm__ (GOSYM_PREFIX "runtime.getPanicking");
extern bool runtime_isstarted;
extern bool runtime_isarchive;
@@ -240,6 +224,8 @@ int32 runtime_snprintf(byte*, int32, const char*, ...);
#define runtime_memmove(a, b, s) __builtin_memmove((a), (b), (s))
String runtime_gostringnocopy(const byte*)
__asm__ (GOSYM_PREFIX "runtime.gostringnocopy");
+void runtime_ginit(void)
+ __asm__ (GOSYM_PREFIX "runtime.ginit");
void runtime_schedinit(void)
__asm__ (GOSYM_PREFIX "runtime.schedinit");
void runtime_initsig(bool)
@@ -447,7 +433,6 @@ int32 getproccount(void);
#define PREFETCH(p) __builtin_prefetch(p)
-bool runtime_gcwaiting(void);
void runtime_badsignal(int);
Defer* runtime_newdefer(void);
void runtime_freedefer(Defer*);
@@ -483,6 +468,7 @@ struct funcfileline_return
String retfn;
String retfile;
intgo retline;
+ intgo retframes;
};
struct funcfileline_return
@@ -507,3 +493,20 @@ bool probestackmaps(void)
// older versions of glibc when a SIGPROF signal arrives while
// collecting a backtrace.
extern uint32 __go_runtime_in_callers;
+
+// Cheaper context switch functions. Currently only defined on
+// Linux/AMD64.
+#if defined(__x86_64__) && defined(__linux__) && !defined(__CET__)
+typedef struct {
+ uint64 regs[8];
+} __go_context_t;
+int __go_getcontext(__go_context_t*);
+int __go_setcontext(__go_context_t*);
+void __go_makecontext(__go_context_t*, void (*)(), void*, size_t);
+#else
+#define __go_context_t ucontext_t
+#define __go_getcontext(c) getcontext(c)
+#define __go_setcontext(c) setcontext(c)
+#define __go_makecontext(c, fn, sp, size) \
+ ((c)->uc_stack.ss_sp = sp, (c)->uc_stack.ss_size = size, makecontext(c, fn, 0))
+#endif
diff --git a/libgo/testsuite/Makefile.in b/libgo/testsuite/Makefile.in
index 1307589..41d81b5 100644
--- a/libgo/testsuite/Makefile.in
+++ b/libgo/testsuite/Makefile.in
@@ -141,6 +141,9 @@ AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
CCDEPMODE = @CCDEPMODE@
CC_FOR_BUILD = @CC_FOR_BUILD@
CFLAGS = @CFLAGS@
diff --git a/libgo/testsuite/gotest b/libgo/testsuite/gotest
index c9c1465..5256bb6 100755
--- a/libgo/testsuite/gotest
+++ b/libgo/testsuite/gotest
@@ -501,6 +501,13 @@ localname() {
symtogo() {
result=""
for tp in $*; do
+ # Discard symbols with a leading dot.
+ # On AIX, this will remove function text symbols (with a leading dot).
+ # Therefore, only function descriptor symbols (without this leading dot)
+ # will be used to retrieve the go symbols, avoiding duplication.
+ if expr "$tp" : '^\.' >/dev/null 2>&1; then
+ continue
+ fi
s=$(echo "$tp" | sed -e 's/\.\.z2f/%/g' | sed -e 's/.*%//')
# Screen out methods (X.Y.Z).
if ! expr "$s" : '^[^.]*\.[^.]*$' >/dev/null 2>&1; then