author     Ian Lance Taylor <ian@gcc.gnu.org>  2019-07-03 23:13:09 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>  2019-07-03 23:13:09 +0000
commit     0baa9d1d59bf17177e80838ebe66df10a7a909c0 (patch)
tree       a1b956eacf43ba6ac1d052faad8a2df8f4f6ef5a /libgo/go
parent     133d3bd8362f0c438017ca18adb51afb7288f78b (diff)
parent     651c754cfbd1928abd8ac6b3121fc37c85907dcb (diff)
Merge from trunk revision 273026.
From-SVN: r273027
Diffstat (limited to 'libgo/go')
-rw-r--r--  libgo/go/cmd/go/internal/work/gccgo.go        |   9
-rw-r--r--  libgo/go/go/internal/gccgoimporter/parser.go  |  70
-rw-r--r--  libgo/go/reflect/type.go                      | 131
-rw-r--r--  libgo/go/runtime/alg.go                       |  11
-rw-r--r--  libgo/go/runtime/heapdump.go                  |  10
-rw-r--r--  libgo/go/runtime/iface.go                     |  49
-rw-r--r--  libgo/go/runtime/map_fast32.go                |   9
-rw-r--r--  libgo/go/runtime/map_fast64.go                |   9
-rw-r--r--  libgo/go/runtime/map_faststr.go               |   8
-rw-r--r--  libgo/go/runtime/mgcmark.go                   |   2
-rw-r--r--  libgo/go/runtime/mprof.go                     | 349
-rw-r--r--  libgo/go/runtime/panic.go                     |   4
-rw-r--r--  libgo/go/runtime/proc.go                      |  18
-rw-r--r--  libgo/go/runtime/signal_gccgo.go              |   5
-rw-r--r--  libgo/go/runtime/string.go                    |  28
-rw-r--r--  libgo/go/runtime/stubs.go                     |  76
-rw-r--r--  libgo/go/runtime/symtab.go                    |   8
-rw-r--r--  libgo/go/runtime/traceback_gccgo.go           |  16
-rw-r--r--  libgo/go/runtime/type.go                      |  81
-rw-r--r--  libgo/go/syscall/wait.c                       |   2
20 files changed, 540 insertions, 355 deletions
diff --git a/libgo/go/cmd/go/internal/work/gccgo.go b/libgo/go/cmd/go/internal/work/gccgo.go
index a0eb2d3..3b97209 100644
--- a/libgo/go/cmd/go/internal/work/gccgo.go
+++ b/libgo/go/cmd/go/internal/work/gccgo.go
@@ -209,9 +209,16 @@ func (tools gccgoToolchain) pack(b *Builder, a *Action, afile string, ofiles []s
}
absAfile := mkAbs(objdir, afile)
// Try with D modifier first, then without if that fails.
- if b.run(a, p.Dir, p.ImportPath, nil, tools.ar(), arArgs, "rcD", absAfile, absOfiles) != nil {
+ output, err := b.runOut(p.Dir, nil, tools.ar(), arArgs, "rcD", absAfile, absOfiles)
+ if err != nil {
return b.run(a, p.Dir, p.ImportPath, nil, tools.ar(), arArgs, "rc", absAfile, absOfiles)
}
+
+ if len(output) > 0 {
+ // Show the output if there is any even without errors.
+ b.showOutput(a, p.Dir, p.ImportPath, b.processOutput(output))
+ }
+
return nil
}
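
A minimal standalone sketch of the fallback behavior added above, assuming plain os/exec
rather than cmd/go's internal builder helpers (arPack, the hard-coded "ar" command, and
the file names are inventions of this sketch, not part of the patch):

    package main

    import (
        "fmt"
        "os/exec"
    )

    // arPack archives ofiles into afile, preferring the deterministic D
    // modifier and falling back to plain "rc" if this ar rejects it.
    func arPack(afile string, ofiles []string) error {
        args := append([]string{"rcD", afile}, ofiles...)
        out, err := exec.Command("ar", args...).CombinedOutput()
        if err != nil {
            // Older ar implementations may not know the D modifier.
            args[0] = "rc"
            return exec.Command("ar", args...).Run()
        }
        if len(out) > 0 {
            fmt.Printf("%s", out) // surface warnings even on success
        }
        return nil
    }

    func main() {
        if err := arPack("libdemo.a", []string{"a.o", "b.o"}); err != nil {
            fmt.Println("pack failed:", err)
        }
    }
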
diff --git a/libgo/go/go/internal/gccgoimporter/parser.go b/libgo/go/go/internal/gccgoimporter/parser.go
index 42f43a1..5881d9c 100644
--- a/libgo/go/go/internal/gccgoimporter/parser.go
+++ b/libgo/go/go/internal/gccgoimporter/parser.go
@@ -261,6 +261,10 @@ func (p *parser) parseField(pkg *types.Package) (field *types.Var, tag string) {
// Param = Name ["..."] Type .
func (p *parser) parseParam(pkg *types.Package) (param *types.Var, isVariadic bool) {
name := p.parseName()
+ // Ignore names invented for inlinable functions.
+ if strings.HasPrefix(name, "p.") || strings.HasPrefix(name, "r.") || strings.HasPrefix(name, "$ret") {
+ name = ""
+ }
if p.tok == '<' && p.scanner.Peek() == 'e' {
// EscInfo = "<esc:" int ">" . (optional and ignored)
p.next()
@@ -286,7 +290,14 @@ func (p *parser) parseParam(pkg *types.Package) (param *types.Var, isVariadic bo
// Var = Name Type .
func (p *parser) parseVar(pkg *types.Package) *types.Var {
name := p.parseName()
- return types.NewVar(token.NoPos, pkg, name, p.parseType(pkg))
+ v := types.NewVar(token.NoPos, pkg, name, p.parseType(pkg))
+ if name[0] == '.' || name[0] == '<' {
+ // This is an unexported variable,
+ // or a variable defined in a different package.
+ // We only want to record exported variables.
+ return nil
+ }
+ return v
}
// Conversion = "convert" "(" Type "," ConstValue ")" .
@@ -539,10 +550,12 @@ func (p *parser) parseNamedType(nlist []int) types.Type {
for p.tok == scanner.Ident {
p.expectKeyword("func")
if p.tok == '/' {
- // Skip a /*nointerface*/ comment.
+ // Skip a /*nointerface*/ or /*asm ID */ comment.
p.expect('/')
p.expect('*')
- p.expect(scanner.Ident)
+ if p.expect(scanner.Ident) == "asm" {
+ p.parseUnquotedString()
+ }
p.expect('*')
p.expect('/')
}
@@ -727,15 +740,29 @@ func (p *parser) parseFunctionType(pkg *types.Package, nlist []int) *types.Signa
// Func = Name FunctionType [InlineBody] .
func (p *parser) parseFunc(pkg *types.Package) *types.Func {
- name := p.parseName()
- if strings.ContainsRune(name, '$') {
- // This is a Type$equal or Type$hash function, which we don't want to parse,
- // except for the types.
- p.discardDirectiveWhileParsingTypes(pkg)
- return nil
+ if p.tok == '/' {
+ // Skip an /*asm ID */ comment.
+ p.expect('/')
+ p.expect('*')
+ if p.expect(scanner.Ident) == "asm" {
+ p.parseUnquotedString()
+ }
+ p.expect('*')
+ p.expect('/')
}
+
+ name := p.parseName()
f := types.NewFunc(token.NoPos, pkg, name, p.parseFunctionType(pkg, nil))
p.skipInlineBody()
+
+ if name[0] == '.' || name[0] == '<' || strings.ContainsRune(name, '$') {
+ // This is an unexported function,
+ // or a function defined in a different package,
+ // or a type$equal or type$hash function.
+ // We only want to record exported functions.
+ return nil
+ }
+
return f
}
@@ -756,7 +783,9 @@ func (p *parser) parseInterfaceType(pkg *types.Package, nlist []int) types.Type
embeddeds = append(embeddeds, p.parseType(pkg))
} else {
method := p.parseFunc(pkg)
- methods = append(methods, method)
+ if method != nil {
+ methods = append(methods, method)
+ }
}
p.expect(';')
}
@@ -1037,23 +1066,6 @@ func (p *parser) parsePackageInit() PackageInit {
return PackageInit{Name: name, InitFunc: initfunc, Priority: priority}
}
-// Throw away tokens until we see a newline or ';'.
-// If we see a '<', attempt to parse as a type.
-func (p *parser) discardDirectiveWhileParsingTypes(pkg *types.Package) {
- for {
- switch p.tok {
- case '\n', ';':
- return
- case '<':
- p.parseType(pkg)
- case scanner.EOF:
- p.error("unexpected EOF")
- default:
- p.next()
- }
- }
-}
-
// Create the package if we have parsed both the package path and package name.
func (p *parser) maybeCreatePackage() {
if p.pkgname != "" && p.pkgpath != "" {
@@ -1191,7 +1203,9 @@ func (p *parser) parseDirective() {
case "var":
p.next()
v := p.parseVar(p.pkg)
- p.pkg.Scope().Insert(v)
+ if v != nil {
+ p.pkg.Scope().Insert(v)
+ }
p.expectEOL()
case "const":
diff --git a/libgo/go/reflect/type.go b/libgo/go/reflect/type.go
index fb2e5d4..8493d87 100644
--- a/libgo/go/reflect/type.go
+++ b/libgo/go/reflect/type.go
@@ -1105,15 +1105,14 @@ func (t *rtype) ptrTo() *rtype {
return &pi.(*ptrType).rtype
}
+ // Look in known types.
s := "*" + *t.string
-
- canonicalTypeLock.RLock()
- r, ok := canonicalType[s]
- canonicalTypeLock.RUnlock()
- if ok {
- p := (*ptrType)(unsafe.Pointer(r.(*rtype)))
- pi, _ := ptrMap.LoadOrStore(t, p)
- return &pi.(*ptrType).rtype
+ if tt := lookupType(s); tt != nil {
+ p := (*ptrType)(unsafe.Pointer(tt))
+ if p.elem == t {
+ pi, _ := ptrMap.LoadOrStore(t, p)
+ return &pi.(*ptrType).rtype
+ }
}
// Create a new ptrType starting with the description
@@ -1138,10 +1137,7 @@ func (t *rtype) ptrTo() *rtype {
pp.ptrToThis = nil
pp.elem = t
- q := canonicalize(&pp.rtype)
- p := (*ptrType)(unsafe.Pointer(q.(*rtype)))
-
- pi, _ := ptrMap.LoadOrStore(t, p)
+ pi, _ := ptrMap.LoadOrStore(t, &pp)
return &pi.(*ptrType).rtype
}
@@ -1447,6 +1443,13 @@ func ChanOf(dir ChanDir, t Type) Type {
case BothDir:
s = "chan " + *typ.string
}
+ if tt := lookupType(s); tt != nil {
+ ch := (*chanType)(unsafe.Pointer(tt))
+ if ch.elem == typ && ch.dir == uintptr(dir) {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
// Make a channel type.
var ichan interface{} = (chan unsafe.Pointer)(nil)
@@ -1472,10 +1475,8 @@ func ChanOf(dir ChanDir, t Type) Type {
ch.uncommonType = nil
ch.ptrToThis = nil
- // Canonicalize before storing in lookupCache
- ti := toType(&ch.rtype)
- lookupCache.Store(ckey, ti.(*rtype))
- return ti
+ ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
+ return ti.(Type)
}
func ismapkey(*rtype) bool // implemented in runtime
@@ -1502,6 +1503,13 @@ func MapOf(key, elem Type) Type {
// Look in known types.
s := "map[" + *ktyp.string + "]" + *etyp.string
+ if tt := lookupType(s); tt != nil {
+ mt := (*mapType)(unsafe.Pointer(tt))
+ if mt.key == ktyp && mt.elem == etyp {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
// Make a map type.
// Note: flag values must match those used in the TMAP case
@@ -1544,10 +1552,8 @@ func MapOf(key, elem Type) Type {
mt.flags |= 16
}
- // Canonicalize before storing in lookupCache
- ti := toType(&mt.rtype)
- lookupCache.Store(ckey, ti.(*rtype))
- return ti
+ ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
+ return ti.(Type)
}
// FuncOf returns the function type with the given argument and result types.
@@ -1625,15 +1631,17 @@ func FuncOf(in, out []Type, variadic bool) Type {
}
str := funcStr(ft)
+ if tt := lookupType(str); tt != nil {
+ if haveIdenticalUnderlyingType(&ft.rtype, tt, true) {
+ return addToCache(tt)
+ }
+ }
// Populate the remaining fields of ft and store in cache.
ft.string = &str
ft.uncommonType = nil
ft.ptrToThis = nil
-
- // Canonicalize before storing in funcLookupCache
- tc := toType(&ft.rtype)
- return addToCache(tc.(*rtype))
+ return addToCache(&ft.rtype)
}
// funcStr builds a string representation of a funcType.
@@ -1873,6 +1881,13 @@ func SliceOf(t Type) Type {
// Look in known types.
s := "[]" + *typ.string
+ if tt := lookupType(s); tt != nil {
+ slice := (*sliceType)(unsafe.Pointer(tt))
+ if slice.elem == typ {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
// Make a slice type.
var islice interface{} = ([]unsafe.Pointer)(nil)
@@ -1888,10 +1903,8 @@ func SliceOf(t Type) Type {
slice.uncommonType = nil
slice.ptrToThis = nil
- // Canonicalize before storing in lookupCache
- ti := toType(&slice.rtype)
- lookupCache.Store(ckey, ti.(*rtype))
- return ti
+ ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
+ return ti.(Type)
}
// The structLookupCache caches StructOf lookups.
@@ -2106,6 +2119,13 @@ func StructOf(fields []StructField) Type {
return t
}
+ // Look in known types.
+ if tt := lookupType(str); tt != nil {
+ if haveIdenticalUnderlyingType(&typ.rtype, tt, true) {
+ return addToCache(tt)
+ }
+ }
+
typ.string = &str
typ.hash = hash
typ.size = size
@@ -2214,10 +2234,7 @@ func StructOf(fields []StructField) Type {
typ.uncommonType = nil
typ.ptrToThis = nil
-
- // Canonicalize before storing in structLookupCache
- ti := toType(&typ.rtype)
- return addToCache(ti.(*rtype))
+ return addToCache(&typ.rtype)
}
func runtimeStructField(field StructField) structField {
@@ -2300,6 +2317,13 @@ func ArrayOf(count int, elem Type) Type {
// Look in known types.
s := "[" + strconv.Itoa(count) + "]" + *typ.string
+ if tt := lookupType(s); tt != nil {
+ array := (*arrayType)(unsafe.Pointer(tt))
+ if array.elem == typ {
+ ti, _ := lookupCache.LoadOrStore(ckey, tt)
+ return ti.(Type)
+ }
+ }
// Make an array type.
var iarray interface{} = [1]unsafe.Pointer{}
@@ -2451,10 +2475,8 @@ func ArrayOf(count int, elem Type) Type {
}
}
- // Canonicalize before storing in lookupCache
- ti := toType(&array.rtype)
- lookupCache.Store(ckey, ti.(*rtype))
- return ti
+ ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
+ return ti.(Type)
}
func appendVarint(x []byte, v uintptr) []byte {
@@ -2466,42 +2488,19 @@ func appendVarint(x []byte, v uintptr) []byte {
}
// toType converts from a *rtype to a Type that can be returned
-// to the client of package reflect. In gc, the only concern is that
-// a nil *rtype must be replaced by a nil Type, but in gccgo this
-// function takes care of ensuring that multiple *rtype for the same
-// type are coalesced into a single Type.
-var canonicalType = make(map[string]Type)
-
-var canonicalTypeLock sync.RWMutex
-
-func canonicalize(t Type) Type {
- if t == nil {
- return nil
- }
- s := t.rawString()
- canonicalTypeLock.RLock()
- if r, ok := canonicalType[s]; ok {
- canonicalTypeLock.RUnlock()
- return r
- }
- canonicalTypeLock.RUnlock()
- canonicalTypeLock.Lock()
- if r, ok := canonicalType[s]; ok {
- canonicalTypeLock.Unlock()
- return r
- }
- canonicalType[s] = t
- canonicalTypeLock.Unlock()
- return t
-}
-
+// to the client of package reflect. The only concern is that
+// a nil *rtype must be replaced by a nil Type.
func toType(p *rtype) Type {
if p == nil {
return nil
}
- return canonicalize(p)
+ return p
}
+// Look up a compiler-generated type descriptor.
+// Implemented in runtime.
+func lookupType(s string) *rtype
+
// ifaceIndir reports whether t is stored indirectly in an interface value.
func ifaceIndir(t *rtype) bool {
return t.kind&kindDirectIface == 0
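
The constructors above now end with a sync.Map LoadOrStore instead of the deleted
canonicalize step: whichever descriptor is stored first for a key is the one every
caller gets back, so identical types still compare equal as pointers. A small
illustration of that idiom (the key and the *int payload are placeholders, not reflect
internals):

    package main

    import (
        "fmt"
        "sync"
    )

    var lookupCache sync.Map

    // intern returns the canonical pointer for key, keeping whichever
    // value was stored first and discarding later duplicates.
    func intern(key string, candidate *int) *int {
        v, _ := lookupCache.LoadOrStore(key, candidate)
        return v.(*int)
    }

    func main() {
        a, b := new(int), new(int)
        p := intern("[]byte", a)
        q := intern("[]byte", b)    // b loses the race; a already won
        fmt.Println(p == q, p == a) // true true
    }
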
diff --git a/libgo/go/runtime/alg.go b/libgo/go/runtime/alg.go
index c6bc6b6..a2bb5bb 100644
--- a/libgo/go/runtime/alg.go
+++ b/libgo/go/runtime/alg.go
@@ -44,7 +44,6 @@ import (
//go:linkname ifacevaleq runtime.ifacevaleq
//go:linkname ifaceefaceeq runtime.ifaceefaceeq
//go:linkname efacevaleq runtime.efacevaleq
-//go:linkname eqstring runtime.eqstring
//go:linkname cmpstring runtime.cmpstring
//
// Temporary to be called from C code.
@@ -205,7 +204,7 @@ func nilinterequal(p, q unsafe.Pointer) bool {
}
func efaceeq(x, y eface) bool {
t := x._type
- if !eqtype(t, y._type) {
+ if t != y._type {
return false
}
if t == nil {
@@ -229,7 +228,7 @@ func ifaceeq(x, y iface) bool {
return false
}
t := *(**_type)(xtab)
- if !eqtype(t, *(**_type)(y.tab)) {
+ if t != *(**_type)(y.tab) {
return false
}
eq := t.equalfn
@@ -247,7 +246,7 @@ func ifacevaleq(x iface, t *_type, p unsafe.Pointer) bool {
return false
}
xt := *(**_type)(x.tab)
- if !eqtype(xt, t) {
+ if xt != t {
return false
}
eq := t.equalfn
@@ -268,7 +267,7 @@ func ifaceefaceeq(x iface, y eface) bool {
return false
}
xt := *(**_type)(x.tab)
- if !eqtype(xt, y._type) {
+ if xt != y._type {
return false
}
eq := xt.equalfn
@@ -285,7 +284,7 @@ func efacevaleq(x eface, t *_type, p unsafe.Pointer) bool {
if x._type == nil {
return false
}
- if !eqtype(x._type, t) {
+ if x._type != t {
return false
}
eq := t.equalfn
diff --git a/libgo/go/runtime/heapdump.go b/libgo/go/runtime/heapdump.go
index 3aa9e8a..b0506a8 100644
--- a/libgo/go/runtime/heapdump.go
+++ b/libgo/go/runtime/heapdump.go
@@ -437,17 +437,15 @@ func dumpmemstats() {
dumpint(uint64(memstats.numgc))
}
-func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *location, size, allocs, frees uintptr) {
- stk := (*[100000]location)(unsafe.Pointer(pstk))
+func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
+ stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
dumpint(tagMemProf)
dumpint(uint64(uintptr(unsafe.Pointer(b))))
dumpint(uint64(size))
dumpint(uint64(nstk))
for i := uintptr(0); i < nstk; i++ {
- pc := stk[i].pc
- fn := stk[i].function
- file := stk[i].filename
- line := stk[i].lineno
+ pc := stk[i]
+ fn, file, line, _ := funcfileline(pc, -1)
if fn == "" {
var buf [64]byte
n := len(buf)
diff --git a/libgo/go/runtime/iface.go b/libgo/go/runtime/iface.go
index 1c3a5f3..d434f9e 100644
--- a/libgo/go/runtime/iface.go
+++ b/libgo/go/runtime/iface.go
@@ -15,10 +15,7 @@ import (
//
//go:linkname requireitab runtime.requireitab
//go:linkname assertitab runtime.assertitab
-//go:linkname assertI2T runtime.assertI2T
-//go:linkname ifacetypeeq runtime.ifacetypeeq
-//go:linkname efacetype runtime.efacetype
-//go:linkname ifacetype runtime.ifacetype
+//go:linkname panicdottype runtime.panicdottype
//go:linkname ifaceE2E2 runtime.ifaceE2E2
//go:linkname ifaceI2E2 runtime.ifaceI2E2
//go:linkname ifaceE2I2 runtime.ifaceE2I2
@@ -236,7 +233,7 @@ func (m *itab) init() string {
ri++
}
- if !eqtype(lhsMethod.typ, rhsMethod.mtyp) {
+ if lhsMethod.typ != rhsMethod.mtyp {
m.methods[1] = nil
return *lhsMethod.name
}
@@ -356,35 +353,9 @@ func assertitab(lhs, rhs *_type) unsafe.Pointer {
return getitab(lhs, rhs, false)
}
-// Check whether an interface type may be converted to a non-interface
-// type, panicing if not.
-func assertI2T(lhs, rhs, inter *_type) {
- if rhs == nil {
- panic(&TypeAssertionError{nil, nil, lhs, ""})
- }
- if !eqtype(lhs, rhs) {
- panic(&TypeAssertionError{inter, rhs, lhs, ""})
- }
-}
-
-// Compare two type descriptors for equality.
-func ifacetypeeq(a, b *_type) bool {
- return eqtype(a, b)
-}
-
-// Return the type descriptor of an empty interface.
-// FIXME: This should be inlined by the compiler.
-func efacetype(e eface) *_type {
- return e._type
-}
-
-// Return the type descriptor of a non-empty interface.
-// FIXME: This should be inlined by the compiler.
-func ifacetype(i iface) *_type {
- if i.tab == nil {
- return nil
- }
- return *(**_type)(i.tab)
+// panicdottype is called when doing an i.(T) conversion and the conversion fails.
+func panicdottype(lhs, rhs, inter *_type) {
+ panic(&TypeAssertionError{inter, rhs, lhs, ""})
}
// Convert an empty interface to an empty interface, for a comma-ok
@@ -435,7 +406,7 @@ func ifaceI2I2(inter *_type, i iface) (iface, bool) {
// Convert an empty interface to a pointer non-interface type.
func ifaceE2T2P(t *_type, e eface) (unsafe.Pointer, bool) {
- if !eqtype(t, e._type) {
+ if t != e._type {
return nil, false
} else {
return e.data, true
@@ -444,7 +415,7 @@ func ifaceE2T2P(t *_type, e eface) (unsafe.Pointer, bool) {
// Convert a non-empty interface to a pointer non-interface type.
func ifaceI2T2P(t *_type, i iface) (unsafe.Pointer, bool) {
- if i.tab == nil || !eqtype(t, *(**_type)(i.tab)) {
+ if i.tab == nil || t != *(**_type)(i.tab) {
return nil, false
} else {
return i.data, true
@@ -453,7 +424,7 @@ func ifaceI2T2P(t *_type, i iface) (unsafe.Pointer, bool) {
// Convert an empty interface to a non-pointer non-interface type.
func ifaceE2T2(t *_type, e eface, ret unsafe.Pointer) bool {
- if !eqtype(t, e._type) {
+ if t != e._type {
typedmemclr(t, ret)
return false
} else {
@@ -468,7 +439,7 @@ func ifaceE2T2(t *_type, e eface, ret unsafe.Pointer) bool {
// Convert a non-empty interface to a non-pointer non-interface type.
func ifaceI2T2(t *_type, i iface, ret unsafe.Pointer) bool {
- if i.tab == nil || !eqtype(t, *(**_type)(i.tab)) {
+ if i.tab == nil || t != *(**_type)(i.tab) {
typedmemclr(t, ret)
return false
} else {
@@ -514,7 +485,7 @@ func ifaceT2Ip(to, from *_type) bool {
ri++
}
- if !eqtype(fromMethod.mtyp, toMethod.typ) {
+ if fromMethod.mtyp != toMethod.typ {
return false
}
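
panicdottype above is the runtime half of a failed i.(T) conversion; the behavior it
backs at the language level is the familiar *TypeAssertionError panic. For reference, a
tiny program that triggers and recovers exactly that kind of panic:

    package main

    import "fmt"

    func main() {
        var i interface{} = "hello"
        defer func() {
            // Prints an "interface conversion" *TypeAssertionError message.
            fmt.Println("recovered:", recover())
        }()
        _ = i.(int) // dynamic type is string, so this panics
    }
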
diff --git a/libgo/go/runtime/map_fast32.go b/libgo/go/runtime/map_fast32.go
index 1fa5cd9..07a35e1 100644
--- a/libgo/go/runtime/map_fast32.go
+++ b/libgo/go/runtime/map_fast32.go
@@ -9,6 +9,15 @@ import (
"unsafe"
)
+// For gccgo, use go:linkname to rename compiler-called functions to
+// themselves, so that the compiler will export them.
+//
+//go:linkname mapaccess1_fast32 runtime.mapaccess1_fast32
+//go:linkname mapaccess2_fast32 runtime.mapaccess2_fast32
+//go:linkname mapassign_fast32 runtime.mapassign_fast32
+//go:linkname mapassign_fast32ptr runtime.mapassign_fast32ptr
+//go:linkname mapdelete_fast32 runtime.mapdelete_fast32
+
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
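
The go:linkname block above (and the matching ones in map_fast64.go and map_faststr.go
below) uses the directive in its "push" form: the function keeps its body and the
directive pins the linker symbol it is exported under, which is how these lower-case,
compiler-called helpers become visible outside the package under gccgo. A self-contained
sketch of that form (the package and target symbol names are invented):

    package fastmapdemo

    import _ "unsafe" // required in any file that uses go:linkname

    // demoAccess keeps its body; the directive only fixes the object-file
    // symbol it is linked as, so generated code elsewhere can call it by
    // that name even though the Go identifier is unexported.
    //
    //go:linkname demoAccess example.com/fastmapdemo.demoAccess
    func demoAccess(key uint32) uint32 {
        return key ^ 0x9e3779b9 // placeholder body
    }
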
diff --git a/libgo/go/runtime/map_fast64.go b/libgo/go/runtime/map_fast64.go
index d23ac23..d21bf06 100644
--- a/libgo/go/runtime/map_fast64.go
+++ b/libgo/go/runtime/map_fast64.go
@@ -9,6 +9,15 @@ import (
"unsafe"
)
+// For gccgo, use go:linkname to rename compiler-called functions to
+// themselves, so that the compiler will export them.
+//
+//go:linkname mapaccess1_fast64 runtime.mapaccess1_fast64
+//go:linkname mapaccess2_fast64 runtime.mapaccess2_fast64
+//go:linkname mapassign_fast64 runtime.mapassign_fast64
+//go:linkname mapassign_fast64ptr runtime.mapassign_fast64ptr
+//go:linkname mapdelete_fast64 runtime.mapdelete_fast64
+
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
diff --git a/libgo/go/runtime/map_faststr.go b/libgo/go/runtime/map_faststr.go
index eced15a..083980f 100644
--- a/libgo/go/runtime/map_faststr.go
+++ b/libgo/go/runtime/map_faststr.go
@@ -9,6 +9,14 @@ import (
"unsafe"
)
+// For gccgo, use go:linkname to rename compiler-called functions to
+// themselves, so that the compiler will export them.
+//
+//go:linkname mapaccess1_faststr runtime.mapaccess1_faststr
+//go:linkname mapaccess2_faststr runtime.mapaccess2_faststr
+//go:linkname mapassign_faststr runtime.mapassign_faststr
+//go:linkname mapdelete_faststr runtime.mapdelete_faststr
+
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
diff --git a/libgo/go/runtime/mgcmark.go b/libgo/go/runtime/mgcmark.go
index dc5e797..1b8a7a3 100644
--- a/libgo/go/runtime/mgcmark.go
+++ b/libgo/go/runtime/mgcmark.go
@@ -1085,7 +1085,7 @@ func scanstackblockwithmap(pc, b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
span != nil && span.state != mSpanManual &&
(obj < span.base() || obj >= span.limit || span.state != mSpanInUse) {
print("runtime: found in object at *(", hex(b), "+", hex(i), ") = ", hex(obj), ", pc=", hex(pc), "\n")
- name, file, line := funcfileline(pc, -1)
+ name, file, line, _ := funcfileline(pc, -1)
print(name, "\n", file, ":", line, "\n")
//gcDumpObject("object", b, i)
throw("found bad pointer in Go stack (incorrect use of unsafe or cgo?)")
diff --git a/libgo/go/runtime/mprof.go b/libgo/go/runtime/mprof.go
index ab97569..132c2ff 100644
--- a/libgo/go/runtime/mprof.go
+++ b/libgo/go/runtime/mprof.go
@@ -24,6 +24,10 @@ const (
blockProfile
mutexProfile
+ // a profile bucket from one of the categories above whose stack
+ // trace has been fixed up / pruned.
+ prunedProfile
+
// size of bucket hash table
buckHashSize = 179999
@@ -52,6 +56,7 @@ type bucket struct {
hash uintptr
size uintptr
nstk uintptr
+ skip int
}
// A memRecord is the bucket data for a bucket of type memProfile,
@@ -138,11 +143,13 @@ type blockRecord struct {
}
var (
- mbuckets *bucket // memory profile buckets
- bbuckets *bucket // blocking profile buckets
- xbuckets *bucket // mutex profile buckets
- buckhash *[179999]*bucket
- bucketmem uintptr
+ mbuckets *bucket // memory profile buckets
+ bbuckets *bucket // blocking profile buckets
+ xbuckets *bucket // mutex profile buckets
+ sbuckets *bucket // pre-symbolization profile buckets (stacks fixed up)
+ freebuckets *bucket // freelist of unused fixed up profile buckets
+ buckhash *[179999]*bucket
+ bucketmem uintptr
mProf struct {
// All fields in mProf are protected by proflock.
@@ -158,12 +165,35 @@ var (
const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)
+// payloadOffset returns the offset within a bucket at which the
+// profile payload begins (past the bucket struct itself and then
+// the stack trace).
+func payloadOffset(typ bucketType, nstk uintptr) uintptr {
+ if typ == prunedProfile {
+ // To allow reuse of prunedProfile buckets between different
+ // collections, allocate them with the max stack size (the portion
+ // of the stack used will vary from trace to trace).
+ nstk = maxStack
+ }
+ return unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
+}
+
+func max(x, y uintptr) uintptr {
+ if x > y {
+ return x
+ }
+ return y
+}
+
// newBucket allocates a bucket with the given type and number of stack entries.
-func newBucket(typ bucketType, nstk int) *bucket {
- size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(location{})
+func newBucket(typ bucketType, nstk int, skipCount int) *bucket {
+ size := payloadOffset(typ, uintptr(nstk))
switch typ {
default:
throw("invalid profile bucket type")
+ case prunedProfile:
+ // stack-fixed buckets are large enough to accommodate any payload.
+ size += max(unsafe.Sizeof(memRecord{}), unsafe.Sizeof(blockRecord{}))
case memProfile:
size += unsafe.Sizeof(memRecord{})
case blockProfile, mutexProfile:
@@ -174,35 +204,34 @@ func newBucket(typ bucketType, nstk int) *bucket {
bucketmem += size
b.typ = typ
b.nstk = uintptr(nstk)
+ b.skip = skipCount
return b
}
// stk returns the slice in b holding the stack.
-func (b *bucket) stk() []location {
- stk := (*[maxStack]location)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
+func (b *bucket) stk() []uintptr {
+ stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
return stk[:b.nstk:b.nstk]
}
// mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord {
- if b.typ != memProfile {
+ if b.typ != memProfile && b.typ != prunedProfile {
throw("bad use of bucket.mp")
}
- data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(location{}))
- return (*memRecord)(data)
+ return (*memRecord)(add(unsafe.Pointer(b), payloadOffset(b.typ, b.nstk)))
}
// bp returns the blockRecord associated with the blockProfile bucket b.
func (b *bucket) bp() *blockRecord {
- if b.typ != blockProfile && b.typ != mutexProfile {
+ if b.typ != blockProfile && b.typ != mutexProfile && b.typ != prunedProfile {
throw("bad use of bucket.bp")
}
- data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(location{}))
- return (*blockRecord)(data)
+ return (*blockRecord)(add(unsafe.Pointer(b), payloadOffset(b.typ, b.nstk)))
}
// Return the bucket for stk[0:nstk], allocating new bucket if needed.
-func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket {
+func stkbucket(typ bucketType, size uintptr, skip int, stk []uintptr, alloc bool) *bucket {
if buckhash == nil {
buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
if buckhash == nil {
@@ -212,8 +241,8 @@ func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket
// Hash stack.
var h uintptr
- for _, loc := range stk {
- h += loc.pc
+ for _, pc := range stk {
+ h += pc
h += h << 10
h ^= h >> 6
}
@@ -237,7 +266,7 @@ func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket
}
// Create new bucket.
- b := newBucket(typ, len(stk))
+ b := newBucket(typ, len(stk), skip)
copy(b.stk(), stk)
b.hash = h
b.size = size
@@ -249,6 +278,9 @@ func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket
} else if typ == mutexProfile {
b.allnext = xbuckets
xbuckets = b
+ } else if typ == prunedProfile {
+ b.allnext = sbuckets
+ sbuckets = b
} else {
b.allnext = bbuckets
bbuckets = b
@@ -256,7 +288,7 @@ func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket
return b
}
-func eqslice(x, y []location) bool {
+func eqslice(x, y []uintptr) bool {
if len(x) != len(y) {
return false
}
@@ -338,10 +370,11 @@ func mProf_PostSweep() {
// Called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
- var stk [maxStack]location
- nstk := callers(4, stk[:])
+ var stk [maxStack]uintptr
+ nstk := callersRaw(stk[:])
lock(&proflock)
- b := stkbucket(memProfile, size, stk[:nstk], true)
+ skip := 1
+ b := stkbucket(memProfile, size, skip, stk[:nstk], true)
c := mProf.cycle
mp := b.mp()
mpc := &mp.future[(c+2)%uint32(len(mp.future))]
@@ -414,16 +447,16 @@ func blocksampled(cycles int64) bool {
func saveblockevent(cycles int64, skip int, which bucketType) {
gp := getg()
var nstk int
- var stk [maxStack]location
+ var stk [maxStack]uintptr
if gp.m.curg == nil || gp.m.curg == gp {
- nstk = callers(skip, stk[:])
+ nstk = callersRaw(stk[:])
} else {
// FIXME: This should get a traceback of gp.m.curg.
// nstk = gcallers(gp.m.curg, skip, stk[:])
- nstk = callers(skip, stk[:])
+ nstk = callersRaw(stk[:])
}
lock(&proflock)
- b := stkbucket(which, 0, stk[:nstk], true)
+ b := stkbucket(which, 0, skip, stk[:nstk], true)
b.bp().count++
b.bp().cycles += cycles
unlock(&proflock)
@@ -521,6 +554,163 @@ func (r *MemProfileRecord) Stack() []uintptr {
return r.Stack0[0:]
}
+// reusebucket tries to pick a prunedProfile bucket off
+// the freebuckets list, returning it if one is available or nil
+// if the free list is empty.
+func reusebucket(nstk int) *bucket {
+ var b *bucket
+ if freebuckets != nil {
+ b = freebuckets
+ freebuckets = freebuckets.allnext
+ b.typ = prunedProfile
+ b.nstk = uintptr(nstk)
+ mp := b.mp()
+ // Hack: rely on the fact that memprofile records are
+ // larger than blockprofile records when clearing.
+ *mp = memRecord{}
+ }
+ return b
+}
+
+// freebucket appends the specified prunedProfile bucket
+// onto the free list, and removes references to it from the hash.
+func freebucket(tofree *bucket) *bucket {
+ // Thread this bucket into the free list.
+ ret := tofree.allnext
+ tofree.allnext = freebuckets
+ freebuckets = tofree
+
+ // Clean up the hash. The hash may point directly to this bucket...
+ i := int(tofree.hash % buckHashSize)
+ if buckhash[i] == tofree {
+ buckhash[i] = tofree.next
+ } else {
+ // ... or when this bucket was inserted by stkbucket, it may have been
+ // chained off some other unrelated bucket.
+ for b := buckhash[i]; b != nil; b = b.next {
+ if b.next == tofree {
+ b.next = tofree.next
+ break
+ }
+ }
+ }
+ return ret
+}
+
+// fixupStack takes a 'raw' stack trace (stack of PCs generated by
+// callersRaw) and performs pre-symbolization fixup on it, returning
+// the results in 'canonStack'. For each frame we look at the
+// file/func/line information, then use that info to decide whether to
+// include the frame in the final symbolized stack (removing frames
+// corresponding to 'morestack' routines, for example). We also expand
+// frames if the PC values to which they refer correspond to inlined
+// functions to allow for expanded symbolic info to be filled in
+// later. Note: there is code in go-callers.c's backtrace_full callback()
+// function that performs very similar fixups; these two code paths
+// should be kept in sync.
+func fixupStack(stk []uintptr, skip int, canonStack *[maxStack]uintptr, size uintptr) int {
+ var cidx int
+ var termTrace bool
+ // Increase the skip count to take into account the frames corresponding
+ // to runtime.callersRaw and to the C routine that it invokes.
+ skip += 2
+ for _, pc := range stk {
+ // Subtract 1 from PC to undo the 1 we added in callback in
+ // go-callers.c.
+ function, file, _, frames := funcfileline(pc-1, -1)
+
+ // Skip split-stack functions (match by function name)
+ skipFrame := false
+ if hasPrefix(function, "_____morestack_") || hasPrefix(function, "__morestack_") {
+ skipFrame = true
+ }
+
+ // Skip split-stack functions (match by file)
+ if hasSuffix(file, "/morestack.S") {
+ skipFrame = true
+ }
+
+ // Skip thunks and recover functions. There is no equivalent to
+ // these functions in the gc toolchain.
+ fcn := function
+ if hasSuffix(fcn, "..r") {
+ skipFrame = true
+ } else {
+ for fcn != "" && (fcn[len(fcn)-1] >= '0' && fcn[len(fcn)-1] <= '9') {
+ fcn = fcn[:len(fcn)-1]
+ }
+ if hasSuffix(fcn, "..stub") || hasSuffix(fcn, "..thunk") {
+ skipFrame = true
+ }
+ }
+ if skipFrame {
+ continue
+ }
+
+ // Terminate the trace if we encounter a frame corresponding to
+ // runtime.main, runtime.kickoff, makecontext, etc. See the
+ // corresponding code in go-callers.c, callback function used
+ // with backtrace_full.
+ if function == "makecontext" {
+ termTrace = true
+ }
+ if hasSuffix(file, "/proc.c") && function == "runtime_mstart" {
+ termTrace = true
+ }
+ if hasSuffix(file, "/proc.go") &&
+ (function == "runtime.main" || function == "runtime.kickoff") {
+ termTrace = true
+ }
+
+ // Expand inline frames.
+ for i := 0; i < frames; i++ {
+ (*canonStack)[cidx] = pc
+ cidx++
+ if cidx >= maxStack {
+ termTrace = true
+ break
+ }
+ }
+ if termTrace {
+ break
+ }
+ }
+
+ // Apply skip count. Needs to be done after expanding inline frames.
+ if skip != 0 {
+ if skip >= cidx {
+ return 0
+ }
+ copy(canonStack[:cidx-skip], canonStack[skip:])
+ return cidx - skip
+ }
+
+ return cidx
+}
+
+// fixupBucket takes a raw memprofile bucket and creates a new bucket
+// in which the stack trace has been fixed up (inline frames expanded,
+// unwanted frames stripped out). Original bucket is left unmodified;
+// a new prunedProfile bucket may be generated as a side effect.
+// Payload information from the original bucket is incorporated into
+// the new bucket.
+func fixupBucket(b *bucket) {
+ var canonStack [maxStack]uintptr
+ frames := fixupStack(b.stk(), b.skip, &canonStack, b.size)
+ cb := stkbucket(prunedProfile, b.size, 0, canonStack[:frames], true)
+ switch b.typ {
+ default:
+ throw("invalid profile bucket type")
+ case memProfile:
+ rawrecord := b.mp()
+ cb.mp().active.add(&rawrecord.active)
+ case blockProfile, mutexProfile:
+ bpcount := b.bp().count
+ cb.bp().count += bpcount
+ cb.bp().cycles += bpcount
+ }
+}
+
// MemProfile returns a profile of memory allocated and freed per allocation
// site.
//
@@ -576,15 +766,31 @@ func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
}
}
if n <= len(p) {
- ok = true
- idx := 0
- for b := mbuckets; b != nil; b = b.allnext {
+ var bnext *bucket
+
+ // Post-process raw buckets to fix up their stack traces
+ for b := mbuckets; b != nil; b = bnext {
+ bnext = b.allnext
mp := b.mp()
if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
- record(&p[idx], b)
- idx++
+ fixupBucket(b)
}
}
+
+ // Record pruned/fixed-up buckets
+ ok = true
+ idx := 0
+ for b := sbuckets; b != nil; b = b.allnext {
+ record(&p[idx], b)
+ idx++
+ }
+ n = idx
+
+ // Free up pruned buckets for use in next round
+ for b := sbuckets; b != nil; b = bnext {
+ bnext = freebucket(b)
+ }
+ sbuckets = nil
}
unlock(&proflock)
return
@@ -597,18 +803,18 @@ func record(r *MemProfileRecord, b *bucket) {
r.FreeBytes = int64(mp.active.free_bytes)
r.AllocObjects = int64(mp.active.allocs)
r.FreeObjects = int64(mp.active.frees)
- for i, loc := range b.stk() {
+ for i, pc := range b.stk() {
if i >= len(r.Stack0) {
break
}
- r.Stack0[i] = loc.pc
+ r.Stack0[i] = pc
}
for i := int(b.nstk); i < len(r.Stack0); i++ {
r.Stack0[i] = 0
}
}
-func iterate_memprof(fn func(*bucket, uintptr, *location, uintptr, uintptr, uintptr)) {
+func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
lock(&proflock)
for b := mbuckets; b != nil; b = b.allnext {
mp := b.mp()
@@ -625,39 +831,59 @@ type BlockProfileRecord struct {
StackRecord
}
-// BlockProfile returns n, the number of records in the current blocking profile.
-// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
-// If len(p) < n, BlockProfile does not change p and returns n, false.
-//
-// Most clients should use the runtime/pprof package or
-// the testing package's -test.blockprofile flag instead
-// of calling BlockProfile directly.
-func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
- lock(&proflock)
- for b := bbuckets; b != nil; b = b.allnext {
+func harvestBlockMutexProfile(buckets *bucket, p []BlockProfileRecord) (n int, ok bool) {
+ for b := buckets; b != nil; b = b.allnext {
n++
}
if n <= len(p) {
+ var bnext *bucket
+
+ // Post-process raw buckets to create pruned/fixed-up buckets
+ for b := buckets; b != nil; b = bnext {
+ bnext = b.allnext
+ fixupBucket(b)
+ }
+
+ // Record
ok = true
- for b := bbuckets; b != nil; b = b.allnext {
+ for b := sbuckets; b != nil; b = b.allnext {
bp := b.bp()
r := &p[0]
r.Count = bp.count
r.Cycles = bp.cycles
i := 0
- var loc location
- for i, loc = range b.stk() {
+ var pc uintptr
+ for i, pc = range b.stk() {
if i >= len(r.Stack0) {
break
}
- r.Stack0[i] = loc.pc
+ r.Stack0[i] = pc
}
for ; i < len(r.Stack0); i++ {
r.Stack0[i] = 0
}
p = p[1:]
}
+
+ // Free up pruned buckets for use in next round.
+ for b := sbuckets; b != nil; b = bnext {
+ bnext = freebucket(b)
+ }
+ sbuckets = nil
}
+ return
+}
+
+// BlockProfile returns n, the number of records in the current blocking profile.
+// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
+// If len(p) < n, BlockProfile does not change p and returns n, false.
+//
+// Most clients should use the runtime/pprof package or
+// the testing package's -test.blockprofile flag instead
+// of calling BlockProfile directly.
+func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
+ lock(&proflock)
+ n, ok = harvestBlockMutexProfile(bbuckets, p)
unlock(&proflock)
return
}
@@ -670,30 +896,7 @@ func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
// instead of calling MutexProfile directly.
func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
lock(&proflock)
- for b := xbuckets; b != nil; b = b.allnext {
- n++
- }
- if n <= len(p) {
- ok = true
- for b := xbuckets; b != nil; b = b.allnext {
- bp := b.bp()
- r := &p[0]
- r.Count = int64(bp.count)
- r.Cycles = bp.cycles
- i := 0
- var loc location
- for i, loc = range b.stk() {
- if i >= len(r.Stack0) {
- break
- }
- r.Stack0[i] = loc.pc
- }
- for ; i < len(r.Stack0); i++ {
- r.Stack0[i] = 0
- }
- p = p[1:]
- }
- }
+ n, ok = harvestBlockMutexProfile(xbuckets, p)
unlock(&proflock)
return
}
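
The mprof.go rework above records raw PCs at sample time and defers symbolization:
fixupStack later strips morestack/thunk frames, expands inlined frames, and only then
applies the skip count, since expansion changes how many output frames a PC produces.
A simplified, name-based sketch of that ordering (pruneFrames and the prefixes tested
are illustrative, not the runtime's exact rules):

    package main

    import (
        "fmt"
        "strings"
    )

    // pruneFrames drops stack-growth and thunk frames, then applies the
    // skip count afterwards, so skip refers to frames that survive.
    func pruneFrames(frames []string, skip int) []string {
        kept := frames[:0]
        for _, fn := range frames {
            if strings.HasPrefix(fn, "__morestack") || strings.HasSuffix(fn, "..thunk") {
                continue
            }
            kept = append(kept, fn)
        }
        if skip >= len(kept) {
            return nil
        }
        return kept[skip:]
    }

    func main() {
        stk := []string{"runtime.callersRaw", "__morestack_block", "main.work", "main.main"}
        fmt.Println(pruneFrames(stk, 1)) // [main.work main.main]
    }
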
diff --git a/libgo/go/runtime/panic.go b/libgo/go/runtime/panic.go
index 9b8ffb9..264ad38 100644
--- a/libgo/go/runtime/panic.go
+++ b/libgo/go/runtime/panic.go
@@ -53,7 +53,7 @@ var indexError = error(errorString("index out of range"))
// entire runtime stack for easier debugging.
func panicindex() {
- name, _, _ := funcfileline(getcallerpc()-1, -1)
+ name, _, _, _ := funcfileline(getcallerpc()-1, -1)
if hasPrefix(name, "runtime.") {
throw(string(indexError.(errorString)))
}
@@ -64,7 +64,7 @@ func panicindex() {
var sliceError = error(errorString("slice bounds out of range"))
func panicslice() {
- name, _, _ := funcfileline(getcallerpc()-1, -1)
+ name, _, _, _ := funcfileline(getcallerpc()-1, -1)
if hasPrefix(name, "runtime.") {
throw(string(sliceError.(errorString)))
}
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go
index 8146c1d..b40198e 100644
--- a/libgo/go/runtime/proc.go
+++ b/libgo/go/runtime/proc.go
@@ -18,6 +18,7 @@ import (
//go:linkname acquirep runtime.acquirep
//go:linkname releasep runtime.releasep
//go:linkname incidlelocked runtime.incidlelocked
+//go:linkname ginit runtime.ginit
//go:linkname schedinit runtime.schedinit
//go:linkname ready runtime.ready
//go:linkname stopm runtime.stopm
@@ -515,6 +516,15 @@ func cpuinit() {
cpu.Initialize(env)
}
+func ginit() {
+ _m_ := &m0
+ _g_ := &g0
+ _m_.g0 = _g_
+ _m_.curg = _g_
+ _g_.m = _m_
+ setg(_g_)
+}
+
// The bootstrap sequence is:
//
// call osinit
@@ -524,13 +534,7 @@ func cpuinit() {
//
// The new G calls runtime·main.
func schedinit() {
- _m_ := &m0
- _g_ := &g0
- _m_.g0 = _g_
- _m_.curg = _g_
- _g_.m = _m_
- setg(_g_)
-
+ _g_ := getg()
sched.maxmcount = 10000
usestackmaps = probestackmaps()
diff --git a/libgo/go/runtime/signal_gccgo.go b/libgo/go/runtime/signal_gccgo.go
index b3c78f6..6f362fc 100644
--- a/libgo/go/runtime/signal_gccgo.go
+++ b/libgo/go/runtime/signal_gccgo.go
@@ -60,11 +60,6 @@ type sigctxt struct {
}
func (c *sigctxt) sigcode() uint64 {
- if c.info == nil {
- // This can happen on Solaris 10. We don't know the
- // code, just avoid a misleading value.
- return _SI_USER + 1
- }
return uint64(c.info.si_code)
}
diff --git a/libgo/go/runtime/string.go b/libgo/go/runtime/string.go
index 025ea7a..9bcfc996 100644
--- a/libgo/go/runtime/string.go
+++ b/libgo/go/runtime/string.go
@@ -13,10 +13,6 @@ import (
// themselves, so that the compiler will export them.
//
//go:linkname concatstrings runtime.concatstrings
-//go:linkname concatstring2 runtime.concatstring2
-//go:linkname concatstring3 runtime.concatstring3
-//go:linkname concatstring4 runtime.concatstring4
-//go:linkname concatstring5 runtime.concatstring5
//go:linkname slicebytetostring runtime.slicebytetostring
//go:linkname slicebytetostringtmp runtime.slicebytetostringtmp
//go:linkname stringtoslicebyte runtime.stringtoslicebyte
@@ -38,7 +34,9 @@ type tmpBuf [tmpStringBufSize]byte
// If buf != nil, the compiler has determined that the result does not
// escape the calling function, so the string data can be stored in buf
// if small enough.
-func concatstrings(buf *tmpBuf, a []string) string {
+func concatstrings(buf *tmpBuf, p *string, n int) string {
+ var a []string
+ *(*slice)(unsafe.Pointer(&a)) = slice{unsafe.Pointer(p), n, n}
// idx := 0
l := 0
count := 0
@@ -73,22 +71,6 @@ func concatstrings(buf *tmpBuf, a []string) string {
return s
}
-func concatstring2(buf *tmpBuf, a [2]string) string {
- return concatstrings(buf, a[:])
-}
-
-func concatstring3(buf *tmpBuf, a [3]string) string {
- return concatstrings(buf, a[:])
-}
-
-func concatstring4(buf *tmpBuf, a [4]string) string {
- return concatstrings(buf, a[:])
-}
-
-func concatstring5(buf *tmpBuf, a [5]string) string {
- return concatstrings(buf, a[:])
-}
-
// Buf is a fixed-size buffer for the result,
// it is not nil if the result does not escape.
func slicebytetostring(buf *tmpBuf, b []byte) (str string) {
@@ -360,6 +342,10 @@ func hasPrefix(s, prefix string) bool {
return len(s) >= len(prefix) && s[:len(prefix)] == prefix
}
+func hasSuffix(s, suffix string) bool {
+ return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
+}
+
const (
maxUint = ^uint(0)
maxInt = int(maxUint >> 1)
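
concatstrings above now receives a pointer to the first string plus a count and rebuilds
the []string header itself (the added code writes the runtime's slice struct directly).
Outside the runtime, unsafe.Slice (Go 1.17+) expresses the same pointer-plus-length view;
a small sketch of the equivalent calling convention (join is an invented helper):

    package main

    import (
        "fmt"
        "unsafe"
    )

    // join views the n strings starting at p as a []string and
    // concatenates them, mirroring the (p *string, n int) convention
    // the compiler now uses when calling concatstrings.
    func join(p *string, n int) string {
        a := unsafe.Slice(p, n)
        out := ""
        for _, s := range a {
            out += s
        }
        return out
    }

    func main() {
        parts := [3]string{"go", "cc", "go"}
        fmt.Println(join(&parts[0], len(parts))) // goccgo
    }
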
diff --git a/libgo/go/runtime/stubs.go b/libgo/go/runtime/stubs.go
index 435cdf7..e00d759 100644
--- a/libgo/go/runtime/stubs.go
+++ b/libgo/go/runtime/stubs.go
@@ -273,18 +273,6 @@ func checkASM() bool {
return true
}
-func eqstring(x, y string) bool {
- a := stringStructOf(&x)
- b := stringStructOf(&y)
- if a.len != b.len {
- return false
- }
- if a.str == b.str {
- return true
- }
- return memequal(a.str, b.str, uintptr(a.len))
-}
-
// For gccgo this is in the C code.
func osyield()
@@ -310,13 +298,6 @@ func errno() int
func entersyscall()
func entersyscallblock()
-// For gccgo to call from C code, so that the C code and the Go code
-// can share the memstats variable for now.
-//go:linkname getMstats runtime.getMstats
-func getMstats() *mstats {
- return &memstats
-}
-
// Get signal trampoline, written in C.
func getSigtramp() uintptr
@@ -338,48 +319,12 @@ func dumpregs(*_siginfo_t, unsafe.Pointer)
// Implemented in C for gccgo.
func setRandomNumber(uint32)
-// Temporary for gccgo until we port proc.go.
-//go:linkname getsched runtime.getsched
-func getsched() *schedt {
- return &sched
-}
-
-// Temporary for gccgo until we port proc.go.
-//go:linkname getCgoHasExtraM runtime.getCgoHasExtraM
-func getCgoHasExtraM() *bool {
- return &cgoHasExtraM
-}
-
-// Temporary for gccgo until we port proc.go.
-//go:linkname getAllP runtime.getAllP
-func getAllP() **p {
- return &allp[0]
-}
-
-// Temporary for gccgo until we port proc.go.
+// Called by gccgo's proc.c.
//go:linkname allocg runtime.allocg
func allocg() *g {
return new(g)
}
-// Temporary for gccgo until we port the garbage collector.
-//go:linkname getallglen runtime.getallglen
-func getallglen() uintptr {
- return allglen
-}
-
-// Temporary for gccgo until we port the garbage collector.
-//go:linkname getallg runtime.getallg
-func getallg(i int) *g {
- return allgs[i]
-}
-
-// Temporary for gccgo until we port the garbage collector.
-//go:linkname getallm runtime.getallm
-func getallm() *m {
- return allm
-}
-
// Throw and rethrow an exception.
func throwException()
func rethrowException()
@@ -388,13 +333,6 @@ func rethrowException()
// used by the stack unwinder.
func unwindExceptionSize() uintptr
-// Temporary for gccgo until C code no longer needs it.
-//go:nosplit
-//go:linkname getPanicking runtime.getPanicking
-func getPanicking() uint32 {
- return panicking
-}
-
// Called by C code to set the number of CPUs.
//go:linkname setncpu runtime.setncpu
func setncpu(n int32) {
@@ -409,18 +347,6 @@ func setpagesize(s uintptr) {
}
}
-// Called by C code during library initialization.
-//go:linkname runtime_m0 runtime.runtime_m0
-func runtime_m0() *m {
- return &m0
-}
-
-// Temporary for gccgo until we port mgc.go.
-//go:linkname runtime_g0 runtime.runtime_g0
-func runtime_g0() *g {
- return &g0
-}
-
const uintptrMask = 1<<(8*sys.PtrSize) - 1
type bitvector struct {
diff --git a/libgo/go/runtime/symtab.go b/libgo/go/runtime/symtab.go
index d7e8c18..8f3c843 100644
--- a/libgo/go/runtime/symtab.go
+++ b/libgo/go/runtime/symtab.go
@@ -79,7 +79,7 @@ func (ci *Frames) Next() (frame Frame, more bool) {
// Subtract 1 from PC to undo the 1 we added in callback in
// go-callers.c.
- function, file, line := funcfileline(pc-1, int32(i))
+ function, file, line, _ := funcfileline(pc-1, int32(i))
if function == "" && file == "" {
return Frame{}, more
}
@@ -158,7 +158,7 @@ const (
// the a *Func describing the innermost function, but with an entry
// of the outermost function.
func FuncForPC(pc uintptr) *Func {
- name, _, _ := funcfileline(pc, -1)
+ name, _, _, _ := funcfileline(pc, -1)
if name == "" {
return nil
}
@@ -187,7 +187,7 @@ func (f *Func) Entry() uintptr {
// The result will not be accurate if pc is not a program
// counter within f.
func (f *Func) FileLine(pc uintptr) (file string, line int) {
- _, file, line = funcfileline(pc, -1)
+ _, file, line, _ = funcfileline(pc, -1)
return file, line
}
@@ -261,5 +261,5 @@ func demangleSymbol(s string) string {
}
// implemented in go-caller.c
-func funcfileline(uintptr, int32) (string, string, int)
+func funcfileline(uintptr, int32) (string, string, int, int)
func funcentry(uintptr) uintptr
diff --git a/libgo/go/runtime/traceback_gccgo.go b/libgo/go/runtime/traceback_gccgo.go
index 7581798..4134d28 100644
--- a/libgo/go/runtime/traceback_gccgo.go
+++ b/libgo/go/runtime/traceback_gccgo.go
@@ -20,7 +20,7 @@ func printcreatedby(gp *g) {
if entry != 0 && tracepc > entry {
tracepc -= sys.PCQuantum
}
- function, file, line := funcfileline(tracepc, -1)
+ function, file, line, _ := funcfileline(tracepc, -1)
if function != "" && showframe(function, gp, false) && gp.goid != 1 {
printcreatedby1(function, file, line, entry, pc)
}
@@ -61,6 +61,16 @@ func callers(skip int, locbuf []location) int {
return int(n)
}
+//go:noescape
+//extern runtime_callersRaw
+func c_callersRaw(pcs *uintptr, max int32) int32
+
+// callersRaw returns a raw (PCs only) stack trace of the current goroutine.
+func callersRaw(pcbuf []uintptr) int {
+ n := c_callersRaw(&pcbuf[0], int32(len(pcbuf)))
+ return int(n)
+}
+
// traceback prints a traceback of the current goroutine.
// This differs from the gc version, which is given pc, sp, lr and g and
// can print a traceback of any goroutine.
@@ -83,7 +93,7 @@ func traceback(skip int32) {
func printAncestorTraceback(ancestor ancestorInfo) {
print("[originating from goroutine ", ancestor.goid, "]:\n")
for fidx, pc := range ancestor.pcs {
- function, file, line := funcfileline(pc, -1)
+ function, file, line, _ := funcfileline(pc, -1)
if showfuncinfo(function, fidx == 0) {
printAncestorTracebackFuncInfo(function, file, line, pc)
}
@@ -92,7 +102,7 @@ func printAncestorTraceback(ancestor ancestorInfo) {
print("...additional frames elided...\n")
}
// Show what created goroutine, except main goroutine (goid 1).
- function, file, line := funcfileline(ancestor.gopc, -1)
+ function, file, line, _ := funcfileline(ancestor.gopc, -1)
if function != "" && showfuncinfo(function, false) && ancestor.goid != 1 {
printcreatedby1(function, file, line, funcentry(ancestor.gopc), ancestor.gopc)
}
diff --git a/libgo/go/runtime/type.go b/libgo/go/runtime/type.go
index 5cafa38..8af6246 100644
--- a/libgo/go/runtime/type.go
+++ b/libgo/go/runtime/type.go
@@ -6,7 +6,11 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
type _type struct {
size uintptr
@@ -44,22 +48,6 @@ func (t *_type) pkgpath() string {
return ""
}
-// Return whether two type descriptors are equal.
-// This is gccgo-specific, as gccgo, unlike gc, permits multiple
-// independent descriptors for a single type.
-func eqtype(t1, t2 *_type) bool {
- switch {
- case t1 == t2:
- return true
- case t1 == nil || t2 == nil:
- return false
- case t1.kind != t2.kind || t1.hash != t2.hash:
- return false
- default:
- return t1.string() == t2.string()
- }
-}
-
type method struct {
name *string
pkgPath *string
@@ -164,3 +152,62 @@ type structtype struct {
typ _type
fields []structfield
}
+
+// typeDescriptorList holds a list of type descriptors generated
+// by the compiler. This is used for the compiler to register
+// type descriptors to the runtime.
+// The layout is known to the compiler.
+//go:notinheap
+type typeDescriptorList struct {
+ count int
+ types [1]uintptr // variable length
+}
+
+// typelist holds all type descriptors generated by the compiler.
+// This is for the reflect package to deduplicate type descriptors
+// when it creates a type that is also a compiler-generated type.
+var typelist struct {
+ initialized uint32
+ lists []*typeDescriptorList // one element per package
+ types map[string]uintptr // map from a type's string to *_type, lazily populated
+ // TODO: use a sorted array instead?
+}
+var typelistLock mutex
+
+// The compiler generates a call of this function in the main
+// package's init function, to register compiler-generated
+// type descriptors.
+// p points to a list of *typeDescriptorList, n is the length
+// of the list.
+//go:linkname registerTypeDescriptors runtime.registerTypeDescriptors
+func registerTypeDescriptors(n int, p unsafe.Pointer) {
+ *(*slice)(unsafe.Pointer(&typelist.lists)) = slice{p, n, n}
+}
+
+// The reflect package uses this function to look up a compiler-
+// generated type descriptor.
+//go:linkname reflect_lookupType reflect.lookupType
+func reflect_lookupType(s string) *_type {
+ // Lazy initialization. We don't need to do this if we never create
+ // types through reflection.
+ if atomic.Load(&typelist.initialized) == 0 {
+ lock(&typelistLock)
+ if atomic.Load(&typelist.initialized) == 0 {
+ n := 0
+ for _, list := range typelist.lists {
+ n += list.count
+ }
+ typelist.types = make(map[string]uintptr, n)
+ for _, list := range typelist.lists {
+ for i := 0; i < list.count; i++ {
+ typ := *(**_type)(add(unsafe.Pointer(&list.types), uintptr(i)*sys.PtrSize))
+ typelist.types[typ.string()] = uintptr(unsafe.Pointer(typ))
+ }
+ }
+ atomic.Store(&typelist.initialized, 1)
+ }
+ unlock(&typelistLock)
+ }
+
+ return (*_type)(unsafe.Pointer(typelist.types[s]))
+}
diff --git a/libgo/go/syscall/wait.c b/libgo/go/syscall/wait.c
index 0b234d0..39bc035 100644
--- a/libgo/go/syscall/wait.c
+++ b/libgo/go/syscall/wait.c
@@ -51,7 +51,7 @@ extern _Bool Continued (uint32_t *w)
__asm__ (GOSYM_PREFIX "syscall.WaitStatus.Continued");
_Bool
-Continued (uint32_t *w)
+Continued (uint32_t *w __attribute__ ((unused)))
{
return WIFCONTINUED (*w) != 0;
}