From c130ab6aad57e0309ec02f58f383aece584ac8cb Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Sat, 11 May 2019 01:12:37 +0000 Subject: runtime: set up g early runtime.throw needs a g to work properly. Set up g early, to ensure that if something goes wrong in the runtime startup (e.g. runtime.check fails), the program terminates in a reasonable way. Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/176657 From-SVN: r271088 --- libgo/go/runtime/proc.go | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) (limited to 'libgo/go') diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go index 8146c1d..b40198e 100644 --- a/libgo/go/runtime/proc.go +++ b/libgo/go/runtime/proc.go @@ -18,6 +18,7 @@ import ( //go:linkname acquirep runtime.acquirep //go:linkname releasep runtime.releasep //go:linkname incidlelocked runtime.incidlelocked +//go:linkname ginit runtime.ginit //go:linkname schedinit runtime.schedinit //go:linkname ready runtime.ready //go:linkname stopm runtime.stopm @@ -515,6 +516,15 @@ func cpuinit() { cpu.Initialize(env) } +func ginit() { + _m_ := &m0 + _g_ := &g0 + _m_.g0 = _g_ + _m_.curg = _g_ + _g_.m = _m_ + setg(_g_) +} + // The bootstrap sequence is: // // call osinit @@ -524,13 +534,7 @@ func cpuinit() { // // The new G calls runtimeĀ·main. func schedinit() { - _m_ := &m0 - _g_ := &g0 - _m_.g0 = _g_ - _m_.curg = _g_ - _g_.m = _m_ - setg(_g_) - + _g_ := getg() sched.maxmcount = 10000 usestackmaps = probestackmaps() -- cgit v1.1 From 93ee143d1888051fbf3156212accc154af3a667b Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Mon, 13 May 2019 20:26:24 +0000 Subject: libgo: drop Solaris 10 support Based on patch by Rainer Orth. Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/176938 From-SVN: r271135 --- libgo/go/runtime/signal_gccgo.go | 5 ----- 1 file changed, 5 deletions(-) (limited to 'libgo/go') diff --git a/libgo/go/runtime/signal_gccgo.go b/libgo/go/runtime/signal_gccgo.go index b3c78f6..6f362fc 100644 --- a/libgo/go/runtime/signal_gccgo.go +++ b/libgo/go/runtime/signal_gccgo.go @@ -60,11 +60,6 @@ type sigctxt struct { } func (c *sigctxt) sigcode() uint64 { - if c.info == nil { - // This can happen on Solaris 10. We don't know the - // code, just avoid a misleading value. - return _SI_USER + 1 - } return uint64(c.info.si_code) } -- cgit v1.1 From 1ac09ef2c611d3113665ec8c74e38b125217edb3 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Tue, 14 May 2019 14:59:42 +0000 Subject: libgo: reduce overhead for memory/block/mutex profiling Revise the gccgo version of memory/block/mutex profiling to reduce runtime overhead. The main change is to collect raw stack traces while the profile is on line, then post-process the stacks just prior to the point where we are ready to use the final product. Memory profiling (at a very low sampling rate) is enabled by default, and the overhead of the symbolization / DWARF-reading from backtrace_full was slowing things down relative to the main Go runtime. 
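To illustrate the shape of the change (a sketch only, with invented names,
not the runtime's own code): the hot path records nothing but raw program
counters, and the expensive name/file/line lookup is deferred until the
profile is actually read.

    package main

    import (
        "fmt"
        "runtime"
    )

    // rawSample holds only raw program counters; no symbolization
    // happens while the profile is being collected.
    type rawSample struct {
        pcs [32]uintptr
        n   int
    }

    // record is the hot path: it only walks the stack and stores PCs.
    func record() rawSample {
        var s rawSample
        s.n = runtime.Callers(2, s.pcs[:]) // skip Callers and record itself
        return s
    }

    // symbolize is the cold path, run once when the profile is reported.
    func symbolize(s rawSample) {
        frames := runtime.CallersFrames(s.pcs[:s.n])
        for {
            f, more := frames.Next()
            fmt.Printf("%s\n\t%s:%d\n", f.Function, f.File, f.Line)
            if !more {
                break
            }
        }
    }

    func main() {
        symbolize(record()) // collect cheaply now, resolve names later
    }
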
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/171497 From-SVN: r271172 --- libgo/go/runtime/heapdump.go | 10 +- libgo/go/runtime/mgcmark.go | 2 +- libgo/go/runtime/mprof.go | 325 ++++++++++++++++++++++++++++-------- libgo/go/runtime/panic.go | 4 +- libgo/go/runtime/string.go | 4 + libgo/go/runtime/symtab.go | 8 +- libgo/go/runtime/traceback_gccgo.go | 16 +- 7 files changed, 284 insertions(+), 85 deletions(-) (limited to 'libgo/go') diff --git a/libgo/go/runtime/heapdump.go b/libgo/go/runtime/heapdump.go index 3aa9e8a..b0506a8 100644 --- a/libgo/go/runtime/heapdump.go +++ b/libgo/go/runtime/heapdump.go @@ -437,17 +437,15 @@ func dumpmemstats() { dumpint(uint64(memstats.numgc)) } -func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *location, size, allocs, frees uintptr) { - stk := (*[100000]location)(unsafe.Pointer(pstk)) +func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) { + stk := (*[100000]uintptr)(unsafe.Pointer(pstk)) dumpint(tagMemProf) dumpint(uint64(uintptr(unsafe.Pointer(b)))) dumpint(uint64(size)) dumpint(uint64(nstk)) for i := uintptr(0); i < nstk; i++ { - pc := stk[i].pc - fn := stk[i].function - file := stk[i].filename - line := stk[i].lineno + pc := stk[i] + fn, file, line, _ := funcfileline(pc, -1) if fn == "" { var buf [64]byte n := len(buf) diff --git a/libgo/go/runtime/mgcmark.go b/libgo/go/runtime/mgcmark.go index dc5e797..1b8a7a3 100644 --- a/libgo/go/runtime/mgcmark.go +++ b/libgo/go/runtime/mgcmark.go @@ -1085,7 +1085,7 @@ func scanstackblockwithmap(pc, b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) { span != nil && span.state != mSpanManual && (obj < span.base() || obj >= span.limit || span.state != mSpanInUse) { print("runtime: found in object at *(", hex(b), "+", hex(i), ") = ", hex(obj), ", pc=", hex(pc), "\n") - name, file, line := funcfileline(pc, -1) + name, file, line, _ := funcfileline(pc, -1) print(name, "\n", file, ":", line, "\n") //gcDumpObject("object", b, i) throw("found bad pointer in Go stack (incorrect use of unsafe or cgo?)") diff --git a/libgo/go/runtime/mprof.go b/libgo/go/runtime/mprof.go index ab97569..9238e2bb 100644 --- a/libgo/go/runtime/mprof.go +++ b/libgo/go/runtime/mprof.go @@ -24,6 +24,10 @@ const ( blockProfile mutexProfile + // a profile bucket from one of the categories above whose stack + // trace has been fixed up / pruned. + prunedProfile + // size of bucket hash table buckHashSize = 179999 @@ -138,11 +142,13 @@ type blockRecord struct { } var ( - mbuckets *bucket // memory profile buckets - bbuckets *bucket // blocking profile buckets - xbuckets *bucket // mutex profile buckets - buckhash *[179999]*bucket - bucketmem uintptr + mbuckets *bucket // memory profile buckets + bbuckets *bucket // blocking profile buckets + xbuckets *bucket // mutex profile buckets + sbuckets *bucket // pre-symbolization profile buckets (stacks fixed up) + freebuckets *bucket // freelist of unused fixed up profile buckets + buckhash *[179999]*bucket + bucketmem uintptr mProf struct { // All fields in mProf are protected by proflock. @@ -158,12 +164,35 @@ var ( const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24) +// payloadOffset() returns a pointer into the part of a bucket +// containing the profile payload (skips past the bucket struct itself +// and then the stack trace). 
+func payloadOffset(typ bucketType, nstk uintptr) uintptr { + if typ == prunedProfile { + // To allow reuse of prunedProfile buckets between different + // collections, allocate them with the max stack size (the portion + // of the stack used will vary from trace to trace). + nstk = maxStack + } + return unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr) +} + +func max(x, y uintptr) uintptr { + if x > y { + return x + } + return y +} + // newBucket allocates a bucket with the given type and number of stack entries. func newBucket(typ bucketType, nstk int) *bucket { - size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(location{}) + size := payloadOffset(typ, uintptr(nstk)) switch typ { default: throw("invalid profile bucket type") + case prunedProfile: + // stack-fixed buckets are large enough to accommodate any payload. + size += max(unsafe.Sizeof(memRecord{}), unsafe.Sizeof(blockRecord{})) case memProfile: size += unsafe.Sizeof(memRecord{}) case blockProfile, mutexProfile: @@ -178,31 +207,29 @@ func newBucket(typ bucketType, nstk int) *bucket { } // stk returns the slice in b holding the stack. -func (b *bucket) stk() []location { - stk := (*[maxStack]location)(add(unsafe.Pointer(b), unsafe.Sizeof(*b))) +func (b *bucket) stk() []uintptr { + stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b))) return stk[:b.nstk:b.nstk] } // mp returns the memRecord associated with the memProfile bucket b. func (b *bucket) mp() *memRecord { - if b.typ != memProfile { + if b.typ != memProfile && b.typ != prunedProfile { throw("bad use of bucket.mp") } - data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(location{})) - return (*memRecord)(data) + return (*memRecord)(add(unsafe.Pointer(b), payloadOffset(b.typ, b.nstk))) } // bp returns the blockRecord associated with the blockProfile bucket b. func (b *bucket) bp() *blockRecord { - if b.typ != blockProfile && b.typ != mutexProfile { + if b.typ != blockProfile && b.typ != mutexProfile && b.typ != prunedProfile { throw("bad use of bucket.bp") } - data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(location{})) - return (*blockRecord)(data) + return (*blockRecord)(add(unsafe.Pointer(b), payloadOffset(b.typ, b.nstk))) } // Return the bucket for stk[0:nstk], allocating new bucket if needed. -func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket { +func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket { if buckhash == nil { buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys)) if buckhash == nil { @@ -212,8 +239,8 @@ func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket // Hash stack. var h uintptr - for _, loc := range stk { - h += loc.pc + for _, pc := range stk { + h += pc h += h << 10 h ^= h >> 6 } @@ -249,6 +276,9 @@ func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket } else if typ == mutexProfile { b.allnext = xbuckets xbuckets = b + } else if typ == prunedProfile { + b.allnext = sbuckets + sbuckets = b } else { b.allnext = bbuckets bbuckets = b @@ -256,7 +286,7 @@ func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket return b } -func eqslice(x, y []location) bool { +func eqslice(x, y []uintptr) bool { if len(x) != len(y) { return false } @@ -338,8 +368,8 @@ func mProf_PostSweep() { // Called by malloc to record a profiled block. 
func mProf_Malloc(p unsafe.Pointer, size uintptr) { - var stk [maxStack]location - nstk := callers(4, stk[:]) + var stk [maxStack]uintptr + nstk := callersRaw(1, stk[:]) lock(&proflock) b := stkbucket(memProfile, size, stk[:nstk], true) c := mProf.cycle @@ -414,13 +444,13 @@ func blocksampled(cycles int64) bool { func saveblockevent(cycles int64, skip int, which bucketType) { gp := getg() var nstk int - var stk [maxStack]location + var stk [maxStack]uintptr if gp.m.curg == nil || gp.m.curg == gp { - nstk = callers(skip, stk[:]) + nstk = callersRaw(skip, stk[:]) } else { // FIXME: This should get a traceback of gp.m.curg. // nstk = gcallers(gp.m.curg, skip, stk[:]) - nstk = callers(skip, stk[:]) + nstk = callersRaw(skip, stk[:]) } lock(&proflock) b := stkbucket(which, 0, stk[:nstk], true) @@ -521,6 +551,150 @@ func (r *MemProfileRecord) Stack() []uintptr { return r.Stack0[0:] } +// reusebucket tries to pick a prunedProfile bucket off +// the freebuckets list, returning it if one is available or nil +// if the free list is empty. +func reusebucket(nstk int) *bucket { + var b *bucket + if freebuckets != nil { + b = freebuckets + freebuckets = freebuckets.allnext + b.typ = prunedProfile + b.nstk = uintptr(nstk) + mp := b.mp() + // Hack: rely on the fact that memprofile records are + // larger than blockprofile records when clearing. + *mp = memRecord{} + } + return b +} + +// freebucket appends the specified prunedProfile bucket +// onto the free list, and removes references to it from the hash. +func freebucket(tofree *bucket) *bucket { + // Thread this bucket into the free list. + ret := tofree.allnext + tofree.allnext = freebuckets + freebuckets = tofree + + // Clean up the hash. The hash may point directly to this bucket... + i := int(tofree.hash % buckHashSize) + if buckhash[i] == tofree { + buckhash[i] = tofree.next + } else { + // ... or when this bucket was inserted by stkbucket, it may have been + // chained off some other unrelated bucket. + for b := buckhash[i]; b != nil; b = b.next { + if b.next == tofree { + b.next = tofree.next + break + } + } + } + return ret +} + +// fixupStack takes a 'raw' stack trace (stack of PCs generated by +// callersRaw) and performs pre-symbolization fixup on it, returning +// the results in 'canonStack'. For each frame we look at the +// file/func/line information, then use that info to decide whether to +// include the frame in the final symbolized stack (removing frames +// corresponding to 'morestack' routines, for example). We also expand +// frames if the PC values to which they refer correponds to inlined +// functions to allow for expanded symbolic info to be filled in +// later. Note: there is code in go-callers.c's backtrace_full callback() +// function that performs very similar fixups; these two code paths +// should be kept in sync. +func fixupStack(stk []uintptr, canonStack *[maxStack]uintptr, size uintptr) int { + var cidx int + var termTrace bool + for _, pc := range stk { + // Subtract 1 from PC to undo the 1 we added in callback in + // go-callers.c. + function, file, _, frames := funcfileline(pc-1, -1) + + // Skip split-stack functions (match by function name) + skipFrame := false + if hasPrefix(function, "_____morestack_") || hasPrefix(function, "__morestack_") { + skipFrame = true + } + + // Skip split-stack functions (match by file) + if hasSuffix(file, "/morestack.S") { + skipFrame = true + } + + // Skip thunks and recover functions. There is no equivalent to + // these functions in the gc toolchain. 
+ fcn := function + if hasSuffix(fcn, "..r") { + skipFrame = true + } else { + for fcn != "" && (fcn[len(fcn)-1] >= '0' && fcn[len(fcn)-1] <= '9') { + fcn = fcn[:len(fcn)-1] + } + if hasSuffix(fcn, "..stub") || hasSuffix(fcn, "..thunk") { + skipFrame = true + } + } + if skipFrame { + continue + } + + // Terminate the trace if we encounter a frame corresponding to + // runtime.main, runtime.kickoff, makecontext, etc. See the + // corresponding code in go-callers.c, callback function used + // with backtrace_full. + if function == "makecontext" { + termTrace = true + } + if hasSuffix(file, "/proc.c") && function == "runtime_mstart" { + termTrace = true + } + if hasSuffix(file, "/proc.go") && + (function == "runtime.main" || function == "runtime.kickoff") { + termTrace = true + } + + // Expand inline frames. + for i := 0; i < frames; i++ { + (*canonStack)[cidx] = pc + cidx++ + if cidx >= maxStack { + termTrace = true + break + } + } + if termTrace { + break + } + } + return cidx +} + +// fixupBucket takes a raw memprofile bucket and creates a new bucket +// in which the stack trace has been fixed up (inline frames expanded, +// unwanted frames stripped out). Original bucket is left unmodified; +// a new symbolizeProfile bucket may be generated as a side effect. +// Payload information from the original bucket is incorporated into +// the new bucket. +func fixupBucket(b *bucket) { + var canonStack [maxStack]uintptr + frames := fixupStack(b.stk(), &canonStack, b.size) + cb := stkbucket(prunedProfile, b.size, canonStack[:frames], true) + switch b.typ { + default: + throw("invalid profile bucket type") + case memProfile: + rawrecord := b.mp() + cb.mp().active.add(&rawrecord.active) + case blockProfile, mutexProfile: + bpcount := b.bp().count + cb.bp().count += bpcount + cb.bp().cycles += bpcount + } +} + // MemProfile returns a profile of memory allocated and freed per allocation // site. // @@ -576,15 +750,31 @@ func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) { } } if n <= len(p) { - ok = true - idx := 0 - for b := mbuckets; b != nil; b = b.allnext { + var bnext *bucket + + // Post-process raw buckets to fix up their stack traces + for b := mbuckets; b != nil; b = bnext { + bnext = b.allnext mp := b.mp() if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes { - record(&p[idx], b) - idx++ + fixupBucket(b) } } + + // Record pruned/fixed-up buckets + ok = true + idx := 0 + for b := sbuckets; b != nil; b = b.allnext { + record(&p[idx], b) + idx++ + } + n = idx + + // Free up pruned buckets for use in next round + for b := sbuckets; b != nil; b = bnext { + bnext = freebucket(b) + } + sbuckets = nil } unlock(&proflock) return @@ -597,18 +787,18 @@ func record(r *MemProfileRecord, b *bucket) { r.FreeBytes = int64(mp.active.free_bytes) r.AllocObjects = int64(mp.active.allocs) r.FreeObjects = int64(mp.active.frees) - for i, loc := range b.stk() { + for i, pc := range b.stk() { if i >= len(r.Stack0) { break } - r.Stack0[i] = loc.pc + r.Stack0[i] = pc } for i := int(b.nstk); i < len(r.Stack0); i++ { r.Stack0[i] = 0 } } -func iterate_memprof(fn func(*bucket, uintptr, *location, uintptr, uintptr, uintptr)) { +func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) { lock(&proflock) for b := mbuckets; b != nil; b = b.allnext { mp := b.mp() @@ -625,39 +815,59 @@ type BlockProfileRecord struct { StackRecord } -// BlockProfile returns n, the number of records in the current blocking profile. 
-// If len(p) >= n, BlockProfile copies the profile into p and returns n, true. -// If len(p) < n, BlockProfile does not change p and returns n, false. -// -// Most clients should use the runtime/pprof package or -// the testing package's -test.blockprofile flag instead -// of calling BlockProfile directly. -func BlockProfile(p []BlockProfileRecord) (n int, ok bool) { - lock(&proflock) - for b := bbuckets; b != nil; b = b.allnext { +func harvestBlockMutexProfile(buckets *bucket, p []BlockProfileRecord) (n int, ok bool) { + for b := buckets; b != nil; b = b.allnext { n++ } if n <= len(p) { + var bnext *bucket + + // Post-process raw buckets to create pruned/fixed-up buckets + for b := buckets; b != nil; b = bnext { + bnext = b.allnext + fixupBucket(b) + } + + // Record ok = true - for b := bbuckets; b != nil; b = b.allnext { + for b := sbuckets; b != nil; b = b.allnext { bp := b.bp() r := &p[0] r.Count = bp.count r.Cycles = bp.cycles i := 0 - var loc location - for i, loc = range b.stk() { + var pc uintptr + for i, pc = range b.stk() { if i >= len(r.Stack0) { break } - r.Stack0[i] = loc.pc + r.Stack0[i] = pc } for ; i < len(r.Stack0); i++ { r.Stack0[i] = 0 } p = p[1:] } + + // Free up pruned buckets for use in next round. + for b := sbuckets; b != nil; b = bnext { + bnext = freebucket(b) + } + sbuckets = nil } + return +} + +// BlockProfile returns n, the number of records in the current blocking profile. +// If len(p) >= n, BlockProfile copies the profile into p and returns n, true. +// If len(p) < n, BlockProfile does not change p and returns n, false. +// +// Most clients should use the runtime/pprof package or +// the testing package's -test.blockprofile flag instead +// of calling BlockProfile directly. +func BlockProfile(p []BlockProfileRecord) (n int, ok bool) { + lock(&proflock) + n, ok = harvestBlockMutexProfile(bbuckets, p) unlock(&proflock) return } @@ -670,30 +880,7 @@ func BlockProfile(p []BlockProfileRecord) (n int, ok bool) { // instead of calling MutexProfile directly. func MutexProfile(p []BlockProfileRecord) (n int, ok bool) { lock(&proflock) - for b := xbuckets; b != nil; b = b.allnext { - n++ - } - if n <= len(p) { - ok = true - for b := xbuckets; b != nil; b = b.allnext { - bp := b.bp() - r := &p[0] - r.Count = int64(bp.count) - r.Cycles = bp.cycles - i := 0 - var loc location - for i, loc = range b.stk() { - if i >= len(r.Stack0) { - break - } - r.Stack0[i] = loc.pc - } - for ; i < len(r.Stack0); i++ { - r.Stack0[i] = 0 - } - p = p[1:] - } - } + n, ok = harvestBlockMutexProfile(xbuckets, p) unlock(&proflock) return } diff --git a/libgo/go/runtime/panic.go b/libgo/go/runtime/panic.go index 9b8ffb9..264ad38 100644 --- a/libgo/go/runtime/panic.go +++ b/libgo/go/runtime/panic.go @@ -53,7 +53,7 @@ var indexError = error(errorString("index out of range")) // entire runtime stack for easier debugging. 
func panicindex() { - name, _, _ := funcfileline(getcallerpc()-1, -1) + name, _, _, _ := funcfileline(getcallerpc()-1, -1) if hasPrefix(name, "runtime.") { throw(string(indexError.(errorString))) } @@ -64,7 +64,7 @@ func panicindex() { var sliceError = error(errorString("slice bounds out of range")) func panicslice() { - name, _, _ := funcfileline(getcallerpc()-1, -1) + name, _, _, _ := funcfileline(getcallerpc()-1, -1) if hasPrefix(name, "runtime.") { throw(string(sliceError.(errorString))) } diff --git a/libgo/go/runtime/string.go b/libgo/go/runtime/string.go index 025ea7a..eac94bf 100644 --- a/libgo/go/runtime/string.go +++ b/libgo/go/runtime/string.go @@ -360,6 +360,10 @@ func hasPrefix(s, prefix string) bool { return len(s) >= len(prefix) && s[:len(prefix)] == prefix } +func hasSuffix(s, suffix string) bool { + return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix +} + const ( maxUint = ^uint(0) maxInt = int(maxUint >> 1) diff --git a/libgo/go/runtime/symtab.go b/libgo/go/runtime/symtab.go index d7e8c18..8f3c843 100644 --- a/libgo/go/runtime/symtab.go +++ b/libgo/go/runtime/symtab.go @@ -79,7 +79,7 @@ func (ci *Frames) Next() (frame Frame, more bool) { // Subtract 1 from PC to undo the 1 we added in callback in // go-callers.c. - function, file, line := funcfileline(pc-1, int32(i)) + function, file, line, _ := funcfileline(pc-1, int32(i)) if function == "" && file == "" { return Frame{}, more } @@ -158,7 +158,7 @@ const ( // the a *Func describing the innermost function, but with an entry // of the outermost function. func FuncForPC(pc uintptr) *Func { - name, _, _ := funcfileline(pc, -1) + name, _, _, _ := funcfileline(pc, -1) if name == "" { return nil } @@ -187,7 +187,7 @@ func (f *Func) Entry() uintptr { // The result will not be accurate if pc is not a program // counter within f. func (f *Func) FileLine(pc uintptr) (file string, line int) { - _, file, line = funcfileline(pc, -1) + _, file, line, _ = funcfileline(pc, -1) return file, line } @@ -261,5 +261,5 @@ func demangleSymbol(s string) string { } // implemented in go-caller.c -func funcfileline(uintptr, int32) (string, string, int) +func funcfileline(uintptr, int32) (string, string, int, int) func funcentry(uintptr) uintptr diff --git a/libgo/go/runtime/traceback_gccgo.go b/libgo/go/runtime/traceback_gccgo.go index 7581798..b0eecf2 100644 --- a/libgo/go/runtime/traceback_gccgo.go +++ b/libgo/go/runtime/traceback_gccgo.go @@ -20,7 +20,7 @@ func printcreatedby(gp *g) { if entry != 0 && tracepc > entry { tracepc -= sys.PCQuantum } - function, file, line := funcfileline(tracepc, -1) + function, file, line, _ := funcfileline(tracepc, -1) if function != "" && showframe(function, gp, false) && gp.goid != 1 { printcreatedby1(function, file, line, entry, pc) } @@ -61,6 +61,16 @@ func callers(skip int, locbuf []location) int { return int(n) } +//go:noescape +//extern runtime_callersRaw +func c_callersRaw(skip int32, pcs *uintptr, max int32) int32 + +// callersRaw returns a raw (PCs only) stack trace of the current goroutine. +func callersRaw(skip int, pcbuf []uintptr) int { + n := c_callersRaw(int32(skip)+1, &pcbuf[0], int32(len(pcbuf))) + return int(n) +} + // traceback prints a traceback of the current goroutine. // This differs from the gc version, which is given pc, sp, lr and g and // can print a traceback of any goroutine. 
@@ -83,7 +93,7 @@ func traceback(skip int32) { func printAncestorTraceback(ancestor ancestorInfo) { print("[originating from goroutine ", ancestor.goid, "]:\n") for fidx, pc := range ancestor.pcs { - function, file, line := funcfileline(pc, -1) + function, file, line, _ := funcfileline(pc, -1) if showfuncinfo(function, fidx == 0) { printAncestorTracebackFuncInfo(function, file, line, pc) } @@ -92,7 +102,7 @@ func printAncestorTraceback(ancestor ancestorInfo) { print("...additional frames elided...\n") } // Show what created goroutine, except main goroutine (goid 1). - function, file, line := funcfileline(ancestor.gopc, -1) + function, file, line, _ := funcfileline(ancestor.gopc, -1) if function != "" && showfuncinfo(function, false) && ancestor.goid != 1 { printcreatedby1(function, file, line, funcentry(ancestor.gopc), ancestor.gopc) } -- cgit v1.1 From 8b33101442a91db2a9c083e6844177ccbb2d3d0b Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Mon, 27 May 2019 00:10:34 +0000 Subject: =?UTF-8?q?re=20PR=20go/90614=20(gcc-9.1.0/libgo/go/syscall/wait.c?= =?UTF-8?q?:54:22:=20error:=20unused=20parameter=20=E2=80=98w=E2=80=99=20[?= =?UTF-8?q?-Werror=3Dunused-parameter]=20Continued=20(uint32=5Ft=20*w))?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR go/90614 syscall: avoid unused parameter error if WIFCONTINUED not defined Fixes https://gcc.gnu.org/PR90614 Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/178997 From-SVN: r271638 --- libgo/go/syscall/wait.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'libgo/go') diff --git a/libgo/go/syscall/wait.c b/libgo/go/syscall/wait.c index 0b234d0..39bc035 100644 --- a/libgo/go/syscall/wait.c +++ b/libgo/go/syscall/wait.c @@ -51,7 +51,7 @@ extern _Bool Continued (uint32_t *w) __asm__ (GOSYM_PREFIX "syscall.WaitStatus.Continued"); _Bool -Continued (uint32_t *w) +Continued (uint32_t *w __attribute__ ((unused))) { return WIFCONTINUED (*w) != 0; } -- cgit v1.1 From a920eb0cb08da30f4d7d4345f42067fdf8ce7b9b Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Mon, 3 Jun 2019 23:02:43 +0000 Subject: runtime: remove unnecessary functions calling between C and Go These functions were needed during the transition of the runtime from C to Go, but are no longer necessary. Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/179879 From-SVN: r271890 --- libgo/go/runtime/stubs.go | 64 +---------------------------------------------- 1 file changed, 1 insertion(+), 63 deletions(-) (limited to 'libgo/go') diff --git a/libgo/go/runtime/stubs.go b/libgo/go/runtime/stubs.go index 435cdf7..530997b 100644 --- a/libgo/go/runtime/stubs.go +++ b/libgo/go/runtime/stubs.go @@ -310,13 +310,6 @@ func errno() int func entersyscall() func entersyscallblock() -// For gccgo to call from C code, so that the C code and the Go code -// can share the memstats variable for now. -//go:linkname getMstats runtime.getMstats -func getMstats() *mstats { - return &memstats -} - // Get signal trampoline, written in C. func getSigtramp() uintptr @@ -338,48 +331,12 @@ func dumpregs(*_siginfo_t, unsafe.Pointer) // Implemented in C for gccgo. func setRandomNumber(uint32) -// Temporary for gccgo until we port proc.go. -//go:linkname getsched runtime.getsched -func getsched() *schedt { - return &sched -} - -// Temporary for gccgo until we port proc.go. -//go:linkname getCgoHasExtraM runtime.getCgoHasExtraM -func getCgoHasExtraM() *bool { - return &cgoHasExtraM -} - -// Temporary for gccgo until we port proc.go. 
-//go:linkname getAllP runtime.getAllP -func getAllP() **p { - return &allp[0] -} - -// Temporary for gccgo until we port proc.go. +// Called by gccgo's proc.c. //go:linkname allocg runtime.allocg func allocg() *g { return new(g) } -// Temporary for gccgo until we port the garbage collector. -//go:linkname getallglen runtime.getallglen -func getallglen() uintptr { - return allglen -} - -// Temporary for gccgo until we port the garbage collector. -//go:linkname getallg runtime.getallg -func getallg(i int) *g { - return allgs[i] -} - -// Temporary for gccgo until we port the garbage collector. -//go:linkname getallm runtime.getallm -func getallm() *m { - return allm -} - // Throw and rethrow an exception. func throwException() func rethrowException() @@ -388,13 +345,6 @@ func rethrowException() // used by the stack unwinder. func unwindExceptionSize() uintptr -// Temporary for gccgo until C code no longer needs it. -//go:nosplit -//go:linkname getPanicking runtime.getPanicking -func getPanicking() uint32 { - return panicking -} - // Called by C code to set the number of CPUs. //go:linkname setncpu runtime.setncpu func setncpu(n int32) { @@ -409,18 +359,6 @@ func setpagesize(s uintptr) { } } -// Called by C code during library initialization. -//go:linkname runtime_m0 runtime.runtime_m0 -func runtime_m0() *m { - return &m0 -} - -// Temporary for gccgo until we port mgc.go. -//go:linkname runtime_g0 runtime.runtime_g0 -func runtime_g0() *g { - return &g0 -} - const uintptrMask = 1<<(8*sys.PtrSize) - 1 type bitvector struct { -- cgit v1.1 From c533ffe04d5fc1baa85dddd8fd5f651128cf11ed Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Mon, 3 Jun 2019 23:07:54 +0000 Subject: libgo: delay applying profile stack-frame skip until fixup When the runtime collects a stack trace to associate it with some profiling event (mem alloc, mutex, etc) there is a skip count passed to runtime.Callers (or equivalent) to skip some known count of frames in order to get to the "interesting" frame corresponding to the profile event. Now that the profiling mechanism uses lazy fixup (when removing compiler artifacts like thunks, morestack calls etc), we also need to move the frame skipping logic after the fixup, so as to insure that the skip count isn't thrown off by these artifacts. Fixes golang/go#32290. Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/179740 From-SVN: r271892 --- libgo/go/runtime/mprof.go | 38 ++++++++++++++++++++++++++----------- libgo/go/runtime/traceback_gccgo.go | 6 +++--- 2 files changed, 30 insertions(+), 14 deletions(-) (limited to 'libgo/go') diff --git a/libgo/go/runtime/mprof.go b/libgo/go/runtime/mprof.go index 9238e2bb..132c2ff 100644 --- a/libgo/go/runtime/mprof.go +++ b/libgo/go/runtime/mprof.go @@ -56,6 +56,7 @@ type bucket struct { hash uintptr size uintptr nstk uintptr + skip int } // A memRecord is the bucket data for a bucket of type memProfile, @@ -185,7 +186,7 @@ func max(x, y uintptr) uintptr { } // newBucket allocates a bucket with the given type and number of stack entries. -func newBucket(typ bucketType, nstk int) *bucket { +func newBucket(typ bucketType, nstk int, skipCount int) *bucket { size := payloadOffset(typ, uintptr(nstk)) switch typ { default: @@ -203,6 +204,7 @@ func newBucket(typ bucketType, nstk int) *bucket { bucketmem += size b.typ = typ b.nstk = uintptr(nstk) + b.skip = skipCount return b } @@ -229,7 +231,7 @@ func (b *bucket) bp() *blockRecord { } // Return the bucket for stk[0:nstk], allocating new bucket if needed. 
-func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket { +func stkbucket(typ bucketType, size uintptr, skip int, stk []uintptr, alloc bool) *bucket { if buckhash == nil { buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys)) if buckhash == nil { @@ -264,7 +266,7 @@ func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket } // Create new bucket. - b := newBucket(typ, len(stk)) + b := newBucket(typ, len(stk), skip) copy(b.stk(), stk) b.hash = h b.size = size @@ -369,9 +371,10 @@ func mProf_PostSweep() { // Called by malloc to record a profiled block. func mProf_Malloc(p unsafe.Pointer, size uintptr) { var stk [maxStack]uintptr - nstk := callersRaw(1, stk[:]) + nstk := callersRaw(stk[:]) lock(&proflock) - b := stkbucket(memProfile, size, stk[:nstk], true) + skip := 1 + b := stkbucket(memProfile, size, skip, stk[:nstk], true) c := mProf.cycle mp := b.mp() mpc := &mp.future[(c+2)%uint32(len(mp.future))] @@ -446,14 +449,14 @@ func saveblockevent(cycles int64, skip int, which bucketType) { var nstk int var stk [maxStack]uintptr if gp.m.curg == nil || gp.m.curg == gp { - nstk = callersRaw(skip, stk[:]) + nstk = callersRaw(stk[:]) } else { // FIXME: This should get a traceback of gp.m.curg. // nstk = gcallers(gp.m.curg, skip, stk[:]) - nstk = callersRaw(skip, stk[:]) + nstk = callersRaw(stk[:]) } lock(&proflock) - b := stkbucket(which, 0, stk[:nstk], true) + b := stkbucket(which, 0, skip, stk[:nstk], true) b.bp().count++ b.bp().cycles += cycles unlock(&proflock) @@ -605,9 +608,12 @@ func freebucket(tofree *bucket) *bucket { // later. Note: there is code in go-callers.c's backtrace_full callback() // function that performs very similar fixups; these two code paths // should be kept in sync. -func fixupStack(stk []uintptr, canonStack *[maxStack]uintptr, size uintptr) int { +func fixupStack(stk []uintptr, skip int, canonStack *[maxStack]uintptr, size uintptr) int { var cidx int var termTrace bool + // Increase the skip count to take into account the frames corresponding + // to runtime.callersRaw and to the C routine that it invokes. + skip += 2 for _, pc := range stk { // Subtract 1 from PC to undo the 1 we added in callback in // go-callers.c. @@ -669,6 +675,16 @@ func fixupStack(stk []uintptr, canonStack *[maxStack]uintptr, size uintptr) int break } } + + // Apply skip count. Needs to be done after expanding inline frames. + if skip != 0 { + if skip >= cidx { + return 0 + } + copy(canonStack[:cidx-skip], canonStack[skip:]) + return cidx - skip + } + return cidx } @@ -680,8 +696,8 @@ func fixupStack(stk []uintptr, canonStack *[maxStack]uintptr, size uintptr) int // the new bucket. 
func fixupBucket(b *bucket) { var canonStack [maxStack]uintptr - frames := fixupStack(b.stk(), &canonStack, b.size) - cb := stkbucket(prunedProfile, b.size, canonStack[:frames], true) + frames := fixupStack(b.stk(), b.skip, &canonStack, b.size) + cb := stkbucket(prunedProfile, b.size, 0, canonStack[:frames], true) switch b.typ { default: throw("invalid profile bucket type") diff --git a/libgo/go/runtime/traceback_gccgo.go b/libgo/go/runtime/traceback_gccgo.go index b0eecf2..4134d28 100644 --- a/libgo/go/runtime/traceback_gccgo.go +++ b/libgo/go/runtime/traceback_gccgo.go @@ -63,11 +63,11 @@ func callers(skip int, locbuf []location) int { //go:noescape //extern runtime_callersRaw -func c_callersRaw(skip int32, pcs *uintptr, max int32) int32 +func c_callersRaw(pcs *uintptr, max int32) int32 // callersRaw returns a raw (PCs only) stack trace of the current goroutine. -func callersRaw(skip int, pcbuf []uintptr) int { - n := c_callersRaw(int32(skip)+1, &pcbuf[0], int32(len(pcbuf))) +func callersRaw(pcbuf []uintptr) int { + n := c_callersRaw(&pcbuf[0], int32(len(pcbuf))) return int(n) } -- cgit v1.1 From 39c0aa5f74be114ec472a97a12409067b74ac0dc Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Mon, 3 Jun 2019 23:37:04 +0000 Subject: compiler, runtime, reflect: generate unique type descriptors Currently, the compiler already generates common symbols for type descriptors, so the type descriptors are unique. However, when a type is created through reflection, it is not deduplicated with compiler-generated types. As a consequence, we cannot assume type descriptors are unique, and cannot use pointer equality to compare them. Also, when constructing a reflect.Type, it has to go through a canonicalization map, which introduces overhead to reflect.TypeOf, and lock contentions in concurrent programs. In order for the reflect package to deduplicate types with compiler-created types, we register all the compiler-created type descriptors at startup time. The reflect package, when it needs to create a type, looks up the registry of compiler-created types before creates a new one. There is no lock contention since the registry is read-only after initialization. This lets us get rid of the canonicalization map, and also makes it possible to compare type descriptors with pointer equality. Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/179598 From-SVN: r271894 --- libgo/go/reflect/type.go | 131 +++++++++++++++++++++++------------------------ libgo/go/runtime/type.go | 78 +++++++++++++++++++++++----- 2 files changed, 130 insertions(+), 79 deletions(-) (limited to 'libgo/go') diff --git a/libgo/go/reflect/type.go b/libgo/go/reflect/type.go index fb2e5d4..8493d87 100644 --- a/libgo/go/reflect/type.go +++ b/libgo/go/reflect/type.go @@ -1105,15 +1105,14 @@ func (t *rtype) ptrTo() *rtype { return &pi.(*ptrType).rtype } + // Look in known types. 
s := "*" + *t.string - - canonicalTypeLock.RLock() - r, ok := canonicalType[s] - canonicalTypeLock.RUnlock() - if ok { - p := (*ptrType)(unsafe.Pointer(r.(*rtype))) - pi, _ := ptrMap.LoadOrStore(t, p) - return &pi.(*ptrType).rtype + if tt := lookupType(s); tt != nil { + p := (*ptrType)(unsafe.Pointer(tt)) + if p.elem == t { + pi, _ := ptrMap.LoadOrStore(t, p) + return &pi.(*ptrType).rtype + } } // Create a new ptrType starting with the description @@ -1138,10 +1137,7 @@ func (t *rtype) ptrTo() *rtype { pp.ptrToThis = nil pp.elem = t - q := canonicalize(&pp.rtype) - p := (*ptrType)(unsafe.Pointer(q.(*rtype))) - - pi, _ := ptrMap.LoadOrStore(t, p) + pi, _ := ptrMap.LoadOrStore(t, &pp) return &pi.(*ptrType).rtype } @@ -1447,6 +1443,13 @@ func ChanOf(dir ChanDir, t Type) Type { case BothDir: s = "chan " + *typ.string } + if tt := lookupType(s); tt != nil { + ch := (*chanType)(unsafe.Pointer(tt)) + if ch.elem == typ && ch.dir == uintptr(dir) { + ti, _ := lookupCache.LoadOrStore(ckey, tt) + return ti.(Type) + } + } // Make a channel type. var ichan interface{} = (chan unsafe.Pointer)(nil) @@ -1472,10 +1475,8 @@ func ChanOf(dir ChanDir, t Type) Type { ch.uncommonType = nil ch.ptrToThis = nil - // Canonicalize before storing in lookupCache - ti := toType(&ch.rtype) - lookupCache.Store(ckey, ti.(*rtype)) - return ti + ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype) + return ti.(Type) } func ismapkey(*rtype) bool // implemented in runtime @@ -1502,6 +1503,13 @@ func MapOf(key, elem Type) Type { // Look in known types. s := "map[" + *ktyp.string + "]" + *etyp.string + if tt := lookupType(s); tt != nil { + mt := (*mapType)(unsafe.Pointer(tt)) + if mt.key == ktyp && mt.elem == etyp { + ti, _ := lookupCache.LoadOrStore(ckey, tt) + return ti.(Type) + } + } // Make a map type. // Note: flag values must match those used in the TMAP case @@ -1544,10 +1552,8 @@ func MapOf(key, elem Type) Type { mt.flags |= 16 } - // Canonicalize before storing in lookupCache - ti := toType(&mt.rtype) - lookupCache.Store(ckey, ti.(*rtype)) - return ti + ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype) + return ti.(Type) } // FuncOf returns the function type with the given argument and result types. @@ -1625,15 +1631,17 @@ func FuncOf(in, out []Type, variadic bool) Type { } str := funcStr(ft) + if tt := lookupType(str); tt != nil { + if haveIdenticalUnderlyingType(&ft.rtype, tt, true) { + return addToCache(tt) + } + } // Populate the remaining fields of ft and store in cache. ft.string = &str ft.uncommonType = nil ft.ptrToThis = nil - - // Canonicalize before storing in funcLookupCache - tc := toType(&ft.rtype) - return addToCache(tc.(*rtype)) + return addToCache(&ft.rtype) } // funcStr builds a string representation of a funcType. @@ -1873,6 +1881,13 @@ func SliceOf(t Type) Type { // Look in known types. s := "[]" + *typ.string + if tt := lookupType(s); tt != nil { + slice := (*sliceType)(unsafe.Pointer(tt)) + if slice.elem == typ { + ti, _ := lookupCache.LoadOrStore(ckey, tt) + return ti.(Type) + } + } // Make a slice type. var islice interface{} = ([]unsafe.Pointer)(nil) @@ -1888,10 +1903,8 @@ func SliceOf(t Type) Type { slice.uncommonType = nil slice.ptrToThis = nil - // Canonicalize before storing in lookupCache - ti := toType(&slice.rtype) - lookupCache.Store(ckey, ti.(*rtype)) - return ti + ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype) + return ti.(Type) } // The structLookupCache caches StructOf lookups. @@ -2106,6 +2119,13 @@ func StructOf(fields []StructField) Type { return t } + // Look in known types. 
+ if tt := lookupType(str); tt != nil { + if haveIdenticalUnderlyingType(&typ.rtype, tt, true) { + return addToCache(tt) + } + } + typ.string = &str typ.hash = hash typ.size = size @@ -2214,10 +2234,7 @@ func StructOf(fields []StructField) Type { typ.uncommonType = nil typ.ptrToThis = nil - - // Canonicalize before storing in structLookupCache - ti := toType(&typ.rtype) - return addToCache(ti.(*rtype)) + return addToCache(&typ.rtype) } func runtimeStructField(field StructField) structField { @@ -2300,6 +2317,13 @@ func ArrayOf(count int, elem Type) Type { // Look in known types. s := "[" + strconv.Itoa(count) + "]" + *typ.string + if tt := lookupType(s); tt != nil { + array := (*arrayType)(unsafe.Pointer(tt)) + if array.elem == typ { + ti, _ := lookupCache.LoadOrStore(ckey, tt) + return ti.(Type) + } + } // Make an array type. var iarray interface{} = [1]unsafe.Pointer{} @@ -2451,10 +2475,8 @@ func ArrayOf(count int, elem Type) Type { } } - // Canonicalize before storing in lookupCache - ti := toType(&array.rtype) - lookupCache.Store(ckey, ti.(*rtype)) - return ti + ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype) + return ti.(Type) } func appendVarint(x []byte, v uintptr) []byte { @@ -2466,42 +2488,19 @@ func appendVarint(x []byte, v uintptr) []byte { } // toType converts from a *rtype to a Type that can be returned -// to the client of package reflect. In gc, the only concern is that -// a nil *rtype must be replaced by a nil Type, but in gccgo this -// function takes care of ensuring that multiple *rtype for the same -// type are coalesced into a single Type. -var canonicalType = make(map[string]Type) - -var canonicalTypeLock sync.RWMutex - -func canonicalize(t Type) Type { - if t == nil { - return nil - } - s := t.rawString() - canonicalTypeLock.RLock() - if r, ok := canonicalType[s]; ok { - canonicalTypeLock.RUnlock() - return r - } - canonicalTypeLock.RUnlock() - canonicalTypeLock.Lock() - if r, ok := canonicalType[s]; ok { - canonicalTypeLock.Unlock() - return r - } - canonicalType[s] = t - canonicalTypeLock.Unlock() - return t -} - +// to the client of package reflect. The only concern is that +// a nil *rtype must be replaced by a nil Type. func toType(p *rtype) Type { if p == nil { return nil } - return canonicalize(p) + return p } +// Look up a compiler-generated type descriptor. +// Implemented in runtime. +func lookupType(s string) *rtype + // ifaceIndir reports whether t is stored indirectly in an interface value. func ifaceIndir(t *rtype) bool { return t.kind&kindDirectIface == 0 diff --git a/libgo/go/runtime/type.go b/libgo/go/runtime/type.go index 5cafa38..3bdb8f1 100644 --- a/libgo/go/runtime/type.go +++ b/libgo/go/runtime/type.go @@ -6,7 +6,11 @@ package runtime -import "unsafe" +import ( + "runtime/internal/atomic" + "runtime/internal/sys" + "unsafe" +) type _type struct { size uintptr @@ -45,19 +49,8 @@ func (t *_type) pkgpath() string { } // Return whether two type descriptors are equal. -// This is gccgo-specific, as gccgo, unlike gc, permits multiple -// independent descriptors for a single type. func eqtype(t1, t2 *_type) bool { - switch { - case t1 == t2: - return true - case t1 == nil || t2 == nil: - return false - case t1.kind != t2.kind || t1.hash != t2.hash: - return false - default: - return t1.string() == t2.string() - } + return t1 == t2 } type method struct { @@ -164,3 +157,62 @@ type structtype struct { typ _type fields []structfield } + +// typeDescriptorList holds a list of type descriptors generated +// by the compiler. 
This is used for the compiler to register +// type descriptors to the runtime. +// The layout is known to the compiler. +//go:notinheap +type typeDescriptorList struct { + count int + types [1]uintptr // variable length +} + +// typelist holds all type descriptors generated by the comiler. +// This is for the reflect package to deduplicate type descriptors +// when it creates a type that is also a compiler-generated type. +var typelist struct { + initialized uint32 + lists []*typeDescriptorList // one element per package + types map[string]uintptr // map from a type's string to *_type, lazily populated + // TODO: use a sorted array instead? +} +var typelistLock mutex + +// The compiler generates a call of this function in the main +// package's init function, to register compiler-generated +// type descriptors. +// p points to a list of *typeDescriptorList, n is the length +// of the list. +//go:linkname registerTypeDescriptors runtime.registerTypeDescriptors +func registerTypeDescriptors(n int, p unsafe.Pointer) { + *(*slice)(unsafe.Pointer(&typelist.lists)) = slice{p, n, n} +} + +// The reflect package uses this function to look up a compiler- +// generated type descriptor. +//go:linkname reflect_lookupType reflect.lookupType +func reflect_lookupType(s string) *_type { + // Lazy initialization. We don't need to do this if we never create + // types through reflection. + if atomic.Load(&typelist.initialized) == 0 { + lock(&typelistLock) + if atomic.Load(&typelist.initialized) == 0 { + n := 0 + for _, list := range typelist.lists { + n += list.count + } + typelist.types = make(map[string]uintptr, n) + for _, list := range typelist.lists { + for i := 0; i < list.count; i++ { + typ := *(**_type)(add(unsafe.Pointer(&list.types), uintptr(i)*sys.PtrSize)) + typelist.types[typ.string()] = uintptr(unsafe.Pointer(typ)) + } + } + atomic.Store(&typelist.initialized, 1) + } + unlock(&typelistLock) + } + + return (*_type)(unsafe.Pointer(typelist.types[s])) +} -- cgit v1.1 From 5a9422664e8646313278d50666e2e4c8427cd5df Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Wed, 5 Jun 2019 21:05:38 +0000 Subject: compiler: inline call expressions and function references Scan inlinable methods for references to global variables and functions (forgot to do that earlier). Track all packages mentioned by exports (that should have been done earlier too). Record assembler name in export data, so that we can inline calls to non-Go functions. Modify gccgoimporter code to skip assembler name. This increases the number of inlinable functions in the standard library from 215 to 439. Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/180677 From-SVN: r271976 --- libgo/go/go/internal/gccgoimporter/parser.go | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'libgo/go') diff --git a/libgo/go/go/internal/gccgoimporter/parser.go b/libgo/go/go/internal/gccgoimporter/parser.go index 42f43a1..956a9a8 100644 --- a/libgo/go/go/internal/gccgoimporter/parser.go +++ b/libgo/go/go/internal/gccgoimporter/parser.go @@ -539,10 +539,12 @@ func (p *parser) parseNamedType(nlist []int) types.Type { for p.tok == scanner.Ident { p.expectKeyword("func") if p.tok == '/' { - // Skip a /*nointerface*/ comment. + // Skip a /*nointerface*/ or /*asm ID */ comment. 
p.expect('/') p.expect('*') - p.expect(scanner.Ident) + if p.expect(scanner.Ident) == "asm" { + p.parseUnquotedString() + } p.expect('*') p.expect('/') } @@ -727,6 +729,17 @@ func (p *parser) parseFunctionType(pkg *types.Package, nlist []int) *types.Signa // Func = Name FunctionType [InlineBody] . func (p *parser) parseFunc(pkg *types.Package) *types.Func { + if p.tok == '/' { + // Skip an /*asm ID */ comment. + p.expect('/') + p.expect('*') + if p.expect(scanner.Ident) == "asm" { + p.parseUnquotedString() + } + p.expect('*') + p.expect('/') + } + name := p.parseName() if strings.ContainsRune(name, '$') { // This is a Type$equal or Type$hash function, which we don't want to parse, -- cgit v1.1 From 269f05ff58289cfdd3a35f0d9afc8e229a98f50e Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Thu, 6 Jun 2019 00:44:01 +0000 Subject: compiler: make use of specialized fast map routines In the runtime there are specialized fast map routines for certain kep types. This CL lets the compiler make use of these functions, instead of always using the generic ones. As we now generate multiple versions of map delete calls, to make things easier we delay the expansion of the built-in delete function to flatten phase. Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/180858 From-SVN: r271983 --- libgo/go/runtime/map_fast32.go | 9 +++++++++ libgo/go/runtime/map_fast64.go | 9 +++++++++ libgo/go/runtime/map_faststr.go | 8 ++++++++ 3 files changed, 26 insertions(+) (limited to 'libgo/go') diff --git a/libgo/go/runtime/map_fast32.go b/libgo/go/runtime/map_fast32.go index 1fa5cd9..07a35e1 100644 --- a/libgo/go/runtime/map_fast32.go +++ b/libgo/go/runtime/map_fast32.go @@ -9,6 +9,15 @@ import ( "unsafe" ) +// For gccgo, use go:linkname to rename compiler-called functions to +// themselves, so that the compiler will export them. +// +//go:linkname mapaccess1_fast32 runtime.mapaccess1_fast32 +//go:linkname mapaccess2_fast32 runtime.mapaccess2_fast32 +//go:linkname mapassign_fast32 runtime.mapassign_fast32 +//go:linkname mapassign_fast32ptr runtime.mapassign_fast32ptr +//go:linkname mapdelete_fast32 runtime.mapdelete_fast32 + func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { if raceenabled && h != nil { callerpc := getcallerpc() diff --git a/libgo/go/runtime/map_fast64.go b/libgo/go/runtime/map_fast64.go index d23ac23..d21bf06 100644 --- a/libgo/go/runtime/map_fast64.go +++ b/libgo/go/runtime/map_fast64.go @@ -9,6 +9,15 @@ import ( "unsafe" ) +// For gccgo, use go:linkname to rename compiler-called functions to +// themselves, so that the compiler will export them. +// +//go:linkname mapaccess1_fast64 runtime.mapaccess1_fast64 +//go:linkname mapaccess2_fast64 runtime.mapaccess2_fast64 +//go:linkname mapassign_fast64 runtime.mapassign_fast64 +//go:linkname mapassign_fast64ptr runtime.mapassign_fast64ptr +//go:linkname mapdelete_fast64 runtime.mapdelete_fast64 + func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { if raceenabled && h != nil { callerpc := getcallerpc() diff --git a/libgo/go/runtime/map_faststr.go b/libgo/go/runtime/map_faststr.go index eced15a..083980f 100644 --- a/libgo/go/runtime/map_faststr.go +++ b/libgo/go/runtime/map_faststr.go @@ -9,6 +9,14 @@ import ( "unsafe" ) +// For gccgo, use go:linkname to rename compiler-called functions to +// themselves, so that the compiler will export them. 
+// +//go:linkname mapaccess1_faststr runtime.mapaccess1_faststr +//go:linkname mapaccess2_faststr runtime.mapaccess2_faststr +//go:linkname mapassign_faststr runtime.mapassign_faststr +//go:linkname mapdelete_faststr runtime.mapdelete_faststr + func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { if raceenabled && h != nil { callerpc := getcallerpc() -- cgit v1.1 From ffaa3a1c7433742373c2a71257d573d234428270 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Fri, 7 Jun 2019 00:07:50 +0000 Subject: go/internal/gccgoimporter: ignore unexported and imported names Due to inlining, we can now see unexported functions and variables, and functions and variables imported from different packages. Ignore them rather than reporting them from this package. Handle $hash and $equal functions consistently, so that we discard the inline body if there is one. Ignore names created for result parameters for inlining purposes. Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/180758 From-SVN: r272023 --- libgo/go/go/internal/gccgoimporter/parser.go | 53 ++++++++++++++-------------- 1 file changed, 27 insertions(+), 26 deletions(-) (limited to 'libgo/go') diff --git a/libgo/go/go/internal/gccgoimporter/parser.go b/libgo/go/go/internal/gccgoimporter/parser.go index 956a9a8..5881d9c 100644 --- a/libgo/go/go/internal/gccgoimporter/parser.go +++ b/libgo/go/go/internal/gccgoimporter/parser.go @@ -261,6 +261,10 @@ func (p *parser) parseField(pkg *types.Package) (field *types.Var, tag string) { // Param = Name ["..."] Type . func (p *parser) parseParam(pkg *types.Package) (param *types.Var, isVariadic bool) { name := p.parseName() + // Ignore names invented for inlinable functions. + if strings.HasPrefix(name, "p.") || strings.HasPrefix(name, "r.") || strings.HasPrefix(name, "$ret") { + name = "" + } if p.tok == '<' && p.scanner.Peek() == 'e' { // EscInfo = "" . (optional and ignored) p.next() @@ -286,7 +290,14 @@ func (p *parser) parseParam(pkg *types.Package) (param *types.Var, isVariadic bo // Var = Name Type . func (p *parser) parseVar(pkg *types.Package) *types.Var { name := p.parseName() - return types.NewVar(token.NoPos, pkg, name, p.parseType(pkg)) + v := types.NewVar(token.NoPos, pkg, name, p.parseType(pkg)) + if name[0] == '.' || name[0] == '<' { + // This is an unexported variable, + // or a variable defined in a different package. + // We only want to record exported variables. + return nil + } + return v } // Conversion = "convert" "(" Type "," ConstValue ")" . @@ -741,14 +752,17 @@ func (p *parser) parseFunc(pkg *types.Package) *types.Func { } name := p.parseName() - if strings.ContainsRune(name, '$') { - // This is a Type$equal or Type$hash function, which we don't want to parse, - // except for the types. - p.discardDirectiveWhileParsingTypes(pkg) - return nil - } f := types.NewFunc(token.NoPos, pkg, name, p.parseFunctionType(pkg, nil)) p.skipInlineBody() + + if name[0] == '.' || name[0] == '<' || strings.ContainsRune(name, '$') { + // This is an unexported function, + // or a function defined in a different package, + // or a type$equal or type$hash function. + // We only want to record exported functions. 
+ return nil + } + return f } @@ -769,7 +783,9 @@ func (p *parser) parseInterfaceType(pkg *types.Package, nlist []int) types.Type embeddeds = append(embeddeds, p.parseType(pkg)) } else { method := p.parseFunc(pkg) - methods = append(methods, method) + if method != nil { + methods = append(methods, method) + } } p.expect(';') } @@ -1050,23 +1066,6 @@ func (p *parser) parsePackageInit() PackageInit { return PackageInit{Name: name, InitFunc: initfunc, Priority: priority} } -// Throw away tokens until we see a newline or ';'. -// If we see a '<', attempt to parse as a type. -func (p *parser) discardDirectiveWhileParsingTypes(pkg *types.Package) { - for { - switch p.tok { - case '\n', ';': - return - case '<': - p.parseType(pkg) - case scanner.EOF: - p.error("unexpected EOF") - default: - p.next() - } - } -} - // Create the package if we have parsed both the package path and package name. func (p *parser) maybeCreatePackage() { if p.pkgname != "" && p.pkgpath != "" { @@ -1204,7 +1203,9 @@ func (p *parser) parseDirective() { case "var": p.next() v := p.parseVar(p.pkg) - p.pkg.Scope().Insert(v) + if v != nil { + p.pkg.Scope().Insert(v) + } p.expectEOL() case "const": -- cgit v1.1 From 4349775a30600906f5811ba7c743a5c22bdb3d7d Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Wed, 19 Jun 2019 15:13:53 +0000 Subject: compiler: optimize string concatenations runtime.concatstring{2,3,4,5} are just wrappers of concatstrings. These wrappers don't provide any benefit, at least in the C calling convention we use, where passing arrays by value isn't an efficient thing. Change it to always use concatstrings. Also, the cap field of the slice passed to concatstrings is not necessary. So change it to pass a pointer and a length directly, which is more efficient than passing a slice header by value. Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/182539 From-SVN: r272476 --- libgo/go/runtime/string.go | 24 +++--------------------- 1 file changed, 3 insertions(+), 21 deletions(-) (limited to 'libgo/go') diff --git a/libgo/go/runtime/string.go b/libgo/go/runtime/string.go index eac94bf..9bcfc996 100644 --- a/libgo/go/runtime/string.go +++ b/libgo/go/runtime/string.go @@ -13,10 +13,6 @@ import ( // themselves, so that the compiler will export them. // //go:linkname concatstrings runtime.concatstrings -//go:linkname concatstring2 runtime.concatstring2 -//go:linkname concatstring3 runtime.concatstring3 -//go:linkname concatstring4 runtime.concatstring4 -//go:linkname concatstring5 runtime.concatstring5 //go:linkname slicebytetostring runtime.slicebytetostring //go:linkname slicebytetostringtmp runtime.slicebytetostringtmp //go:linkname stringtoslicebyte runtime.stringtoslicebyte @@ -38,7 +34,9 @@ type tmpBuf [tmpStringBufSize]byte // If buf != nil, the compiler has determined that the result does not // escape the calling function, so the string data can be stored in buf // if small enough. 
-func concatstrings(buf *tmpBuf, a []string) string { +func concatstrings(buf *tmpBuf, p *string, n int) string { + var a []string + *(*slice)(unsafe.Pointer(&a)) = slice{unsafe.Pointer(p), n, n} // idx := 0 l := 0 count := 0 @@ -73,22 +71,6 @@ func concatstrings(buf *tmpBuf, a []string) string { return s } -func concatstring2(buf *tmpBuf, a [2]string) string { - return concatstrings(buf, a[:]) -} - -func concatstring3(buf *tmpBuf, a [3]string) string { - return concatstrings(buf, a[:]) -} - -func concatstring4(buf *tmpBuf, a [4]string) string { - return concatstrings(buf, a[:]) -} - -func concatstring5(buf *tmpBuf, a [5]string) string { - return concatstrings(buf, a[:]) -} - // Buf is a fixed-size buffer for the result, // it is not nil if the result does not escape. func slicebytetostring(buf *tmpBuf, b []byte) (str string) { -- cgit v1.1 From 0514cb33749fefd2542e7294a35d0ef0ccae30b3 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Fri, 21 Jun 2019 22:00:57 +0000 Subject: compiler: open code some type assertions Now that type equality is just simple pointer equality, we can open code some type assertions instead of making runtime calls. Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/182977 From-SVN: r272577 --- libgo/go/runtime/iface.go | 37 ++++--------------------------------- 1 file changed, 4 insertions(+), 33 deletions(-) (limited to 'libgo/go') diff --git a/libgo/go/runtime/iface.go b/libgo/go/runtime/iface.go index 1c3a5f3..6def738 100644 --- a/libgo/go/runtime/iface.go +++ b/libgo/go/runtime/iface.go @@ -15,10 +15,7 @@ import ( // //go:linkname requireitab runtime.requireitab //go:linkname assertitab runtime.assertitab -//go:linkname assertI2T runtime.assertI2T -//go:linkname ifacetypeeq runtime.ifacetypeeq -//go:linkname efacetype runtime.efacetype -//go:linkname ifacetype runtime.ifacetype +//go:linkname panicdottype runtime.panicdottype //go:linkname ifaceE2E2 runtime.ifaceE2E2 //go:linkname ifaceI2E2 runtime.ifaceI2E2 //go:linkname ifaceE2I2 runtime.ifaceE2I2 @@ -356,35 +353,9 @@ func assertitab(lhs, rhs *_type) unsafe.Pointer { return getitab(lhs, rhs, false) } -// Check whether an interface type may be converted to a non-interface -// type, panicing if not. -func assertI2T(lhs, rhs, inter *_type) { - if rhs == nil { - panic(&TypeAssertionError{nil, nil, lhs, ""}) - } - if !eqtype(lhs, rhs) { - panic(&TypeAssertionError{inter, rhs, lhs, ""}) - } -} - -// Compare two type descriptors for equality. -func ifacetypeeq(a, b *_type) bool { - return eqtype(a, b) -} - -// Return the type descriptor of an empty interface. -// FIXME: This should be inlined by the compiler. -func efacetype(e eface) *_type { - return e._type -} - -// Return the type descriptor of a non-empty interface. -// FIXME: This should be inlined by the compiler. -func ifacetype(i iface) *_type { - if i.tab == nil { - return nil - } - return *(**_type)(i.tab) +// panicdottype is called when doing an i.(T) conversion and the conversion fails. +func panicdottype(lhs, rhs, inter *_type) { + panic(&TypeAssertionError{inter, rhs, lhs, ""}) } // Convert an empty interface to an empty interface, for a comma-ok -- cgit v1.1 From f4e7200b1df3dde7d2d9cec8861c6567356db40f Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Fri, 21 Jun 2019 22:21:40 +0000 Subject: runtime: inline and remove eqtype Now that type equality is just a pointer equality, write it inlined and remove the eqtype function. 
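Concretely, call sites of the helper, for example

    if !eqtype(t, y._type) {
        return false
    }

are rewritten (as in the diff below) as a direct pointer comparison:

    if t != y._type {
        return false
    }
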
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/182978 From-SVN: r272578 --- libgo/go/runtime/alg.go | 10 +++++----- libgo/go/runtime/iface.go | 12 ++++++------ libgo/go/runtime/type.go | 5 ----- 3 files changed, 11 insertions(+), 16 deletions(-) (limited to 'libgo/go') diff --git a/libgo/go/runtime/alg.go b/libgo/go/runtime/alg.go index c6bc6b6..ec951e3 100644 --- a/libgo/go/runtime/alg.go +++ b/libgo/go/runtime/alg.go @@ -205,7 +205,7 @@ func nilinterequal(p, q unsafe.Pointer) bool { } func efaceeq(x, y eface) bool { t := x._type - if !eqtype(t, y._type) { + if t != y._type { return false } if t == nil { @@ -229,7 +229,7 @@ func ifaceeq(x, y iface) bool { return false } t := *(**_type)(xtab) - if !eqtype(t, *(**_type)(y.tab)) { + if t != *(**_type)(y.tab) { return false } eq := t.equalfn @@ -247,7 +247,7 @@ func ifacevaleq(x iface, t *_type, p unsafe.Pointer) bool { return false } xt := *(**_type)(x.tab) - if !eqtype(xt, t) { + if xt != t { return false } eq := t.equalfn @@ -268,7 +268,7 @@ func ifaceefaceeq(x iface, y eface) bool { return false } xt := *(**_type)(x.tab) - if !eqtype(xt, y._type) { + if xt != y._type { return false } eq := xt.equalfn @@ -285,7 +285,7 @@ func efacevaleq(x eface, t *_type, p unsafe.Pointer) bool { if x._type == nil { return false } - if !eqtype(x._type, t) { + if x._type != t { return false } eq := t.equalfn diff --git a/libgo/go/runtime/iface.go b/libgo/go/runtime/iface.go index 6def738..d434f9e 100644 --- a/libgo/go/runtime/iface.go +++ b/libgo/go/runtime/iface.go @@ -233,7 +233,7 @@ func (m *itab) init() string { ri++ } - if !eqtype(lhsMethod.typ, rhsMethod.mtyp) { + if lhsMethod.typ != rhsMethod.mtyp { m.methods[1] = nil return *lhsMethod.name } @@ -406,7 +406,7 @@ func ifaceI2I2(inter *_type, i iface) (iface, bool) { // Convert an empty interface to a pointer non-interface type. func ifaceE2T2P(t *_type, e eface) (unsafe.Pointer, bool) { - if !eqtype(t, e._type) { + if t != e._type { return nil, false } else { return e.data, true @@ -415,7 +415,7 @@ func ifaceE2T2P(t *_type, e eface) (unsafe.Pointer, bool) { // Convert a non-empty interface to a pointer non-interface type. func ifaceI2T2P(t *_type, i iface) (unsafe.Pointer, bool) { - if i.tab == nil || !eqtype(t, *(**_type)(i.tab)) { + if i.tab == nil || t != *(**_type)(i.tab) { return nil, false } else { return i.data, true @@ -424,7 +424,7 @@ func ifaceI2T2P(t *_type, i iface) (unsafe.Pointer, bool) { // Convert an empty interface to a non-pointer non-interface type. func ifaceE2T2(t *_type, e eface, ret unsafe.Pointer) bool { - if !eqtype(t, e._type) { + if t != e._type { typedmemclr(t, ret) return false } else { @@ -439,7 +439,7 @@ func ifaceE2T2(t *_type, e eface, ret unsafe.Pointer) bool { // Convert a non-empty interface to a non-pointer non-interface type. func ifaceI2T2(t *_type, i iface, ret unsafe.Pointer) bool { - if i.tab == nil || !eqtype(t, *(**_type)(i.tab)) { + if i.tab == nil || t != *(**_type)(i.tab) { typedmemclr(t, ret) return false } else { @@ -485,7 +485,7 @@ func ifaceT2Ip(to, from *_type) bool { ri++ } - if !eqtype(fromMethod.mtyp, toMethod.typ) { + if fromMethod.mtyp != toMethod.typ { return false } diff --git a/libgo/go/runtime/type.go b/libgo/go/runtime/type.go index 3bdb8f1..8af6246 100644 --- a/libgo/go/runtime/type.go +++ b/libgo/go/runtime/type.go @@ -48,11 +48,6 @@ func (t *_type) pkgpath() string { return "" } -// Return whether two type descriptors are equal. 
-func eqtype(t1, t2 *_type) bool { - return t1 == t2 -} - type method struct { name *string pkgPath *string -- cgit v1.1 From 609c7da9ab08b009fb7a9cacf68a40b3b39231b3 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Mon, 24 Jun 2019 17:54:07 +0000 Subject: compiler: open code string equality Open code string equality with builtin memcmp. This allows further optimizations in the backend. Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/183538 From-SVN: r272624 --- libgo/go/runtime/alg.go | 1 - libgo/go/runtime/stubs.go | 12 ------------ 2 files changed, 13 deletions(-) (limited to 'libgo/go') diff --git a/libgo/go/runtime/alg.go b/libgo/go/runtime/alg.go index ec951e3..a2bb5bb 100644 --- a/libgo/go/runtime/alg.go +++ b/libgo/go/runtime/alg.go @@ -44,7 +44,6 @@ import ( //go:linkname ifacevaleq runtime.ifacevaleq //go:linkname ifaceefaceeq runtime.ifaceefaceeq //go:linkname efacevaleq runtime.efacevaleq -//go:linkname eqstring runtime.eqstring //go:linkname cmpstring runtime.cmpstring // // Temporary to be called from C code. diff --git a/libgo/go/runtime/stubs.go b/libgo/go/runtime/stubs.go index 530997b..e00d759 100644 --- a/libgo/go/runtime/stubs.go +++ b/libgo/go/runtime/stubs.go @@ -273,18 +273,6 @@ func checkASM() bool { return true } -func eqstring(x, y string) bool { - a := stringStructOf(&x) - b := stringStructOf(&y) - if a.len != b.len { - return false - } - if a.str == b.str { - return true - } - return memequal(a.str, b.str, uintptr(a.len)) -} - // For gccgo this is in the C code. func osyield() -- cgit v1.1 From c31a34018aa6c4f34102e8c5a93cc98def3b5a7b Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Wed, 26 Jun 2019 00:04:36 +0000 Subject: cmd/go: silence ar with D flag failures The first call of ar must not show its output in order to avoid useless error messages about D flag. The corresponding Go toolchain patch is CL 182077. Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/183817 From-SVN: r272661 --- libgo/go/cmd/go/internal/work/gccgo.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'libgo/go') diff --git a/libgo/go/cmd/go/internal/work/gccgo.go b/libgo/go/cmd/go/internal/work/gccgo.go index a0eb2d3..3b97209 100644 --- a/libgo/go/cmd/go/internal/work/gccgo.go +++ b/libgo/go/cmd/go/internal/work/gccgo.go @@ -209,9 +209,16 @@ func (tools gccgoToolchain) pack(b *Builder, a *Action, afile string, ofiles []s } absAfile := mkAbs(objdir, afile) // Try with D modifier first, then without if that fails. - if b.run(a, p.Dir, p.ImportPath, nil, tools.ar(), arArgs, "rcD", absAfile, absOfiles) != nil { + output, err := b.runOut(p.Dir, nil, tools.ar(), arArgs, "rcD", absAfile, absOfiles) + if err != nil { return b.run(a, p.Dir, p.ImportPath, nil, tools.ar(), arArgs, "rc", absAfile, absOfiles) } + + if len(output) > 0 { + // Show the output if there is any even without errors. + b.showOutput(a, p.Dir, p.ImportPath, b.processOutput(output)) + } + return nil } -- cgit v1.1
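
As a rough standalone sketch of the fallback pattern the gccgo.go change implements (this is not the cmd/go code itself, and the archive and object file names are hypothetical): run ar with the D (deterministic) modifier first while capturing its output, and retry without D if that fails, so users never see spurious complaints from an ar that lacks the modifier.

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// pack archives ofiles into afile, preferring deterministic mode.
func pack(afile string, ofiles []string) error {
	// First attempt: "ar rcD", output captured rather than shown, so a
	// failure due to an unsupported D modifier stays silent.
	args := append([]string{"rcD", afile}, ofiles...)
	out, err := exec.Command("ar", args...).CombinedOutput()
	if err != nil {
		// Fall back to plain "rc" and let this run report normally.
		args = append([]string{"rc", afile}, ofiles...)
		retry := exec.Command("ar", args...)
		retry.Stdout = os.Stdout
		retry.Stderr = os.Stderr
		return retry.Run()
	}
	if len(out) > 0 {
		// The first attempt succeeded; surface any output it produced.
		fmt.Print(string(out))
	}
	return nil
}

func main() {
	if err := pack("libexample.a", []string{"a.o", "b.o"}); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}

Capturing the first attempt (as b.runOut does in the patch) rather than streaming it is the key point: its output is only shown when the rcD invocation actually succeeds, while a D-modifier failure is reported, if at all, by the plain rc retry.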