From 4f4a855d82a889cebcfca150a7a43909bcb6a346 Mon Sep 17 00:00:00 2001
From: Ian Lance Taylor
Date: Fri, 18 Jan 2019 19:04:36 +0000
Subject: libgo: update to Go1.12beta2

Reviewed-on: https://go-review.googlesource.com/c/158019

gotools/:
	* Makefile.am (go_cmd_vet_files): Update for Go1.12beta2 release.
	(GOTOOLS_TEST_TIMEOUT): Increase to 600.
	(check-runtime): Export LD_LIBRARY_PATH before computing GOARCH
	and GOOS.
	(check-vet): Copy golang.org/x/tools into check-vet-dir.
	* Makefile.in: Regenerate.

gcc/testsuite/:
	* go.go-torture/execute/names-1.go: Stop using debug/xcoff, which
	is no longer externally visible.

From-SVN: r268084
---
 libgo/go/runtime/malloc.go | 149 ++++++++++++++++++++++++++++++++-------------
 1 file changed, 105 insertions(+), 44 deletions(-)

(limited to 'libgo/go/runtime/malloc.go')

diff --git a/libgo/go/runtime/malloc.go b/libgo/go/runtime/malloc.go
index ac4759f..36417fb 100644
--- a/libgo/go/runtime/malloc.go
+++ b/libgo/go/runtime/malloc.go
@@ -106,6 +106,7 @@ package runtime

 import (
 	"runtime/internal/atomic"
+	"runtime/internal/math"
 	"runtime/internal/sys"
 	"unsafe"
 )
@@ -135,8 +136,6 @@ const (
 	// have the most objects per span.
 	maxObjsPerSpan = pageSize / 8

-	mSpanInUse = _MSpanInUse
-
 	concurrentSweep = _ConcurrentSweep

 	_PageSize = 1 << _PageShift
@@ -149,8 +148,7 @@ const (
 	_TinySize      = 16
 	_TinySizeClass = int8(2)

-	_FixAllocChunk  = 16 << 10               // Chunk size for FixAlloc
-	_MaxMHeapList   = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap.
+	_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc

 	// Per-P, per order stack segment cache size.
 	_StackCacheSize = 32 * 1024
@@ -173,7 +171,7 @@ const (
 	// amd64, addresses are sign-extended beyond heapAddrBits. On
 	// other arches, they are zero-extended.
 	//
-	// On 64-bit platforms, we limit this to 48 bits based on a
+	// On most 64-bit platforms, we limit this to 48 bits based on a
 	// combination of hardware and OS limitations.
 	//
 	// amd64 hardware limits addresses to 48 bits, sign-extended
@@ -191,10 +189,9 @@ const (
 	// bits, in the range [0, 1<<48).
 	//
 	// ppc64, mips64, and s390x support arbitrary 64 bit addresses
-	// in hardware. However, since Go only supports Linux on
-	// these, we lean on OS limits. Based on Linux's processor.h,
-	// the user address space is limited as follows on 64-bit
-	// architectures:
+	// in hardware. On Linux, Go leans on stricter OS limits. Based
+	// on Linux's processor.h, the user address space is limited as
+	// follows on 64-bit architectures:
 	//
 	// Architecture  Name              Maximum Value (exclusive)
 	// ---------------------------------------------------------------------
@@ -211,13 +208,17 @@ const (
 	// exceed Go's 48 bit limit, it's extremely unlikely in
 	// practice.
 	//
+	// On aix/ppc64, the limits is increased to 1<<60 to accept addresses
+	// returned by mmap syscall. These are in range:
+	//  0x0a00000000000000 - 0x0afffffffffffff
+	//
 	// On 32-bit platforms, we accept the full 32-bit address
 	// space because doing so is cheap.
 	// mips32 only has access to the low 2GB of virtual memory, so
 	// we further limit it to 31 bits.
 	//
 	// WebAssembly currently has a limit of 4GB linear memory.
-	heapAddrBits = (_64bit*(1-sys.GoarchWasm))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle))
+	heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosAix))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 60*sys.GoosAix

 	// maxAlloc is the maximum size of an allocation. On 64-bit,
 	// it's theoretically possible to allocate 1<= 0; i-- {
 			var p uintptr
 			switch {
 			case GOARCH == "arm64" && GOOS == "darwin":
 				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
 			case GOARCH == "arm64":
 				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
 			case GOOS == "aix":
 				if i == 0 {
-					p = uintptrMask&(1<<42) | uintptrMask&(0xa0<<52)
-				} else {
-					p = uintptr(i)<<42 | uintptrMask&(0x70<<52)
+					// We don't use addresses directly after 0x0A00000000000000
+					// to avoid collisions with others mmaps done by non-go programs.
+					continue
 				}
+				p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
 			case raceenabled:
 				// The TSAN runtime requires the heap
 				// to be in the range [0x00c000000000,
@@ -480,7 +486,7 @@ func mallocinit() {
 	// 3. We try to stake out a reasonably large initial
 	// heap reservation.

-	const arenaMetaSize = unsafe.Sizeof([1 << arenaBits]heapArena{})
+	const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
 	meta := uintptr(sysReserve(nil, arenaMetaSize))
 	if meta != 0 {
 		mheap_.heapArenaAlloc.init(meta, arenaMetaSize)
@@ -663,6 +669,27 @@ mapped:
 		}
 	}

+	// Add the arena to the arenas list.
+	if len(h.allArenas) == cap(h.allArenas) {
+		size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize
+		if size == 0 {
+			size = physPageSize
+		}
+		newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gc_sys))
+		if newArray == nil {
+			throw("out of memory allocating allArenas")
+		}
+		oldSlice := h.allArenas
+		*(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)}
+		copy(h.allArenas, oldSlice)
+		// Do not free the old backing array because
+		// there may be concurrent readers. Since we
+		// double the array each time, this can lead
+		// to at most 2x waste.
+	}
+	h.allArenas = h.allArenas[:len(h.allArenas)+1]
+	h.allArenas[len(h.allArenas)-1] = ri
+
 	// Store atomically just in case an object from the
 	// new heap arena becomes visible before the heap lock
 	// is released (which shouldn't happen, but there's
@@ -755,6 +782,9 @@ func nextFreeFast(s *mspan) gclinkptr {
 // weight allocation. If it is a heavy weight allocation the caller must
 // determine whether a new GC cycle needs to be started or if the GC is active
 // whether this goroutine needs to assist the GC.
+//
+// Must run in a non-preemptible context since otherwise the owner of
+// c could change.
 func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
 	s = c.alloc[spc]
 	shouldhelpgc = false
@@ -765,9 +795,7 @@ func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bo
 			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
 			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
 		}
-		systemstack(func() {
-			c.refill(spc)
-		})
+		c.refill(spc)
 		shouldhelpgc = true
 		s = c.alloc[spc]

@@ -1018,7 +1046,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {

 	if shouldhelpgc {
 		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
-			gcStart(gcBackgroundMode, t)
+			gcStart(t)
 		}
 	}

@@ -1076,10 +1104,11 @@ func newarray(typ *_type, n int) unsafe.Pointer {
 	if n == 1 {
 		return mallocgc(typ.size, typ, true)
 	}
-	if n < 0 || uintptr(n) > maxSliceCap(typ.size) {
+	mem, overflow := math.MulUintptr(typ.size, uintptr(n))
+	if overflow || mem > maxAlloc || n < 0 {
 		panic(plainError("runtime: allocation size out of range"))
 	}
-	return mallocgc(typ.size*uintptr(n), typ, true)
+	return mallocgc(mem, typ, true)
 }

 //go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
@@ -1164,6 +1193,15 @@ var globalAlloc struct {
 	persistentAlloc
 }

+// persistentChunkSize is the number of bytes we allocate when we grow
+// a persistentAlloc.
+const persistentChunkSize = 256 << 10
+
+// persistentChunks is a list of all the persistent chunks we have
+// allocated. The list is maintained through the first word in the
+// persistent chunk. This is updated atomically.
+var persistentChunks *notInHeap
+
 // Wrapper around sysAlloc that can allocate small chunks.
 // There is no associated free operation.
 // Intended for things like function/type/debug-related persistent data.
@@ -1184,7 +1222,6 @@ func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
 //go:systemstack
 func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap {
 	const (
-		chunk    = 256 << 10
 		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
 	)
@@ -1215,15 +1252,24 @@ func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap {
 		persistent = &globalAlloc.persistentAlloc
 	}
 	persistent.off = round(persistent.off, align)
-	if persistent.off+size > chunk || persistent.base == nil {
-		persistent.base = (*notInHeap)(sysAlloc(chunk, &memstats.other_sys))
+	if persistent.off+size > persistentChunkSize || persistent.base == nil {
+		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
 		if persistent.base == nil {
 			if persistent == &globalAlloc.persistentAlloc {
 				unlock(&globalAlloc.mutex)
 			}
 			throw("runtime: cannot allocate memory")
 		}
-		persistent.off = 0
+
+		// Add the new chunk to the persistentChunks list.
+		for {
+			chunks := uintptr(unsafe.Pointer(persistentChunks))
+			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
+			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
+				break
+			}
+		}
+		persistent.off = sys.PtrSize
 	}
 	p := persistent.base.add(persistent.off)
 	persistent.off += size
@@ -1239,6 +1285,21 @@ func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap {
 	return p
 }

+// inPersistentAlloc reports whether p points to memory allocated by
+// persistentalloc. This must be nosplit because it is called by the
+// cgo checker code, which is called by the write barrier code.
+//go:nosplit
+func inPersistentAlloc(p uintptr) bool {
+	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
+	for chunk != 0 {
+		if p >= chunk && p < chunk+persistentChunkSize {
+			return true
+		}
+		chunk = *(*uintptr)(unsafe.Pointer(chunk))
+	}
+	return false
+}
+
 // linearAlloc is a simple linear allocator that pre-reserves a region
 // of memory and then maps that region as needed. The caller is
 // responsible for locking.
--
cgit v1.1
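
The newarray hunk above replaces the old maxSliceCap guard with an overflow-checked multiply (math.MulUintptr from runtime/internal/math, which only the runtime can import). The sketch below is a standalone illustration of that mem/overflow pattern using math/bits.Mul in ordinary user code; checkedAllocSize and the maxAlloc constant here are illustrative assumptions, not runtime identifiers, and the sketch assumes a 64-bit uintptr.

// alloccheck_sketch.go: standalone illustration (not runtime code) of the
// overflow-checked allocation size computation introduced for newarray.
package main

import (
	"errors"
	"fmt"
	"math/bits"
)

// maxAlloc is an illustrative cap standing in for the runtime's
// platform-derived limit; assumes a 64-bit platform.
const maxAlloc = 1 << 47

// checkedAllocSize mirrors the "mem, overflow := MulUintptr(size, n)" pattern:
// reject the request if size*n overflows or exceeds the cap.
func checkedAllocSize(elemSize, n uintptr) (uintptr, error) {
	hi, lo := bits.Mul(uint(elemSize), uint(n)) // hi != 0 means the product overflowed
	if hi != 0 || uintptr(lo) > maxAlloc {
		return 0, errors.New("allocation size out of range")
	}
	return uintptr(lo), nil
}

func main() {
	if mem, err := checkedAllocSize(24, 1000000); err == nil {
		fmt.Println("ok:", mem) // ok: 24000000
	}
	if _, err := checkedAllocSize(1<<40, 1<<40); err != nil {
		fmt.Println("rejected:", err) // rejected: allocation size out of range
	}
}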