author     Ian Lance Taylor <iant@golang.org>  2020-12-23 09:57:37 -0800
committer  Ian Lance Taylor <iant@golang.org>  2020-12-30 15:13:24 -0800
commit     cfcbb4227fb20191e04eb8d7766ae6202f526afd (patch)
tree       e2effea96f6f204451779f044415c2385e45042b /libgo/go/runtime/malloc.go
parent     0696141107d61483f38482b941549959a0d7f613 (diff)
libgo: update to Go1.16beta1 release
This does not yet include support for the //go:embed directive added
in this release.

	* Makefile.am (check-runtime): Don't create check-runtime-dir.
	(mostlyclean-local): Don't remove check-runtime-dir.
	(check-go-tool, check-vet): Copy in go.mod and modules.txt.
	(check-cgo-test, check-carchive-test): Add go.mod file.
	* Makefile.in: Regenerate.

Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/280172
Diffstat (limited to 'libgo/go/runtime/malloc.go')
-rw-r--r--  libgo/go/runtime/malloc.go  184
1 file changed, 84 insertions(+), 100 deletions(-)
diff --git a/libgo/go/runtime/malloc.go b/libgo/go/runtime/malloc.go
index feb043a..d3d88a2 100644
--- a/libgo/go/runtime/malloc.go
+++ b/libgo/go/runtime/malloc.go
@@ -208,7 +208,7 @@ const (
// mips32 only has access to the low 2GB of virtual memory, so
// we further limit it to 31 bits.
//
- // On darwin/arm64, although 64-bit pointers are presumably
+ // On ios/arm64, although 64-bit pointers are presumably
// available, pointers are truncated to 33 bits. Furthermore,
// only the top 4 GiB of the address space are actually available
// to the application, but we allow the whole 33 bits anyway for
@@ -217,7 +217,7 @@ const (
// arenaBaseOffset to offset into the top 4 GiB.
//
// WebAssembly currently has a limit of 4GB linear memory.
- heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosDarwin*sys.GoarchArm64))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 33*sys.GoosDarwin*sys.GoarchArm64
+ heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 33*sys.GoosIos*sys.GoarchArm64
// maxAlloc is the maximum size of an allocation. On 64-bit,
// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
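
For reference, the heapAddrBits selector in the hunk above reduces to a small integer expression. The sketch below evaluates it for a few GOOS/GOARCH combinations, with local 0/1 parameters standing in for the sys.Goos*/sys.Goarch* flags; the function name and packaging are illustrative, not part of the runtime.

package main

import "fmt"

// heapAddrBitsFor mirrors the selector expression from the hunk above, with
// the sys.Goos*/sys.Goarch* flags passed in as 0/1 integers.
func heapAddrBitsFor(is64bit, wasm, ios, arm64, mips, mipsle int) int {
	return (is64bit*(1-wasm)*(1-ios*arm64))*48 +
		(1-is64bit+wasm)*(32-(mips+mipsle)) +
		33*ios*arm64
}

func main() {
	fmt.Println(heapAddrBitsFor(1, 0, 0, 0, 0, 0)) // e.g. linux/amd64: 48
	fmt.Println(heapAddrBitsFor(1, 0, 1, 1, 0, 0)) // ios/arm64:       33
	fmt.Println(heapAddrBitsFor(1, 1, 0, 0, 0, 0)) // js/wasm:         32
	fmt.Println(heapAddrBitsFor(0, 0, 0, 0, 1, 0)) // 32-bit mips:     31
}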
@@ -525,14 +525,22 @@ func mallocinit() {
// However, on arm64, we ignore all this advice above and slam the
// allocation at 0x40 << 32 because when using 4k pages with 3-level
// translation buffers, the user address space is limited to 39 bits
- // On darwin/arm64, the address space is even smaller.
+ // On ios/arm64, the address space is even smaller.
//
// On AIX, mmaps starts at 0x0A00000000000000 for 64-bit.
// processes.
for i := 0x7f; i >= 0; i-- {
var p uintptr
switch {
- case GOARCH == "arm64" && GOOS == "darwin":
+ case raceenabled:
+ // The TSAN runtime requires the heap
+ // to be in the range [0x00c000000000,
+ // 0x00e000000000).
+ p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
+ if p >= uintptrMask&0x00e000000000 {
+ continue
+ }
+ case GOARCH == "arm64" && GOOS == "ios":
p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
case GOARCH == "arm64":
p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
@@ -543,14 +551,6 @@ func mallocinit() {
continue
}
p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
- case raceenabled:
- // The TSAN runtime requires the heap
- // to be in the range [0x00c000000000,
- // 0x00e000000000).
- p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
- if p >= uintptrMask&0x00e000000000 {
- continue
- }
default:
p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
}
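
The hunk above moves the raceenabled case ahead of the arm64 cases, presumably so that it takes precedence over the arm64-specific hints when the race detector is enabled: the TSAN runtime only accepts a heap inside [0x00c000000000, 0x00e000000000). Below is a standalone sketch of the hint enumeration for that case, assuming a 64-bit platform; nothing in it is runtime API.

package main

import "fmt"

// Enumerate the race-detector heap hints that the loop in mallocinit would
// try, mirroring the case shown above. Candidates outside the TSAN range
// [0x00c000000000, 0x00e000000000) are skipped.
func main() {
	const uintptrMask = 1<<64 - 1 // assumes a 64-bit platform
	for i := 0x7f; i >= 0; i-- {
		p := uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
		if p >= uintptrMask&0x00e000000000 {
			continue // candidate falls outside the TSAN range; try the next one
		}
		fmt.Printf("heap hint: %#x\n", p)
	}
}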
@@ -638,6 +638,8 @@ func mallocinit() {
//
// h must be locked.
func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
+ assertLockHeld(&h.lock)
+
n = alignUp(n, heapArenaBytes)
// First, try the arena pre-reservation.
@@ -754,9 +756,9 @@ mapped:
throw("arena already initialized")
}
var r *heapArena
- r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gc_sys))
+ r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
if r == nil {
- r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gc_sys))
+ r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
if r == nil {
throw("out of memory allocating heap arena metadata")
}
@@ -768,7 +770,7 @@ mapped:
if size == 0 {
size = physPageSize
}
- newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gc_sys))
+ newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gcMiscSys))
if newArray == nil {
throw("out of memory allocating allArenas")
}
@@ -920,27 +922,34 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
return unsafe.Pointer(&zerobase)
}
- if debug.sbrk != 0 {
- align := uintptr(16)
- if typ != nil {
- // TODO(austin): This should be just
- // align = uintptr(typ.align)
- // but that's only 4 on 32-bit platforms,
- // even if there's a uint64 field in typ (see #599).
- // This causes 64-bit atomic accesses to panic.
- // Hence, we use stricter alignment that matches
- // the normal allocator better.
- if size&7 == 0 {
- align = 8
- } else if size&3 == 0 {
- align = 4
- } else if size&1 == 0 {
- align = 2
- } else {
- align = 1
+ if debug.malloc {
+ if debug.sbrk != 0 {
+ align := uintptr(16)
+ if typ != nil {
+ // TODO(austin): This should be just
+ // align = uintptr(typ.align)
+ // but that's only 4 on 32-bit platforms,
+ // even if there's a uint64 field in typ (see #599).
+ // This causes 64-bit atomic accesses to panic.
+ // Hence, we use stricter alignment that matches
+ // the normal allocator better.
+ if size&7 == 0 {
+ align = 8
+ } else if size&3 == 0 {
+ align = 4
+ } else if size&1 == 0 {
+ align = 2
+ } else {
+ align = 1
+ }
}
+ return persistentalloc(size, align, &memstats.other_sys)
+ }
+
+ if inittrace.active && inittrace.id == getg().goid {
+ // Init functions are executed sequentially in a single Go routine.
+ inittrace.allocs += 1
}
- return persistentalloc(size, align, &memstats.other_sys)
}
// When using gccgo, when a cgo or SWIG function has an
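
The inittrace counting added above only fires on the goroutine that runs package initializers. The following is a hypothetical user-space stand-in for that bookkeeping; the runtime's real inittrace state is not exported, and goroutine ids are faked here.

package main

import "fmt"

// initTracer loosely mirrors the inittrace fields used in the hunk above:
// while package initializers run (active == true) on the goroutine whose id
// was recorded, every allocation bumps allocs (and, later in mallocgc, bytes).
type initTracer struct {
	active bool
	id     uint64
	allocs uint64
	bytes  uint64
}

func (t *initTracer) noteAlloc(goid, size uint64) {
	if t.active && t.id == goid {
		// Init functions are executed sequentially in a single goroutine,
		// so plain counters suffice in this sketch.
		t.allocs++
		t.bytes += size
	}
}

func main() {
	tr := &initTracer{active: true, id: 1}
	tr.noteAlloc(1, 64) // counted: allocation on the init goroutine
	tr.noteAlloc(7, 32) // ignored: some other goroutine
	fmt.Println(tr.allocs, tr.bytes) // 1 64
}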
@@ -987,18 +996,9 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
shouldhelpgc := false
dataSize := size
- var c *mcache
- if mp.p != 0 {
- c = mp.p.ptr().mcache
- } else {
- // We will be called without a P while bootstrapping,
- // in which case we use mcache0, which is set in mallocinit.
- // mcache0 is cleared when bootstrapping is complete,
- // by procresize.
- c = mcache0
- if c == nil {
- throw("malloc called with no P")
- }
+ c := getMCache()
+ if c == nil {
+ throw("mallocgc called without a P or outside bootstrapping")
}
var span *mspan
var x unsafe.Pointer
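
getMCache is new in this release; judging from the inline logic it replaces, it presumably returns the current P's mcache and falls back to mcache0 during bootstrap. A simplified sketch with stub types follows; none of these are the runtime's real definitions.

package main

import "fmt"

// Stub types standing in for the runtime's m, p and mcache. This sketches
// only the logic the removed inline code implemented: use the current P's
// cache, fall back to mcache0 while bootstrapping.
type mcache struct{ name string }
type p struct{ mcache *mcache }
type m struct{ p *p }

var mcache0 = &mcache{name: "mcache0"} // set in mallocinit, cleared by procresize

func getMCacheSketch(mp *m) *mcache {
	if mp.p != nil {
		return mp.p.mcache
	}
	return mcache0 // may be nil once bootstrapping is done; callers must check
}

func main() {
	withP := &m{p: &p{mcache: &mcache{name: "per-P mcache"}}}
	noP := &m{}
	fmt.Println(getMCacheSketch(withP).name) // per-P mcache
	fmt.Println(getMCacheSketch(noP).name)   // mcache0
}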
@@ -1038,6 +1038,14 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// Align tiny pointer for required (conservative) alignment.
if size&7 == 0 {
off = alignUp(off, 8)
+ } else if sys.PtrSize == 4 && size == 12 {
+ // Conservatively align 12-byte objects to 8 bytes on 32-bit
+ // systems so that objects whose first field is a 64-bit
+ // value is aligned to 8 bytes and does not cause a fault on
+ // atomic access. See issue 37262.
+ // TODO(mknyszek): Remove this workaround if/when issue 36606
+ // is resolved.
+ off = alignUp(off, 8)
} else if size&3 == 0 {
off = alignUp(off, 4)
} else if size&1 == 0 {
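
The worked example below shows why the new 12-byte case matters on 32-bit systems. It uses a local copy of alignUp, which I believe matches the runtime's (n + a - 1) &^ (a - 1) round-up helper.

package main

import "fmt"

// alignUp rounds n up to a multiple of a, where a is a power of two.
func alignUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

func main() {
	// A 12-byte object whose first field is a uint64, allocated when the
	// tiny-block offset is 4 on a 32-bit system: the old size&3 rule gave
	// only 4-byte alignment, so a 64-bit atomic on the first field could
	// fault (issue 37262). The new case rounds up to 8 instead.
	off := uintptr(4)
	fmt.Println(alignUp(off, 4)) // 4: old behaviour for sizes divisible by 4
	fmt.Println(alignUp(off, 8)) // 8: new behaviour for 12-byte objects on 32-bit
}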
@@ -1047,7 +1055,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// The object fits into existing tiny block.
x = unsafe.Pointer(c.tiny + off)
c.tinyoffset = off + size
- c.local_tinyallocs++
+ c.tinyAllocs++
mp.mallocing = 0
releasem(mp)
if incallback {
@@ -1092,9 +1100,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
}
} else {
shouldhelpgc = true
- systemstack(func() {
- span = largeAlloc(size, needzero, noscan)
- })
+ span = c.allocLarge(size, needzero, noscan)
span.freeindex = 1
span.allocCount = 1
x = unsafe.Pointer(span.base())
@@ -1114,7 +1120,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
} else {
scanSize = typ.ptrdata
}
- c.local_scan += scanSize
+ c.scanAlloc += scanSize
}
// Ensure that the stores above that initialize x to
@@ -1144,13 +1150,20 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
mp.mallocing = 0
releasem(mp)
- if debug.allocfreetrace != 0 {
- tracealloc(x, size, typ)
+ if debug.malloc {
+ if debug.allocfreetrace != 0 {
+ tracealloc(x, size, typ)
+ }
+
+ if inittrace.active && inittrace.id == getg().goid {
+ // Init functions are executed sequentially in a single Go routine.
+ inittrace.bytes += uint64(size)
+ }
}
if rate := MemProfileRate; rate > 0 {
- if rate != 1 && size < c.next_sample {
- c.next_sample -= size
+ if rate != 1 && size < c.nextSample {
+ c.nextSample -= size
} else {
mp := acquirem()
profilealloc(mp, x, size)
@@ -1182,37 +1195,6 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
return x
}
-func largeAlloc(size uintptr, needzero bool, noscan bool) *mspan {
- // print("largeAlloc size=", size, "\n")
-
- if size+_PageSize < size {
- throw("out of memory")
- }
- npages := size >> _PageShift
- if size&_PageMask != 0 {
- npages++
- }
-
- // Deduct credit for this span allocation and sweep if
- // necessary. mHeap_Alloc will also sweep npages, so this only
- // pays the debt down to npage pages.
- deductSweepCredit(npages*_PageSize, npages)
-
- spc := makeSpanClass(0, noscan)
- s := mheap_.alloc(npages, spc, needzero)
- if s == nil {
- throw("out of memory")
- }
- if go115NewMCentralImpl {
- // Put the large span in the mcentral swept list so that it's
- // visible to the background sweeper.
- mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
- }
- s.limit = s.base() + size
- heapBitsForAddr(s.base()).initSpan(s)
- return s
-}
-
// implementation of new builtin
// compiler (both frontend and SSA backend) knows the signature
// of this function
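
The deleted largeAlloc (its work now lives behind mcache.allocLarge) rounded the request up to whole runtime pages before asking the heap for a span. Here is a standalone sketch of just that rounding, assuming the usual 8 KiB runtime page (_PageShift = 13); the names are local stand-ins.

package main

import "fmt"

const (
	pageShift = 13 // assumed to match the runtime's _PageShift
	pageSize  = 1 << pageShift
	pageMask  = pageSize - 1
)

// pagesFor mirrors the page-count computation from the removed largeAlloc:
// overflow check, shift, and round up any partial page.
func pagesFor(size uintptr) uintptr {
	if size+pageSize < size {
		panic("out of memory") // overflow check, as in the removed code
	}
	npages := size >> pageShift
	if size&pageMask != 0 {
		npages++
	}
	return npages
}

func main() {
	fmt.Println(pagesFor(4 * pageSize))   // 4: exact multiple of the page size
	fmt.Println(pagesFor(4*pageSize + 1)) // 5: a partial page rounds up
}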
@@ -1248,16 +1230,11 @@ func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
}
func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
- var c *mcache
- if mp.p != 0 {
- c = mp.p.ptr().mcache
- } else {
- c = mcache0
- if c == nil {
- throw("profilealloc called with no P")
- }
+ c := getMCache()
+ if c == nil {
+ throw("profilealloc called without a P or outside bootstrapping")
}
- c.next_sample = nextSample()
+ c.nextSample = nextSample()
mProf_Malloc(x, size)
}
@@ -1269,6 +1246,13 @@ func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
// distribution (exp(MemProfileRate)), so the best return value is a random
// number taken from an exponential distribution whose mean is MemProfileRate.
func nextSample() uintptr {
+ if MemProfileRate == 1 {
+ // Callers assign our return value to
+ // mcache.next_sample, but next_sample is not used
+ // when the rate is 1. So avoid the math below and
+ // just return something.
+ return 0
+ }
if GOOS == "plan9" {
// Plan 9 doesn't support floating point in note handler.
if g := getg(); g == g.m.gsignal {
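
nextSample's doc comment above describes the sampling scheme: the distance to the next profiled allocation follows an exponential distribution with mean MemProfileRate. Below is a floating-point sketch of that idea; the runtime's real nextSample uses a fixed-point algorithm, and only the rate == 1 early-out mirrors the new code above.

package main

import (
	"fmt"
	"math/rand"
)

// nextSampleSketch draws the distance (in bytes) to the next sampled
// allocation from an exponential distribution with the given mean.
func nextSampleSketch(memProfileRate int) uintptr {
	if memProfileRate == 1 {
		// Every allocation is sampled, so the distance is never consulted.
		return 0
	}
	return uintptr(rand.ExpFloat64() * float64(memProfileRate))
}

func main() {
	fmt.Println(nextSampleSketch(1))          // always 0
	fmt.Println(nextSampleSketch(512 * 1024)) // ~512 KiB on average
}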
@@ -1349,7 +1333,7 @@ var persistentChunks *notInHeap
// The returned memory will be zeroed.
//
// Consider marking persistentalloc'd types go:notinheap.
-func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
+func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
var p *notInHeap
systemstack(func() {
p = persistentalloc1(size, align, sysStat)
@@ -1360,7 +1344,7 @@ func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//go:systemstack
-func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap {
+func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
const (
maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
)
@@ -1419,8 +1403,8 @@ func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap {
}
if sysStat != &memstats.other_sys {
- mSysStatInc(sysStat, size)
- mSysStatDec(&memstats.other_sys, size)
+ sysStat.add(int64(size))
+ memstats.other_sys.add(-int64(size))
}
return p
}
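
sysMemStat replaces the old raw uint64 counters and the mSysStatInc/mSysStatDec pair; the two add calls above move size bytes from other_sys to the caller's stat. A minimal sketch of such an atomically updated counter follows; the runtime's actual type also performs underflow checks, which this one omits.

package main

import (
	"fmt"
	"sync/atomic"
)

// sysMemStat here is a cumulative byte counter updated atomically.
type sysMemStat uint64

func (s *sysMemStat) add(n int64) {
	// Adding the two's complement of n subtracts when n is negative.
	atomic.AddUint64((*uint64)(s), uint64(n))
}

func main() {
	var gcMiscSys, otherSys sysMemStat
	otherSys.add(4096)

	size := int64(1024)
	gcMiscSys.add(size)  // credit the caller's stat
	otherSys.add(-size)  // debit other_sys, as persistentalloc1 does above
	fmt.Println(gcMiscSys, otherSys) // 1024 3072
}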
@@ -1461,7 +1445,7 @@ func (l *linearAlloc) init(base, size uintptr) {
l.end = base + size
}
-func (l *linearAlloc) alloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
+func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
p := alignUp(l.next, align)
if p+size > l.end {
return nil
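
As the truncated hunk above shows, linearAlloc is a simple bump allocator over a reserved range: align the cursor, fail when the range is exhausted, otherwise advance. The self-contained sketch below uses plain offsets instead of mapped memory, so it returns an offset and a bool rather than an unsafe.Pointer.

package main

import "fmt"

type linearAlloc struct {
	next, end uintptr
}

func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

// alloc mirrors the shape of linearAlloc.alloc shown above.
func (l *linearAlloc) alloc(size, align uintptr) (uintptr, bool) {
	p := alignUp(l.next, align)
	if p+size > l.end {
		return 0, false // reservation exhausted; the runtime falls back elsewhere
	}
	l.next = p + size
	return p, true
}

func main() {
	l := &linearAlloc{next: 0x1000, end: 0x1000 + 64}
	fmt.Println(l.alloc(24, 8)) // 4096 true
	fmt.Println(l.alloc(48, 8)) // 0 false: only 40 bytes remain in the window
}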