Diffstat (limited to 'libgo/go/runtime/malloc.go')
-rw-r--r--  libgo/go/runtime/malloc.go  116
1 file changed, 87 insertions(+), 29 deletions(-)
diff --git a/libgo/go/runtime/malloc.go b/libgo/go/runtime/malloc.go
index d3d88a2..7005e53 100644
--- a/libgo/go/runtime/malloc.go
+++ b/libgo/go/runtime/malloc.go
@@ -237,6 +237,7 @@ const (
// --------------  ---------  ----------  ----------  -----------
//       */64-bit         48        64MB           1   4M (32MB)
// windows/64-bit         48         4MB          64    1M (8MB)
+ //    ios/arm64         33         4MB           1  2048 (8KB)
//       */32-bit         32         4MB           1  1024 (4KB)
//     */mips(le)         31         4MB           1   512 (2KB)
@@ -257,7 +258,7 @@ const (
// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
// prefer using heapArenaBytes where possible (we need the
// constant to compute some other constants).
- logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoarchWasm)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (2+20)*sys.GoarchWasm
+ logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (2+20)*sys.GoarchWasm + (2+20)*sys.GoosIos*sys.GoarchArm64
// heapArenaBitmapBytes is the size of each heap arena's bitmap.
heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2)
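
As a quick check of the new ios/arm64 term (an editor's sketch, not part of the patch): with _64bit, sys.GoosIos and sys.GoarchArm64 all equal to 1 and the Windows/Wasm selectors 0, every term but the last one vanishes, giving logHeapArenaBytes = 22, i.e. 4MB arenas, and with 33 address bits that is 1<<(33-22) = 2048 arena-map entries, matching the new table row above.

    package main

    import "fmt"

    func main() {
        // Stand-ins for the sys.* selectors as they would be on ios/arm64.
        const _64bit, goosWindows, goarchWasm, goosIos, goarchArm64 = 1, 0, 0, 1, 1

        logHeapArenaBytes := (6+20)*(_64bit*(1-goosWindows)*(1-goarchWasm)*(1-goosIos*goarchArm64)) +
            (2+20)*(_64bit*goosWindows) + (2+20)*(1-_64bit) +
            (2+20)*goarchWasm + (2+20)*goosIos*goarchArm64

        const heapAddrBits = 33 // ios/arm64 row in the table above
        fmt.Println(logHeapArenaBytes)        // 22 → 4MB heap arenas
        fmt.Println(1 << (heapAddrBits - 22)) // 2048 arena-map entries
    }
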
@@ -305,7 +306,7 @@ const (
// high addresses if viewed as unsigned).
//
// On aix/ppc64, this offset allows to keep the heapAddrBits to
- // 48. Otherwize, it would be 60 in order to handle mmap addresses
+ // 48. Otherwise, it would be 60 in order to handle mmap addresses
// (in range 0x0a00000000000000 - 0x0afffffffffffff). But in this
// case, the memory reserved in (s *pageAlloc).init for chunks
// is causing important slowdowns.
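
A one-line check of the "60" figure above (editor's note, not runtime code): the quoted mmap range starts at 0x0a00000000000000, which lies between 1<<59 and 1<<60, so without the offset heapAddrBits would indeed need to be 60 to reach it.

    package main

    import "fmt"

    func main() {
        const aixMmapBase = 0x0a00000000000000 // start of the range quoted above
        fmt.Println(aixMmapBase > 1<<59, aixMmapBase < 1<<60) // true true → 60 bits needed
    }
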
@@ -579,7 +580,7 @@ func mallocinit() {
const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
meta := uintptr(sysReserve(nil, arenaMetaSize))
if meta != 0 {
- mheap_.heapArenaAlloc.init(meta, arenaMetaSize)
+ mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
}
// We want to start the arena low, but if we're linked
@@ -616,7 +617,7 @@ func mallocinit() {
for _, arenaSize := range &arenaSizes {
a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
if a != nil {
- mheap_.arena.init(uintptr(a), size)
+ mheap_.arena.init(uintptr(a), size, false)
p = mheap_.arena.end // For hint below
break
}
@@ -633,8 +634,8 @@ func mallocinit() {
// heapArenaBytes. sysAlloc returns nil on failure.
// There is no corresponding free function.
//
-// sysAlloc returns a memory region in the Prepared state. This region must
-// be transitioned to Ready before use.
+// sysAlloc returns a memory region in the Reserved state. This region must
+// be transitioned to Prepared and then Ready before use.
//
// h must be locked.
func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
@@ -736,9 +737,6 @@ func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
throw("misrounded allocation in sysAlloc")
}
- // Transition from Reserved to Prepared.
- sysMap(v, size, &memstats.heap_sys)
-
mapped:
// Create arena metadata.
for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
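
The two hunks above change sysAlloc's contract: with the sysMap call removed, callers now get back Reserved address space and are responsible for the remaining transitions themselves. A toy model of the state machine the doc comment refers to (editor's sketch with stand-in names; the runtime tracks these states implicitly rather than with an enum), annotated with the calls this diff associates with each step:

    package main

    import "fmt"

    type memState int

    const (
        stateReserved memState = iota // address space reserved (sysReserve)
        statePrepared                 // mapped and accounted for (sysMap)
        stateReady                    // usable by the allocator (sysUsed)
    )

    func (s memState) String() string {
        return [...]string{"Reserved", "Prepared", "Ready"}[s]
    }

    func main() {
        s := stateReserved // where (*mheap).sysAlloc now stops
        // The caller drives the rest, as linearAlloc.alloc does later in this
        // diff with sysMap followed by sysUsed.
        for s != stateReady {
            fmt.Println(s, "->", s+1)
            s++
        }
    }
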
@@ -947,7 +945,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
}
if inittrace.active && inittrace.id == getg().goid {
- // Init functions are executed sequentially in a single Go routine.
+ // Init functions are executed sequentially in a single goroutine.
inittrace.allocs += 1
}
}
@@ -1003,6 +1001,9 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
var span *mspan
var x unsafe.Pointer
noscan := typ == nil || typ.ptrdata == 0
+ // In some cases block zeroing can profitably (for latency reduction purposes)
+ // be delayed till preemption is possible; isZeroed tracks that state.
+ isZeroed := true
if size <= maxSmallSize {
if noscan && size < maxTinySize {
// Tiny allocator.
@@ -1074,7 +1075,8 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
(*[2]uint64)(x)[1] = 0
// See if we need to replace the existing tiny block with the new one
// based on amount of remaining free space.
- if size < c.tinyoffset || c.tiny == 0 {
+ if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
+ // Note: disabled when race detector is on, see comment near end of this function.
c.tiny = uintptr(x)
c.tinyoffset = size
}
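
The new !raceenabled guard above reads naturally as a small predicate (editor's sketch with stand-in parameters, not runtime code): the freshly carved 16-byte block becomes the cached tiny block only if it has more free space left than the current one, and never while the race detector is on, so that the end-of-block padding added later in this patch applies to every tiny allocation.

    package main

    import "fmt"

    // keepNewTinyBlock mirrors the condition above: size is the portion already
    // used in the fresh 16-byte block, tinyOffset the portion used in the cached
    // block, and tiny its base address (0 meaning no cached block exists).
    func keepNewTinyBlock(raceEnabled bool, size, tinyOffset, tiny uintptr) bool {
        return !raceEnabled && (size < tinyOffset || tiny == 0)
    }

    func main() {
        fmt.Println(keepNewTinyBlock(false, 8, 12, 0x1000)) // true: new block has more room left
        fmt.Println(keepNewTinyBlock(true, 8, 12, 0x1000))  // false: race detector disables reuse
    }
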
@@ -1100,7 +1102,9 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
}
} else {
shouldhelpgc = true
- span = c.allocLarge(size, needzero, noscan)
+ // For large allocations, keep track of zeroed state so that
+ // bulk zeroing can happen later in a preemptible context.
+ span, isZeroed = c.allocLarge(size, needzero && !noscan, noscan)
span.freeindex = 1
span.allocCount = 1
x = unsafe.Pointer(span.base())
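
Note the second argument to allocLarge: it changes from needzero to needzero && !noscan. Memory that will hold pointers must still be zeroed eagerly inside the allocation path, because deferring the clear would let the GC observe stale pointer words (the comment on memclrNoHeapPointersChunked below makes the same point); only pointer-free memory may come back with isZeroed == false and be cleared afterwards. Spelled out as a helper (editor's sketch, hypothetical name):

    package main

    import "fmt"

    // mustZeroEagerly reports whether a large allocation still has to be zeroed
    // inside allocLarge rather than deferred to the preemptible path added below.
    func mustZeroEagerly(needZero, noScan bool) bool {
        return needZero && !noScan // stale pointer words must never be visible to the GC
    }

    func main() {
        fmt.Println(mustZeroEagerly(true, true))  // false: pointer-free, clear may be deferred
        fmt.Println(mustZeroEagerly(true, false)) // true: contains pointers, clear eagerly
    }
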
@@ -1147,30 +1151,34 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
msanmalloc(x, size)
}
+ if rate := MemProfileRate; rate > 0 {
+ // Note cache c only valid while m acquired; see #47302
+ if rate != 1 && size < c.nextSample {
+ c.nextSample -= size
+ } else {
+ profilealloc(mp, x, size)
+ }
+ }
mp.mallocing = 0
releasem(mp)
+ // Pointerfree data can be zeroed late in a context where preemption can occur.
+ // x will keep the memory alive.
+ if !isZeroed && needzero {
+ memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
+ }
+
if debug.malloc {
if debug.allocfreetrace != 0 {
tracealloc(x, size, typ)
}
if inittrace.active && inittrace.id == getg().goid {
- // Init functions are executed sequentially in a single Go routine.
+ // Init functions are executed sequentially in a single goroutine.
inittrace.bytes += uint64(size)
}
}
- if rate := MemProfileRate; rate > 0 {
- if rate != 1 && size < c.nextSample {
- c.nextSample -= size
- } else {
- mp := acquirem()
- profilealloc(mp, x, size)
- releasem(mp)
- }
- }
-
if assistG != nil {
// Account for internal fragmentation in the assist
// debt now that we know it.
@@ -1183,6 +1191,22 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
}
}
+ if raceenabled && noscan && dataSize < maxTinySize {
+ // Pad tinysize allocations so they are aligned with the end
+ // of the tinyalloc region. This ensures that any arithmetic
+ // that goes off the top end of the object will be detectable
+ // by checkptr (issue 38872).
+ // Note that we disable tinyalloc when raceenabled for this to work.
+ // TODO: This padding is only performed when the race detector
+ // is enabled. It would be nice to enable it if any package
+ // was compiled with checkptr, but there's no easy way to
+ // detect that (especially at compile time).
+ // TODO: enable this padding for all allocations, not just
+ // tinyalloc ones. It's tricky because of pointer maps.
+ // Maybe just all noscan objects?
+ x = add(x, size-dataSize)
+ }
+
// Check preemption, since unlike gc we don't check on every call.
if getg().preempt {
checkPreempt()
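
The effect of the new x = add(x, size-dataSize) line is easiest to see with concrete numbers (editor's example, values are hypothetical): a 5-byte noscan object placed in a 16-byte tiny size class is shifted forward by 11 bytes so that it ends exactly where the block ends, and pointer arithmetic that runs off its top end therefore leaves the allocation and trips checkptr.

    package main

    import "fmt"

    func main() {
        const (
            base     = 0x1000 // hypothetical start of the 16-byte allocation
            size     = 16     // rounded size class
            dataSize = 5      // requested object size
        )
        x := base + (size - dataSize) // what add(x, size-dataSize) computes
        fmt.Printf("object occupies [%#x, %#x), block ends at %#x\n",
            x, x+dataSize, base+size)
        // object occupies [0x100b, 0x1010), block ends at 0x1010
    }
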
@@ -1195,6 +1219,33 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
return x
}
+// memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
+// on chunks of the buffer to be zeroed, with opportunities for preemption
+// along the way. memclrNoHeapPointers contains no safepoints and also
+// cannot be preemptively scheduled, so this provides a still-efficient
+// block copy that can also be preempted on a reasonable granularity.
+//
+// Use this with care; if the data being cleared is tagged to contain
+// pointers, this allows the GC to run before it is all cleared.
+func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
+ v := uintptr(x)
+ // got this from benchmarking. 128k is too small, 512k is too large.
+ const chunkBytes = 256 * 1024
+ vsize := v + size
+ for voff := v; voff < vsize; voff = voff + chunkBytes {
+ if getg().preempt {
+ // may hold locks, e.g., profiling
+ goschedguarded()
+ }
+ // clear min(avail, lump) bytes
+ n := vsize - voff
+ if n > chunkBytes {
+ n = chunkBytes
+ }
+ memclrNoHeapPointers(unsafe.Pointer(voff), n)
+ }
+}
+
// implementation of new builtin
// compiler (both frontend and SSA backend) knows the signature
// of this function
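
The chunking pattern in memclrNoHeapPointersChunked can be sketched outside the runtime (editor's analogy using an ordinary slice and runtime.Gosched as the yield point; memclrNoHeapPointers itself is internal and contains no safepoints): clear a large pointer-free buffer 256KiB at a time so the goroutine reaches a scheduling point between chunks instead of holding its thread for the whole clear.

    package main

    import (
        "fmt"
        "runtime"
    )

    // clearChunked zeroes buf in fixed-size chunks, yielding between chunks.
    // User-space stand-in for memclrNoHeapPointersChunked, not the real thing.
    func clearChunked(buf []byte) {
        const chunkBytes = 256 * 1024 // same granularity the runtime settled on above
        for off := 0; off < len(buf); off += chunkBytes {
            end := off + chunkBytes
            if end > len(buf) {
                end = len(buf)
            }
            chunk := buf[off:end]
            for i := range chunk {
                chunk[i] = 0 // the compiler lowers this loop to a memclr
            }
            runtime.Gosched() // give the scheduler a chance between chunks
        }
    }

    func main() {
        buf := make([]byte, 10<<20)
        clearChunked(buf)
        fmt.Println("cleared", len(buf), "bytes in 256KiB chunks")
    }
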
@@ -1425,15 +1476,19 @@ func inPersistentAlloc(p uintptr) bool {
}
// linearAlloc is a simple linear allocator that pre-reserves a region
-// of memory and then maps that region into the Ready state as needed. The
-// caller is responsible for locking.
+// of memory and then optionally maps that region into the Ready state
+// as needed.
+//
+// The caller is responsible for locking.
type linearAlloc struct {
next uintptr // next free byte
mapped uintptr // one byte past end of mapped space
end uintptr // end of reserved space
+
+ mapMemory bool // transition memory from Reserved to Ready if true
}
-func (l *linearAlloc) init(base, size uintptr) {
+func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
if base+size < base {
// Chop off the last byte. The runtime isn't prepared
// to deal with situations where the bounds could overflow.
@@ -1443,6 +1498,7 @@ func (l *linearAlloc) init(base, size uintptr) {
}
l.next, l.mapped = base, base
l.end = base + size
+ l.mapMemory = mapMemory
}
func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
@@ -1452,9 +1508,11 @@ func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Poi
}
l.next = p + size
if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
- // Transition from Reserved to Prepared to Ready.
- sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
- sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
+ if l.mapMemory {
+ // Transition from Reserved to Prepared to Ready.
+ sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
+ sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
+ }
l.mapped = pEnd
}
return unsafe.Pointer(p)
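
Taken together, the linearAlloc changes make the mapping step optional: mapMemory records at init time whether alloc should perform the Reserved→Prepared→Ready transition itself (the heapArenaAlloc case, init(..., true)) or skip it and hand out still-Reserved memory for the caller to transition, matching the new sysAlloc contract above (the arena case, init(..., false)). A stripped-down user-space sketch of the same shape (editor's illustration; a plain callback stands in for sysMap/sysUsed, and the addresses are just numbers, no real memory is reserved):

    package main

    import "fmt"

    func alignUp(p, align uintptr) uintptr { return (p + align - 1) &^ (align - 1) }

    // bumpAlloc mimics linearAlloc: a bump pointer over [base, end) with an
    // optional hook playing the role of the sysMap/sysUsed calls above.
    type bumpAlloc struct {
        next, mapped, end uintptr
        mapMemory         bool
        mapFn             func(lo, hi uintptr) // stand-in for sysMap + sysUsed
    }

    func (l *bumpAlloc) init(base, size uintptr, mapMemory bool, mapFn func(lo, hi uintptr)) {
        l.next, l.mapped, l.end = base, base, base+size
        l.mapMemory = mapMemory
        l.mapFn = mapFn
    }

    func (l *bumpAlloc) alloc(size, align, pageSize uintptr) uintptr {
        p := alignUp(l.next, align)
        if p+size > l.end {
            return 0 // out of reserved space
        }
        l.next = p + size
        if pEnd := alignUp(l.next-1, pageSize); pEnd > l.mapped {
            if l.mapMemory {
                l.mapFn(l.mapped, pEnd) // Reserved -> Prepared -> Ready
            }
            l.mapped = pEnd // advance even when mapping is left to the caller
        }
        return p
    }

    func main() {
        var a bumpAlloc
        a.init(0x100000, 1<<20, true, func(lo, hi uintptr) {
            fmt.Printf("map [%#x, %#x)\n", lo, hi)
        })
        fmt.Printf("alloc -> %#x\n", a.alloc(64<<10, 8, 4096))
        fmt.Printf("alloc -> %#x\n", a.alloc(64<<10, 8, 4096))
    }
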