From 5a8ea165926cb0737ab03bc48c18dc5198ab5305 Mon Sep 17 00:00:00 2001
From: Ian Lance Taylor
Date: Thu, 2 Jan 2020 15:05:27 -0800
Subject: libgo: update to Go1.14beta1

Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/214297
---
 libgo/go/runtime/malloc.go | 76 +++++++++++++++++++++++++++-------------------
 1 file changed, 44 insertions(+), 32 deletions(-)

(limited to 'libgo/go/runtime/malloc.go')

diff --git a/libgo/go/runtime/malloc.go b/libgo/go/runtime/malloc.go
index 0eee55e..fda2273 100644
--- a/libgo/go/runtime/malloc.go
+++ b/libgo/go/runtime/malloc.go
@@ -19,7 +19,7 @@
 // fixalloc: a free-list allocator for fixed-size off-heap objects,
 //	used to manage storage used by the allocator.
 // mheap: the malloc heap, managed at page (8192-byte) granularity.
-// mspan: a run of pages managed by the mheap.
+// mspan: a run of in-use pages managed by the mheap.
 // mcentral: collects all spans of a given size class.
 // mcache: a per-P cache of mspans with free space.
 // mstats: allocation statistics.
@@ -56,13 +56,8 @@
 //	it is placed on the mcentral free list for the mspan's size
 //	class.
 //
-// 3. Otherwise, if all objects in the mspan are free, the mspan
-//	is now "idle", so it is returned to the mheap and no longer
-//	has a size class.
-//	This may coalesce it with adjacent idle mspans.
-//
-// 4. If an mspan remains idle for long enough, return its pages
-//	to the operating system.
+// 3. Otherwise, if all objects in the mspan are free, the mspan's
+//	pages are returned to the mheap and the mspan is now dead.
 //
 // Allocating and freeing a large object uses the mheap
 // directly, bypassing the mcache and mcentral.
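A minimal sketch of the mspan lifecycle this hunk documents, using toy types (toySpan, toyHeap) that stand in for the runtime's real structures; freeing the last live object is what hands a span's pages back to the heap:

package main

import "fmt"

// toySpan stands in for an mspan: a run of pages that objects are
// allocated from, dead once every object in it has been freed.
type toySpan struct {
	allocCount int // objects still allocated from this span
	pages      int // pages backing this span
}

// toyHeap stands in for the mheap's supply of free pages.
type toyHeap struct {
	freePages int
}

// free returns one object to the span. When the last object is freed
// (step 3 in the comment above), the span's pages go back to the heap
// and the span is dead.
func (s *toySpan) free(h *toyHeap) (dead bool) {
	s.allocCount--
	if s.allocCount > 0 {
		return false
	}
	h.freePages += s.pages
	s.pages = 0
	return true
}

func main() {
	h := &toyHeap{}
	s := &toySpan{allocCount: 2, pages: 4}
	fmt.Println(s.free(h), h.freePages) // false 0: an object is still live
	fmt.Println(s.free(h), h.freePages) // true 4: span dead, pages returned
}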
@@ -207,17 +202,21 @@ const (
 	// exceed Go's 48 bit limit, it's extremely unlikely in
 	// practice.
 	//
-	// On aix/ppc64, the limits is increased to 1<<60 to accept addresses
-	// returned by mmap syscall. These are in range:
-	// 0x0a00000000000000 - 0x0afffffffffffff
-	//
 	// On 32-bit platforms, we accept the full 32-bit address
 	// space because doing so is cheap.
 	// mips32 only has access to the low 2GB of virtual memory, so
 	// we further limit it to 31 bits.
 	//
+	// On darwin/arm64, although 64-bit pointers are presumably
+	// available, pointers are truncated to 33 bits. Furthermore,
+	// only the top 4 GiB of the address space are actually available
+	// to the application, but we allow the whole 33 bits anyway for
+	// simplicity.
+	// TODO(mknyszek): Consider limiting it to 32 bits and using
+	// arenaBaseOffset to offset into the top 4 GiB.
+	//
 	// WebAssembly currently has a limit of 4GB linear memory.
-	heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosAix))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 60*(sys.GoosAix*_64bit)
+	heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosDarwin*sys.GoarchArm64))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 33*sys.GoosDarwin*sys.GoarchArm64
 
 	// maxAlloc is the maximum size of an allocation. On 64-bit,
 	// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
 	// 32-bit, however, this is one less than 1<<32 because the

[...]

+	if physPageSize > maxPhysPageSize {
+		print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
+		throw("bad system page size")
+	}
 	if physPageSize < minPhysPageSize {
 		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
 		throw("bad system page size")
 	}
@@ -456,6 +461,13 @@ func mallocinit() {
 		print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
 		throw("bad system huge page size")
 	}
+	if physHugePageSize > maxPhysHugePageSize {
+		// physHugePageSize is greater than the maximum supported huge page size.
+		// Don't throw here, like in the other cases, since a system configured
+		// in this way isn't wrong, we just don't have the code to support them.
+		// Instead, silently set the huge page size to zero.
+		physHugePageSize = 0
+	}
 	if physHugePageSize != 0 {
 		// Since physHugePageSize is a power of 2, it suffices to increase
 		// physHugePageShift until 1<<physHugePageShift == physHugePageSize.

[...]

-	persistent.off = round(persistent.off, align)
+	persistent.off = alignUp(persistent.off, align)
 	if persistent.off+size > persistentChunkSize || persistent.base == nil {
 		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
 		if persistent.base == nil {
@@ -1356,7 +1368,7 @@ func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap {
 				break
 			}
 		}
-		persistent.off = round(sys.PtrSize, align)
+		persistent.off = alignUp(sys.PtrSize, align)
 	}
 	p := persistent.base.add(persistent.off)
 	persistent.off += size
@@ -1402,12 +1414,12 @@ func (l *linearAlloc) init(base, size uintptr) {
 }
 
 func (l *linearAlloc) alloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
-	p := round(l.next, align)
+	p := alignUp(l.next, align)
 	if p+size > l.end {
 		return nil
 	}
 	l.next = p + size
-	if pEnd := round(l.next-1, physPageSize); pEnd > l.mapped {
+	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
 		// Transition from Reserved to Prepared to Ready.
 		sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
 		sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
-- 
cgit v1.1
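The round-to-alignUp renames in the later hunks are mechanical; the helper's definition is not part of this diff, but assuming the usual power-of-two rounding used elsewhere in the runtime, its effect and the physHugePageShift loop from the mallocinit hunk can be sketched as:

package main

import "fmt"

// alignUp rounds n up to a multiple of a, which must be a power of
// two (a sketch of the helper this patch renames round to; the real
// definition lives outside this diff).
func alignUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

func main() {
	// persistentalloc and linearAlloc round offsets up to an
	// alignment boundary before carving out an allocation:
	fmt.Println(alignUp(13, 8)) // 16

	// mallocinit derives physHugePageShift by increasing the shift
	// until 1<<shift equals the power-of-two huge page size
	// (2 MiB assumed here for illustration):
	physHugePageSize := uintptr(2 << 20)
	shift := uint(0)
	for uintptr(1)<<shift != physHugePageSize {
		shift++
	}
	fmt.Println(shift) // 21
}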