From 1a2f01efa63036a5104f203a4789e682c0e0915d Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor <iant@golang.org> Date: Tue, 9 Jan 2018 01:23:08 +0000 Subject: libgo: update to Go1.10beta1 Update the Go library to the 1.10beta1 release. Requires a few changes to the compiler for modifications to the map runtime code, and to handle some nowritebarrier cases in the runtime. Reviewed-on: https://go-review.googlesource.com/86455 gotools/: * Makefile.am (go_cmd_vet_files): New variable. (go_cmd_buildid_files, go_cmd_test2json_files): New variables. (s-zdefaultcc): Change from constants to functions. (noinst_PROGRAMS): Add vet, buildid, and test2json. (cgo$(EXEEXT)): Link against $(LIBGOTOOL). (vet$(EXEEXT)): New target. (buildid$(EXEEXT)): New target. (test2json$(EXEEXT)): New target. (install-exec-local): Install all $(noinst_PROGRAMS). (uninstall-local): Uninstall all $(noinst_PROGRAMS). (check-go-tool): Depend on $(noinst_PROGRAMS). Copy down objabi.go. (check-runtime): Depend on $(noinst_PROGRAMS). (check-cgo-test, check-carchive-test): Likewise. (check-vet): New target. (check): Depend on check-vet. Look at cmd_vet-testlog. (.PHONY): Add check-vet. * Makefile.in: Rebuild. From-SVN: r256365 --- libgo/go/runtime/malloc.go | 82 ++++++++++++++++++++++++++++++---------------- 1 file changed, 53 insertions(+), 29 deletions(-) (limited to 'libgo/go/runtime/malloc.go') diff --git a/libgo/go/runtime/malloc.go b/libgo/go/runtime/malloc.go index 796cd8a..88e4ba3 100644 --- a/libgo/go/runtime/malloc.go +++ b/libgo/go/runtime/malloc.go @@ -546,9 +546,8 @@ func nextFreeFast(s *mspan) gclinkptr { } s.allocCache >>= uint(theBit + 1) s.freeindex = freeidx - v := gclinkptr(result*s.elemsize + s.base()) s.allocCount++ - return v + return gclinkptr(result*s.elemsize + s.base()) } } return 0 @@ -877,6 +876,9 @@ func reflect_unsafe_New(typ *_type) unsafe.Pointer { // newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer { + if n == 1 { + return mallocgc(typ.size, typ, true) + } if n < 0 || uintptr(n) > maxSliceCap(typ.size) { panic(plainError("runtime: allocation size out of range")) } @@ -893,11 +895,13 @@ func profilealloc(mp *m, x unsafe.Pointer, size uintptr) { mProf_Malloc(x, size) } -// nextSample returns the next sampling point for heap profiling. -// It produces a random variable with a geometric distribution and -// mean MemProfileRate. This is done by generating a uniformly -// distributed random number and applying the cumulative distribution -// function for an exponential. +// nextSample returns the next sampling point for heap profiling. The goal is +// to sample allocations on average every MemProfileRate bytes, but with a +// completely random distribution over the allocation timeline; this +// corresponds to a Poisson process with parameter MemProfileRate. In Poisson +// processes, the distance between two samples follows the exponential +// distribution (exp(MemProfileRate)), so the best return value is a random +// number taken from an exponential distribution whose mean is MemProfileRate. func nextSample() int32 { if GOOS == "plan9" { // Plan 9 doesn't support floating point in note handler. @@ -906,25 +910,29 @@ func nextSample() int32 { } } - period := MemProfileRate + return fastexprand(MemProfileRate) +} - // make nextSample not overflow. Maximum possible step is - // -ln(1/(1<<randomBitCount)) * period, approximately 20 * period. +// fastexprand returns a random number from an exponential distribution with +// the specified mean. +func fastexprand(mean int) int32 { + // Avoid overflow if MemProfileRate is not a constant. switch { - case period > 0x7000000: - period = 0x7000000 - case period == 0: + case mean > 0x7000000: + mean = 0x7000000 + case mean == 0: return 0 } - // Let m be the sample rate, - // the probability distribution function is m*exp(-mx), so the CDF is - // p = 1 - exp(-mx), so - // q = 1 - p == exp(-mx) - // log_e(q) = -mx - // -log_e(q)/m = x - // x = -log_e(q) * period - // x = log_2(q) * (-log_e(2)) * period ; Using log_2 for efficiency + // Take a random sample of the exponential distribution exp(-mean*x).
+ // The probability distribution function is mean*exp(-mean*x), so the CDF is + // p = 1 - exp(-mean*x), so + // q = 1 - p == exp(-mean*x) + // log_e(q) = -mean*x + // -log_e(q)/mean = x + // x = -log_e(q) * mean + // x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency const randomBitCount = 26 q := fastrand()%(1<<randomBitCount) + 1 qlog := fastlog2(float64(q)) - randomBitCount if qlog > 0 { qlog = 0 } const minusLog2 = -0.6931471805599453 // -ln(2) - return int32(qlog*(minusLog2*float64(period))) + 1 + return int32(qlog*(minusLog2*float64(mean))) + 1 } // nextSampleNoFP is similar to nextSample, but uses older, @@ -946,7 +954,7 @@ type persistentAlloc struct { //go:notinheap type persistentAlloc struct { - base unsafe.Pointer + base *notInHeap off uintptr } @@ -958,16 +966,16 @@ func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer { func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer { - var p unsafe.Pointer + var p *notInHeap systemstack(func() { p = persistentalloc1(size, align, sysStat) }) - return p + return unsafe.Pointer(p) } // Must run on system stack because stack growth can (re)invoke it. // See issue 9174. //go:systemstack -func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer { +func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap { @@ -977,7 +985,7 @@ func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer { align = 8 } if size >= maxBlock { - return sysAlloc(size, sysStat) + return (*notInHeap)(sysAlloc(size, sysStat)) } mp := acquirem() @@ -1011,7 +1019,7 @@ func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer { } persistent.off = round(persistent.off, align) if persistent.off+size > chunk || persistent.base == nil { - persistent.base = sysAlloc(chunk, &memstats.other_sys) + persistent.base = (*notInHeap)(sysAlloc(chunk, &memstats.other_sys)) if persistent.base == nil { if persistent == &globalAlloc.persistentAlloc { unlock(&globalAlloc.mutex) @@ -1020,7 +1028,7 @@ func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer { } persistent.off = 0 } - p := add(persistent.base, persistent.off) + p := persistent.base.add(persistent.off) persistent.off += size releasem(mp) if persistent == &globalAlloc.persistentAlloc { @@ -1033,3 +1041,19 @@ func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer { } return p } + +// notInHeap is off-heap memory allocated by a lower-level allocator +// like sysAlloc or persistentAlloc. +// +// In general, it's better to use real types marked as go:notinheap, +// but this serves as a generic type for situations where that isn't +// possible (like in the allocators). +// +// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc? +// +//go:notinheap +type notInHeap struct{} + +func (p *notInHeap) add(bytes uintptr) *notInHeap { + return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes)) +} -- cgit v1.1