author     Ian Lance Taylor <iant@golang.org>  2018-01-09 01:23:08 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>  2018-01-09 01:23:08 +0000
commit     1a2f01efa63036a5104f203a4789e682c0e0915d
tree       373e15778dc8295354584e1f86915ae493b604ff  /libgo/go/runtime/malloc.go
parent     8799df67f2dab88f9fda11739c501780a85575e2
libgo: update to Go1.10beta1
Update the Go library to the 1.10beta1 release. Requires a few changes
to the compiler for modifications to the map runtime code, and to
handle some nowritebarrier cases in the runtime.

Reviewed-on: https://go-review.googlesource.com/86455

gotools/:
	* Makefile.am (go_cmd_vet_files): New variable.
	(go_cmd_buildid_files, go_cmd_test2json_files): New variables.
	(s-zdefaultcc): Change from constants to functions.
	(noinst_PROGRAMS): Add vet, buildid, and test2json.
	(cgo$(EXEEXT)): Link against $(LIBGOTOOL).
	(vet$(EXEEXT)): New target.
	(buildid$(EXEEXT)): New target.
	(test2json$(EXEEXT)): New target.
	(install-exec-local): Install all $(noinst_PROGRAMS).
	(uninstall-local): Uninstall all $(noinst_PROGRAMS).
	(check-go-tool): Depend on $(noinst_PROGRAMS). Copy down objabi.go.
	(check-runtime): Depend on $(noinst_PROGRAMS).
	(check-cgo-test, check-carchive-test): Likewise.
	(check-vet): New target.
	(check): Depend on check-vet. Look at cmd_vet-testlog.
	(.PHONY): Add check-vet.
	* Makefile.in: Rebuild.

From-SVN: r256365
Diffstat (limited to 'libgo/go/runtime/malloc.go')
-rw-r--r--  libgo/go/runtime/malloc.go  |  82
1 file changed, 53 insertions(+), 29 deletions(-)
diff --git a/libgo/go/runtime/malloc.go b/libgo/go/runtime/malloc.go
index 796cd8a..88e4ba3 100644
--- a/libgo/go/runtime/malloc.go
+++ b/libgo/go/runtime/malloc.go
@@ -546,9 +546,8 @@ func nextFreeFast(s *mspan) gclinkptr {
}
s.allocCache >>= uint(theBit + 1)
s.freeindex = freeidx
- v := gclinkptr(result*s.elemsize + s.base())
s.allocCount++
- return v
+ return gclinkptr(result*s.elemsize + s.base())
}
}
return 0
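
[Note: the hunk above merely folds the temporary v into the return statement. For context, the surrounding fast path works by scanning the span's 64-bit allocCache for the lowest set (free) bit and shifting past it. A minimal sketch of that shift-and-count pattern follows, with simplified bookkeeping standing in for the real mspan fields and sys.Ctz64:]

package main

import (
	"fmt"
	"math/bits"
)

// nextFree mimics nextFreeFast's bit scan: find the lowest set bit in the
// cache word, derive the object index from it, and shift the consumed bit
// (plus the zeros below it) out of the cache word.
func nextFree(allocCache uint64, freeindex uintptr) (idx uintptr, newCache uint64, ok bool) {
	theBit := bits.TrailingZeros64(allocCache)
	if theBit == 64 {
		return 0, allocCache, false // cache word exhausted, no free slot
	}
	idx = freeindex + uintptr(theBit)
	newCache = allocCache >> uint(theBit+1)
	return idx, newCache, true
}

func main() {
	idx, cache, ok := nextFree(0xB0 /* 0b10110000 */, 0)
	fmt.Println(idx, cache, ok) // 4 5 true
}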
@@ -877,6 +876,9 @@ func reflect_unsafe_New(typ *_type) unsafe.Pointer {
// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer {
+ if n == 1 {
+ return mallocgc(typ.size, typ, true)
+ }
if n < 0 || uintptr(n) > maxSliceCap(typ.size) {
panic(plainError("runtime: allocation size out of range"))
}
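
[Note: the added n == 1 fast path can skip the bounds check because a single element of size typ.size cannot overflow the n*size multiplication. A hedged sketch of the guard the general path performs; maxSliceCap itself is more involved, so a plain division bound is used here as a stand-in:]

// checkedArrayBytes returns n*elemSize, rejecting negative counts and
// products that would overflow uintptr. It mirrors the shape of the
// n < 0 || uintptr(n) > maxSliceCap(typ.size) guard, not its exact bound.
func checkedArrayBytes(elemSize uintptr, n int) (uintptr, bool) {
	if elemSize == 0 {
		return 0, n >= 0
	}
	if n < 0 || uintptr(n) > ^uintptr(0)/elemSize {
		return 0, false // n*elemSize would overflow
	}
	return elemSize * uintptr(n), true
}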
@@ -893,11 +895,13 @@ func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
mProf_Malloc(x, size)
}
-// nextSample returns the next sampling point for heap profiling.
-// It produces a random variable with a geometric distribution and
-// mean MemProfileRate. This is done by generating a uniformly
-// distributed random number and applying the cumulative distribution
-// function for an exponential.
+// nextSample returns the next sampling point for heap profiling. The goal is
+// to sample allocations on average every MemProfileRate bytes, but with a
+// completely random distribution over the allocation timeline; this
+// corresponds to a Poisson process with parameter MemProfileRate. In Poisson
+// processes, the distance between two samples follows the exponential
+// distribution (exp(MemProfileRate)), so the best return value is a random
+// number taken from an exponential distribution whose mean is MemProfileRate.
func nextSample() int32 {
if GOOS == "plan9" {
// Plan 9 doesn't support floating point in note handler.
@@ -906,25 +910,29 @@ func nextSample() int32 {
}
}
- period := MemProfileRate
+ return fastexprand(MemProfileRate)
+}
- // make nextSample not overflow. Maximum possible step is
- // -ln(1/(1<<kRandomBitCount)) * period, approximately 20 * period.
+// fastexprand returns a random number from an exponential distribution with
+// the specified mean.
+func fastexprand(mean int) int32 {
+ // Avoid overflow. Maximum possible step is
+ // -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
switch {
- case period > 0x7000000:
- period = 0x7000000
- case period == 0:
+ case mean > 0x7000000:
+ mean = 0x7000000
+ case mean == 0:
return 0
}
- // Let m be the sample rate,
- // the probability distribution function is m*exp(-mx), so the CDF is
- // p = 1 - exp(-mx), so
- // q = 1 - p == exp(-mx)
- // log_e(q) = -mx
- // -log_e(q)/m = x
- // x = -log_e(q) * period
- // x = log_2(q) * (-log_e(2)) * period ; Using log_2 for efficiency
+ // Take a random sample of the exponential distribution exp(-mean*x).
+ // The probability distribution function is mean*exp(-mean*x), so the CDF is
+ // p = 1 - exp(-mean*x), so
+ // q = 1 - p == exp(-mean*x)
+ // log_e(q) = -mean*x
+ // -log_e(q)/mean = x
+ // x = -log_e(q) * mean
+ // x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency
const randomBitCount = 26
q := fastrand()%(1<<randomBitCount) + 1
qlog := fastlog2(float64(q)) - randomBitCount
@@ -932,7 +940,7 @@ func nextSample() int32 {
qlog = 0
}
const minusLog2 = -0.6931471805599453 // -ln(2)
- return int32(qlog*(minusLog2*float64(period))) + 1
+ return int32(qlog*(minusLog2*float64(mean))) + 1
}
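
[Note: the derivation in the comment above is the standard inverse-CDF (inverse transform) method. As an illustrative cross-check only, not runtime code, the same sampling can be written with math.Log in place of the fastlog2/integer shortcut; the expected value matches fastexprand's up to rounding:]

package main

import (
	"fmt"
	"math"
	"math/rand"
)

// expSample draws from an exponential distribution with the given mean
// via the inverse CDF: for uniform q in (0, 1], x = -ln(q) * mean is
// exponentially distributed with mean `mean`. fastexprand computes the
// same quantity using fastlog2 and integer math to stay cheap.
func expSample(mean float64) float64 {
	q := rand.Float64()
	if q == 0 {
		q = math.SmallestNonzeroFloat64 // avoid ln(0)
	}
	return -math.Log(q) * mean
}

func main() {
	const mean = 512 * 1024 // a MemProfileRate-like byte interval
	const n = 100000
	var sum float64
	for i := 0; i < n; i++ {
		sum += expSample(mean)
	}
	fmt.Printf("empirical mean %.0f (expected %d)\n", sum/n, int(mean))
}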
// nextSampleNoFP is similar to nextSample, but uses older,
@@ -950,7 +958,7 @@ func nextSampleNoFP() int32 {
}
type persistentAlloc struct {
- base unsafe.Pointer
+ base *notInHeap
off uintptr
}
@@ -967,17 +975,17 @@ var globalAlloc struct {
//
// Consider marking persistentalloc'd types go:notinheap.
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
- var p unsafe.Pointer
+ var p *notInHeap
systemstack(func() {
p = persistentalloc1(size, align, sysStat)
})
- return p
+ return unsafe.Pointer(p)
}
// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//go:systemstack
-func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer {
+func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap {
const (
chunk = 256 << 10
maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
@@ -998,7 +1006,7 @@ func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer {
}
if size >= maxBlock {
- return sysAlloc(size, sysStat)
+ return (*notInHeap)(sysAlloc(size, sysStat))
}
mp := acquirem()
@@ -1011,7 +1019,7 @@ func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer {
}
persistent.off = round(persistent.off, align)
if persistent.off+size > chunk || persistent.base == nil {
- persistent.base = sysAlloc(chunk, &memstats.other_sys)
+ persistent.base = (*notInHeap)(sysAlloc(chunk, &memstats.other_sys))
if persistent.base == nil {
if persistent == &globalAlloc.persistentAlloc {
unlock(&globalAlloc.mutex)
@@ -1020,7 +1028,7 @@ func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer {
}
persistent.off = 0
}
- p := add(persistent.base, persistent.off)
+ p := persistent.base.add(persistent.off)
persistent.off += size
releasem(mp)
if persistent == &globalAlloc.persistentAlloc {
@@ -1033,3 +1041,19 @@ func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer {
}
return p
}
+
+// notInHeap is off-heap memory allocated by a lower-level allocator
+// like sysAlloc or persistentAlloc.
+//
+// In general, it's better to use real types marked as go:notinheap,
+// but this serves as a generic type for situations where that isn't
+// possible (like in the allocators).
+//
+// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
+//
+//go:notinheap
+type notInHeap struct{}
+
+func (p *notInHeap) add(bytes uintptr) *notInHeap {
+ return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
+}
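
[Note: the add method gives persistentalloc1 typed pointer arithmetic over memory the garbage collector never scans, replacing the untyped add(unsafe.Pointer, uintptr) helper. A hypothetical sketch of the bump-allocation step it supports, using the notInHeap type defined above; base and off are stand-ins for the persistentAlloc fields, and round is inlined as the usual align-up expression for power-of-two alignments:]

// bump rounds off up to align, hands out the slot at base+off, and
// advances off past it -- the same sequence persistentalloc1 performs
// inside its current chunk.
func bump(base *notInHeap, off *uintptr, size, align uintptr) *notInHeap {
	*off = (*off + align - 1) &^ (align - 1) // round(*off, align)
	p := base.add(*off)
	*off += size
	return p
}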