author    Ian Lance Taylor <ian@gcc.gnu.org>  2019-04-09 16:43:22 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>  2019-04-09 16:43:22 +0000
commit    9bafe5a81e42406b86a65ef14b011ed9acc96c5d
tree      35478f4f4ff092374f748479debcda06b96dd6dd
parent    10f47d3d1dc2e1267a32fa6459655b4645a9adad
parent    3fa176b767e14e1d2491775978afac3e87892d1d
Merge from trunk revision 270220.
From-SVN: r270233
Diffstat (limited to 'libgo/go/runtime')
-rw-r--r--  libgo/go/runtime/malloc.go             |  6
-rw-r--r--  libgo/go/runtime/mem_gccgo.go          |  5
-rw-r--r--  libgo/go/runtime/netpoll_aix.go        |  1
-rw-r--r--  libgo/go/runtime/os_aix.go             | 17
-rw-r--r--  libgo/go/runtime/runtime-lldb_test.go  |  1
-rw-r--r--  libgo/go/runtime/slice.go              | 30
-rw-r--r--  libgo/go/runtime/stubs2.go             |  1
-rw-r--r--  libgo/go/runtime/timestub2.go          |  2
8 files changed, 36 insertions(+), 27 deletions(-)
diff --git a/libgo/go/runtime/malloc.go b/libgo/go/runtime/malloc.go
index b6a7ee1..c0b4caa 100644
--- a/libgo/go/runtime/malloc.go
+++ b/libgo/go/runtime/malloc.go
@@ -218,7 +218,7 @@ const (
// we further limit it to 31 bits.
//
// WebAssembly currently has a limit of 4GB linear memory.
- heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosAix))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 60*sys.GoosAix
+ heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosAix))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 60*(sys.GoosAix*_64bit)
// maxAlloc is the maximum size of an allocation. On 64-bit,
// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
@@ -259,7 +259,7 @@ const (
// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
// prefer using heapArenaBytes where possible (we need the
// constant to compute some other constants).
- logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoosAix)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (8+20)*sys.GoosAix
+ logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoosAix)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (8+20)*(sys.GoosAix*_64bit)
// heapArenaBitmapBytes is the size of each heap arena's bitmap.
heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2)
@@ -282,7 +282,7 @@ const (
//
// We use the L1 map on aix/ppc64 to keep the same L2 value
// as on Linux.
- arenaL1Bits = 6*(_64bit*sys.GoosWindows) + 12*sys.GoosAix
+ arenaL1Bits = 6*(_64bit*sys.GoosWindows) + 12*(sys.GoosAix*_64bit)
// arenaL2Bits is the number of bits of the arena number
// covered by the second level arena index.
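Editorial note on the three malloc.go hunks: the new *_64bit factor matters because sys.GoosAix alone is also 1 on a 32-bit AIX target, where the old expressions piled the 64-bit AIX contributions (60, 8+20, 12) on top of the generic 32-bit values. A minimal sketch re-evaluating the patched expressions, with the wasm, windows, and mips factors assumed 0 for brevity:

package main

import "fmt"

func main() {
	// Evaluate the patched constant expressions for aix/ppc64 (b64=1)
	// and a 32-bit AIX target (b64=0).
	for _, c := range []struct {
		name     string
		b64, aix int
	}{
		{"aix, 64-bit", 1, 1},
		{"aix, 32-bit", 0, 1},
	} {
		heapAddrBits := (c.b64*1*(1-c.aix))*48 + (1-c.b64+0)*(32-0) + 60*(c.aix*c.b64)
		logHeapArenaBytes := (6+20)*(c.b64*1*(1-c.aix)) + (2+20)*(c.b64*0) +
			(2+20)*(1-c.b64) + (8+20)*(c.aix*c.b64)
		arenaL1Bits := 6*(c.b64*0) + 12*(c.aix*c.b64)
		fmt.Println(c.name, heapAddrBits, logHeapArenaBytes, arenaL1Bits)
	}
	// Prints: aix, 64-bit 60 28 12
	//         aix, 32-bit 32 22 0
	// Without the *_64bit factors, the 32-bit line would read 92 50 12.
}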
diff --git a/libgo/go/runtime/mem_gccgo.go b/libgo/go/runtime/mem_gccgo.go
index 44f4648..9874678 100644
--- a/libgo/go/runtime/mem_gccgo.go
+++ b/libgo/go/runtime/mem_gccgo.go
@@ -187,6 +187,11 @@ func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
mSysStatInc(sysStat, n)
+ if GOOS == "aix" {
+ // AIX does not allow mapping a range that is already mapped.
+ // So always unmap first even if it is already unmapped.
+ munmap(v, n)
+ }
p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, mmapFD, 0)
if err == _ENOMEM {
throw("runtime: out of memory")
diff --git a/libgo/go/runtime/netpoll_aix.go b/libgo/go/runtime/netpoll_aix.go
index 86c3e96..70bf9eb 100644
--- a/libgo/go/runtime/netpoll_aix.go
+++ b/libgo/go/runtime/netpoll_aix.go
@@ -37,7 +37,6 @@ const _POLLIN = 0x0001
const _POLLOUT = 0x0002
const _POLLHUP = 0x2000
const _POLLERR = 0x4000
-const _O_NONBLOCK = 0x4
var (
pfds []pollfd
diff --git a/libgo/go/runtime/os_aix.go b/libgo/go/runtime/os_aix.go
index 1003616..9211f21 100644
--- a/libgo/go/runtime/os_aix.go
+++ b/libgo/go/runtime/os_aix.go
@@ -62,12 +62,19 @@ func semasleep(ns int64) int32 {
if clock_gettime(_CLOCK_REALTIME, &ts) != 0 {
throw("clock_gettime")
}
- ts.tv_sec += ns / 1e9
- ts.tv_nsec += ns % 1e9
- if ts.tv_nsec >= 1e9 {
- ts.tv_sec++
- ts.tv_nsec -= 1e9
+
+ sec := int64(ts.tv_sec) + ns/1e9
+ nsec := int64(ts.tv_nsec) + ns%1e9
+ if nsec >= 1e9 {
+ sec++
+ nsec -= 1e9
+ }
+ if sec != int64(timespec_sec_t(sec)) {
+ // Handle overflows (timespec_sec_t is 32-bit in 32-bit applications)
+ sec = 1<<31 - 1
}
+ ts.tv_sec = timespec_sec_t(sec)
+ ts.tv_nsec = timespec_nsec_t(nsec)
if sem_timedwait((*semt)(unsafe.Pointer(_m_.mos.waitsema)), &ts) != 0 {
err := errno()
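The rewritten deadline computation, extracted into a standalone sketch. Here timespec_sec_t is narrowed to int32 to model a 32-bit AIX application; the arithmetic and the 1<<31-1 clamp follow the hunk above:

package main

import "fmt"

// timespec_sec_t stands in for AIX's tv_sec type, which is 32 bits wide in
// 32-bit applications (on 64-bit AIX it is 64 bits and the clamp never fires).
type timespec_sec_t = int32

// deadline computes the absolute wakeup time from a current timespec plus a
// relative timeout in nanoseconds, mirroring the patched semasleep.
func deadline(nowSec, nowNsec, ns int64) (sec, nsec int64) {
	sec = nowSec + ns/1e9
	nsec = nowNsec + ns%1e9
	if nsec >= 1e9 {
		sec++
		nsec -= 1e9
	}
	// If sec does not survive a round trip through the narrow type, the sum
	// overflowed; clamp to the maximum representable second count instead
	// of handing the kernel a deadline in the past.
	if sec != int64(timespec_sec_t(sec)) {
		sec = 1<<31 - 1
	}
	return sec, nsec
}

func main() {
	// A huge timeout overflows 32-bit seconds and is clamped to 2147483647.
	fmt.Println(deadline(1554800000, 0, 9e18))
}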
diff --git a/libgo/go/runtime/runtime-lldb_test.go b/libgo/go/runtime/runtime-lldb_test.go
index fe3a0eb..08d6a34 100644
--- a/libgo/go/runtime/runtime-lldb_test.go
+++ b/libgo/go/runtime/runtime-lldb_test.go
@@ -139,6 +139,7 @@ func TestLldbPython(t *testing.T) {
if final := os.Getenv("GOROOT_FINAL"); final != "" && runtime.GOROOT() != final {
t.Skip("gdb test can fail with GOROOT_FINAL pending")
}
+ testenv.SkipFlaky(t, 31188)
checkLldbPython(t)
diff --git a/libgo/go/runtime/slice.go b/libgo/go/runtime/slice.go
index 335532d..9137951 100644
--- a/libgo/go/runtime/slice.go
+++ b/libgo/go/runtime/slice.go
@@ -77,31 +77,31 @@ func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer {
// and it returns a new slice with at least that capacity, with the old data
// copied into it.
// The new slice's length is set to the requested capacity.
-func growslice(et *_type, old slice, cap int) slice {
+func growslice(et *_type, oldarray unsafe.Pointer, oldlen, oldcap, cap int) slice {
if raceenabled {
callerpc := getcallerpc()
- racereadrangepc(old.array, uintptr(old.len*int(et.size)), callerpc, funcPC(growslice))
+ racereadrangepc(oldarray, uintptr(oldlen*int(et.size)), callerpc, funcPC(growslice))
}
if msanenabled {
- msanread(old.array, uintptr(old.len*int(et.size)))
+ msanread(oldarray, uintptr(oldlen*int(et.size)))
}
- if cap < old.cap {
+ if cap < oldcap {
panic(errorString("growslice: cap out of range"))
}
if et.size == 0 {
// append should not create a slice with nil pointer but non-zero len.
- // We assume that append doesn't need to preserve old.array in this case.
+ // We assume that append doesn't need to preserve oldarray in this case.
return slice{unsafe.Pointer(&zerobase), cap, cap}
}
- newcap := old.cap
+ newcap := oldcap
doublecap := newcap + newcap
if cap > doublecap {
newcap = cap
} else {
- if old.len < 1024 {
+ if oldlen < 1024 {
newcap = doublecap
} else {
// Check 0 < newcap to detect overflow
@@ -125,13 +125,13 @@ func growslice(et *_type, old slice, cap int) slice {
// For powers of 2, use a variable shift.
switch {
case et.size == 1:
- lenmem = uintptr(old.len)
+ lenmem = uintptr(oldlen)
newlenmem = uintptr(cap)
capmem = roundupsize(uintptr(newcap))
overflow = uintptr(newcap) > maxAlloc
newcap = int(capmem)
case et.size == sys.PtrSize:
- lenmem = uintptr(old.len) * sys.PtrSize
+ lenmem = uintptr(oldlen) * sys.PtrSize
newlenmem = uintptr(cap) * sys.PtrSize
capmem = roundupsize(uintptr(newcap) * sys.PtrSize)
overflow = uintptr(newcap) > maxAlloc/sys.PtrSize
@@ -144,13 +144,13 @@ func growslice(et *_type, old slice, cap int) slice {
} else {
shift = uintptr(sys.Ctz32(uint32(et.size))) & 31
}
- lenmem = uintptr(old.len) << shift
+ lenmem = uintptr(oldlen) << shift
newlenmem = uintptr(cap) << shift
capmem = roundupsize(uintptr(newcap) << shift)
overflow = uintptr(newcap) > (maxAlloc >> shift)
newcap = int(capmem >> shift)
default:
- lenmem = uintptr(old.len) * et.size
+ lenmem = uintptr(oldlen) * et.size
newlenmem = uintptr(cap) * et.size
capmem, overflow = math.MulUintptr(et.size, uintptr(newcap))
capmem = roundupsize(capmem)
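A hedged illustration of the power-of-two branch, observable from ordinary append; the exact capacity depends on the runtime's size classes, so treat the printed value as typical rather than guaranteed:

package main

import "fmt"

func main() {
	// et.size == 4 takes the power-of-two branch above with shift = 2:
	// growing a []int32 of len/cap 9 requests newcap 18 (doubling), so
	// capmem = roundupsize(18 << 2) = roundupsize(72) = 80 with the usual
	// size classes, and the final cap is 80 >> 2 = 20.
	s := make([]int32, 9)
	s = append(s, 1)
	fmt.Println(len(s), cap(s)) // 10 20 (size-class dependent)
}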
@@ -177,19 +177,19 @@ func growslice(et *_type, old slice, cap int) slice {
var p unsafe.Pointer
if et.kind&kindNoPointers != 0 {
p = mallocgc(capmem, nil, false)
- // The append() that calls growslice is going to overwrite from old.len to cap (which will be the new length).
+ // The append() that calls growslice is going to overwrite from oldlen to cap (which will be the new length).
// Only clear the part that will not be overwritten.
memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem)
} else {
// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
p = mallocgc(capmem, et, true)
if writeBarrier.enabled {
- // Only shade the pointers in old.array since we know the destination slice p
+ // Only shade the pointers in oldarray since we know the destination slice p
// only contains nil pointers because it has been cleared during alloc.
- bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(old.array), lenmem)
+ bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldarray), lenmem)
}
}
- memmove(p, old.array, lenmem)
+ memmove(p, oldarray, lenmem)
return slice{p, cap, newcap}
}
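The final memmove is observable from user code: once growslice reallocates, the old and new backing arrays are distinct. A minimal demonstration:

package main

import "fmt"

func main() {
	// Appending past cap forces growth; memmove copies lenmem bytes into
	// the new block, so writes through the old slice no longer alias it.
	old := make([]int, 2, 2)
	old[0], old[1] = 1, 2
	grown := append(old, 3)
	old[0] = 99
	fmt.Println(old[0], grown[0]) // 99 1: the data was copied, not shared
}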
diff --git a/libgo/go/runtime/stubs2.go b/libgo/go/runtime/stubs2.go
index 304c8e4..1cb910c 100644
--- a/libgo/go/runtime/stubs2.go
+++ b/libgo/go/runtime/stubs2.go
@@ -7,7 +7,6 @@
// +build !nacl
// +build !js
// +build !darwin
-// +build !aix
package runtime
diff --git a/libgo/go/runtime/timestub2.go b/libgo/go/runtime/timestub2.go
index 00c2c55..7a28603 100644
--- a/libgo/go/runtime/timestub2.go
+++ b/libgo/go/runtime/timestub2.go
@@ -5,8 +5,6 @@
// +build !darwin
// +build !windows
// +build !freebsd
-// +build !aix
-
package runtime
func walltime() (sec int64, nsec int32)