Diffstat (limited to 'libgo/go/runtime/slice.go')
-rw-r--r--   libgo/go/runtime/slice.go | 34
1 file changed, 26 insertions(+), 8 deletions(-)
diff --git a/libgo/go/runtime/slice.go b/libgo/go/runtime/slice.go
index 4548a5b..55f4454 100644
--- a/libgo/go/runtime/slice.go
+++ b/libgo/go/runtime/slice.go
@@ -12,6 +12,7 @@ import (
 // themselves, so that the compiler will export them.
 //
 //go:linkname makeslice runtime.makeslice
+//go:linkname makeslice64 runtime.makeslice64
 //go:linkname growslice runtime.growslice
 //go:linkname slicecopy runtime.slicecopy
 //go:linkname slicestringcopy runtime.slicestringcopy
@@ -44,21 +45,18 @@ func maxSliceCap(elemsize uintptr) uintptr {
 	return _MaxMem / elemsize
 }
 
-// TODO: take uintptrs instead of int64s?
-func makeslice(et *_type, len64, cap64 int64) slice {
+func makeslice(et *_type, len, cap int) slice {
 	// NOTE: The len > maxElements check here is not strictly necessary,
 	// but it produces a 'len out of range' error instead of a 'cap out of range' error
 	// when someone does make([]T, bignumber). 'cap out of range' is true too,
 	// but since the cap is only being supplied implicitly, saying len is clearer.
 	// See issue 4085.
 	maxElements := maxSliceCap(et.size)
-	len := int(len64)
-	if len64 < 0 || int64(len) != len64 || uintptr(len) > maxElements {
+	if len < 0 || uintptr(len) > maxElements {
 		panic(errorString("makeslice: len out of range"))
 	}
 
-	cap := int(cap64)
-	if cap < len || int64(cap) != cap64 || uintptr(cap) > maxElements {
+	if cap < len || uintptr(cap) > maxElements {
 		panic(errorString("makeslice: cap out of range"))
 	}
 
@@ -69,6 +67,20 @@ func makeslice(et *_type, len64, cap64 int64) slice {
 	return slice{p, len, cap}
 }
 
+func makeslice64(et *_type, len64, cap64 int64) slice {
+	len := int(len64)
+	if int64(len) != len64 {
+		panic(errorString("makeslice: len out of range"))
+	}
+
+	cap := int(cap64)
+	if int64(cap) != cap64 {
+		panic(errorString("makeslice: cap out of range"))
+	}
+
+	return makeslice(et, len, cap)
+}
+
 // growslice handles slice growth during append.
 // It is passed the slice element type, the old slice, and the desired new minimum capacity,
 // and it returns a new slice with at least that capacity, with the old data
@@ -106,19 +118,22 @@ func growslice(et *_type, old slice, cap int) slice {
 		}
 	}
 
-	var lenmem, capmem uintptr
+	var lenmem, newlenmem, capmem uintptr
 	const ptrSize = unsafe.Sizeof((*byte)(nil))
 	switch et.size {
 	case 1:
 		lenmem = uintptr(old.len)
+		newlenmem = uintptr(cap)
 		capmem = roundupsize(uintptr(newcap))
 		newcap = int(capmem)
 	case ptrSize:
 		lenmem = uintptr(old.len) * ptrSize
+		newlenmem = uintptr(cap) * ptrSize
 		capmem = roundupsize(uintptr(newcap) * ptrSize)
 		newcap = int(capmem / ptrSize)
 	default:
 		lenmem = uintptr(old.len) * et.size
+		newlenmem = uintptr(cap) * et.size
 		capmem = roundupsize(uintptr(newcap) * et.size)
 		newcap = int(capmem / et.size)
 	}
@@ -136,7 +151,10 @@ func growslice(et *_type, old slice, cap int) slice {
 		// the newarray function will zero the memory.
 		// Calling memclr is also wrong since we allocated
 		// newcap*et.size bytes, which is not the same as capmem.
-		// memclr(add(p, lenmem), capmem-lenmem)
+		// The append() that calls growslice is going to overwrite from old.len to cap (which will be the new length).
+		// Only clear the part that will not be overwritten.
+		// memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem)
+		_ = newlenmem
	} else {
 		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
 		// gccgo's current GC requires newarray, not mallocgc.
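
The heart of the change is the split between makeslice and the new makeslice64: 64-bit length and capacity arguments are narrowed to int up front, and only values that survive the round trip reach the common makeslice checks. Below is a minimal standalone sketch of that narrowing pattern, written as ordinary Go rather than runtime-internal code; narrowToInt and its error text are illustrative names, not part of the patch.

package main

import "fmt"

// narrowToInt mirrors the round-trip check makeslice64 performs before
// delegating to makeslice: convert int64 -> int, then verify that
// converting back reproduces the original value.
func narrowToInt(n int64) (int, error) {
	i := int(n)
	if int64(i) != n {
		return 0, fmt.Errorf("makeslice: len %d out of range", n)
	}
	return i, nil
}

func main() {
	for _, n := range []int64{16, 1 << 40} {
		if i, err := narrowToInt(n); err != nil {
			fmt.Println(err) // 1<<40 fails here only where int is 32 bits
		} else {
			fmt.Println("fits in int:", i)
		}
	}
}

On 64-bit targets both values fit, so the round-trip test only bites where int is 32 bits wide; that is exactly why makeslice itself can drop the int64 comparisons and keep just the sign and maxSliceCap checks.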