Diffstat (limited to 'libgo/go/runtime/mbarrier.go')
 -rw-r--r--  libgo/go/runtime/mbarrier.go  56
 1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/libgo/go/runtime/mbarrier.go b/libgo/go/runtime/mbarrier.go
index e66b50d..836f85a 100644
--- a/libgo/go/runtime/mbarrier.go
+++ b/libgo/go/runtime/mbarrier.go
@@ -163,8 +163,8 @@ func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
 	if dst == src {
 		return
 	}
-	if typ.ptrdata != 0 {
-		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.size)
+	if writeBarrier.needed && typ.ptrdata != 0 {
+		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
 	}
 	// There's a race here: if some other goroutine can write to
 	// src, it may change some pointer in src after we've
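The hunk above narrows the pre-write barrier in two ways: it is skipped outright unless a GC phase has set writeBarrier.needed, and it scans only typ.ptrdata bytes, the prefix of the type that can contain pointers, instead of the full typ.size. A minimal sketch of why ptrdata can be smaller than size; the struct and the 64-bit layout are illustrative assumptions, and ptrdata itself is internal to the runtime:

package main

import (
	"fmt"
	"unsafe"
)

// T keeps its only pointer in the first word; the trailing array is
// pointer-free. For such a type the runtime would record ptrdata = 8
// on a 64-bit system while size is 40, so the barrier above has 32
// fewer bytes to scan. (ptrdata is not visible from user code; the
// value here is stated for illustration.)
type T struct {
	p *int
	a [4]int64
}

func main() {
	fmt.Println(unsafe.Sizeof(T{})) // 40 on 64-bit: only the first 8 bytes can hold a pointer
}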
@@ -199,17 +199,18 @@ func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
 // typedmemmovepartial is like typedmemmove but assumes that
 // dst and src point off bytes into the value and only copies size bytes.
+// off must be a multiple of sys.PtrSize.
 //go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
 func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
-	if writeBarrier.needed && typ.ptrdata != 0 && size >= sys.PtrSize {
-		// Pointer-align start address for bulk barrier.
-		adst, asrc, asize := dst, src, size
-		if frag := -off & (sys.PtrSize - 1); frag != 0 {
-			adst = add(dst, frag)
-			asrc = add(src, frag)
-			asize -= frag
+	if writeBarrier.needed && typ.ptrdata > off && size >= sys.PtrSize {
+		if off&(sys.PtrSize-1) != 0 {
+			panic("reflect: internal error: misaligned offset")
 		}
-		bulkBarrierPreWrite(uintptr(adst), uintptr(asrc), asize&^(sys.PtrSize-1))
+		pwsize := alignDown(size, sys.PtrSize)
+		if poff := typ.ptrdata - off; pwsize > poff {
+			pwsize = poff
+		}
+		bulkBarrierPreWrite(uintptr(dst), uintptr(src), pwsize)
 	}
 	memmove(dst, src, size)
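The old code pointer-aligned the start address with the frag arithmetic; the new code instead requires off to already be word-aligned (panicking otherwise), rounds size down to a whole number of words with alignDown, and clamps the barrier so it never runs past typ.ptrdata. A sketch of that arithmetic, assuming a 64-bit word and reimplementing alignDown locally since the runtime helper is not exported:

package main

import "fmt"

const ptrSize = 8 // assumption: 64-bit platform

// alignDown rounds n down to a multiple of a (a power of two),
// mirroring the runtime helper used in the new code path.
func alignDown(n, a uintptr) uintptr {
	return n &^ (a - 1)
}

// barrierBytes reproduces the pwsize computation from the hunk above:
// round the copy size down to whole words, then clamp it so the
// barrier never runs past the type's pointer-bearing prefix.
func barrierBytes(ptrdata, off, size uintptr) uintptr {
	pwsize := alignDown(size, ptrSize)
	if poff := ptrdata - off; pwsize > poff {
		pwsize = poff
	}
	return pwsize
}

func main() {
	// Copy 28 bytes starting 8 bytes into a value whose first 24
	// bytes may hold pointers: only bytes [8,24) need the barrier.
	fmt.Println(barrierBytes(24, 8, 28)) // 16
}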
@@ -219,16 +220,14 @@ func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size
 }
 
 //go:nosplit
-func typedslicecopy(typ *_type, dst, src slice) int {
-	n := dst.len
-	if n > src.len {
-		n = src.len
+func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe.Pointer, srcLen int) int {
+	n := dstLen
+	if n > srcLen {
+		n = srcLen
 	}
 	if n == 0 {
 		return 0
 	}
-	dstp := dst.array
-	srcp := src.array
 
 	// The compiler emits calls to typedslicecopy before
 	// instrumentation runs, so unlike the other copying and
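typedslicecopy is what the compiler calls for the built-in copy when the element type contains pointers, so the min-of-lengths result above matches copy's contract; passing ptr/len pairs instead of slice headers spares compiler-generated callers from materializing a slice value. A quick reminder of the semantics being preserved:

package main

import "fmt"

func main() {
	// For pointer-bearing element types the compiler lowers this
	// built-in copy to a typedslicecopy call; the element count is
	// the shorter of the two lengths, exactly as computed above.
	dst := make([]*int, 2)
	src := []*int{new(int), new(int), new(int)}
	fmt.Println(copy(dst, src)) // 2
}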
@@ -237,19 +236,19 @@ func typedslicecopy(typ *_type, dst, src slice) int {
 	if raceenabled {
 		callerpc := getcallerpc()
 		pc := funcPC(slicecopy)
-		racewriterangepc(dstp, uintptr(n)*typ.size, callerpc, pc)
-		racereadrangepc(srcp, uintptr(n)*typ.size, callerpc, pc)
+		racewriterangepc(dstPtr, uintptr(n)*typ.size, callerpc, pc)
+		racereadrangepc(srcPtr, uintptr(n)*typ.size, callerpc, pc)
 	}
 	if msanenabled {
-		msanwrite(dstp, uintptr(n)*typ.size)
-		msanread(srcp, uintptr(n)*typ.size)
+		msanwrite(dstPtr, uintptr(n)*typ.size)
+		msanread(srcPtr, uintptr(n)*typ.size)
 	}
 
 	if writeBarrier.cgo {
-		cgoCheckSliceCopy(typ, dst, src, n)
+		cgoCheckSliceCopy(typ, dstPtr, srcPtr, n)
 	}
 
-	if dstp == srcp {
+	if dstPtr == srcPtr {
 		return n
 	}
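As the context lines above note, typedslicecopy is not instrumented automatically, so it must report its memory ranges to the race detector and MSan by hand before moving anything. A hedged sketch of that pattern; raceEnabled, raceWriteRange, and raceReadRange are hypothetical stand-ins for the runtime's internal hooks, not real APIs:

package main

import "fmt"

// Hypothetical stand-ins for the runtime's instrumentation hooks.
const raceEnabled = true

func raceWriteRange(p []byte) { fmt.Println("write range:", len(p), "bytes") }
func raceReadRange(p []byte)  { fmt.Println("read range:", len(p), "bytes") }

// copyChecked mirrors the shape of the hunk above: report the write
// and read ranges explicitly, then perform the uninstrumented copy.
func copyChecked(dst, src []byte) int {
	if raceEnabled {
		raceWriteRange(dst)
		raceReadRange(src)
	}
	return copy(dst, src)
}

func main() {
	copyChecked(make([]byte, 4), []byte("go"))
}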
@@ -259,11 +258,12 @@ func typedslicecopy(typ *_type, dst, src slice) int {
 	// before calling typedslicecopy.
 	size := uintptr(n) * typ.size
 	if writeBarrier.needed {
-		bulkBarrierPreWrite(uintptr(dstp), uintptr(srcp), size)
+		pwsize := size - typ.size + typ.ptrdata
+		bulkBarrierPreWrite(uintptr(dstPtr), uintptr(srcPtr), pwsize)
 	}
 	// See typedmemmove for a discussion of the race between the
 	// barrier and memmove.
-	memmove(dstPtr, srcPtr, size)
+	memmove(dstPtr, srcPtr, size)
 	return n
 }
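The new pwsize bound applies the ptrdata idea element-wise: in a run of n elements, the only bytes that can never hold a pointer are the scalar tail of the final element, so the barrier covers n*size - (size - ptrdata) bytes. A small sketch of that computation; the element numbers are made up for illustration:

package main

import "fmt"

// sliceBarrierBytes mirrors the pwsize computation in the hunk above:
// the barrier stops after the last element's pointer-bearing prefix.
func sliceBarrierBytes(n int, elemSize, elemPtrdata uintptr) uintptr {
	size := uintptr(n) * elemSize
	return size - elemSize + elemPtrdata
}

func main() {
	// 3 elements of 40 bytes each, pointers only in the first 8 bytes
	// of each element: the last element's 32 scalar bytes are skipped.
	fmt.Println(sliceBarrierBytes(3, 40, 8)) // 88, not 120
}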
@@ -293,7 +293,7 @@ func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
 		memmove(dst.array, src.array, size)
 		return n
 	}
-	return typedslicecopy(elemType, dst, src)
+	return typedslicecopy(elemType, dst.array, dst.len, src.array, src.len)
 }
 
 // typedmemclr clears the typed memory at ptr with type typ. The
@@ -306,8 +306,8 @@ func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
 //
 //go:nosplit
 func typedmemclr(typ *_type, ptr unsafe.Pointer) {
-	if typ.ptrdata != 0 {
-		bulkBarrierPreWrite(uintptr(ptr), 0, typ.size)
+	if writeBarrier.needed && typ.ptrdata != 0 {
+		bulkBarrierPreWrite(uintptr(ptr), 0, typ.ptrdata)
 	}
 	memclrNoHeapPointers(ptr, typ.size)
 }
@@ -319,7 +319,7 @@ func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) {
 //go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
 func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
-	if typ.ptrdata != 0 {
+	if writeBarrier.needed && typ.ptrdata != 0 {
 		bulkBarrierPreWrite(uintptr(ptr), 0, size)
 	}
 	memclrNoHeapPointers(ptr, size)
 }
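The two clearing hunks follow the same pattern as typedmemmove: consult writeBarrier.needed first so code running outside a GC phase pays nothing, and (in typedmemclr) bound the barrier by typ.ptrdata while still clearing the full typ.size. A toy model of that control flow, with writeBarrierNeeded standing in for the runtime's internal flag:

package main

import "fmt"

// writeBarrierNeeded stands in for the runtime's writeBarrier.needed,
// which is only set while a GC phase requires pre-write barriers.
var writeBarrierNeeded bool

// clearBarrierBytes models how many bytes the barrier must scan before
// a clear: none when no GC is running, and only the pointer-bearing
// prefix otherwise. The clear itself still covers all size bytes.
func clearBarrierBytes(ptrdata, size uintptr) uintptr {
	if !writeBarrierNeeded || ptrdata == 0 {
		return 0 // no barrier work at all
	}
	return ptrdata // barrier stops at the last possible pointer word
}

func main() {
	fmt.Println(clearBarrierBytes(8, 40)) // 0: GC not running
	writeBarrierNeeded = true
	fmt.Println(clearBarrierBytes(8, 40)) // 8: only the pointer word
}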