aboutsummaryrefslogtreecommitdiff
path: root/libgo
diff options
context:
space:
mode:
Diffstat (limited to 'libgo')
-rw-r--r--libgo/go/syscall/socket.go2
-rw-r--r--libgo/runtime/go-memclr.c41
-rw-r--r--libgo/runtime/go-memmove.c94
3 files changed, 54 insertions, 83 deletions
diff --git a/libgo/go/syscall/socket.go b/libgo/go/syscall/socket.go
index 54a4a99..35665d5 100644
--- a/libgo/go/syscall/socket.go
+++ b/libgo/go/syscall/socket.go
@@ -467,7 +467,7 @@ func Sendto(fd int, p []byte, flags int, to Sockaddr) (err error) {
func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) {
var msg Msghdr
- msg.Name = (*byte)(unsafe.Pointer(&rsa))
+ msg.Name = (*byte)(unsafe.Pointer(rsa))
msg.Namelen = uint32(SizeofSockaddrAny)
var iov Iovec
if len(p) > 0 {
diff --git a/libgo/runtime/go-memclr.c b/libgo/runtime/go-memclr.c
index 53b8117..84df98d 100644
--- a/libgo/runtime/go-memclr.c
+++ b/libgo/runtime/go-memclr.c
@@ -11,50 +11,39 @@ void memclrNoHeapPointers(void *, uintptr)
__attribute__ ((no_split_stack));
void
-memclrNoHeapPointers (void *p1, uintptr len)
+memclrNoHeapPointers(void *p1, uintptr len)
{
-
-#if !defined(__PPC64__)
- __builtin_memset(p1, 0, len);
-#else
- int64 rem,drem,i;
- uint64 offset;
- volatile uint64 *vp;
+ const uintptr ptr_size = sizeof(p1);
+ uintptr rem,drem,i;
+ uintptr offset;
+ volatile uintptr *vp;
if (len == 0) {
return;
}
rem = len;
- offset = (uint64)p1 % 8;
- // This memset is OK since it can't contain
- // an 8 byte aligned pointer.
- if ((rem < 8) || (offset > 0 && offset+rem <= 16)) {
+ offset = (uintptr)p1 % ptr_size;
+ if (rem < ptr_size || offset > 0) {
+ // This memset is OK since it can't contain
+ // a pointer-aligned pointer.
__builtin_memset(p1, 0, rem);
return;
}
- // Move initial bytes to get to 8 byte boundary
- if (offset > 0) {
- __builtin_memset(p1, 0, 8-offset);
- p1 = (void*)((char*)p1+8-offset);
- rem -= 8-offset;
- }
- // If at least 8 bytes left, clear
- drem = rem>>3;
+ drem = rem / ptr_size;
- vp = (volatile uint64*)(p1);
+ vp = (volatile uintptr*)(p1);
// Without the use of volatile here, the compiler
// might convert the loop into a memset.
for (i=0; i<drem; i++) {
*vp = 0;
vp++;
- rem -= 8;
+ rem -= ptr_size;
}
- p1 = (void*)((char*)p1 + 8*drem);
- // Clear any remaining
+ // Clear any remaining bytes.
if (rem > 0) {
- __builtin_memset (p1, 0, rem);
+ p1 = (void*)((char*)p1 + ptr_size*drem);
+ __builtin_memset(p1, 0, rem);
}
-#endif
}
diff --git a/libgo/runtime/go-memmove.c b/libgo/runtime/go-memmove.c
index 1ca3f48..1dbd2b3 100644
--- a/libgo/runtime/go-memmove.c
+++ b/libgo/runtime/go-memmove.c
@@ -12,78 +12,60 @@ void gomemmove(void *, void *, uintptr)
// This implementation is necessary since
// the __builtin_memmove might use __libc_memmove
-// which doesn't require atomicity of 8 byte
+// which doesn't require atomicity of pointer-sized
// moves.
void
-gomemmove (void *dst, void *src, uintptr len)
+gomemmove(void *dst, void *src, uintptr len)
{
-#if !defined(__PPC64__)
- __builtin_memmove(dst, src, len);
-#else
- uint64 offset, tail;
- int64 rem;
- uint64 dwords;
- uint64 i;
- char *bdst,*bsrc;
-
- rem = len;
+ const uintptr ptr_size = sizeof(dst);
+ uintptr tail;
+ uintptr rem;
+ uintptr dwords;
+ uintptr i;
+ char *bdst, *bsrc;
if (len == 0) {
- return;
+ return;
}
- // If src and dst don't have the same 8 byte alignment then
- // there is no issue with copying pointer atomicity. Use the
- // builtin.
- if (((uint64)dst % 8) != ((uint64)src % 8) || len < 8) {
- __builtin_memmove(dst, src, len);
- return;
+ // We expect pointer-containing values to be pointer-aligned.
+ // If these pointers are not aligned, they don't contain pointers.
+ if ((uintptr)dst % ptr_size != 0 || (uintptr)src % ptr_size != 0 || len < ptr_size) {
+ __builtin_memmove(dst, src, len);
+ return;
}
- // Length >= 8 && same ptr alignment
- offset = (uint64)dst % 8;
-
- // If not 8 byte alignment, move the intial bytes.
- if (offset > 0) {
- __builtin_memmove(dst, src, 8-offset);
- dst += (8-offset);
- src += (8-offset);
- rem -= (8-offset);
- }
+ bdst = (char*)dst;
+ bsrc = (char*)src;
- // Move the tail bytes to make the backward move
- // easier.
- tail = rem % 8;
+ // Move the tail bytes to make the backward move easier.
+ rem = len;
+ tail = rem % ptr_size;
if (tail > 0) {
- __builtin_memmove(dst+rem-tail, src+rem-tail, tail);
- rem -= tail;
- }
-
- if (rem == 0) {
- return;
+ __builtin_memmove(bdst+rem-tail, bsrc+rem-tail, tail);
+ rem -= tail;
}
- // Must now be 8 byte alignment and rem is multiple of 8.
- dwords = len>>3;
+ // Must now be pointer alignment and rem is multiple of ptr_size.
+ dwords = rem / ptr_size;
- // Determine if a backwards move is needed
- // Forward or backward, move all doublewords
+ // Determine if a backwards move is needed.
+ // Forward or backward, move all words.
- if ((uint64)(dst - src) < (uint64)rem) {
- bdst = dst+rem-8;
- bsrc = src+rem-8;
- for (i = 0; i<dwords; i++) {
- *(uint64*)bdst = *(uint64*)bsrc;
- bdst -= 8;
- bsrc -= 8;
- }
+ if ((uintptr)(bdst - bsrc) < rem) {
+ bdst += rem - ptr_size;
+ bsrc += rem - ptr_size;
+ for (i = 0; i<dwords; i++) {
+ *(uintptr*)bdst = *(uintptr*)bsrc;
+ bdst -= ptr_size;
+ bsrc -= ptr_size;
+ }
} else {
- for (i = 0; i<dwords; i++) {
- *(uint64*)dst = *(uint64*)src;
- dst += 8;
- src += 8;
- }
+ for (i = 0; i<dwords; i++) {
+ *(uintptr*)bdst = *(uintptr*)bsrc;
+ bdst += ptr_size;
+ bsrc += ptr_size;
+ }
}
-#endif
}