diff options
author | Lynn Boger <laboger@linux.vnet.ibm.com> | 2021-02-22 09:12:01 -0600 |
---|---|---|
committer | Ian Lance Taylor <iant@golang.org> | 2021-02-25 17:01:23 -0800 |
commit | 2fbed0dc6c4dffcfd586a990fc3d2aeb7e00d521 (patch) | |
tree | 4c32e8e2b504ca2528abaf6d1800db11ed359027 /libgo/runtime | |
parent | a505fad4dd4d93b6d642995d7df320aa40949568 (diff) | |
download | gcc-2fbed0dc6c4dffcfd586a990fc3d2aeb7e00d521.zip gcc-2fbed0dc6c4dffcfd586a990fc3d2aeb7e00d521.tar.gz gcc-2fbed0dc6c4dffcfd586a990fc3d2aeb7e00d521.tar.bz2 |
libgo: ensure memmove, memset 8 byte atomicity on ppc64x
Go requires that pointer moves are done 8 bytes at a time,
but gccgo uses libc's memmove and memset, which do not require
that, and there are some cases where an 8 byte move might be
done as 4+4.
To enforce 8 byte moves for memmove and memset, this adds a
C implementation in libgo/runtime for memmove and memset to be
used on ppc64le and ppc64. Asm implementations were considered
but discarded to avoid different implementations for different
target ISAs.
Fixes golang/go#41428
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/294931
Diffstat (limited to 'libgo/runtime')
-rw-r--r-- | libgo/runtime/go-memclr.c | 45 | ||||
-rw-r--r-- | libgo/runtime/go-memmove.c | 89 | ||||
-rw-r--r-- | libgo/runtime/runtime.h | 3 |
3 files changed, 135 insertions, 2 deletions
diff --git a/libgo/runtime/go-memclr.c b/libgo/runtime/go-memclr.c index b5d4975..53b8117 100644 --- a/libgo/runtime/go-memclr.c +++ b/libgo/runtime/go-memclr.c @@ -13,5 +13,48 @@ void memclrNoHeapPointers(void *, uintptr) void memclrNoHeapPointers (void *p1, uintptr len) { - __builtin_memset (p1, 0, len); + +#if !defined(__PPC64__) + __builtin_memset(p1, 0, len); +#else + int64 rem,drem,i; + uint64 offset; + volatile uint64 *vp; + + if (len == 0) { + return; + } + rem = len; + + offset = (uint64)p1 % 8; + // This memset is OK since it can't contain + // an 8 byte aligned pointer. + if ((rem < 8) || (offset > 0 && offset+rem <= 16)) { + __builtin_memset(p1, 0, rem); + return; + } + // Move initial bytes to get to 8 byte boundary + if (offset > 0) { + __builtin_memset(p1, 0, 8-offset); + p1 = (void*)((char*)p1+8-offset); + rem -= 8-offset; + } + + // If at least 8 bytes left, clear + drem = rem>>3; + + vp = (volatile uint64*)(p1); + // Without the use of volatile here, the compiler + // might convert the loop into a memset. + for (i=0; i<drem; i++) { + *vp = 0; + vp++; + rem -= 8; + } + p1 = (void*)((char*)p1 + 8*drem); + // Clear any remaining + if (rem > 0) { + __builtin_memset (p1, 0, rem); + } +#endif } diff --git a/libgo/runtime/go-memmove.c b/libgo/runtime/go-memmove.c new file mode 100644 index 0000000..1ca3f48 --- /dev/null +++ b/libgo/runtime/go-memmove.c @@ -0,0 +1,89 @@ +/* go-memmove.c -- memmove + + Copyright 2021 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. */ + +#include "runtime.h" + +void gomemmove(void *, void *, uintptr) + __asm__ (GOSYM_PREFIX "runtime.memmove") + __attribute__ ((no_split_stack)); + +// This implementation is necessary since +// the __builtin_memmove might use __libc_memmove +// which doesn't require atomicity of 8 byte +// moves. 
+ +void +gomemmove (void *dst, void *src, uintptr len) +{ +#if !defined(__PPC64__) + __builtin_memmove(dst, src, len); +#else + uint64 offset, tail; + int64 rem; + uint64 dwords; + uint64 i; + char *bdst,*bsrc; + + rem = len; + + if (len == 0) { + return; + } + + // If src and dst don't have the same 8 byte alignment then + // there is no issue with copying pointer atomicity. Use the + // builtin. + if (((uint64)dst % 8) != ((uint64)src % 8) || len < 8) { + __builtin_memmove(dst, src, len); + return; + } + + // Length >= 8 && same ptr alignment + offset = (uint64)dst % 8; + + // If not 8 byte alignment, move the intial bytes. + if (offset > 0) { + __builtin_memmove(dst, src, 8-offset); + dst += (8-offset); + src += (8-offset); + rem -= (8-offset); + } + + // Move the tail bytes to make the backward move + // easier. + tail = rem % 8; + if (tail > 0) { + __builtin_memmove(dst+rem-tail, src+rem-tail, tail); + rem -= tail; + } + + if (rem == 0) { + return; + } + + // Must now be 8 byte alignment and rem is multiple of 8. 
+ dwords = len>>3; + + // Determine if a backwards move is needed + // Forward or backward, move all doublewords + + if ((uint64)(dst - src) < (uint64)rem) { + bdst = dst+rem-8; + bsrc = src+rem-8; + for (i = 0; i<dwords; i++) { + *(uint64*)bdst = *(uint64*)bsrc; + bdst -= 8; + bsrc -= 8; + } + } else { + for (i = 0; i<dwords; i++) { + *(uint64*)dst = *(uint64*)src; + dst += 8; + src += 8; + } + } +#endif +} diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h index 3a65d44..b3dc4fd 100644 --- a/libgo/runtime/runtime.h +++ b/libgo/runtime/runtime.h @@ -221,7 +221,8 @@ bool runtime_canpanic(G*); void runtime_printf(const char*, ...); int32 runtime_snprintf(byte*, int32, const char*, ...); #define runtime_mcmp(a, b, s) __builtin_memcmp((a), (b), (s)) -#define runtime_memmove(a, b, s) __builtin_memmove((a), (b), (s)) +void runtime_memmove(void*, void*, uint64) + __asm__ (GOSYM_PREFIX "runtime.memmove"); String runtime_gostringnocopy(const byte*) __asm__ (GOSYM_PREFIX "runtime.gostringnocopy"); void runtime_ginit(void) |