author    Ian Lance Taylor <ian@gcc.gnu.org>  2015-09-23 17:07:15 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>  2015-09-23 17:07:15 +0000
commit    46efdbbc0113427e165b0d3635d5a26fb83d1448 (patch)
tree      66208412bf69d3df426444c19fd51e8e694750da /libgo
parent    a9c238108bf8532990df88c137383b457acb8893 (diff)
runtime: rewrite lfstack packing/unpacking to look more like that in Go
Reviewed-on: https://go-review.googlesource.com/13037

From-SVN: r228057
Diffstat (limited to 'libgo')
-rw-r--r--  libgo/runtime/lfstack.goc | 58
1 file changed, 37 insertions(+), 21 deletions(-)
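
The patch replaces the PTR_BITS/PTR_MASK/CNT_MASK macro scheme with small inline lfPack/lfUnpack helpers, mirroring the packing used by the Go runtime's lock-free stack. As a rough illustration of the 47-bit (amd64) case in the hunk below, the standalone sketch that follows packs an 8-byte-aligned pointer into the high bits of a uint64 and keeps the push counter in the low bits; the demo_pack/demo_unpack names and the test harness are illustrative, not part of the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for lfPack/lfUnpack in the amd64 case below:
 * user-space pointers are assumed to fit in 47 bits and be 8-byte
 * aligned, so the pointer is shifted into the high bits and the ABA
 * counter keeps the low CNT_BITS bits. */
#define PTR_BITS 47
#define CNT_BITS (64 - PTR_BITS + 3)

static uint64_t demo_pack(void *node, uint64_t cnt) {
	return ((uint64_t)(uintptr_t)node << (64 - PTR_BITS)) | (cnt & ((1ull << CNT_BITS) - 1));
}

static void *demo_unpack(uint64_t val) {
	/* Dropping the counter also drops the pointer's three zero
	 * alignment bits, so shift them back in. */
	return (void *)(uintptr_t)((val >> CNT_BITS) << 3);
}

int main(void) {
	uint64_t obj;                              /* any 8-byte-aligned object */
	uint64_t packed = demo_pack(&obj, 12345);
	assert(demo_unpack(packed) == (void *)&obj);
	printf("packed = %#llx\n", (unsigned long long)packed);
	return 0;
}

A pointer that does not survive this round trip (for example one with significant bits above bit 46) is exactly what the rewritten check in runtime_lfstackpush rejects.
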
diff --git a/libgo/runtime/lfstack.goc b/libgo/runtime/lfstack.goc
index 060a0cc..9eb80d9 100644
--- a/libgo/runtime/lfstack.goc
+++ b/libgo/runtime/lfstack.goc
@@ -9,25 +9,41 @@ package runtime
#include "arch.h"
#if __SIZEOF_POINTER__ == 8
-// Amd64 uses 48-bit virtual addresses, 47-th bit is used as kernel/user flag.
-// So we use 17msb of pointers as ABA counter.
-# define PTR_BITS 47
-#else
-# define PTR_BITS 32
-#endif
-#define PTR_MASK ((1ull<<PTR_BITS)-1)
-#define CNT_MASK (0ull-1)
-
-#if __SIZEOF_POINTER__ == 8 && (defined(__sparc__) || (defined(__sun__) && defined(__amd64__)))
// SPARC64 and Solaris on AMD64 uses all 64 bits of virtual addresses.
// Use low-order three bits as ABA counter.
// http://docs.oracle.com/cd/E19120-01/open.solaris/816-5138/6mba6ua5p/index.html
-#undef PTR_BITS
-#undef CNT_MASK
-#undef PTR_MASK
-#define PTR_BITS 0
-#define CNT_MASK 7
-#define PTR_MASK ((0ull-1)<<3)
+# if defined(__sparc__) || (defined(__sun__) && defined(__amd64__))
+static inline uint64 lfPack(LFNode *node, uintptr cnt) {
+ return ((uint64)(node)) | ((cnt)&7);
+}
+static inline LFNode* lfUnpack(uint64 val) {
+ return (LFNode*)(val&~7);
+}
+# else
+# if defined(__aarch64__)
+// Depending on the kernel options, pointers on arm64 can have up to 48 significant
+// bits (see https://www.kernel.org/doc/Documentation/arm64/memory.txt).
+# define PTR_BITS 48
+# else
+// Amd64 uses 48-bit virtual addresses, 47-th bit is used as kernel/user flag.
+// So we use 17msb of pointers as ABA counter.
+# define PTR_BITS 47
+# endif
+# endif
+# define CNT_BITS (64 - PTR_BITS + 3)
+static inline uint64 lfPack(LFNode *node, uintptr cnt) {
+ return ((uint64)(node)<<(64-PTR_BITS)) | (cnt&(((1<<CNT_BITS)-1)));
+}
+static inline LFNode* lfUnpack(uint64 val) {
+ return (LFNode*)((val >> CNT_BITS) << 3);
+}
+#else
+static inline uint64 lfPack(LFNode *node, uintptr cnt) {
+ return ((uint64)(uintptr)(node)<<32) | cnt;
+}
+static inline LFNode* lfUnpack(uint64 val) {
+ return (LFNode*)(uintptr)(val >> 32);
+}
#endif
void
@@ -35,16 +51,16 @@ runtime_lfstackpush(uint64 *head, LFNode *node)
{
uint64 old, new;
- if((uintptr)node != ((uintptr)node&PTR_MASK)) {
+ if(node != lfUnpack(lfPack(node, 0))) {
runtime_printf("p=%p\n", node);
runtime_throw("runtime_lfstackpush: invalid pointer");
}
node->pushcnt++;
- new = (uint64)(uintptr)node|(((uint64)node->pushcnt&CNT_MASK)<<PTR_BITS);
+ new = lfPack(node, node->pushcnt);
for(;;) {
old = runtime_atomicload64(head);
- node->next = (LFNode*)(uintptr)(old&PTR_MASK);
+ node->next = lfUnpack(old);
if(runtime_cas64(head, old, new))
break;
}
@@ -60,11 +76,11 @@ runtime_lfstackpop(uint64 *head)
old = runtime_atomicload64(head);
if(old == 0)
return nil;
- node = (LFNode*)(uintptr)(old&PTR_MASK);
+ node = lfUnpack(old);
node2 = runtime_atomicloadp(&node->next);
new = 0;
if(node2 != nil)
- new = (uint64)(uintptr)node2|(((uint64)node2->pushcnt&CNT_MASK)<<PTR_BITS);
+ new = lfPack(node2, node2->pushcnt);
if(runtime_cas64(head, old, new))
return node;
}
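
For targets where all 64 address bits are significant (SPARC64 and Solaris on amd64 in the hunk above), the pointer cannot be shifted up, so only the three low alignment bits are left for the counter. A minimal sketch of that variant, again with illustrative names rather than the patch's own:

#include <assert.h>
#include <stdint.h>

/* Illustrative variant for full-64-bit virtual addresses: the ABA
 * counter lives in the three alignment bits of the pointer itself. */
static uint64_t demo_pack64(void *node, uint64_t cnt) {
	return (uint64_t)(uintptr_t)node | (cnt & 7);
}

static void *demo_unpack64(uint64_t val) {
	return (void *)(uintptr_t)(val & ~(uint64_t)7);
}

int main(void) {
	uint64_t obj;
	assert(demo_unpack64(demo_pack64(&obj, 9)) == (void *)&obj);
	return 0;
}

In both variants the counter comes from node->pushcnt, so repeated pushes of the same node are unlikely to produce an identical head word; that is what the ABA counter is guarding the runtime_cas64 loops against, even where only a few counter bits are available.
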