author     Colin Schmidt <colins@eecs.berkeley.edu>   2016-02-27 19:02:29 -0800
committer  Colin Schmidt <colins@eecs.berkeley.edu>   2016-02-29 10:54:27 -0800
commit     9e63c8544ea2e0aea6778defb7b535dbb87b7205 (patch)
tree       b7ddaba78adcc7f760bb65db6c01124cf2d88412
parent     fb9e416be46e8018c8333553136b91401bdd74a6 (diff)
if atomics are unavailable, emulate them a la pk emu
-rw-r--r--   benchmarks/common/util.h   14
-rw-r--r--   benchmarks/rsort/rsort.c   22
2 files changed, 24 insertions(+), 12 deletions(-)
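
The change gates the GCC __sync_fetch_and_add builtin behind the __riscv_atomic predefine: when the A extension is absent, atomic_add degrades to a plain read-modify-write, mirroring the pk emulation but without masking interrupts since the benchmarks run bare-metal. Below is a minimal host-side sketch of the new macro; the main() harness is illustrative and not part of the commit, and it needs gcc or clang for statement expressions and typeof.

/* Illustrative harness for the atomic_add fallback (not part of the commit). */
#include <stdio.h>

#ifdef __riscv_atomic
# define atomic_add(ptr, inc) __sync_fetch_and_add(ptr, inc)
#else
# define atomic_add(ptr, inc) ({ \
    typeof(*(ptr)) res = *(volatile typeof(*(ptr)) *)(ptr); \
    *(volatile typeof(ptr))(ptr) = res + (inc); \
    res; })
#endif

int main(void)
{
  long count = 5;
  long old = atomic_add(&count, 3);        /* returns the value before the add */
  printf("old=%ld new=%ld\n", old, count); /* prints: old=5 new=8 */
  return 0;
}
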
diff --git a/benchmarks/common/util.h b/benchmarks/common/util.h
index 2fcc89d..cd31f92 100644
--- a/benchmarks/common/util.h
+++ b/benchmarks/common/util.h
@@ -33,6 +33,18 @@ static void setStats(int enable) {}
extern void setStats(int enable);
#endif
+// Emulate atomics - code is similar to pk emulation but we are baremetal so
+// we don't need to disable interrupts
+#ifdef __riscv_atomic
+# define atomic_add(ptr, inc) __sync_fetch_and_add(ptr, inc)
+#else
+# define atomic_add(ptr, inc) ({ \
+ typeof(*(ptr)) res = *(volatile typeof(*(ptr)) *)(ptr); \
+ *(volatile typeof(ptr))(ptr) = res + (inc); \
+ res; })
+#endif
+
+
#include <stdint.h>
extern int have_vec;
@@ -102,7 +114,7 @@ static void __attribute__((noinline)) barrier(int ncores)
__sync_synchronize();
threadsense = !threadsense;
- if (__sync_fetch_and_add(&count, 1) == ncores-1)
+ if (atomic_add(&count, 1) == ncores-1)
{
count = 0;
sense = threadsense;
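
The hunk above shows only the arrival check inside barrier(); as a sketch, here is how the whole sense-reversing barrier fits together, reconstructed from the visible context (the static/__thread declarations are assumptions, not shown in the diff). The last core to arrive, whose atomic_add returns ncores-1, resets the count and flips the shared sense flag, releasing every core spinning on it.

/* Reconstructed sketch of barrier(); declarations are assumed, not from the diff. */
static void __attribute__((noinline)) barrier(int ncores)
{
  static volatile int sense;        /* flipped by the last core to arrive */
  static volatile int count;        /* number of cores that have arrived */
  static __thread int threadsense;  /* this core's expected sense value */

  __sync_synchronize();

  threadsense = !threadsense;
  if (atomic_add(&count, 1) == ncores-1)
  {
    count = 0;            /* reset for the next barrier episode */
    sense = threadsense;  /* release the cores spinning below */
  }
  while (sense != threadsense)
    ;

  __sync_synchronize();
}
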
diff --git a/benchmarks/rsort/rsort.c b/benchmarks/rsort/rsort.c
index fee68e5..8b222ec 100644
--- a/benchmarks/rsort/rsort.c
+++ b/benchmarks/rsort/rsort.c
@@ -46,20 +46,20 @@ void sort(size_t n, type* arrIn, type* scratchIn)
type a1 = p[1];
type a2 = p[2];
type a3 = p[3];
- __sync_fetch_and_add(&bucket[(a0 >> log_exp) % BASE], 1);
- __sync_fetch_and_add(&bucket[(a1 >> log_exp) % BASE], 1);
- __sync_fetch_and_add(&bucket[(a2 >> log_exp) % BASE], 1);
- __sync_fetch_and_add(&bucket[(a3 >> log_exp) % BASE], 1);
+ atomic_add(&bucket[(a0 >> log_exp) % BASE], 1);
+ atomic_add(&bucket[(a1 >> log_exp) % BASE], 1);
+ atomic_add(&bucket[(a2 >> log_exp) % BASE], 1);
+ atomic_add(&bucket[(a3 >> log_exp) % BASE], 1);
}
for ( ; p < &arr[n]; p++)
bucket[(*p >> log_exp) % BASE]++;
size_t prev = bucket[0];
- prev += __sync_fetch_and_add(&bucket[1], prev);
+ prev += atomic_add(&bucket[1], prev);
for (b = &bucket[2]; b < bucket + BASE; b += 2)
{
- prev += __sync_fetch_and_add(&b[0], prev);
- prev += __sync_fetch_and_add(&b[1], prev);
+ prev += atomic_add(&b[0], prev);
+ prev += atomic_add(&b[1], prev);
}
static_assert(BASE % 2 == 0);
@@ -73,10 +73,10 @@ void sort(size_t n, type* arrIn, type* scratchIn)
size_t* pb1 = &bucket[(a1 >> log_exp) % BASE];
size_t* pb2 = &bucket[(a2 >> log_exp) % BASE];
size_t* pb3 = &bucket[(a3 >> log_exp) % BASE];
- type* s0 = scratch + __sync_fetch_and_add(pb0, -1);
- type* s1 = scratch + __sync_fetch_and_add(pb1, -1);
- type* s2 = scratch + __sync_fetch_and_add(pb2, -1);
- type* s3 = scratch + __sync_fetch_and_add(pb3, -1);
+ type* s0 = scratch + atomic_add(pb0, -1);
+ type* s1 = scratch + atomic_add(pb1, -1);
+ type* s2 = scratch + atomic_add(pb2, -1);
+ type* s3 = scratch + atomic_add(pb3, -1);
s0[-1] = a0;
s1[-1] = a1;
s2[-1] = a2;
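
A note on the prefix-sum idiom in the first rsort.c hunk: because atomic_add returns the old value, prev += atomic_add(&b[0], prev) both adds the running total into the bucket and folds the bucket's previous count into prev, turning the per-digit histogram into cumulative offsets in one pass. A small host-side sketch with made-up bucket contents:

/* Illustrative prefix-sum over a tiny histogram (values are made up). */
#include <stdio.h>

#define atomic_add(ptr, inc) __sync_fetch_and_add(ptr, inc)

int main(void)
{
  size_t bucket[4] = {3, 1, 4, 2};           /* per-digit counts */
  size_t prev = bucket[0];
  for (size_t i = 1; i < 4; i++)
    prev += atomic_add(&bucket[i], prev);    /* bucket[i] becomes cumulative */
  for (int i = 0; i < 4; i++)
    printf("%zu ", bucket[i]);               /* prints: 3 4 8 10 */
  printf("\n");
  return 0;
}

The second rsort.c hunk relies on the same return-old-value property, this time with an increment of -1, to claim the next free slot from the end of each bucket while scattering elements into the scratch array.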