aboutsummaryrefslogtreecommitdiff
path: root/libitm/config/alpha/cacheline.h
diff options
context:
space:
mode:
Diffstat (limited to 'libitm/config/alpha/cacheline.h')
-rw-r--r-- libitm/config/alpha/cacheline.h | 86
1 file changed, 1 insertion(+), 85 deletions(-)
diff --git a/libitm/config/alpha/cacheline.h b/libitm/config/alpha/cacheline.h
index 5e38486..611a1c9 100644
--- a/libitm/config/alpha/cacheline.h
+++ b/libitm/config/alpha/cacheline.h
@@ -33,90 +33,6 @@
// modification mask, below.
#define CACHELINE_SIZE 64
-#ifdef __alpha_bwx__
-# include "config/generic/cacheline.h"
-#else
-// If we don't have byte-word stores, then we'll never be able to
-// adjust *all* of the byte loads/stores to be truely atomic. So
-// only guarantee 4-byte aligned values atomicly stored, exactly
-// like the native system. Use byte zap instructions to accelerate
-// sub-word masked stores.
+#include "config/generic/cacheline.h"
-namespace GTM HIDDEN {
-
-// A gtm_cacheline_mask stores a modified bit for every modified byte
-// in the cacheline with which it is associated.
-typedef sized_integral<CACHELINE_SIZE / 8>::type gtm_cacheline_mask;
-
-union gtm_cacheline
-{
- // Byte access to the cacheline.
- unsigned char b[CACHELINE_SIZE] __attribute__((aligned(CACHELINE_SIZE)));
-
- // Larger sized access to the cacheline.
- uint16_t u16[CACHELINE_SIZE / sizeof(uint16_t)];
- uint32_t u32[CACHELINE_SIZE / sizeof(uint32_t)];
- uint64_t u64[CACHELINE_SIZE / sizeof(uint64_t)];
- gtm_word w[CACHELINE_SIZE / sizeof(gtm_word)];
-
- // Store S into D, but only the bytes specified by M.
- static void store_mask(uint32_t *d, uint32_t s, uint8_t m);
- static void store_mask(uint64_t *d, uint64_t s, uint8_t m);
-
- // Copy S to D, but only the bytes specified by M.
- static void copy_mask (gtm_cacheline * __restrict d,
- const gtm_cacheline * __restrict s,
- gtm_cacheline_mask m);
-
- // A write barrier to emit after (a series of) copy_mask.
- static void copy_mask_wb () { atomic_write_barrier(); }
-};
-
-inline void ALWAYS_INLINE
-gtm_cacheline::store_mask (uint32_t *d, uint32_t s, uint8_t m)
-{
- const uint8_t tm = (1 << sizeof(uint32_t)) - 1;
-
- m &= tm;
- if (__builtin_expect (m, tm))
- {
- if (__builtin_expect (m == tm, 1))
- *d = s;
- else
- *d = __builtin_alpha_zap (*d, m) | __builtin_alpha_zapnot (s, m);
- }
-}
-
-inline void ALWAYS_INLINE
-gtm_cacheline::store_mask (uint64_t *d, uint64_t s, uint8_t m)
-{
- if (__builtin_expect (m, 0xff))
- {
- if (__builtin_expect (m == 0xff, 1))
- *d = s;
- else
- {
- typedef uint32_t *p32 __attribute__((may_alias));
- p32 d32 = reinterpret_cast<p32>(d);
-
- if ((m & 0x0f) == 0x0f)
- {
- d32[0] = s;
- m &= 0xf0;
- }
- else if ((m & 0xf0) == 0xf0)
- {
- d32[1] = s >> 32;
- m &= 0x0f;
- }
-
- if (m)
- *d = __builtin_alpha_zap (*d, m) | __builtin_alpha_zapnot (s, m);
- }
- }
-}
-
-} // namespace GTM
-
-#endif // __alpha_bwx__
#endif // LIBITM_ALPHA_CACHELINE_H