author     Torvald Riegel <triegel@redhat.com>   2016-11-30 17:53:11 +0100
committer  Torvald Riegel <triegel@redhat.com>   2016-12-05 16:19:43 +0100
commit     ca6e601a9d4a72b3699cca15bad12ac1716bf49a (patch)
tree       fd761ea31c43377d02f2a097f8030411163d6905 /include/atomic.h
parent     71be79a25f1d9efeafa5c634c4499281e8c313f2 (diff)
Use C11-like atomics instead of plain memory accesses in x86 lock elision.
This uses atomic operations to access lock elision metadata that is accessed
concurrently (i.e., the adapt_count fields). The data is smaller than a word,
but it is accessed only with atomic loads and stores; therefore, we also add
support for atomic loads and stores of these smaller sizes.
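
For illustration, here is a minimal sketch of what the change means for an
adapt_count-style field. It is not the actual glibc elision code; it assumes
glibc's internal <atomic.h> is in scope and uses a hypothetical struct:

/* Sketch only: a concurrently accessed adaptation counter that is smaller
   than a word, modeled on the adapt_count fields used by x86 lock elision.
   Assumes glibc's internal <atomic.h>, which defines the macros below.  */
#include <atomic.h>
#include <stdint.h>

struct elision_data
{
  int16_t adapt_count;	/* 2 bytes; shared between threads.  */
};

/* Before this commit, such a field was read and written with plain (racy)
   memory accesses.  Afterwards, every access goes through a relaxed-MO
   atomic load or store, which the new __atomic_check_size_ls check permits
   for 1-, 2-, 4-, and (given 64b atomics) 8-byte operands.  */
static void
adapt_sketch (struct elision_data *data)
{
  int16_t c = atomic_load_relaxed (&data->adapt_count);
  if (c > 0)
    atomic_store_relaxed (&data->adapt_count, c - 1);
}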
* include/atomic.h (__atomic_check_size_ls): New.
(atomic_load_relaxed, atomic_load_acquire, atomic_store_relaxed,
atomic_store_release): Use it.
* sysdeps/x86/elide.h (ACCESS_ONCE): Remove.
(elision_adapt, ELIDE_LOCK): Use atomics.
* sysdeps/unix/sysv/linux/x86/elision-lock.c (__lll_lock_elision): Use
atomics and improve code comments.
* sysdeps/unix/sysv/linux/x86/elision-trylock.c
(__lll_trylock_elision): Likewise.
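
To put the elide.h entry above in context: ACCESS_ONCE was a volatile-cast
macro in the Linux kernel style. A volatile access keeps the compiler from
fusing or duplicating the load or store, but it has no defined place in the
C11 memory model, whereas the atomic macros do. The following is a sketch of
the replaced pattern, not the verbatim elide.h diff; the ACCESS_ONCE
definition follows the usual volatile-cast idiom and the parameter names are
illustrative:

/* Sketch, assuming glibc's internal <atomic.h>; check the pre-commit
   elide.h for the exact original macro.  */
#include <atomic.h>

#define ACCESS_ONCE(x) (* (volatile __typeof (x) *) &(x))

void
adapt_pattern_sketch (signed char *adapt_count, signed char skip)
{
  /* Before: volatile accesses, outside the C11 memory model.  */
  if (ACCESS_ONCE (*adapt_count) != skip)
    ACCESS_ONCE (*adapt_count) = skip;

  /* After: explicit relaxed-MO atomic load and store.  */
  if (atomic_load_relaxed (adapt_count) != skip)
    atomic_store_relaxed (adapt_count, skip);
}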
Diffstat (limited to 'include/atomic.h')
-rw-r--r--   include/atomic.h | 24 ++++++++++++++++++++----
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/include/atomic.h b/include/atomic.h
index c8b4664..d14cbc5 100644
--- a/include/atomic.h
+++ b/include/atomic.h
@@ -550,6 +550,20 @@ void __atomic_link_error (void);
    if (sizeof (*mem) != 4)						      \
      __atomic_link_error ();
 # endif
+/* We additionally provide 8b and 16b atomic loads and stores; we do not yet
+   need other atomic operations of such sizes, and restricting the support to
+   loads and stores makes this easier for archs that do not have native
+   support for atomic operations to less-than-word-sized data.  */
+# if __HAVE_64B_ATOMICS == 1
+# define __atomic_check_size_ls(mem) \
+   if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4)  \
+       && (sizeof (*mem) != 8))						      \
+     __atomic_link_error ();
+# else
+# define __atomic_check_size_ls(mem) \
+   if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && sizeof (*mem) != 4)   \
+     __atomic_link_error ();
+# endif
 
 # define atomic_thread_fence_acquire() \
   __atomic_thread_fence (__ATOMIC_ACQUIRE)
@@ -559,18 +573,20 @@ void __atomic_link_error (void);
   __atomic_thread_fence (__ATOMIC_SEQ_CST)
 
 # define atomic_load_relaxed(mem) \
-  ({ __atomic_check_size((mem)); __atomic_load_n ((mem), __ATOMIC_RELAXED); })
+  ({ __atomic_check_size_ls((mem)); \
+     __atomic_load_n ((mem), __ATOMIC_RELAXED); })
 # define atomic_load_acquire(mem) \
-  ({ __atomic_check_size((mem)); __atomic_load_n ((mem), __ATOMIC_ACQUIRE); })
+  ({ __atomic_check_size_ls((mem)); \
+     __atomic_load_n ((mem), __ATOMIC_ACQUIRE); })
 
 # define atomic_store_relaxed(mem, val) \
   do { \
-    __atomic_check_size((mem)); \
+    __atomic_check_size_ls((mem)); \
     __atomic_store_n ((mem), (val), __ATOMIC_RELAXED); \
   } while (0)
 # define atomic_store_release(mem, val) \
   do { \
-    __atomic_check_size((mem)); \
+    __atomic_check_size_ls((mem)); \
     __atomic_store_n ((mem), (val), __ATOMIC_RELEASE); \
  } while (0)
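
To illustrate the effect of the new check with a hypothetical example (not
part of the commit): the load and store macros now accept 1-, 2-, and 4-byte
operands, plus 8-byte operands when __HAVE_64B_ATOMICS is 1, while any other
size still fails at link time because __atomic_link_error is declared but
never defined:

/* Sketch, assuming glibc's internal <atomic.h>.  */
#include <atomic.h>
#include <stdint.h>

struct shared
{
  uint8_t flag;		/* 1 byte: newly accepted.  */
  uint16_t adapt_count;	/* 2 bytes: newly accepted.  */
  uint32_t state;	/* 4 bytes: accepted before and after.  */
};

void
size_check_sketch (struct shared *s)
{
  /* Each expansion first runs __atomic_check_size_ls, then the access.  */
  uint16_t c = atomic_load_acquire (&s->adapt_count);	/* OK: sizeof == 2.  */
  atomic_store_release (&s->state, c);			/* OK: sizeof == 4.  */
  atomic_store_relaxed (&s->flag, 1);			/* OK: sizeof == 1.  */

  /* An operand whose size is not 1, 2, 4, or 8 would instead expand to a
     call to __atomic_link_error, which has no definition, so the build
     breaks at link time rather than compiling a non-atomic access.  */
}

Note that only the load and store macros use the relaxed check; the
read-modify-write operations still go through __atomic_check_size, which is
exactly the restriction the commit message describes.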