author      Andi Kleen <ak@linux.intel.com>            2014-03-31 08:07:46 -0700
committer   Andi Kleen <ak@linux.intel.com>            2014-06-13 13:15:28 -0700
commit      8491ed6d70b60e4c75cdcfde10ae759898547b08
tree        f26bd7e65ec2a860474297d5c8b87d49193ca4f2   /sysdeps/x86
parent      a832bdd36203fcb37fa5ad25200ef3c1ae205efe
Add adaptive elision to rwlocks
This patch relies on the C version of the rwlocks posted earlier.
With C rwlocks it is very straightforward to do adaptive elision
using TSX. It is based on the infrastructure added earlier
for mutexes, but uses its own elision macros. The macros
are fairly general-purpose and could be used for other
elision purposes too.
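To make the mechanism concrete, here is a minimal, self-contained sketch of the same idea outside glibc: run the critical section as a memory transaction, check inside the transaction that the lock is free, and back off from elision for a while after a persistent abort. It uses the RTM intrinsics from <immintrin.h> rather than glibc's internal <hle.h>; the names toy_rwlock, toy_wrlock_elide, with_write_lock and the back-off threshold of 3 are made up for illustration, and only the write-lock path is covered. Build with -mrtm on RTM-capable hardware.

#include <immintrin.h>          /* RTM intrinsics: _xbegin, _xend, _xabort */
#include <pthread.h>
#include <stdbool.h>

#define ABORT_LOCK_BUSY 0xff    /* explicit abort code, as in the patch */

struct toy_rwlock
{
  pthread_rwlock_t lock;        /* the real lock, used as fallback; init with
                                   pthread_rwlock_init, users/adapt_count = 0 */
  int users;                    /* nonzero while the real lock is held */
  signed char adapt_count;      /* counts down after an unsuccessful elision */
};

/* Try to start an elided write-side critical section.  Returns true if we
   are now running transactionally without having taken the real lock.  */
static bool
toy_wrlock_elide (struct toy_rwlock *l)
{
  if (l->adapt_count > 0)
    {
      /* A recent elision attempt failed; skip elision for a while.  */
      l->adapt_count--;
      return false;
    }
  unsigned int status = _xbegin ();
  if (status == _XBEGIN_STARTED)
    {
      if (l->users == 0)        /* lock looks free: elide it */
        return true;
      _xabort (ABORT_LOCK_BUSY);
    }
  /* The transaction aborted.  If the hardware says a retry is pointless,
     back off from eliding this lock for the next few acquisitions.  */
  if (!(status & _XABORT_RETRY))
    l->adapt_count = 3;
  return false;
}

/* Run FN (ARG) under the write lock, elided when possible.  If the
   transaction aborts inside FN, execution rolls back into _xbegin above,
   toy_wrlock_elide returns false, and the real lock is taken instead.  */
static void
with_write_lock (struct toy_rwlock *l, void (*fn) (void *), void *arg)
{
  bool elided = toy_wrlock_elide (l);
  if (!elided)
    {
      pthread_rwlock_wrlock (&l->lock);
      l->users = 1;             /* conflicts with, and so aborts, any
                                   concurrent elided section */
    }
  fn (arg);
  if (elided)
    _xend ();                   /* commit the transaction */
  else
    {
      l->users = 0;
      pthread_rwlock_unlock (&l->lock);
    }
}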
This version is much cleaner than the earlier assembler-based
version, and in particular it implements adaptation, which makes
it safer.
I changed the behavior slightly so that no changes are needed
in the test suite and all expected behaviors are preserved
(generally at the cost of not eliding in various situations).
In particular this means the timedlock variants are not elided,
and a nested trylock aborts the enclosing transaction.
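The nested-trylock rule falls out of a hardware property: _xabort is a no-op when no transaction is active, so the abort issued on the write-trylock path only fires when the trylock runs inside an already-elided region, forcing the outer lock to be taken for real. A short self-contained illustration, again using the RTM intrinsics from <immintrin.h> (build with -mrtm on RTM-capable hardware); ABORT_NESTED_TRYLOCK is a stand-in code, not the value the patch uses:

#include <immintrin.h>
#include <stdio.h>

#define ABORT_NESTED_TRYLOCK 0xfe   /* stand-in abort code */

int
main (void)
{
  /* Outside a transaction, xabort is architecturally a no-op, so a
     non-nested trylock is not penalized by the check.  */
  _xabort (ABORT_NESTED_TRYLOCK);
  puts ("not nested: _xabort was a no-op");

  unsigned int status = _xbegin ();   /* stands for an elided outer lock */
  if (status == _XBEGIN_STARTED)
    /* A write trylock issued here is nested: abort the whole
       transaction so the outer lock gets taken for real.  */
    _xabort (ABORT_NESTED_TRYLOCK);
  else if ((status & _XABORT_EXPLICIT)
           && _XABORT_CODE (status) == ABORT_NESTED_TRYLOCK)
    puts ("nested: outer transaction aborted, fall back to real locking");

  return 0;
}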
Diffstat (limited to 'sysdeps/x86')
-rw-r--r--   sysdeps/x86/nptl/bits/pthreadtypes.h |   7
-rw-r--r--   sysdeps/x86/nptl/elide.h             | 109
2 files changed, 114 insertions, 2 deletions
diff --git a/sysdeps/x86/nptl/bits/pthreadtypes.h b/sysdeps/x86/nptl/bits/pthreadtypes.h
index b4329f6..b04c32b 100644
--- a/sysdeps/x86/nptl/bits/pthreadtypes.h
+++ b/sysdeps/x86/nptl/bits/pthreadtypes.h
@@ -184,11 +184,13 @@ typedef union
     unsigned int __nr_writers_queued;
     int __writer;
     int __shared;
-    unsigned long int __pad1;
+    signed char __rwelision;
+    unsigned char __pad1[7];
     unsigned long int __pad2;
     /* FLAGS must stay at this position in the structure to maintain
        binary compatibility.  */
     unsigned int __flags;
+# define __PTHREAD_RWLOCK_ELISION_EXTRA 0, {0, 0, 0, 0, 0, 0, 0 }
 # define __PTHREAD_RWLOCK_INT_FLAGS_SHARED 1
   } __data;
 # else
@@ -204,7 +206,8 @@ typedef union
        binary compatibility.  */
     unsigned char __flags;
     unsigned char __shared;
-    unsigned char __pad1;
+    signed char __rwelision;
+# define __PTHREAD_RWLOCK_ELISION_EXTRA 0
     unsigned char __pad2;
     int __writer;
   } __data;
diff --git a/sysdeps/x86/nptl/elide.h b/sysdeps/x86/nptl/elide.h
new file mode 100644
index 0000000..19f27e5
--- /dev/null
+++ b/sysdeps/x86/nptl/elide.h
@@ -0,0 +1,109 @@
+/* elide.h: Generic lock elision support.
+   Copyright (C) 2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+#ifndef ELIDE_H
+#define ELIDE_H 1
+
+#include <hle.h>
+#include <elision-conf.h>
+
+#define ACCESS_ONCE(x) (* (volatile typeof(x) *) &(x))
+
+/* Adapt elision with ADAPT_COUNT and STATUS and decide retries.  */
+
+static inline bool
+elision_adapt(uint8_t *adapt_count, unsigned int status)
+{
+  if (status & _XABORT_RETRY)
+    return false;
+  if ((status & _XABORT_EXPLICIT)
+      && _XABORT_CODE (status) == _ABORT_LOCK_BUSY)
+    {
+      /* Right now we skip here.  Better would be to wait a bit
+         and retry.  This likely needs some spinning.  Be careful
+         to avoid writing the lock.  */
+      if (*adapt_count != __elision_aconf.skip_lock_busy)
+        ACCESS_ONCE (*adapt_count) = __elision_aconf.skip_lock_busy;
+    }
+  /* Internal abort.  There is no chance for retry.
+     Use the normal locking and next time use lock.
+     Be careful to avoid writing to the lock.  */
+  else if (*adapt_count != __elision_aconf.skip_lock_internal_abort)
+    ACCESS_ONCE (*adapt_count) = __elision_aconf.skip_lock_internal_abort;
+  return true;
+}
+
+/* is_lock_free must be executed inside the transaction */
+
+/* Returns true if lock defined by IS_LOCK_FREE was elided.
+   ADAPT_COUNT is a pointer to per-lock state variable.  */
+
+#define ELIDE_LOCK(adapt_count, is_lock_free)                       \
+  ({                                                                \
+    int ret = 0;                                                    \
+                                                                    \
+    if ((adapt_count) <= 0)                                         \
+      {                                                             \
+        for (int i = __elision_aconf.retry_try_xbegin; i > 0; i--)  \
+          {                                                         \
+            unsigned int status;                                    \
+            if ((status = _xbegin ()) == _XBEGIN_STARTED)           \
+              {                                                     \
+                if (is_lock_free)                                   \
+                  {                                                 \
+                    ret = 1;                                        \
+                    break;                                          \
+                  }                                                 \
+                _xabort (_ABORT_LOCK_BUSY);                         \
+              }                                                     \
+            if (!elision_adapt (&(adapt_count), status))            \
+              break;                                                \
+          }                                                         \
+      }                                                             \
+    else                                                            \
+      (adapt_count)--; /* missing updates ok */                     \
+    ret;                                                            \
+  })
+
+/* Returns true if lock defined by IS_LOCK_FREE was try-elided.
+   ADAPT_COUNT is a pointer to per-lock state variable.  */
+
+#define ELIDE_TRYLOCK(adapt_count, is_lock_free, write) ({          \
+  int ret = 0;                                                      \
+  if (__elision_aconf.retry_try_xbegin > 0)                         \
+    {                                                               \
+      if (write)                                                    \
+        _xabort (_ABORT_NESTED_TRYLOCK);                            \
+      ret = ELIDE_LOCK (adapt_count, is_lock_free);                 \
+    }                                                               \
+  ret;                                                              \
+  })
+
+/* Returns true if lock defined by IS_LOCK_FREE was elided.  */
+
+#define ELIDE_UNLOCK(is_lock_free)      \
+  ({                                    \
+    int ret = 0;                        \
+    if (is_lock_free)                   \
+      {                                 \
+        _xend ();                       \
+        ret = 1;                        \
+      }                                 \
+    ret;                                \
+  })
+
+#endif
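The matching C rwlock changes are not part of this sysdeps/x86 diff. As a rough sketch of the expected call pattern only, not the actual nptl sources: the lock-free predicate is passed to ELIDE_LOCK so it is evaluated inside the transaction, and the __rwelision byte added above serves as the per-lock adaptation counter. The names elided_rwlock_wrlock, elided_rwlock_unlock, real_rwlock_wrlock and real_rwlock_unlock are hypothetical, and the fragment builds only inside glibc's tree where <elide.h> and the internal rwlock layout are visible.

#include <elide.h>

/* Write lock: try to elide first.  The predicate is checked inside the
   transaction; __rwelision throttles elision after aborts.  */
int
elided_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
  if (ELIDE_LOCK (rwlock->__data.__rwelision,
                  rwlock->__data.__lock == 0
                  && rwlock->__data.__writer == 0
                  && rwlock->__data.__nr_readers == 0))
    return 0;
  return real_rwlock_wrlock (rwlock);   /* hypothetical non-elided path */
}

/* Unlock: if the lock still looks free, we must be running an elided
   section, so ELIDE_UNLOCK commits the transaction with _xend.  */
int
elided_rwlock_unlock (pthread_rwlock_t *rwlock)
{
  if (ELIDE_UNLOCK (rwlock->__data.__writer == 0
                    && rwlock->__data.__nr_readers == 0))
    return 0;
  return real_rwlock_unlock (rwlock);   /* hypothetical non-elided path */
}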