author     John David Anglin <danglin@gcc.gnu.org>    2023-01-13 19:22:49 +0000
committer  John David Anglin <danglin@gcc.gnu.org>    2023-01-13 19:24:15 +0000
commit     cf467fb93b7b92330ddcb9c8fe7c93df45ce8e40 (patch)
tree       821c49d285046168caf441f057bb2ab1e499b87c /libgcc/config/pa/linux-atomic.c
parent     733a1b777f16cd397b43a242d9c31761f66d3da8 (diff)
Fix support for atomic loads and stores on hppa.
This change updates the atomic libcall support to fix the following
issues:
1) An internal compiler error with -fno-sync-libcalls.
2) When sync libcalls are disabled, we don't generate libcalls for
libatomic.
3) There is no sync libcall support for targets other than linux.
As a result, non-atomic stores are silently emitted for types
smaller than or equal to the word size. There are now a few atomic
libcalls in the libgcc code, so we need sync support on all
targets.
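
As an illustration of issue 3 (the caller below is hypothetical, not part of
this patch): a sequentially consistent store of a sub-word value has to be
expanded through an atomic libcall on hppa, since an ordinary store is not
atomic with respect to the compare-and-swap sequences used to emulate the
other atomic operations. With this change, when sync libcalls are disabled
such a store goes through the atomic_storeqi pattern and an
__atomic_exchange_1 libcall rather than a plain byte store.

#include <stdint.h>

uint8_t flag;	/* hypothetical shared variable */

void
set_flag (uint8_t v)
{
  /* Expanded via atomic_storeqi; with sync libcalls disabled this
     becomes an __atomic_exchange_1 libcall (resolved by libatomic)
     instead of a plain, non-atomic byte store.  */
  __atomic_store_n (&flag, v, __ATOMIC_SEQ_CST);
}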
2023-01-13 John David Anglin <danglin@gcc.gnu.org>
gcc/ChangeLog:
* config/pa/pa-linux.h (TARGET_SYNC_LIBCALL): Delete define.
* config/pa/pa.cc (pa_init_libfuncs): Use MAX_SYNC_LIBFUNC_SIZE
define.
* config/pa/pa.h (TARGET_SYNC_LIBCALLS): Use flag_sync_libcalls.
(MAX_SYNC_LIBFUNC_SIZE): Define.
(TARGET_CPU_CPP_BUILTINS): Define __SOFTFP__ when soft float is
enabled.
* config/pa/pa.md (atomic_storeqi): Emit __atomic_exchange_1
libcall when sync libcalls are disabled.
(atomic_storehi, atomic_storesi, atomic_storedi): Likewise.
(atomic_loaddi): Emit __atomic_load_8 libcall when sync libcalls
are disabled on 32-bit target.
* config/pa/pa.opt (matomic-libcalls): New option.
* doc/invoke.texi (HPPA Options): Update.
libgcc/ChangeLog:
* config.host (hppa*64*-*-linux*): Adjust tmake_file to use
pa/t-pa64-linux.
(hppa*64*-*-hpux11*): Adjust tmake_file to use pa/t-pa64-hpux
instead of pa/t-hpux and pa/t-pa64.
* config/pa/linux-atomic.c: Define u32 type.
(ATOMIC_LOAD): Define new macro to implement atomic_load_1,
atomic_load_2, atomic_load_4 and atomic_load_8. Update sync
defines to use atomic_load calls for type.
(SYNC_LOCK_LOAD_2): New macro to implement __sync_lock_load_8.
* config/pa/sync-libfuncs.c: New file.
* config/pa/t-netbsd (LIB2ADD_ST): Define.
* config/pa/t-openbsd (LIB2ADD_ST): Define.
* config/pa/t-pa64-hpux: New file.
* config/pa/t-pa64-linux: New file.
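
For reference, the new ATOMIC_LOAD macro added to linux-atomic.c (see the
diff below) wraps a naturally atomic processor load; for instance,
ATOMIC_LOAD (u16, 2) expands to roughly:

static inline u16
atomic_load_2 (volatile void *ptr)
{
  /* A halfword load is a single instruction on PA, so it is atomic.  */
  return *(volatile u16 *)ptr;
}

The 8-byte variant on 32-bit hard-float targets instead uses a
floating-point doubleword load (fldd), since no single general-register
load can fetch 64 bits atomically there.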
Diffstat (limited to 'libgcc/config/pa/linux-atomic.c')
-rw-r--r--   libgcc/config/pa/linux-atomic.c | 79
1 file changed, 62 insertions, 17 deletions
diff --git a/libgcc/config/pa/linux-atomic.c b/libgcc/config/pa/linux-atomic.c
index 10d7f42..1978e68 100644
--- a/libgcc/config/pa/linux-atomic.c
+++ b/libgcc/config/pa/linux-atomic.c
@@ -32,6 +32,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
 typedef unsigned char u8;
 typedef short unsigned int u16;
+typedef unsigned int u32;
 #ifdef __LP64__
 typedef long unsigned int u64;
 #else
@@ -115,6 +116,36 @@ __kernel_cmpxchg2 (volatile void *mem, const void *oldval, const void *newval,
 #define MASK_1 0xffu
 #define MASK_2 0xffffu
 
+/* Load value with an atomic processor load if possible.  */
+#define ATOMIC_LOAD(TYPE, WIDTH)				\
+  static inline TYPE						\
+  atomic_load_##WIDTH (volatile void *ptr)			\
+  {								\
+    return *(volatile TYPE *)ptr;				\
+  }
+
+#if defined(__LP64__) || defined(__SOFTFP__)
+ATOMIC_LOAD (u64, 8)
+#else
+static inline u64
+atomic_load_8 (volatile void *ptr)
+{
+  u64 result;
+  double tmp;
+
+  asm volatile ("{fldds|fldd} 0(%2),%1\n\t"
+		"{fstds|fstd} %1,-16(%%sp)\n\t"
+		"{ldws|ldw} -16(%%sp),%0\n\t"
+		"{ldws|ldw} -12(%%sp),%R0"
+		: "=r" (result), "=f" (tmp) : "r" (ptr): "memory");
+  return result;
+}
+#endif
+
+ATOMIC_LOAD (u32, 4)
+ATOMIC_LOAD (u16, 2)
+ATOMIC_LOAD (u8, 1)
+
 #define FETCH_AND_OP_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX)	\
   TYPE HIDDEN							\
   __sync_fetch_and_##OP##_##WIDTH (volatile void *ptr, TYPE val) \
@@ -123,7 +154,7 @@ __kernel_cmpxchg2 (volatile void *mem, const void *oldval, const void *newval,
     long failure;						\
 								\
     do {							\
-      tmp = __atomic_load_n ((volatile TYPE *)ptr, __ATOMIC_RELAXED); \
+      tmp = atomic_load_##WIDTH ((volatile TYPE *)ptr);		\
       newval = PFX_OP (tmp INF_OP val);				\
       failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX);	\
     } while (failure != 0);					\
@@ -160,7 +191,7 @@ FETCH_AND_OP_2 (nand, ~, &, u8, 1, 0)
     long failure;						\
 								\
     do {							\
-      tmp = __atomic_load_n ((volatile TYPE *)ptr, __ATOMIC_RELAXED); \
+      tmp = atomic_load_##WIDTH ((volatile TYPE *)ptr);		\
       newval = PFX_OP (tmp INF_OP val);				\
       failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX);	\
     } while (failure != 0);					\
@@ -197,8 +228,7 @@ OP_AND_FETCH_2 (nand, ~, &, u8, 1, 0)
     long failure;						\
 								\
     do {							\
-      tmp = __atomic_load_n ((volatile unsigned int *)ptr,	\
-			     __ATOMIC_RELAXED);			\
+      tmp = atomic_load_4 ((volatile unsigned int *)ptr);	\
       failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val)); \
     } while (failure != 0);					\
 								\
@@ -220,8 +250,7 @@ FETCH_AND_OP_WORD (nand, ~, &)
     long failure;						\
 								\
     do {							\
-      tmp = __atomic_load_n ((volatile unsigned int *)ptr,	\
-			     __ATOMIC_RELAXED);			\
+      tmp = atomic_load_4 ((volatile unsigned int *)ptr);	\
       failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val)); \
     } while (failure != 0);					\
 								\
@@ -247,8 +276,7 @@ typedef unsigned char bool;
 								\
     while (1)							\
       {								\
-	actual_oldval = __atomic_load_n ((volatile TYPE *)ptr,	\
-					 __ATOMIC_RELAXED);	\
+	actual_oldval = atomic_load_##WIDTH ((volatile TYPE *)ptr); \
 								\
 	if (__builtin_expect (oldval != actual_oldval, 0))	\
 	  return actual_oldval;					\
@@ -281,8 +309,7 @@ __sync_val_compare_and_swap_4 (volatile void *ptr, unsigned int oldval,
 
   while (1)
     {
-      actual_oldval = __atomic_load_n ((volatile unsigned int *)ptr,
-				       __ATOMIC_RELAXED);
+      actual_oldval = atomic_load_4 ((volatile unsigned int *)ptr);
 
       if (__builtin_expect (oldval != actual_oldval, 0))
 	return actual_oldval;
@@ -310,8 +337,7 @@ TYPE HIDDEN						\
     long failure;						\
 								\
     do {							\
-      oldval = __atomic_load_n ((volatile TYPE *)ptr,		\
-				__ATOMIC_RELAXED);		\
+      oldval = atomic_load_##WIDTH ((volatile TYPE *)ptr);	\
       failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX);	\
     } while (failure != 0);					\
 								\
@@ -322,14 +348,14 @@ SYNC_LOCK_TEST_AND_SET_2 (u64, 8, 3)
 SYNC_LOCK_TEST_AND_SET_2 (u16, 2, 1)
 SYNC_LOCK_TEST_AND_SET_2 (u8, 1, 0)
 
-unsigned int HIDDEN
+u32 HIDDEN
 __sync_lock_test_and_set_4 (volatile void *ptr, unsigned int val)
 {
   long failure;
   unsigned int oldval;
 
   do {
-    oldval = __atomic_load_n ((volatile unsigned int *)ptr, __ATOMIC_RELAXED);
+    oldval = atomic_load_4 ((volatile unsigned int *)ptr);
     failure = __kernel_cmpxchg (ptr, oldval, val);
   } while (failure != 0);
 
@@ -344,8 +370,7 @@ __sync_lock_test_and_set_4 (volatile void *ptr, unsigned int val)
     long failure;						\
 								\
     do {							\
-      oldval = __atomic_load_n ((volatile TYPE *)ptr,		\
-				__ATOMIC_RELAXED);		\
+      oldval = atomic_load_##WIDTH ((volatile TYPE *)ptr);	\
       failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX);	\
     } while (failure != 0);					\
   }
@@ -361,7 +386,27 @@ __sync_lock_release_4 (volatile void *ptr)
   unsigned int oldval;
 
   do {
-    oldval = __atomic_load_n ((volatile unsigned int *)ptr, __ATOMIC_RELAXED);
+    oldval = atomic_load_4 ((volatile unsigned int *)ptr);
     failure = __kernel_cmpxchg (ptr, oldval, 0);
  } while (failure != 0);
 }
+
+#ifndef __LP64__
+#define SYNC_LOCK_LOAD_2(TYPE, WIDTH, INDEX)			\
+  TYPE __sync_lock_load_##WIDTH (volatile void *) HIDDEN;	\
+  TYPE								\
+  __sync_lock_load_##WIDTH (volatile void *ptr)			\
+  {								\
+    TYPE oldval;						\
+    long failure;						\
+								\
+    do {							\
+      oldval = atomic_load_##WIDTH ((volatile TYPE *)ptr);	\
+      failure = __kernel_cmpxchg2 (ptr, &oldval, &oldval, INDEX); \
+    } while (failure != 0);					\
+								\
+    return oldval;						\
+  }
+
+SYNC_LOCK_LOAD_2 (u64, 8, 3)
+#endif
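
A minimal usage sketch of the 64-bit load path touched by the atomic_loaddi
change above (the caller below is hypothetical, not part of the patch): on a
32-bit hppa target a 64-bit atomic load cannot be a single integer load, so
it is expanded either through an atomic doubleword load or, when sync
libcalls are disabled, through an __atomic_load_8 libcall.

#include <stdint.h>

uint64_t shared_counter;	/* hypothetical shared variable */

uint64_t
read_counter (void)
{
  /* On 32-bit hppa this must not be split into two separate word
     loads; it is expanded through atomic_loaddi, using either an
     atomic doubleword load or an __atomic_load_8 libcall.  */
  return __atomic_load_n (&shared_counter, __ATOMIC_RELAXED);
}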