From de071d199a8578055edf2722114788ae749823aa Mon Sep 17 00:00:00 2001
From: Joseph Myers
Date: Fri, 11 Sep 2015 20:00:19 +0000
Subject: Move bits/atomic.h to atomic-machine.h (bug 14912).

It was noted that the bits/*.h naming scheme should only be used for
installed headers.  This patch renames bits/atomic.h to atomic-machine.h
to follow that convention.

This is the only change in this series that needs to change the filename
rather than simply removing a directory level (because both atomic.h and
bits/atomic.h exist at present).

Tested for x86_64 (testsuite, and that installed stripped shared
libraries are unchanged by the patch).

        [BZ #14912]
        * sysdeps/aarch64/bits/atomic.h: Move to ...
        * sysdeps/aarch64/atomic-machine.h: ...here.
        (_AARCH64_BITS_ATOMIC_H): Rename macro to _AARCH64_ATOMIC_MACHINE_H.
        * sysdeps/alpha/bits/atomic.h: Move to ...
        * sysdeps/alpha/atomic-machine.h: ...here.
        * sysdeps/arm/bits/atomic.h: Move to ...
        * sysdeps/arm/atomic-machine.h: ...here.  Update comments.
        * bits/atomic.h: Move to ...
        * sysdeps/generic/atomic-machine.h: ...here.
        (_BITS_ATOMIC_H): Rename macro to _ATOMIC_MACHINE_H.
        * sysdeps/i386/bits/atomic.h: Move to ...
        * sysdeps/i386/atomic-machine.h: ...here.
        * sysdeps/ia64/bits/atomic.h: Move to ...
        * sysdeps/ia64/atomic-machine.h: ...here.
        * sysdeps/m68k/coldfire/bits/atomic.h: Move to ...
        * sysdeps/m68k/coldfire/atomic-machine.h: ...here.
        (_BITS_ATOMIC_H): Rename macro to _ATOMIC_MACHINE_H.
        * sysdeps/m68k/m680x0/m68020/bits/atomic.h: Move to ...
        * sysdeps/m68k/m680x0/m68020/atomic-machine.h: ...here.
        * sysdeps/microblaze/bits/atomic.h: Move to ...
        * sysdeps/microblaze/atomic-machine.h: ...here.
        * sysdeps/mips/bits/atomic.h: Move to ...
        * sysdeps/mips/atomic-machine.h: ...here.
        (_MIPS_BITS_ATOMIC_H): Rename macro to _MIPS_ATOMIC_MACHINE_H.
        * sysdeps/powerpc/bits/atomic.h: Move to ...
        * sysdeps/powerpc/atomic-machine.h: ...here.  Update comments.
        * sysdeps/powerpc/powerpc32/bits/atomic.h: Move to ...
        * sysdeps/powerpc/powerpc32/atomic-machine.h: ...here.  Update
        comments.  Include <atomic-machine.h> instead of <bits/atomic.h>.
        * sysdeps/powerpc/powerpc64/bits/atomic.h: Move to ...
        * sysdeps/powerpc/powerpc64/atomic-machine.h: ...here.  Include
        <atomic-machine.h> instead of <bits/atomic.h>.
        * sysdeps/s390/bits/atomic.h: Move to ...
        * sysdeps/s390/atomic-machine.h: ...here.
        * sysdeps/sparc/sparc32/bits/atomic.h: Move to ...
        * sysdeps/sparc/sparc32/atomic-machine.h: ...here.
        (_BITS_ATOMIC_H): Rename macro to _ATOMIC_MACHINE_H.
        * sysdeps/sparc/sparc32/sparcv9/bits/atomic.h: Move to ...
        * sysdeps/sparc/sparc32/sparcv9/atomic-machine.h: ...here.
        * sysdeps/sparc/sparc64/bits/atomic.h: Move to ...
        * sysdeps/sparc/sparc64/atomic-machine.h: ...here.
        * sysdeps/tile/bits/atomic.h: Move to ...
        * sysdeps/tile/atomic-machine.h: ...here.
        * sysdeps/tile/tilegx/bits/atomic.h: Move to ...
        * sysdeps/tile/tilegx/atomic-machine.h: ...here.  Include
        <atomic-machine.h> instead of <bits/atomic.h>.
        (_BITS_ATOMIC_H): Rename macro to _ATOMIC_MACHINE_H.
        * sysdeps/tile/tilepro/bits/atomic.h: Move to ...
        * sysdeps/tile/tilepro/atomic-machine.h: ...here.  Include
        <atomic-machine.h> instead of <bits/atomic.h>.
        (_BITS_ATOMIC_H): Rename macro to _ATOMIC_MACHINE_H.
        * sysdeps/unix/sysv/linux/arm/bits/atomic.h: Move to ...
        * sysdeps/unix/sysv/linux/arm/atomic-machine.h: ...here.  Include
        <atomic-machine.h> instead of <bits/atomic.h>.
        * sysdeps/unix/sysv/linux/hppa/bits/atomic.h: Move to ...
        * sysdeps/unix/sysv/linux/hppa/atomic-machine.h: ...here.
        (_BITS_ATOMIC_H): Rename macro to _ATOMIC_MACHINE_H.
        * sysdeps/unix/sysv/linux/m68k/coldfire/bits/atomic.h: Move to ...
        * sysdeps/unix/sysv/linux/m68k/coldfire/atomic-machine.h: ...here.
        (_BITS_ATOMIC_H): Rename macro to _ATOMIC_MACHINE_H.
        * sysdeps/unix/sysv/linux/nios2/bits/atomic.h: Move to ...
        * sysdeps/unix/sysv/linux/nios2/atomic-machine.h: ...here.
        (_NIOS2_BITS_ATOMIC_H): Rename macro to _NIOS2_ATOMIC_MACHINE_H.
        * sysdeps/unix/sysv/linux/sh/bits/atomic.h: Move to ...
        * sysdeps/unix/sysv/linux/sh/atomic-machine.h: ...here.
        * sysdeps/x86_64/bits/atomic.h: Move to ...
        * sysdeps/x86_64/atomic-machine.h: ...here.
        * include/atomic.h: Include <atomic-machine.h> instead of
        <bits/atomic.h>.
---
 sysdeps/sparc/sparc32/atomic-machine.h         | 360 +++++++++++++++++++++++++
 sysdeps/sparc/sparc32/bits/atomic.h            | 360 -------------------------
 sysdeps/sparc/sparc32/sparcv9/atomic-machine.h | 105 ++++++++
 sysdeps/sparc/sparc32/sparcv9/bits/atomic.h    | 105 --------
 sysdeps/sparc/sparc64/atomic-machine.h         | 126 +++++++++
 sysdeps/sparc/sparc64/bits/atomic.h            | 126 ---------
 6 files changed, 591 insertions(+), 591 deletions(-)
 create mode 100644 sysdeps/sparc/sparc32/atomic-machine.h
 delete mode 100644 sysdeps/sparc/sparc32/bits/atomic.h
 create mode 100644 sysdeps/sparc/sparc32/sparcv9/atomic-machine.h
 delete mode 100644 sysdeps/sparc/sparc32/sparcv9/bits/atomic.h
 create mode 100644 sysdeps/sparc/sparc64/atomic-machine.h
 delete mode 100644 sysdeps/sparc/sparc64/bits/atomic.h

(limited to 'sysdeps/sparc')

diff --git a/sysdeps/sparc/sparc32/atomic-machine.h b/sysdeps/sparc/sparc32/atomic-machine.h
new file mode 100644
index 0000000..0fb1111
--- /dev/null
+++ b/sysdeps/sparc/sparc32/atomic-machine.h
@@ -0,0 +1,360 @@
+/* Atomic operations.  sparc32 version.
+   Copyright (C) 2003-2015 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jakub Jelinek, 2003.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _ATOMIC_MACHINE_H
+#define _ATOMIC_MACHINE_H 1
+
+#include <stdint.h>
+
+typedef int8_t atomic8_t;
+typedef uint8_t uatomic8_t;
+typedef int_fast8_t atomic_fast8_t;
+typedef uint_fast8_t uatomic_fast8_t;
+
+typedef int16_t atomic16_t;
+typedef uint16_t uatomic16_t;
+typedef int_fast16_t atomic_fast16_t;
+typedef uint_fast16_t uatomic_fast16_t;
+
+typedef int32_t atomic32_t;
+typedef uint32_t uatomic32_t;
+typedef int_fast32_t atomic_fast32_t;
+typedef uint_fast32_t uatomic_fast32_t;
+
+typedef int64_t atomic64_t;
+typedef uint64_t uatomic64_t;
+typedef int_fast64_t atomic_fast64_t;
+typedef uint_fast64_t uatomic_fast64_t;
+
+typedef intptr_t atomicptr_t;
+typedef uintptr_t uatomicptr_t;
+typedef intmax_t atomic_max_t;
+typedef uintmax_t uatomic_max_t;
+
+#define __HAVE_64B_ATOMICS 0
+#define USE_ATOMIC_COMPILER_BUILTINS 0
+
+
+/* We have no compare and swap, just test and set.
+   The following implementation contends on 64 global locks
+   per library and assumes no variable will be accessed using atomic.h
+   macros from two different libraries.
*/ + +__make_section_unallocated + (".gnu.linkonce.b.__sparc32_atomic_locks, \"aw\", %nobits"); + +volatile unsigned char __sparc32_atomic_locks[64] + __attribute__ ((nocommon, section (".gnu.linkonce.b.__sparc32_atomic_locks" + __sec_comment), + visibility ("hidden"))); + +#define __sparc32_atomic_do_lock(addr) \ + do \ + { \ + unsigned int __old_lock; \ + unsigned int __idx = (((long) addr >> 2) ^ ((long) addr >> 12)) \ + & 63; \ + do \ + __asm __volatile ("ldstub %1, %0" \ + : "=r" (__old_lock), \ + "=m" (__sparc32_atomic_locks[__idx]) \ + : "m" (__sparc32_atomic_locks[__idx]) \ + : "memory"); \ + while (__old_lock); \ + } \ + while (0) + +#define __sparc32_atomic_do_unlock(addr) \ + do \ + { \ + __sparc32_atomic_locks[(((long) addr >> 2) \ + ^ ((long) addr >> 12)) & 63] = 0; \ + __asm __volatile ("" ::: "memory"); \ + } \ + while (0) + +#define __sparc32_atomic_do_lock24(addr) \ + do \ + { \ + unsigned int __old_lock; \ + do \ + __asm __volatile ("ldstub %1, %0" \ + : "=r" (__old_lock), "=m" (*(addr)) \ + : "m" (*(addr)) \ + : "memory"); \ + while (__old_lock); \ + } \ + while (0) + +#define __sparc32_atomic_do_unlock24(addr) \ + do \ + { \ + __asm __volatile ("" ::: "memory"); \ + *(char *) (addr) = 0; \ + } \ + while (0) + + +#ifndef SHARED +# define __v9_compare_and_exchange_val_32_acq(mem, newval, oldval) \ +({union { __typeof (oldval) a; uint32_t v; } oldval_arg = { .a = (oldval) }; \ + union { __typeof (newval) a; uint32_t v; } newval_arg = { .a = (newval) }; \ + register uint32_t __acev_tmp __asm ("%g6"); \ + register __typeof (mem) __acev_mem __asm ("%g1") = (mem); \ + register uint32_t __acev_oldval __asm ("%g5"); \ + __acev_tmp = newval_arg.v; \ + __acev_oldval = oldval_arg.v; \ + /* .word 0xcde05005 is cas [%g1], %g5, %g6. Can't use cas here though, \ + because as will then mark the object file as V8+ arch. */ \ + __asm __volatile (".word 0xcde05005" \ + : "+r" (__acev_tmp), "=m" (*__acev_mem) \ + : "r" (__acev_oldval), "m" (*__acev_mem), \ + "r" (__acev_mem) : "memory"); \ + (__typeof (oldval)) __acev_tmp; }) +#endif + +/* The only basic operation needed is compare and exchange. 
*/ +#define __v7_compare_and_exchange_val_acq(mem, newval, oldval) \ + ({ __typeof (mem) __acev_memp = (mem); \ + __typeof (*mem) __acev_ret; \ + __typeof (*mem) __acev_newval = (newval); \ + \ + __sparc32_atomic_do_lock (__acev_memp); \ + __acev_ret = *__acev_memp; \ + if (__acev_ret == (oldval)) \ + *__acev_memp = __acev_newval; \ + __sparc32_atomic_do_unlock (__acev_memp); \ + __acev_ret; }) + +#define __v7_compare_and_exchange_bool_acq(mem, newval, oldval) \ + ({ __typeof (mem) __aceb_memp = (mem); \ + int __aceb_ret; \ + __typeof (*mem) __aceb_newval = (newval); \ + \ + __sparc32_atomic_do_lock (__aceb_memp); \ + __aceb_ret = 0; \ + if (*__aceb_memp == (oldval)) \ + *__aceb_memp = __aceb_newval; \ + else \ + __aceb_ret = 1; \ + __sparc32_atomic_do_unlock (__aceb_memp); \ + __aceb_ret; }) + +#define __v7_exchange_acq(mem, newval) \ + ({ __typeof (mem) __acev_memp = (mem); \ + __typeof (*mem) __acev_ret; \ + __typeof (*mem) __acev_newval = (newval); \ + \ + __sparc32_atomic_do_lock (__acev_memp); \ + __acev_ret = *__acev_memp; \ + *__acev_memp = __acev_newval; \ + __sparc32_atomic_do_unlock (__acev_memp); \ + __acev_ret; }) + +#define __v7_exchange_and_add(mem, value) \ + ({ __typeof (mem) __acev_memp = (mem); \ + __typeof (*mem) __acev_ret; \ + \ + __sparc32_atomic_do_lock (__acev_memp); \ + __acev_ret = *__acev_memp; \ + *__acev_memp = __acev_ret + (value); \ + __sparc32_atomic_do_unlock (__acev_memp); \ + __acev_ret; }) + +/* Special versions, which guarantee that top 8 bits of all values + are cleared and use those bits as the ldstub lock. */ +#define __v7_compare_and_exchange_val_24_acq(mem, newval, oldval) \ + ({ __typeof (mem) __acev_memp = (mem); \ + __typeof (*mem) __acev_ret; \ + __typeof (*mem) __acev_newval = (newval); \ + \ + __sparc32_atomic_do_lock24 (__acev_memp); \ + __acev_ret = *__acev_memp & 0xffffff; \ + if (__acev_ret == (oldval)) \ + *__acev_memp = __acev_newval; \ + else \ + __sparc32_atomic_do_unlock24 (__acev_memp); \ + __asm __volatile ("" ::: "memory"); \ + __acev_ret; }) + +#define __v7_exchange_24_rel(mem, newval) \ + ({ __typeof (mem) __acev_memp = (mem); \ + __typeof (*mem) __acev_ret; \ + __typeof (*mem) __acev_newval = (newval); \ + \ + __sparc32_atomic_do_lock24 (__acev_memp); \ + __acev_ret = *__acev_memp & 0xffffff; \ + *__acev_memp = __acev_newval; \ + __asm __volatile ("" ::: "memory"); \ + __acev_ret; }) + +#ifdef SHARED + +/* When dynamically linked, we assume pre-v9 libraries are only ever + used on pre-v9 CPU. */ +# define __atomic_is_v9 0 + +# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ + __v7_compare_and_exchange_val_acq (mem, newval, oldval) + +# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ + __v7_compare_and_exchange_bool_acq (mem, newval, oldval) + +# define atomic_exchange_acq(mem, newval) \ + __v7_exchange_acq (mem, newval) + +# define atomic_exchange_and_add(mem, value) \ + __v7_exchange_and_add (mem, value) + +# define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ + ({ \ + if (sizeof (*mem) != 4) \ + abort (); \ + __v7_compare_and_exchange_val_24_acq (mem, newval, oldval); }) + +# define atomic_exchange_24_rel(mem, newval) \ + ({ \ + if (sizeof (*mem) != 4) \ + abort (); \ + __v7_exchange_24_rel (mem, newval); }) + +# define atomic_full_barrier() __asm ("" ::: "memory") +# define atomic_read_barrier() atomic_full_barrier () +# define atomic_write_barrier() atomic_full_barrier () + +#else + +/* In libc.a/libpthread.a etc. we don't know if we'll be run on + pre-v9 or v9 CPU. 
To be interoperable with dynamically linked + apps on v9 CPUs e.g. with process shared primitives, use cas insn + on v9 CPUs and ldstub on pre-v9. */ + +extern uint64_t _dl_hwcap __attribute__((weak)); +# define __atomic_is_v9 \ + (__builtin_expect (&_dl_hwcap != 0, 1) \ + && __builtin_expect (_dl_hwcap & HWCAP_SPARC_V9, HWCAP_SPARC_V9)) + +# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ + ({ \ + __typeof (*mem) __acev_wret; \ + if (sizeof (*mem) != 4) \ + abort (); \ + if (__atomic_is_v9) \ + __acev_wret \ + = __v9_compare_and_exchange_val_32_acq (mem, newval, oldval);\ + else \ + __acev_wret \ + = __v7_compare_and_exchange_val_acq (mem, newval, oldval); \ + __acev_wret; }) + +# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ + ({ \ + int __acev_wret; \ + if (sizeof (*mem) != 4) \ + abort (); \ + if (__atomic_is_v9) \ + { \ + __typeof (oldval) __acev_woldval = (oldval); \ + __acev_wret \ + = __v9_compare_and_exchange_val_32_acq (mem, newval, \ + __acev_woldval) \ + != __acev_woldval; \ + } \ + else \ + __acev_wret \ + = __v7_compare_and_exchange_bool_acq (mem, newval, oldval); \ + __acev_wret; }) + +# define atomic_exchange_rel(mem, newval) \ + ({ \ + __typeof (*mem) __acev_wret; \ + if (sizeof (*mem) != 4) \ + abort (); \ + if (__atomic_is_v9) \ + { \ + __typeof (mem) __acev_wmemp = (mem); \ + __typeof (*(mem)) __acev_wval = (newval); \ + do \ + __acev_wret = *__acev_wmemp; \ + while (__builtin_expect \ + (__v9_compare_and_exchange_val_32_acq (__acev_wmemp,\ + __acev_wval, \ + __acev_wret) \ + != __acev_wret, 0)); \ + } \ + else \ + __acev_wret = __v7_exchange_acq (mem, newval); \ + __acev_wret; }) + +# define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ + ({ \ + __typeof (*mem) __acev_wret; \ + if (sizeof (*mem) != 4) \ + abort (); \ + if (__atomic_is_v9) \ + __acev_wret \ + = __v9_compare_and_exchange_val_32_acq (mem, newval, oldval);\ + else \ + __acev_wret \ + = __v7_compare_and_exchange_val_24_acq (mem, newval, oldval);\ + __acev_wret; }) + +# define atomic_exchange_24_rel(mem, newval) \ + ({ \ + __typeof (*mem) __acev_w24ret; \ + if (sizeof (*mem) != 4) \ + abort (); \ + if (__atomic_is_v9) \ + __acev_w24ret = atomic_exchange_rel (mem, newval); \ + else \ + __acev_w24ret = __v7_exchange_24_rel (mem, newval); \ + __acev_w24ret; }) + +#define atomic_full_barrier() \ + do { \ + if (__atomic_is_v9) \ + /* membar #LoadLoad | #LoadStore | #StoreLoad | #StoreStore */ \ + __asm __volatile (".word 0x8143e00f" : : : "memory"); \ + else \ + __asm __volatile ("" : : : "memory"); \ + } while (0) + +#define atomic_read_barrier() \ + do { \ + if (__atomic_is_v9) \ + /* membar #LoadLoad | #LoadStore */ \ + __asm __volatile (".word 0x8143e005" : : : "memory"); \ + else \ + __asm __volatile ("" : : : "memory"); \ + } while (0) + +#define atomic_write_barrier() \ + do { \ + if (__atomic_is_v9) \ + /* membar #LoadStore | #StoreStore */ \ + __asm __volatile (".word 0x8143e00c" : : : "memory"); \ + else \ + __asm __volatile ("" : : : "memory"); \ + } while (0) + +#endif + +#include + +#endif /* atomic-machine.h */ diff --git a/sysdeps/sparc/sparc32/bits/atomic.h b/sysdeps/sparc/sparc32/bits/atomic.h deleted file mode 100644 index 4242ba8..0000000 --- a/sysdeps/sparc/sparc32/bits/atomic.h +++ /dev/null @@ -1,360 +0,0 @@ -/* Atomic operations. sparc32 version. - Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Jakub Jelinek , 2003. 
- - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#ifndef _BITS_ATOMIC_H -#define _BITS_ATOMIC_H 1 - -#include - -typedef int8_t atomic8_t; -typedef uint8_t uatomic8_t; -typedef int_fast8_t atomic_fast8_t; -typedef uint_fast8_t uatomic_fast8_t; - -typedef int16_t atomic16_t; -typedef uint16_t uatomic16_t; -typedef int_fast16_t atomic_fast16_t; -typedef uint_fast16_t uatomic_fast16_t; - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef int64_t atomic64_t; -typedef uint64_t uatomic64_t; -typedef int_fast64_t atomic_fast64_t; -typedef uint_fast64_t uatomic_fast64_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -#define __HAVE_64B_ATOMICS 0 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - - -/* We have no compare and swap, just test and set. - The following implementation contends on 64 global locks - per library and assumes no variable will be accessed using atomic.h - macros from two different libraries. */ - -__make_section_unallocated - (".gnu.linkonce.b.__sparc32_atomic_locks, \"aw\", %nobits"); - -volatile unsigned char __sparc32_atomic_locks[64] - __attribute__ ((nocommon, section (".gnu.linkonce.b.__sparc32_atomic_locks" - __sec_comment), - visibility ("hidden"))); - -#define __sparc32_atomic_do_lock(addr) \ - do \ - { \ - unsigned int __old_lock; \ - unsigned int __idx = (((long) addr >> 2) ^ ((long) addr >> 12)) \ - & 63; \ - do \ - __asm __volatile ("ldstub %1, %0" \ - : "=r" (__old_lock), \ - "=m" (__sparc32_atomic_locks[__idx]) \ - : "m" (__sparc32_atomic_locks[__idx]) \ - : "memory"); \ - while (__old_lock); \ - } \ - while (0) - -#define __sparc32_atomic_do_unlock(addr) \ - do \ - { \ - __sparc32_atomic_locks[(((long) addr >> 2) \ - ^ ((long) addr >> 12)) & 63] = 0; \ - __asm __volatile ("" ::: "memory"); \ - } \ - while (0) - -#define __sparc32_atomic_do_lock24(addr) \ - do \ - { \ - unsigned int __old_lock; \ - do \ - __asm __volatile ("ldstub %1, %0" \ - : "=r" (__old_lock), "=m" (*(addr)) \ - : "m" (*(addr)) \ - : "memory"); \ - while (__old_lock); \ - } \ - while (0) - -#define __sparc32_atomic_do_unlock24(addr) \ - do \ - { \ - __asm __volatile ("" ::: "memory"); \ - *(char *) (addr) = 0; \ - } \ - while (0) - - -#ifndef SHARED -# define __v9_compare_and_exchange_val_32_acq(mem, newval, oldval) \ -({union { __typeof (oldval) a; uint32_t v; } oldval_arg = { .a = (oldval) }; \ - union { __typeof (newval) a; uint32_t v; } newval_arg = { .a = (newval) }; \ - register uint32_t __acev_tmp __asm ("%g6"); \ - register __typeof (mem) __acev_mem __asm ("%g1") = (mem); \ - register uint32_t __acev_oldval __asm ("%g5"); \ - __acev_tmp = newval_arg.v; \ - __acev_oldval = oldval_arg.v; \ - /* .word 0xcde05005 is cas [%g1], %g5, %g6. Can't use cas here though, \ - because as will then mark the object file as V8+ arch. 
*/ \ - __asm __volatile (".word 0xcde05005" \ - : "+r" (__acev_tmp), "=m" (*__acev_mem) \ - : "r" (__acev_oldval), "m" (*__acev_mem), \ - "r" (__acev_mem) : "memory"); \ - (__typeof (oldval)) __acev_tmp; }) -#endif - -/* The only basic operation needed is compare and exchange. */ -#define __v7_compare_and_exchange_val_acq(mem, newval, oldval) \ - ({ __typeof (mem) __acev_memp = (mem); \ - __typeof (*mem) __acev_ret; \ - __typeof (*mem) __acev_newval = (newval); \ - \ - __sparc32_atomic_do_lock (__acev_memp); \ - __acev_ret = *__acev_memp; \ - if (__acev_ret == (oldval)) \ - *__acev_memp = __acev_newval; \ - __sparc32_atomic_do_unlock (__acev_memp); \ - __acev_ret; }) - -#define __v7_compare_and_exchange_bool_acq(mem, newval, oldval) \ - ({ __typeof (mem) __aceb_memp = (mem); \ - int __aceb_ret; \ - __typeof (*mem) __aceb_newval = (newval); \ - \ - __sparc32_atomic_do_lock (__aceb_memp); \ - __aceb_ret = 0; \ - if (*__aceb_memp == (oldval)) \ - *__aceb_memp = __aceb_newval; \ - else \ - __aceb_ret = 1; \ - __sparc32_atomic_do_unlock (__aceb_memp); \ - __aceb_ret; }) - -#define __v7_exchange_acq(mem, newval) \ - ({ __typeof (mem) __acev_memp = (mem); \ - __typeof (*mem) __acev_ret; \ - __typeof (*mem) __acev_newval = (newval); \ - \ - __sparc32_atomic_do_lock (__acev_memp); \ - __acev_ret = *__acev_memp; \ - *__acev_memp = __acev_newval; \ - __sparc32_atomic_do_unlock (__acev_memp); \ - __acev_ret; }) - -#define __v7_exchange_and_add(mem, value) \ - ({ __typeof (mem) __acev_memp = (mem); \ - __typeof (*mem) __acev_ret; \ - \ - __sparc32_atomic_do_lock (__acev_memp); \ - __acev_ret = *__acev_memp; \ - *__acev_memp = __acev_ret + (value); \ - __sparc32_atomic_do_unlock (__acev_memp); \ - __acev_ret; }) - -/* Special versions, which guarantee that top 8 bits of all values - are cleared and use those bits as the ldstub lock. */ -#define __v7_compare_and_exchange_val_24_acq(mem, newval, oldval) \ - ({ __typeof (mem) __acev_memp = (mem); \ - __typeof (*mem) __acev_ret; \ - __typeof (*mem) __acev_newval = (newval); \ - \ - __sparc32_atomic_do_lock24 (__acev_memp); \ - __acev_ret = *__acev_memp & 0xffffff; \ - if (__acev_ret == (oldval)) \ - *__acev_memp = __acev_newval; \ - else \ - __sparc32_atomic_do_unlock24 (__acev_memp); \ - __asm __volatile ("" ::: "memory"); \ - __acev_ret; }) - -#define __v7_exchange_24_rel(mem, newval) \ - ({ __typeof (mem) __acev_memp = (mem); \ - __typeof (*mem) __acev_ret; \ - __typeof (*mem) __acev_newval = (newval); \ - \ - __sparc32_atomic_do_lock24 (__acev_memp); \ - __acev_ret = *__acev_memp & 0xffffff; \ - *__acev_memp = __acev_newval; \ - __asm __volatile ("" ::: "memory"); \ - __acev_ret; }) - -#ifdef SHARED - -/* When dynamically linked, we assume pre-v9 libraries are only ever - used on pre-v9 CPU. 
*/ -# define __atomic_is_v9 0 - -# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ - __v7_compare_and_exchange_val_acq (mem, newval, oldval) - -# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ - __v7_compare_and_exchange_bool_acq (mem, newval, oldval) - -# define atomic_exchange_acq(mem, newval) \ - __v7_exchange_acq (mem, newval) - -# define atomic_exchange_and_add(mem, value) \ - __v7_exchange_and_add (mem, value) - -# define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ - ({ \ - if (sizeof (*mem) != 4) \ - abort (); \ - __v7_compare_and_exchange_val_24_acq (mem, newval, oldval); }) - -# define atomic_exchange_24_rel(mem, newval) \ - ({ \ - if (sizeof (*mem) != 4) \ - abort (); \ - __v7_exchange_24_rel (mem, newval); }) - -# define atomic_full_barrier() __asm ("" ::: "memory") -# define atomic_read_barrier() atomic_full_barrier () -# define atomic_write_barrier() atomic_full_barrier () - -#else - -/* In libc.a/libpthread.a etc. we don't know if we'll be run on - pre-v9 or v9 CPU. To be interoperable with dynamically linked - apps on v9 CPUs e.g. with process shared primitives, use cas insn - on v9 CPUs and ldstub on pre-v9. */ - -extern uint64_t _dl_hwcap __attribute__((weak)); -# define __atomic_is_v9 \ - (__builtin_expect (&_dl_hwcap != 0, 1) \ - && __builtin_expect (_dl_hwcap & HWCAP_SPARC_V9, HWCAP_SPARC_V9)) - -# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ - ({ \ - __typeof (*mem) __acev_wret; \ - if (sizeof (*mem) != 4) \ - abort (); \ - if (__atomic_is_v9) \ - __acev_wret \ - = __v9_compare_and_exchange_val_32_acq (mem, newval, oldval);\ - else \ - __acev_wret \ - = __v7_compare_and_exchange_val_acq (mem, newval, oldval); \ - __acev_wret; }) - -# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \ - ({ \ - int __acev_wret; \ - if (sizeof (*mem) != 4) \ - abort (); \ - if (__atomic_is_v9) \ - { \ - __typeof (oldval) __acev_woldval = (oldval); \ - __acev_wret \ - = __v9_compare_and_exchange_val_32_acq (mem, newval, \ - __acev_woldval) \ - != __acev_woldval; \ - } \ - else \ - __acev_wret \ - = __v7_compare_and_exchange_bool_acq (mem, newval, oldval); \ - __acev_wret; }) - -# define atomic_exchange_rel(mem, newval) \ - ({ \ - __typeof (*mem) __acev_wret; \ - if (sizeof (*mem) != 4) \ - abort (); \ - if (__atomic_is_v9) \ - { \ - __typeof (mem) __acev_wmemp = (mem); \ - __typeof (*(mem)) __acev_wval = (newval); \ - do \ - __acev_wret = *__acev_wmemp; \ - while (__builtin_expect \ - (__v9_compare_and_exchange_val_32_acq (__acev_wmemp,\ - __acev_wval, \ - __acev_wret) \ - != __acev_wret, 0)); \ - } \ - else \ - __acev_wret = __v7_exchange_acq (mem, newval); \ - __acev_wret; }) - -# define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ - ({ \ - __typeof (*mem) __acev_wret; \ - if (sizeof (*mem) != 4) \ - abort (); \ - if (__atomic_is_v9) \ - __acev_wret \ - = __v9_compare_and_exchange_val_32_acq (mem, newval, oldval);\ - else \ - __acev_wret \ - = __v7_compare_and_exchange_val_24_acq (mem, newval, oldval);\ - __acev_wret; }) - -# define atomic_exchange_24_rel(mem, newval) \ - ({ \ - __typeof (*mem) __acev_w24ret; \ - if (sizeof (*mem) != 4) \ - abort (); \ - if (__atomic_is_v9) \ - __acev_w24ret = atomic_exchange_rel (mem, newval); \ - else \ - __acev_w24ret = __v7_exchange_24_rel (mem, newval); \ - __acev_w24ret; }) - -#define atomic_full_barrier() \ - do { \ - if (__atomic_is_v9) \ - /* membar #LoadLoad | #LoadStore | #StoreLoad | #StoreStore */ \ - __asm __volatile (".word 
0x8143e00f" : : : "memory"); \ - else \ - __asm __volatile ("" : : : "memory"); \ - } while (0) - -#define atomic_read_barrier() \ - do { \ - if (__atomic_is_v9) \ - /* membar #LoadLoad | #LoadStore */ \ - __asm __volatile (".word 0x8143e005" : : : "memory"); \ - else \ - __asm __volatile ("" : : : "memory"); \ - } while (0) - -#define atomic_write_barrier() \ - do { \ - if (__atomic_is_v9) \ - /* membar #LoadStore | #StoreStore */ \ - __asm __volatile (".word 0x8143e00c" : : : "memory"); \ - else \ - __asm __volatile ("" : : : "memory"); \ - } while (0) - -#endif - -#include - -#endif /* bits/atomic.h */ diff --git a/sysdeps/sparc/sparc32/sparcv9/atomic-machine.h b/sysdeps/sparc/sparc32/sparcv9/atomic-machine.h new file mode 100644 index 0000000..0ff5dcd --- /dev/null +++ b/sysdeps/sparc/sparc32/sparcv9/atomic-machine.h @@ -0,0 +1,105 @@ +/* Atomic operations. sparcv9 version. + Copyright (C) 2003-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Jakub Jelinek , 2003. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include + +typedef int8_t atomic8_t; +typedef uint8_t uatomic8_t; +typedef int_fast8_t atomic_fast8_t; +typedef uint_fast8_t uatomic_fast8_t; + +typedef int16_t atomic16_t; +typedef uint16_t uatomic16_t; +typedef int_fast16_t atomic_fast16_t; +typedef uint_fast16_t uatomic_fast16_t; + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef int64_t atomic64_t; +typedef uint64_t uatomic64_t; +typedef int_fast64_t atomic_fast64_t; +typedef uint_fast64_t uatomic_fast64_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +#define __HAVE_64B_ATOMICS 0 +#define USE_ATOMIC_COMPILER_BUILTINS 0 + + +#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ +({ \ + __typeof (*(mem)) __acev_tmp; \ + __typeof (mem) __acev_mem = (mem); \ + if (__builtin_constant_p (oldval) && (oldval) == 0) \ + __asm __volatile ("cas [%3], %%g0, %0" \ + : "=r" (__acev_tmp), "=m" (*__acev_mem) \ + : "m" (*__acev_mem), "r" (__acev_mem), \ + "0" (newval) : "memory"); \ + else \ + __asm __volatile ("cas [%4], %2, %0" \ + : "=r" (__acev_tmp), "=m" (*__acev_mem) \ + : "r" (oldval), "m" (*__acev_mem), "r" (__acev_mem), \ + "0" (newval) : "memory"); \ + __acev_tmp; }) + +/* This can be implemented if needed. 
*/ +#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define atomic_exchange_acq(mem, newvalue) \ + ({ __typeof (*(mem)) __oldval; \ + __typeof (mem) __memp = (mem); \ + __typeof (*(mem)) __value = (newvalue); \ + \ + if (sizeof (*(mem)) == 4) \ + __asm ("swap %0, %1" \ + : "=m" (*__memp), "=r" (__oldval) \ + : "m" (*__memp), "1" (__value) : "memory"); \ + else \ + abort (); \ + __oldval; }) + +#define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ + atomic_compare_and_exchange_val_acq (mem, newval, oldval) + +#define atomic_exchange_24_rel(mem, newval) \ + atomic_exchange_rel (mem, newval) + +#define atomic_full_barrier() \ + __asm __volatile ("membar #LoadLoad | #LoadStore" \ + " | #StoreLoad | #StoreStore" : : : "memory") +#define atomic_read_barrier() \ + __asm __volatile ("membar #LoadLoad | #LoadStore" : : : "memory") +#define atomic_write_barrier() \ + __asm __volatile ("membar #LoadStore | #StoreStore" : : : "memory") + +extern void __cpu_relax (void); +#define atomic_spin_nop() __cpu_relax () diff --git a/sysdeps/sparc/sparc32/sparcv9/bits/atomic.h b/sysdeps/sparc/sparc32/sparcv9/bits/atomic.h deleted file mode 100644 index 0ff5dcd..0000000 --- a/sysdeps/sparc/sparc32/sparcv9/bits/atomic.h +++ /dev/null @@ -1,105 +0,0 @@ -/* Atomic operations. sparcv9 version. - Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Jakub Jelinek , 2003. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . 
*/ - -#include - -typedef int8_t atomic8_t; -typedef uint8_t uatomic8_t; -typedef int_fast8_t atomic_fast8_t; -typedef uint_fast8_t uatomic_fast8_t; - -typedef int16_t atomic16_t; -typedef uint16_t uatomic16_t; -typedef int_fast16_t atomic_fast16_t; -typedef uint_fast16_t uatomic_fast16_t; - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef int64_t atomic64_t; -typedef uint64_t uatomic64_t; -typedef int_fast64_t atomic_fast64_t; -typedef uint_fast64_t uatomic_fast64_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -#define __HAVE_64B_ATOMICS 0 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - - -#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ -({ \ - __typeof (*(mem)) __acev_tmp; \ - __typeof (mem) __acev_mem = (mem); \ - if (__builtin_constant_p (oldval) && (oldval) == 0) \ - __asm __volatile ("cas [%3], %%g0, %0" \ - : "=r" (__acev_tmp), "=m" (*__acev_mem) \ - : "m" (*__acev_mem), "r" (__acev_mem), \ - "0" (newval) : "memory"); \ - else \ - __asm __volatile ("cas [%4], %2, %0" \ - : "=r" (__acev_tmp), "=m" (*__acev_mem) \ - : "r" (oldval), "m" (*__acev_mem), "r" (__acev_mem), \ - "0" (newval) : "memory"); \ - __acev_tmp; }) - -/* This can be implemented if needed. */ -#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define atomic_exchange_acq(mem, newvalue) \ - ({ __typeof (*(mem)) __oldval; \ - __typeof (mem) __memp = (mem); \ - __typeof (*(mem)) __value = (newvalue); \ - \ - if (sizeof (*(mem)) == 4) \ - __asm ("swap %0, %1" \ - : "=m" (*__memp), "=r" (__oldval) \ - : "m" (*__memp), "1" (__value) : "memory"); \ - else \ - abort (); \ - __oldval; }) - -#define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ - atomic_compare_and_exchange_val_acq (mem, newval, oldval) - -#define atomic_exchange_24_rel(mem, newval) \ - atomic_exchange_rel (mem, newval) - -#define atomic_full_barrier() \ - __asm __volatile ("membar #LoadLoad | #LoadStore" \ - " | #StoreLoad | #StoreStore" : : : "memory") -#define atomic_read_barrier() \ - __asm __volatile ("membar #LoadLoad | #LoadStore" : : : "memory") -#define atomic_write_barrier() \ - __asm __volatile ("membar #LoadStore | #StoreStore" : : : "memory") - -extern void __cpu_relax (void); -#define atomic_spin_nop() __cpu_relax () diff --git a/sysdeps/sparc/sparc64/atomic-machine.h b/sysdeps/sparc/sparc64/atomic-machine.h new file mode 100644 index 0000000..79f4a1d --- /dev/null +++ b/sysdeps/sparc/sparc64/atomic-machine.h @@ -0,0 +1,126 @@ +/* Atomic operations. sparc64 version. + Copyright (C) 2003-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Jakub Jelinek , 2003. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include + +typedef int8_t atomic8_t; +typedef uint8_t uatomic8_t; +typedef int_fast8_t atomic_fast8_t; +typedef uint_fast8_t uatomic_fast8_t; + +typedef int16_t atomic16_t; +typedef uint16_t uatomic16_t; +typedef int_fast16_t atomic_fast16_t; +typedef uint_fast16_t uatomic_fast16_t; + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef int64_t atomic64_t; +typedef uint64_t uatomic64_t; +typedef int_fast64_t atomic_fast64_t; +typedef uint_fast64_t uatomic_fast64_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +#define __HAVE_64B_ATOMICS 1 +#define USE_ATOMIC_COMPILER_BUILTINS 0 + + +#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ +({ \ + __typeof (*(mem)) __acev_tmp; \ + __typeof (mem) __acev_mem = (mem); \ + if (__builtin_constant_p (oldval) && (oldval) == 0) \ + __asm __volatile ("cas [%3], %%g0, %0" \ + : "=r" (__acev_tmp), "=m" (*__acev_mem) \ + : "m" (*__acev_mem), "r" (__acev_mem), \ + "0" (newval) : "memory"); \ + else \ + __asm __volatile ("cas [%4], %2, %0" \ + : "=r" (__acev_tmp), "=m" (*__acev_mem) \ + : "r" (oldval), "m" (*__acev_mem), "r" (__acev_mem), \ + "0" (newval) : "memory"); \ + __acev_tmp; }) + +#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ +({ \ + __typeof (*(mem)) __acev_tmp; \ + __typeof (mem) __acev_mem = (mem); \ + if (__builtin_constant_p (oldval) && (oldval) == 0) \ + __asm __volatile ("casx [%3], %%g0, %0" \ + : "=r" (__acev_tmp), "=m" (*__acev_mem) \ + : "m" (*__acev_mem), "r" (__acev_mem), \ + "0" ((long) (newval)) : "memory"); \ + else \ + __asm __volatile ("casx [%4], %2, %0" \ + : "=r" (__acev_tmp), "=m" (*__acev_mem) \ + : "r" ((long) (oldval)), "m" (*__acev_mem), \ + "r" (__acev_mem), "0" ((long) (newval)) : "memory"); \ + __acev_tmp; }) + +#define atomic_exchange_acq(mem, newvalue) \ + ({ __typeof (*(mem)) __oldval, __val; \ + __typeof (mem) __memp = (mem); \ + __typeof (*(mem)) __value = (newvalue); \ + \ + if (sizeof (*(mem)) == 4) \ + __asm ("swap %0, %1" \ + : "=m" (*__memp), "=r" (__oldval) \ + : "m" (*__memp), "1" (__value) : "memory"); \ + else \ + { \ + __val = *__memp; \ + do \ + { \ + __oldval = __val; \ + __val = atomic_compare_and_exchange_val_acq (__memp, __value, \ + __oldval); \ + } \ + while (__builtin_expect (__val != __oldval, 0)); \ + } \ + __oldval; }) + +#define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ + atomic_compare_and_exchange_val_acq (mem, newval, oldval) + +#define atomic_exchange_24_rel(mem, newval) \ + atomic_exchange_rel (mem, newval) + +#define atomic_full_barrier() \ + __asm __volatile ("membar #LoadLoad | #LoadStore" \ + " | #StoreLoad | #StoreStore" : : : "memory") +#define atomic_read_barrier() \ + __asm __volatile ("membar #LoadLoad | #LoadStore" : : : "memory") +#define atomic_write_barrier() \ + __asm __volatile ("membar #LoadStore | #StoreStore" : : : "memory") + +extern void __cpu_relax (void); +#define atomic_spin_nop() __cpu_relax () diff --git 
a/sysdeps/sparc/sparc64/bits/atomic.h b/sysdeps/sparc/sparc64/bits/atomic.h deleted file mode 100644 index 79f4a1d..0000000 --- a/sysdeps/sparc/sparc64/bits/atomic.h +++ /dev/null @@ -1,126 +0,0 @@ -/* Atomic operations. sparc64 version. - Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Jakub Jelinek , 2003. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include - -typedef int8_t atomic8_t; -typedef uint8_t uatomic8_t; -typedef int_fast8_t atomic_fast8_t; -typedef uint_fast8_t uatomic_fast8_t; - -typedef int16_t atomic16_t; -typedef uint16_t uatomic16_t; -typedef int_fast16_t atomic_fast16_t; -typedef uint_fast16_t uatomic_fast16_t; - -typedef int32_t atomic32_t; -typedef uint32_t uatomic32_t; -typedef int_fast32_t atomic_fast32_t; -typedef uint_fast32_t uatomic_fast32_t; - -typedef int64_t atomic64_t; -typedef uint64_t uatomic64_t; -typedef int_fast64_t atomic_fast64_t; -typedef uint_fast64_t uatomic_fast64_t; - -typedef intptr_t atomicptr_t; -typedef uintptr_t uatomicptr_t; -typedef intmax_t atomic_max_t; -typedef uintmax_t uatomic_max_t; - -#define __HAVE_64B_ATOMICS 1 -#define USE_ATOMIC_COMPILER_BUILTINS 0 - - -#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ - (abort (), (__typeof (*mem)) 0) - -#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ -({ \ - __typeof (*(mem)) __acev_tmp; \ - __typeof (mem) __acev_mem = (mem); \ - if (__builtin_constant_p (oldval) && (oldval) == 0) \ - __asm __volatile ("cas [%3], %%g0, %0" \ - : "=r" (__acev_tmp), "=m" (*__acev_mem) \ - : "m" (*__acev_mem), "r" (__acev_mem), \ - "0" (newval) : "memory"); \ - else \ - __asm __volatile ("cas [%4], %2, %0" \ - : "=r" (__acev_tmp), "=m" (*__acev_mem) \ - : "r" (oldval), "m" (*__acev_mem), "r" (__acev_mem), \ - "0" (newval) : "memory"); \ - __acev_tmp; }) - -#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ -({ \ - __typeof (*(mem)) __acev_tmp; \ - __typeof (mem) __acev_mem = (mem); \ - if (__builtin_constant_p (oldval) && (oldval) == 0) \ - __asm __volatile ("casx [%3], %%g0, %0" \ - : "=r" (__acev_tmp), "=m" (*__acev_mem) \ - : "m" (*__acev_mem), "r" (__acev_mem), \ - "0" ((long) (newval)) : "memory"); \ - else \ - __asm __volatile ("casx [%4], %2, %0" \ - : "=r" (__acev_tmp), "=m" (*__acev_mem) \ - : "r" ((long) (oldval)), "m" (*__acev_mem), \ - "r" (__acev_mem), "0" ((long) (newval)) : "memory"); \ - __acev_tmp; }) - -#define atomic_exchange_acq(mem, newvalue) \ - ({ __typeof (*(mem)) __oldval, __val; \ - __typeof (mem) __memp = (mem); \ - __typeof (*(mem)) __value = (newvalue); \ - \ - if (sizeof (*(mem)) == 4) \ - __asm ("swap %0, %1" \ - : "=m" (*__memp), "=r" (__oldval) \ - : "m" (*__memp), "1" (__value) : "memory"); \ - else \ - { \ - __val = *__memp; \ - do \ - { \ - __oldval = 
__val; \ - __val = atomic_compare_and_exchange_val_acq (__memp, __value, \ - __oldval); \ - } \ - while (__builtin_expect (__val != __oldval, 0)); \ - } \ - __oldval; }) - -#define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \ - atomic_compare_and_exchange_val_acq (mem, newval, oldval) - -#define atomic_exchange_24_rel(mem, newval) \ - atomic_exchange_rel (mem, newval) - -#define atomic_full_barrier() \ - __asm __volatile ("membar #LoadLoad | #LoadStore" \ - " | #StoreLoad | #StoreStore" : : : "memory") -#define atomic_read_barrier() \ - __asm __volatile ("membar #LoadLoad | #LoadStore" : : : "memory") -#define atomic_write_barrier() \ - __asm __volatile ("membar #LoadStore | #StoreStore" : : : "memory") - -extern void __cpu_relax (void); -#define atomic_spin_nop() __cpu_relax () -- cgit v1.1
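
Editorial note (not part of the patch): the sparc32 header moved above emulates
compare-and-exchange on pre-v9 CPUs by hashing each variable's address onto one
of 64 global byte locks and taking the lock with the ldstub test-and-set
instruction (the __v7_* macros).  The following standalone C sketch illustrates
that scheme under stated assumptions: GCC's __atomic_test_and_set and
__atomic_clear builtins stand in for ldstub, and the names sketch_locks,
lock_index and sketch_cas_int are invented for the example, not glibc's.

#include <stdint.h>
#include <stdio.h>

/* 64 one-byte spin locks shared by all emulated atomic variables,
   mirroring __sparc32_atomic_locks in the header.  */
static volatile unsigned char sketch_locks[64];

/* Hash a variable's address to one of the 64 locks, using the header's
   ((addr >> 2) ^ (addr >> 12)) & 63 scheme.  */
static unsigned int
lock_index (void *addr)
{
  uintptr_t a = (uintptr_t) addr;
  return ((a >> 2) ^ (a >> 12)) & 63;
}

static void
do_lock (void *addr)
{
  unsigned int idx = lock_index (addr);
  /* Spin until the byte was previously 0; ldstub does the load-and-set
     in a single instruction, here a builtin stands in for it.  */
  while (__atomic_test_and_set ((void *) &sketch_locks[idx], __ATOMIC_ACQUIRE))
    ;
}

static void
do_unlock (void *addr)
{
  __atomic_clear ((void *) &sketch_locks[lock_index (addr)], __ATOMIC_RELEASE);
}

/* Compare-and-exchange built from the lock, mirroring
   __v7_compare_and_exchange_val_acq: return the old value and store
   NEWVAL only if the old value equals OLDVAL.  */
static int
sketch_cas_int (int *mem, int newval, int oldval)
{
  do_lock (mem);
  int ret = *mem;
  if (ret == oldval)
    *mem = newval;
  do_unlock (mem);
  return ret;
}

int
main (void)
{
  int x = 5;
  int old = sketch_cas_int (&x, 7, 5);
  printf ("cas 5->7: old=%d, x=%d\n", old, x);   /* old=5, x=7 */
  old = sketch_cas_int (&x, 9, 5);
  printf ("cas 5->9: old=%d, x=%d\n", old, x);   /* old=7, x unchanged */
  return 0;
}

Hashing both addr >> 2 and addr >> 12 into the lock index spreads neighbouring
variables across different locks, reducing contention without any per-variable
storage; the cost is that two unrelated variables may occasionally share a lock.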