author     Richard Earnshaw <rearnsha@arm.com>        2000-12-05 10:36:33 +0000
committer  Richard Earnshaw <rearnsha@gcc.gnu.org>    2000-12-05 10:36:33 +0000
commit     529dec15d809c6bff8031344f2075de98872395d (patch)
tree       f59f4b3198a5c6c03557c5f02f37bdc0b0ee4041
parent     11b176c156aecf09298da2f3be09507806fdad2b (diff)
atomicity.h: Add support for compiling Thumb code.

	* config/cpu/arm/bits/atomicity.h: Add support for compiling Thumb
	code.

From-SVN: r38033
-rw-r--r--  libstdc++-v3/ChangeLog                        |   5
-rw-r--r--  libstdc++-v3/config/cpu/arm/bits/atomicity.h  | 183
2 files changed, 165 insertions, 23 deletions
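
Thumb state has no swp instruction, so on __thumb__ builds each primitive below first jumps to an ARM-state island, runs its swp-based loop there, and jumps back to Thumb. A self-contained illustration of that trampoline, mirroring the __always_swap hunk below (the function name is illustrative; assumes an ARMv4T target built with -mthumb):

    /* Condensed illustration of the Thumb -> ARM -> Thumb trampoline
       used by every routine in this patch.  */
    static long
    thumb_swap_example (volatile long *p, long v)
    {
      long result, tmp;
      __asm__ __volatile__ (
        "ldr %0, 4f \n\t"          /* fetch the ARM island's address      */
        "bx %0 \n\t"               /* bit 0 clear: switch to ARM state    */
        ".align 0 \n"
        "4:\t"
        ".word 0f \n\t"            /* literal word holding the address    */
        ".code 32 \n"              /* island assembled as ARM code        */
        "0:\t"
        "swp %0, %3, [%2] \n\t"    /* the ARM-only atomic swap            */
        "ldr %1, 1f \n\t"
        "bx %1 \n"                 /* jump back past the island           */
        "1:\t"
        ".word 2f \n\t"
        ".code 16 \n"              /* resume assembling Thumb code        */
        "2:\n"
        : "=&l" (result), "=&r" (tmp)
        : "r" (p), "r" (v)
        : "memory");
      return result;
    }

Each function embeds its own literal words because, as the in-line comments note, an inlined function cannot be sure of the alignment of the surrounding code.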
diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index 5e2a9a3..927ae79 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,5 +1,10 @@
2000-12-05 Richard Earnshaw <rearnsha@arm.com>
+ * config/cpu/arm/bits/atomicity.h: Add support for compiling Thumb
+ code.
+
+2000-12-05 Richard Earnshaw <rearnsha@arm.com>
+
Support for NetBSD.
* aclocal.m4: Add test for NetBSD's ctype support.
* configure: Regenerate.
diff --git a/libstdc++-v3/config/cpu/arm/bits/atomicity.h b/libstdc++-v3/config/cpu/arm/bits/atomicity.h
index 3298775..e933ca4 100644
--- a/libstdc++-v3/config/cpu/arm/bits/atomicity.h
+++ b/libstdc++-v3/config/cpu/arm/bits/atomicity.h
@@ -27,19 +27,46 @@ __attribute__ ((__unused__))
__exchange_and_add (volatile _Atomic_word* __mem, int __val)
{
_Atomic_word __tmp, __tmp2, __result;
+#ifdef __thumb__
+ /* Since this function is inlined, we can't be sure of the alignment. */
+ __asm__ __volatile__ (
+ "ldr %0, 4f \n\t"
+ "bx %0 \n\t"
+ ".align 0 \n"
+ "4:\t"
+ ".word 0f \n\t"
+ ".code 32 \n"
+ "0:\t"
+ "ldr %0, [%3] \n\t"
+ "add %1, %0, %4 \n\t"
+ "swp %2, %1, [%3] \n\t"
+ "cmp %0, %2 \n\t"
+ "swpne %1, %2, [%3] \n\t"
+ "bne 0b \n\t"
+ "ldr %1, 1f \n\t"
+ "bx %1 \n"
+ "1:\t"
+ ".word 2f \n\t"
+ ".code 16 \n"
+ "2:\n"
+ : "=&l"(__result), "=&r"(__tmp), "=&r"(__tmp2)
+ : "r" (__mem), "r"(__val)
+ : "cc", "memory");
+#else
__asm__ __volatile__ (
"\n"
"0:\t"
- "ldr %0,[%3] \n\t"
- "add %1,%0,%4 \n\t"
- "swp %2,%1,[%3] \n\t"
- "cmp %0,%2 \n\t"
- "swpne %1,%2,[%3] \n\t"
+ "ldr %0, [%3] \n\t"
+ "add %1, %0, %4 \n\t"
+ "swp %2, %1, [%3] \n\t"
+ "cmp %0, %2 \n\t"
+ "swpne %1, %2, [%3] \n\t"
"bne 0b \n\t"
""
: "=&r"(__result), "=&r"(__tmp), "=&r"(__tmp2)
: "r" (__mem), "r"(__val)
: "cc", "memory");
+#endif
return __result;
}
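
A usage sketch (hypothetical caller; <bits/atomicity.h> is the internal libstdc++-v3 header that declares these primitives): __exchange_and_add returns the value the word held before the addition, which is exactly what a reference count needs to detect the final release.

    #include <bits/atomicity.h>

    /* Returns true when this call dropped the last reference.  */
    static bool
    release_ref (volatile _Atomic_word *count)
    {
      return __exchange_and_add (count, -1) == 1;
    }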
@@ -48,19 +75,46 @@ __attribute__ ((__unused__))
__atomic_add (volatile _Atomic_word *__mem, int __val)
{
_Atomic_word __tmp, __tmp2, __tmp3;
+#ifdef __thumb__
+ /* Since this function is inlined, we can't be sure of the alignment. */
+ __asm__ __volatile__ (
+ "ldr %0, 4f \n\t"
+ "bx %0 \n\t"
+ ".align 0\n"
+ "4:\t"
+ ".word 0f \n\t"
+ ".code 32 \n"
+ "0:\t"
+ "ldr %0, [%3] \n\t"
+ "add %1, %0, %4 \n\t"
+ "swp %2, %1, [%3] \n\t"
+ "cmp %0, %2 \n\t"
+ "swpne %1, %2,[%3] \n\t"
+ "bne 0b \n\t"
+ "ldr %1, 1f \n\t"
+ "bx %1 \n"
+ "1:\t"
+ ".word 2f \n\t"
+ ".code 16 \n"
+ "2:\n"
+ : "=&l"(__tmp), "=&r"(__tmp2), "=&r"(__tmp3)
+ : "r" (__mem), "r"(__val)
+ : "cc", "memory");
+#else
__asm__ __volatile__ (
"\n"
"0:\t"
- "ldr %0,[%3] \n\t"
- "add %1,%0,%4 \n\t"
- "swp %2,%1,[%3] \n\t"
- "cmp %0,%2 \n\t"
- "swpne %1,%2,[%3] \n\t"
+ "ldr %0, [%3] \n\t"
+ "add %1, %0, %4 \n\t"
+ "swp %2, %1, [%3] \n\t"
+ "cmp %0, %2 \n\t"
+ "swpne %1, %2, [%3] \n\t"
"bne 0b \n\t"
""
: "=&r"(__tmp), "=&r"(__tmp2), "=&r"(__tmp3)
: "r" (__mem), "r"(__val)
: "cc", "memory");
+#endif
}
static inline int
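
__atomic_add runs the same swp retry loop but discards the previous value; a minimal sketch of a caller that wants only the side effect (names hypothetical):

    #include <bits/atomicity.h>

    /* Bump an event counter; the old value is not needed.  */
    static void
    count_event (volatile _Atomic_word *counter)
    {
      __atomic_add (counter, 1);
    }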
@@ -69,23 +123,54 @@ __compare_and_swap (volatile long *__p, long __oldval, long __newval)
{
int __result;
long __tmp;
+#ifdef __thumb__
+ /* Since this function is inlined, we can't be sure of the alignment. */
+ __asm__ __volatile__ (
+ "ldr %0, 4f \n\t"
+ "bx %0 \n\t"
+ ".align 0 \n"
+ "4:\t"
+ ".word 0f \n\t"
+ ".code 32 \n"
+ "0:\t"
+ "ldr %1, [%2] \n\t"
+ "mov %0, #0 \n\t"
+ "cmp %1, %4 \n\t"
+ "bne 1f \n\t"
+ "swp %0, %3, [%2] \n\t"
+ "cmp %1, %0 \n\t"
+ "swpne %1, %0, [%2] \n\t"
+ "bne 0b \n\t"
+ "mov %0, #1 \n"
+ "1:\t"
+ "ldr %1, 2f \n\t"
+ "bx %1 \n"
+ "2:\t"
+ ".word 3f \n\t"
+ ".code 16\n"
+ "3:\n"
+ : "=&l"(__result), "=&r"(__tmp)
+ : "r" (__p), "r" (__newval), "r" (__oldval)
+ : "cc", "memory");
+#else
__asm__ __volatile__ (
"\n"
"0:\t"
- "ldr %1,[%2] \n\t"
- "mov %0,#0 \n\t"
- "cmp %1,%4 \n\t"
+ "ldr %1, [%2] \n\t"
+ "mov %0, #0 \n\t"
+ "cmp %1, %4 \n\t"
"bne 1f \n\t"
- "swp %0,%3,[%2] \n\t"
- "cmp %1,%0 \n\t"
- "swpne %1,%0,[%2] \n\t"
+ "swp %0, %3, [%2] \n\t"
+ "cmp %1, %0 \n\t"
+ "swpne %1, %0, [%2] \n\t"
"bne 0b \n\t"
- "mov %0,#1 \n"
+ "mov %0, #1 \n"
"1:\n\t"
""
: "=&r"(__result), "=&r"(__tmp)
: "r" (__p), "r" (__newval), "r" (__oldval)
: "cc", "memory");
+#endif
return __result;
}
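
__compare_and_swap returns nonzero only when it actually stored __newval, so callers wrap it in a reload-and-retry loop. A sketch (function name hypothetical):

    #include <bits/atomicity.h>

    /* Raise *max to candidate, never lower it.  */
    static void
    update_max (volatile long *max, long candidate)
    {
      long old = *max;
      while (old < candidate
             && !__compare_and_swap (max, old, candidate))
        old = *max;   /* lost a race: reload and try again */
    }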
@@ -94,13 +179,36 @@ __attribute__ ((__unused__))
__always_swap (volatile long *__p, long __newval)
{
long __result;
+#ifdef __thumb__
+ long __tmp;
+ /* Since this function is inlined, we can't be sure of the alignment. */
+ __asm__ __volatile__ (
+ "ldr %0, 4f \n\t"
+ "bx %0 \n\t"
+ ".align 0 \n"
+ "4:\t"
+ ".word 0f \n\t"
+ ".code 32\n"
+ "0:\t"
+ "swp %0, %3, [%2] \n\t"
+ "ldr %1, 1f \n\t"
+ "bx %1 \n"
+ "1:\t"
+ ".word 2f \n\t"
+ ".code 16 \n"
+ "2:\n"
+ : "=&l"(__result), "=&r"(__tmp)
+ : "r"(__p), "r"(__newval)
+ : "memory");
+#else
__asm__ __volatile__ (
"\n\t"
- "swp %0,%2,[%1] \n\t"
+ "swp %0, %2, [%1] \n\t"
""
: "=&r"(__result)
: "r"(__p), "r"(__newval)
: "memory");
+#endif
return __result;
}
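
__always_swap exchanges unconditionally and hands back the previous contents; a sketch of a one-shot handoff (name hypothetical):

    #include <bits/atomicity.h>

    /* Atomically take whatever the slot holds, leaving 0 behind.  */
    static long
    take_token (volatile long *slot)
    {
      return __always_swap (slot, 0);
    }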
@@ -110,21 +218,50 @@ __test_and_set (volatile long *__p, long __newval)
{
int __result;
long __tmp;
+#ifdef __thumb__
+ /* Since this function is inlined, we can't be sure of the alignment. */
+ __asm__ __volatile__ (
+ "ldr %0, 4f \n\t"
+ "bx %0 \n\t"
+ ".align 0 \n"
+ "4:\t"
+ ".word 0f \n\t"
+ ".code 32 \n"
+ "0:\t"
+ "ldr %0, [%2] \n\t"
+ "cmp %0, #0 \n\t"
+ "bne 1f \n\t"
+ "swp %1, %3, [%2] \n\t"
+ "cmp %0, %1 \n\t"
+ "swpne %0, %1, [%2]\n\t"
+ "bne 0b \n"
+ "1:\t"
+ "ldr %1, 2f \n\t"
+ "bx %1 \n"
+ "2:\t"
+ ".word 3f \n\t"
+ ".code 16 \n"
+ "3:"
+ : "=&l"(__result), "=r" (__tmp)
+ : "r"(__p), "r"(__newval)
+ : "cc", "memory");
+#else
__asm__ __volatile__ (
"\n"
"0:\t"
- "ldr %0,[%2] \n\t"
- "cmp %0,#0 \n\t"
+ "ldr %0, [%2] \n\t"
+ "cmp %0, #0 \n\t"
"bne 1f \n\t"
- "swp %1,%3,[%2] \n\t"
- "cmp %0,%1 \n\t"
- "swpne %0,%1,[%2] \n\t"
+ "swp %1, %3, [%2] \n\t"
+ "cmp %0, %1 \n\t"
+ "swpne %0, %1, [%2] \n\t"
"bne 0b \n"
"1:\n\t"
""
: "=&r"(__result), "=r" (__tmp)
: "r"(__p), "r"(__newval)
: "cc", "memory");
+#endif
return __result;
}
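
__test_and_set returns the word's previous value, so a result of 0 means the caller installed __newval and now owns the word. A minimal spinlock sketch on top of it (names hypothetical; the plain-store unlock mirrors what swp-era ARM locks did):

    #include <bits/atomicity.h>

    static void
    spin_lock (volatile long *lock)
    {
      while (__test_and_set (lock, 1) != 0)
        ;   /* previous value was nonzero: another owner, keep spinning */
    }

    static void
    spin_unlock (volatile long *lock)
    {
      *lock = 0;
    }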