author     Alexander Richardson <alexrichardson@google.com>   2024-04-17 08:42:41 -0700
committer  GitHub <noreply@github.com>                        2024-04-17 08:42:41 -0700
commit     abd5e45a96954d80f6ffe6d8676c0059fae8573b
tree       97ffa2a443c83f13c45eab7716d1a41dd5f6d49e
parent     8656d4c6a7a742c6fa6ee02c2ace7415163e65e4
[compiler-rt] Use __atomic builtins whenever possible
The code in this file dates back to 2012, when Clang's support for atomic builtins was still quite limited. The bugs referenced in the comment at the top of the file have long been fixed, and using the compiler builtins directly should now generate slightly better code. Additionally, this allows using the atomic builtin header for platforms where the __sync builtins are lacking (e.g. Arm Morello).

This change does not introduce any code generation changes for __tsan_read*/__tsan_write* or __tsan_func_{entry,exit} on x86, which indicates that the previously noted compiler issues have been fixed.

We also have to touch the non-Clang code paths here, since the only way to make this work easily is to make the memory_order enum values match the compiler-provided macros; the debug checks that assumed the enum was always a bitflag are updated accordingly.

The one downside of this change is that 32-bit MIPS now definitely requires libatomic (though that may already have been needed for RMW ops).

Reviewed By: dvyukov

Pull Request: https://github.com/llvm/llvm-project/pull/84439
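For illustration only, here is a minimal standalone sketch of the approach described above (assuming a Clang/GCC toolchain; the file and function names are hypothetical, not the sanitizer's): once the enum values equal the __ATOMIC_* macros, a runtime memory_order can be handed straight to the __atomic builtins without any translation table.

// sketch_enum_mapping.cpp -- illustrative aside, not part of the patch.
// Build with Clang or GCC: c++ -std=c++17 -c sketch_enum_mapping.cpp
#include <cstdint>

enum memory_order {
  memory_order_relaxed = __ATOMIC_RELAXED,
  memory_order_acquire = __ATOMIC_ACQUIRE,
  memory_order_release = __ATOMIC_RELEASE,
  memory_order_seq_cst = __ATOMIC_SEQ_CST
};

static std::uint32_t counter;

// The order argument is forwarded verbatim; no switch over enum values is
// needed because the numeric values already match the compiler's macros.
std::uint32_t load_counter(memory_order mo) {
  return __atomic_load_n(&counter, mo);
}

void bump_counter(memory_order mo) {
  __atomic_fetch_add(&counter, 1u, mo);
}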
-rw-r--r--  compiler-rt/lib/sanitizer_common/CMakeLists.txt                     |   3
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_atomic.h                 |  12
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h           |  85
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h      | 117
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h     |  85
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h       | 113
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h            |   8
-rw-r--r--  llvm/utils/gn/secondary/compiler-rt/lib/sanitizer_common/BUILD.gn   |   3
8 files changed, 56 insertions, 370 deletions
diff --git a/compiler-rt/lib/sanitizer_common/CMakeLists.txt b/compiler-rt/lib/sanitizer_common/CMakeLists.txt
index f2b4ac7..66f2d25 100644
--- a/compiler-rt/lib/sanitizer_common/CMakeLists.txt
+++ b/compiler-rt/lib/sanitizer_common/CMakeLists.txt
@@ -122,9 +122,6 @@ set(SANITIZER_IMPL_HEADERS
sanitizer_asm.h
sanitizer_atomic.h
sanitizer_atomic_clang.h
- sanitizer_atomic_clang_mips.h
- sanitizer_atomic_clang_other.h
- sanitizer_atomic_clang_x86.h
sanitizer_atomic_msvc.h
sanitizer_bitvector.h
sanitizer_bvgraph.h
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic.h
index 46f0695..0609a11 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic.h
@@ -18,12 +18,24 @@
namespace __sanitizer {
enum memory_order {
+// If the __atomic atomic builtins are supported (Clang/GCC), use the
+// compiler provided macro values so that we can map the atomic operations
+// to __atomic_* directly.
+#ifdef __ATOMIC_SEQ_CST
+ memory_order_relaxed = __ATOMIC_RELAXED,
+ memory_order_consume = __ATOMIC_CONSUME,
+ memory_order_acquire = __ATOMIC_ACQUIRE,
+ memory_order_release = __ATOMIC_RELEASE,
+ memory_order_acq_rel = __ATOMIC_ACQ_REL,
+ memory_order_seq_cst = __ATOMIC_SEQ_CST
+#else
memory_order_relaxed = 1 << 0,
memory_order_consume = 1 << 1,
memory_order_acquire = 1 << 2,
memory_order_release = 1 << 3,
memory_order_acq_rel = 1 << 4,
memory_order_seq_cst = 1 << 5
+#endif
};
struct atomic_uint8_t {
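A side note on the value change in this hunk, as an illustrative sketch (assuming Clang/GCC, where the __ATOMIC_* macros expand to the small consecutive integers 0 through 5): __ATOMIC_RELAXED is 0 and the orders are no longer disjoint bits, which is why the DCHECKs later in the patch switch from bitmask tests such as `mo & (...)` to explicit equality comparisons.

// Illustrative compile-time checks, not part of the patch.
static_assert(__ATOMIC_RELAXED == 0,
              "relaxed is encoded as zero, so `mo & mask` can never detect it");
static_assert(__ATOMIC_ACQUIRE == 2 && __ATOMIC_RELEASE == 3,
              "orders are consecutive integers, not power-of-two bitflags");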
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
index 4318d64..1414092 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
@@ -14,60 +14,63 @@
#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H
-#if defined(__i386__) || defined(__x86_64__)
-# include "sanitizer_atomic_clang_x86.h"
-#else
-# include "sanitizer_atomic_clang_other.h"
-#endif
-
namespace __sanitizer {
-// We would like to just use compiler builtin atomic operations
-// for loads and stores, but they are mostly broken in clang:
-// - they lead to vastly inefficient code generation
-// (http://llvm.org/bugs/show_bug.cgi?id=17281)
-// - 64-bit atomic operations are not implemented on x86_32
-// (http://llvm.org/bugs/show_bug.cgi?id=15034)
-// - they are not implemented on ARM
-// error: undefined reference to '__atomic_load_4'
+// We use the compiler builtin atomic operations for loads and stores, which
+// generates correct code for all architectures, but may require libatomic
+// on platforms where e.g. 64-bit atomics are not supported natively.
// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
// for mappings of the memory model to different processors.
-inline void atomic_signal_fence(memory_order) {
+inline void atomic_signal_fence(memory_order mo) { __atomic_signal_fence(mo); }
+
+inline void atomic_thread_fence(memory_order mo) { __atomic_thread_fence(mo); }
+
+inline void proc_yield(int cnt) {
+ __asm__ __volatile__("" ::: "memory");
+#if defined(__i386__) || defined(__x86_64__)
+ for (int i = 0; i < cnt; i++) __asm__ __volatile__("pause");
__asm__ __volatile__("" ::: "memory");
+#endif
}
-inline void atomic_thread_fence(memory_order) {
- __sync_synchronize();
+template <typename T>
+inline typename T::Type atomic_load(const volatile T *a, memory_order mo) {
+ DCHECK(mo == memory_order_relaxed || mo == memory_order_consume ||
+ mo == memory_order_acquire || mo == memory_order_seq_cst);
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return __atomic_load_n(&a->val_dont_use, mo);
}
-template<typename T>
-inline typename T::Type atomic_fetch_add(volatile T *a,
- typename T::Type v, memory_order mo) {
- (void)mo;
+template <typename T>
+inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+ DCHECK(mo == memory_order_relaxed || mo == memory_order_release ||
+ mo == memory_order_seq_cst);
DCHECK(!((uptr)a % sizeof(*a)));
- return __sync_fetch_and_add(&a->val_dont_use, v);
+ __atomic_store_n(&a->val_dont_use, v, mo);
}
-template<typename T>
-inline typename T::Type atomic_fetch_sub(volatile T *a,
- typename T::Type v, memory_order mo) {
+template <typename T>
+inline typename T::Type atomic_fetch_add(volatile T *a, typename T::Type v,
+ memory_order mo) {
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return __atomic_fetch_add(&a->val_dont_use, v, mo);
+}
+
+template <typename T>
+inline typename T::Type atomic_fetch_sub(volatile T *a, typename T::Type v,
+ memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
- return __sync_fetch_and_add(&a->val_dont_use, -v);
+ return __atomic_fetch_sub(&a->val_dont_use, v, mo);
}
-template<typename T>
-inline typename T::Type atomic_exchange(volatile T *a,
- typename T::Type v, memory_order mo) {
+template <typename T>
+inline typename T::Type atomic_exchange(volatile T *a, typename T::Type v,
+ memory_order mo) {
DCHECK(!((uptr)a % sizeof(*a)));
- if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
- __sync_synchronize();
- v = __sync_lock_test_and_set(&a->val_dont_use, v);
- if (mo == memory_order_seq_cst)
- __sync_synchronize();
- return v;
+ return __atomic_exchange_n(&a->val_dont_use, v, mo);
}
template <typename T>
@@ -82,9 +85,8 @@ inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
-template<typename T>
-inline bool atomic_compare_exchange_weak(volatile T *a,
- typename T::Type *cmp,
+template <typename T>
+inline bool atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
return atomic_compare_exchange_strong(a, cmp, xchg, mo);
@@ -92,13 +94,6 @@ inline bool atomic_compare_exchange_weak(volatile T *a,
} // namespace __sanitizer
-// This include provides explicit template instantiations for atomic_uint64_t
-// on MIPS32, which does not directly support 8 byte atomics. It has to
-// proceed the template definitions above.
-#if defined(_MIPS_SIM) && defined(_ABIO32) && _MIPS_SIM == _ABIO32
-# include "sanitizer_atomic_clang_mips.h"
-#endif
-
#undef ATOMIC_ORDER
#endif // SANITIZER_ATOMIC_CLANG_H
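For reference, a self-contained sketch of the wrapper pattern the header above now uses (the struct and function names here are hypothetical, not the sanitizer's): a plain struct holding the raw value, plus thin templates that forward the caller's memory order straight to the __atomic_* builtins and let the compiler pick the fence sequence for the target.

// atomic_wrapper_sketch.cpp -- illustrative analogue, Clang/GCC builtins.
#include <cstdint>

struct atomic_u32 {
  typedef std::uint32_t Type;
  volatile Type val;
};

template <typename T>
typename T::Type wrapped_load(const volatile T *a, int mo) {
  // The builtin emits the appropriate load/fence combination for `mo`.
  return __atomic_load_n(&a->val, mo);
}

template <typename T>
void wrapped_store(volatile T *a, typename T::Type v, int mo) {
  __atomic_store_n(&a->val, v, mo);
}

template <typename T>
typename T::Type wrapped_exchange(volatile T *a, typename T::Type v, int mo) {
  return __atomic_exchange_n(&a->val, v, mo);
}

int main() {
  atomic_u32 a = {0};
  wrapped_store(&a, 41u, __ATOMIC_RELEASE);
  std::uint32_t old = wrapped_exchange(&a, 42u, __ATOMIC_ACQ_REL);
  return (old == 41u && wrapped_load(&a, __ATOMIC_ACQUIRE) == 42u) ? 0 : 1;
}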
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h
deleted file mode 100644
index f3d3052..0000000
--- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h
+++ /dev/null
@@ -1,117 +0,0 @@
-//===-- sanitizer_atomic_clang_mips.h ---------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
-// Not intended for direct inclusion. Include sanitizer_atomic.h.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SANITIZER_ATOMIC_CLANG_MIPS_H
-#define SANITIZER_ATOMIC_CLANG_MIPS_H
-
-namespace __sanitizer {
-
-// MIPS32 does not support atomics > 4 bytes. To address this lack of
-// functionality, the sanitizer library provides helper methods which use an
-// internal spin lock mechanism to emulate atomic operations when the size is
-// 8 bytes.
-static void __spin_lock(volatile int *lock) {
- while (__sync_lock_test_and_set(lock, 1))
- while (*lock) {
- }
-}
-
-static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }
-
-// Make sure the lock is on its own cache line to prevent false sharing.
-// Put it inside a struct that is aligned and padded to the typical MIPS
-// cacheline which is 32 bytes.
-static struct {
- int lock;
- char pad[32 - sizeof(int)];
-} __attribute__((aligned(32))) lock = {0, {0}};
-
-template <>
-inline atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
- atomic_uint64_t::Type val,
- memory_order mo) {
- DCHECK(mo &
- (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
- DCHECK(!((uptr)ptr % sizeof(*ptr)));
-
- atomic_uint64_t::Type ret;
-
- __spin_lock(&lock.lock);
- ret = *(const_cast<atomic_uint64_t::Type volatile *>(&ptr->val_dont_use));
- ptr->val_dont_use = ret + val;
- __spin_unlock(&lock.lock);
-
- return ret;
-}
-
-template <>
-inline atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
- atomic_uint64_t::Type val,
- memory_order mo) {
- return atomic_fetch_add(ptr, -val, mo);
-}
-
-template <>
-inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
- atomic_uint64_t::Type *cmp,
- atomic_uint64_t::Type xchg,
- memory_order mo) {
- DCHECK(mo &
- (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
- DCHECK(!((uptr)ptr % sizeof(*ptr)));
-
- typedef atomic_uint64_t::Type Type;
- Type cmpv = *cmp;
- Type prev;
- bool ret = false;
-
- __spin_lock(&lock.lock);
- prev = *(const_cast<Type volatile *>(&ptr->val_dont_use));
- if (prev == cmpv) {
- ret = true;
- ptr->val_dont_use = xchg;
- }
- __spin_unlock(&lock.lock);
-
- return ret;
-}
-
-template <>
-inline atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
- memory_order mo) {
- DCHECK(mo &
- (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
- DCHECK(!((uptr)ptr % sizeof(*ptr)));
-
- atomic_uint64_t::Type zero = 0;
- volatile atomic_uint64_t *Newptr =
- const_cast<volatile atomic_uint64_t *>(ptr);
- return atomic_fetch_add(Newptr, zero, mo);
-}
-
-template <>
-inline void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
- memory_order mo) {
- DCHECK(mo &
- (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
- DCHECK(!((uptr)ptr % sizeof(*ptr)));
-
- __spin_lock(&lock.lock);
- ptr->val_dont_use = v;
- __spin_unlock(&lock.lock);
-}
-
-} // namespace __sanitizer
-
-#endif // SANITIZER_ATOMIC_CLANG_MIPS_H
-
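What replaces the spin-lock emulation deleted above is simply the generic __atomic path: on a target without native 8-byte atomics (MIPS32, per the commit message) the compiler lowers the builtin to a libatomic call, so the runtime links against libatomic instead of rolling its own lock. A minimal sketch with a hypothetical counter:

// u64_rmw_sketch.cpp -- illustrative; on 32-bit MIPS this typically becomes
// a call into libatomic (link with -latomic).
#include <cstdint>

static std::uint64_t big_counter;

std::uint64_t add_to_big_counter(std::uint64_t v) {
  // One builtin instead of spin_lock / load / store / spin_unlock.
  return __atomic_fetch_add(&big_counter, v, __ATOMIC_SEQ_CST);
}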
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h
deleted file mode 100644
index 557082a..0000000
--- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h
+++ /dev/null
@@ -1,85 +0,0 @@
-//===-- sanitizer_atomic_clang_other.h --------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
-// Not intended for direct inclusion. Include sanitizer_atomic.h.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
-#define SANITIZER_ATOMIC_CLANG_OTHER_H
-
-namespace __sanitizer {
-
-
-inline void proc_yield(int cnt) {
- __asm__ __volatile__("" ::: "memory");
-}
-
-template<typename T>
-inline typename T::Type atomic_load(
- const volatile T *a, memory_order mo) {
- DCHECK(mo & (memory_order_relaxed | memory_order_consume
- | memory_order_acquire | memory_order_seq_cst));
- DCHECK(!((uptr)a % sizeof(*a)));
- typename T::Type v;
-
- if (sizeof(*a) < 8 || sizeof(void*) == 8) {
- // Assume that aligned loads are atomic.
- if (mo == memory_order_relaxed) {
- v = a->val_dont_use;
- } else if (mo == memory_order_consume) {
- // Assume that processor respects data dependencies
- // (and that compiler won't break them).
- __asm__ __volatile__("" ::: "memory");
- v = a->val_dont_use;
- __asm__ __volatile__("" ::: "memory");
- } else if (mo == memory_order_acquire) {
- __asm__ __volatile__("" ::: "memory");
- v = a->val_dont_use;
- __sync_synchronize();
- } else { // seq_cst
- // E.g. on POWER we need a hw fence even before the store.
- __sync_synchronize();
- v = a->val_dont_use;
- __sync_synchronize();
- }
- } else {
- __atomic_load(const_cast<typename T::Type volatile *>(&a->val_dont_use), &v,
- __ATOMIC_SEQ_CST);
- }
- return v;
-}
-
-template<typename T>
-inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
- DCHECK(mo & (memory_order_relaxed | memory_order_release
- | memory_order_seq_cst));
- DCHECK(!((uptr)a % sizeof(*a)));
-
- if (sizeof(*a) < 8 || sizeof(void*) == 8) {
- // Assume that aligned stores are atomic.
- if (mo == memory_order_relaxed) {
- a->val_dont_use = v;
- } else if (mo == memory_order_release) {
- __sync_synchronize();
- a->val_dont_use = v;
- __asm__ __volatile__("" ::: "memory");
- } else { // seq_cst
- __sync_synchronize();
- a->val_dont_use = v;
- __sync_synchronize();
- }
- } else {
- __atomic_store(&a->val_dont_use, &v, __ATOMIC_SEQ_CST);
- }
-}
-
-} // namespace __sanitizer
-
-#endif // #ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
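The hand-rolled barrier sequences in the file deleted above collapse into single builtin calls. As a rough comparison sketch (assuming Clang/GCC; the names are hypothetical), the acquire load that used to be written as a compiler barrier, a plain load, and __sync_synchronize is now one call whose fencing the compiler tailors to the target:

// acquire_load_sketch.cpp -- illustrative comparison, not sanitizer code.
#include <cstdint>

static volatile std::uint32_t flag;

// Old style (as in the deleted file): rely on an aligned plain load plus
// explicit barriers to approximate acquire semantics.
std::uint32_t acquire_load_old_style() {
  __asm__ __volatile__("" ::: "memory");  // compiler barrier
  std::uint32_t v = flag;
  __sync_synchronize();                   // full hardware fence
  return v;
}

// New style: the builtin emits only the fence the architecture requires.
std::uint32_t acquire_load_new_style() {
  return __atomic_load_n(&flag, __ATOMIC_ACQUIRE);
}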
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h
deleted file mode 100644
index b81a354..0000000
--- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h
+++ /dev/null
@@ -1,113 +0,0 @@
-//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
-// Not intended for direct inclusion. Include sanitizer_atomic.h.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SANITIZER_ATOMIC_CLANG_X86_H
-#define SANITIZER_ATOMIC_CLANG_X86_H
-
-namespace __sanitizer {
-
-inline void proc_yield(int cnt) {
- __asm__ __volatile__("" ::: "memory");
- for (int i = 0; i < cnt; i++)
- __asm__ __volatile__("pause");
- __asm__ __volatile__("" ::: "memory");
-}
-
-template<typename T>
-inline typename T::Type atomic_load(
- const volatile T *a, memory_order mo) {
- DCHECK(mo & (memory_order_relaxed | memory_order_consume
- | memory_order_acquire | memory_order_seq_cst));
- DCHECK(!((uptr)a % sizeof(*a)));
- typename T::Type v;
-
- if (sizeof(*a) < 8 || sizeof(void*) == 8) {
- // Assume that aligned loads are atomic.
- if (mo == memory_order_relaxed) {
- v = a->val_dont_use;
- } else if (mo == memory_order_consume) {
- // Assume that processor respects data dependencies
- // (and that compiler won't break them).
- __asm__ __volatile__("" ::: "memory");
- v = a->val_dont_use;
- __asm__ __volatile__("" ::: "memory");
- } else if (mo == memory_order_acquire) {
- __asm__ __volatile__("" ::: "memory");
- v = a->val_dont_use;
- // On x86 loads are implicitly acquire.
- __asm__ __volatile__("" ::: "memory");
- } else { // seq_cst
- // On x86 plain MOV is enough for seq_cst store.
- __asm__ __volatile__("" ::: "memory");
- v = a->val_dont_use;
- __asm__ __volatile__("" ::: "memory");
- }
- } else {
- // 64-bit load on 32-bit platform.
- __asm__ __volatile__(
- "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
- "movq %%mm0, %0;" // (ptr could be read-only)
- "emms;" // Empty mmx state/Reset FP regs
- : "=m" (v)
- : "m" (a->val_dont_use)
- : // mark the mmx registers as clobbered
-#ifdef __MMX__
- "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
-#endif // #ifdef __MMX__
- "memory");
- }
- return v;
-}
-
-template<typename T>
-inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
- DCHECK(mo & (memory_order_relaxed | memory_order_release
- | memory_order_seq_cst));
- DCHECK(!((uptr)a % sizeof(*a)));
-
- if (sizeof(*a) < 8 || sizeof(void*) == 8) {
- // Assume that aligned stores are atomic.
- if (mo == memory_order_relaxed) {
- a->val_dont_use = v;
- } else if (mo == memory_order_release) {
- // On x86 stores are implicitly release.
- __asm__ __volatile__("" ::: "memory");
- a->val_dont_use = v;
- __asm__ __volatile__("" ::: "memory");
- } else { // seq_cst
- // On x86 stores are implicitly release.
- __asm__ __volatile__("" ::: "memory");
- a->val_dont_use = v;
- __sync_synchronize();
- }
- } else {
- // 64-bit store on 32-bit platform.
- __asm__ __volatile__(
- "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
- "movq %%mm0, %0;"
- "emms;" // Empty mmx state/Reset FP regs
- : "=m" (a->val_dont_use)
- : "m" (v)
- : // mark the mmx registers as clobbered
-#ifdef __MMX__
- "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
-#endif // #ifdef __MMX__
- "memory");
- if (mo == memory_order_seq_cst)
- __sync_synchronize();
- }
-}
-
-} // namespace __sanitizer
-
-#endif // #ifndef SANITIZER_ATOMIC_CLANG_X86_H
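The MMX sequence deleted above handled the one case (an 8-byte load or store on 32-bit x86) that a plain MOV cannot do atomically; with the builtins the compiler chooses the lowering itself, and whether the access is inline or goes through libatomic can be queried up front. A small sketch using the Clang/GCC lock-free query builtin (names hypothetical):

// lockfree_query_sketch.cpp -- illustrative only.
#include <cstdint>

static std::uint64_t wide_value;

bool wide_value_is_lock_free() {
  // True when the target can perform the 8-byte access inline; false means
  // the builtins fall back to libatomic on this target.
  return __atomic_is_lock_free(sizeof(wide_value), &wide_value);
}

std::uint64_t load_wide_value() {
  return __atomic_load_n(&wide_value, __ATOMIC_SEQ_CST);
}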
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h
index 31317ad..d80bfdb 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h
@@ -70,8 +70,8 @@ inline void proc_yield(int cnt) {
template<typename T>
inline typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
- DCHECK(mo & (memory_order_relaxed | memory_order_consume
- | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(mo == memory_order_relaxed || mo == memory_order_consume ||
+ mo == memory_order_acquire || mo == memory_order_seq_cst);
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
// FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
@@ -87,8 +87,8 @@ inline typename T::Type atomic_load(
template<typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
- DCHECK(mo & (memory_order_relaxed | memory_order_release
- | memory_order_seq_cst));
+ DCHECK(mo == memory_order_relaxed || mo == memory_order_release ||
+ mo == memory_order_seq_cst);
DCHECK(!((uptr)a % sizeof(*a)));
// FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
if (mo == memory_order_relaxed) {
diff --git a/llvm/utils/gn/secondary/compiler-rt/lib/sanitizer_common/BUILD.gn b/llvm/utils/gn/secondary/compiler-rt/lib/sanitizer_common/BUILD.gn
index 0519073..f7f1fce 100644
--- a/llvm/utils/gn/secondary/compiler-rt/lib/sanitizer_common/BUILD.gn
+++ b/llvm/utils/gn/secondary/compiler-rt/lib/sanitizer_common/BUILD.gn
@@ -33,9 +33,6 @@ source_set("sources") {
"sanitizer_asm.h",
"sanitizer_atomic.h",
"sanitizer_atomic_clang.h",
- "sanitizer_atomic_clang_mips.h",
- "sanitizer_atomic_clang_other.h",
- "sanitizer_atomic_clang_x86.h",
"sanitizer_atomic_msvc.h",
"sanitizer_bitvector.h",
"sanitizer_bvgraph.h",