author    Ryan Prichard <rprichard@google.com>  2022-07-21 13:09:48 -0700
committer Ryan Prichard <rprichard@google.com>  2022-07-21 17:23:29 -0700
commit    02a25279aedcd959d060bba585adc0fe1cec3782 (patch)
tree      1f630cbf2f852ffb10144023d98dc719826f477f /clang/lib/Frontend/InitPreprocessor.cpp
parent    408a2638fda63b381f8750e16c78bc3c845cfdfd (diff)
[Frontend] Correct values of ATOMIC_*_LOCK_FREE to match builtin
Correct the logic used to set the `ATOMIC_*_LOCK_FREE` preprocessor macros so that it does not rely on the ABI alignment of types. Instead, assume that all of those types are aligned correctly by default, since clang uses a safe alignment for `_Atomic` types even if the underlying types are aligned to a lower boundary by default.

For example, the `long long` and `double` types on x86 are aligned to a 32-bit boundary by default. However, `_Atomic long long` and `_Atomic double` are aligned to a 64-bit boundary, therefore satisfying the requirements of lock-free atomic operations.

This fixes PR #19355 by correcting the value of `__GCC_ATOMIC_LLONG_LOCK_FREE` on x86, and therefore also fixes the assumption made in libc++ tests. This also fixes PR #30581 by applying consistent logic in the functions used to implement both interfaces.

Reviewed By: hfinkel, efriedma

Differential Revision: https://reviews.llvm.org/D28213
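
The key point in the message above is the alignment promotion clang applies to `_Atomic(T)`. A minimal sketch for observing it, assuming a 32-bit x86 target (for example `clang -m32 -std=c11`); the exact values are target-dependent:

    /* Compare the default alignment of long long with the alignment of
     * _Atomic long long. On a typical 32-bit x86 target the former is 4
     * and the latter is 8, which is what allows 64-bit atomic operations
     * (e.g. via cmpxchg8b) to be lock-free for the atomic type. */
    #include <stdio.h>

    int main(void) {
      printf("alignof(long long)         = %zu\n", _Alignof(long long));
      printf("alignof(_Atomic long long) = %zu\n", _Alignof(_Atomic long long));
      return 0;
    }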
Diffstat (limited to 'clang/lib/Frontend/InitPreprocessor.cpp')
-rw-r--r--  clang/lib/Frontend/InitPreprocessor.cpp  10
1 file changed, 4 insertions, 6 deletions
diff --git a/clang/lib/Frontend/InitPreprocessor.cpp b/clang/lib/Frontend/InitPreprocessor.cpp
index 655490b..20bfbf1 100644
--- a/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/clang/lib/Frontend/InitPreprocessor.cpp
@@ -298,12 +298,12 @@ static void DefineFastIntType(unsigned TypeWidth, bool IsSigned,
/// Get the value the ATOMIC_*_LOCK_FREE macro should have for a type with
/// the specified properties.
-static const char *getLockFreeValue(unsigned TypeWidth, unsigned TypeAlign,
- unsigned InlineWidth) {
+static const char *getLockFreeValue(unsigned TypeWidth, unsigned InlineWidth) {
// Fully-aligned, power-of-2 sizes no larger than the inline
// width will be inlined as lock-free operations.
- if (TypeWidth == TypeAlign && (TypeWidth & (TypeWidth - 1)) == 0 &&
- TypeWidth <= InlineWidth)
+ // Note: we do not need to check alignment since _Atomic(T) is always
+ // appropriately-aligned in clang.
+ if ((TypeWidth & (TypeWidth - 1)) == 0 && TypeWidth <= InlineWidth)
return "2"; // "always lock free"
// We cannot be certain what operations the lib calls might be
// able to implement as lock-free on future processors.
@@ -1142,7 +1142,6 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
#define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \
Builder.defineMacro(Prefix + #TYPE "_LOCK_FREE", \
getLockFreeValue(TI.get##Type##Width(), \
- TI.get##Type##Align(), \
InlineWidthBits));
DEFINE_LOCK_FREE_MACRO(BOOL, Bool);
DEFINE_LOCK_FREE_MACRO(CHAR, Char);
@@ -1157,7 +1156,6 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
DEFINE_LOCK_FREE_MACRO(LLONG, LongLong);
Builder.defineMacro(Prefix + "POINTER_LOCK_FREE",
getLockFreeValue(TI.getPointerWidth(0),
- TI.getPointerAlign(0),
InlineWidthBits));
#undef DEFINE_LOCK_FREE_MACRO
};
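
The user-visible effect of the patch can be checked from a small C program; a sketch, assuming a 32-bit x86 target (`clang -m32 -std=c11`), where the old alignment-based check made `__GCC_ATOMIC_LLONG_LOCK_FREE` report 1; values on other targets will differ:

    /* Print the lock-free macros for long long. With this change, a
     * 32-bit x86 build is expected to report 2 ("always lock free"),
     * because _Atomic long long is 8-byte aligned even though plain
     * long long is only 4-byte aligned there. */
    #include <stdatomic.h>
    #include <stdio.h>

    int main(void) {
      printf("__GCC_ATOMIC_LLONG_LOCK_FREE = %d\n", __GCC_ATOMIC_LLONG_LOCK_FREE);
      printf("ATOMIC_LLONG_LOCK_FREE       = %d\n", ATOMIC_LLONG_LOCK_FREE);
      return 0;
    }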