Diffstat (limited to 'clang')
-rw-r--r-- clang/include/clang/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.h | 111
-rw-r--r-- clang/include/clang/Basic/BuiltinsX86.td | 46
-rw-r--r-- clang/include/clang/Basic/DiagnosticSemaKinds.td | 3
-rw-r--r-- clang/include/clang/Basic/DirectoryEntry.h | 10
-rw-r--r-- clang/include/clang/Basic/FileEntry.h | 6
-rw-r--r-- clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h | 5
-rw-r--r-- clang/include/clang/CIR/Dialect/IR/CIROps.td | 7
-rw-r--r-- clang/include/clang/CIR/MissingFeatures.h | 3
-rw-r--r-- clang/include/clang/Driver/Distro.h | 18
-rw-r--r-- clang/include/clang/Sema/Sema.h | 46
-rw-r--r-- clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h | 8
-rw-r--r-- clang/lib/AST/ASTConcept.cpp | 2
-rw-r--r-- clang/lib/AST/ByteCode/Context.cpp | 7
-rw-r--r-- clang/lib/AST/ByteCode/InterpBuiltin.cpp | 175
-rw-r--r-- clang/lib/AST/ByteCode/Program.cpp | 5
-rw-r--r-- clang/lib/AST/Decl.cpp | 10
-rw-r--r-- clang/lib/AST/ExprConstant.cpp | 131
-rw-r--r-- clang/lib/Analysis/ExprMutationAnalyzer.cpp | 20
-rw-r--r-- clang/lib/Analysis/FlowSensitive/Models/CMakeLists.txt | 1
-rw-r--r-- clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp | 6
-rw-r--r-- clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp | 290
-rw-r--r-- clang/lib/Basic/Targets/Mips.cpp | 10
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 10
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 182
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp | 165
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 5
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 6
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 25
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp | 7
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 4
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenValue.h | 11
-rw-r--r-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 10
-rw-r--r-- clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 31
-rw-r--r-- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2
-rw-r--r-- clang/lib/CodeGen/CGAtomic.cpp | 155
-rw-r--r-- clang/lib/CodeGen/CGBlocks.cpp | 2
-rw-r--r-- clang/lib/CodeGen/CGBuiltin.cpp | 2
-rw-r--r-- clang/lib/CodeGen/CGCUDANV.cpp | 2
-rw-r--r-- clang/lib/CodeGen/CGExpr.cpp | 6
-rw-r--r-- clang/lib/CodeGen/CGObjCMac.cpp | 26
-rw-r--r-- clang/lib/CodeGen/CGOpenMPRuntime.cpp | 6
-rw-r--r-- clang/lib/CodeGen/CGPointerAuth.cpp | 4
-rw-r--r-- clang/lib/CodeGen/CodeGenTypeCache.h | 2
-rw-r--r-- clang/lib/CodeGen/ItaniumCXXABI.cpp | 24
-rw-r--r-- clang/lib/CodeGen/MicrosoftCXXABI.cpp | 32
-rw-r--r-- clang/lib/CodeGen/TargetBuiltins/ARM.cpp | 22
-rw-r--r-- clang/lib/CodeGen/TargetBuiltins/PPC.cpp | 2
-rw-r--r-- clang/lib/CodeGen/Targets/PPC.cpp | 2
-rw-r--r-- clang/lib/Driver/Distro.cpp | 26
-rw-r--r-- clang/lib/Driver/ToolChains/Arch/Mips.cpp | 6
-rw-r--r-- clang/lib/Driver/ToolChains/Darwin.cpp | 14
-rw-r--r-- clang/lib/Format/ContinuationIndenter.cpp | 12
-rw-r--r-- clang/lib/Format/DefinitionBlockSeparator.cpp | 2
-rw-r--r-- clang/lib/Format/FormatToken.h | 3
-rw-r--r-- clang/lib/Format/TokenAnnotator.cpp | 59
-rw-r--r-- clang/lib/Format/WhitespaceManager.cpp | 192
-rw-r--r-- clang/lib/Frontend/FrontendAction.cpp | 2
-rw-r--r-- clang/lib/Headers/__clang_hip_runtime_wrapper.h | 1
-rw-r--r-- clang/lib/Headers/avx2intrin.h | 5
-rw-r--r-- clang/lib/Headers/avx512dqintrin.h | 34
-rw-r--r-- clang/lib/Headers/avx512fintrin.h | 30
-rw-r--r-- clang/lib/Headers/avx512vldqintrin.h | 18
-rw-r--r-- clang/lib/Headers/avx512vlintrin.h | 18
-rw-r--r-- clang/lib/Headers/avxintrin.h | 10
-rw-r--r-- clang/lib/Headers/emmintrin.h | 6
-rw-r--r-- clang/lib/Headers/smmintrin.h | 3
-rw-r--r-- clang/lib/Headers/xmmintrin.h | 4
-rw-r--r-- clang/lib/Lex/PPLexerChange.cpp | 2
-rw-r--r-- clang/lib/Parse/ParsePragma.cpp | 9
-rw-r--r-- clang/lib/Sema/SemaChecking.cpp | 21
-rw-r--r-- clang/lib/Sema/SemaConcept.cpp | 78
-rw-r--r-- clang/lib/Sema/SemaExpr.cpp | 2
-rw-r--r-- clang/lib/Sema/SemaInit.cpp | 11
-rw-r--r-- clang/lib/Sema/SemaOpenACC.cpp | 93
-rw-r--r-- clang/lib/Sema/SemaStmtAttr.cpp | 2
-rw-r--r-- clang/lib/Sema/SemaTemplateInstantiate.cpp | 93
-rw-r--r-- clang/lib/Sema/SemaTemplateInstantiateDecl.cpp | 36
-rw-r--r-- clang/lib/Sema/TreeTransform.h | 12
-rw-r--r-- clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp | 19
-rw-r--r-- clang/test/AST/ByteCode/c.c | 9
-rw-r--r-- clang/test/AST/ByteCode/extern.cpp | 6
-rw-r--r-- clang/test/Analysis/analyzer-stats/entry-point-stats.cpp | 2
-rw-r--r-- clang/test/Analysis/z3-crosscheck-max-attempts.cpp | 26
-rw-r--r-- clang/test/Analysis/z3/D83660.c | 2
-rw-r--r-- clang/test/CIR/CodeGen/agg-expr-lvalue.c | 111
-rw-r--r-- clang/test/CIR/CodeGen/binassign.c | 48
-rw-r--r-- clang/test/CIR/CodeGen/complex.cpp | 63
-rw-r--r-- clang/test/CIR/CodeGen/compound_literal.cpp | 27
-rw-r--r-- clang/test/CIR/CodeGen/dtors.cpp | 2
-rw-r--r-- clang/test/CIR/CodeGen/opaque.cpp | 163
-rw-r--r-- clang/test/CIR/CodeGen/struct.cpp | 2
-rw-r--r-- clang/test/CIR/CodeGen/ternary-throw.cpp | 197
-rw-r--r-- clang/test/CIR/CodeGen/ternary.cpp | 233
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp | 536
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp | 164
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp | 142
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp | 164
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp | 144
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c | 557
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp | 536
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c | 165
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp | 163
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp | 142
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c | 164
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp | 164
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp | 142
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c | 164
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp | 536
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp | 164
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp | 142
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp | 164
-rw-r--r-- clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp | 142
-rw-r--r-- clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1.c | 104
-rw-r--r-- clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sb.c | 48
-rw-r--r-- clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sh.c | 32
-rw-r--r-- clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sw.c | 16
-rw-r--r-- clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1ub.c | 48
-rw-r--r-- clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uh.c | 32
-rw-r--r-- clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uw.c | 16
-rw-r--r-- clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1.c | 104
-rw-r--r-- clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1b.c | 24
-rw-r--r-- clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1h.c | 16
-rw-r--r-- clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1w.c | 8
-rw-r--r-- clang/test/CodeGen/X86/avx-builtins.c | 12
-rw-r--r-- clang/test/CodeGen/X86/avx10_2bf16-builtins.c | 8
-rw-r--r-- clang/test/CodeGen/X86/avx2-builtins.c | 4
-rw-r--r-- clang/test/CodeGen/X86/avx512bw-builtins.c | 12
-rw-r--r-- clang/test/CodeGen/X86/avx512dq-builtins.c | 12
-rw-r--r-- clang/test/CodeGen/X86/avx512f-builtins.c | 65
-rw-r--r-- clang/test/CodeGen/X86/avx512fp16-builtins.c | 6
-rw-r--r-- clang/test/CodeGen/X86/avx512vl-builtins.c | 102
-rw-r--r-- clang/test/CodeGen/X86/avx512vlbw-builtins.c | 24
-rw-r--r-- clang/test/CodeGen/X86/avx512vldq-builtins.c | 6
-rw-r--r-- clang/test/CodeGen/X86/mmx-builtins.c | 4
-rw-r--r-- clang/test/CodeGen/X86/sse-builtins.c | 2
-rw-r--r-- clang/test/CodeGen/X86/sse2-builtins.c | 5
-rw-r--r-- clang/test/CodeGen/X86/sse41-builtins.c | 10
-rw-r--r-- clang/test/CodeGen/arm-mve-intrinsics/cplusplus.cpp | 2
-rw-r--r-- clang/test/CodeGen/arm-mve-intrinsics/dup.c | 24
-rw-r--r-- clang/test/CodeGen/arm-mve-intrinsics/load-store.c | 88
-rw-r--r-- clang/test/CodeGen/builtin-masked.c | 18
-rw-r--r-- clang/test/CodeGen/c11atomics-ios.c | 4
-rw-r--r-- clang/test/CodeGenCXX/builtin-atomic-compare_exchange.cpp | 130
-rw-r--r-- clang/test/CodeGenOpenCL/amdgpu-cluster-dims.cl | 47
-rw-r--r-- clang/test/Frontend/rewrite-includes-bom.c | 6
-rw-r--r-- clang/test/Headers/wasm.c | 18
-rw-r--r-- clang/test/Lexer/minimize_source_to_dependency_directives_utf8bom.c | 2
-rw-r--r-- clang/test/Modules/crash-vfs-relative-incdir.m | 2
-rw-r--r-- clang/test/Modules/crash-vfs-run-reproducer.m | 2
-rw-r--r-- clang/test/Preprocessor/predefined-arch-macros.c | 2
-rw-r--r-- clang/test/SemaCXX/libstdcxx_pair_swap_hack.cpp | 7
-rw-r--r-- clang/test/SemaCXX/warn-implicit-unicode-conversions.cpp | 8
-rw-r--r-- clang/test/SemaOpenACC/combined-construct-reduction-clause.cpp | 8
-rw-r--r-- clang/test/SemaOpenACC/compute-construct-reduction-clause.c | 18
-rw-r--r-- clang/test/SemaOpenACC/compute-construct-reduction-clause.cpp | 17
-rw-r--r-- clang/test/SemaTemplate/GH164330.cpp | 309
-rw-r--r-- clang/test/SemaTemplate/concepts.cpp | 18
-rw-r--r-- clang/test/SemaTemplate/instantiate-self.cpp | 28
-rw-r--r-- clang/test/Unit/lit.cfg.py | 2
-rw-r--r-- clang/unittests/Analysis/ExprMutationAnalyzerTest.cpp | 38
-rw-r--r-- clang/unittests/Analysis/FlowSensitive/CMakeLists.txt | 2
-rw-r--r-- clang/unittests/Analysis/FlowSensitive/MockHeaders.cpp | 12
-rw-r--r-- clang/unittests/Analysis/FlowSensitive/UncheckedStatusOrAccessModelTest.cpp | 34
-rw-r--r-- clang/unittests/Analysis/FlowSensitive/UncheckedStatusOrAccessModelTestFixture.cpp | 2518
-rw-r--r-- clang/unittests/Analysis/FlowSensitive/UncheckedStatusOrAccessModelTestFixture.h | 159
-rw-r--r-- clang/unittests/Driver/MultilibBuilderTest.cpp | 2
-rw-r--r-- clang/unittests/Format/FormatTest.cpp | 29
-rw-r--r-- clang/unittests/Format/TokenAnnotatorTest.cpp | 23
-rwxr-xr-x clang/www/cxx_status.html | 9
170 files changed, 10999 insertions(+), 1204 deletions(-)
diff --git a/clang/include/clang/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.h b/clang/include/clang/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.h
new file mode 100644
index 0000000..24f8b0b
--- /dev/null
+++ b/clang/include/clang/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.h
@@ -0,0 +1,111 @@
+//===- UncheckedStatusOrAccessModel.h -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_ANALYSIS_FLOWSENSITIVE_MODELS_UNCHECKEDSTATUSORACCESSMODEL_H
+#define CLANG_ANALYSIS_FLOWSENSITIVE_MODELS_UNCHECKEDSTATUSORACCESSMODEL_H
+
+#include "clang/AST/Type.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/FlowSensitive/CFGMatchSwitch.h"
+#include "clang/Analysis/FlowSensitive/DataflowAnalysis.h"
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/MatchSwitch.h"
+#include "clang/Analysis/FlowSensitive/NoopLattice.h"
+#include "clang/Analysis/FlowSensitive/StorageLocation.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace clang::dataflow::statusor_model {
+
+// The helper functions exported here are for use by downstream vendor
+// extensions of this model.
+
+// Match declaration of `absl::StatusOr<T>` and bind `T` to "T".
+clang::ast_matchers::DeclarationMatcher statusOrClass();
+// Match declaration of `absl::Status`.
+clang::ast_matchers::DeclarationMatcher statusClass();
+// Match declaration of `absl::internal_statusor::OperatorBase`.
+clang::ast_matchers::DeclarationMatcher statusOrOperatorBaseClass();
+clang::ast_matchers::TypeMatcher statusOrType();
+
+// Get RecordStorageLocation for the `Status` contained in the `StatusOr`
+RecordStorageLocation &locForStatus(RecordStorageLocation &StatusOrLoc);
+// Get the StorageLocation for the OK boolean in the `Status`
+StorageLocation &locForOk(RecordStorageLocation &StatusLoc);
+// Get the OK boolean in the `Status`, and initialize it if necessary.
+BoolValue &valForOk(RecordStorageLocation &StatusLoc, Environment &Env);
+// Get synthetic fields for the types modelled by
+// `UncheckedStatusOrAccessModel`.
+llvm::StringMap<QualType> getSyntheticFields(QualType Ty, QualType StatusType,
+ const CXXRecordDecl &RD);
+
+// Initialize the synthetic fields of the `StatusOr`.
+// N.B. if it is already initialized, the value gets reset.
+BoolValue &initializeStatusOr(RecordStorageLocation &StatusOrLoc,
+ Environment &Env);
+// Initialize the synthetic fields of the `Status`.
+// N.B. if it is already initialized, the value gets reset.
+BoolValue &initializeStatus(RecordStorageLocation &StatusLoc, Environment &Env);
+
+// Return true if `Type` is an instantiation of `absl::StatusOr<T>`.
+bool isStatusOrType(QualType Type);
+// Return true if `Type` is `absl::Status`.
+bool isStatusType(QualType Type);
+
+// Get the `QualType` for `absl::Status`, or a default-constructed
+// `QualType` if it does not exist.
+QualType findStatusType(const ASTContext &Ctx);
+
+struct UncheckedStatusOrAccessModelOptions {};
+
+// Dataflow analysis that discovers unsafe uses of StatusOr values.
+class UncheckedStatusOrAccessModel
+ : public DataflowAnalysis<UncheckedStatusOrAccessModel, NoopLattice> {
+public:
+ explicit UncheckedStatusOrAccessModel(ASTContext &Ctx, Environment &Env);
+
+ static Lattice initialElement() { return {}; }
+
+ void transfer(const CFGElement &Elt, Lattice &L, Environment &Env);
+
+private:
+ CFGMatchSwitch<TransferState<Lattice>> TransferMatchSwitch;
+};
+
+using LatticeTransferState =
+ TransferState<UncheckedStatusOrAccessModel::Lattice>;
+
+// Extend the Builder with the transfer functions for
+// `UncheckedStatusOrAccessModel`. This is useful for writing downstream
+// models that extend this one.
+CFGMatchSwitch<LatticeTransferState>
+buildTransferMatchSwitch(ASTContext &Ctx,
+ CFGMatchSwitchBuilder<LatticeTransferState> Builder);
+
+class UncheckedStatusOrAccessDiagnoser {
+public:
+ explicit UncheckedStatusOrAccessDiagnoser(
+ UncheckedStatusOrAccessModelOptions Options = {});
+
+ llvm::SmallVector<SourceLocation> operator()(
+ const CFGElement &Elt, ASTContext &Ctx,
+ const TransferStateForDiagnostics<UncheckedStatusOrAccessModel::Lattice>
+ &State);
+
+private:
+ CFGMatchSwitch<const Environment, llvm::SmallVector<SourceLocation>>
+ DiagnoseMatchSwitch;
+};
+
+} // namespace clang::dataflow::statusor_model
+
+#endif // CLANG_ANALYSIS_FLOWSENSITIVE_MODELS_UNCHECKEDSTATUSORACCESSMODEL_H
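[Editor's sketch, not part of the patch: the header above only declares the model and diagnoser; this is how a downstream client might wire them together, assuming the dataflow framework's `diagnoseFunction<>` entry point with its usual callback signature. The function name `diagnoseUncheckedAccesses` is illustrative.]

```cpp
#include "clang/Analysis/FlowSensitive/DataflowAnalysis.h"
#include "clang/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.h"

using namespace clang;
using namespace clang::dataflow;

// Hypothetical driver: runs the model to a fixpoint over FuncDecl's CFG and
// returns one SourceLocation per unchecked value()/operator*/operator-> use.
llvm::Expected<llvm::SmallVector<SourceLocation>>
diagnoseUncheckedAccesses(const FunctionDecl &FuncDecl, ASTContext &Ctx) {
  statusor_model::UncheckedStatusOrAccessDiagnoser Diagnoser;
  return diagnoseFunction<statusor_model::UncheckedStatusOrAccessModel,
                          SourceLocation>(FuncDecl, Ctx, Diagnoser);
}
```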
diff --git a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td
index d03c778..54b3ce0 100644
--- a/clang/include/clang/Basic/BuiltinsX86.td
+++ b/clang/include/clang/Basic/BuiltinsX86.td
@@ -185,7 +185,8 @@ let Features = "sse", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in
def cvttss2si : X86Builtin<"int(_Vector<4, float>)">;
}
-let Features = "sse", Attributes = [NoThrow, RequiredVectorWidth<128>] in {
+let Features = "sse",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def movmskps : X86Builtin<"int(_Vector<4, float>)">;
}
@@ -211,11 +212,6 @@ let Features = "sse2", Attributes = [NoThrow, RequiredVectorWidth<128>] in {
def maskmovdqu : X86Builtin<"void(_Vector<16, char>, _Vector<16, char>, char *)">;
}
-let Features = "sse2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
- def movmskpd : X86Builtin<"int(_Vector<2, double>)">;
- def pmovmskb128 : X86Builtin<"int(_Vector<16, char>)">;
-}
-
let Features = "sse2", Attributes = [NoThrow] in {
def movnti : X86Builtin<"void(int *, int)">;
}
@@ -224,6 +220,8 @@ let Features = "sse2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWi
def pshuflw : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Constant int)">;
def pshufd : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Constant int)">;
def pshufhw : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Constant int)">;
+ def movmskpd : X86Builtin<"int(_Vector<2, double>)">;
+ def pmovmskb128 : X86Builtin<"int(_Vector<16, char>)">;
}
let Features = "sse2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
@@ -334,8 +332,8 @@ let Features = "sse4.1", Attributes = [NoThrow, Const, RequiredVectorWidth<128>]
def dpps : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, float>, _Constant char)">;
def dppd : X86Builtin<"_Vector<2, double>(_Vector<2, double>, "
"_Vector<2,double>, _Constant char)">;
- def mpsadbw128 : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Vector<16, char>, _Constant char)">;
- def phminposuw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>)">;
+ def mpsadbw128 : X86Builtin<"_Vector<16, char>(_Vector<16, char>, "
+ "_Vector<16, char>, _Constant char)">;
}
let Features = "sse4.1",
@@ -358,6 +356,7 @@ let Features = "sse4.1", Attributes = [NoThrow, Const, Constexpr, RequiredVector
def pmuldq128 : X86Builtin<"_Vector<2, long long int>(_Vector<4, int>, _Vector<4, int>)">;
def packusdw128 : X86Builtin<"_Vector<8, short>(_Vector<4, int>, _Vector<4, int>)">;
+ def phminposuw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>)">;
def vec_ext_v16qi : X86Builtin<"char(_Vector<16, char>, _Constant int)">;
def vec_set_v16qi : X86Builtin<"_Vector<16, char>(_Vector<16, char>, char, _Constant int)">;
@@ -498,9 +497,6 @@ let Features = "avx", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in
def dpps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, _Vector<8, float>, _Constant char)">;
def cmppd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, _Vector<4, double>, _Constant char)">;
def cmpps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, _Vector<8, float>, _Constant char)">;
- def vextractf128_pd256 : X86Builtin<"_Vector<2, double>(_Vector<4, double>, _Constant int)">;
- def vextractf128_ps256 : X86Builtin<"_Vector<4, float>(_Vector<8, float>, _Constant int)">;
- def vextractf128_si256 : X86Builtin<"_Vector<4, int>(_Vector<8, int>, _Constant int)">;
def cvtpd2ps256 : X86Builtin<"_Vector<4, float>(_Vector<4, double>)">;
def cvtps2dq256 : X86Builtin<"_Vector<8, int>(_Vector<8, float>)">;
def cvttpd2dq256 : X86Builtin<"_Vector<4, int>(_Vector<4, double>)">;
@@ -521,6 +517,9 @@ let Features = "avx", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWid
def blendps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, _Vector<8, float>, _Constant int)">;
def blendvpd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, _Vector<4, double>, _Vector<4, double>)">;
def blendvps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, _Vector<8, float>, _Vector<8, float>)">;
+ def vextractf128_pd256 : X86Builtin<"_Vector<2, double>(_Vector<4, double>, _Constant int)">;
+ def vextractf128_ps256 : X86Builtin<"_Vector<4, float>(_Vector<8, float>, _Constant int)">;
+ def vextractf128_si256 : X86Builtin<"_Vector<4, int>(_Vector<8, int>, _Constant int)">;
def vinsertf128_pd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, _Vector<2, double>, _Constant int)">;
def vinsertf128_ps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, _Vector<4, float>, _Constant int)">;
def vinsertf128_si256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<4, int>, _Constant int)">;
@@ -562,11 +561,8 @@ let Features = "avx",
def vtestnzcps256 : X86Builtin<"int(_Vector<8, float>, _Vector<8, float>)">;
def ptestz256 : X86Builtin<"int(_Vector<4, long long int>, _Vector<4, long long int>)">;
def ptestc256 : X86Builtin<"int(_Vector<4, long long int>, _Vector<4, long long int>)">;
- def ptestnzc256 : X86Builtin<"int(_Vector<4, long long int>, _Vector<4, long long int>)">;
-}
-
-let Features = "avx",
- Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+ def ptestnzc256
+ : X86Builtin<"int(_Vector<4, long long int>, _Vector<4, long long int>)">;
def movmskpd256 : X86Builtin<"int(_Vector<4, double>)">;
def movmskps256 : X86Builtin<"int(_Vector<8, float>)">;
}
@@ -605,9 +601,8 @@ let Features = "avx", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWid
let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
def mpsadbw256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>, _Constant char)">;
- def palignr256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>, _Constant int)">;
-
- def pmovmskb256 : X86Builtin<"int(_Vector<32, char>)">;
+ def palignr256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, "
+ "_Vector<32, char>, _Constant int)">;
def psadbw256 : X86Builtin<"_Vector<4, long long int>(_Vector<32, char>, _Vector<32, char>)">;
def psignb256 : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Vector<32, char>)">;
def psignw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
@@ -627,11 +622,11 @@ let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] i
def permvarsf256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, _Vector<8, int>)">;
def permti256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<4, long long int>, _Constant int)">;
def permdi256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Constant int)">;
- def extract128i256 : X86Builtin<"_Vector<2, long long int>(_Vector<4, long long int>, _Constant int)">;
}
let Features = "avx2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
+ def pmovmskb256 : X86Builtin<"int(_Vector<32, char>)">;
def pavgb256 : X86Builtin<"_Vector<32, unsigned char>(_Vector<32, unsigned char>, _Vector<32, unsigned char>)">;
def pavgw256 : X86Builtin<"_Vector<16, unsigned short>(_Vector<16, unsigned short>, _Vector<16, unsigned short>)">;
@@ -694,6 +689,7 @@ let Features = "avx2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWi
def psrlv4si : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
def psllv2di : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>)">;
def psrlv2di : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>)">;
+ def extract128i256 : X86Builtin<"_Vector<2, long long int>(_Vector<4, long long int>, _Constant int)">;
}
let Features = "avx2", Attributes = [NoThrow, RequiredVectorWidth<256>] in {
@@ -1095,7 +1091,7 @@ let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256
def alignq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<4, long long int>, _Constant int)">;
}
-let Features = "avx512f", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512f", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def extractf64x4_mask : X86Builtin<"_Vector<4, double>(_Vector<8, double>, _Constant int, _Vector<4, double>, unsigned char)">;
def extractf32x4_mask : X86Builtin<"_Vector<4, float>(_Vector<16, float>, _Constant int, _Vector<4, float>, unsigned char)">;
}
@@ -2960,24 +2956,24 @@ let Features = "avx512vl", Attributes = [NoThrow, RequiredVectorWidth<256>] in {
def pmovqw256mem_mask : X86Builtin<"void(_Vector<8, short *>, _Vector<4, long long int>, unsigned char)">;
}
-let Features = "avx512dq", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512dq", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def extractf32x8_mask : X86Builtin<"_Vector<8, float>(_Vector<16, float>, _Constant int, _Vector<8, float>, unsigned char)">;
def extractf64x2_512_mask : X86Builtin<"_Vector<2, double>(_Vector<8, double>, _Constant int, _Vector<2, double>, unsigned char)">;
def extracti32x8_mask : X86Builtin<"_Vector<8, int>(_Vector<16, int>, _Constant int, _Vector<8, int>, unsigned char)">;
def extracti64x2_512_mask : X86Builtin<"_Vector<2, long long int>(_Vector<8, long long int>, _Constant int, _Vector<2, long long int>, unsigned char)">;
}
-let Features = "avx512f", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512f", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def extracti32x4_mask : X86Builtin<"_Vector<4, int>(_Vector<16, int>, _Constant int, _Vector<4, int>, unsigned char)">;
def extracti64x4_mask : X86Builtin<"_Vector<4, long long int>(_Vector<8, long long int>, _Constant int, _Vector<4, long long int>, unsigned char)">;
}
-let Features = "avx512dq,avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512dq,avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def extractf64x2_256_mask : X86Builtin<"_Vector<2, double>(_Vector<4, double>, _Constant int, _Vector<2, double>, unsigned char)">;
def extracti64x2_256_mask : X86Builtin<"_Vector<2, long long int>(_Vector<4, long long int>, _Constant int, _Vector<2, long long int>, unsigned char)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def extractf32x4_256_mask : X86Builtin<"_Vector<4, float>(_Vector<8, float>, _Constant int, _Vector<4, float>, unsigned char)">;
def extracti32x4_256_mask : X86Builtin<"_Vector<4, int>(_Vector<8, int>, _Constant int, _Vector<4, int>, unsigned char)">;
}
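[Editor's sketch, not part of the patch: what moving these builtins into `Constexpr` blocks buys, assuming the corresponding `*intrin.h` wrappers are constexpr-enabled as in the header changes in this commit; the values are illustrative.]

```cpp
#include <emmintrin.h>

// _mm_movemask_pd lowers to __builtin_ia32_movmskpd, which this change makes
// usable in constant expressions: it collects the sign bit of each element.
constexpr __m128d V = {-1.0, 2.0};       // sign bits: 1, 0
constexpr int Mask = _mm_movemask_pd(V);
static_assert(Mask == 0b01, "bit 0 set for -1.0, bit 1 clear for 2.0");
```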
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 22de85d..5ff4cc4 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -13676,6 +13676,9 @@ def err_acc_reduction_recipe_no_op
"not have a valid operation available">;
def note_acc_reduction_recipe_noop_field
: Note<"while forming combiner for compound type %0">;
+def note_acc_reduction_combiner_forming
+ : Note<"while forming %select{|binary operator '%1'|conditional "
+ "operator|final assignment operator}0">;
// AMDGCN builtins diagnostics
def err_amdgcn_load_lds_size_invalid_value : Error<"invalid size value">;
diff --git a/clang/include/clang/Basic/DirectoryEntry.h b/clang/include/clang/Basic/DirectoryEntry.h
index 35fe529..6965612 100644
--- a/clang/include/clang/Basic/DirectoryEntry.h
+++ b/clang/include/clang/Basic/DirectoryEntry.h
@@ -119,10 +119,10 @@ namespace FileMgr {
/// the private optional_none_tag to keep it to the size of a single pointer.
template <class RefTy> class MapEntryOptionalStorage {
using optional_none_tag = typename RefTy::optional_none_tag;
- RefTy MaybeRef;
+ RefTy MaybeRef = optional_none_tag();
public:
- MapEntryOptionalStorage() : MaybeRef(optional_none_tag()) {}
+ MapEntryOptionalStorage() = default;
template <class... ArgTypes>
explicit MapEntryOptionalStorage(std::in_place_t, ArgTypes &&...Args)
@@ -168,11 +168,7 @@ class OptionalStorage<clang::DirectoryEntryRef>
clang::FileMgr::MapEntryOptionalStorage<clang::DirectoryEntryRef>;
public:
- OptionalStorage() = default;
-
- template <class... ArgTypes>
- explicit OptionalStorage(std::in_place_t, ArgTypes &&...Args)
- : StorageImpl(std::in_place_t{}, std::forward<ArgTypes>(Args)...) {}
+ using StorageImpl::StorageImpl;
OptionalStorage &operator=(clang::DirectoryEntryRef Ref) {
StorageImpl::operator=(Ref);
diff --git a/clang/include/clang/Basic/FileEntry.h b/clang/include/clang/Basic/FileEntry.h
index c973ba3..e7091fd 100644
--- a/clang/include/clang/Basic/FileEntry.h
+++ b/clang/include/clang/Basic/FileEntry.h
@@ -218,11 +218,7 @@ class OptionalStorage<clang::FileEntryRef>
clang::FileMgr::MapEntryOptionalStorage<clang::FileEntryRef>;
public:
- OptionalStorage() = default;
-
- template <class... ArgTypes>
- explicit OptionalStorage(std::in_place_t, ArgTypes &&...Args)
- : StorageImpl(std::in_place_t{}, std::forward<ArgTypes>(Args)...) {}
+ using StorageImpl::StorageImpl;
OptionalStorage &operator=(clang::FileEntryRef Ref) {
StorageImpl::operator=(Ref);
diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
index 3ac8987..3288f5b 100644
--- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
+++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -311,8 +311,9 @@ public:
}
/// Create a copy with inferred length.
- cir::CopyOp createCopy(mlir::Value dst, mlir::Value src) {
- return cir::CopyOp::create(*this, dst.getLoc(), dst, src);
+ cir::CopyOp createCopy(mlir::Value dst, mlir::Value src,
+ bool isVolatile = false) {
+ return cir::CopyOp::create(*this, dst.getLoc(), dst, src, isVolatile);
}
cir::StoreOp createStore(mlir::Location loc, mlir::Value val, mlir::Value dst,
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index e0163a4..919c194 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -2724,6 +2724,8 @@ def CIR_CopyOp : CIR_Op<"copy",[
type of `src` and `dst` must match and both must implement the
`DataLayoutTypeInterface`.
+ The `volatile` keyword indicates that the copy is performed with volatile
+ semantics: its loads and stores may not be elided or reordered with other
+ volatile operations.
+
Examples:
```mlir
@@ -2734,10 +2736,11 @@ def CIR_CopyOp : CIR_Op<"copy",[
let arguments = (ins
Arg<CIR_PointerType, "", [MemWrite]>:$dst,
- Arg<CIR_PointerType, "", [MemRead]>:$src
+ Arg<CIR_PointerType, "", [MemRead]>:$src,
+ UnitAttr:$is_volatile
);
- let assemblyFormat = [{$src `to` $dst
+ let assemblyFormat = [{$src `to` $dst (`volatile` $is_volatile^)?
attr-dict `:` qualified(type($dst))
}];
let hasVerifier = 1;
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 090cf35..01da626 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -177,9 +177,10 @@ struct MissingFeatures {
static bool atomicInfo() { return false; }
static bool atomicInfoGetAtomicPointer() { return false; }
static bool atomicInfoGetAtomicAddress() { return false; }
- static bool atomicUseLibCall() { return false; }
static bool atomicScope() { return false; }
static bool atomicSyncScopeID() { return false; }
+ static bool atomicTypes() { return false; }
+ static bool atomicUseLibCall() { return false; }
// Global ctor handling
static bool globalCtorLexOrder() { return false; }
diff --git a/clang/include/clang/Driver/Distro.h b/clang/include/clang/Driver/Distro.h
index a515cbf..0e17b30 100644
--- a/clang/include/clang/Driver/Distro.h
+++ b/clang/include/clang/Driver/Distro.h
@@ -30,9 +30,6 @@ public:
// the first and last known member in the family, e.g. IsRedHat().
AlpineLinux,
ArchLinux,
- DebianLenny,
- DebianSqueeze,
- DebianWheezy,
DebianJessie,
DebianStretch,
DebianBuster,
@@ -42,16 +39,13 @@ public:
DebianForky,
DebianDuke,
Exherbo,
- RHEL5,
- RHEL6,
RHEL7,
+ RHEL8,
+ RHEL9,
+ RHEL10,
Fedora,
Gentoo,
OpenSUSE,
- UbuntuMaverick,
- UbuntuNatty,
- UbuntuOneiric,
- UbuntuPrecise,
UbuntuQuantal,
UbuntuRaring,
UbuntuSaucy,
@@ -121,17 +115,17 @@ public:
/// @{
bool IsRedhat() const {
- return DistroVal == Fedora || (DistroVal >= RHEL5 && DistroVal <= RHEL7);
+ return DistroVal == Fedora || (DistroVal >= RHEL7 && DistroVal <= RHEL10);
}
bool IsOpenSUSE() const { return DistroVal == OpenSUSE; }
bool IsDebian() const {
- return DistroVal >= DebianLenny && DistroVal <= DebianDuke;
+ return DistroVal >= DebianJessie && DistroVal <= DebianDuke;
}
bool IsUbuntu() const {
- return DistroVal >= UbuntuMaverick && DistroVal <= UbuntuResolute;
+ return DistroVal >= UbuntuQuantal && DistroVal <= UbuntuResolute;
}
bool IsAlpineLinux() const { return DistroVal == AlpineLinux; }
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index add4c15..cb21335 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -13007,6 +13007,37 @@ public:
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
+ using InstantiatingSpecializationsKey = llvm::PointerIntPair<Decl *, 2>;
+
+ struct RecursiveInstGuard {
+ enum class Kind {
+ Template,
+ DefaultArgument,
+ ExceptionSpec,
+ };
+
+ RecursiveInstGuard(Sema &S, Decl *D, Kind Kind)
+ : S(S), Key(D->getCanonicalDecl(), unsigned(Kind)) {
+ auto [_, Created] = S.InstantiatingSpecializations.insert(Key);
+ if (!Created)
+ Key = {};
+ }
+
+ ~RecursiveInstGuard() {
+ if (Key.getOpaqueValue()) {
+ [[maybe_unused]] bool Erased =
+ S.InstantiatingSpecializations.erase(Key);
+ assert(Erased);
+ }
+ }
+
+ operator bool() const { return Key.getOpaqueValue() == nullptr; }
+
+ private:
+ Sema &S;
+ Sema::InstantiatingSpecializationsKey Key;
+ };
+
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
@@ -13368,14 +13399,9 @@ public:
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
- /// Determine whether we are already instantiating this
- /// specialization in some surrounding active instantiation.
- bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
-
private:
Sema &SemaRef;
bool Invalid;
- bool AlreadyInstantiating;
InstantiatingTemplate(Sema &SemaRef,
CodeSynthesisContext::SynthesisKind Kind,
@@ -13505,7 +13531,7 @@ public:
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
- llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
+ llvm::DenseSet<InstantiatingSpecializationsKey> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
@@ -13780,6 +13806,14 @@ public:
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK, bool Complain = true);
+private:
+ bool InstantiateClassImpl(SourceLocation PointOfInstantiation,
+ CXXRecordDecl *Instantiation,
+ CXXRecordDecl *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK, bool Complain);
+
+public:
/// Instantiate the definition of an enum from a given pattern.
///
/// \param PointOfInstantiation The point of instantiation within the
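[Editor's sketch, not part of the patch: the usage pattern `RecursiveInstGuard` is designed for; `SemaRef` and `Instantiation` are illustrative names for a caller's state.]

```cpp
// Hypothetical call site inside an Instantiate* entry point: the guard tries
// to insert (canonical decl, kind) into InstantiatingSpecializations. If the
// key is already present, construction leaves Key null and the guard converts
// to true, signalling a self-recursive instantiation.
Sema::RecursiveInstGuard Guard(SemaRef, Instantiation,
                               Sema::RecursiveInstGuard::Kind::Template);
if (Guard)
  return true; // already instantiating this specialization: bail out
```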
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h
index 7613952..db4aec7 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h
@@ -48,6 +48,9 @@ class FunctionSummariesTy {
/// The number of times the function has been inlined.
unsigned TimesInlined : 32;
+ /// Running time for syntax-based AST analysis in milliseconds.
+ std::optional<unsigned> SyntaxRunningTime = std::nullopt;
+
FunctionSummary()
: TotalBasicBlocks(0), InlineChecked(0), MayInline(0),
TimesInlined(0) {}
@@ -69,6 +72,11 @@ public:
return I;
}
+ FunctionSummary const *findSummary(const Decl *D) const {
+ auto I = Map.find(D);
+ return I == Map.end() ? nullptr : &I->second;
+ }
+
void markMayInline(const Decl *D) {
MapTy::iterator I = findOrInsertSummary(D);
I->second.InlineChecked = 1;
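[Editor's sketch, not part of the patch: how the new const lookup differs from `findOrInsertSummary`; `Summaries` and `D` are illustrative.]

```cpp
// findSummary never creates an entry as a side effect, so it is safe to call
// from const contexts such as statistics reporting.
if (const auto *FS = Summaries.findSummary(D))
  if (FS->SyntaxRunningTime)
    llvm::outs() << "syntax AST analysis took " << *FS->SyntaxRunningTime
                 << " ms\n";
```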
diff --git a/clang/lib/AST/ASTConcept.cpp b/clang/lib/AST/ASTConcept.cpp
index fd12bc4..9ea104c 100644
--- a/clang/lib/AST/ASTConcept.cpp
+++ b/clang/lib/AST/ASTConcept.cpp
@@ -86,7 +86,7 @@ void ConstraintSatisfaction::Profile(llvm::FoldingSetNodeID &ID,
ID.AddPointer(ConstraintOwner);
ID.AddInteger(TemplateArgs.size());
for (auto &Arg : TemplateArgs)
- Arg.Profile(ID, C);
+ C.getCanonicalTemplateArgument(Arg).Profile(ID, C);
}
ConceptReference *
diff --git a/clang/lib/AST/ByteCode/Context.cpp b/clang/lib/AST/ByteCode/Context.cpp
index 683e916..4f4b122 100644
--- a/clang/lib/AST/ByteCode/Context.cpp
+++ b/clang/lib/AST/ByteCode/Context.cpp
@@ -566,10 +566,15 @@ const Function *Context::getOrCreateFunction(const FunctionDecl *FuncDecl) {
// Assign descriptors to all parameters.
// Composite objects are lowered to pointers.
- for (const ParmVarDecl *PD : FuncDecl->parameters()) {
+ const auto *FuncProto = FuncDecl->getType()->getAs<FunctionProtoType>();
+ for (auto [ParamIndex, PD] : llvm::enumerate(FuncDecl->parameters())) {
bool IsConst = PD->getType().isConstQualified();
bool IsVolatile = PD->getType().isVolatileQualified();
+ if (!getASTContext().hasSameType(PD->getType(),
+ FuncProto->getParamType(ParamIndex)))
+ return nullptr;
+
OptPrimType T = classify(PD->getType());
PrimType PT = T.value_or(PT_Ptr);
Descriptor *Desc = P->createDescriptor(PD, PT, nullptr, std::nullopt,
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 0cb4910..39b991c 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -2899,6 +2899,35 @@ static bool interp__builtin_ia32_test_op(
return true;
}
+static bool interp__builtin_ia32_movmsk_op(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call) {
+ assert(Call->getNumArgs() == 1);
+
+ const Pointer &Source = S.Stk.pop<Pointer>();
+
+ unsigned SourceLen = Source.getNumElems();
+ QualType ElemQT = getElemType(Source);
+ OptPrimType ElemT = S.getContext().classify(ElemQT);
+ unsigned ResultLen =
+ S.getASTContext().getTypeSize(Call->getType()); // Always 32-bit integer.
+ APInt Result(ResultLen, 0);
+
+ for (unsigned I = 0; I != SourceLen; ++I) {
+ APInt Elem;
+ if (ElemQT->isIntegerType()) {
+ INT_TYPE_SWITCH_NO_BOOL(*ElemT, { Elem = Source.elem<T>(I).toAPSInt(); });
+ } else if (ElemQT->isRealFloatingType()) {
+ using T = PrimConv<PT_Float>::T;
+ Elem = Source.elem<T>(I).getAPFloat().bitcastToAPInt();
+ } else {
+ return false;
+ }
+ Result.setBitVal(I, Elem.isNegative());
+ }
+ pushInteger(S, Result, Call->getType());
+ return true;
+}
+
static bool interp__builtin_elementwise_triop(
InterpState &S, CodePtr OpPC, const CallExpr *Call,
llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &)>
@@ -2962,6 +2991,82 @@ static bool interp__builtin_elementwise_triop(
return true;
}
+static bool interp__builtin_x86_extract_vector(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call,
+ unsigned ID) {
+ assert(Call->getNumArgs() == 2);
+
+ APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
+ uint64_t Index = ImmAPS.getZExtValue();
+
+ const Pointer &Src = S.Stk.pop<Pointer>();
+ if (!Src.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ if (!Dst.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ unsigned SrcElems = Src.getNumElems();
+ unsigned DstElems = Dst.getNumElems();
+
+ unsigned NumLanes = SrcElems / DstElems;
+ unsigned Lane = static_cast<unsigned>(Index % NumLanes);
+ unsigned ExtractPos = Lane * DstElems;
+
+ PrimType ElemT = Src.getFieldDesc()->getPrimType();
+
+ TYPE_SWITCH(ElemT, {
+ for (unsigned I = 0; I != DstElems; ++I) {
+ Dst.elem<T>(I) = Src.elem<T>(ExtractPos + I);
+ }
+ });
+
+ Dst.initializeAllElements();
+ return true;
+}
+
+static bool interp__builtin_x86_extract_vector_masked(InterpState &S,
+ CodePtr OpPC,
+ const CallExpr *Call,
+ unsigned ID) {
+ assert(Call->getNumArgs() == 4);
+
+ APSInt MaskAPS = popToAPSInt(S, Call->getArg(3));
+ const Pointer &Merge = S.Stk.pop<Pointer>();
+ APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
+ const Pointer &Src = S.Stk.pop<Pointer>();
+
+ if (!Src.getFieldDesc()->isPrimitiveArray() ||
+ !Merge.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ if (!Dst.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ unsigned SrcElems = Src.getNumElems();
+ unsigned DstElems = Dst.getNumElems();
+
+ unsigned NumLanes = SrcElems / DstElems;
+ unsigned Lane = static_cast<unsigned>(ImmAPS.getZExtValue() % NumLanes);
+ unsigned Base = Lane * DstElems;
+
+ PrimType ElemT = Src.getFieldDesc()->getPrimType();
+
+ TYPE_SWITCH(ElemT, {
+ for (unsigned I = 0; I != DstElems; ++I) {
+ if (MaskAPS[I])
+ Dst.elem<T>(I) = Src.elem<T>(Base + I);
+ else
+ Dst.elem<T>(I) = Merge.elem<T>(I);
+ }
+ });
+
+ Dst.initializeAllElements();
+ return true;
+}
+
static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC,
const CallExpr *Call,
unsigned ID) {
@@ -3003,6 +3108,45 @@ static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_ia32_phminposuw(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call) {
+ assert(Call->getNumArgs() == 1);
+
+ const Pointer &Source = S.Stk.pop<Pointer>();
+ const Pointer &Dest = S.Stk.peek<Pointer>();
+
+ unsigned SourceLen = Source.getNumElems();
+ QualType ElemQT = getElemType(Source);
+ OptPrimType ElemT = S.getContext().classify(ElemQT);
+ unsigned ElemBitWidth = S.getASTContext().getTypeSize(ElemQT);
+
+ bool DestUnsigned = Call->getCallReturnType(S.getASTContext())
+ ->castAs<VectorType>()
+ ->getElementType()
+ ->isUnsignedIntegerOrEnumerationType();
+
+ INT_TYPE_SWITCH_NO_BOOL(*ElemT, {
+ APSInt MinIndex(ElemBitWidth, DestUnsigned);
+ APSInt MinVal = Source.elem<T>(0).toAPSInt();
+
+ for (unsigned I = 1; I != SourceLen; ++I) {
+ APSInt Val = Source.elem<T>(I).toAPSInt();
+ if (MinVal.ugt(Val)) {
+ MinVal = Val;
+ MinIndex = I;
+ }
+ }
+
+ Dest.elem<T>(0) = static_cast<T>(MinVal);
+ Dest.elem<T>(1) = static_cast<T>(MinIndex);
+ for (unsigned I = 2; I != SourceLen; ++I) {
+ Dest.elem<T>(I) = static_cast<T>(APSInt(ElemBitWidth, DestUnsigned));
+ }
+ });
+ Dest.initializeAllElements();
+ return true;
+}
+
static bool interp__builtin_ia32_pternlog(InterpState &S, CodePtr OpPC,
const CallExpr *Call, bool MaskZ) {
assert(Call->getNumArgs() == 5);
@@ -3620,6 +3764,25 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
});
+ case X86::BI__builtin_ia32_extract128i256:
+ case X86::BI__builtin_ia32_vextractf128_pd256:
+ case X86::BI__builtin_ia32_vextractf128_ps256:
+ case X86::BI__builtin_ia32_vextractf128_si256:
+ return interp__builtin_x86_extract_vector(S, OpPC, Call, BuiltinID);
+
+ case X86::BI__builtin_ia32_extractf32x4_256_mask:
+ case X86::BI__builtin_ia32_extractf32x4_mask:
+ case X86::BI__builtin_ia32_extractf32x8_mask:
+ case X86::BI__builtin_ia32_extractf64x2_256_mask:
+ case X86::BI__builtin_ia32_extractf64x2_512_mask:
+ case X86::BI__builtin_ia32_extractf64x4_mask:
+ case X86::BI__builtin_ia32_extracti32x4_256_mask:
+ case X86::BI__builtin_ia32_extracti32x4_mask:
+ case X86::BI__builtin_ia32_extracti32x8_mask:
+ case X86::BI__builtin_ia32_extracti64x2_256_mask:
+ case X86::BI__builtin_ia32_extracti64x2_512_mask:
+ case X86::BI__builtin_ia32_extracti64x4_mask:
+ return interp__builtin_x86_extract_vector_masked(S, OpPC, Call, BuiltinID);
case clang::X86::BI__builtin_ia32_pmulhrsw128:
case clang::X86::BI__builtin_ia32_pmulhrsw256:
@@ -3630,6 +3793,15 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
.extractBits(16, 1);
});
+ case clang::X86::BI__builtin_ia32_movmskps:
+ case clang::X86::BI__builtin_ia32_movmskpd:
+ case clang::X86::BI__builtin_ia32_pmovmskb128:
+ case clang::X86::BI__builtin_ia32_pmovmskb256:
+ case clang::X86::BI__builtin_ia32_movmskps256:
+ case clang::X86::BI__builtin_ia32_movmskpd256: {
+ return interp__builtin_ia32_movmsk_op(S, OpPC, Call);
+ }
+
case clang::X86::BI__builtin_ia32_pavgb128:
case clang::X86::BI__builtin_ia32_pavgw128:
case clang::X86::BI__builtin_ia32_pavgb256:
@@ -4087,6 +4259,9 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
S, OpPC, Call,
[](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
+ case X86::BI__builtin_ia32_phminposuw128:
+ return interp__builtin_ia32_phminposuw(S, OpPC, Call);
+
case X86::BI__builtin_ia32_pternlogd128_mask:
case X86::BI__builtin_ia32_pternlogd256_mask:
case X86::BI__builtin_ia32_pternlogd512_mask:
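[Editor's sketch, not part of the patch: a constexpr test spelling out the `phminposuw` semantics the interpreter implements above; it assumes the set/extract intrinsics are also constexpr-enabled in the bundled headers.]

```cpp
#include <smmintrin.h>

// _mm_minpos_epu16 returns the minimum unsigned 16-bit element in lane 0 and
// the index of its first occurrence in lane 1; the remaining lanes are zeroed.
// The strict `ugt` comparison above keeps the lowest index on ties.
constexpr __m128i R = _mm_minpos_epu16(_mm_setr_epi16(5, 3, 9, 3, 7, 8, 3, 6));
static_assert(_mm_extract_epi16(R, 0) == 3, "minimum value");
static_assert(_mm_extract_epi16(R, 1) == 1, "lowest index among the ties");
```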
diff --git a/clang/lib/AST/ByteCode/Program.cpp b/clang/lib/AST/ByteCode/Program.cpp
index e653782..e0b2852 100644
--- a/clang/lib/AST/ByteCode/Program.cpp
+++ b/clang/lib/AST/ByteCode/Program.cpp
@@ -226,7 +226,10 @@ UnsignedOrNone Program::createGlobal(const ValueDecl *VD, const Expr *Init) {
Globals[PIdx] = NewGlobal;
// All pointers pointing to the previous extern decl now point to the
// new decl.
- RedeclBlock->movePointersTo(NewGlobal->block());
+ // A previous iteration might've already fixed up the pointers for this
+ // global.
+ if (RedeclBlock != NewGlobal->block())
+ RedeclBlock->movePointersTo(NewGlobal->block());
}
}
PIdx = *Idx;
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
index f048076..8579e51 100644
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -3380,11 +3380,11 @@ bool FunctionDecl::isMSVCRTEntryPoint() const {
return false;
return llvm::StringSwitch<bool>(getName())
- .Cases("main", // an ANSI console app
- "wmain", // a Unicode console App
- "WinMain", // an ANSI GUI app
- "wWinMain", // a Unicode GUI app
- "DllMain", // a DLL
+ .Cases({"main", // an ANSI console app
+ "wmain", // a Unicode console App
+ "WinMain", // an ANSI GUI app
+ "wWinMain", // a Unicode GUI app
+ "DllMain"}, // a DLL
true)
.Default(false);
}
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index e308c17..00aaaab 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -11811,6 +11811,73 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
});
+ case X86::BI__builtin_ia32_extract128i256:
+ case X86::BI__builtin_ia32_vextractf128_pd256:
+ case X86::BI__builtin_ia32_vextractf128_ps256:
+ case X86::BI__builtin_ia32_vextractf128_si256: {
+ APValue SourceVec, SourceImm;
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceVec) ||
+ !EvaluateAsRValue(Info, E->getArg(1), SourceImm))
+ return false;
+
+ if (!SourceVec.isVector())
+ return false;
+
+ const auto *RetVT = E->getType()->castAs<VectorType>();
+ unsigned RetLen = RetVT->getNumElements();
+ unsigned Idx = SourceImm.getInt().getZExtValue() & 1;
+
+ SmallVector<APValue, 32> ResultElements;
+ ResultElements.reserve(RetLen);
+
+ for (unsigned I = 0; I < RetLen; I++)
+ ResultElements.push_back(SourceVec.getVectorElt(Idx * RetLen + I));
+
+ return Success(APValue(ResultElements.data(), RetLen), E);
+ }
+
+ case X86::BI__builtin_ia32_extracti32x4_256_mask:
+ case X86::BI__builtin_ia32_extractf32x4_256_mask:
+ case X86::BI__builtin_ia32_extracti32x4_mask:
+ case X86::BI__builtin_ia32_extractf32x4_mask:
+ case X86::BI__builtin_ia32_extracti32x8_mask:
+ case X86::BI__builtin_ia32_extractf32x8_mask:
+ case X86::BI__builtin_ia32_extracti64x2_256_mask:
+ case X86::BI__builtin_ia32_extractf64x2_256_mask:
+ case X86::BI__builtin_ia32_extracti64x2_512_mask:
+ case X86::BI__builtin_ia32_extractf64x2_512_mask:
+ case X86::BI__builtin_ia32_extracti64x4_mask:
+ case X86::BI__builtin_ia32_extractf64x4_mask: {
+ APValue SourceVec, MergeVec;
+ APSInt Imm, MaskImm;
+
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceVec) ||
+ !EvaluateInteger(E->getArg(1), Imm, Info) ||
+ !EvaluateAsRValue(Info, E->getArg(2), MergeVec) ||
+ !EvaluateInteger(E->getArg(3), MaskImm, Info))
+ return false;
+
+ const auto *RetVT = E->getType()->castAs<VectorType>();
+ unsigned RetLen = RetVT->getNumElements();
+
+ if (!SourceVec.isVector() || !MergeVec.isVector())
+ return false;
+ unsigned SrcLen = SourceVec.getVectorLength();
+ unsigned Lanes = SrcLen / RetLen;
+ unsigned Lane = static_cast<unsigned>(Imm.getZExtValue() % Lanes);
+ unsigned Base = Lane * RetLen;
+
+ SmallVector<APValue, 32> ResultElements;
+ ResultElements.reserve(RetLen);
+ for (unsigned I = 0; I < RetLen; ++I) {
+ if (MaskImm[I])
+ ResultElements.push_back(SourceVec.getVectorElt(Base + I));
+ else
+ ResultElements.push_back(MergeVec.getVectorElt(I));
+ }
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+
case clang::X86::BI__builtin_ia32_pavgb128:
case clang::X86::BI__builtin_ia32_pavgw128:
case clang::X86::BI__builtin_ia32_pavgb256:
@@ -12353,6 +12420,40 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(R, E);
}
+ case X86::BI__builtin_ia32_phminposuw128: {
+ APValue Source;
+ if (!Evaluate(Source, Info, E->getArg(0)))
+ return false;
+ unsigned SourceLen = Source.getVectorLength();
+ const VectorType *VT = E->getArg(0)->getType()->castAs<VectorType>();
+ QualType ElemQT = VT->getElementType();
+ unsigned ElemBitWidth = Info.Ctx.getTypeSize(ElemQT);
+
+ APInt MinIndex(ElemBitWidth, 0);
+ APInt MinVal = Source.getVectorElt(0).getInt();
+ for (unsigned I = 1; I != SourceLen; ++I) {
+ APInt Val = Source.getVectorElt(I).getInt();
+ if (MinVal.ugt(Val)) {
+ MinVal = Val;
+ MinIndex = I;
+ }
+ }
+
+ bool ResultUnsigned = E->getCallReturnType(Info.Ctx)
+ ->castAs<VectorType>()
+ ->getElementType()
+ ->isUnsignedIntegerOrEnumerationType();
+
+ SmallVector<APValue, 8> Result;
+ Result.reserve(SourceLen);
+ Result.emplace_back(APSInt(MinVal, ResultUnsigned));
+ Result.emplace_back(APSInt(MinIndex, ResultUnsigned));
+ for (unsigned I = 0; I != SourceLen - 2; ++I) {
+ Result.emplace_back(APSInt(APInt(ElemBitWidth, 0), ResultUnsigned));
+ }
+ return Success(APValue(Result.data(), Result.size()), E);
+ }
+
case X86::BI__builtin_ia32_pternlogd128_mask:
case X86::BI__builtin_ia32_pternlogd256_mask:
case X86::BI__builtin_ia32_pternlogd512_mask:
@@ -15268,6 +15369,36 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(CarryOut, E);
}
+ case clang::X86::BI__builtin_ia32_movmskps:
+ case clang::X86::BI__builtin_ia32_movmskpd:
+ case clang::X86::BI__builtin_ia32_pmovmskb128:
+ case clang::X86::BI__builtin_ia32_pmovmskb256:
+ case clang::X86::BI__builtin_ia32_movmskps256:
+ case clang::X86::BI__builtin_ia32_movmskpd256: {
+ APValue Source;
+ if (!Evaluate(Source, Info, E->getArg(0)))
+ return false;
+ unsigned SourceLen = Source.getVectorLength();
+ const VectorType *VT = E->getArg(0)->getType()->castAs<VectorType>();
+ QualType ElemQT = VT->getElementType();
+ unsigned ResultLen = Info.Ctx.getTypeSize(
+ E->getCallReturnType(Info.Ctx)); // Always 32-bit integer.
+ APInt Result(ResultLen, 0);
+
+ for (unsigned I = 0; I != SourceLen; ++I) {
+ APInt Elem;
+ if (ElemQT->isIntegerType()) {
+ Elem = Source.getVectorElt(I).getInt();
+ } else if (ElemQT->isRealFloatingType()) {
+ Elem = Source.getVectorElt(I).getFloat().bitcastToAPInt();
+ } else {
+ return false;
+ }
+ Result.setBitVal(I, Elem.isNegative());
+ }
+ return Success(Result, E);
+ }
+
case clang::X86::BI__builtin_ia32_bextr_u32:
case clang::X86::BI__builtin_ia32_bextr_u64:
case clang::X86::BI__builtin_ia32_bextri_u32:
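[Editor's sketch, not part of the patch: the lane-extraction semantics both evaluators now implement, assuming the AVX wrapper is constexpr-enabled.]

```cpp
#include <immintrin.h>

// _mm256_extractf128_pd(V, 1) selects the upper 128-bit lane; the immediate
// is reduced to the lane count, matching the `Index % NumLanes` logic above.
constexpr __m256d V = {1.0, 2.0, 3.0, 4.0};
constexpr __m128d Hi = _mm256_extractf128_pd(V, 1);
static_assert(Hi[0] == 3.0 && Hi[1] == 4.0);
```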
diff --git a/clang/lib/Analysis/ExprMutationAnalyzer.cpp b/clang/lib/Analysis/ExprMutationAnalyzer.cpp
index 1e376da..75b17c54 100644
--- a/clang/lib/Analysis/ExprMutationAnalyzer.cpp
+++ b/clang/lib/Analysis/ExprMutationAnalyzer.cpp
@@ -140,7 +140,8 @@ class ExprPointeeResolve {
// explicit cast will be checked in `findPointeeToNonConst`
const CastKind kind = ICE->getCastKind();
if (kind == CK_LValueToRValue || kind == CK_DerivedToBase ||
- kind == CK_UncheckedDerivedToBase)
+ kind == CK_UncheckedDerivedToBase ||
+ (kind == CK_NoOp && (ICE->getType() == ICE->getSubExpr()->getType())))
return resolveExpr(ICE->getSubExpr());
return false;
}
@@ -788,13 +789,16 @@ ExprMutationAnalyzer::Analyzer::findPointeeToNonConst(const Expr *Exp) {
// FIXME: false positive if the pointee does not change in lambda
const auto CaptureNoConst = lambdaExpr(hasCaptureInit(Exp));
- const auto Matches =
- match(stmt(anyOf(forEachDescendant(
- stmt(anyOf(AssignToNonConst, PassAsNonConstArg,
- CastToNonConst, CaptureNoConst))
- .bind("stmt")),
- forEachDescendant(InitToNonConst))),
- Stm, Context);
+ const auto ReturnNoConst =
+ returnStmt(hasReturnValue(canResolveToExprPointee(Exp)));
+
+ const auto Matches = match(
+ stmt(anyOf(forEachDescendant(
+ stmt(anyOf(AssignToNonConst, PassAsNonConstArg,
+ CastToNonConst, CaptureNoConst, ReturnNoConst))
+ .bind("stmt")),
+ forEachDescendant(InitToNonConst))),
+ Stm, Context);
return selectFirst<Stmt>("stmt", Matches);
}
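[Editor's sketch, not part of the patch: a minimal example of the escape pattern the new `ReturnNoConst` matcher catches.]

```cpp
// Before this change, returning a non-const pointer to the pointee was not
// treated as a mutation; with ReturnNoConst, the pointee of P is considered
// mutable here, because callers may write through the returned pointer.
int *passThrough(int *P) { return P; }
```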
diff --git a/clang/lib/Analysis/FlowSensitive/Models/CMakeLists.txt b/clang/lib/Analysis/FlowSensitive/Models/CMakeLists.txt
index 89bbe87..d1236f5 100644
--- a/clang/lib/Analysis/FlowSensitive/Models/CMakeLists.txt
+++ b/clang/lib/Analysis/FlowSensitive/Models/CMakeLists.txt
@@ -1,6 +1,7 @@
add_clang_library(clangAnalysisFlowSensitiveModels
ChromiumCheckModel.cpp
UncheckedOptionalAccessModel.cpp
+ UncheckedStatusOrAccessModel.cpp
LINK_LIBS
clangAnalysis
diff --git a/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp b/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
index bb703ef..0fa333e 100644
--- a/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
+++ b/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
@@ -241,9 +241,9 @@ auto nulloptTypeDecl() {
auto hasNulloptType() { return hasType(nulloptTypeDecl()); }
auto inPlaceClass() {
- return recordDecl(hasAnyName("std::in_place_t", "absl::in_place_t",
- "base::in_place_t", "folly::in_place_t",
- "bsl::in_place_t"));
+ return namedDecl(hasAnyName("std::in_place_t", "absl::in_place_t",
+ "base::in_place_t", "folly::in_place_t",
+ "bsl::in_place_t"));
}
auto isOptionalNulloptConstructor() {
diff --git a/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp b/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
new file mode 100644
index 0000000..c88a470
--- /dev/null
+++ b/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
@@ -0,0 +1,290 @@
+//===- UncheckedStatusOrAccessModel.cpp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.h"
+
+#include <cassert>
+#include <utility>
+
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/TypeBase.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/ASTMatchers/ASTMatchersInternal.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/FlowSensitive/CFGMatchSwitch.h"
+#include "clang/Analysis/FlowSensitive/DataflowAnalysis.h"
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/MatchSwitch.h"
+#include "clang/Analysis/FlowSensitive/StorageLocation.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/StringMap.h"
+
+namespace clang::dataflow::statusor_model {
+namespace {
+
+using ::clang::ast_matchers::MatchFinder;
+using ::clang::ast_matchers::StatementMatcher;
+
+} // namespace
+
+static bool namespaceEquals(const NamespaceDecl *NS,
+ clang::ArrayRef<clang::StringRef> NamespaceNames) {
+ while (!NamespaceNames.empty() && NS) {
+ if (NS->getName() != NamespaceNames.consume_back())
+ return false;
+ NS = dyn_cast_or_null<NamespaceDecl>(NS->getParent());
+ }
+ return NamespaceNames.empty() && !NS;
+}
+
+// TODO: move this to a proper place to share with the rest of clang
+static bool isTypeNamed(QualType Type, clang::ArrayRef<clang::StringRef> NS,
+ StringRef Name) {
+ if (Type.isNull())
+ return false;
+ if (auto *RD = Type->getAsRecordDecl())
+ if (RD->getName() == Name)
+ if (const auto *N = dyn_cast_or_null<NamespaceDecl>(RD->getDeclContext()))
+ return namespaceEquals(N, NS);
+ return false;
+}
+
+static bool isStatusOrOperatorBaseType(QualType Type) {
+ return isTypeNamed(Type, {"absl", "internal_statusor"}, "OperatorBase");
+}
+
+static bool isSafeUnwrap(RecordStorageLocation *StatusOrLoc,
+ const Environment &Env) {
+ if (!StatusOrLoc)
+ return false;
+ auto &StatusLoc = locForStatus(*StatusOrLoc);
+ auto *OkVal = Env.get<BoolValue>(locForOk(StatusLoc));
+ return OkVal != nullptr && Env.proves(OkVal->formula());
+}
+
+static ClassTemplateSpecializationDecl *
+getStatusOrBaseClass(const QualType &Ty) {
+ auto *RD = Ty->getAsCXXRecordDecl();
+ if (RD == nullptr)
+ return nullptr;
+ if (isStatusOrType(Ty) ||
+ // In case we are analyzing code under OperatorBase itself that uses
+ // operator* (e.g. to implement operator->).
+ isStatusOrOperatorBaseType(Ty))
+ return cast<ClassTemplateSpecializationDecl>(RD);
+ if (!RD->hasDefinition())
+ return nullptr;
+ for (const auto &Base : RD->bases())
+ if (auto *QT = getStatusOrBaseClass(Base.getType()))
+ return QT;
+ return nullptr;
+}
+
+static QualType getStatusOrValueType(ClassTemplateSpecializationDecl *TRD) {
+ return TRD->getTemplateArgs().get(0).getAsType();
+}
+
+static auto isStatusOrMemberCallWithName(llvm::StringRef member_name) {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return cxxMemberCallExpr(
+ on(expr(unless(cxxThisExpr()))),
+ callee(cxxMethodDecl(
+ hasName(member_name),
+ ofClass(anyOf(statusOrClass(), statusOrOperatorBaseClass())))));
+}
+
+static auto isStatusOrOperatorCallWithName(llvm::StringRef operator_name) {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return cxxOperatorCallExpr(
+ hasOverloadedOperatorName(operator_name),
+ callee(cxxMethodDecl(
+ ofClass(anyOf(statusOrClass(), statusOrOperatorBaseClass())))));
+}
+
+static auto valueCall() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return anyOf(isStatusOrMemberCallWithName("value"),
+ isStatusOrMemberCallWithName("ValueOrDie"));
+}
+
+static auto valueOperatorCall() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return expr(anyOf(isStatusOrOperatorCallWithName("*"),
+ isStatusOrOperatorCallWithName("->")));
+}
+
+static auto
+buildDiagnoseMatchSwitch(const UncheckedStatusOrAccessModelOptions &Options) {
+ return CFGMatchSwitchBuilder<const Environment,
+ llvm::SmallVector<SourceLocation>>()
+ // StatusOr::value, StatusOr::ValueOrDie
+ .CaseOfCFGStmt<CXXMemberCallExpr>(
+ valueCall(),
+ [](const CXXMemberCallExpr *E,
+ const ast_matchers::MatchFinder::MatchResult &,
+ const Environment &Env) {
+ if (!isSafeUnwrap(getImplicitObjectLocation(*E, Env), Env))
+ return llvm::SmallVector<SourceLocation>({E->getExprLoc()});
+ return llvm::SmallVector<SourceLocation>();
+ })
+
+ // StatusOr::operator*, StatusOr::operator->
+ .CaseOfCFGStmt<CXXOperatorCallExpr>(
+ valueOperatorCall(),
+ [](const CXXOperatorCallExpr *E,
+ const ast_matchers::MatchFinder::MatchResult &,
+ const Environment &Env) {
+ RecordStorageLocation *StatusOrLoc =
+ Env.get<RecordStorageLocation>(*E->getArg(0));
+ if (!isSafeUnwrap(StatusOrLoc, Env))
+ return llvm::SmallVector<SourceLocation>({E->getOperatorLoc()});
+ return llvm::SmallVector<SourceLocation>();
+ })
+ .Build();
+}
+
+UncheckedStatusOrAccessDiagnoser::UncheckedStatusOrAccessDiagnoser(
+ UncheckedStatusOrAccessModelOptions Options)
+ : DiagnoseMatchSwitch(buildDiagnoseMatchSwitch(Options)) {}
+
+llvm::SmallVector<SourceLocation> UncheckedStatusOrAccessDiagnoser::operator()(
+ const CFGElement &Elt, ASTContext &Ctx,
+ const TransferStateForDiagnostics<UncheckedStatusOrAccessModel::Lattice>
+ &State) {
+ return DiagnoseMatchSwitch(Elt, Ctx, State.Env);
+}
+
+BoolValue &initializeStatus(RecordStorageLocation &StatusLoc,
+ Environment &Env) {
+ auto &OkVal = Env.makeAtomicBoolValue();
+ Env.setValue(locForOk(StatusLoc), OkVal);
+ return OkVal;
+}
+
+BoolValue &initializeStatusOr(RecordStorageLocation &StatusOrLoc,
+ Environment &Env) {
+ return initializeStatus(locForStatus(StatusOrLoc), Env);
+}
+
+clang::ast_matchers::DeclarationMatcher statusOrClass() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return classTemplateSpecializationDecl(
+ hasName("absl::StatusOr"),
+ hasTemplateArgument(0, refersToType(type().bind("T"))));
+}
+
+clang::ast_matchers::DeclarationMatcher statusClass() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return cxxRecordDecl(hasName("absl::Status"));
+}
+
+clang::ast_matchers::DeclarationMatcher statusOrOperatorBaseClass() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return classTemplateSpecializationDecl(
+ hasName("absl::internal_statusor::OperatorBase"));
+}
+
+clang::ast_matchers::TypeMatcher statusOrType() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return hasCanonicalType(qualType(hasDeclaration(statusOrClass())));
+}
+
+bool isStatusOrType(QualType Type) {
+ return isTypeNamed(Type, {"absl"}, "StatusOr");
+}
+
+bool isStatusType(QualType Type) {
+ return isTypeNamed(Type, {"absl"}, "Status");
+}
+
+llvm::StringMap<QualType> getSyntheticFields(QualType Ty, QualType StatusType,
+ const CXXRecordDecl &RD) {
+ if (auto *TRD = getStatusOrBaseClass(Ty))
+ return {{"status", StatusType}, {"value", getStatusOrValueType(TRD)}};
+ if (isStatusType(Ty) || (RD.hasDefinition() &&
+ RD.isDerivedFrom(StatusType->getAsCXXRecordDecl())))
+ return {{"ok", RD.getASTContext().BoolTy}};
+ return {};
+}
+
+RecordStorageLocation &locForStatus(RecordStorageLocation &StatusOrLoc) {
+ return cast<RecordStorageLocation>(StatusOrLoc.getSyntheticField("status"));
+}
+
+StorageLocation &locForOk(RecordStorageLocation &StatusLoc) {
+ return StatusLoc.getSyntheticField("ok");
+}
+
+BoolValue &valForOk(RecordStorageLocation &StatusLoc, Environment &Env) {
+ if (auto *Val = Env.get<BoolValue>(locForOk(StatusLoc)))
+ return *Val;
+ return initializeStatus(StatusLoc, Env);
+}
+
+static void transferStatusOrOkCall(const CXXMemberCallExpr *Expr,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ RecordStorageLocation *StatusOrLoc =
+ getImplicitObjectLocation(*Expr, State.Env);
+ if (StatusOrLoc == nullptr)
+ return;
+
+ auto &OkVal = valForOk(locForStatus(*StatusOrLoc), State.Env);
+ State.Env.setValue(*Expr, OkVal);
+}
+
+CFGMatchSwitch<LatticeTransferState>
+buildTransferMatchSwitch(ASTContext &Ctx,
+ CFGMatchSwitchBuilder<LatticeTransferState> Builder) {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return std::move(Builder)
+ .CaseOfCFGStmt<CXXMemberCallExpr>(isStatusOrMemberCallWithName("ok"),
+ transferStatusOrOkCall)
+ .Build();
+}
+
+QualType findStatusType(const ASTContext &Ctx) {
+ for (Type *Ty : Ctx.getTypes())
+ if (isStatusType(QualType(Ty, 0)))
+ return QualType(Ty, 0);
+
+ return QualType();
+}
+
+UncheckedStatusOrAccessModel::UncheckedStatusOrAccessModel(ASTContext &Ctx,
+ Environment &Env)
+ : DataflowAnalysis<UncheckedStatusOrAccessModel,
+ UncheckedStatusOrAccessModel::Lattice>(Ctx),
+ TransferMatchSwitch(buildTransferMatchSwitch(Ctx, {})) {
+ QualType StatusType = findStatusType(Ctx);
+ Env.getDataflowAnalysisContext().setSyntheticFieldCallback(
+ [StatusType](QualType Ty) -> llvm::StringMap<QualType> {
+ CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ if (RD == nullptr)
+ return {};
+
+ if (auto Fields = getSyntheticFields(Ty, StatusType, *RD);
+ !Fields.empty())
+ return Fields;
+ return {};
+ });
+}
+
+void UncheckedStatusOrAccessModel::transfer(const CFGElement &Elt, Lattice &L,
+ Environment &Env) {
+ LatticeTransferState State(L, Env);
+ TransferMatchSwitch(Elt, getASTContext(), State);
+}
+
+} // namespace clang::dataflow::statusor_model
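
For orientation, a minimal sketch of what the diagnoser flags versus accepts, assuming the usual absl::StatusOr<T> API as encoded in the matchers above:

  absl::StatusOr<int> MaybeInt();

  void Use() {
    absl::StatusOr<int> S = MaybeInt();
    int A = S.value(); // flagged: ok() has not been proven on this path
    if (S.ok()) {
      int B = *S;      // accepted: the environment proves ok() here
      (void)B;
    }
    (void)A;
  }
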
diff --git a/clang/lib/Basic/Targets/Mips.cpp b/clang/lib/Basic/Targets/Mips.cpp
index de6ccff..a999d14 100644
--- a/clang/lib/Basic/Targets/Mips.cpp
+++ b/clang/lib/Basic/Targets/Mips.cpp
@@ -68,11 +68,11 @@ void MipsTargetInfo::fillValidCPUList(
unsigned MipsTargetInfo::getISARev() const {
return llvm::StringSwitch<unsigned>(getCPU())
- .Cases("mips32", "mips64", 1)
- .Cases("mips32r2", "mips64r2", "octeon", "octeon+", 2)
- .Cases("mips32r3", "mips64r3", 3)
- .Cases("mips32r5", "mips64r5", "p5600", 5)
- .Cases("mips32r6", "mips64r6", "i6400", "i6500", 6)
+ .Cases({"mips32", "mips64"}, 1)
+ .Cases({"mips32r2", "mips64r2", "octeon", "octeon+"}, 2)
+ .Cases({"mips32r3", "mips64r3"}, 3)
+ .Cases({"mips32r5", "mips64r5", "p5600"}, 5)
+ .Cases({"mips32r6", "mips64r6", "i6400", "i6500"}, 6)
.Default(0);
}
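
The hunk above migrates to the initializer-list overload of llvm::StringSwitch::Cases, grouping the keys for one shared value. A sketch of the pattern, assuming an LLVM recent enough to provide that overload (which the change itself implies):

  unsigned isaRevFor(llvm::StringRef CPU) {
    return llvm::StringSwitch<unsigned>(CPU)
        .Cases({"mips32", "mips64"}, 1)
        .Case("octeon", 2)
        .Default(0);
  }
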
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 4a19d91..5667273 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -740,6 +740,16 @@ struct CallStackRestore final : EHScopeStack::Cleanup {
};
} // namespace
+/// Push the standard destructor for the given type as
+/// at least a normal cleanup.
+void CIRGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
+ Address addr, QualType type) {
+ assert(dtorKind && "cannot push destructor for trivial type");
+
+ CleanupKind cleanupKind = getCleanupKind(dtorKind);
+ pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind));
+}
+
void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
QualType type, Destroyer *destroyer) {
pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type, destroyer);
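
On the caller side, the new overload collapses the common destructor-push pattern; a sketch with an illustrative call site (not part of the patch):

  if (QualType::DestructionKind dtorKind = type.isDestructedType())
    cgf.pushDestroy(dtorKind, addr, type); // derives the CleanupKind itself
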
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 4897c29..9732c9c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -1626,14 +1626,15 @@ LValue CIRGenFunction::emitBinaryOperatorLValue(const BinaryOperator *e) {
/// Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
-RValue CIRGenFunction::emitAnyExpr(const Expr *e, AggValueSlot aggSlot) {
+RValue CIRGenFunction::emitAnyExpr(const Expr *e, AggValueSlot aggSlot,
+ bool ignoreResult) {
switch (CIRGenFunction::getEvaluationKind(e->getType())) {
case cir::TEK_Scalar:
return RValue::get(emitScalarExpr(e));
case cir::TEK_Complex:
return RValue::getComplex(emitComplexExpr(e));
case cir::TEK_Aggregate: {
- if (aggSlot.isIgnored())
+ if (!ignoreResult && aggSlot.isIgnored())
aggSlot = createAggTemp(e->getType(), getLoc(e->getSourceRange()),
getCounterAggTmpAsString());
emitAggExpr(e, aggSlot);
@@ -1869,8 +1870,7 @@ RValue CIRGenFunction::emitCallExpr(const clang::CallExpr *e,
/// Emit code to compute the specified expression, ignoring the result.
void CIRGenFunction::emitIgnoredExpr(const Expr *e) {
if (e->isPRValue()) {
- assert(!cir::MissingFeatures::aggValueSlot());
- emitAnyExpr(e);
+ emitAnyExpr(e, AggValueSlot::ignored(), /*ignoreResult=*/true);
return;
}
@@ -2394,6 +2394,180 @@ LValue CIRGenFunction::emitPredefinedLValue(const PredefinedExpr *e) {
return emitStringLiteralLValue(sl, gvName);
}
+LValue CIRGenFunction::emitOpaqueValueLValue(const OpaqueValueExpr *e) {
+ assert(OpaqueValueMappingData::shouldBindAsLValue(e));
+ return getOrCreateOpaqueLValueMapping(e);
+}
+
+namespace {
+// Handle the case where the condition is a constant evaluatable simple integer,
+// which means we don't have to separately handle the true/false blocks.
+std::optional<LValue> handleConditionalOperatorLValueSimpleCase(
+ CIRGenFunction &cgf, const AbstractConditionalOperator *e) {
+ const Expr *condExpr = e->getCond();
+ llvm::APSInt condExprVal;
+ if (!cgf.constantFoldsToSimpleInteger(condExpr, condExprVal))
+ return std::nullopt;
+
+ const Expr *live = e->getTrueExpr(), *dead = e->getFalseExpr();
+ if (!condExprVal.getBoolValue())
+ std::swap(live, dead);
+
+ if (cgf.containsLabel(dead))
+ return std::nullopt;
+
+ // If the true case is live, we need to track its region.
+ assert(!cir::MissingFeatures::incrementProfileCounter());
+ assert(!cir::MissingFeatures::pgoUse());
+ // If it's a throw expression, we emit it and return an undefined lvalue
+ // because its value can't be used.
+ if (auto *throwExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
+ cgf.emitCXXThrowExpr(throwExpr);
+ // Return an undefined lvalue; the throw terminates execution, so the
+ // value will never actually be used.
+ mlir::Type elemTy = cgf.convertType(dead->getType());
+ mlir::Value undefPtr =
+ cgf.getBuilder().getNullPtr(cgf.getBuilder().getPointerTo(elemTy),
+ cgf.getLoc(throwExpr->getSourceRange()));
+ return cgf.makeAddrLValue(Address(undefPtr, elemTy, CharUnits::One()),
+ dead->getType());
+ }
+ return cgf.emitLValue(live);
+}
+
+/// Emit the operand of a glvalue conditional operator. This is either a glvalue
+/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
+/// LValue is returned and the current block has been terminated.
+static std::optional<LValue> emitLValueOrThrowExpression(CIRGenFunction &cgf,
+ const Expr *operand) {
+ if (auto *throwExpr = dyn_cast<CXXThrowExpr>(operand->IgnoreParens())) {
+ cgf.emitCXXThrowExpr(throwExpr);
+ return std::nullopt;
+ }
+
+ return cgf.emitLValue(operand);
+}
+} // namespace
+
+// Create and generate the 3 blocks for a conditional operator.
+// Leaves the 'current block' in the continuation basic block.
+template <typename FuncTy>
+CIRGenFunction::ConditionalInfo
+CIRGenFunction::emitConditionalBlocks(const AbstractConditionalOperator *e,
+ const FuncTy &branchGenFunc) {
+ ConditionalInfo info;
+ ConditionalEvaluation eval(*this);
+ mlir::Location loc = getLoc(e->getSourceRange());
+ CIRGenBuilderTy &builder = getBuilder();
+
+ mlir::Value condV = emitOpOnBoolExpr(loc, e->getCond());
+ SmallVector<mlir::OpBuilder::InsertPoint, 2> insertPoints{};
+ mlir::Type yieldTy{};
+
+ auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc,
+ const Expr *expr, std::optional<LValue> &resultLV) {
+ CIRGenFunction::LexicalScope lexScope{*this, loc, b.getInsertionBlock()};
+ curLexScope->setAsTernary();
+
+ assert(!cir::MissingFeatures::incrementProfileCounter());
+ eval.beginEvaluation();
+ resultLV = branchGenFunc(*this, expr);
+ mlir::Value resultPtr = resultLV ? resultLV->getPointer() : mlir::Value();
+ eval.endEvaluation();
+
+ if (resultPtr) {
+ yieldTy = resultPtr.getType();
+ cir::YieldOp::create(b, loc, resultPtr);
+ } else {
+ // If the LHS or RHS is a void expression, we need to patch the arms
+ // so that the yield types match.
+ // If the current block's terminator is an UnreachableOp (from a throw),
+ // we don't need a yield.
+ if (builder.getInsertionBlock()->mightHaveTerminator()) {
+ mlir::Operation *terminator =
+ builder.getInsertionBlock()->getTerminator();
+ if (isa_and_nonnull<cir::UnreachableOp>(terminator))
+ insertPoints.push_back(b.saveInsertionPoint());
+ }
+ }
+ };
+
+ info.result = cir::TernaryOp::create(
+ builder, loc, condV,
+ /*trueBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ emitBranch(b, loc, e->getTrueExpr(), info.lhs);
+ },
+ /*falseBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ emitBranch(b, loc, e->getFalseExpr(), info.rhs);
+ })
+ .getResult();
+
+ // If both arms are void, so be it.
+ if (!yieldTy)
+ yieldTy = VoidTy;
+
+ // Insert required yields.
+ for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.restoreInsertionPoint(toInsert);
+
+ // Block does not return: build empty yield.
+ if (!yieldTy) {
+ cir::YieldOp::create(builder, loc);
+ } else { // Block returns: set null yield value.
+ mlir::Value op0 = builder.getNullValue(yieldTy, loc);
+ cir::YieldOp::create(builder, loc, op0);
+ }
+ }
+
+ return info;
+}
+
+LValue CIRGenFunction::emitConditionalOperatorLValue(
+ const AbstractConditionalOperator *expr) {
+ if (!expr->isGLValue()) {
+ // ?: here should be an aggregate.
+ assert(hasAggregateEvaluationKind(expr->getType()) &&
+ "Unexpected conditional operator!");
+ return emitAggExprToLValue(expr);
+ }
+
+ OpaqueValueMapping binding(*this, expr);
+ if (std::optional<LValue> res =
+ handleConditionalOperatorLValueSimpleCase(*this, expr))
+ return *res;
+
+ ConditionalInfo info =
+ emitConditionalBlocks(expr, [](CIRGenFunction &cgf, const Expr *e) {
+ return emitLValueOrThrowExpression(cgf, e);
+ });
+
+ if ((info.lhs && !info.lhs->isSimple()) ||
+ (info.rhs && !info.rhs->isSimple())) {
+ cgm.errorNYI(expr->getSourceRange(),
+ "unsupported conditional operator with non-simple lvalue");
+ return LValue();
+ }
+
+ if (info.lhs && info.rhs) {
+ Address lhsAddr = info.lhs->getAddress();
+ Address rhsAddr = info.rhs->getAddress();
+ Address result(info.result, lhsAddr.getElementType(),
+ std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
+ AlignmentSource alignSource =
+ std::max(info.lhs->getBaseInfo().getAlignmentSource(),
+ info.rhs->getBaseInfo().getAlignmentSource());
+ assert(!cir::MissingFeatures::opTBAA());
+ return makeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource));
+ }
+
+ assert((info.lhs || info.rhs) &&
+ "both operands of glvalue conditional are throw-expressions?");
+ return info.lhs ? *info.lhs : *info.rhs;
+}
+
/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// performing such an operation can be performed without a libcall.
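
The paths added above correspond to C++ source like the following, which previously hit errorNYI in CIRGen (illustrative only):

  #include <stdexcept>

  int pick(bool c, int &x, int &y) {
    (c ? x : y) = 7;  // glvalue conditional used as an lvalue
    int &r = c ? x : throw std::runtime_error("no y"); // throw arm ends in unreachable
    return r;
  }
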
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index 901b937..568cbdb 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -24,6 +24,73 @@ using namespace clang;
using namespace clang::CIRGen;
namespace {
+// FIXME(cir): This should be a common helper between CIRGen
+// and traditional CodeGen
+/// Is the value of the given expression possibly a reference to or
+/// into a __block variable?
+static bool isBlockVarRef(const Expr *e) {
+ // Make sure we look through parens.
+ e = e->IgnoreParens();
+
+ // Check for a direct reference to a __block variable.
+ if (const DeclRefExpr *dre = dyn_cast<DeclRefExpr>(e)) {
+ const VarDecl *var = dyn_cast<VarDecl>(dre->getDecl());
+ return (var && var->hasAttr<BlocksAttr>());
+ }
+
+ // More complicated stuff.
+
+ // Binary operators.
+ if (const BinaryOperator *op = dyn_cast<BinaryOperator>(e)) {
+ // For an assignment or pointer-to-member operation, just care
+ // about the LHS.
+ if (op->isAssignmentOp() || op->isPtrMemOp())
+ return isBlockVarRef(op->getLHS());
+
+ // For a comma, just care about the RHS.
+ if (op->getOpcode() == BO_Comma)
+ return isBlockVarRef(op->getRHS());
+
+ // FIXME: pointer arithmetic?
+ return false;
+
+ // Check both sides of a conditional operator.
+ } else if (const AbstractConditionalOperator *op =
+ dyn_cast<AbstractConditionalOperator>(e)) {
+ return isBlockVarRef(op->getTrueExpr()) ||
+ isBlockVarRef(op->getFalseExpr());
+
+ // OVEs are required to support BinaryConditionalOperators.
+ } else if (const OpaqueValueExpr *op = dyn_cast<OpaqueValueExpr>(e)) {
+ if (const Expr *src = op->getSourceExpr())
+ return isBlockVarRef(src);
+
+ // Casts are necessary to get things like (*(int*)&var) = foo().
+ // We don't really care about the kind of cast here, except
+ // we don't want to look through l2r casts, because it's okay
+ // to get the *value* in a __block variable.
+ } else if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
+ if (cast->getCastKind() == CK_LValueToRValue)
+ return false;
+ return isBlockVarRef(cast->getSubExpr());
+
+ // Handle unary operators. Again, just aggressively look through
+ // it, ignoring the operation.
+ } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(e)) {
+ return isBlockVarRef(uop->getSubExpr());
+
+ // Look into the base of a field access.
+ } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(e)) {
+ return isBlockVarRef(mem->getBase());
+
+ // Look into the base of a subscript.
+ } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(e)) {
+ return isBlockVarRef(sub->getBase());
+ }
+
+ return false;
+}
+
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
CIRGenFunction &cgf;
@@ -41,9 +108,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
AggValueSlot ensureSlot(mlir::Location loc, QualType t) {
if (!dest.isIgnored())
return dest;
-
- cgf.cgm.errorNYI(loc, "Slot for ignored address");
- return dest;
+ return cgf.createAggTemp(t, loc, "agg.tmp.ensured");
}
void ensureDest(mlir::Location loc, QualType ty) {
@@ -89,6 +154,47 @@ public:
(void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca, dest);
}
+ void VisitBinAssign(const BinaryOperator *e) {
+ // For an assignment to work, the value on the right has
+ // to be compatible with the value on the left.
+ assert(cgf.getContext().hasSameUnqualifiedType(e->getLHS()->getType(),
+ e->getRHS()->getType()) &&
+ "Invalid assignment");
+
+ if (isBlockVarRef(e->getLHS()) &&
+ e->getRHS()->HasSideEffects(cgf.getContext())) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "block var reference with side effects");
+ return;
+ }
+
+ LValue lhs = cgf.emitLValue(e->getLHS());
+
+ // If we have an atomic type, evaluate into the destination and then
+ // do an atomic copy.
+ assert(!cir::MissingFeatures::atomicTypes());
+
+ // Codegen the RHS so that it stores directly into the LHS.
+ assert(!cir::MissingFeatures::aggValueSlotGC());
+ AggValueSlot lhsSlot = AggValueSlot::forLValue(
+ lhs, AggValueSlot::IsDestructed, AggValueSlot::IsAliased,
+ AggValueSlot::MayOverlap);
+
+ // A non-volatile aggregate destination might have a volatile member.
+ if (!lhsSlot.isVolatile() && cgf.hasVolatileMember(e->getLHS()->getType()))
+ lhsSlot.setVolatile(true);
+
+ cgf.emitAggExpr(e->getRHS(), lhsSlot);
+
+ // Copy into the destination if the assignment isn't ignored.
+ emitFinalDestCopy(e->getType(), lhs);
+
+ if (!dest.isIgnored() && !dest.isExternallyDestructed() &&
+ e->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
+ cgf.pushDestroy(QualType::DK_nontrivial_c_struct, dest.getAddress(),
+ e->getType());
+ }
+
void VisitDeclRefExpr(DeclRefExpr *e) { emitAggLoadOfLValue(e); }
void VisitInitListExpr(InitListExpr *e);
@@ -170,19 +276,10 @@ public:
void VisitConstantExpr(ConstantExpr *e) {
cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitConstantExpr");
}
- void VisitMemberExpr(MemberExpr *e) {
- cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitMemberExpr");
- }
- void VisitUnaryDeref(UnaryOperator *e) {
- cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitUnaryDeref");
- }
- void VisitStringLiteral(StringLiteral *e) {
- cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitStringLiteral");
- }
- void VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
- cgf.cgm.errorNYI(e->getSourceRange(),
- "AggExprEmitter: VisitCompoundLiteralExpr");
- }
+ void VisitMemberExpr(MemberExpr *e) { emitAggLoadOfLValue(e); }
+ void VisitUnaryDeref(UnaryOperator *e) { emitAggLoadOfLValue(e); }
+ void VisitStringLiteral(StringLiteral *e) { emitAggLoadOfLValue(e); }
+ void VisitCompoundLiteralExpr(CompoundLiteralExpr *e);
void VisitPredefinedExpr(const PredefinedExpr *e) {
cgf.cgm.errorNYI(e->getSourceRange(),
"AggExprEmitter: VisitPredefinedExpr");
@@ -195,9 +292,6 @@ public:
cgf.cgm.errorNYI(e->getSourceRange(),
"AggExprEmitter: VisitPointerToDataMemberBinaryOperator");
}
- void VisitBinAssign(const BinaryOperator *e) {
- cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinAssign");
- }
void VisitBinComma(const BinaryOperator *e) {
cgf.emitIgnoredExpr(e->getLHS());
Visit(e->getRHS());
@@ -325,6 +419,31 @@ void AggExprEmitter::emitAggLoadOfLValue(const Expr *e) {
emitFinalDestCopy(e->getType(), lv);
}
+void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
+ if (dest.isPotentiallyAliased() && e->getType().isPODType(cgf.getContext())) {
+ // For a POD type, just emit a load of the lvalue + a copy, because our
+ // compound literal might alias the destination.
+ emitAggLoadOfLValue(e);
+ return;
+ }
+
+ AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
+
+ // Block-scope compound literals are destroyed at the end of the enclosing
+ // scope in C.
+ bool destruct =
+ !cgf.getLangOpts().CPlusPlus && !slot.isExternallyDestructed();
+ if (destruct)
+ slot.setExternallyDestructed();
+
+ cgf.emitAggExpr(e->getInitializer(), slot);
+
+ if (destruct)
+ if ([[maybe_unused]] QualType::DestructionKind dtorKind =
+ e->getType().isDestructedType())
+ cgf.cgm.errorNYI(e->getSourceRange(), "compound literal with destructor");
+}
+
void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
QualType arrayQTy, Expr *e,
ArrayRef<Expr *> args, Expr *arrayFiller) {
@@ -487,7 +606,8 @@ void AggExprEmitter::emitCopy(QualType type, const AggValueSlot &dest,
LValue destLV = cgf.makeAddrLValue(dest.getAddress(), type);
LValue srcLV = cgf.makeAddrLValue(src.getAddress(), type);
assert(!cir::MissingFeatures::aggValueSlotVolatile());
- cgf.emitAggregateCopy(destLV, srcLV, type, dest.mayOverlap());
+ cgf.emitAggregateCopy(destLV, srcLV, type, dest.mayOverlap(),
+ dest.isVolatile() || src.isVolatile());
}
void AggExprEmitter::emitInitializationToLValue(Expr *e, LValue lv) {
@@ -788,7 +908,8 @@ void CIRGenFunction::emitAggExpr(const Expr *e, AggValueSlot slot) {
}
void CIRGenFunction::emitAggregateCopy(LValue dest, LValue src, QualType ty,
- AggValueSlot::Overlap_t mayOverlap) {
+ AggValueSlot::Overlap_t mayOverlap,
+ bool isVolatile) {
// TODO(cir): this function needs improvements, commented code for now since
// this will be touched again soon.
assert(!ty->isAnyComplexType() && "Unexpected copy of complex");
@@ -844,7 +965,7 @@ void CIRGenFunction::emitAggregateCopy(LValue dest, LValue src, QualType ty,
cgm.errorNYI("emitAggregateCopy: GC");
[[maybe_unused]] cir::CopyOp copyOp =
- builder.createCopy(destPtr.getPointer(), srcPtr.getPointer());
+ builder.createCopy(destPtr.getPointer(), srcPtr.getPointer(), isVolatile);
assert(!cir::MissingFeatures::opTBAA());
}
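
The visitors enabled above cover aggregate sources like this C snippet, with each line naming the visitor that now handles it (illustrative):

  struct S { int a, b; };

  void demo(struct S *p, struct S q) {
    *p = q;                        // VisitBinAssign stores the RHS into the LHS slot
    struct S t = (struct S){1, 2}; // VisitCompoundLiteralExpr
    (void)t;
  }
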
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index fcde487..d8f4943 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -196,9 +196,8 @@ public:
return Visit(e->getSubExpr());
}
mlir::Value VisitCXXDefaultArgExpr(CXXDefaultArgExpr *dae) {
- cgf.cgm.errorNYI(dae->getExprLoc(),
- "ComplexExprEmitter VisitCXXDefaultArgExpr");
- return {};
+ CIRGenFunction::CXXDefaultArgExprScope scope(cgf, dae);
+ return Visit(dae->getExpr());
}
mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
CIRGenFunction::CXXDefaultInitExprScope scope(cgf, die);
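
With VisitCXXDefaultArgExpr now forwarding to the underlying expression, calls that rely on a complex default argument emit instead of erroring; e.g. (illustrative, using Clang's _Complex extension in C++):

  void f(float _Complex z = 1.0f); // default argument is a complex prvalue
  void g() { f(); }                // now visits the default-argument expression
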
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index ba36cbe..25a46df 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -822,6 +822,10 @@ LValue CIRGenFunction::emitLValue(const Expr *e) {
std::string("l-value not implemented for '") +
e->getStmtClassName() + "'");
return LValue();
+ case Expr::ConditionalOperatorClass:
+ return emitConditionalOperatorLValue(cast<ConditionalOperator>(e));
+ case Expr::BinaryConditionalOperatorClass:
+ return emitConditionalOperatorLValue(cast<BinaryConditionalOperator>(e));
case Expr::ArraySubscriptExprClass:
return emitArraySubscriptExpr(cast<ArraySubscriptExpr>(e));
case Expr::UnaryOperatorClass:
@@ -866,6 +870,8 @@ LValue CIRGenFunction::emitLValue(const Expr *e) {
return emitCastLValue(cast<CastExpr>(e));
case Expr::MaterializeTemporaryExprClass:
return emitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(e));
+ case Expr::OpaqueValueExprClass:
+ return emitOpaqueValueLValue(cast<OpaqueValueExpr>(e));
case Expr::ChooseExprClass:
return emitLValue(cast<ChooseExpr>(e)->getChosenSubExpr());
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 3c36f5c..5a71126 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -733,6 +733,11 @@ public:
SourceLocExprScopeGuard sourceLocScope;
};
+ struct CXXDefaultArgExprScope : SourceLocExprScopeGuard {
+ CXXDefaultArgExprScope(CIRGenFunction &cgf, const CXXDefaultArgExpr *e)
+ : SourceLocExprScopeGuard(e, cgf.curSourceLocExprScope) {}
+ };
+
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t);
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty);
@@ -853,6 +858,13 @@ public:
FunctionArgList args, clang::SourceLocation loc,
clang::SourceLocation startLoc);
+ /// Returns true if the aggregate type has a volatile member.
+ bool hasVolatileMember(QualType t) {
+ if (const auto *rd = t->getAsRecordDecl())
+ return rd->hasVolatileMember();
+ return false;
+ }
+
/// The cleanup depth enclosing all the cleanups associated with the
/// parameters.
EHScopeStack::stable_iterator prologueCleanupDepth;
@@ -1077,6 +1089,9 @@ public:
static Destroyer destroyCXXObject;
+ void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
+ QualType type);
+
void pushDestroy(CleanupKind kind, Address addr, QualType type,
Destroyer *destroyer);
@@ -1131,14 +1146,16 @@ public:
/// occupied by some other object. More efficient code can often be
/// generated if not.
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy,
- AggValueSlot::Overlap_t mayOverlap);
+ AggValueSlot::Overlap_t mayOverlap,
+ bool isVolatile = false);
/// Emit code to compute the specified expression which can have any type. The
/// result is returned as an RValue struct. If this is an aggregate
/// expression, the aggloc/agglocvolatile arguments indicate where the result
/// should be returned.
RValue emitAnyExpr(const clang::Expr *e,
- AggValueSlot aggSlot = AggValueSlot::ignored());
+ AggValueSlot aggSlot = AggValueSlot::ignored(),
+ bool ignoreResult = false);
/// Emits the code necessary to evaluate an arbitrary expression into the
/// given memory location.
@@ -1518,6 +1535,10 @@ public:
LValue emitMemberExpr(const MemberExpr *e);
+ LValue emitOpaqueValueLValue(const OpaqueValueExpr *e);
+
+ LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr);
+
/// Given an expression with a pointer type, emit the value and compute our
/// best estimate of the alignment of the pointee.
///
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
index f638d39..be063033 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
@@ -590,15 +590,18 @@ void OpenACCRecipeBuilderBase::createReductionRecipeCombiner(
} else {
// else we have to handle each individual field after a
// get-element.
+ const CIRGenRecordLayout &layout =
+ cgf.cgm.getTypes().getCIRGenRecordLayout(rd);
for (const auto &[field, combiner] :
llvm::zip_equal(rd->fields(), combinerRecipes)) {
mlir::Type fieldType = cgf.convertType(field->getType());
auto fieldPtr = cir::PointerType::get(fieldType);
+ unsigned fieldIndex = layout.getCIRFieldNo(field);
mlir::Value lhsField = builder.createGetMember(
- loc, fieldPtr, lhsArg, field->getName(), field->getFieldIndex());
+ loc, fieldPtr, lhsArg, field->getName(), fieldIndex);
mlir::Value rhsField = builder.createGetMember(
- loc, fieldPtr, rhsArg, field->getName(), field->getFieldIndex());
+ loc, fieldPtr, rhsArg, field->getName(), fieldIndex);
emitSingleCombiner(lhsField, rhsField, combiner);
}
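
The distinction matters because FieldDecl::getFieldIndex numbers every field in the AST, while getCIRFieldNo gives the field's slot in the lowered CIR record type, and the two can diverge. One shape where that plausibly happens (the unnamed-bit-field behavior is an assumption, not verified against the patch):

  struct R {
    int a;   // AST index 0, CIR layout slot 0
    int : 0; // unnamed bit-field: has an AST index, assumed to get no CIR slot
    int b;   // AST index 2, CIR layout slot presumably 1
  };
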
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 5ba64dd..ad8c4d0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -475,8 +475,8 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
}
break;
case cir::TEK_Complex:
- getCIRGenModule().errorNYI(s.getSourceRange(),
- "complex function return type");
+ emitComplexExprIntoLValue(rv, makeAddrLValue(returnValue, rv->getType()),
+ /*isInit=*/true);
break;
case cir::TEK_Aggregate:
assert(!cir::MissingFeatures::aggValueSlotGC());
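
Likewise, with TEK_Complex wired to emitComplexExprIntoLValue, returning a complex value from a function now emits; e.g. (illustrative):

  float _Complex mul(float _Complex a, float _Complex b) {
    return a * b; // stored into the return slot via emitComplexExprIntoLValue
  }
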
diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h
index c05142e..ab245a77 100644
--- a/clang/lib/CIR/CodeGen/CIRGenValue.h
+++ b/clang/lib/CIR/CodeGen/CIRGenValue.h
@@ -380,6 +380,15 @@ public:
clang::Qualifiers getQualifiers() const { return quals; }
+ bool isVolatile() const { return quals.hasVolatile(); }
+
+ void setVolatile(bool flag) {
+ if (flag)
+ quals.addVolatile();
+ else
+ quals.removeVolatile();
+ }
+
Address getAddress() const { return addr; }
bool isIgnored() const { return !addr.isValid(); }
@@ -390,6 +399,8 @@ public:
IsZeroed_t isZeroed() const { return IsZeroed_t(zeroedFlag); }
+ IsAliased_t isPotentiallyAliased() const { return IsAliased_t(aliasedFlag); }
+
RValue asRValue() const {
if (isIgnored())
return RValue::getIgnored();
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index b4c3704..ed606b7 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -1978,13 +1978,19 @@ void cir::TernaryOp::build(
result.addOperands(cond);
OpBuilder::InsertionGuard guard(builder);
Region *trueRegion = result.addRegion();
- Block *block = builder.createBlock(trueRegion);
+ builder.createBlock(trueRegion);
trueBuilder(builder, result.location);
Region *falseRegion = result.addRegion();
builder.createBlock(falseRegion);
falseBuilder(builder, result.location);
- auto yield = dyn_cast<YieldOp>(block->getTerminator());
+ // Get the result type from whichever branch has a yield (the other may
+ // end in an unreachable from a throw expression).
+ auto yield =
+ dyn_cast_or_null<cir::YieldOp>(trueRegion->back().getTerminator());
+ if (!yield)
+ yield = dyn_cast_or_null<cir::YieldOp>(falseRegion->back().getTerminator());
+
assert((yield && yield.getNumOperands() <= 1) &&
"expected zero or one result type");
if (yield.getNumOperands() == 1)
diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
index 26e5c05..8589a2e 100644
--- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
@@ -505,10 +505,19 @@ public:
Block *trueBlock = &trueRegion.front();
mlir::Operation *trueTerminator = trueRegion.back().getTerminator();
rewriter.setInsertionPointToEnd(&trueRegion.back());
- auto trueYieldOp = dyn_cast<cir::YieldOp>(trueTerminator);
- rewriter.replaceOpWithNewOp<cir::BrOp>(trueYieldOp, trueYieldOp.getArgs(),
- continueBlock);
+ // Handle both yield and unreachable terminators (throw expressions)
+ if (auto trueYieldOp = dyn_cast<cir::YieldOp>(trueTerminator)) {
+ rewriter.replaceOpWithNewOp<cir::BrOp>(trueYieldOp, trueYieldOp.getArgs(),
+ continueBlock);
+ } else if (isa<cir::UnreachableOp>(trueTerminator)) {
+ // The terminator is unreachable (e.g., from a throw); just keep it.
+ } else {
+ trueTerminator->emitError("unexpected terminator in ternary true region, "
+ "expected yield or unreachable, got: ")
+ << trueTerminator->getName();
+ return mlir::failure();
+ }
rewriter.inlineRegionBefore(trueRegion, continueBlock);
Block *falseBlock = continueBlock;
@@ -517,9 +526,19 @@ public:
falseBlock = &falseRegion.front();
mlir::Operation *falseTerminator = falseRegion.back().getTerminator();
rewriter.setInsertionPointToEnd(&falseRegion.back());
- auto falseYieldOp = dyn_cast<cir::YieldOp>(falseTerminator);
- rewriter.replaceOpWithNewOp<cir::BrOp>(falseYieldOp, falseYieldOp.getArgs(),
- continueBlock);
+
+ // Handle both yield and unreachable terminators (throw expressions)
+ if (auto falseYieldOp = dyn_cast<cir::YieldOp>(falseTerminator)) {
+ rewriter.replaceOpWithNewOp<cir::BrOp>(
+ falseYieldOp, falseYieldOp.getArgs(), continueBlock);
+ } else if (isa<cir::UnreachableOp>(falseTerminator)) {
+ // The terminator is unreachable (e.g., from a throw); just keep it.
+ } else {
+ falseTerminator->emitError("unexpected terminator in ternary false "
+ "region, expected yield or unreachable, got: ")
+ << falseTerminator->getName();
+ return mlir::failure();
+ }
rewriter.inlineRegionBefore(falseRegion, continueBlock);
rewriter.setInsertionPointToEnd(condBlock);
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 0243bf1..dc26dac 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -182,7 +182,7 @@ mlir::LogicalResult CIRToLLVMCopyOpLowering::matchAndRewrite(
rewriter, op.getLoc(), rewriter.getI32Type(), op.getLength(layout));
assert(!cir::MissingFeatures::aggValueSlotVolatile());
rewriter.replaceOpWithNewOp<mlir::LLVM::MemcpyOp>(
- op, adaptor.getDst(), adaptor.getSrc(), length, /*isVolatile=*/false);
+ op, adaptor.getDst(), adaptor.getSrc(), length, op.getIsVolatile());
return mlir::success();
}
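
End to end, the volatile flag now survives from the AST through cir.copy into llvm.memcpy; a C sketch of a copy that should now be marked volatile (illustrative):

  struct Big { int v[16]; };

  void copy(volatile struct Big *d, volatile struct Big *s) {
    *d = *s; // aggregate copy; lowers to a memcpy with isVolatile = true
  }
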
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index d95dab3..a012581 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -86,7 +86,7 @@ namespace {
llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
StoragePtr = CGF.Builder.CreateAddrSpaceCast(
- StoragePtr, CGF.UnqualPtrTy, "atomic_bitfield_base");
+ StoragePtr, CGF.DefaultPtrTy, "atomic_bitfield_base");
BFI = OrigBFI;
BFI.Offset = Offset;
BFI.StorageSize = AtomicSizeInBits;
@@ -374,10 +374,9 @@ bool AtomicInfo::emitMemSetZeroIfNecessary() const {
}
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
- Address Dest, Address Ptr,
- Address Val1, Address Val2,
- uint64_t Size,
- llvm::AtomicOrdering SuccessOrder,
+ Address Dest, Address Ptr, Address Val1,
+ Address Val2, Address ExpectedResult,
+ uint64_t Size, llvm::AtomicOrdering SuccessOrder,
llvm::AtomicOrdering FailureOrder,
llvm::SyncScope::ID Scope) {
// Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
@@ -411,8 +410,30 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
CGF.Builder.SetInsertPoint(StoreExpectedBB);
// Update the memory at Expected with Old's value.
- auto *I = CGF.Builder.CreateStore(Old, Val1);
- CGF.addInstToCurrentSourceAtom(I, Old);
+ llvm::Type *ExpectedType = ExpectedResult.getElementType();
+ const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
+ uint64_t ExpectedSizeInBytes = DL.getTypeStoreSize(ExpectedType);
+
+ if (ExpectedSizeInBytes == Size) {
+ // Sizes match: store directly
+ auto *I = CGF.Builder.CreateStore(Old, ExpectedResult);
+ CGF.addInstToCurrentSourceAtom(I, Old);
+ } else {
+ // Sizes differ: store only the first ExpectedSizeInBytes bytes of Old.
+ llvm::Type *OldType = Old->getType();
+
+ // Allocate temporary storage for Old value
+ Address OldTmp =
+ CGF.CreateTempAlloca(OldType, Ptr.getAlignment(), "old.tmp");
+
+ // Store Old into this temporary
+ auto *I = CGF.Builder.CreateStore(Old, OldTmp);
+ CGF.addInstToCurrentSourceAtom(I, Old);
+
+ // Perform memcpy for first ExpectedSizeInBytes bytes
+ CGF.Builder.CreateMemCpy(ExpectedResult, OldTmp, ExpectedSizeInBytes,
+ /*isVolatile=*/false);
+ }
// Finally, branch to the exit point.
CGF.Builder.CreateBr(ContinueBB);
@@ -425,13 +446,11 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
-static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
- bool IsWeak, Address Dest, Address Ptr,
- Address Val1, Address Val2,
- llvm::Value *FailureOrderVal,
- uint64_t Size,
- llvm::AtomicOrdering SuccessOrder,
- llvm::SyncScope::ID Scope) {
+static void emitAtomicCmpXchgFailureSet(
+ CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr,
+ Address Val1, Address Val2, Address ExpectedResult,
+ llvm::Value *FailureOrderVal, uint64_t Size,
+ llvm::AtomicOrdering SuccessOrder, llvm::SyncScope::ID Scope) {
llvm::AtomicOrdering FailureOrder;
if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
auto FOS = FO->getSExtValue();
@@ -458,8 +477,8 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
// success argument". This condition has been lifted and the only
// precondition is 31.7.2.18. Effectively treat this as a DR and skip
// language version checks.
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
- FailureOrder, Scope);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult,
+ Size, SuccessOrder, FailureOrder, Scope);
return;
}
@@ -483,18 +502,19 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
// Emit all the different atomics
CGF.Builder.SetInsertPoint(MonotonicBB);
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
- Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+ SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(AcquireBB);
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
- llvm::AtomicOrdering::Acquire, Scope);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+ SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(SeqCstBB);
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
- llvm::AtomicOrdering::SequentiallyConsistent, Scope);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+ SuccessOrder, llvm::AtomicOrdering::SequentiallyConsistent,
+ Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(ContBB);
@@ -538,8 +558,9 @@ static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
Address Ptr, Address Val1, Address Val2,
- llvm::Value *IsWeak, llvm::Value *FailureOrder,
- uint64_t Size, llvm::AtomicOrdering Order,
+ Address ExpectedResult, llvm::Value *IsWeak,
+ llvm::Value *FailureOrder, uint64_t Size,
+ llvm::AtomicOrdering Order,
llvm::SyncScope::ID Scope) {
llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
bool PostOpMinMax = false;
@@ -554,13 +575,15 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order, Scope);
+ ExpectedResult, FailureOrder, Size, Order,
+ Scope);
return;
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order, Scope);
+ ExpectedResult, FailureOrder, Size, Order,
+ Scope);
return;
case AtomicExpr::AO__atomic_compare_exchange:
case AtomicExpr::AO__atomic_compare_exchange_n:
@@ -568,7 +591,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
- Val1, Val2, FailureOrder, Size, Order, Scope);
+ Val1, Val2, ExpectedResult, FailureOrder,
+ Size, Order, Scope);
} else {
// Create all the relevant BB's
llvm::BasicBlock *StrongBB =
@@ -582,12 +606,14 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
CGF.Builder.SetInsertPoint(StrongBB);
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order, Scope);
+ ExpectedResult, FailureOrder, Size, Order,
+ Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(WeakBB);
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order, Scope);
+ ExpectedResult, FailureOrder, Size, Order,
+ Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(ContBB);
@@ -797,9 +823,9 @@ EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
Address Ptr, Address Val1, Address Val2,
- llvm::Value *IsWeak, llvm::Value *FailureOrder,
- uint64_t Size, llvm::AtomicOrdering Order,
- llvm::Value *Scope) {
+ Address OriginalVal1, llvm::Value *IsWeak,
+ llvm::Value *FailureOrder, uint64_t Size,
+ llvm::AtomicOrdering Order, llvm::Value *Scope) {
auto ScopeModel = Expr->getScopeModel();
// LLVM atomic instructions always have sync scope. If clang atomic
@@ -816,8 +842,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
Order, CGF.getLLVMContext());
else
SS = llvm::SyncScope::System;
- EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
- Order, SS);
+ EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ FailureOrder, Size, Order, SS);
return;
}
@@ -826,8 +852,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
Order, CGF.CGM.getLLVMContext());
- EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
- Order, SCID);
+ EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ FailureOrder, Size, Order, SCID);
return;
}
@@ -852,12 +878,11 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
SI->addCase(Builder.getInt32(S), B);
Builder.SetInsertPoint(B);
- EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
- Order,
- CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
- ScopeModel->map(S),
- Order,
- CGF.getLLVMContext()));
+ EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ FailureOrder, Size, Order,
+ CGF.getTargetHooks().getLLVMSyncScopeID(
+ CGF.CGM.getLangOpts(), ScopeModel->map(S), Order,
+ CGF.getLLVMContext()));
Builder.CreateBr(ContBB);
}
@@ -1058,6 +1083,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
AtomicInfo Atomics(*this, AtomicVal);
+ Address OriginalVal1 = Val1;
if (ShouldCastToIntPtrTy) {
Ptr = Atomics.castToAtomicIntPointer(Ptr);
if (Val1.isValid())
@@ -1301,30 +1327,32 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
if (llvm::isValidAtomicOrderingCABI(ord))
switch ((llvm::AtomicOrderingCABI)ord) {
case llvm::AtomicOrderingCABI::relaxed:
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Monotonic, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Monotonic, Scope);
break;
case llvm::AtomicOrderingCABI::consume:
case llvm::AtomicOrderingCABI::acquire:
if (IsStore)
break; // Avoid crashing on code with undefined behavior
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Acquire, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
break;
case llvm::AtomicOrderingCABI::release:
if (IsLoad)
break; // Avoid crashing on code with undefined behavior
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Release, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
break;
case llvm::AtomicOrderingCABI::acq_rel:
if (IsLoad || IsStore)
break; // Avoid crashing on code with undefined behavior
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::AcquireRelease, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::AcquireRelease,
+ Scope);
break;
case llvm::AtomicOrderingCABI::seq_cst:
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size,
llvm::AtomicOrdering::SequentiallyConsistent, Scope);
break;
}
@@ -1360,13 +1388,13 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// Emit all the different atomics
Builder.SetInsertPoint(MonotonicBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Monotonic, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
+ Size, llvm::AtomicOrdering::Monotonic, Scope);
Builder.CreateBr(ContBB);
if (!IsStore) {
Builder.SetInsertPoint(AcquireBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Acquire, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
AcquireBB);
@@ -1375,23 +1403,23 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
}
if (!IsLoad) {
Builder.SetInsertPoint(ReleaseBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Release, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
ReleaseBB);
}
if (!IsLoad && !IsStore) {
Builder.SetInsertPoint(AcqRelBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::AcquireRelease, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::AcquireRelease, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
AcqRelBB);
}
Builder.SetInsertPoint(SeqCstBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::SequentiallyConsistent, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
+ Size, llvm::AtomicOrdering::SequentiallyConsistent, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
SeqCstBB);
@@ -1417,6 +1445,11 @@ Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
if (SourceSizeInBits != AtomicSizeInBits) {
Address Tmp = CreateTempAlloca();
+ CGF.Builder.CreateMemSet(
+ Tmp.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0),
+ CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
+ Tmp.getAlignment().getAsAlign());
+
CGF.Builder.CreateMemCpy(Tmp, Addr,
std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
Addr = Tmp;
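
The ExpectedResult plumbing exists because the atomic operand may be widened to a padded width while the caller's 'expected' object keeps its original size, so only the leading bytes may be written back on failure. A sketch of the affected shape, assuming x86 long double (an 80-bit value stored in a widened atomic; the exact target behavior is an assumption):

  _Atomic long double A;
  long double Expected = 0.0L;

  void trySwap(void) {
    // On failure, only the value's store size is copied back into Expected;
    // the atomic's padding bytes are never written there.
    __c11_atomic_compare_exchange_strong(&A, &Expected, 1.0L,
                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  }
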
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index 597127ab..20a595e 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -1207,7 +1207,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
} else {
// Bitcast the block literal to a generic block literal.
BlockPtr =
- Builder.CreatePointerCast(BlockPtr, UnqualPtrTy, "block.literal");
+ Builder.CreatePointerCast(BlockPtr, DefaultPtrTy, "block.literal");
// Get pointer to the block invoke function
FuncPtr = Builder.CreateStructGEP(GenBlockTy, BlockPtr, 3);
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 92dba32..fd14cd6 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -1673,7 +1673,7 @@ static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
CGF.getLLVMContext(),
CGF.getContext().getTypeSize(E->getArg(1)->getType()));
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.Int8Ty, {CGF.UnqualPtrTy, IntType}, false);
+ llvm::FunctionType::get(CGF.Int8Ty, {CGF.DefaultPtrTy, IntType}, false);
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index cb16fe1..b463f88 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -230,7 +230,7 @@ CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
IntTy = CGM.IntTy;
SizeTy = CGM.SizeTy;
VoidTy = CGM.VoidTy;
- PtrTy = CGM.UnqualPtrTy;
+ PtrTy = CGM.DefaultPtrTy;
if (CGM.getLangOpts().OffloadViaLLVM)
Prefix = "llvm";
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 8439ec7..fd73314 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -1756,7 +1756,7 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
const char *Name) {
ErrorUnsupported(E, Name);
llvm::Type *ElTy = ConvertType(E->getType());
- llvm::Type *Ty = UnqualPtrTy;
+ llvm::Type *Ty = DefaultPtrTy;
return MakeAddrLValue(
Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
}
@@ -4270,7 +4270,7 @@ void CodeGenFunction::EmitCfiCheckFail() {
llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
llvm::Value *V = Builder.CreateConstGEP2_32(
- CfiCheckFailDataTy, Builder.CreatePointerCast(Data, UnqualPtrTy), 0, 0);
+ CfiCheckFailDataTy, Builder.CreatePointerCast(Data, DefaultPtrTy), 0, 0);
Address CheckKindAddr(V, Int8Ty, getIntAlign());
llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
@@ -5711,7 +5711,7 @@ std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
CGF.EmitCXXThrowExpr(ThrowExpr);
llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
- llvm::Type *Ty = CGF.UnqualPtrTy;
+ llvm::Type *Ty = CGF.DefaultPtrTy;
return CGF.MakeAddrLValue(
Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
Dead->getType());
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index c571821a..cb5bb40 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -338,7 +338,7 @@ public:
/// GcReadWeakFn -- LLVM objc_read_weak (id *src) function.
llvm::FunctionCallee getGcReadWeakFn() {
// id objc_read_weak (id *)
- llvm::Type *args[] = {CGM.UnqualPtrTy};
+ llvm::Type *args[] = {CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
}
@@ -346,7 +346,7 @@ public:
/// GcAssignWeakFn -- LLVM objc_assign_weak function.
llvm::FunctionCallee getGcAssignWeakFn() {
// id objc_assign_weak (id, id *)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
}
@@ -354,7 +354,7 @@ public:
/// GcAssignGlobalFn -- LLVM objc_assign_global function.
llvm::FunctionCallee getGcAssignGlobalFn() {
// id objc_assign_global(id, id *)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
}
@@ -362,7 +362,7 @@ public:
/// GcAssignThreadLocalFn -- LLVM objc_assign_threadlocal function.
llvm::FunctionCallee getGcAssignThreadLocalFn() {
// id objc_assign_threadlocal(id src, id * dest)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_threadlocal");
}
@@ -370,7 +370,7 @@ public:
/// GcAssignIvarFn -- LLVM objc_assign_ivar function.
llvm::FunctionCallee getGcAssignIvarFn() {
// id objc_assign_ivar(id, id *, ptrdiff_t)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy, CGM.PtrDiffTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy, CGM.PtrDiffTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
}
@@ -386,7 +386,7 @@ public:
/// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
llvm::FunctionCallee getGcAssignStrongCastFn() {
// id objc_assign_strongCast(id, id *)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
}
@@ -517,7 +517,7 @@ public:
/// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
llvm::FunctionCallee getExceptionTryEnterFn() {
- llvm::Type *params[] = {CGM.UnqualPtrTy};
+ llvm::Type *params[] = {CGM.DefaultPtrTy};
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, false),
"objc_exception_try_enter");
@@ -525,7 +525,7 @@ public:
/// ExceptionTryExitFn - LLVM objc_exception_try_exit function.
llvm::FunctionCallee getExceptionTryExitFn() {
- llvm::Type *params[] = {CGM.UnqualPtrTy};
+ llvm::Type *params[] = {CGM.DefaultPtrTy};
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, false),
"objc_exception_try_exit");
@@ -533,7 +533,7 @@ public:
/// ExceptionExtractFn - LLVM objc_exception_extract function.
llvm::FunctionCallee getExceptionExtractFn() {
- llvm::Type *params[] = {CGM.UnqualPtrTy};
+ llvm::Type *params[] = {CGM.DefaultPtrTy};
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(ObjectPtrTy, params, false),
"objc_exception_extract");
@@ -550,7 +550,7 @@ public:
/// SetJmpFn - LLVM _setjmp function.
llvm::FunctionCallee getSetJmpFn() {
// This is specifically the prototype for x86.
- llvm::Type *params[] = {CGM.UnqualPtrTy};
+ llvm::Type *params[] = {CGM.DefaultPtrTy};
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.Int32Ty, params, false), "_setjmp",
llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -1927,7 +1927,7 @@ CGObjCCommonMac::GenerateConstantNSString(const StringLiteral *Literal) {
// If we don't already have it, construct the type for a constant NSString.
if (!NSConstantStringType) {
NSConstantStringType =
- llvm::StructType::create({CGM.UnqualPtrTy, CGM.Int8PtrTy, CGM.IntTy},
+ llvm::StructType::create({CGM.DefaultPtrTy, CGM.Int8PtrTy, CGM.IntTy},
"struct.__builtin_NSString");
}
@@ -5959,7 +5959,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(
Int8PtrTy, PropertyListPtrTy);
// ImpnfABITy - LLVM for id (*)(id, SEL, ...)
- ImpnfABITy = CGM.UnqualPtrTy;
+ ImpnfABITy = CGM.DefaultPtrTy;
// struct _class_t {
// struct _class_t *isa;
@@ -6380,7 +6380,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
CGM.getModule(), ObjCTypes.ImpnfABITy, false,
llvm::GlobalValue::ExternalLinkage, nullptr, "_objc_empty_vtable");
else
- ObjCEmptyVtableVar = llvm::ConstantPointerNull::get(CGM.UnqualPtrTy);
+ ObjCEmptyVtableVar = llvm::ConstantPointerNull::get(CGM.DefaultPtrTy);
}
// FIXME: Is this correct (that meta class size is never computed)?
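A minimal sketch of the declaration pattern repeated throughout this file, assuming the post-rename members (with opaque pointers, CGM.DefaultPtrTy is the one `ptr` type all of these runtime helpers are declared against):

  llvm::Type *Args[] = {ObjectPtrTy, CGM.DefaultPtrTy}; // id, id *
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(ObjectPtrTy, Args, /*isVarArg=*/false);
  llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");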
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 1ff2be7..85b2404 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -1714,12 +1714,12 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
// Copying constructor for the threadprivate variable.
  // Must be NULL - reserved by the runtime; currently it requires that this
  // parameter always be NULL, otherwise it fires an assertion.
- CopyCtor = llvm::Constant::getNullValue(CGM.UnqualPtrTy);
+ CopyCtor = llvm::Constant::getNullValue(CGM.DefaultPtrTy);
if (Ctor == nullptr) {
- Ctor = llvm::Constant::getNullValue(CGM.UnqualPtrTy);
+ Ctor = llvm::Constant::getNullValue(CGM.DefaultPtrTy);
}
if (Dtor == nullptr) {
- Dtor = llvm::Constant::getNullValue(CGM.UnqualPtrTy);
+ Dtor = llvm::Constant::getNullValue(CGM.DefaultPtrTy);
}
if (!CGF) {
auto *InitFunctionTy =
diff --git a/clang/lib/CodeGen/CGPointerAuth.cpp b/clang/lib/CodeGen/CGPointerAuth.cpp
index 375f87a..dbb7bc9 100644
--- a/clang/lib/CodeGen/CGPointerAuth.cpp
+++ b/clang/lib/CodeGen/CGPointerAuth.cpp
@@ -426,10 +426,10 @@ CodeGenModule::getConstantSignedPointer(llvm::Constant *Pointer, unsigned Key,
llvm::ConstantInt *OtherDiscriminator) {
llvm::Constant *AddressDiscriminator;
if (StorageAddress) {
- assert(StorageAddress->getType() == UnqualPtrTy);
+ assert(StorageAddress->getType() == DefaultPtrTy);
AddressDiscriminator = StorageAddress;
} else {
- AddressDiscriminator = llvm::Constant::getNullValue(UnqualPtrTy);
+ AddressDiscriminator = llvm::Constant::getNullValue(DefaultPtrTy);
}
llvm::ConstantInt *IntegerDiscriminator;
diff --git a/clang/lib/CodeGen/CodeGenTypeCache.h b/clang/lib/CodeGen/CodeGenTypeCache.h
index e273ebe..015306b 100644
--- a/clang/lib/CodeGen/CodeGenTypeCache.h
+++ b/clang/lib/CodeGen/CodeGenTypeCache.h
@@ -53,7 +53,7 @@ struct CodeGenTypeCache {
/// void*, void** in the target's default address space (often 0)
union {
- llvm::PointerType *UnqualPtrTy;
+ llvm::PointerType *DefaultPtrTy;
llvm::PointerType *VoidPtrTy;
llvm::PointerType *Int8PtrTy;
llvm::PointerType *VoidPtrPtrTy;
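Because these members share one union, the rename is purely cosmetic; a hedged illustration, assuming an llvm::LLVMContext Ctx:

  // Every union member designates the same opaque pointer type.
  llvm::PointerType *P = llvm::PointerType::get(Ctx, /*AddressSpace=*/0);
  // DefaultPtrTy, VoidPtrTy, Int8PtrTy, and VoidPtrPtrTy all read back this
  // single `ptr` value, so renaming the member changes no IR.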
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 9e195a9..65c4763 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -774,7 +774,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
} else {
llvm::Value *VFPAddr =
CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
- VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.UnqualPtrTy, VFPAddr,
+ VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.DefaultPtrTy, VFPAddr,
CGF.getPointerAlign(),
"memptr.virtualfn");
}
@@ -816,7 +816,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// function pointer.
CGF.EmitBlock(FnNonVirtual);
llvm::Value *NonVirtualFn =
- Builder.CreateIntToPtr(FnAsInt, CGF.UnqualPtrTy, "memptr.nonvirtualfn");
+ Builder.CreateIntToPtr(FnAsInt, CGF.DefaultPtrTy, "memptr.nonvirtualfn");
// Check the function pointer if CFI on member function pointers is enabled.
if (ShouldEmitCFICheck) {
@@ -856,7 +856,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// We're done.
CGF.EmitBlock(FnEnd);
- llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.UnqualPtrTy, 2);
+ llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.DefaultPtrTy, 2);
CalleePtr->addIncoming(VirtualFn, FnVirtual);
CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
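A rough sketch of the Itanium member-function-pointer dispatch lowered here (pseudocode for the common non-ARM variant, where the low bit of memptr.ptr tags virtual functions):

  // if (memptr.ptr & 1) {                   // virtual case
  //   vtable = *(ptr *)this;
  //   fn = *(vtable + memptr.ptr - 1);      // "memptr.virtualfn"
  // } else {
  //   fn = (function ptr)memptr.ptr;        // "memptr.nonvirtualfn"
  // }
  // CalleePtr = phi of the two incoming values, as built above.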
@@ -1403,7 +1403,7 @@ void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
// Grab the vtable pointer as an intptr_t*.
auto *ClassDecl = ElementType->castAsCXXRecordDecl();
- llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.UnqualPtrTy, ClassDecl);
+ llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.DefaultPtrTy, ClassDecl);
// Track back to entry -2 and pull out the offset there.
llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
@@ -1749,7 +1749,7 @@ llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
auto AuthenticateVTable = [&](Address ThisAddr, const CXXRecordDecl *Decl) {
if (!CGF.getLangOpts().PointerAuthCalls)
return;
- (void)CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, Decl,
+ (void)CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, Decl,
CodeGenFunction::VTableAuthMode::MustTrap);
};
@@ -1775,7 +1775,7 @@ llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
if (PerformPostCastAuthentication)
VTable = CGF.EmitPointerAuthAuth(StrippingAuthInfo, VTable);
} else
- VTable = CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, SrcDecl);
+ VTable = CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, SrcDecl);
// Compare the vptr against the expected vptr for the destination type at
// this offset.
@@ -1828,7 +1828,7 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
// Get the vtable pointer.
llvm::Value *VTable =
- CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
+ CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, ClassDecl);
// Get the offset-to-top from the vtable.
OffsetToTop =
@@ -1841,7 +1841,7 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
// Get the vtable pointer.
llvm::Value *VTable =
- CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
+ CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, ClassDecl);
// Get the offset-to-top from the vtable.
OffsetToTop =
@@ -2578,7 +2578,7 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
// We can't simply ignore this load using nosanitize metadata because
// the metadata may be lost.
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.SizeTy, CGF.UnqualPtrTy, false);
+ llvm::FunctionType::get(CGF.SizeTy, CGF.DefaultPtrTy, false);
llvm::FunctionCallee F =
CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
return CGF.Builder.CreateCall(F, numElementsPtr.emitRawPointer(CGF));
@@ -2921,7 +2921,7 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
// We're assuming that the destructor function is something we can
// reasonably call with the default CC.
- llvm::Type *dtorTy = CGF.UnqualPtrTy;
+ llvm::Type *dtorTy = CGF.DefaultPtrTy;
// Preserve address space of addr.
auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
@@ -5035,7 +5035,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
auto catchRD = CatchType->getAsCXXRecordDecl();
CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
- llvm::Type *PtrTy = CGF.UnqualPtrTy; // addrspace 0 ok
+ llvm::Type *PtrTy = CGF.DefaultPtrTy;
// Check for a copy expression. If we don't have a copy expression,
// that means a trivial copy is okay.
@@ -5244,7 +5244,7 @@ void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
llvm::FunctionCallee Dtor,
llvm::Constant *Addr) {
if (D.getTLSKind() != VarDecl::TLS_None) {
- llvm::PointerType *PtrTy = CGF.UnqualPtrTy;
+ llvm::PointerType *PtrTy = CGF.DefaultPtrTy;
// extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
llvm::FunctionType *AtExitTy =
diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 19d9265..71e2449 100644
--- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -528,7 +528,7 @@ public:
CGM.IntTy,
CGM.IntTy,
CGM.IntTy,
- getImageRelativeType(CGM.UnqualPtrTy),
+ getImageRelativeType(CGM.DefaultPtrTy),
};
BaseClassDescriptorType = llvm::StructType::create(
CGM.getLLVMContext(), FieldTypes, "rtti.BaseClassDescriptor");
@@ -540,7 +540,7 @@ public:
return ClassHierarchyDescriptorType;
// Forward-declare RTTIClassHierarchyDescriptor to break a cycle.
llvm::Type *FieldTypes[] = {CGM.IntTy, CGM.IntTy, CGM.IntTy,
- getImageRelativeType(CGM.UnqualPtrTy)};
+ getImageRelativeType(CGM.DefaultPtrTy)};
ClassHierarchyDescriptorType =
llvm::StructType::create(FieldTypes, "rtti.ClassHierarchyDescriptor");
return ClassHierarchyDescriptorType;
@@ -554,7 +554,7 @@ public:
CGM.IntTy,
CGM.IntTy,
getImageRelativeType(CGM.Int8PtrTy),
- getImageRelativeType(CGM.UnqualPtrTy),
+ getImageRelativeType(CGM.DefaultPtrTy),
getImageRelativeType(CGM.VoidTy),
};
llvm::ArrayRef<llvm::Type *> FieldTypesRef(FieldTypes);
@@ -752,7 +752,7 @@ public:
llvm::SmallString<23> CTATypeName("eh.CatchableTypeArray.");
CTATypeName += llvm::utostr(NumEntries);
- llvm::Type *CTType = getImageRelativeType(CGM.UnqualPtrTy);
+ llvm::Type *CTType = getImageRelativeType(CGM.DefaultPtrTy);
llvm::Type *FieldTypes[] = {
CGM.IntTy, // NumEntries
llvm::ArrayType::get(CTType, NumEntries) // CatchableTypes
@@ -779,7 +779,7 @@ public:
llvm::FunctionCallee getThrowFn() {
// _CxxThrowException is passed an exception object and a ThrowInfo object
// which describes the exception.
- llvm::Type *Args[] = {CGM.Int8PtrTy, CGM.UnqualPtrTy};
+ llvm::Type *Args[] = {CGM.Int8PtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
llvm::FunctionCallee Throw =
@@ -920,7 +920,7 @@ void MicrosoftCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
void MicrosoftCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
llvm::Value *Args[] = {llvm::ConstantPointerNull::get(CGM.Int8PtrTy),
- llvm::ConstantPointerNull::get(CGM.UnqualPtrTy)};
+ llvm::ConstantPointerNull::get(CGM.DefaultPtrTy)};
llvm::FunctionCallee Fn = getThrowFn();
if (isNoReturn)
CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, Args);
@@ -1969,13 +1969,13 @@ CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
SourceLocation Loc) {
CGBuilderTy &Builder = CGF.Builder;
- Ty = CGF.UnqualPtrTy;
+ Ty = CGF.DefaultPtrTy;
Address VPtr =
adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
llvm::Value *VTable =
- CGF.GetVTablePtr(VPtr, CGF.UnqualPtrTy, MethodDecl->getParent());
+ CGF.GetVTablePtr(VPtr, CGF.DefaultPtrTy, MethodDecl->getParent());
MicrosoftVTableContext &VFTContext = CGM.getMicrosoftVTableContext();
MethodVFTableLocation ML = VFTContext.getMethodVFTableLocation(GD);
@@ -2136,9 +2136,9 @@ MicrosoftCXXABI::EmitVirtualMemPtrThunk(const CXXMethodDecl *MD,
  // Load the vfptr and then the callee from the vftable. The callee should have
// adjusted 'this' so that the vfptr is at offset zero.
- llvm::Type *ThunkPtrTy = CGF.UnqualPtrTy;
+ llvm::Type *ThunkPtrTy = CGF.DefaultPtrTy;
llvm::Value *VTable =
- CGF.GetVTablePtr(getThisAddress(CGF), CGF.UnqualPtrTy, MD->getParent());
+ CGF.GetVTablePtr(getThisAddress(CGF), CGF.DefaultPtrTy, MD->getParent());
llvm::Value *VFuncPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
ThunkPtrTy, VTable, ML.Index, "vfn");
@@ -2562,7 +2562,7 @@ static ConstantAddress getInitThreadEpochPtr(CodeGenModule &CGM) {
static llvm::FunctionCallee getInitThreadHeaderFn(CodeGenModule &CGM) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- CGM.UnqualPtrTy, /*isVarArg=*/false);
+ CGM.DefaultPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(
FTy, "_Init_thread_header",
llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -2574,7 +2574,7 @@ static llvm::FunctionCallee getInitThreadHeaderFn(CodeGenModule &CGM) {
static llvm::FunctionCallee getInitThreadFooterFn(CodeGenModule &CGM) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- CGM.UnqualPtrTy, /*isVarArg=*/false);
+ CGM.DefaultPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(
FTy, "_Init_thread_footer",
llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -2586,7 +2586,7 @@ static llvm::FunctionCallee getInitThreadFooterFn(CodeGenModule &CGM) {
static llvm::FunctionCallee getInitThreadAbortFn(CodeGenModule &CGM) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- CGM.UnqualPtrTy, /*isVarArg=*/false);
+ CGM.DefaultPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(
FTy, "_Init_thread_abort",
llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -3169,7 +3169,7 @@ MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
}
llvm::Value *VBTable =
- Builder.CreateAlignedLoad(CGM.UnqualPtrTy, VBPtr, VBPtrAlign, "vbtable");
+ Builder.CreateAlignedLoad(CGM.DefaultPtrTy, VBPtr, VBPtrAlign, "vbtable");
// Translate from byte offset to table index. It improves analyzability.
llvm::Value *VBTableIndex = Builder.CreateAShr(
@@ -3825,7 +3825,7 @@ MSRTTIBuilder::getBaseClassArray(SmallVectorImpl<MSRTTIClass> &Classes) {
// mode) bytes of padding. We provide a pointer sized amount of padding by
// adding +1 to Classes.size(). The sections have pointer alignment and are
// marked pick-any so it shouldn't matter.
- llvm::Type *PtrType = ABI.getImageRelativeType(CGM.UnqualPtrTy);
+ llvm::Type *PtrType = ABI.getImageRelativeType(CGM.DefaultPtrTy);
auto *ArrType = llvm::ArrayType::get(PtrType, Classes.size() + 1);
auto *BCA =
new llvm::GlobalVariable(Module, ArrType,
@@ -4372,7 +4372,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getCatchableTypeArray(QualType T) {
CatchableTypes.insert(getCatchableType(getContext().VoidPtrTy));
uint32_t NumEntries = CatchableTypes.size();
- llvm::Type *CTType = getImageRelativeType(CGM.UnqualPtrTy);
+ llvm::Type *CTType = getImageRelativeType(CGM.DefaultPtrTy);
llvm::ArrayType *AT = llvm::ArrayType::get(CTType, NumEntries);
llvm::StructType *CTAType = getCatchableTypeArrayType(NumEntries);
llvm::Constant *Fields[] = {
diff --git a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
index 2429a43..60f9b86 100644
--- a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
@@ -2037,7 +2037,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vld1q_x3_v:
case NEON::BI__builtin_neon_vld1_x4_v:
case NEON::BI__builtin_neon_vld1q_x4_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
@@ -2263,11 +2263,11 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
  // in AArch64 it comes last. We may want to stick to one or the other.
if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
Arch == llvm::Triple::aarch64_32) {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
}
- llvm::Type *Tys[2] = {UnqualPtrTy, VTy};
+ llvm::Type *Tys[2] = {DefaultPtrTy, VTy};
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
}
case NEON::BI__builtin_neon_vsubhn_v: {
@@ -2858,7 +2858,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(
BuiltinID == clang::ARM::BI__builtin_arm_ldaex ? Intrinsic::arm_ldaex
: Intrinsic::arm_ldrex,
- UnqualPtrTy);
+ DefaultPtrTy);
CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
Val->addParamAttr(
0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
@@ -5225,7 +5225,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
? Intrinsic::aarch64_ldaxr
: Intrinsic::aarch64_ldxr,
- UnqualPtrTy);
+ DefaultPtrTy);
CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
Val->addParamAttr(
0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
@@ -7482,42 +7482,42 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_dup_v:
case NEON::BI__builtin_neon_vld2q_dup_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_dup_v:
case NEON::BI__builtin_neon_vld3q_dup_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_dup_v:
case NEON::BI__builtin_neon_vld4q_dup_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
diff --git a/clang/lib/CodeGen/TargetBuiltins/PPC.cpp b/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
index e71dc9e..44d5938 100644
--- a/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
@@ -59,7 +59,7 @@ static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
Constraints += MachineClobbers;
}
- llvm::Type *PtrType = CGF.UnqualPtrTy;
+ llvm::Type *PtrType = CGF.DefaultPtrTy;
llvm::FunctionType *FTy = llvm::FunctionType::get(RetType, {PtrType}, false);
llvm::InlineAsm *IA =
diff --git a/clang/lib/CodeGen/Targets/PPC.cpp b/clang/lib/CodeGen/Targets/PPC.cpp
index 380e8c0..35e7655 100644
--- a/clang/lib/CodeGen/Targets/PPC.cpp
+++ b/clang/lib/CodeGen/Targets/PPC.cpp
@@ -490,7 +490,7 @@ RValue PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
if (isIndirect)
- DirectTy = CGF.UnqualPtrTy;
+ DirectTy = CGF.DefaultPtrTy;
// Case 1: consume registers.
Address RegAddr = Address::invalid();
diff --git a/clang/lib/Driver/Distro.cpp b/clang/lib/Driver/Distro.cpp
index 838e087..df10458 100644
--- a/clang/lib/Driver/Distro.cpp
+++ b/clang/lib/Driver/Distro.cpp
@@ -61,10 +61,6 @@ static Distro::DistroType DetectLsbRelease(llvm::vfs::FileSystem &VFS) {
if (Version == Distro::UnknownDistro &&
Line.starts_with("DISTRIB_CODENAME="))
Version = llvm::StringSwitch<Distro::DistroType>(Line.substr(17))
- .Case("maverick", Distro::UbuntuMaverick)
- .Case("natty", Distro::UbuntuNatty)
- .Case("oneiric", Distro::UbuntuOneiric)
- .Case("precise", Distro::UbuntuPrecise)
.Case("quantal", Distro::UbuntuQuantal)
.Case("raring", Distro::UbuntuRaring)
.Case("saucy", Distro::UbuntuSaucy)
@@ -120,13 +116,17 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
if (Data.starts_with("Fedora release"))
return Distro::Fedora;
if (Data.starts_with("Red Hat Enterprise Linux") ||
- Data.starts_with("CentOS") || Data.starts_with("Scientific Linux")) {
+ Data.starts_with("CentOS") || Data.starts_with("AlmaLinux") ||
+ Data.starts_with("Rocky Linux") ||
+ Data.starts_with("Scientific Linux")) {
+ if (Data.contains("release 10"))
+ return Distro::RHEL10;
+ if (Data.contains("release 9"))
+ return Distro::RHEL9;
+ if (Data.contains("release 8"))
+ return Distro::RHEL8;
if (Data.contains("release 7"))
return Distro::RHEL7;
- else if (Data.contains("release 6"))
- return Distro::RHEL6;
- else if (Data.contains("release 5"))
- return Distro::RHEL5;
}
return Distro::UnknownDistro;
}
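For illustration (hypothetical banner string, not from the patch), the extended branch resolves a derivative distribution like so:

  // Data = "AlmaLinux release 9.4 (Seafoam Ocelot)"
  // Data.starts_with("AlmaLinux")  -> enter the RHEL-family branch
  // Data.contains("release 9")     -> return Distro::RHEL9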
@@ -139,12 +139,6 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
int MajorVersion;
if (!Data.split('.').first.getAsInteger(10, MajorVersion)) {
switch (MajorVersion) {
- case 5:
- return Distro::DebianLenny;
- case 6:
- return Distro::DebianSqueeze;
- case 7:
- return Distro::DebianWheezy;
case 8:
return Distro::DebianJessie;
case 9:
@@ -166,8 +160,6 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
}
}
return llvm::StringSwitch<Distro::DistroType>(Data.split("\n").first)
- .Case("squeeze/sid", Distro::DebianSqueeze)
- .Case("wheezy/sid", Distro::DebianWheezy)
.Case("jessie/sid", Distro::DebianJessie)
.Case("stretch/sid", Distro::DebianStretch)
.Case("buster/sid", Distro::DebianBuster)
diff --git a/clang/lib/Driver/ToolChains/Arch/Mips.cpp b/clang/lib/Driver/ToolChains/Arch/Mips.cpp
index bac8681..227c6a0 100644
--- a/clang/lib/Driver/ToolChains/Arch/Mips.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/Mips.cpp
@@ -482,9 +482,9 @@ bool mips::isFPXXDefault(const llvm::Triple &Triple, StringRef CPUName,
return false;
return llvm::StringSwitch<bool>(CPUName)
- .Cases("mips2", "mips3", "mips4", "mips5", true)
- .Cases("mips32", "mips32r2", "mips32r3", "mips32r5", true)
- .Cases("mips64", "mips64r2", "mips64r3", "mips64r5", true)
+ .Cases({"mips2", "mips3", "mips4", "mips5"}, true)
+ .Cases({"mips32", "mips32r2", "mips32r3", "mips32r5"}, true)
+ .Cases({"mips64", "mips64r2", "mips64r3", "mips64r5"}, true)
.Default(false);
}
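The mechanical change above (repeated in several files below) adopts the braced-list overload of llvm::StringSwitch::Cases; a self-contained sketch of the call shape, with toy values:

  bool IsOldMips = llvm::StringSwitch<bool>(CPUName)
                       .Cases({"mips2", "mips3"}, true)
                       .Default(false);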
diff --git a/clang/lib/Driver/ToolChains/Darwin.cpp b/clang/lib/Driver/ToolChains/Darwin.cpp
index d2356eb..cc5bcd1 100644
--- a/clang/lib/Driver/ToolChains/Darwin.cpp
+++ b/clang/lib/Driver/ToolChains/Darwin.cpp
@@ -51,15 +51,15 @@ llvm::Triple::ArchType darwin::getArchTypeForMachOArchName(StringRef Str) {
// translation.
return llvm::StringSwitch<llvm::Triple::ArchType>(Str)
- .Cases("i386", "i486", "i486SX", "i586", "i686", llvm::Triple::x86)
- .Cases("pentium", "pentpro", "pentIIm3", "pentIIm5", "pentium4",
+ .Cases({"i386", "i486", "i486SX", "i586", "i686"}, llvm::Triple::x86)
+ .Cases({"pentium", "pentpro", "pentIIm3", "pentIIm5", "pentium4"},
llvm::Triple::x86)
- .Cases("x86_64", "x86_64h", llvm::Triple::x86_64)
+ .Cases({"x86_64", "x86_64h"}, llvm::Triple::x86_64)
// This is derived from the driver.
- .Cases("arm", "armv4t", "armv5", "armv6", "armv6m", llvm::Triple::arm)
- .Cases("armv7", "armv7em", "armv7k", "armv7m", llvm::Triple::arm)
- .Cases("armv7s", "xscale", llvm::Triple::arm)
- .Cases("arm64", "arm64e", llvm::Triple::aarch64)
+ .Cases({"arm", "armv4t", "armv5", "armv6", "armv6m"}, llvm::Triple::arm)
+ .Cases({"armv7", "armv7em", "armv7k", "armv7m"}, llvm::Triple::arm)
+ .Cases({"armv7s", "xscale"}, llvm::Triple::arm)
+ .Cases({"arm64", "arm64e"}, llvm::Triple::aarch64)
.Case("arm64_32", llvm::Triple::aarch64_32)
.Case("r600", llvm::Triple::r600)
.Case("amdgcn", llvm::Triple::amdgcn)
diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp
index 26a9542..37c10c6 100644
--- a/clang/lib/Format/ContinuationIndenter.cpp
+++ b/clang/lib/Format/ContinuationIndenter.cpp
@@ -433,7 +433,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
}
if ((startsNextParameter(Current, Style) || Previous.is(tok::semi) ||
(Previous.is(TT_TemplateCloser) && Current.is(TT_StartOfName) &&
- State.Line->First->isNot(TT_AttributeSquare) && Style.isCpp() &&
+ State.Line->First->isNot(TT_AttributeLSquare) && Style.isCpp() &&
// FIXME: This is a temporary workaround for the case where clang-format
// sets BreakBeforeParameter to avoid bin packing and this creates a
// completely unnecessary line break after a template type that isn't
@@ -1374,7 +1374,8 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
}
if (Current.is(TT_LambdaArrow) &&
Previous.isOneOf(tok::kw_noexcept, tok::kw_mutable, tok::kw_constexpr,
- tok::kw_consteval, tok::kw_static, TT_AttributeSquare)) {
+ tok::kw_consteval, tok::kw_static,
+ TT_AttributeRSquare)) {
return ContinuationIndent;
}
if ((Current.isOneOf(tok::r_brace, tok::r_square) ||
@@ -1499,9 +1500,10 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
Current.isNot(tok::l_paren) &&
!Current.endsSequence(TT_StartOfName, TT_AttributeMacro,
TT_PointerOrReference)) ||
- PreviousNonComment->isOneOf(
- TT_AttributeRParen, TT_AttributeSquare, TT_FunctionAnnotationRParen,
- TT_JavaAnnotation, TT_LeadingJavaAnnotation))) ||
+ PreviousNonComment->isOneOf(TT_AttributeRParen, TT_AttributeRSquare,
+ TT_FunctionAnnotationRParen,
+ TT_JavaAnnotation,
+ TT_LeadingJavaAnnotation))) ||
(!Style.IndentWrappedFunctionNames &&
NextNonComment->isOneOf(tok::kw_operator, TT_FunctionDeclarationName))) {
return std::max(CurrentState.LastSpace, CurrentState.Indent);
diff --git a/clang/lib/Format/DefinitionBlockSeparator.cpp b/clang/lib/Format/DefinitionBlockSeparator.cpp
index 3f4ce5f..855f2ef 100644
--- a/clang/lib/Format/DefinitionBlockSeparator.cpp
+++ b/clang/lib/Format/DefinitionBlockSeparator.cpp
@@ -169,7 +169,7 @@ void DefinitionBlockSeparator::separateBlocks(
}
}
- if (Style.isCSharp() && OperateLine->First->is(TT_AttributeSquare))
+ if (Style.isCSharp() && OperateLine->First->is(TT_AttributeLSquare))
return true;
return false;
};
diff --git a/clang/lib/Format/FormatToken.h b/clang/lib/Format/FormatToken.h
index f015d27..6f3d24a 100644
--- a/clang/lib/Format/FormatToken.h
+++ b/clang/lib/Format/FormatToken.h
@@ -30,9 +30,10 @@ namespace format {
TYPE(ArraySubscriptLSquare) \
TYPE(AttributeColon) \
TYPE(AttributeLParen) \
+ TYPE(AttributeLSquare) \
TYPE(AttributeMacro) \
TYPE(AttributeRParen) \
- TYPE(AttributeSquare) \
+ TYPE(AttributeRSquare) \
TYPE(BinaryOperator) \
TYPE(BitFieldColon) \
TYPE(BlockComment) \
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index 778d2ca..c97a9e8 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -531,10 +531,6 @@ private:
OpeningParen.Previous->is(TT_LeadingJavaAnnotation)) {
CurrentToken->setType(TT_LeadingJavaAnnotation);
}
- if (OpeningParen.Previous &&
- OpeningParen.Previous->is(TT_AttributeSquare)) {
- CurrentToken->setType(TT_AttributeSquare);
- }
if (!HasMultipleLines)
OpeningParen.setPackingKind(PPK_Inconclusive);
@@ -722,9 +718,11 @@ private:
} else if (InsideInlineASM) {
Left->setType(TT_InlineASMSymbolicNameLSquare);
} else if (IsCpp11AttributeSpecifier) {
- Left->setType(TT_AttributeSquare);
- if (!IsInnerSquare && Left->Previous)
- Left->Previous->EndsCppAttributeGroup = false;
+ if (!IsInnerSquare) {
+ Left->setType(TT_AttributeLSquare);
+ if (Left->Previous)
+ Left->Previous->EndsCppAttributeGroup = false;
+ }
} else if (Style.isJavaScript() && Parent &&
Contexts.back().ContextKind == tok::l_brace &&
Parent->isOneOf(tok::l_brace, tok::comma)) {
@@ -733,7 +731,7 @@ private:
Parent && Parent->isOneOf(tok::l_brace, tok::comma)) {
Left->setType(TT_DesignatedInitializerLSquare);
} else if (IsCSharpAttributeSpecifier) {
- Left->setType(TT_AttributeSquare);
+ Left->setType(TT_AttributeLSquare);
} else if (CurrentToken->is(tok::r_square) && Parent &&
Parent->is(TT_TemplateCloser)) {
Left->setType(TT_ArraySubscriptLSquare);
@@ -797,13 +795,12 @@ private:
while (CurrentToken) {
if (CurrentToken->is(tok::r_square)) {
- if (IsCpp11AttributeSpecifier) {
- CurrentToken->setType(TT_AttributeSquare);
- if (!IsInnerSquare)
- CurrentToken->EndsCppAttributeGroup = true;
+ if (IsCpp11AttributeSpecifier && !IsInnerSquare) {
+ CurrentToken->setType(TT_AttributeRSquare);
+ CurrentToken->EndsCppAttributeGroup = true;
}
if (IsCSharpAttributeSpecifier) {
- CurrentToken->setType(TT_AttributeSquare);
+ CurrentToken->setType(TT_AttributeRSquare);
} else if (((CurrentToken->Next &&
CurrentToken->Next->is(tok::l_paren)) ||
(CurrentToken->Previous &&
@@ -1297,7 +1294,7 @@ private:
bool consumeToken() {
if (IsCpp) {
const auto *Prev = CurrentToken->getPreviousNonComment();
- if (Prev && Prev->is(tok::r_square) && Prev->is(TT_AttributeSquare) &&
+ if (Prev && Prev->is(TT_AttributeRSquare) &&
CurrentToken->isOneOf(tok::kw_if, tok::kw_switch, tok::kw_case,
tok::kw_default, tok::kw_for, tok::kw_while) &&
mustBreakAfterAttributes(*CurrentToken, Style)) {
@@ -2850,7 +2847,7 @@ private:
T = Tok->Previous;
continue;
}
- } else if (T->is(TT_AttributeSquare)) {
+ } else if (T->is(TT_AttributeRSquare)) {
// Handle `x = (foo *[[clang::foo]])&v;`:
if (T->MatchingParen && T->MatchingParen->Previous) {
T = T->MatchingParen->Previous;
@@ -3656,7 +3653,7 @@ static FormatToken *getFunctionName(const AnnotatedLine &Line,
for (FormatToken *Tok = Line.getFirstNonComment(), *Name = nullptr; Tok;
Tok = Tok->getNextNonComment()) {
// Skip C++11 attributes both before and after the function name.
- if (Tok->is(tok::l_square) && Tok->is(TT_AttributeSquare)) {
+ if (Tok->is(TT_AttributeLSquare)) {
Tok = Tok->MatchingParen;
if (!Tok)
break;
@@ -4325,7 +4322,7 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
return 35;
if (Right.isNoneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
TT_ArrayInitializerLSquare,
- TT_DesignatedInitializerLSquare, TT_AttributeSquare)) {
+ TT_DesignatedInitializerLSquare, TT_AttributeLSquare)) {
return 500;
}
}
@@ -4808,7 +4805,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Right.is(tok::l_square) &&
Right.isNoneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
TT_DesignatedInitializerLSquare,
- TT_StructuredBindingLSquare, TT_AttributeSquare) &&
+ TT_StructuredBindingLSquare, TT_AttributeLSquare) &&
Left.isNoneOf(tok::numeric_constant, TT_DictLiteral) &&
!(Left.isNot(tok::r_square) && Style.SpaceBeforeSquareBrackets &&
Right.is(TT_ArraySubscriptLSquare))) {
@@ -4827,7 +4824,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
// Space between template and attribute.
// e.g. template <typename T> [[nodiscard]] ...
- if (Left.is(TT_TemplateCloser) && Right.is(TT_AttributeSquare))
+ if (Left.is(TT_TemplateCloser) && Right.is(TT_AttributeLSquare))
return true;
// Space before parentheses common for all languages
if (Right.is(tok::l_paren)) {
@@ -4842,10 +4839,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return Style.SpaceBeforeParensOptions.AfterRequiresInExpression ||
spaceRequiredBeforeParens(Right);
}
- if (Left.is(TT_AttributeRParen) ||
- (Left.is(tok::r_square) && Left.is(TT_AttributeSquare))) {
+ if (Left.isOneOf(TT_AttributeRParen, TT_AttributeRSquare))
return true;
- }
if (Left.is(TT_ForEachMacro)) {
return Style.SpaceBeforeParensOptions.AfterForeachMacros ||
spaceRequiredBeforeParens(Right);
@@ -5663,16 +5658,14 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
}
// Break after C# [...] and before public/protected/private/internal.
- if (Left.is(TT_AttributeSquare) && Left.is(tok::r_square) &&
+ if (Left.is(TT_AttributeRSquare) &&
(Right.isAccessSpecifier(/*ColonRequired=*/false) ||
Right.is(Keywords.kw_internal))) {
return true;
}
// Break between ] and [ but only when there are really 2 attributes.
- if (Left.is(TT_AttributeSquare) && Right.is(TT_AttributeSquare) &&
- Left.is(tok::r_square) && Right.is(tok::l_square)) {
+ if (Left.is(TT_AttributeRSquare) && Right.is(TT_AttributeLSquare))
return true;
- }
} else if (Style.isJavaScript()) {
// FIXME: This might apply to other languages and token kinds.
if (Right.is(tok::string_literal) && Left.is(tok::plus) && BeforeLeft &&
@@ -6412,8 +6405,10 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if (Right.isAttribute())
return true;
- if (Right.is(tok::l_square) && Right.is(TT_AttributeSquare))
- return Left.isNot(TT_AttributeSquare);
+ if (Right.is(TT_AttributeLSquare)) {
+ assert(Left.isNot(tok::l_square));
+ return true;
+ }
if (Left.is(tok::identifier) && Right.is(tok::string_literal))
return true;
@@ -6454,8 +6449,12 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
Left.getPrecedence() == prec::Assignment)) {
return true;
}
- if ((Left.is(TT_AttributeSquare) && Right.is(tok::l_square)) ||
- (Left.is(tok::r_square) && Right.is(TT_AttributeSquare))) {
+ if (Left.is(TT_AttributeLSquare) && Right.is(tok::l_square)) {
+ assert(Right.isNot(TT_AttributeLSquare));
+ return false;
+ }
+ if (Left.is(tok::r_square) && Right.is(TT_AttributeRSquare)) {
+ assert(Left.isNot(TT_AttributeRSquare));
return false;
}
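For orientation, a hypothetical annotation of a C++11 attribute under the split token types (per this patch, only the outermost brackets carry them):

  // [[nodiscard]] int f();
  // outer '['  -> TT_AttributeLSquare
  // inner '[' and inner ']' -> no attribute token type anymore
  // outer ']'  -> TT_AttributeRSquare, with EndsCppAttributeGroup = true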
diff --git a/clang/lib/Format/WhitespaceManager.cpp b/clang/lib/Format/WhitespaceManager.cpp
index 9261294..65fc65e 100644
--- a/clang/lib/Format/WhitespaceManager.cpp
+++ b/clang/lib/Format/WhitespaceManager.cpp
@@ -432,7 +432,11 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
// right-justified. It is used to align compound assignments like `+=` and `=`.
// When RightJustify and ACS.PadOperators are true, operators in each block to
// be aligned will be padded on the left to the same length before aligning.
-template <typename F>
+//
+// The simple check does not look at the indentation and nesting level to
+// recurse into the line for alignment, and it does not count the commas. This
+// is used, e.g., for aligning macro definitions.
+template <typename F, bool SimpleCheck = false>
static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
SmallVector<WhitespaceManager::Change, 16> &Changes,
unsigned StartAt,
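For reference, the call shape the new parameter enables, as used for macro alignment later in this diff:

  AlignTokens<decltype(AlignMacrosMatches) &, /*SimpleCheck=*/true>(
      Style, AlignMacrosMatches, Changes, /*StartAt=*/0,
      Style.AlignConsecutiveMacros);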
@@ -465,9 +469,9 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
// Measure the scope level (i.e. depth of (), [], {}) of the first token, and
// abort when we hit any token in a higher scope than the starting one.
- auto IndentAndNestingLevel = StartAt < Changes.size()
- ? Changes[StartAt].indentAndNestingLevel()
- : std::tuple<unsigned, unsigned, unsigned>();
+ const auto IndentAndNestingLevel =
+ StartAt < Changes.size() ? Changes[StartAt].indentAndNestingLevel()
+ : std::tuple<unsigned, unsigned, unsigned>();
  // Keep track of the number of commas before the matching tokens; we will only
// align a sequence of matching tokens if they are preceded by the same number
@@ -502,15 +506,15 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
MatchedIndices.clear();
};
- unsigned i = StartAt;
- for (unsigned e = Changes.size(); i != e; ++i) {
- auto &CurrentChange = Changes[i];
+ unsigned I = StartAt;
+ for (unsigned E = Changes.size(); I != E; ++I) {
+ auto &CurrentChange = Changes[I];
if (CurrentChange.indentAndNestingLevel() < IndentAndNestingLevel)
break;
if (CurrentChange.NewlinesBefore != 0) {
CommasBeforeMatch = 0;
- EndOfSequence = i;
+ EndOfSequence = I;
// Whether to break the alignment sequence because of an empty line.
bool EmptyLineBreak =
@@ -526,8 +530,8 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
// A new line starts, re-initialize line status tracking bools.
// Keep the match state if a string literal is continued on this line.
- if (i == 0 || CurrentChange.Tok->isNot(tok::string_literal) ||
- Changes[i - 1].Tok->isNot(tok::string_literal)) {
+ if (I == 0 || CurrentChange.Tok->isNot(tok::string_literal) ||
+ Changes[I - 1].Tok->isNot(tok::string_literal)) {
FoundMatchOnLine = false;
}
LineIsComment = true;
@@ -536,14 +540,17 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
if (CurrentChange.Tok->isNot(tok::comment))
LineIsComment = false;
- if (CurrentChange.Tok->is(tok::comma)) {
- ++CommasBeforeMatch;
- } else if (CurrentChange.indentAndNestingLevel() > IndentAndNestingLevel) {
- // Call AlignTokens recursively, skipping over this scope block.
- unsigned StoppedAt =
- AlignTokens(Style, Matches, Changes, i, ACS, RightJustify);
- i = StoppedAt - 1;
- continue;
+ if (!SimpleCheck) {
+ if (CurrentChange.Tok->is(tok::comma)) {
+ ++CommasBeforeMatch;
+ } else if (CurrentChange.indentAndNestingLevel() >
+ IndentAndNestingLevel) {
+ // Call AlignTokens recursively, skipping over this scope block.
+ const auto StoppedAt =
+ AlignTokens(Style, Matches, Changes, I, ACS, RightJustify);
+ I = StoppedAt - 1;
+ continue;
+ }
}
if (!Matches(CurrentChange))
@@ -552,7 +559,7 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
    // If there is more than one matching token per line, or if the number of
    // preceding commas does not match anymore, end the sequence.
if (FoundMatchOnLine || CommasBeforeMatch != CommasBeforeLastMatch) {
- MatchedIndices.push_back(i);
+ MatchedIndices.push_back(I);
AlignCurrentSequence();
}
@@ -560,29 +567,69 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
FoundMatchOnLine = true;
if (StartOfSequence == 0)
- StartOfSequence = i;
+ StartOfSequence = I;
unsigned ChangeWidthLeft = CurrentChange.StartOfTokenColumn;
unsigned ChangeWidthAnchor = 0;
unsigned ChangeWidthRight = 0;
+ unsigned CurrentChangeWidthRight = 0;
if (RightJustify)
if (ACS.PadOperators)
ChangeWidthAnchor = CurrentChange.TokenLength;
else
ChangeWidthLeft += CurrentChange.TokenLength;
else
- ChangeWidthRight = CurrentChange.TokenLength;
- for (unsigned j = i + 1; j != e && Changes[j].NewlinesBefore == 0; ++j) {
- ChangeWidthRight += Changes[j].Spaces;
+ CurrentChangeWidthRight = CurrentChange.TokenLength;
+ const FormatToken *MatchingParenToEncounter = nullptr;
+ for (unsigned J = I + 1;
+ J != E && (Changes[J].NewlinesBefore == 0 || MatchingParenToEncounter);
+ ++J) {
+ const auto &Change = Changes[J];
+ const auto *Tok = Change.Tok;
+
+ if (Tok->MatchingParen) {
+ if (Tok->isOneOf(tok::l_paren, tok::l_brace, tok::l_square,
+ TT_TemplateOpener) &&
+ !MatchingParenToEncounter) {
+ // If the next token is on the next line, we probably don't need to
+ // check the following lengths, because it most likely isn't aligned
+ // with the rest.
+ if (J + 1 != E && Changes[J + 1].NewlinesBefore == 0)
+ MatchingParenToEncounter = Tok->MatchingParen;
+ } else if (MatchingParenToEncounter == Tok->MatchingParen) {
+ MatchingParenToEncounter = nullptr;
+ }
+ }
+
+ if (Change.NewlinesBefore != 0) {
+ ChangeWidthRight = std::max(ChangeWidthRight, CurrentChangeWidthRight);
+ const auto ChangeWidthStart = ChangeWidthLeft + ChangeWidthAnchor;
+ // If the position of the current token is columnwise before the begin
+ // of the alignment, we drop out here, because the next line does not
+ // have to be moved with the previous one(s) for the alignment. E.g.:
+ // int i1 = 1; | <- ColumnLimit | int i1 = 1;
+ // int j = 0; | Without the break -> | int j = 0;
+ // int k = bar( | We still want to align the = | int k = bar(
+ // argument1, | here, even if we can't move | argument1,
+ // argument2); | the following lines. | argument2);
+ if (static_cast<unsigned>(Change.Spaces) < ChangeWidthStart)
+ break;
+ CurrentChangeWidthRight = Change.Spaces - ChangeWidthStart;
+ } else {
+ CurrentChangeWidthRight += Change.Spaces;
+ }
+
// Changes are generally 1:1 with the tokens, but a change could also be
// inside of a token, in which case it's counted more than once: once for
// the whitespace surrounding the token (!IsInsideToken) and once for
// each whitespace change within it (IsInsideToken).
// Therefore, changes inside of a token should only count the space.
- if (!Changes[j].IsInsideToken)
- ChangeWidthRight += Changes[j].TokenLength;
+ if (!Change.IsInsideToken)
+ CurrentChangeWidthRight += Change.TokenLength;
}
+ ChangeWidthRight = std::max(ChangeWidthRight, CurrentChangeWidthRight);
+
// If we are restricted by the maximum column width, end the sequence.
unsigned NewLeft = std::max(ChangeWidthLeft, WidthLeft);
unsigned NewAnchor = std::max(ChangeWidthAnchor, WidthAnchor);
@@ -591,7 +638,7 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
if (Style.ColumnLimit != 0 &&
Style.ColumnLimit < NewLeft + NewAnchor + NewRight) {
AlignCurrentSequence();
- StartOfSequence = i;
+ StartOfSequence = I;
WidthLeft = ChangeWidthLeft;
WidthAnchor = ChangeWidthAnchor;
WidthRight = ChangeWidthRight;
@@ -600,12 +647,12 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
WidthAnchor = NewAnchor;
WidthRight = NewRight;
}
- MatchedIndices.push_back(i);
+ MatchedIndices.push_back(I);
}
- EndOfSequence = i;
+ EndOfSequence = I;
AlignCurrentSequence();
- return i;
+ return I;
}
// Aligns a sequence of matching tokens, on the MinColumn column.
@@ -656,7 +703,7 @@ void WhitespaceManager::alignConsecutiveMacros() {
auto AlignMacrosMatches = [](const Change &C) {
const FormatToken *Current = C.Tok;
- unsigned SpacesRequiredBefore = 1;
+ assert(Current);
if (Current->SpacesRequiredBefore == 0 || !Current->Previous)
return false;
@@ -665,79 +712,26 @@ void WhitespaceManager::alignConsecutiveMacros() {
// If token is a ")", skip over the parameter list, to the
// token that precedes the "("
- if (Current->is(tok::r_paren) && Current->MatchingParen) {
- Current = Current->MatchingParen->Previous;
- SpacesRequiredBefore = 0;
- }
-
- if (!Current || Current->isNot(tok::identifier))
- return false;
-
- if (!Current->Previous || Current->Previous->isNot(tok::pp_define))
- return false;
-
- // For a macro function, 0 spaces are required between the
- // identifier and the lparen that opens the parameter list.
- // For a simple macro, 1 space is required between the
- // identifier and the first token of the defined value.
- return Current->Next->SpacesRequiredBefore == SpacesRequiredBefore;
- };
-
- unsigned MinColumn = 0;
-
- // Start and end of the token sequence we're processing.
- unsigned StartOfSequence = 0;
- unsigned EndOfSequence = 0;
-
- // Whether a matching token has been found on the current line.
- bool FoundMatchOnLine = false;
-
- // Whether the current line consists only of comments
- bool LineIsComment = true;
-
- unsigned I = 0;
- for (unsigned E = Changes.size(); I != E; ++I) {
- if (Changes[I].NewlinesBefore != 0) {
- EndOfSequence = I;
-
- // Whether to break the alignment sequence because of an empty line.
- bool EmptyLineBreak = (Changes[I].NewlinesBefore > 1) &&
- !Style.AlignConsecutiveMacros.AcrossEmptyLines;
-
- // Whether to break the alignment sequence because of a line without a
- // match.
- bool NoMatchBreak =
- !FoundMatchOnLine &&
- !(LineIsComment && Style.AlignConsecutiveMacros.AcrossComments);
-
- if (EmptyLineBreak || NoMatchBreak) {
- AlignMatchingTokenSequence(StartOfSequence, EndOfSequence, MinColumn,
- AlignMacrosMatches, Changes);
+ if (Current->is(tok::r_paren)) {
+ const auto *MatchingParen = Current->MatchingParen;
+ // For a macro function, 0 spaces are required between the
+ // identifier and the lparen that opens the parameter list.
+ if (!MatchingParen || MatchingParen->SpacesRequiredBefore > 0 ||
+ !MatchingParen->Previous) {
+ return false;
}
-
- // A new line starts, re-initialize line status tracking bools.
- FoundMatchOnLine = false;
- LineIsComment = true;
+ Current = MatchingParen->Previous;
+ } else if (Current->Next->SpacesRequiredBefore != 1) {
+ // For a simple macro, 1 space is required between the
+ // identifier and the first token of the defined value.
+ return false;
}
- if (Changes[I].Tok->isNot(tok::comment))
- LineIsComment = false;
-
- if (!AlignMacrosMatches(Changes[I]))
- continue;
-
- FoundMatchOnLine = true;
-
- if (StartOfSequence == 0)
- StartOfSequence = I;
-
- unsigned ChangeMinColumn = Changes[I].StartOfTokenColumn;
- MinColumn = std::max(MinColumn, ChangeMinColumn);
- }
+ return Current->endsSequence(tok::identifier, tok::pp_define);
+ };
- EndOfSequence = I;
- AlignMatchingTokenSequence(StartOfSequence, EndOfSequence, MinColumn,
- AlignMacrosMatches, Changes);
+ AlignTokens<decltype(AlignMacrosMatches) &, /*SimpleCheck=*/true>(
+ Style, AlignMacrosMatches, Changes, 0, Style.AlignConsecutiveMacros);
}
void WhitespaceManager::alignConsecutiveAssignments() {
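A sketch of input the rewritten matcher accepts (illustrative): a simple macro needs exactly one space before its value, a function-like macro zero spaces before its '(':

  #define SHORT      1          // simple: the value column is aligned
  #define LONGER(x)  ((x) + 1)  // function-like: '(' hugs the identifier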
diff --git a/clang/lib/Frontend/FrontendAction.cpp b/clang/lib/Frontend/FrontendAction.cpp
index 1b63c40..0daa20a 100644
--- a/clang/lib/Frontend/FrontendAction.cpp
+++ b/clang/lib/Frontend/FrontendAction.cpp
@@ -629,7 +629,7 @@ static std::error_code collectModuleHeaderIncludes(
// Check whether this entry has an extension typically associated with
// headers.
if (!llvm::StringSwitch<bool>(llvm::sys::path::extension(Dir->path()))
- .Cases(".h", ".H", ".hh", ".hpp", true)
+ .Cases({".h", ".H", ".hh", ".hpp"}, true)
.Default(false))
continue;
diff --git a/clang/lib/Headers/__clang_hip_runtime_wrapper.h b/clang/lib/Headers/__clang_hip_runtime_wrapper.h
index fb0ece9..19ce7a5 100644
--- a/clang/lib/Headers/__clang_hip_runtime_wrapper.h
+++ b/clang/lib/Headers/__clang_hip_runtime_wrapper.h
@@ -26,6 +26,7 @@
#define __managed__ __attribute__((managed))
#define __cluster_dims__(...) __attribute__((cluster_dims(__VA_ARGS__)))
+#define __no_cluster__ __attribute__((no_cluster))
#if !defined(__cplusplus) || __cplusplus < 201103L
#define nullptr NULL;
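A hypothetical use of the new wrapper macro (only the attribute name comes from the patch; the kernel is illustrative):

  __global__ __no_cluster__ void plainKernel(float *out) {
    // presumably marks a kernel never launched with a cluster configuration
  }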
diff --git a/clang/lib/Headers/avx2intrin.h b/clang/lib/Headers/avx2intrin.h
index d35bc0e..fdb825f 100644
--- a/clang/lib/Headers/avx2intrin.h
+++ b/clang/lib/Headers/avx2intrin.h
@@ -1298,9 +1298,8 @@ _mm256_min_epu32(__m256i __a, __m256i __b) {
/// \param __a
/// A 256-bit integer vector containing the source bytes.
/// \returns The 32-bit integer mask.
-static __inline__ int __DEFAULT_FN_ATTRS256
-_mm256_movemask_epi8(__m256i __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_movemask_epi8(__m256i __a) {
return __builtin_ia32_pmovmskb256((__v32qi)__a);
}
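A hedged consequence of the _CONSTEXPR attribute (assuming _mm256_setzero_si256 is likewise constexpr in this compiler, as in recent Clang):

  #if defined(__cplusplus)
  static_assert(_mm256_movemask_epi8(_mm256_setzero_si256()) == 0,
                "an all-zero vector has no sign bits set");
  #endif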
diff --git a/clang/lib/Headers/avx512dqintrin.h b/clang/lib/Headers/avx512dqintrin.h
index 3681cca..fef1a2d 100644
--- a/clang/lib/Headers/avx512dqintrin.h
+++ b/clang/lib/Headers/avx512dqintrin.h
@@ -1200,10 +1200,10 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
(__v8di)_mm512_setzero_si512());
}
-#define _mm512_extractf32x8_ps(A, imm) \
- ((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
- (__v8sf)_mm256_undefined_ps(), \
- (__mmask8)-1))
+#define _mm512_extractf32x8_ps(A, imm) \
+ ((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8) - 1))
#define _mm512_mask_extractf32x8_ps(W, U, A, imm) \
((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
@@ -1215,11 +1215,10 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
(__v8sf)_mm256_setzero_ps(), \
(__mmask8)(U)))
-#define _mm512_extractf64x2_pd(A, imm) \
- ((__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
- (int)(imm), \
- (__v2df)_mm_undefined_pd(), \
- (__mmask8)-1))
+#define _mm512_extractf64x2_pd(A, imm) \
+ ((__m128d)__builtin_ia32_extractf64x2_512_mask( \
+ (__v8df)(__m512d)(A), (int)(imm), (__v2df)_mm_setzero_pd(), \
+ (__mmask8) - 1))
#define _mm512_mask_extractf64x2_pd(W, U, A, imm) \
((__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
@@ -1233,10 +1232,10 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
(__v2df)_mm_setzero_pd(), \
(__mmask8)(U)))
-#define _mm512_extracti32x8_epi32(A, imm) \
- ((__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v8si)_mm256_undefined_si256(), \
- (__mmask8)-1))
+#define _mm512_extracti32x8_epi32(A, imm) \
+ ((__m256i)__builtin_ia32_extracti32x8_mask( \
+ (__v16si)(__m512i)(A), (int)(imm), (__v8si)_mm256_setzero_si256(), \
+ (__mmask8) - 1))
#define _mm512_mask_extracti32x8_epi32(W, U, A, imm) \
((__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
@@ -1248,11 +1247,10 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
(__v8si)_mm256_setzero_si256(), \
(__mmask8)(U)))
-#define _mm512_extracti64x2_epi64(A, imm) \
- ((__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
- (int)(imm), \
- (__v2di)_mm_undefined_si128(), \
- (__mmask8)-1))
+#define _mm512_extracti64x2_epi64(A, imm) \
+ ((__m128i)__builtin_ia32_extracti64x2_512_mask( \
+ (__v8di)(__m512i)(A), (int)(imm), (__v2di)_mm_setzero_si128(), \
+ (__mmask8) - 1))
#define _mm512_mask_extracti64x2_epi64(W, U, A, imm) \
((__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
diff --git a/clang/lib/Headers/avx512fintrin.h b/clang/lib/Headers/avx512fintrin.h
index 07de036..18c4a44 100644
--- a/clang/lib/Headers/avx512fintrin.h
+++ b/clang/lib/Headers/avx512fintrin.h
@@ -3156,10 +3156,10 @@ _mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i __I,
(__v16si)_mm512_setzero_si512()))
/* Vector Extract */
-#define _mm512_extractf64x4_pd(A, I) \
- ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
- (__v4df)_mm256_undefined_pd(), \
- (__mmask8)-1))
+#define _mm512_extractf64x4_pd(A, I) \
+ ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8) - 1))
#define _mm512_mask_extractf64x4_pd(W, U, A, imm) \
((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
@@ -3171,10 +3171,10 @@ _mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i __I,
(__v4df)_mm256_setzero_pd(), \
(__mmask8)(U)))
-#define _mm512_extractf32x4_ps(A, I) \
- ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
- (__v4sf)_mm_undefined_ps(), \
- (__mmask8)-1))
+#define _mm512_extractf32x4_ps(A, I) \
+ ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8) - 1))
#define _mm512_mask_extractf32x4_ps(W, U, A, imm) \
((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
@@ -7089,10 +7089,10 @@ _mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
__builtin_ia32_pmovqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
}
-#define _mm512_extracti32x4_epi32(A, imm) \
- ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v4si)_mm_undefined_si128(), \
- (__mmask8)-1))
+#define _mm512_extracti32x4_epi32(A, imm) \
+ ((__m128i)__builtin_ia32_extracti32x4_mask( \
+ (__v16si)(__m512i)(A), (int)(imm), (__v4si)_mm_setzero_si128(), \
+ (__mmask8) - 1))
#define _mm512_mask_extracti32x4_epi32(W, U, A, imm) \
((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
@@ -7104,10 +7104,10 @@ _mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
(__v4si)_mm_setzero_si128(), \
(__mmask8)(U)))
-#define _mm512_extracti64x4_epi64(A, imm) \
+#define _mm512_extracti64x4_epi64(A, imm) \
((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
- (__v4di)_mm256_undefined_si256(), \
- (__mmask8)-1))
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8) - 1))
#define _mm512_mask_extracti64x4_epi64(W, U, A, imm) \
((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
diff --git a/clang/lib/Headers/avx512vldqintrin.h b/clang/lib/Headers/avx512vldqintrin.h
index ee7974e..707d039 100644
--- a/clang/lib/Headers/avx512vldqintrin.h
+++ b/clang/lib/Headers/avx512vldqintrin.h
@@ -1062,11 +1062,10 @@ _mm256_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
(__v4di)_mm256_setzero_si256());
}
-#define _mm256_extractf64x2_pd(A, imm) \
- ((__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
- (int)(imm), \
- (__v2df)_mm_undefined_pd(), \
- (__mmask8)-1))
+#define _mm256_extractf64x2_pd(A, imm) \
+ ((__m128d)__builtin_ia32_extractf64x2_256_mask( \
+ (__v4df)(__m256d)(A), (int)(imm), (__v2df)_mm_setzero_pd(), \
+ (__mmask8) - 1))
#define _mm256_mask_extractf64x2_pd(W, U, A, imm) \
((__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
@@ -1080,11 +1079,10 @@ _mm256_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
(__v2df)_mm_setzero_pd(), \
(__mmask8)(U)))
-#define _mm256_extracti64x2_epi64(A, imm) \
- ((__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
- (int)(imm), \
- (__v2di)_mm_undefined_si128(), \
- (__mmask8)-1))
+#define _mm256_extracti64x2_epi64(A, imm) \
+ ((__m128i)__builtin_ia32_extracti64x2_256_mask( \
+ (__v4di)(__m256i)(A), (int)(imm), (__v2di)_mm_setzero_si128(), \
+ (__mmask8) - 1))
#define _mm256_mask_extracti64x2_epi64(W, U, A, imm) \
((__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
diff --git a/clang/lib/Headers/avx512vlintrin.h b/clang/lib/Headers/avx512vlintrin.h
index 676b5a0..92bb444 100644
--- a/clang/lib/Headers/avx512vlintrin.h
+++ b/clang/lib/Headers/avx512vlintrin.h
@@ -7545,11 +7545,10 @@ _mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
__builtin_ia32_pmovqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
}
-#define _mm256_extractf32x4_ps(A, imm) \
- ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
- (int)(imm), \
- (__v4sf)_mm_undefined_ps(), \
- (__mmask8)-1))
+#define _mm256_extractf32x4_ps(A, imm) \
+ ((__m128)__builtin_ia32_extractf32x4_256_mask( \
+ (__v8sf)(__m256)(A), (int)(imm), (__v4sf)_mm_setzero_ps(), \
+ (__mmask8) - 1))
#define _mm256_mask_extractf32x4_ps(W, U, A, imm) \
((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
@@ -7563,11 +7562,10 @@ _mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
(__v4sf)_mm_setzero_ps(), \
(__mmask8)(U)))
-#define _mm256_extracti32x4_epi32(A, imm) \
- ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
- (int)(imm), \
- (__v4si)_mm_undefined_si128(), \
- (__mmask8)-1))
+#define _mm256_extracti32x4_epi32(A, imm) \
+ ((__m128i)__builtin_ia32_extracti32x4_256_mask( \
+ (__v8si)(__m256i)(A), (int)(imm), (__v4si)_mm_setzero_si128(), \
+ (__mmask8) - 1))
#define _mm256_mask_extracti32x4_epi32(W, U, A, imm) \
((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
diff --git a/clang/lib/Headers/avxintrin.h b/clang/lib/Headers/avxintrin.h
index 696ec31..4aef924 100644
--- a/clang/lib/Headers/avxintrin.h
+++ b/clang/lib/Headers/avxintrin.h
@@ -2941,9 +2941,8 @@ _mm256_testnzc_si256(__m256i __a, __m256i __b) {
/// A 256-bit vector of [4 x double] containing the double-precision
/// floating point values with sign bits to be extracted.
/// \returns The sign bits from the operand, written to bits [3:0].
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_movemask_pd(__m256d __a)
-{
+static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_movemask_pd(__m256d __a) {
return __builtin_ia32_movmskpd256((__v4df)__a);
}
@@ -2959,9 +2958,8 @@ _mm256_movemask_pd(__m256d __a)
/// A 256-bit vector of [8 x float] containing the single-precision floating
/// point values with sign bits to be extracted.
/// \returns The sign bits from the operand, written to bits [7:0].
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_movemask_ps(__m256 __a)
-{
+static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_movemask_ps(__m256 __a) {
return __builtin_ia32_movmskps256((__v8sf)__a);
}
diff --git a/clang/lib/Headers/emmintrin.h b/clang/lib/Headers/emmintrin.h
index 454e9a2..dbe5ca0 100644
--- a/clang/lib/Headers/emmintrin.h
+++ b/clang/lib/Headers/emmintrin.h
@@ -4280,7 +4280,8 @@ _mm_packus_epi16(__m128i __a, __m128i __b) {
/// A 128-bit integer vector containing the values with bits to be extracted.
/// \returns The most significant bits from each 8-bit element in \a __a,
/// written to bits [15:0]. The other bits are assigned zeros.
-static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_epi8(__m128i __a) {
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_movemask_epi8(__m128i __a) {
return __builtin_ia32_pmovmskb128((__v16qi)__a);
}
@@ -4699,7 +4700,8 @@ _mm_unpacklo_pd(__m128d __a, __m128d __b) {
/// be extracted.
/// \returns The sign bits from each of the double-precision elements in \a __a,
/// written to bits [1:0]. The remaining bits are assigned values of zero.
-static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_pd(__m128d __a) {
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_movemask_pd(__m128d __a) {
return __builtin_ia32_movmskpd((__v2df)__a);
}
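With __DEFAULT_FN_ATTRS_CONSTEXPR applied, these movemask intrinsics should become usable in constant expressions. A minimal sketch, assuming a C++ translation unit where the vector initializer is itself constant-evaluable:

  constexpr __m128d v = {1.0, -1.0};          // element 1 has its sign bit set
  static_assert(_mm_movemask_pd(v) == 2, ""); // mask is 0b10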
diff --git a/clang/lib/Headers/smmintrin.h b/clang/lib/Headers/smmintrin.h
index 4f197d5..511a135 100644
--- a/clang/lib/Headers/smmintrin.h
+++ b/clang/lib/Headers/smmintrin.h
@@ -1524,7 +1524,8 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2) {
/// \returns A 128-bit value where bits [15:0] contain the minimum value found
/// in parameter \a __V, bits [18:16] contain the index of the minimum value
/// and the remaining bits are set to 0.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_minpos_epu16(__m128i __V) {
return (__m128i)__builtin_ia32_phminposuw128((__v8hi)__V);
}
diff --git a/clang/lib/Headers/xmmintrin.h b/clang/lib/Headers/xmmintrin.h
index 605409c..fe6afdc 100644
--- a/clang/lib/Headers/xmmintrin.h
+++ b/clang/lib/Headers/xmmintrin.h
@@ -3014,9 +3014,7 @@ _mm_cvtps_pi8(__m128 __a)
/// \returns A 32-bit integer value. Bits [3:0] contain the sign bits from each
/// single-precision floating-point element of the parameter. Bits [31:4] are
/// set to zero.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_ps(__m128 __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR _mm_movemask_ps(__m128 __a) {
return __builtin_ia32_movmskps((__v4sf)__a);
}
diff --git a/clang/lib/Lex/PPLexerChange.cpp b/clang/lib/Lex/PPLexerChange.cpp
index d8f61c0..b014124 100644
--- a/clang/lib/Lex/PPLexerChange.cpp
+++ b/clang/lib/Lex/PPLexerChange.cpp
@@ -302,7 +302,7 @@ void Preprocessor::diagnoseMissingHeaderInUmbrellaDir(const Module &Mod) {
// Check whether this entry has an extension typically associated with
// headers.
if (!StringSwitch<bool>(llvm::sys::path::extension(Entry->path()))
- .Cases(".h", ".H", ".hh", ".hpp", true)
+ .Cases({".h", ".H", ".hh", ".hpp"}, true)
.Default(false))
continue;
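These call sites migrate to the StringSwitch::Cases overload that groups the candidate strings in an initializer list, keeping the mapped value as a separate argument. The shape of the new API, with Ext standing in for any input string:

  bool IsHeader = llvm::StringSwitch<bool>(Ext)
                      .Cases({".h", ".hh", ".hpp"}, true)
                      .Default(false);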
diff --git a/clang/lib/Parse/ParsePragma.cpp b/clang/lib/Parse/ParsePragma.cpp
index 9893381..7c2b928 100644
--- a/clang/lib/Parse/ParsePragma.cpp
+++ b/clang/lib/Parse/ParsePragma.cpp
@@ -1419,10 +1419,11 @@ bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
// Return a valid hint if pragma unroll or nounroll were specified
// without an argument.
- auto IsLoopHint = llvm::StringSwitch<bool>(PragmaNameInfo->getName())
- .Cases("unroll", "nounroll", "unroll_and_jam",
- "nounroll_and_jam", true)
- .Default(false);
+ auto IsLoopHint =
+ llvm::StringSwitch<bool>(PragmaNameInfo->getName())
+ .Cases({"unroll", "nounroll", "unroll_and_jam", "nounroll_and_jam"},
+ true)
+ .Default(false);
if (Toks.empty() && IsLoopHint) {
ConsumeAnnotationToken();
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 652527a..2990fd6 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -6926,13 +6926,13 @@ StringRef Sema::GetFormatStringTypeName(FormatStringType FST) {
FormatStringType Sema::GetFormatStringType(StringRef Flavor) {
return llvm::StringSwitch<FormatStringType>(Flavor)
- .Cases("gnu_scanf", "scanf", FormatStringType::Scanf)
- .Cases("gnu_printf", "printf", "printf0", "syslog",
+ .Cases({"gnu_scanf", "scanf"}, FormatStringType::Scanf)
+ .Cases({"gnu_printf", "printf", "printf0", "syslog"},
FormatStringType::Printf)
- .Cases("NSString", "CFString", FormatStringType::NSString)
- .Cases("gnu_strftime", "strftime", FormatStringType::Strftime)
- .Cases("gnu_strfmon", "strfmon", FormatStringType::Strfmon)
- .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err",
+ .Cases({"NSString", "CFString"}, FormatStringType::NSString)
+ .Cases({"gnu_strftime", "strftime"}, FormatStringType::Strftime)
+ .Cases({"gnu_strfmon", "strfmon"}, FormatStringType::Strfmon)
+ .Cases({"kprintf", "cmn_err", "vcmn_err", "zcmn_err"},
FormatStringType::Kprintf)
.Case("freebsd_kprintf", FormatStringType::FreeBSDKPrintf)
.Case("os_trace", FormatStringType::OSLog)
@@ -12309,13 +12309,20 @@ static void DiagnoseMixedUnicodeImplicitConversion(Sema &S, const Type *Source,
SourceLocation CC) {
assert(Source->isUnicodeCharacterType() && Target->isUnicodeCharacterType() &&
Source != Target);
+
+ // Lone surrogates have a distinct representation in UTF-32.
+ // Converting between UTF-16 and UTF-32 code points is very common,
+ // so don't warn on such conversions.
+ if (Source->isChar16Type() && Target->isChar32Type())
+ return;
+
Expr::EvalResult Result;
if (E->EvaluateAsInt(Result, S.getASTContext(), Expr::SE_AllowSideEffects,
S.isConstantEvaluatedContext())) {
llvm::APSInt Value(32);
Value = Result.Val.getInt();
bool IsASCII = Value <= 0x7F;
- bool IsBMP = Value <= 0xD7FF || (Value >= 0xE000 && Value <= 0xFFFF);
+ bool IsBMP = Value <= 0xDFFF || (Value >= 0xE000 && Value <= 0xFFFF);
bool ConversionPreservesSemantics =
IsASCII || (!Source->isChar8Type() && !Target->isChar8Type() && IsBMP);
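A sketch of the conversion the new early return exempts (illustrative; the other conversion directions are still checked as before):

  char16_t lead = 0xD800; // lone surrogate, still representable in UTF-32
  char32_t wide = lead;   // char16_t -> char32_t: no longer diagnosed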
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index 04a73181..54cbfe4 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -432,7 +432,7 @@ private:
// XXX: It is SLOW! Use it very carefully.
std::optional<MultiLevelTemplateArgumentList> SubstitutionInTemplateArguments(
const NormalizedConstraintWithParamMapping &Constraint,
- MultiLevelTemplateArgumentList MLTAL,
+ const MultiLevelTemplateArgumentList &MLTAL,
llvm::SmallVector<TemplateArgument> &SubstitutedOuterMost);
ExprResult EvaluateSlow(const AtomicConstraint &Constraint,
@@ -564,12 +564,17 @@ ExprResult ConstraintSatisfactionChecker::EvaluateAtomicConstraint(
std::optional<MultiLevelTemplateArgumentList>
ConstraintSatisfactionChecker::SubstitutionInTemplateArguments(
const NormalizedConstraintWithParamMapping &Constraint,
- MultiLevelTemplateArgumentList MLTAL,
- llvm::SmallVector<TemplateArgument> &SubstitutedOuterMost) {
+ const MultiLevelTemplateArgumentList &MLTAL,
+ llvm::SmallVector<TemplateArgument> &SubstitutedOutermost) {
if (!Constraint.hasParameterMapping())
return std::move(MLTAL);
+ // The mapping is empty, meaning no template arguments are needed for
+ // evaluation.
+ if (Constraint.getParameterMapping().empty())
+ return MultiLevelTemplateArgumentList();
+
TemplateDeductionInfo Info(Constraint.getBeginLoc());
Sema::InstantiatingTemplate Inst(
S, Constraint.getBeginLoc(),
@@ -607,7 +612,7 @@ ConstraintSatisfactionChecker::SubstitutionInTemplateArguments(
// The empty MLTAL situation should only occur when evaluating non-dependent
// constraints.
if (MLTAL.getNumSubstitutedLevels())
- SubstitutedOuterMost =
+ SubstitutedOutermost =
llvm::to_vector_of<TemplateArgument>(MLTAL.getOutermost());
unsigned Offset = 0;
for (unsigned I = 0, MappedIndex = 0; I < Used.size(); I++) {
@@ -615,19 +620,19 @@ ConstraintSatisfactionChecker::SubstitutionInTemplateArguments(
if (Used[I])
Arg = S.Context.getCanonicalTemplateArgument(
CTAI.SugaredConverted[MappedIndex++]);
- if (I < SubstitutedOuterMost.size()) {
- SubstitutedOuterMost[I] = Arg;
+ if (I < SubstitutedOutermost.size()) {
+ SubstitutedOutermost[I] = Arg;
Offset = I + 1;
} else {
- SubstitutedOuterMost.push_back(Arg);
- Offset = SubstitutedOuterMost.size();
+ SubstitutedOutermost.push_back(Arg);
+ Offset = SubstitutedOutermost.size();
}
}
- if (Offset < SubstitutedOuterMost.size())
- SubstitutedOuterMost.erase(SubstitutedOuterMost.begin() + Offset);
+ if (Offset < SubstitutedOutermost.size())
+ SubstitutedOutermost.erase(SubstitutedOutermost.begin() + Offset);
MultiLevelTemplateArgumentList SubstitutedTemplateArgs;
- SubstitutedTemplateArgs.addOuterTemplateArguments(TD, SubstitutedOuterMost,
+ SubstitutedTemplateArgs.addOuterTemplateArguments(TD, SubstitutedOutermost,
/*Final=*/false);
return std::move(SubstitutedTemplateArgs);
}
@@ -636,9 +641,9 @@ ExprResult ConstraintSatisfactionChecker::EvaluateSlow(
const AtomicConstraint &Constraint,
const MultiLevelTemplateArgumentList &MLTAL) {
- llvm::SmallVector<TemplateArgument> SubstitutedOuterMost;
+ llvm::SmallVector<TemplateArgument> SubstitutedOutermost;
std::optional<MultiLevelTemplateArgumentList> SubstitutedArgs =
- SubstitutionInTemplateArguments(Constraint, MLTAL, SubstitutedOuterMost);
+ SubstitutionInTemplateArguments(Constraint, MLTAL, SubstitutedOutermost);
if (!SubstitutedArgs) {
Satisfaction.IsSatisfied = false;
return ExprEmpty();
@@ -736,8 +741,9 @@ ExprResult ConstraintSatisfactionChecker::Evaluate(
UnsubstitutedConstraintSatisfactionCacheResult Cache;
Cache.Satisfaction.ContainsErrors = Satisfaction.ContainsErrors;
Cache.Satisfaction.IsSatisfied = Satisfaction.IsSatisfied;
- std::copy(Satisfaction.Details.begin() + Size, Satisfaction.Details.end(),
- std::back_inserter(Cache.Satisfaction.Details));
+ Cache.Satisfaction.Details.insert(Cache.Satisfaction.Details.end(),
+ Satisfaction.Details.begin() + Size,
+ Satisfaction.Details.end());
Cache.SubstExpr = E;
S.UnsubstitutedConstraintSatisfactionCache.insert({ID, std::move(Cache)});
@@ -786,13 +792,13 @@ ExprResult ConstraintSatisfactionChecker::EvaluateSlow(
FoldExpandedConstraint::FoldOperatorKind::And;
unsigned EffectiveDetailEndIndex = Satisfaction.Details.size();
- llvm::SmallVector<TemplateArgument> SubstitutedOuterMost;
+ llvm::SmallVector<TemplateArgument> SubstitutedOutermost;
// FIXME: Is PackSubstitutionIndex correct?
llvm::SaveAndRestore _(PackSubstitutionIndex, S.ArgPackSubstIndex);
std::optional<MultiLevelTemplateArgumentList> SubstitutedArgs =
SubstitutionInTemplateArguments(
static_cast<const NormalizedConstraintWithParamMapping &>(Constraint),
- MLTAL, SubstitutedOuterMost);
+ MLTAL, SubstitutedOutermost);
if (!SubstitutedArgs) {
Satisfaction.IsSatisfied = false;
return ExprError();
@@ -868,8 +874,9 @@ ExprResult ConstraintSatisfactionChecker::Evaluate(
UnsubstitutedConstraintSatisfactionCacheResult Cache;
Cache.Satisfaction.ContainsErrors = Satisfaction.ContainsErrors;
Cache.Satisfaction.IsSatisfied = Satisfaction.IsSatisfied;
- std::copy(Satisfaction.Details.begin() + Size, Satisfaction.Details.end(),
- std::back_inserter(Cache.Satisfaction.Details));
+ Cache.Satisfaction.Details.insert(Cache.Satisfaction.Details.end(),
+ Satisfaction.Details.begin() + Size,
+ Satisfaction.Details.end());
Cache.SubstExpr = E;
S.UnsubstitutedConstraintSatisfactionCache.insert({ID, std::move(Cache)});
return E;
@@ -880,9 +887,9 @@ ExprResult ConstraintSatisfactionChecker::EvaluateSlow(
const MultiLevelTemplateArgumentList &MLTAL, unsigned Size) {
const ConceptReference *ConceptId = Constraint.getConceptId();
- llvm::SmallVector<TemplateArgument> SubstitutedOuterMost;
+ llvm::SmallVector<TemplateArgument> SubstitutedOutermost;
std::optional<MultiLevelTemplateArgumentList> SubstitutedArgs =
- SubstitutionInTemplateArguments(Constraint, MLTAL, SubstitutedOuterMost);
+ SubstitutionInTemplateArguments(Constraint, MLTAL, SubstitutedOutermost);
if (!SubstitutedArgs) {
Satisfaction.IsSatisfied = false;
@@ -1012,8 +1019,9 @@ ExprResult ConstraintSatisfactionChecker::Evaluate(
UnsubstitutedConstraintSatisfactionCacheResult Cache;
Cache.Satisfaction.ContainsErrors = Satisfaction.ContainsErrors;
Cache.Satisfaction.IsSatisfied = Satisfaction.IsSatisfied;
- std::copy(Satisfaction.Details.begin() + Size, Satisfaction.Details.end(),
- std::back_inserter(Cache.Satisfaction.Details));
+ Cache.Satisfaction.Details.insert(Cache.Satisfaction.Details.end(),
+ Satisfaction.Details.begin() + Size,
+ Satisfaction.Details.end());
Cache.SubstExpr = CE;
S.UnsubstitutedConstraintSatisfactionCache.insert({ID, std::move(Cache)});
return CE;
@@ -1217,10 +1225,10 @@ bool Sema::CheckConstraintSatisfaction(
return false;
}
-static const ExprResult
-SubstituteConceptsInConstrainExpression(Sema &S, const NamedDecl *D,
- const ConceptSpecializationExpr *CSE,
- UnsignedOrNone SubstIndex) {
+static ExprResult
+SubstituteConceptsInConstraintExpression(Sema &S, const NamedDecl *D,
+ const ConceptSpecializationExpr *CSE,
+ UnsignedOrNone SubstIndex) {
// [C++2c] [temp.constr.normal]
// Otherwise, to form CE, any non-dependent concept template argument Ai
@@ -1255,7 +1263,7 @@ bool Sema::CheckConstraintSatisfaction(
const ConceptSpecializationExpr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction) {
- ExprResult Res = SubstituteConceptsInConstrainExpression(
+ ExprResult Res = SubstituteConceptsInConstraintExpression(
*this, nullptr, ConstraintExpr, ArgPackSubstIndex);
if (!Res.isUsable())
return true;
@@ -2017,8 +2025,13 @@ void SubstituteParameterMappings::buildParameterMapping(
SemaRef.MarkUsedTemplateParameters(Args->arguments(),
/*Depth=*/0, OccurringIndices);
}
+ unsigned Size = OccurringIndices.count();
+ // When the constraint is independent of any template parameters,
+ // we build an empty mapping so that we can distinguish these cases
+ // from cases where no mapping exists at all, e.g. when there are only atomic
+ // constraints.
TemplateArgumentLoc *TempArgs =
- new (SemaRef.Context) TemplateArgumentLoc[OccurringIndices.count()];
+ new (SemaRef.Context) TemplateArgumentLoc[Size];
llvm::SmallVector<NamedDecl *> UsedParams;
for (unsigned I = 0, J = 0, C = TemplateParams->size(); I != C; ++I) {
SourceLocation Loc = ArgsAsWritten->NumTemplateArgs > I
@@ -2039,7 +2052,6 @@ void SubstituteParameterMappings::buildParameterMapping(
TemplateParams->getLAngleLoc(), UsedParams,
/*RAngleLoc=*/SourceLocation(),
/*RequiresClause=*/nullptr);
- unsigned Size = OccurringIndices.count();
N.updateParameterMapping(
std::move(OccurringIndices), std::move(OccurringIndicesForSubsumption),
MutableArrayRef<TemplateArgumentLoc>{TempArgs, Size}, UsedList);
@@ -2050,6 +2062,10 @@ bool SubstituteParameterMappings::substitute(
if (!N.hasParameterMapping())
buildParameterMapping(N);
+ // If the parameter mapping is empty, there is nothing to substitute.
+ if (N.getParameterMapping().empty())
+ return false;
+
SourceLocation InstLocBegin, InstLocEnd;
llvm::ArrayRef Arguments = ArgsAsWritten->arguments();
if (Arguments.empty()) {
@@ -2289,7 +2305,7 @@ NormalizedConstraint *NormalizedConstraint::fromConstraintExpr(
ConceptDecl *CD = CSE->getNamedConcept()->getCanonicalDecl();
ExprResult Res =
- SubstituteConceptsInConstrainExpression(S, D, CSE, SubstIndex);
+ SubstituteConceptsInConstraintExpression(S, D, CSE, SubstIndex);
if (!Res.isUsable())
return nullptr;
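For reference, the empty-mapping path added above corresponds to an atomic constraint that mentions no template parameters, along the lines of this hypothetical sketch:

  template <class T>
  concept Trivial = (sizeof(int) > 0); // uses no template parameter

  template <class T>
    requires Trivial<T> // the atomic constraint gets an empty parameter mapping
  void f();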
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index dca9d6e..a50c276 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -12811,7 +12811,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
if (auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
if (CTSD->isInStdNamespace() &&
llvm::StringSwitch<bool>(CTSD->getName())
- .Cases("less", "less_equal", "greater", "greater_equal", true)
+ .Cases({"less", "less_equal", "greater", "greater_equal"}, true)
.Default(false)) {
if (RHSType->isNullPtrType())
RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer);
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp
index f7974eb..7debe33 100644
--- a/clang/lib/Sema/SemaInit.cpp
+++ b/clang/lib/Sema/SemaInit.cpp
@@ -672,11 +672,12 @@ ExprResult InitListChecker::PerformEmptyInit(SourceLocation Loc,
IsInStd = true;
}
- if (IsInStd && llvm::StringSwitch<bool>(R->getName())
- .Cases("basic_string", "deque", "forward_list", true)
- .Cases("list", "map", "multimap", "multiset", true)
- .Cases("priority_queue", "queue", "set", "stack", true)
- .Cases("unordered_map", "unordered_set", "vector", true)
+ if (IsInStd &&
+ llvm::StringSwitch<bool>(R->getName())
+ .Cases({"basic_string", "deque", "forward_list"}, true)
+ .Cases({"list", "map", "multimap", "multiset"}, true)
+ .Cases({"priority_queue", "queue", "set", "stack"}, true)
+ .Cases({"unordered_map", "unordered_set", "vector"}, true)
.Default(false)) {
InitSeq.InitializeFrom(
SemaRef, Entity,
diff --git a/clang/lib/Sema/SemaOpenACC.cpp b/clang/lib/Sema/SemaOpenACC.cpp
index ca99834..3bb8080 100644
--- a/clang/lib/Sema/SemaOpenACC.cpp
+++ b/clang/lib/Sema/SemaOpenACC.cpp
@@ -2996,6 +2996,8 @@ bool SemaOpenACC::CreateReductionCombinerRecipe(
case OpenACCReductionOperator::Max:
case OpenACCReductionOperator::Min:
+ BinOp = BinaryOperatorKind::BO_LT;
+ break;
case OpenACCReductionOperator::And:
case OpenACCReductionOperator::Or:
// We just want a 'NYI' error in the backend, so leave an empty combiner
@@ -3011,26 +3013,80 @@ bool SemaOpenACC::CreateReductionCombinerRecipe(
assert(!VarTy->isArrayType() && "Only 1 level of array allowed");
+ enum class CombinerFailureKind {
+ None = 0,
+ BinOp = 1,
+ Conditional = 2,
+ Assignment = 3,
+ };
+
+ auto genCombiner = [&, this](DeclRefExpr *LHSDRE, DeclRefExpr *RHSDRE)
+ -> std::pair<ExprResult, CombinerFailureKind> {
+ ExprResult BinOpRes =
+ SemaRef.BuildBinOp(SemaRef.getCurScope(), Loc, BinOp, LHSDRE, RHSDRE,
+ /*ForFoldExpr=*/false);
+ switch (ReductionOperator) {
+ case OpenACCReductionOperator::Addition:
+ case OpenACCReductionOperator::Multiplication:
+ case OpenACCReductionOperator::BitwiseAnd:
+ case OpenACCReductionOperator::BitwiseOr:
+ case OpenACCReductionOperator::BitwiseXOr:
+ // These five are simple and are implemented as compound operators, so we
+ // can return immediately here.
+ return {BinOpRes, BinOpRes.isUsable() ? CombinerFailureKind::None
+ : CombinerFailureKind::BinOp};
+ case OpenACCReductionOperator::Max:
+ case OpenACCReductionOperator::Min: {
+ // These are done as:
+ // LHS = (LHS < RHS) ? LHS : RHS; and LHS = (LHS < RHS) ? RHS : LHS;
+ //
+ // The BinOpRes should have been created with the less-than, so we just
+ // have to build the conditional and assignment.
+ if (!BinOpRes.isUsable())
+ return {BinOpRes, CombinerFailureKind::BinOp};
+
+ // Create the correct conditional operator, swapping the results
+ // (true/false value) depending on min/max.
+ ExprResult CondRes;
+ if (ReductionOperator == OpenACCReductionOperator::Min)
+ CondRes = SemaRef.ActOnConditionalOp(Loc, Loc, BinOpRes.get(), LHSDRE,
+ RHSDRE);
+ else
+ CondRes = SemaRef.ActOnConditionalOp(Loc, Loc, BinOpRes.get(), RHSDRE,
+ LHSDRE);
+
+ if (!CondRes.isUsable())
+ return {CondRes, CombinerFailureKind::Conditional};
+
+ // Build assignment.
+ ExprResult Assignment = SemaRef.BuildBinOp(SemaRef.getCurScope(), Loc,
+ BinaryOperatorKind::BO_Assign,
+ LHSDRE, CondRes.get(),
+ /*ForFoldExpr=*/false);
+ return {Assignment, Assignment.isUsable()
+ ? CombinerFailureKind::None
+ : CombinerFailureKind::Assignment};
+ }
+ case OpenACCReductionOperator::And:
+ case OpenACCReductionOperator::Or:
+ llvm_unreachable("And/Or not implemented, but should fail earlier");
+ case OpenACCReductionOperator::Invalid:
+ llvm_unreachable("Invalid should have been caught above");
+ }
+ };
+
auto tryCombiner = [&, this](DeclRefExpr *LHSDRE, DeclRefExpr *RHSDRE,
bool IncludeTrap) {
- // TODO: OpenACC: we have to figure out based on the bin-op how to do the
- // ones that we can't just use compound operators for. So &&, ||, max, and
- // min aren't really clear what we could do here.
if (IncludeTrap) {
// Trap all of the errors here, we'll emit our own at the end.
Sema::TentativeAnalysisScope Trap{SemaRef};
-
- return SemaRef.BuildBinOp(SemaRef.getCurScope(), Loc, BinOp, LHSDRE,
- RHSDRE,
- /*ForFoldExpr=*/false);
- } else {
- return SemaRef.BuildBinOp(SemaRef.getCurScope(), Loc, BinOp, LHSDRE,
- RHSDRE,
- /*ForFoldExpr=*/false);
+ return genCombiner(LHSDRE, RHSDRE);
}
+ return genCombiner(LHSDRE, RHSDRE);
};
struct CombinerAttemptTy {
+ CombinerFailureKind FailKind;
VarDecl *LHS;
DeclRefExpr *LHSDRE;
VarDecl *RHS;
@@ -3058,9 +3114,11 @@ bool SemaOpenACC::CreateReductionCombinerRecipe(
RHSDecl->getBeginLoc()},
Ty, clang::VK_LValue, RHSDecl, nullptr, NOUR_None);
- ExprResult BinOpResult = tryCombiner(LHSDRE, RHSDRE, /*IncludeTrap=*/true);
+ std::pair<ExprResult, CombinerFailureKind> BinOpResult =
+ tryCombiner(LHSDRE, RHSDRE, /*IncludeTrap=*/true);
- return {LHSDecl, LHSDRE, RHSDecl, RHSDRE, BinOpResult.get()};
+ return {BinOpResult.second, LHSDecl, LHSDRE, RHSDecl, RHSDRE,
+ BinOpResult.first.get()};
};
CombinerAttemptTy TopLevelCombinerInfo = formCombiner(VarTy);
@@ -3081,12 +3139,20 @@ bool SemaOpenACC::CreateReductionCombinerRecipe(
}
}
+ auto EmitFailureNote = [&](CombinerFailureKind CFK) {
+ if (CFK == CombinerFailureKind::BinOp)
+ return Diag(Loc, diag::note_acc_reduction_combiner_forming)
+ << CFK << BinaryOperator::getOpcodeStr(BinOp);
+ return Diag(Loc, diag::note_acc_reduction_combiner_forming) << CFK;
+ };
+
// Since the 'root' level didn't fail, the only thing that could be successful
// is a struct that we decompose on its individual fields.
RecordDecl *RD = VarTy->getAsRecordDecl();
if (!RD) {
Diag(Loc, diag::err_acc_reduction_recipe_no_op) << VarTy;
+ EmitFailureNote(TopLevelCombinerInfo.FailKind);
tryCombiner(TopLevelCombinerInfo.LHSDRE, TopLevelCombinerInfo.RHSDRE,
/*IncludeTrap=*/false);
return true;
@@ -3098,6 +3164,7 @@ bool SemaOpenACC::CreateReductionCombinerRecipe(
if (!FieldCombinerInfo.Op || FieldCombinerInfo.Op->containsErrors()) {
Diag(Loc, diag::err_acc_reduction_recipe_no_op) << FD->getType();
Diag(FD->getBeginLoc(), diag::note_acc_reduction_recipe_noop_field) << RD;
+ EmitFailureNote(FieldCombinerInfo.FailKind);
tryCombiner(FieldCombinerInfo.LHSDRE, FieldCombinerInfo.RHSDRE,
/*IncludeTrap=*/false);
return true;
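In source terms, the min/max combiners built above reduce to the compare-select-assign sequence already spelled out in the hunk's comment; for hypothetical scalars lhs and rhs:

  lhs = (lhs < rhs) ? lhs : rhs; // min
  lhs = (lhs < rhs) ? rhs : lhs; // max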
diff --git a/clang/lib/Sema/SemaStmtAttr.cpp b/clang/lib/Sema/SemaStmtAttr.cpp
index 50acc83..27fd556 100644
--- a/clang/lib/Sema/SemaStmtAttr.cpp
+++ b/clang/lib/Sema/SemaStmtAttr.cpp
@@ -81,7 +81,7 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
StringRef PragmaName =
llvm::StringSwitch<StringRef>(
PragmaNameLoc->getIdentifierInfo()->getName())
- .Cases("unroll", "nounroll", "unroll_and_jam", "nounroll_and_jam",
+ .Cases({"unroll", "nounroll", "unroll_and_jam", "nounroll_and_jam"},
PragmaNameLoc->getIdentifierInfo()->getName())
.Default("clang loop");
diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp
index ca7e3b2..7f85805 100644
--- a/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -639,15 +639,8 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(
}
Invalid = SemaRef.pushCodeSynthesisContext(Inst);
- if (!Invalid) {
- AlreadyInstantiating =
- !Inst.Entity
- ? false
- : !SemaRef.InstantiatingSpecializations
- .insert({Inst.Entity->getCanonicalDecl(), Inst.Kind})
- .second;
+ if (!Invalid)
atTemplateBegin(SemaRef.TemplateInstCallbacks, SemaRef, Inst);
- }
}
Sema::InstantiatingTemplate::InstantiatingTemplate(
@@ -902,13 +895,6 @@ void Sema::popCodeSynthesisContext() {
void Sema::InstantiatingTemplate::Clear() {
if (!Invalid) {
- if (!AlreadyInstantiating) {
- auto &Active = SemaRef.CodeSynthesisContexts.back();
- if (Active.Entity)
- SemaRef.InstantiatingSpecializations.erase(
- {Active.Entity->getCanonicalDecl(), Active.Kind});
- }
-
atTemplateEnd(SemaRef.TemplateInstCallbacks, SemaRef,
SemaRef.CodeSynthesisContexts.back());
@@ -2864,9 +2850,9 @@ TemplateInstantiator::TransformNestedRequirement(
TemplateArgs, Constraint->getSourceRange(), Satisfaction,
/*TopLevelConceptId=*/nullptr, &NewConstraint);
- assert(!Success || !Trap.hasErrorOccurred() &&
- "Substitution failures must be handled "
- "by CheckConstraintSatisfaction.");
+ assert((!Success || !Trap.hasErrorOccurred()) &&
+ "Substitution failures must be handled "
+ "by CheckConstraintSatisfaction.");
}
if (!Success || Satisfaction.HasSubstitutionFailure())
@@ -3312,17 +3298,20 @@ bool Sema::SubstDefaultArgument(
FunctionDecl *FD = cast<FunctionDecl>(Param->getDeclContext());
Expr *PatternExpr = Param->getUninstantiatedDefaultArg();
+ RecursiveInstGuard AlreadyInstantiating(
+ *this, Param, RecursiveInstGuard::Kind::DefaultArgument);
+ if (AlreadyInstantiating) {
+ Param->setInvalidDecl();
+ return Diag(Param->getBeginLoc(), diag::err_recursive_default_argument)
+ << FD << PatternExpr->getSourceRange();
+ }
+
EnterExpressionEvaluationContext EvalContext(
*this, ExpressionEvaluationContext::PotentiallyEvaluated, Param);
InstantiatingTemplate Inst(*this, Loc, Param, TemplateArgs.getInnermost());
if (Inst.isInvalid())
return true;
- if (Inst.isAlreadyInstantiating()) {
- Diag(Param->getBeginLoc(), diag::err_recursive_default_argument) << FD;
- Param->setInvalidDecl();
- return true;
- }
ExprResult Result;
// C++ [dcl.fct.default]p5:
@@ -3554,12 +3543,26 @@ namespace clang {
}
}
-bool
-Sema::InstantiateClass(SourceLocation PointOfInstantiation,
- CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
- const MultiLevelTemplateArgumentList &TemplateArgs,
- TemplateSpecializationKind TSK,
- bool Complain) {
+bool Sema::InstantiateClass(SourceLocation PointOfInstantiation,
+ CXXRecordDecl *Instantiation,
+ CXXRecordDecl *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK, bool Complain) {
+#ifndef NDEBUG
+ RecursiveInstGuard AlreadyInstantiating(*this, Instantiation,
+ RecursiveInstGuard::Kind::Template);
+ assert(!AlreadyInstantiating && "should have been caught by caller");
+#endif
+
+ return InstantiateClassImpl(PointOfInstantiation, Instantiation, Pattern,
+ TemplateArgs, TSK, Complain);
+}
+
+bool Sema::InstantiateClassImpl(
+ SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation,
+ CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK, bool Complain) {
+
CXXRecordDecl *PatternDef
= cast_or_null<CXXRecordDecl>(Pattern->getDefinition());
if (DiagnoseUninstantiableTemplate(PointOfInstantiation, Instantiation,
@@ -3596,7 +3599,6 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
if (Inst.isInvalid())
return true;
- assert(!Inst.isAlreadyInstantiating() && "should have been caught by caller");
PrettyDeclStackTraceEntry CrashInfo(Context, Instantiation, SourceLocation(),
"instantiating class definition");
@@ -3808,6 +3810,12 @@ bool Sema::InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK) {
+#ifndef NDEBUG
+ RecursiveInstGuard AlreadyInstantiating(*this, Instantiation,
+ RecursiveInstGuard::Kind::Template);
+ assert(!AlreadyInstantiating && "should have been caught by caller");
+#endif
+
EnumDecl *PatternDef = Pattern->getDefinition();
if (DiagnoseUninstantiableTemplate(PointOfInstantiation, Instantiation,
Instantiation->getInstantiatedFromMemberEnum(),
@@ -3825,8 +3833,6 @@ bool Sema::InstantiateEnum(SourceLocation PointOfInstantiation,
InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
if (Inst.isInvalid())
return true;
- if (Inst.isAlreadyInstantiating())
- return false;
PrettyDeclStackTraceEntry CrashInfo(Context, Instantiation, SourceLocation(),
"instantiating enum definition");
@@ -3865,6 +3871,14 @@ bool Sema::InstantiateInClassInitializer(
Pattern->getInClassInitStyle() &&
"pattern and instantiation disagree about init style");
+ RecursiveInstGuard AlreadyInstantiating(*this, Instantiation,
+ RecursiveInstGuard::Kind::Template);
+ if (AlreadyInstantiating)
+ // Error out if we hit an instantiation cycle for this initializer.
+ return Diag(PointOfInstantiation,
+ diag::err_default_member_initializer_cycle)
+ << Instantiation;
+
// Error out if we haven't parsed the initializer of the pattern yet because
// we are waiting for the closing brace of the outer class.
Expr *OldInit = Pattern->getInClassInitializer();
@@ -3883,12 +3897,6 @@ bool Sema::InstantiateInClassInitializer(
InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
if (Inst.isInvalid())
return true;
- if (Inst.isAlreadyInstantiating()) {
- // Error out if we hit an instantiation cycle for this initializer.
- Diag(PointOfInstantiation, diag::err_default_member_initializer_cycle)
- << Instantiation;
- return true;
- }
PrettyDeclStackTraceEntry CrashInfo(Context, Instantiation, SourceLocation(),
"instantiating default member init");
@@ -3972,8 +3980,6 @@ static ActionResult<CXXRecordDecl *> getPatternForClassTemplateSpecialization(
Sema::InstantiatingTemplate Inst(S, PointOfInstantiation, ClassTemplateSpec);
if (Inst.isInvalid())
return {/*Invalid=*/true};
- if (Inst.isAlreadyInstantiating())
- return {/*Invalid=*/false};
llvm::PointerUnion<ClassTemplateDecl *,
ClassTemplatePartialSpecializationDecl *>
@@ -4136,6 +4142,11 @@ bool Sema::InstantiateClassTemplateSpecialization(
if (ClassTemplateSpec->isInvalidDecl())
return true;
+ Sema::RecursiveInstGuard AlreadyInstantiating(
+ *this, ClassTemplateSpec, Sema::RecursiveInstGuard::Kind::Template);
+ if (AlreadyInstantiating)
+ return false;
+
bool HadAvaibilityWarning =
ShouldDiagnoseAvailabilityOfDecl(ClassTemplateSpec, nullptr, nullptr)
.first != AR_Available;
@@ -4148,7 +4159,7 @@ bool Sema::InstantiateClassTemplateSpecialization(
if (!Pattern.isUsable())
return Pattern.isInvalid();
- bool Err = InstantiateClass(
+ bool Err = InstantiateClassImpl(
PointOfInstantiation, ClassTemplateSpec, Pattern.get(),
getTemplateInstantiationArgs(ClassTemplateSpec), TSK, Complain);
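A hypothetical sketch of the self-referential default argument the new up-front RecursiveInstGuard check is meant to reject (the real coverage lives in the test suite):

  template <typename T>
  struct S {
    static int f(int x = f()); // needs f's own default argument again
  };
  int use = S<int>::f(); // would be diagnosed via err_recursive_default_argument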
diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 4863b45..28925cc 100644
--- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -5312,6 +5312,16 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
if (Proto->getExceptionSpecType() != EST_Uninstantiated)
return;
+ RecursiveInstGuard AlreadyInstantiating(
+ *this, Decl, RecursiveInstGuard::Kind::ExceptionSpec);
+ if (AlreadyInstantiating) {
+ // This exception specification indirectly depends on itself. Reject.
+ // FIXME: Corresponding rule in the standard?
+ Diag(PointOfInstantiation, diag::err_exception_spec_cycle) << Decl;
+ UpdateExceptionSpec(Decl, EST_None);
+ return;
+ }
+
InstantiatingTemplate Inst(*this, PointOfInstantiation, Decl,
InstantiatingTemplate::ExceptionSpecification());
if (Inst.isInvalid()) {
@@ -5320,13 +5330,6 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
UpdateExceptionSpec(Decl, EST_None);
return;
}
- if (Inst.isAlreadyInstantiating()) {
- // This exception specification indirectly depends on itself. Reject.
- // FIXME: Corresponding rule in the standard?
- Diag(PointOfInstantiation, diag::err_exception_spec_cycle) << Decl;
- UpdateExceptionSpec(Decl, EST_None);
- return;
- }
// Enter the scope of this instantiation. We don't use
// PushDeclContext because we don't have a scope.
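Likewise, a hypothetical sketch of an exception specification that depends on its own instantiation, which the ExceptionSpec guard now rejects before entering the instantiation:

  template <typename T>
  struct S {
    static void f() noexcept(noexcept(f())); // spec queries itself
  };
  void use() { S<int>::f(); } // would be diagnosed via err_exception_spec_cycle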
@@ -5386,8 +5389,6 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
if (ActiveInst.Kind == ActiveInstType::ExplicitTemplateArgumentSubstitution ||
ActiveInst.Kind == ActiveInstType::DeducedTemplateArgumentSubstitution) {
if (isa<FunctionTemplateDecl>(ActiveInst.Entity)) {
- SemaRef.InstantiatingSpecializations.erase(
- {ActiveInst.Entity->getCanonicalDecl(), ActiveInst.Kind});
atTemplateEnd(SemaRef.TemplateInstCallbacks, SemaRef, ActiveInst);
ActiveInst.Kind = ActiveInstType::TemplateInstantiation;
ActiveInst.Entity = New;
@@ -5545,6 +5546,12 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
Function = const_cast<FunctionDecl*>(ExistingDefn);
}
+#ifndef NDEBUG
+ RecursiveInstGuard AlreadyInstantiating(*this, Function,
+ RecursiveInstGuard::Kind::Template);
+ assert(!AlreadyInstantiating && "should have been caught by caller");
+#endif
+
// Find the function body that we'll be substituting.
const FunctionDecl *PatternDecl = Function->getTemplateInstantiationPattern();
assert(PatternDecl && "instantiating a non-template");
@@ -5684,7 +5691,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
}
InstantiatingTemplate Inst(*this, PointOfInstantiation, Function);
- if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
+ if (Inst.isInvalid())
return;
PrettyDeclStackTraceEntry CrashInfo(Context, Function, SourceLocation(),
"instantiating function definition");
@@ -6253,6 +6260,11 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
if (TSK == TSK_ExplicitSpecialization)
return;
+ RecursiveInstGuard AlreadyInstantiating(*this, Var,
+ RecursiveInstGuard::Kind::Template);
+ if (AlreadyInstantiating)
+ return;
+
// Find the pattern and the arguments to substitute into it.
VarDecl *PatternDecl = Var->getTemplateInstantiationPattern();
assert(PatternDecl && "no pattern for templated variable");
@@ -6276,7 +6288,7 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
// FIXME: Factor out the duplicated instantiation context setup/tear down
// code here.
InstantiatingTemplate Inst(*this, PointOfInstantiation, Var);
- if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
+ if (Inst.isInvalid())
return;
PrettyDeclStackTraceEntry CrashInfo(Context, Var, SourceLocation(),
"instantiating variable initializer");
@@ -6380,7 +6392,7 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
}
InstantiatingTemplate Inst(*this, PointOfInstantiation, Var);
- if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
+ if (Inst.isInvalid())
return;
PrettyDeclStackTraceEntry CrashInfo(Context, Var, SourceLocation(),
"instantiating variable definition");
diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h
index 29f0c30..0c8c1d1 100644
--- a/clang/lib/Sema/TreeTransform.h
+++ b/clang/lib/Sema/TreeTransform.h
@@ -16430,12 +16430,16 @@ ExprResult TreeTransform<Derived>::TransformSubstNonTypeTemplateParmExpr(
AssociatedDecl == E->getAssociatedDecl())
return E;
- auto getParamAndType = [Index = E->getIndex()](Decl *AssociatedDecl)
+ auto getParamAndType = [E](Decl *AssociatedDecl)
-> std::tuple<NonTypeTemplateParmDecl *, QualType> {
- auto [PDecl, Arg] = getReplacedTemplateParameter(AssociatedDecl, Index);
+ auto [PDecl, Arg] =
+ getReplacedTemplateParameter(AssociatedDecl, E->getIndex());
auto *Param = cast<NonTypeTemplateParmDecl>(PDecl);
- return {Param, Arg.isNull() ? Param->getType()
- : Arg.getNonTypeTemplateArgumentType()};
+ if (Arg.isNull())
+ return {Param, Param->getType()};
+ if (UnsignedOrNone PackIndex = E->getPackIndex())
+ Arg = Arg.getPackAsArray()[*PackIndex];
+ return {Param, Arg.getNonTypeTemplateArgumentType()};
};
// If the replacement expression did not change, and the parameter type
diff --git a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index f6a3e79..871400e 100644
--- a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -723,6 +723,7 @@ AnalysisConsumer::getModeForDecl(Decl *D, AnalysisMode Mode) {
}
static UnsignedEPStat PathRunningTime("PathRunningTime");
+static UnsignedEPStat SyntaxRunningTime("SyntaxRunningTime");
void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
ExprEngine::InliningModes IMode,
@@ -761,6 +762,8 @@ void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
SyntaxCheckTimer->stopTimer();
llvm::TimeRecord CheckerEndTime = SyntaxCheckTimer->getTotalTime();
CheckerEndTime -= CheckerStartTime;
+ FunctionSummaries.findOrInsertSummary(D)->second.SyntaxRunningTime =
+ std::lround(CheckerEndTime.getWallTime() * 1000);
DisplayTime(CheckerEndTime);
if (AnalyzerTimers && ShouldClearTimersToPreventDisplayingThem) {
AnalyzerTimers->clear();
@@ -792,11 +795,23 @@ void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
if (!CFG)
return;
+ CFGSize.set(CFG->size());
+
+ auto *DeclContext = Mgr->getAnalysisDeclContext(D);
// See if the LiveVariables analysis scales.
- if (!Mgr->getAnalysisDeclContext(D)->getAnalysis<RelaxedLiveVariables>())
+ if (!DeclContext->getAnalysis<RelaxedLiveVariables>())
return;
- CFGSize.set(CFG->size());
+ // The declaration in DeclContext is the redeclaration of D that has a body.
+ const Decl *DefDecl = DeclContext->getDecl();
+
+ // Get the SyntaxRunningTime from the function summary: it is computed
+ // during the AM_Syntax analysis, which runs at a different point in time
+ // and in a different order, but always before AM_Path.
+ if (const auto *Summary = FunctionSummaries.findSummary(DefDecl);
+ Summary && Summary->SyntaxRunningTime.has_value()) {
+ SyntaxRunningTime.set(*Summary->SyntaxRunningTime);
+ }
ExprEngine Eng(CTU, *Mgr, VisitedCallees, &FunctionSummaries, IMode);
diff --git a/clang/test/AST/ByteCode/c.c b/clang/test/AST/ByteCode/c.c
index 657a920..cfdc9d0 100644
--- a/clang/test/AST/ByteCode/c.c
+++ b/clang/test/AST/ByteCode/c.c
@@ -372,3 +372,12 @@ void discardedCmp(void)
/// ArraySubscriptExpr that's not an lvalue
typedef unsigned char U __attribute__((vector_size(1)));
void nonLValueASE(U f) { f[0] = f[((U)(U){0})[0]]; }
+
+static char foo_(a) // all-warning {{definition without a prototype}}
+ char a;
+{
+ return 'a';
+}
+static void bar_(void) {
+ foo_(foo_(1));
+}
diff --git a/clang/test/AST/ByteCode/extern.cpp b/clang/test/AST/ByteCode/extern.cpp
index a616269..c321593 100644
--- a/clang/test/AST/ByteCode/extern.cpp
+++ b/clang/test/AST/ByteCode/extern.cpp
@@ -1,9 +1,11 @@
// RUN: %clang_cc1 -fexperimental-new-constant-interpreter -verify=both,expected %s
-// RUN: %clang_cc1 -verify=both,ref %s
-
+// RUN: %clang_cc1 -verify=both,ref %s
// both-no-diagnostics
+extern const double Num;
+extern const double Num = 12;
+
extern const int E;
constexpr int getE() {
return E;
diff --git a/clang/test/Analysis/analyzer-stats/entry-point-stats.cpp b/clang/test/Analysis/analyzer-stats/entry-point-stats.cpp
index ebbc015..3ff3bb1 100644
--- a/clang/test/Analysis/analyzer-stats/entry-point-stats.cpp
+++ b/clang/test/Analysis/analyzer-stats/entry-point-stats.cpp
@@ -10,6 +10,7 @@
// CHECK-NEXT: "DebugName": "fib(unsigned int)",
// CHECK-NEXT: "CFGSize": "5",
// CHECK-NEXT: "PathRunningTime": "{{[0-9]+}}",
+// CHECK-NEXT: "SyntaxRunningTime": "{{[0-9]+}}",
// CHECK-NEXT: "MaxBugClassSize": "{{[0-9]+}}",
// CHECK-NEXT: "MaxQueueSize": "{{[0-9]+}}",
// CHECK-NEXT: "MaxReachableSize": "{{[0-9]+}}",
@@ -47,6 +48,7 @@
// CHECK-NEXT: "DebugName": "main(int, char **)",
// CHECK-NEXT: "CFGSize": "3",
// CHECK-NEXT: "PathRunningTime": "{{[0-9]+}}",
+// CHECK-NEXT: "SyntaxRunningTime": "{{[0-9]+}}",
// CHECK-NEXT: "MaxBugClassSize": "{{[0-9]+}}",
// CHECK-NEXT: "MaxQueueSize": "{{[0-9]+}}",
// CHECK-NEXT: "MaxReachableSize": "{{[0-9]+}}",
diff --git a/clang/test/Analysis/z3-crosscheck-max-attempts.cpp b/clang/test/Analysis/z3-crosscheck-max-attempts.cpp
index 8439236..ab66fe9 100644
--- a/clang/test/Analysis/z3-crosscheck-max-attempts.cpp
+++ b/clang/test/Analysis/z3-crosscheck-max-attempts.cpp
@@ -3,10 +3,10 @@
// RUN: | FileCheck %s --match-full-lines
// CHECK: crosscheck-with-z3-max-attempts-per-query = 3
-// DEFINE: %{mocked_clang} = \
-// DEFINE: LD_PRELOAD="%llvmshlibdir/MockZ3SolverCheck%pluginext" \
-// DEFINE: %clang_analyze_cc1 %s \
-// DEFINE: -analyzer-config crosscheck-with-z3=true \
+// DEFINE: %{mocked_clang} = \
+// DEFINE: env LD_PRELOAD="%llvmshlibdir/MockZ3SolverCheck%pluginext" \
+// DEFINE: %clang_analyze_cc1 %s \
+// DEFINE: -analyzer-config crosscheck-with-z3=true \
// DEFINE: -analyzer-checker=core
// DEFINE: %{attempts} = -analyzer-config crosscheck-with-z3-max-attempts-per-query
@@ -14,17 +14,17 @@
// RUN: not %clang_analyze_cc1 %{attempts}=0 2>&1 | FileCheck %s --check-prefix=VERIFY-INVALID
// VERIFY-INVALID: invalid input for analyzer-config option 'crosscheck-with-z3-max-attempts-per-query', that expects a positive value
-// RUN: Z3_SOLVER_RESULTS="UNDEF" %{mocked_clang} %{attempts}=1 -verify=refuted
-// RUN: Z3_SOLVER_RESULTS="UNSAT" %{mocked_clang} %{attempts}=1 -verify=refuted
-// RUN: Z3_SOLVER_RESULTS="SAT" %{mocked_clang} %{attempts}=1 -verify=accepted
+// RUN: env Z3_SOLVER_RESULTS="UNDEF" %{mocked_clang} %{attempts}=1 -verify=refuted
+// RUN: env Z3_SOLVER_RESULTS="UNSAT" %{mocked_clang} %{attempts}=1 -verify=refuted
+// RUN: env Z3_SOLVER_RESULTS="SAT" %{mocked_clang} %{attempts}=1 -verify=accepted
-// RUN: Z3_SOLVER_RESULTS="UNDEF,UNDEF" %{mocked_clang} %{attempts}=2 -verify=refuted
-// RUN: Z3_SOLVER_RESULTS="UNDEF,UNSAT" %{mocked_clang} %{attempts}=2 -verify=refuted
-// RUN: Z3_SOLVER_RESULTS="UNDEF,SAT" %{mocked_clang} %{attempts}=2 -verify=accepted
+// RUN: env Z3_SOLVER_RESULTS="UNDEF,UNDEF" %{mocked_clang} %{attempts}=2 -verify=refuted
+// RUN: env Z3_SOLVER_RESULTS="UNDEF,UNSAT" %{mocked_clang} %{attempts}=2 -verify=refuted
+// RUN: env Z3_SOLVER_RESULTS="UNDEF,SAT" %{mocked_clang} %{attempts}=2 -verify=accepted
-// RUN: Z3_SOLVER_RESULTS="UNDEF,UNDEF,UNDEF" %{mocked_clang} %{attempts}=3 -verify=refuted
-// RUN: Z3_SOLVER_RESULTS="UNDEF,UNDEF,UNSAT" %{mocked_clang} %{attempts}=3 -verify=refuted
-// RUN: Z3_SOLVER_RESULTS="UNDEF,UNDEF,SAT" %{mocked_clang} %{attempts}=3 -verify=accepted
+// RUN: env Z3_SOLVER_RESULTS="UNDEF,UNDEF,UNDEF" %{mocked_clang} %{attempts}=3 -verify=refuted
+// RUN: env Z3_SOLVER_RESULTS="UNDEF,UNDEF,UNSAT" %{mocked_clang} %{attempts}=3 -verify=refuted
+// RUN: env Z3_SOLVER_RESULTS="UNDEF,UNDEF,SAT" %{mocked_clang} %{attempts}=3 -verify=accepted
// REQUIRES: z3, z3-mock, asserts, shell, system-linux
diff --git a/clang/test/Analysis/z3/D83660.c b/clang/test/Analysis/z3/D83660.c
index 16ea4ff..a81ce3a 100644
--- a/clang/test/Analysis/z3/D83660.c
+++ b/clang/test/Analysis/z3/D83660.c
@@ -1,4 +1,4 @@
-// RUN: Z3_SOLVER_RESULTS="SAT,SAT,SAT,SAT,UNDEF" \
+// RUN: env Z3_SOLVER_RESULTS="SAT,SAT,SAT,SAT,UNDEF" \
// RUN: LD_PRELOAD="%llvmshlibdir/MockZ3SolverCheck%pluginext" \
// RUN: %clang_analyze_cc1 -analyzer-constraints=z3 \
// RUN: -analyzer-checker=core %s -verify
diff --git a/clang/test/CIR/CodeGen/agg-expr-lvalue.c b/clang/test/CIR/CodeGen/agg-expr-lvalue.c
new file mode 100644
index 0000000..c826f8f
--- /dev/null
+++ b/clang/test/CIR/CodeGen/agg-expr-lvalue.c
@@ -0,0 +1,111 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
+// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll %s
+
+struct Point {
+ int x, y;
+};
+
+struct Line {
+ struct Point start;
+ struct Point end;
+};
+
+// AggExprEmitter::VisitMemberExpr
+void test_member_in_array(void) {
+ struct Line line = {{1, 2}, {3, 4}};
+ struct Point arr[1] = {line.start};
+}
+
+// CIR-LABEL: cir.func{{.*}} @test_member_in_array
+// CIR: %[[LINE:.*]] = cir.alloca !rec_Line{{.*}}, ["line", init]
+// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!rec_Point x 1>{{.*}}, ["arr", init]
+// CIR: %[[MEMBER:.*]] = cir.get_member %[[LINE]][0] {name = "start"}
+// CIR: cir.copy
+
+// LLVM-LABEL: define{{.*}} @test_member_in_array
+// LLVM: %[[LINE:.*]] = alloca %struct.Line
+// LLVM: %[[ARR:.*]] = alloca [1 x %struct.Point]
+// LLVM: %[[MEMBER:.*]] = getelementptr{{.*}}%struct.Line{{.*}}%[[LINE]]{{.*}}i32 0, i32 0
+// LLVM: call void @llvm.memcpy
+
+// OGCG-LABEL: define{{.*}} @test_member_in_array
+// OGCG: %[[LINE:.*]] = alloca %struct.Line
+// OGCG: %[[ARR:.*]] = alloca [1 x %struct.Point]
+// OGCG: %[[MEMBER:.*]] = getelementptr{{.*}}%struct.Line{{.*}}%[[LINE]]{{.*}}i32 0, i32 0
+// OGCG: call void @llvm.memcpy
+
+// AggExprEmitter::VisitMemberExpr
+void test_member_arrow_in_array(void) {
+ struct Line *line_ptr;
+ struct Point arr[1] = {line_ptr->start};
+}
+
+// CIR-LABEL: cir.func{{.*}} @test_member_arrow_in_array
+// CIR: %[[PTR:.*]] = cir.alloca !cir.ptr<!rec_Line>{{.*}}, ["line_ptr"]
+// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!rec_Point x 1>{{.*}}, ["arr", init]
+// CIR: %[[LOADED:.*]] = cir.load{{.*}}%[[PTR]]
+// CIR: %[[MEMBER:.*]] = cir.get_member %[[LOADED]][0] {name = "start"}
+// CIR: cir.copy
+
+// LLVM-LABEL: define{{.*}} @test_member_arrow_in_array
+// LLVM: %[[PTR:.*]] = alloca ptr
+// LLVM: %[[ARR:.*]] = alloca [1 x %struct.Point]
+// LLVM: %[[LOADED:.*]] = load ptr{{.*}}%[[PTR]]
+// LLVM: %[[MEMBER:.*]] = getelementptr{{.*}}%struct.Line{{.*}}%[[LOADED]]{{.*}}i32 0, i32 0
+// LLVM: call void @llvm.memcpy
+
+// OGCG-LABEL: define{{.*}} @test_member_arrow_in_array
+// OGCG: %[[PTR:.*]] = alloca ptr
+// OGCG: %[[ARR:.*]] = alloca [1 x %struct.Point]
+// OGCG: %[[LOADED:.*]] = load ptr{{.*}}%[[PTR]]
+// OGCG: %[[MEMBER:.*]] = getelementptr{{.*}}%struct.Line{{.*}}%[[LOADED]]{{.*}}i32 0, i32 0
+// OGCG: call void @llvm.memcpy
+
+// AggExprEmitter::VisitUnaryDeref
+void test_deref_in_array(void) {
+ struct Point *ptr;
+ struct Point arr[1] = {*ptr};
+}
+
+// CIR-LABEL: cir.func{{.*}} @test_deref_in_array
+// CIR: %[[PTR:.*]] = cir.alloca !cir.ptr<!rec_Point>{{.*}}, ["ptr"]
+// CIR: %[[ARR:.*]] = cir.alloca !cir.array<!rec_Point x 1>{{.*}}, ["arr", init]
+// CIR: %[[LOADED:.*]] = cir.load{{.*}}%[[PTR]]
+// CIR: cir.copy
+
+// LLVM-LABEL: define{{.*}} @test_deref_in_array
+// LLVM: %[[PTR:.*]] = alloca ptr
+// LLVM: %[[ARR:.*]] = alloca [1 x %struct.Point]
+// LLVM: %[[LOADED:.*]] = load ptr{{.*}}%[[PTR]]
+// LLVM: call void @llvm.memcpy
+
+// OGCG-LABEL: define{{.*}} @test_deref_in_array
+// OGCG: %[[PTR:.*]] = alloca ptr
+// OGCG: %[[ARR:.*]] = alloca [1 x %struct.Point]
+// OGCG: %[[LOADED:.*]] = load ptr{{.*}}%[[PTR]]
+// OGCG: call void @llvm.memcpy
+
+// AggExprEmitter::VisitStringLiteral
+void test_string_array_in_array(void) {
+ char matrix[2][6] = {"hello", "world"};
+}
+
+// CIR-LABEL: cir.func{{.*}} @test_string_array_in_array
+// CIR: cir.alloca !cir.array<!cir.array<!s8i x 6> x 2>, {{.*}}, ["matrix", init]
+// CIR: cir.get_global
+// CIR: cir.copy
+// CIR: cir.get_global
+// CIR: cir.copy
+
+// LLVM-LABEL: define{{.*}} @test_string_array_in_array
+// LLVM: alloca [2 x [6 x i8]]
+// LLVM: call void @llvm.memcpy
+// LLVM: call void @llvm.memcpy
+
+// OGCG-LABEL: define{{.*}} @test_string_array_in_array
+// OGCG: alloca [2 x [6 x i8]]
+// OGCG: call void @llvm.memcpy{{.*}}@__const.test_string_array_in_array
diff --git a/clang/test/CIR/CodeGen/binassign.c b/clang/test/CIR/CodeGen/binassign.c
index dab9879..44c54b4 100644
--- a/clang/test/CIR/CodeGen/binassign.c
+++ b/clang/test/CIR/CodeGen/binassign.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -std=c23 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: %clang_cc1 -std=c23 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir --mlir-print-ir-before=cir-lowering-prepare %s -o %t.cir 2> %t-before-lp.cir
// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
// RUN: %clang_cc1 -std=c23 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
@@ -54,3 +54,49 @@ void binary_assign(void) {
// OGCG: store float 0x40091EB860000000, ptr %[[F_PTR]]
// OGCG: store i32 42, ptr %[[I_PTR]]
// OGCG: ret void
+
+struct S {
+ int a;
+ float b;
+};
+
+struct SV {
+ int a;
+ volatile float b;
+};
+
+struct S gs;
+struct SV gsv;
+
+void binary_assign_struct() {
+ // Test normal struct assignment
+ struct S ls;
+ ls = gs;
+
+ // Test assignment of a struct with a volatile member
+ struct SV lsv;
+ lsv = gsv;
+}
+
+// CIR: cir.func{{.*}} @binary_assign_struct()
+// CIR: %[[LS:.*]] = cir.alloca ![[REC_S:.*]], !cir.ptr<![[REC_S]]>, ["ls"]
+// CIR: %[[LSV:.*]] = cir.alloca ![[REC_SV:.*]], !cir.ptr<![[REC_SV]]>, ["lsv"]
+// CIR: %[[GS_PTR:.*]] = cir.get_global @gs : !cir.ptr<![[REC_S]]>
+// CIR: cir.copy %[[GS_PTR]] to %[[LS]] : !cir.ptr<![[REC_S]]>
+// CIR: %[[GSV_PTR:.*]] = cir.get_global @gsv : !cir.ptr<![[REC_SV]]>
+// CIR: cir.copy %[[GSV_PTR]] to %[[LSV]] volatile : !cir.ptr<![[REC_SV]]>
+// CIR: cir.return
+
+// LLVM: define {{.*}}void @binary_assign_struct()
+// LLVM: %[[LS_PTR:.*]] = alloca %struct.S
+// LLVM: %[[LSV_PTR:.*]] = alloca %struct.SV
+// LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[LS_PTR]], ptr @gs, i32 8, i1 false)
+// LLVM: call void @llvm.memcpy.p0.p0.i32(ptr %[[LSV_PTR]], ptr @gsv, i32 8, i1 true)
+// LLVM: ret void
+
+// OGCG: define {{.*}}void @binary_assign_struct()
+// OGCG: %[[LS_PTR:.*]] = alloca %struct.S
+// OGCG: %[[LSV_PTR:.*]] = alloca %struct.SV
+// OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[LS_PTR]], ptr align 4 @gs, i64 8, i1 false)
+// OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[LSV_PTR]], ptr align 4 @gsv, i64 8, i1 true)
+// OGCG: ret void
diff --git a/clang/test/CIR/CodeGen/complex.cpp b/clang/test/CIR/CodeGen/complex.cpp
index 083d438..4e89af4 100644
--- a/clang/test/CIR/CodeGen/complex.cpp
+++ b/clang/test/CIR/CodeGen/complex.cpp
@@ -1360,6 +1360,30 @@ void complex_type_argument() {
// OGCG: %[[TMP_ARG:.*]] = load <2 x float>, ptr %[[ARG_ADDR]], align 4
// OGCG: call void @_Z22complex_type_parameterCf(<2 x float> noundef %[[TMP_ARG]])
+float _Complex complex_type_return_type() {
+ return { 1.0f, 2.0f };
+}
+
+// CIR: %[[RET_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["__retval"]
+// CIR: %[[RET_VAL:.*]] = cir.const #cir.const_complex<#cir.fp<1.000000e+00> : !cir.float, #cir.fp<2.000000e+00> : !cir.float> : !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[RET_VAL]], %[[RET_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+// CIR: %[[TMP_RET:.*]] = cir.load %[[RET_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: cir.return %[[TMP_RET]] : !cir.complex<!cir.float>
+
+// TODO(CIR): the difference between the CIR-generated LLVM and the OGCG output is due to the lack of calling convention lowering.
+// LLVM: %[[RET_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: store { float, float } { float 1.000000e+00, float 2.000000e+00 }, ptr %[[RET_ADDR]], align 4
+// LLVM: %[[TMP_RET:.*]] = load { float, float }, ptr %[[RET_ADDR]], align 4
+// LLVM: ret { float, float } %[[TMP_RET]]
+
+// OGCG: %[[RET_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[RET_VAL_REAL:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RET_ADDR]], i32 0, i32 0
+// OGCG: %[[RET_VAL_IMAG:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RET_ADDR]], i32 0, i32 1
+// OGCG: store float 1.000000e+00, ptr %[[RET_VAL_REAL]], align 4
+// OGCG: store float 2.000000e+00, ptr %[[RET_VAL_IMAG]], align 4
+// OGCG: %[[TMP_RET:.*]] = load <2 x float>, ptr %[[RET_ADDR]], align 4
+// OGCG: ret <2 x float> %[[TMP_RET]]
+
void real_on_scalar_bool() {
bool a;
bool b = __real__ a;
@@ -1405,3 +1429,42 @@ void imag_on_scalar_bool() {
// OGCG: %[[A_ADDR:.*]] = alloca i8, align 1
// OGCG: %[[B_ADDR:.*]] = alloca i8, align 1
// OGCG: store i8 0, ptr %[[B_ADDR]], align 1
+
+void function_with_complex_default_arg(
+ float _Complex a = __builtin_complex(1.0f, 2.2f)) {}
+
+// CIR: %[[ARG_0_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a", init]
+// CIR: cir.store %{{.*}}, %[[ARG_0_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// TODO(CIR): the difference between the CIR-generated LLVM and the OGCG output is due to the lack of calling convention lowering.
+
+// LLVM: %[[ARG_0_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: store { float, float } %{{.*}}, ptr %[[ARG_0_ADDR]], align 4
+
+// OGCG: %[[ARG_0_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: store <2 x float> %{{.*}}, ptr %[[ARG_0_ADDR]], align 4
+
+void calling_function_with_default_arg() {
+ function_with_complex_default_arg();
+}
+
+// CIR: %[[DEFAULT_ARG_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["coerce"]
+// CIR: %[[DEFAULT_ARG_VAL:.*]] = cir.const #cir.const_complex<#cir.fp<1.000000e+00> : !cir.float, #cir.fp<2.200000e+00> : !cir.float> : !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[DEFAULT_ARG_VAL]], %[[DEFAULT_ARG_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+// CIR: %[[TMP_DEFAULT_ARG:.*]] = cir.load{{.*}} %[[DEFAULT_ARG_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: cir.call @_Z33function_with_complex_default_argCf(%[[TMP_DEFAULT_ARG]]) : (!cir.complex<!cir.float>) -> ()
+
+// TODO(CIR): the difference between the CIR-generated LLVM and the OGCG output is due to the lack of calling convention lowering.
+
+// LLVM: %[[DEFAULT_ARG_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: store { float, float } { float 1.000000e+00, float 0x40019999A0000000 }, ptr %[[DEFAULT_ARG_ADDR]], align 4
+// LLVM: %[[TMP_DEFAULT_ARG:.*]] = load { float, float }, ptr %[[DEFAULT_ARG_ADDR]], align 4
+// LLVM: call void @_Z33function_with_complex_default_argCf({ float, float } %[[TMP_DEFAULT_ARG]])
+
+// OGCG: %[[DEFAULT_ARG_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[DEFAULT_ARG_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[DEFAULT_ARG_ADDR]], i32 0, i32 0
+// OGCG: %[[DEFAULT_ARG_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[DEFAULT_ARG_ADDR]], i32 0, i32 1
+// OGCG: store float 1.000000e+00, ptr %[[DEFAULT_ARG_REAL_PTR]], align 4
+// OGCG: store float 0x40019999A0000000, ptr %[[DEFAULT_ARG_IMAG_PTR]], align 4
+// OGCG: %[[TMP_DEFAULT_ARG:.*]] = load <2 x float>, ptr %[[DEFAULT_ARG_ADDR]], align 4
+// OGCG: call void @_Z33function_with_complex_default_argCf(<2 x float> {{.*}} %[[TMP_DEFAULT_ARG]])
diff --git a/clang/test/CIR/CodeGen/compound_literal.cpp b/clang/test/CIR/CodeGen/compound_literal.cpp
index a92af95..30a1dc0 100644
--- a/clang/test/CIR/CodeGen/compound_literal.cpp
+++ b/clang/test/CIR/CodeGen/compound_literal.cpp
@@ -97,3 +97,30 @@ void foo3() {
// OGCG: %[[TMP:.*]] = load <4 x i32>, ptr %[[CL_ADDR]], align 16
// OGCG: store <4 x i32> %[[TMP]], ptr %[[A_ADDR]], align 16
+struct Point {
+ int x, y;
+};
+
+void foo4() {
+ Point p = (Point){5, 10};
+}
+
+// CIR-LABEL: @_Z4foo4v
+// CIR: %[[P:.*]] = cir.alloca !rec_Point, !cir.ptr<!rec_Point>, ["p", init]
+// CIR: %[[P_X:.*]] = cir.get_member %[[P]][0] {name = "x"}
+// CIR: %[[FIVE:.*]] = cir.const #cir.int<5> : !s32i
+// CIR: cir.store{{.*}} %[[FIVE]], %[[P_X]]
+// CIR: %[[P_Y:.*]] = cir.get_member %[[P]][1] {name = "y"}
+// CIR: %[[TEN:.*]] = cir.const #cir.int<10> : !s32i
+// CIR: cir.store{{.*}} %[[TEN]], %[[P_Y]]
+
+// LLVM-LABEL: @_Z4foo4v
+// LLVM: %[[P:.*]] = alloca %struct.Point
+// LLVM: %[[P_X:.*]] = getelementptr %struct.Point, ptr %[[P]], i32 0, i32 0
+// LLVM: store i32 5, ptr %[[P_X]]
+// LLVM: %[[P_Y:.*]] = getelementptr %struct.Point, ptr %[[P]], i32 0, i32 1
+// LLVM: store i32 10, ptr %[[P_Y]]
+
+// OGCG-LABEL: @_Z4foo4v
+// OGCG: %[[P:.*]] = alloca %struct.Point
+// OGCG: call void @llvm.memcpy{{.*}}(ptr{{.*}} %[[P]], ptr{{.*}} @__const._Z4foo4v.p
diff --git a/clang/test/CIR/CodeGen/dtors.cpp b/clang/test/CIR/CodeGen/dtors.cpp
index cb3886b..f2c80a5 100644
--- a/clang/test/CIR/CodeGen/dtors.cpp
+++ b/clang/test/CIR/CodeGen/dtors.cpp
@@ -14,7 +14,7 @@ void test_temporary_dtor() {
}
// CIR: cir.func dso_local @_Z19test_temporary_dtorv()
-// CIR: %[[ALLOCA:.*]] = cir.alloca !rec_A, !cir.ptr<!rec_A>, ["agg.tmp0"]
+// CIR: %[[ALLOCA:.*]] = cir.alloca !rec_A, !cir.ptr<!rec_A>, ["agg.tmp.ensured"]
// CIR: cir.call @_ZN1AD1Ev(%[[ALLOCA]]) nothrow : (!cir.ptr<!rec_A>) -> ()
// LLVM: define dso_local void @_Z19test_temporary_dtorv(){{.*}}
diff --git a/clang/test/CIR/CodeGen/opaque.cpp b/clang/test/CIR/CodeGen/opaque.cpp
index 028bfd9..eac0dfa 100644
--- a/clang/test/CIR/CodeGen/opaque.cpp
+++ b/clang/test/CIR/CodeGen/opaque.cpp
@@ -154,3 +154,166 @@ void foo3() {
// OGCG: [[COND_END]]:
// OGCG: %[[RESULT:.*]] = phi i32 [ %[[TMP_A]], %[[COND_TRUE]] ], [ %[[TMP_B]], %[[COND_FALSE]] ]
// OGCG: store i32 %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+void test_gnu_binary_lvalue_assign() {
+ int a = 5;
+ int b = 10;
+ (a ?: b) = 42;
+}
+
+// CIR-LABEL: cir.func{{.*}} @_Z29test_gnu_binary_lvalue_assignv(
+// CIR: %[[A:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
+// CIR: %[[B:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b", init]
+// CIR: %[[A_VAL:.*]] = cir.load{{.*}} %[[A]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[A_BOOL:.*]] = cir.cast int_to_bool %[[A_VAL]] : !s32i -> !cir.bool
+// CIR: %[[TERNARY_PTR:.*]] = cir.ternary(%[[A_BOOL]], true {
+// CIR: cir.yield %[[A]] : !cir.ptr<!s32i>
+// CIR: }, false {
+// CIR: cir.yield %[[B]] : !cir.ptr<!s32i>
+// CIR: }) : (!cir.bool) -> !cir.ptr<!s32i>
+// CIR: cir.store{{.*}} %{{.*}}, %[[TERNARY_PTR]] : !s32i, !cir.ptr<!s32i>
+
+// LLVM-LABEL: define{{.*}} void @_Z29test_gnu_binary_lvalue_assignv(
+// LLVM: %[[A:.*]] = alloca i32
+// LLVM: %[[B:.*]] = alloca i32
+// LLVM: %[[A_VAL:.*]] = load i32, ptr %[[A]]
+// LLVM: %[[COND:.*]] = icmp ne i32 %[[A_VAL]], 0
+// LLVM: br i1 %[[COND]], label %[[TRUE_BB:.*]], label %[[FALSE_BB:.*]]
+// LLVM: [[TRUE_BB]]:
+// LLVM: br label %[[MERGE_BB:.*]]
+// LLVM: [[FALSE_BB]]:
+// LLVM: br label %[[MERGE_BB]]
+// LLVM: [[MERGE_BB]]:
+// LLVM: %[[PHI_PTR:.*]] = phi ptr [ %[[B]], %[[FALSE_BB]] ], [ %[[A]], %[[TRUE_BB]] ]
+// LLVM: br label %[[CONT_BB:.*]]
+// LLVM: [[CONT_BB]]:
+// LLVM: store i32 42, ptr %[[PHI_PTR]]
+
+// OGCG-LABEL: define{{.*}} void @_Z29test_gnu_binary_lvalue_assignv(
+// OGCG: %[[A:.*]] = alloca i32
+// OGCG: %[[B:.*]] = alloca i32
+// OGCG: %[[A_VAL:.*]] = load i32, ptr %[[A]]
+// OGCG: %[[COND:.*]] = icmp ne i32 %[[A_VAL]], 0
+// OGCG: br i1 %[[COND]], label %[[TRUE_BB:.*]], label %[[FALSE_BB:.*]]
+// OGCG: [[TRUE_BB]]:
+// OGCG: br label %[[MERGE_BB:.*]]
+// OGCG: [[FALSE_BB]]:
+// OGCG: br label %[[MERGE_BB]]
+// OGCG: [[MERGE_BB]]:
+// OGCG: %[[PHI_PTR:.*]] = phi ptr [ %[[A]], %[[TRUE_BB]] ], [ %[[B]], %[[FALSE_BB]] ]
+// OGCG: store i32 42, ptr %[[PHI_PTR]]
+
+void test_gnu_binary_lvalue_compound() {
+ int a = 7;
+ int b = 14;
+ (a ?: b) += 5;
+}
+
+// CIR-LABEL: cir.func{{.*}} @_Z31test_gnu_binary_lvalue_compoundv(
+// CIR: %[[A:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
+// CIR: %[[B:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b", init]
+// CIR: %[[A_VAL:.*]] = cir.load{{.*}} %[[A]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[A_BOOL:.*]] = cir.cast int_to_bool %[[A_VAL]] : !s32i -> !cir.bool
+// CIR: %[[LVAL_PTR:.*]] = cir.ternary(%[[A_BOOL]], true {
+// CIR: cir.yield %[[A]] : !cir.ptr<!s32i>
+// CIR: }, false {
+// CIR: cir.yield %[[B]] : !cir.ptr<!s32i>
+// CIR: }) : (!cir.bool) -> !cir.ptr<!s32i>
+// CIR: %[[OLD_VAL:.*]] = cir.load{{.*}} %[[LVAL_PTR]]
+// CIR: %[[NEW_VAL:.*]] = cir.binop(add, %[[OLD_VAL]], %{{.*}})
+// CIR: cir.store{{.*}} %[[NEW_VAL]], %[[LVAL_PTR]]
+
+// LLVM-LABEL: define{{.*}} void @_Z31test_gnu_binary_lvalue_compoundv(
+// LLVM: %[[A:.*]] = alloca i32
+// LLVM: %[[B:.*]] = alloca i32
+// LLVM: %[[A_VAL:.*]] = load i32, ptr %[[A]]
+// LLVM: %[[COND:.*]] = icmp ne i32 %[[A_VAL]], 0
+// LLVM: br i1 %[[COND]], label %[[TRUE_BB:.*]], label %[[FALSE_BB:.*]]
+// LLVM: [[TRUE_BB]]:
+// LLVM: br label %[[MERGE_BB:.*]]
+// LLVM: [[FALSE_BB]]:
+// LLVM: br label %[[MERGE_BB]]
+// LLVM: [[MERGE_BB]]:
+// LLVM: %[[PTR:.*]] = phi ptr [ %[[B]], %[[FALSE_BB]] ], [ %[[A]], %[[TRUE_BB]] ]
+// LLVM: br label %[[CONT:.*]]
+// LLVM: [[CONT]]:
+// LLVM: %[[OLD:.*]] = load i32, ptr %[[PTR]]
+// LLVM: %[[NEW:.*]] = add{{.*}} i32 %[[OLD]], 5
+// LLVM: store i32 %[[NEW]], ptr %[[PTR]]
+
+// OGCG-LABEL: define{{.*}} void @_Z31test_gnu_binary_lvalue_compoundv(
+// OGCG: %[[A:.*]] = alloca i32
+// OGCG: %[[B:.*]] = alloca i32
+// OGCG: %[[A_VAL:.*]] = load i32, ptr %[[A]]
+// OGCG: %[[COND:.*]] = icmp ne i32 %[[A_VAL]], 0
+// OGCG: br i1 %[[COND]], label %[[TRUE_BB:.*]], label %[[FALSE_BB:.*]]
+// OGCG: [[TRUE_BB]]:
+// OGCG: br label %[[MERGE_BB:.*]]
+// OGCG: [[FALSE_BB]]:
+// OGCG: br label %[[MERGE_BB]]
+// OGCG: [[MERGE_BB]]:
+// OGCG: %[[PTR:.*]] = phi ptr [ %[[A]], %[[TRUE_BB]] ], [ %[[B]], %[[FALSE_BB]] ]
+// OGCG: %[[OLD:.*]] = load i32, ptr %[[PTR]]
+// OGCG: %[[NEW:.*]] = add{{.*}} i32 %[[OLD]], 5
+// OGCG: store i32 %[[NEW]], ptr %[[PTR]]
+
+void test_gnu_binary_lvalue_ptr() {
+ int x = 1, y = 2;
+ int *p = &x;
+ int *q = nullptr;
+ *(p ?: q) = 99;
+}
+
+// CIR-LABEL: cir.func{{.*}} @_Z26test_gnu_binary_lvalue_ptrv(
+// CIR: %[[X:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init]
+// CIR: %[[Y:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["y", init]
+// CIR: %[[P:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["p", init]
+// CIR: %[[Q:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["q", init]
+// CIR: %[[P_VAL:.*]] = cir.load{{.*}} %[[P]]
+// CIR: %[[P_BOOL:.*]] = cir.cast ptr_to_bool %[[P_VAL]]
+// CIR: %[[PTR_RESULT:.*]] = cir.ternary(%[[P_BOOL]], true {
+// CIR: %[[P_LOAD:.*]] = cir.load{{.*}} %[[P]]
+// CIR: cir.yield %[[P_LOAD]] : !cir.ptr<!s32i>
+// CIR: }, false {
+// CIR: %[[Q_LOAD:.*]] = cir.load{{.*}} %[[Q]]
+// CIR: cir.yield %[[Q_LOAD]] : !cir.ptr<!s32i>
+// CIR: }) : (!cir.bool) -> !cir.ptr<!s32i>
+// CIR: cir.store{{.*}} %{{.*}}, %[[PTR_RESULT]]
+
+// LLVM-LABEL: define{{.*}} void @_Z26test_gnu_binary_lvalue_ptrv(
+// LLVM: %[[X:.*]] = alloca i32
+// LLVM: %[[Y:.*]] = alloca i32
+// LLVM: %[[P:.*]] = alloca ptr
+// LLVM: %[[Q:.*]] = alloca ptr
+// LLVM: %[[P_VAL:.*]] = load ptr, ptr %[[P]]
+// LLVM: %[[COND:.*]] = icmp ne ptr %[[P_VAL]], null
+// LLVM: br i1 %[[COND]], label %[[TRUE_BB:.*]], label %[[FALSE_BB:.*]]
+// LLVM: [[TRUE_BB]]:
+// LLVM: %[[P_LOAD:.*]] = load ptr, ptr %[[P]]
+// LLVM: br label %[[MERGE_BB:.*]]
+// LLVM: [[FALSE_BB]]:
+// LLVM: %[[Q_LOAD:.*]] = load ptr, ptr %[[Q]]
+// LLVM: br label %[[MERGE_BB]]
+// LLVM: [[MERGE_BB]]:
+// LLVM: %[[PHI:.*]] = phi ptr [ %[[Q_LOAD]], %[[FALSE_BB]] ], [ %[[P_LOAD]], %[[TRUE_BB]] ]
+// LLVM: br label %[[CONT:.*]]
+// LLVM: [[CONT]]:
+// LLVM: store i32 99, ptr %[[PHI]]
+
+// OGCG-LABEL: define{{.*}} void @_Z26test_gnu_binary_lvalue_ptrv(
+// OGCG: %[[X:.*]] = alloca i32
+// OGCG: %[[Y:.*]] = alloca i32
+// OGCG: %[[P:.*]] = alloca ptr
+// OGCG: %[[Q:.*]] = alloca ptr
+// OGCG: %[[P_VAL:.*]] = load ptr, ptr %[[P]]
+// OGCG: %[[COND:.*]] = icmp ne ptr %[[P_VAL]], null
+// OGCG: br i1 %[[COND]], label %[[TRUE_BB:.*]], label %[[FALSE_BB:.*]]
+// OGCG: [[TRUE_BB]]:
+// OGCG: %[[P_LOAD:.*]] = load ptr, ptr %[[P]]
+// OGCG: br label %[[MERGE_BB:.*]]
+// OGCG: [[FALSE_BB]]:
+// OGCG: %[[Q_LOAD:.*]] = load ptr, ptr %[[Q]]
+// OGCG: br label %[[MERGE_BB]]
+// OGCG: [[MERGE_BB]]:
+// OGCG: %[[PHI:.*]] = phi ptr [ %[[P_LOAD]], %[[TRUE_BB]] ], [ %[[Q_LOAD]], %[[FALSE_BB]] ]
+// OGCG: store i32 99, ptr %[[PHI]]
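The three opaque.cpp tests added above rely on the GNU binary conditional operator yielding an lvalue when its arms are lvalues of the same type (the pointer variant instead selects a loaded pointer value and stores through it); CIRGen models the selection as a cir.ternary over addresses, which flattens to a phi of pointers. A minimal sketch of the semantics being checked (names are illustrative):

void gnu_elvis_lvalue_demo() {
  int a = 5, b = 10;
  // `a` is evaluated once; nonzero selects the lvalue `a`, zero selects `b`.
  (a ?: b) = 42;  // a != 0 here, so a becomes 42
  a = 0;
  (a ?: b) += 5;  // a == 0 now, so b becomes 15
}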
diff --git a/clang/test/CIR/CodeGen/struct.cpp b/clang/test/CIR/CodeGen/struct.cpp
index 263799f..6d362c7 100644
--- a/clang/test/CIR/CodeGen/struct.cpp
+++ b/clang/test/CIR/CodeGen/struct.cpp
@@ -265,7 +265,7 @@ void bin_comma() {
// CIR: cir.func{{.*}} @_Z9bin_commav()
// CIR: %[[A_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr<!rec_CompleteS>, ["a", init]
-// CIR: %[[TMP_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr<!rec_CompleteS>, ["agg.tmp0"]
+// CIR: %[[TMP_ADDR:.*]] = cir.alloca !rec_CompleteS, !cir.ptr<!rec_CompleteS>, ["agg.tmp.ensured"]
// CIR: %[[ZERO:.*]] = cir.const #cir.zero : !rec_CompleteS
// CIR: cir.store{{.*}} %[[ZERO]], %[[TMP_ADDR]] : !rec_CompleteS, !cir.ptr<!rec_CompleteS>
// CIR: %[[ZERO:.*]] = cir.const #cir.zero : !rec_CompleteS
diff --git a/clang/test/CIR/CodeGen/ternary-throw.cpp b/clang/test/CIR/CodeGen/ternary-throw.cpp
new file mode 100644
index 0000000..fb8897f
--- /dev/null
+++ b/clang/test/CIR/CodeGen/ternary-throw.cpp
@@ -0,0 +1,197 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fexceptions -fcxx-exceptions -emit-cir %s -o %t.cir
+// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fexceptions -fcxx-exceptions -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fexceptions -fcxx-exceptions -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s --check-prefix=OGCG
+
+const int& test_cond_throw_false(bool flag) {
+ const int a = 10;
+ return flag ? a : throw 0;
+}
+
+// CIR-LABEL: cir.func{{.*}} @_Z21test_cond_throw_falseb(
+// CIR: %[[FLAG:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["flag", init]
+// CIR: %[[A:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init, const]
+// CIR: %[[TEN:.*]] = cir.const #cir.int<10> : !s32i
+// CIR: cir.store{{.*}} %[[TEN]], %[[A]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[FLAG_VAL:.*]] = cir.load{{.*}} %[[FLAG]] : !cir.ptr<!cir.bool>, !cir.bool
+// CIR: %[[RESULT:.*]] = cir.ternary(%[[FLAG_VAL]], true {
+// CIR: cir.yield %[[A]] : !cir.ptr<!s32i>
+// CIR: }, false {
+// CIR: %[[EXCEPTION:.*]] = cir.alloc.exception{{.*}} -> !cir.ptr<!s32i>
+// CIR: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.store{{.*}} %[[ZERO]], %[[EXCEPTION]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.throw %[[EXCEPTION]] : !cir.ptr<!s32i>, @_ZTIi
+// CIR: cir.unreachable
+// CIR: }) : (!cir.bool) -> !cir.ptr<!s32i>
+
+// LLVM-LABEL: define{{.*}} ptr @_Z21test_cond_throw_falseb(
+// LLVM: %[[FLAG_ALLOCA:.*]] = alloca i8
+// LLVM: %[[RET_ALLOCA:.*]] = alloca ptr
+// LLVM: %[[A_ALLOCA:.*]] = alloca i32
+// LLVM: %[[ZEXT:.*]] = zext i1 %{{.*}} to i8
+// LLVM: store i8 %[[ZEXT]], ptr %[[FLAG_ALLOCA]]
+// LLVM: store i32 10, ptr %[[A_ALLOCA]]
+// LLVM: %[[LOAD:.*]] = load i8, ptr %[[FLAG_ALLOCA]]
+// LLVM: %[[BOOL:.*]] = trunc i8 %[[LOAD]] to i1
+// LLVM: br i1 %[[BOOL]], label %[[TRUE_BB:.*]], label %[[FALSE_BB:.*]]
+// LLVM: [[TRUE_BB]]:
+// LLVM: br label %[[PHI_BB:.*]]
+// LLVM: [[FALSE_BB]]:
+// LLVM: %[[EXC:.*]] = call{{.*}} ptr @__cxa_allocate_exception
+// LLVM: store i32 0, ptr %[[EXC]]
+// LLVM: call void @__cxa_throw(ptr %[[EXC]], ptr @_ZTIi
+// LLVM: unreachable
+// LLVM: [[PHI_BB]]:
+// LLVM: %[[PHI:.*]] = phi ptr [ %[[A_ALLOCA]], %[[TRUE_BB]] ]
+// LLVM: br label %[[CONT_BB:.*]]
+// LLVM: [[CONT_BB]]:
+// LLVM: store ptr %[[A_ALLOCA]], ptr %[[RET_ALLOCA]]
+// LLVM: %[[RET:.*]] = load ptr, ptr %[[RET_ALLOCA]]
+// LLVM: ret ptr %[[RET]]
+
+// OGCG-LABEL: define{{.*}} ptr @_Z21test_cond_throw_falseb(
+// OGCG: %{{.*}} = alloca i8
+// OGCG: %[[A:.*]] = alloca i32
+// OGCG: store i32 10, ptr %[[A]]
+// OGCG: %{{.*}} = load i8, ptr %{{.*}}
+// OGCG: %[[BOOL:.*]] = trunc i8 %{{.*}} to i1
+// OGCG: br i1 %[[BOOL]], label %[[TRUE_BB:.*]], label %[[FALSE_BB:.*]]
+// OGCG: [[TRUE_BB]]:
+// OGCG: br label %[[END:.*]]
+// OGCG: [[FALSE_BB]]:
+// OGCG: %{{.*}} = call{{.*}} ptr @__cxa_allocate_exception
+// OGCG: store i32 0, ptr %{{.*}}
+// OGCG: call void @__cxa_throw(ptr %{{.*}}, ptr @_ZTIi
+// OGCG: unreachable
+// OGCG: [[END]]:
+// OGCG: ret ptr %[[A]]
+
+const int& test_cond_throw_true(bool flag) {
+ const int a = 10;
+ return flag ? throw 0 : a;
+}
+
+// CIR-LABEL: cir.func{{.*}} @_Z20test_cond_throw_trueb(
+// CIR: %[[FLAG:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["flag", init]
+// CIR: %[[A:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init, const]
+// CIR: %[[TEN:.*]] = cir.const #cir.int<10> : !s32i
+// CIR: cir.store{{.*}} %[[TEN]], %[[A]] : !s32i, !cir.ptr<!s32i>
+// CIR: %[[FLAG_VAL:.*]] = cir.load{{.*}} %[[FLAG]] : !cir.ptr<!cir.bool>, !cir.bool
+// CIR: %[[RESULT:.*]] = cir.ternary(%[[FLAG_VAL]], true {
+// CIR: %[[EXCEPTION:.*]] = cir.alloc.exception{{.*}} -> !cir.ptr<!s32i>
+// CIR: %[[ZERO:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: cir.store{{.*}} %[[ZERO]], %[[EXCEPTION]] : !s32i, !cir.ptr<!s32i>
+// CIR: cir.throw %[[EXCEPTION]] : !cir.ptr<!s32i>, @_ZTIi
+// CIR: cir.unreachable
+// CIR: }, false {
+// CIR: cir.yield %[[A]] : !cir.ptr<!s32i>
+// CIR: }) : (!cir.bool) -> !cir.ptr<!s32i>
+
+// LLVM-LABEL: define{{.*}} ptr @_Z20test_cond_throw_trueb(
+// LLVM: %[[FLAG_ALLOCA:.*]] = alloca i8
+// LLVM: %[[RET_ALLOCA:.*]] = alloca ptr
+// LLVM: %[[A_ALLOCA:.*]] = alloca i32
+// LLVM: %[[ZEXT:.*]] = zext i1 %{{.*}} to i8
+// LLVM: store i8 %[[ZEXT]], ptr %[[FLAG_ALLOCA]]
+// LLVM: store i32 10, ptr %[[A_ALLOCA]]
+// LLVM: %[[LOAD:.*]] = load i8, ptr %[[FLAG_ALLOCA]]
+// LLVM: %[[BOOL:.*]] = trunc i8 %[[LOAD]] to i1
+// LLVM: br i1 %[[BOOL]], label %[[TRUE_BB:.*]], label %[[FALSE_BB:.*]]
+// LLVM: [[TRUE_BB]]:
+// LLVM: %[[EXC:.*]] = call{{.*}} ptr @__cxa_allocate_exception
+// LLVM: store i32 0, ptr %[[EXC]]
+// LLVM: call void @__cxa_throw(ptr %[[EXC]], ptr @_ZTIi
+// LLVM: unreachable
+// LLVM: [[FALSE_BB]]:
+// LLVM: br label %[[PHI_BB:.*]]
+// LLVM: [[PHI_BB]]:
+// LLVM: %[[PHI:.*]] = phi ptr [ %[[A_ALLOCA]], %[[FALSE_BB]] ]
+// LLVM: br label %[[CONT_BB:.*]]
+// LLVM: [[CONT_BB]]:
+// LLVM: store ptr %[[A_ALLOCA]], ptr %[[RET_ALLOCA]]
+// LLVM: %[[RET:.*]] = load ptr, ptr %[[RET_ALLOCA]]
+// LLVM: ret ptr %[[RET]]
+
+// OGCG-LABEL: define{{.*}} ptr @_Z20test_cond_throw_trueb(
+// OGCG: %{{.*}} = alloca i8
+// OGCG: %[[A:.*]] = alloca i32
+// OGCG: store i32 10, ptr %[[A]]
+// OGCG: %{{.*}} = load i8, ptr %{{.*}}
+// OGCG: %[[BOOL:.*]] = trunc i8 %{{.*}} to i1
+// OGCG: br i1 %[[BOOL]], label %[[TRUE_BB:.*]], label %[[FALSE_BB:.*]]
+// OGCG: [[TRUE_BB]]:
+// OGCG: %{{.*}} = call{{.*}} ptr @__cxa_allocate_exception
+// OGCG: store i32 0, ptr %{{.*}}
+// OGCG: call void @__cxa_throw(ptr %{{.*}}, ptr @_ZTIi
+// OGCG: unreachable
+// OGCG: [[FALSE_BB]]:
+// OGCG: br label %[[END:.*]]
+// OGCG: [[END]]:
+// OGCG: ret ptr %[[A]]
+
+// Test constant folding with throw - compile-time true condition, dead throw in false branch
+const int& test_cond_const_true_throw_false() {
+ const int a = 20;
+ return true ? a : throw 0;
+}
+
+// CIR-LABEL: cir.func{{.*}} @_Z32test_cond_const_true_throw_falsev(
+// CIR: %[[A:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init, const]
+// CIR: %[[TWENTY:.*]] = cir.const #cir.int<20> : !s32i
+// CIR: cir.store{{.*}} %[[TWENTY]], %[[A]] : !s32i, !cir.ptr<!s32i>
+// CIR-NOT: cir.ternary
+// CIR-NOT: cir.throw
+// CIR: cir.store %[[A]]
+// CIR: %[[RET:.*]] = cir.load
+// CIR: cir.return %[[RET]] : !cir.ptr<!s32i>
+
+// LLVM-LABEL: define{{.*}} ptr @_Z32test_cond_const_true_throw_falsev(
+// LLVM: %[[A:.*]] = alloca i32
+// LLVM: store i32 20, ptr %[[A]]
+// LLVM-NOT: br i1
+// LLVM-NOT: __cxa_throw
+// LLVM: store ptr %[[A]]
+// LLVM: %[[RET:.*]] = load ptr
+// LLVM: ret ptr %[[RET]]
+
+// OGCG-LABEL: define{{.*}} ptr @_Z32test_cond_const_true_throw_falsev(
+// OGCG: %[[A:.*]] = alloca i32
+// OGCG: store i32 20, ptr %[[A]]
+// OGCG-NOT: br i1
+// OGCG-NOT: __cxa_throw
+// OGCG: ret ptr %[[A]]
+
+// Test constant folding with throw - compile-time false condition, dead throw in true branch
+const int& test_cond_const_false_throw_true() {
+ const int a = 30;
+ return false ? throw 0 : a;
+}
+
+// CIR-LABEL: cir.func{{.*}} @_Z32test_cond_const_false_throw_truev(
+// CIR: %[[A:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init, const]
+// CIR: %[[THIRTY:.*]] = cir.const #cir.int<30> : !s32i
+// CIR: cir.store{{.*}} %[[THIRTY]], %[[A]] : !s32i, !cir.ptr<!s32i>
+// CIR-NOT: cir.ternary
+// CIR-NOT: cir.throw
+// CIR: cir.store %[[A]]
+// CIR: %[[RET:.*]] = cir.load
+// CIR: cir.return %[[RET]] : !cir.ptr<!s32i>
+
+// LLVM-LABEL: define{{.*}} ptr @_Z32test_cond_const_false_throw_truev(
+// LLVM: %[[A:.*]] = alloca i32
+// LLVM: store i32 30, ptr %[[A]]
+// LLVM-NOT: br i1
+// LLVM-NOT: __cxa_throw
+// LLVM: store ptr %[[A]]
+// LLVM: %[[RET:.*]] = load ptr
+// LLVM: ret ptr %[[RET]]
+
+// OGCG-LABEL: define{{.*}} ptr @_Z32test_cond_const_false_throw_truev(
+// OGCG: %[[A:.*]] = alloca i32
+// OGCG: store i32 30, ptr %[[A]]
+// OGCG-NOT: br i1
+// OGCG-NOT: __cxa_throw
+// OGCG: ret ptr %[[A]]
+
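The new ternary-throw.cpp file checks the [expr.cond] rule that when one arm of a conditional is a throw-expression, the type and value category of the whole expression come from the other arm, so flag ? a : throw 0 is still a const int lvalue that can bind to the returned reference; with a constant condition, the dead throw folds away entirely. A behavioral sketch of the rule (this version makes a static so the returned reference stays valid outside a codegen test; names are illustrative):

#include <cstdio>

const int &pick_or_throw(bool flag) {
  static const int a = 10;
  return flag ? a : throw 0; // lvalue of type const int when flag is true
}

int main() {
  std::printf("%d\n", pick_or_throw(true)); // prints 10
  try {
    (void)pick_or_throw(false);
  } catch (int e) {
    std::printf("caught %d\n", e); // prints "caught 0"
  }
}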
diff --git a/clang/test/CIR/CodeGen/ternary.cpp b/clang/test/CIR/CodeGen/ternary.cpp
index e7b7270..847c0b4 100644
--- a/clang/test/CIR/CodeGen/ternary.cpp
+++ b/clang/test/CIR/CodeGen/ternary.cpp
@@ -10,11 +10,11 @@ int x(int y) {
}
// CIR-LABEL: cir.func{{.*}} @_Z1xi(
-// CIR-SAME: %[[ARG0:.*]]: !s32i {{.*}}) -> !s32i
-// CIR: [[Y:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["y", init] {alignment = 4 : i64}
-// CIR: [[RETVAL:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"] {alignment = 4 : i64}
+// CIR-SAME: %[[ARG0:.*]]: !s32i {{.*}}) -> !s32i{{.*}}{
+// CIR: [[Y:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["y", init]
+// CIR: [[RETVAL:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
// CIR: cir.store %[[ARG0]], [[Y]] : !s32i, !cir.ptr<!s32i>
-// CIR: [[YVAL:%.+]] = cir.load align(4) [[Y]] : !cir.ptr<!s32i>, !s32i
+// CIR: [[YVAL:%.+]] = cir.load{{.*}} [[Y]] : !cir.ptr<!s32i>, !s32i
// CIR: [[ZERO:%.+]] = cir.const #cir.int<0> : !s32i
// CIR: [[CMP:%.+]] = cir.cmp(gt, [[YVAL]], [[ZERO]]) : !s32i, !cir.bool
// CIR: [[THREE:%.+]] = cir.const #cir.int<3> : !s32i
@@ -52,21 +52,21 @@ int foo(int a, int b) {
}
// CIR-LABEL: cir.func{{.*}} @_Z3fooii(
-// CIR-SAME: %[[ARG0:.*]]: !s32i {{.*}}, %[[ARG1:.*]]: !s32i {{.*}}) -> !s32i
-// CIR: [[A:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init] {alignment = 4 : i64}
-// CIR: [[B:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b", init] {alignment = 4 : i64}
-// CIR: [[RETVAL:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"] {alignment = 4 : i64}
+// CIR-SAME: %[[ARG0:.*]]: !s32i {{.*}}, %[[ARG1:.*]]: !s32i {{.*}}) -> !s32i{{.*}}{
+// CIR: [[A:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
+// CIR: [[B:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b", init]
+// CIR: [[RETVAL:%.+]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
// CIR: cir.store %[[ARG0]], [[A]] : !s32i, !cir.ptr<!s32i>
// CIR: cir.store %[[ARG1]], [[B]] : !s32i, !cir.ptr<!s32i>
// CIR: cir.scope {
-// CIR: [[ALOAD:%.+]] = cir.load align(4) [[A]] : !cir.ptr<!s32i>, !s32i
-// CIR: [[BLOAD:%.+]] = cir.load align(4) [[B]] : !cir.ptr<!s32i>, !s32i
+// CIR: [[ALOAD:%.+]] = cir.load{{.*}} [[A]] : !cir.ptr<!s32i>, !s32i
+// CIR: [[BLOAD:%.+]] = cir.load{{.*}} [[B]] : !cir.ptr<!s32i>, !s32i
// CIR: [[CMP:%.+]] = cir.cmp(lt, [[ALOAD]], [[BLOAD]]) : !s32i, !cir.bool
// CIR: [[TERNARY_RES:%.+]] = cir.ternary([[CMP]], true {
// CIR: [[ZERO:%.+]] = cir.const #cir.int<0> : !s32i
// CIR: cir.yield [[ZERO]] : !s32i
// CIR: }, false {
-// CIR: [[ALOAD2:%.+]] = cir.load align(4) [[A]] : !cir.ptr<!s32i>, !s32i
+// CIR: [[ALOAD2:%.+]] = cir.load{{.*}} [[A]] : !cir.ptr<!s32i>, !s32i
// CIR: cir.yield [[ALOAD2]] : !s32i
// CIR: }) : (!cir.bool) -> !s32i
// CIR: [[CAST:%.+]] = cir.cast int_to_bool [[TERNARY_RES]] : !s32i -> !cir.bool
@@ -145,3 +145,214 @@ int foo(int a, int b) {
// OGCG: [[RETURN]]:
// OGCG: %[[RET2:.*]] = load i32, ptr %[[RETVAL]]
// OGCG: ret i32 %[[RET2]]
+
+void test_cond_lvalue_assign(bool flag) {
+ int a = 1;
+ int b = 2;
+ (flag ? a : b) = 42;
+}
+
+// CIR-LABEL: cir.func{{.*}} @_Z23test_cond_lvalue_assignb(
+// CIR: %[[FLAG:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["flag", init]
+// CIR: %[[A:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
+// CIR: %[[B:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b", init]
+// CIR: %[[FLAG_VAL:.*]] = cir.load{{.*}} %[[FLAG]]
+// CIR: %[[TERNARY_PTR:.*]] = cir.ternary(%[[FLAG_VAL]], true {
+// CIR: cir.yield %[[A]] : !cir.ptr<!s32i>
+// CIR: }, false {
+// CIR: cir.yield %[[B]] : !cir.ptr<!s32i>
+// CIR: }) : (!cir.bool) -> !cir.ptr<!s32i>
+// CIR: cir.store{{.*}} %{{.*}}, %[[TERNARY_PTR]] : !s32i, !cir.ptr<!s32i>
+
+// LLVM-LABEL: define{{.*}} void @_Z23test_cond_lvalue_assignb(
+// LLVM: %[[FLAG:.*]] = alloca i8
+// LLVM: %[[A:.*]] = alloca i32
+// LLVM: %[[B:.*]] = alloca i32
+// LLVM: %[[FLAG_VAL:.*]] = load i8, ptr %[[FLAG]]
+// LLVM: %[[COND:.*]] = trunc i8 %[[FLAG_VAL]] to i1
+// LLVM: br i1 %[[COND]], label %[[TRUE_BB:.*]], label %[[FALSE_BB:.*]]
+// LLVM: [[TRUE_BB]]:
+// LLVM: br label %[[MERGE_BB:.*]]
+// LLVM: [[FALSE_BB]]:
+// LLVM: br label %[[MERGE_BB]]
+// LLVM: [[MERGE_BB]]:
+// LLVM: %[[PHI_PTR:.*]] = phi ptr [ %[[B]], %[[FALSE_BB]] ], [ %[[A]], %[[TRUE_BB]] ]
+// LLVM: br label %[[CONT_BB:.*]]
+// LLVM: [[CONT_BB]]:
+// LLVM: store i32 42, ptr %[[PHI_PTR]]
+
+// OGCG-LABEL: define{{.*}} void @_Z23test_cond_lvalue_assignb(
+// OGCG: %[[FLAG:.*]] = alloca i8
+// OGCG: %[[A:.*]] = alloca i32
+// OGCG: %[[B:.*]] = alloca i32
+// OGCG: %[[FLAG_VAL:.*]] = load i8, ptr %[[FLAG]]
+// OGCG: %[[COND:.*]] = trunc i8 %[[FLAG_VAL]] to i1
+// OGCG: br i1 %[[COND]], label %[[TRUE_BB:.*]], label %[[FALSE_BB:.*]]
+// OGCG: [[TRUE_BB]]:
+// OGCG: br label %[[MERGE_BB:.*]]
+// OGCG: [[FALSE_BB]]:
+// OGCG: br label %[[MERGE_BB]]
+// OGCG: [[MERGE_BB]]:
+// OGCG: %[[PHI_PTR:.*]] = phi ptr [ %[[A]], %[[TRUE_BB]] ], [ %[[B]], %[[FALSE_BB]] ]
+// OGCG: store i32 42, ptr %[[PHI_PTR]]
+
+int& test_cond_lvalue_ref(bool cond, int x, int y) {
+ return cond ? x : y;
+}
+
+// CIR-LABEL: cir.func{{.*}} @_Z20test_cond_lvalue_refbii(
+// CIR: %[[COND:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["cond", init]
+// CIR: %[[X:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init]
+// CIR: %[[Y:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["y", init]
+// CIR: %[[COND_VAL:.*]] = cir.load{{.*}} %[[COND]]
+// CIR: %[[REF_PTR:.*]] = cir.ternary(%[[COND_VAL]], true {
+// CIR: cir.yield %[[X]] : !cir.ptr<!s32i>
+// CIR: }, false {
+// CIR: cir.yield %[[Y]] : !cir.ptr<!s32i>
+// CIR: }) : (!cir.bool) -> !cir.ptr<!s32i>
+// CIR: cir.store %[[REF_PTR]]
+// CIR: %[[RET_VAL:.*]] = cir.load
+// CIR: cir.return %[[RET_VAL]] : !cir.ptr<!s32i>
+
+// LLVM-LABEL: define{{.*}} ptr @_Z20test_cond_lvalue_refbii(
+// LLVM: %[[COND:.*]] = alloca i8
+// LLVM: %[[X:.*]] = alloca i32
+// LLVM: %[[Y:.*]] = alloca i32
+// LLVM: %[[COND_VAL:.*]] = load i8, ptr %[[COND]]
+// LLVM: %[[BOOL:.*]] = trunc i8 %[[COND_VAL]] to i1
+// LLVM: br i1 %[[BOOL]], label %[[TRUE_BB:.*]], label %[[FALSE_BB:.*]]
+// LLVM: [[TRUE_BB]]:
+// LLVM: br label %[[MERGE_BB:.*]]
+// LLVM: [[FALSE_BB]]:
+// LLVM: br label %[[MERGE_BB]]
+// LLVM: [[MERGE_BB]]:
+// LLVM: %[[PHI:.*]] = phi ptr [ %[[Y]], %[[FALSE_BB]] ], [ %[[X]], %[[TRUE_BB]] ]
+// LLVM: br label %[[CONT:.*]]
+// LLVM: [[CONT]]:
+// LLVM: store ptr %[[PHI]]
+// LLVM: %[[RET:.*]] = load ptr
+// LLVM: ret ptr %[[RET]]
+
+// OGCG-LABEL: define{{.*}} ptr @_Z20test_cond_lvalue_refbii(
+// OGCG: %[[COND:.*]] = alloca i8
+// OGCG: %[[X:.*]] = alloca i32
+// OGCG: %[[Y:.*]] = alloca i32
+// OGCG: %[[COND_VAL:.*]] = load i8, ptr %[[COND]]
+// OGCG: %[[BOOL:.*]] = trunc i8 %[[COND_VAL]] to i1
+// OGCG: br i1 %[[BOOL]], label %[[TRUE_BB:.*]], label %[[FALSE_BB:.*]]
+// OGCG: [[TRUE_BB]]:
+// OGCG: br label %[[MERGE_BB:.*]]
+// OGCG: [[FALSE_BB]]:
+// OGCG: br label %[[MERGE_BB]]
+// OGCG: [[MERGE_BB]]:
+// OGCG: %[[PHI:.*]] = phi ptr [ %[[X]], %[[TRUE_BB]] ], [ %[[Y]], %[[FALSE_BB]] ]
+// OGCG: ret ptr %[[PHI]]
+
+// Test ConditionalOperator as lvalue - compound assignment
+void test_cond_lvalue_compound(bool flag) {
+ int a = 5;
+ int b = 10;
+ (flag ? a : b) += 3;
+}
+
+// CIR-LABEL: cir.func{{.*}} @_Z25test_cond_lvalue_compoundb(
+// CIR: %[[FLAG:.*]] = cir.alloca !cir.bool, !cir.ptr<!cir.bool>, ["flag", init]
+// CIR: %[[A:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
+// CIR: %[[B:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b", init]
+// CIR: %[[FLAG_VAL:.*]] = cir.load{{.*}} %[[FLAG]]
+// CIR: %[[LVAL_PTR:.*]] = cir.ternary(%[[FLAG_VAL]], true {
+// CIR: cir.yield %[[A]] : !cir.ptr<!s32i>
+// CIR: }, false {
+// CIR: cir.yield %[[B]] : !cir.ptr<!s32i>
+// CIR: }) : (!cir.bool) -> !cir.ptr<!s32i>
+// CIR: %[[OLD_VAL:.*]] = cir.load{{.*}} %[[LVAL_PTR]]
+// CIR: %[[NEW_VAL:.*]] = cir.binop(add, %[[OLD_VAL]], %{{.*}})
+// CIR: cir.store{{.*}} %[[NEW_VAL]], %[[LVAL_PTR]]
+
+// LLVM-LABEL: define{{.*}} void @_Z25test_cond_lvalue_compoundb(
+// LLVM: %[[FLAG:.*]] = alloca i8
+// LLVM: %[[A:.*]] = alloca i32
+// LLVM: %[[B:.*]] = alloca i32
+// LLVM: %[[FLAG_VAL:.*]] = load i8, ptr %[[FLAG]]
+// LLVM: %[[BOOL:.*]] = trunc i8 %[[FLAG_VAL]] to i1
+// LLVM: br i1 %[[BOOL]], label %[[TRUE_BB:.*]], label %[[FALSE_BB:.*]]
+// LLVM: [[TRUE_BB]]:
+// LLVM: br label %[[MERGE_BB:.*]]
+// LLVM: [[FALSE_BB]]:
+// LLVM: br label %[[MERGE_BB]]
+// LLVM: [[MERGE_BB]]:
+// LLVM: %[[PTR:.*]] = phi ptr [ %[[B]], %[[FALSE_BB]] ], [ %[[A]], %[[TRUE_BB]] ]
+// LLVM: br label %[[CONT:.*]]
+// LLVM: [[CONT]]:
+// LLVM: %[[OLD:.*]] = load i32, ptr %[[PTR]]
+// LLVM: %[[NEW:.*]] = add{{.*}} i32 %[[OLD]], 3
+// LLVM: store i32 %[[NEW]], ptr %[[PTR]]
+
+// OGCG-LABEL: define{{.*}} void @_Z25test_cond_lvalue_compoundb(
+// OGCG: %[[FLAG:.*]] = alloca i8
+// OGCG: %[[A:.*]] = alloca i32
+// OGCG: %[[B:.*]] = alloca i32
+// OGCG: %[[FLAG_VAL:.*]] = load i8, ptr %[[FLAG]]
+// OGCG: %[[BOOL:.*]] = trunc i8 %[[FLAG_VAL]] to i1
+// OGCG: br i1 %[[BOOL]], label %[[TRUE_BB:.*]], label %[[FALSE_BB:.*]]
+// OGCG: [[TRUE_BB]]:
+// OGCG: br label %[[MERGE_BB:.*]]
+// OGCG: [[FALSE_BB]]:
+// OGCG: br label %[[MERGE_BB]]
+// OGCG: [[MERGE_BB]]:
+// OGCG: %[[PTR:.*]] = phi ptr [ %[[A]], %[[TRUE_BB]] ], [ %[[B]], %[[FALSE_BB]] ]
+// OGCG: %[[OLD:.*]] = load i32, ptr %[[PTR]]
+// OGCG: %[[NEW:.*]] = add{{.*}} i32 %[[OLD]], 3
+// OGCG: store i32 %[[NEW]], ptr %[[PTR]]
+
+// Test constant folding - compile-time true condition with lvalue assignment
+void test_cond_const_true_lvalue() {
+ int a = 1;
+ int b = 2;
+ (true ? a : b) = 99;
+}
+
+// CIR-LABEL: cir.func{{.*}} @_Z27test_cond_const_true_lvaluev(
+// CIR: %[[A:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
+// CIR: %[[B:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b", init]
+// CIR-NOT: cir.ternary
+// CIR: %[[NINETYNINE:.*]] = cir.const #cir.int<99> : !s32i
+// CIR: cir.store{{.*}} %[[NINETYNINE]], %[[A]] : !s32i, !cir.ptr<!s32i>
+
+// LLVM-LABEL: define{{.*}} void @_Z27test_cond_const_true_lvaluev(
+// LLVM: %[[A:.*]] = alloca i32
+// LLVM: %[[B:.*]] = alloca i32
+// LLVM-NOT: br i1
+// LLVM: store i32 99, ptr %[[A]]
+
+// OGCG-LABEL: define{{.*}} void @_Z27test_cond_const_true_lvaluev(
+// OGCG: %[[A:.*]] = alloca i32
+// OGCG: %[[B:.*]] = alloca i32
+// OGCG-NOT: br i1
+// OGCG: store i32 99, ptr %[[A]]
+
+// Test constant folding - compile-time false condition with lvalue assignment
+void test_cond_const_false_lvalue() {
+ int a = 1;
+ int b = 2;
+ (false ? a : b) = 88;
+}
+
+// CIR-LABEL: cir.func{{.*}} @_Z28test_cond_const_false_lvaluev(
+// CIR: %[[A:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
+// CIR: %[[B:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b", init]
+// CIR-NOT: cir.ternary
+// CIR: %[[EIGHTYEIGHT:.*]] = cir.const #cir.int<88> : !s32i
+// CIR: cir.store{{.*}} %[[EIGHTYEIGHT]], %[[B]] : !s32i, !cir.ptr<!s32i>
+
+// LLVM-LABEL: define{{.*}} void @_Z28test_cond_const_false_lvaluev(
+// LLVM: %[[A:.*]] = alloca i32
+// LLVM: %[[B:.*]] = alloca i32
+// LLVM-NOT: br i1
+// LLVM: store i32 88, ptr %[[B]]
+
+// OGCG-LABEL: define{{.*}} void @_Z28test_cond_const_false_lvaluev(
+// OGCG: %[[A:.*]] = alloca i32
+// OGCG: %[[B:.*]] = alloca i32
+// OGCG-NOT: br i1
+// OGCG: store i32 88, ptr %[[B]]
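The ternary.cpp additions check two properties of the conditional operator: with lvalue arms of the same type the whole expression is an lvalue (usable as an assignment or compound-assignment target, and as a reference initializer), and with a constant condition the branch is folded away so both pipelines emit a direct store to the selected variable. A minimal sketch of what the folded cases reduce to (names are illustrative):

void cond_lvalue_demo(bool flag) {
  int a = 1, b = 2;
  (flag ? a : b) = 42;  // store through whichever lvalue the condition selects
  (true ? a : b) = 99;  // constant condition: folds to a = 99; with no branch
  (false ? a : b) = 88; // folds to b = 88;
}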
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp
index ee4fffe..c1c2e4b 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-default-ops.cpp
@@ -161,7 +161,78 @@ void acc_combined() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: }
for(int i = 0; i < 5; ++i);
@@ -188,7 +259,78 @@ void acc_combined() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: }
for(int i = 0; i < 5; ++i);
@@ -724,7 +866,100 @@ void acc_combined() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
// CHECK-NEXT: }
for(int i = 0; i < 5; ++i);
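The combiner regions that replace the TODO lines implement the reduction operator field-by-field: a cir.cmp(lt, lhs, rhs) feeds a cir.ternary that selects the winning value and stores it back into the LHS member (the max combiners keep the RHS when lhs < rhs, the min combiners keep the LHS), with bool members compared through bool_to_int casts. Array operands get the same per-field combine wrapped in a cir.for over the five elements, and bounded array sections iterate from the lower to the upper bound instead. A C++ sketch of the max combine the generated IR performs on one struct (the struct and field names follow the test; the helper name is illustrative):

struct DefaultOperators {
  int i;
  unsigned u;
  float f;
  double d;
  bool b;
};

// Keep the larger of lhs and rhs in lhs, member by member; bools compare as int.
void combine_max(DefaultOperators &lhs, const DefaultOperators &rhs) {
  lhs.i = lhs.i < rhs.i ? rhs.i : lhs.i;
  lhs.u = lhs.u < rhs.u ? rhs.u : lhs.u;
  lhs.f = lhs.f < rhs.f ? rhs.f : lhs.f;
  lhs.d = lhs.d < rhs.d ? rhs.d : lhs.d;
  lhs.b = (int)lhs.b < (int)rhs.b ? rhs.b : lhs.b;
}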
@@ -826,7 +1061,100 @@ void acc_combined() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
// CHECK-NEXT: }
for(int i = 0; i < 5; ++i);
@@ -1510,6 +1838,106 @@ void acc_combined() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+//
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
// CHECK-NEXT: }
for(int i = 0; i < 5; ++i);
@@ -1558,6 +1986,106 @@ void acc_combined() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+//
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
// CHECK-NEXT: }
for(int i = 0; i < 5; ++i);
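The two DefaultOperators recipes above differ only in which operand the ternary's true branch yields: the <max> combiner keeps the RHS element when lhs < rhs, while the <min> combiner keeps the LHS. As a reading aid, here is a minimal C++ sketch of the element-wise semantics the <max> CHECK lines verify; DefaultOperators mirrors the struct declared earlier in the test, and the function name is illustrative, not part of the patch:

// Illustrative only: the element-wise semantics of the <max> combiner
// checked above. DefaultOperators mirrors the struct used by the test.
struct DefaultOperators {
  int i;
  unsigned u;
  float f;
  double d;
  bool b;
};

void combine_max(DefaultOperators (&lhs)[5], DefaultOperators (&rhs)[5]) {
  for (unsigned long k = 0; k < 5; ++k) {
    // Each field keeps the larger value; the "lhs < rhs ? rhs : lhs"
    // shape matches the cir.ternary regions in the CHECK lines.
    lhs[k].i = lhs[k].i < rhs[k].i ? rhs[k].i : lhs[k].i;
    lhs[k].u = lhs[k].u < rhs[k].u ? rhs[k].u : lhs[k].u;
    lhs[k].f = lhs[k].f < rhs[k].f ? rhs[k].f : lhs[k].f;
    lhs[k].d = lhs[k].d < rhs[k].d ? rhs[k].d : lhs[k].d;
    // bools are compared after promotion to int, which is what the two
    // cir.cast bool_to_int operations in the CHECK lines correspond to.
    lhs[k].b = lhs[k].b < rhs[k].b ? rhs[k].b : lhs[k].b;
  }
}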
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp
index 472e4ac..853f345 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-float.cpp
@@ -46,7 +46,17 @@ void acc_combined() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHSARG]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHSARG]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHSARG]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -59,7 +69,17 @@ void acc_combined() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHSARG]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHSARG]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHSARG]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -227,7 +247,38 @@ void acc_combined() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -258,7 +309,38 @@ void acc_combined() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -484,7 +566,42 @@ void acc_combined() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -520,7 +637,42 @@ void acc_combined() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
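For the array-section cases in this file, the combiner iterates only over the subscripted range rather than all five elements. A hedged C++ sketch of that bounds-driven loop, with lb and ub standing in for the values produced by acc.get_lowerbound and acc.get_upperbound (both names here are illustrative):

// Illustrative only: the bounds-driven combiner the CHECK lines above
// verify for a float array section; lb and ub stand in for the results
// of acc.get_lowerbound and acc.get_upperbound.
void combine_max_section(float *lhs, float *rhs,
                         unsigned long lb, unsigned long ub) {
  for (unsigned long k = lb; k < ub; ++k)
    lhs[k] = lhs[k] < rhs[k] ? rhs[k] : lhs[k];
}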
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp
index 112ff656..67e8460 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-inline-ops.cpp
@@ -17,7 +17,7 @@ struct HasOperatorsInline {
bool &operator&&(HasOperatorsInline& other);
bool &operator||(HasOperatorsInline& other);
// For min/max
- HasOperatorsInline &operator<(HasOperatorsInline& other);
+ bool operator<(HasOperatorsInline& other);
HasOperatorsInline &operator=(HasOperatorsInline& other);
};
@@ -110,7 +110,13 @@ void acc_combined() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZN18HasOperatorsInlineltERS_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[RHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineaSERS_(%[[LHSARG]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
@@ -119,7 +125,7 @@ void acc_combined() {
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
#pragma acc parallel loop reduction(min:someVar)
-// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <min> init {
+// CHECK: acc.reduction.recipe @reduction_min__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
@@ -141,7 +147,13 @@ void acc_combined() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZN18HasOperatorsInlineltERS_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[RHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineaSERS_(%[[LHSARG]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
@@ -631,7 +643,34 @@ void acc_combined() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZN18HasOperatorsInlineltERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
@@ -753,7 +792,34 @@ void acc_combined() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZN18HasOperatorsInlineltERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
@@ -1528,6 +1594,38 @@ void acc_combined() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZN18HasOperatorsInlineltERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
@@ -1605,6 +1703,38 @@ void acc_combined() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZN18HasOperatorsInlineltERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
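For a type with user-defined operators, the combiner cannot load and compare raw values; it calls operator< (which is why the declaration at the top of this file's diff is changed to return bool), selects a pointer with cir.ternary, and writes the winner back through the copy-assignment operator. A sketch under those assumptions, restating the test's declarations so it stands alone:

// Illustrative only, restating the test's declarations: operator< now
// returns bool, so its result can drive the selection directly.
struct HasOperatorsInline {
  int i; unsigned u; float f; double d; bool b;
  bool operator<(HasOperatorsInline &other);
  HasOperatorsInline &operator=(HasOperatorsInline &other);
};

void combine_min(HasOperatorsInline &lhs, HasOperatorsInline &rhs) {
  // <min>: the true branch keeps lhs, matching the cir.ternary above;
  // <max> simply swaps the two branches.
  HasOperatorsInline &winner = lhs < rhs ? lhs : rhs;
  lhs = winner; // lowers to the call to @_ZN18HasOperatorsInlineaSERS_
}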
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp
index 7eaa822b..d74de82 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-int.cpp
@@ -47,7 +47,17 @@ void acc_combined() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHSARG]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHSARG]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHSARG]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -60,7 +70,17 @@ void acc_combined() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHSARG]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHSARG]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHSARG]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -280,7 +300,38 @@ void acc_combined() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -311,7 +362,38 @@ void acc_combined() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -700,7 +782,42 @@ void acc_combined() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -736,7 +853,42 @@ void acc_combined() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
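The scalar and array int recipes in this file show the same max/min pair once more; the only difference between the two operators is which operand the true branch returns. A compact, illustrative restatement:

// Illustrative only: both recipes compare with "lt"; max yields the RHS
// on true, min yields the LHS, exactly as the cir.ternary regions show.
int combine_max(int l, int r) { return l < r ? r : l; }
int combine_min(int l, int r) { return l < r ? l : r; }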
diff --git a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp
index c2c0c77..a6df6c0 100644
--- a/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/combined-reduction-clause-outline-ops.cpp
@@ -7,7 +7,7 @@ struct HasOperatorsOutline {
bool b;
~HasOperatorsOutline();
-HasOperatorsOutline &operator=(const HasOperatorsOutline &);
+ HasOperatorsOutline &operator=(const HasOperatorsOutline &);
};
HasOperatorsOutline &operator+=(HasOperatorsOutline &, HasOperatorsOutline &);
@@ -18,7 +18,7 @@ HasOperatorsOutline &operator^=(HasOperatorsOutline &, HasOperatorsOutline &);
bool &operator&&(HasOperatorsOutline &, HasOperatorsOutline &);
bool &operator||(HasOperatorsOutline &, HasOperatorsOutline &);
// For min/max
-HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &);
+bool operator<(HasOperatorsOutline &, HasOperatorsOutline &);
template<typename T>
void acc_combined() {
@@ -109,7 +109,13 @@ void acc_combined() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZltR19HasOperatorsOutlineS0_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[RHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineaSERKS_(%[[LHSARG]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
@@ -118,7 +124,7 @@ void acc_combined() {
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
#pragma acc parallel loop reduction(min:someVar)
-// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <min> init {
+// CHECK: acc.reduction.recipe @reduction_min__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
@@ -140,7 +146,13 @@ void acc_combined() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZltR19HasOperatorsOutlineS0_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[RHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineaSERKS_(%[[LHSARG]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
@@ -630,7 +642,34 @@ void acc_combined() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZltR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
@@ -752,7 +791,34 @@ void acc_combined() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZltR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
@@ -1527,6 +1593,38 @@ void acc_combined() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZltR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
@@ -1604,6 +1702,38 @@ void acc_combined() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZltR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c
index b439623..d65d5d4 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.c
@@ -160,7 +160,81 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RES_CAST:.*]] = cir.cast bool_to_int %[[RESULT]] : !cir.bool -> !s32i
+// CHECK-NEXT: cir.yield %[[RES_CAST]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RES_CAST:.*]] = cir.cast bool_to_int %[[RESULT]] : !cir.bool -> !s32i
+// CHECK-NEXT: cir.yield %[[RES_CAST]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: %[[RES_CAST:.*]] = cir.cast int_to_bool %[[TERNARY]] : !s32i -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[RES_CAST]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: }
;
@@ -187,7 +261,81 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RES_CAST:.*]] = cir.cast bool_to_int %[[RESULT]] : !cir.bool -> !s32i
+// CHECK-NEXT: cir.yield %[[RES_CAST]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RES_CAST:.*]] = cir.cast bool_to_int %[[RESULT]] : !cir.bool -> !s32i
+// CHECK-NEXT: cir.yield %[[RES_CAST]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: %[[RES_CAST:.*]] = cir.cast int_to_bool %[[TERNARY]] : !s32i -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[RES_CAST]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: }
;
@@ -710,7 +858,103 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RES_CAST:.*]] = cir.cast bool_to_int %[[RESULT]] : !cir.bool -> !s32i
+// CHECK-NEXT: cir.yield %[[RES_CAST]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RES_CAST:.*]] = cir.cast bool_to_int %[[RESULT]] : !cir.bool -> !s32i
+// CHECK-NEXT: cir.yield %[[RES_CAST]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: %[[RES_CAST:.*]] = cir.cast int_to_bool %[[TERNARY]] : !s32i -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[RES_CAST]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
// CHECK-NEXT: }
;
@@ -812,7 +1056,104 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RES_CAST:.*]] = cir.cast bool_to_int %[[RESULT]] : !cir.bool -> !s32i
+// CHECK-NEXT: cir.yield %[[RES_CAST]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RES_CAST:.*]] = cir.cast bool_to_int %[[RESULT]] : !cir.bool -> !s32i
+// CHECK-NEXT: cir.yield %[[RES_CAST]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: %[[RES_CAST:.*]] = cir.cast int_to_bool %[[TERNARY]] : !s32i -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[RES_CAST]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
+//
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
// CHECK-NEXT: }
;
@@ -1467,6 +1808,110 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+//
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RES_CAST:.*]] = cir.cast bool_to_int %[[RESULT]] : !cir.bool -> !s32i
+// CHECK-NEXT: cir.yield %[[RES_CAST]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RES_CAST:.*]] = cir.cast bool_to_int %[[RESULT]] : !cir.bool -> !s32i
+// CHECK-NEXT: cir.yield %[[RES_CAST]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: %[[RES_CAST:.*]] = cir.cast int_to_bool %[[TERNARY]] : !s32i -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[RES_CAST]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
+//
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
// CHECK-NEXT: }
;
@@ -1515,6 +1960,110 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+//
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RES_CAST:.*]] = cir.cast bool_to_int %[[RESULT]] : !cir.bool -> !s32i
+// CHECK-NEXT: cir.yield %[[RES_CAST]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RES_CAST:.*]] = cir.cast bool_to_int %[[RESULT]] : !cir.bool -> !s32i
+// CHECK-NEXT: cir.yield %[[RES_CAST]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: %[[RES_CAST:.*]] = cir.cast int_to_bool %[[TERNARY]] : !s32i -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[RES_CAST]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
// CHECK-NEXT: }
;
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp
index f997902..f32fa2d 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-default-ops.cpp
@@ -161,7 +161,78 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: }
;
@@ -188,7 +259,78 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: }
;
@@ -724,7 +866,100 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
// CHECK-NEXT: }
;
@@ -826,7 +1061,100 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
// CHECK-NEXT: }
;
@@ -1510,6 +1838,106 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+//
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
// CHECK-NEXT: }
;
@@ -1558,6 +1986,106 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+//
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
// CHECK-NEXT: }
;
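For reference, a minimal sketch (not part of the patch) of the kind of source the new combiner checks above correspond to, assuming the usual shape of the compute-reduction-clause tests; the function and variable names are illustrative, not taken from the test file. In the emitted combiners, a cir.cmp(lt, ...) whose ternary yields the RHS on true implements reduction(max:...), and one that yields the LHS implements reduction(min:...); for a bool member in the C variant the ternary is materialized in !s32i and cast back with int_to_bool, matching C conditional-operator semantics.

/* Illustrative sketch only: mirrors the struct, array, and
 * array-section reductions these combiner regions are checked for. */
#include <stdbool.h>

struct DefaultOperators { int i; unsigned u; float f; double d; bool b; };

void acc_compute_sketch(void) {
  struct DefaultOperators someVar;
  struct DefaultOperators someVarArr[5];
#pragma acc parallel reduction(max:someVar)       /* per-member max combiner */
  ;
#pragma acc parallel reduction(min:someVarArr)    /* cir.for over all 5 elements */
  ;
#pragma acc parallel reduction(max:someVarArr[1:1]) /* bounds-driven cir.for loop */
  ;
}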
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c
index 3e4583f..9f73367 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.c
@@ -45,7 +45,18 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHSARG]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHSARG]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHSARG]] : !cir.float, !cir.ptr<!cir.float>
+//
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
// CHECK-NEXT: }
;
@@ -58,7 +69,17 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHSARG]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHSARG]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHSARG]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
// CHECK-NEXT: }
;
@@ -226,7 +247,38 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
// CHECK-NEXT: }
;
@@ -257,7 +309,38 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
// CHECK-NEXT: }
;
@@ -483,7 +566,42 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
// CHECK-NEXT: }
;
@@ -519,7 +637,42 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
// CHECK-NEXT: }
;
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp
index 833cfad..ffd2631 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-float.cpp
@@ -47,7 +47,17 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHSARG]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHSARG]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHSARG]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
// CHECK-NEXT: }
;
@@ -60,7 +70,17 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHSARG]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHSARG]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHSARG]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
// CHECK-NEXT: }
;
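Note on the scalar combiners added above: they encode the usual compare-and-select idiom. As a rough C++ sketch of what the generated CIR computes (the function names here are illustrative, not part of the patch):

    // Sketch only: each recipe receives pointers to the accumulator (lhs)
    // and the incoming partial result (rhs), writes the winner back
    // through lhs, and yields lhs.
    static float *combine_max(float *lhs, float *rhs) {
      *lhs = (*lhs < *rhs) ? *rhs : *lhs; // cir.ternary keeps the larger value
      return lhs;                         // acc.yield %LHSARG
    }
    static float *combine_min(float *lhs, float *rhs) {
      *lhs = (*lhs < *rhs) ? *lhs : *rhs; // cir.ternary keeps the smaller value
      return lhs;
    }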
@@ -228,7 +248,38 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
// CHECK-NEXT: }
;
@@ -259,7 +310,38 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
// CHECK-NEXT: }
;
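For whole-array operands the same select is applied element by element; the generated cir.for corresponds roughly to the following C++ loop, where the fixed trip count of 5 matches the !cir.array<... x 5> type (sketch only, names illustrative):

    // Sketch only: element-wise max over a float[5] reduction operand.
    static void combine_max_array(float *lhs, float *rhs) {
      for (long i = 0; i < 5; ++i)  // "itr" alloca + cir.for over [0, 5)
        lhs[i] = (lhs[i] < rhs[i]) ? rhs[i] : lhs[i];
    }

The min recipe differs only in swapping the two arms of the select, as in the scalar case.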
@@ -485,7 +567,42 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
// CHECK-NEXT: }
;
@@ -521,6 +638,42 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
// CHECK-NEXT: }
;
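When the operand is an array section, the trip count is not a compile-time constant: the combiner instead reads the bounds out of the !acc.data_bounds_ty block argument. Roughly, assuming lb and ub stand for the values produced by acc.get_lowerbound and acc.get_upperbound after the cast to !u64i (sketch only):

    // Sketch only: bounded element-wise max over an array section.
    static void combine_max_section(float *lhs, float *rhs,
                                    unsigned long lb, unsigned long ub) {
      for (unsigned long i = lb; i < ub; ++i)  // cir.for over [lb, ub)
        lhs[i] = (lhs[i] < rhs[i]) ? rhs[i] : lhs[i];
    }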
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp
index ec4372d..1e367ee 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-inline-ops.cpp
@@ -17,7 +17,7 @@ struct HasOperatorsInline {
bool &operator&&(HasOperatorsInline& other);
bool &operator||(HasOperatorsInline& other);
// For min/max
- HasOperatorsInline &operator<(HasOperatorsInline& other);
+ bool operator<(HasOperatorsInline& other);
HasOperatorsInline &operator=(HasOperatorsInline& other);
};
@@ -110,7 +110,13 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZN18HasOperatorsInlineltERS_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[RHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineaSERS_(%[[LHSARG]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
@@ -119,7 +125,7 @@ void acc_compute() {
// CHECK-NEXT: }
;
#pragma acc parallel reduction(min:someVar)
-// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <min> init {
+// CHECK: acc.reduction.recipe @reduction_min__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
@@ -141,7 +147,13 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZN18HasOperatorsInlineltERS_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[RHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineaSERS_(%[[LHSARG]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
@@ -631,7 +643,34 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZN18HasOperatorsInlineltERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
@@ -753,7 +792,34 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZN18HasOperatorsInlineltERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
@@ -1528,6 +1594,38 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZN18HasOperatorsInlineltERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
@@ -1605,6 +1703,38 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZN18HasOperatorsInlineltERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
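For the record type the combiner follows the same shape, but both the comparison and the write-back go through user-declared operators: the operator< that this patch changes to return bool drives the select, and operator= copies the winner back into the accumulator. A rough C++ equivalent (sketch only):

    // Sketch only: record-typed max combiner via the class's own operators.
    static HasOperatorsInline *combine_max(HasOperatorsInline *lhs,
                                           HasOperatorsInline *rhs) {
      HasOperatorsInline &winner = (*lhs < *rhs) ? *rhs : *lhs; // operator<
      *lhs = winner;                                            // operator=
      return lhs;                                               // acc.yield
    }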
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c
index 0cee5c6..2f42a5c 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.c
@@ -46,7 +46,17 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHSARG]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHSARG]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHSARG]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
// CHECK-NEXT: }
;
@@ -59,7 +69,17 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHSARG]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHSARG]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHSARG]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
// CHECK-NEXT: }
;
@@ -279,7 +299,38 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
// CHECK-NEXT: }
;
@@ -310,7 +361,38 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
// CHECK-NEXT: }
;
@@ -699,7 +781,42 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
// CHECK-NEXT: }
;
@@ -735,7 +852,42 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
// CHECK-NEXT: }
;
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp
index 822dd9f6..af7bcf3 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-int.cpp
@@ -47,7 +47,17 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHSARG]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHSARG]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHSARG]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
// CHECK-NEXT: }
;
@@ -60,7 +70,17 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHSARG]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHSARG]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHSARG]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
// CHECK-NEXT: }
;
@@ -280,7 +300,38 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
// CHECK-NEXT: }
;
@@ -311,7 +362,38 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
// CHECK-NEXT: }
;
@@ -700,7 +782,42 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
// CHECK-NEXT: }
;
@@ -736,7 +853,42 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
// CHECK-NEXT: }
;
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp
index 873bf51..ec890e2 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-outline-ops.cpp
@@ -18,7 +18,7 @@ HasOperatorsOutline &operator^=(HasOperatorsOutline &, HasOperatorsOutline &);
bool &operator&&(HasOperatorsOutline &, HasOperatorsOutline &);
bool &operator||(HasOperatorsOutline &, HasOperatorsOutline &);
// For min/max
-HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &);
+bool operator<(HasOperatorsOutline &, HasOperatorsOutline &);
template<typename T>
void acc_compute() {
@@ -109,7 +109,13 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZltR19HasOperatorsOutlineS0_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[RHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineaSERKS_(%[[LHSARG]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
@@ -118,7 +124,7 @@ void acc_compute() {
// CHECK-NEXT: }
;
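For reference, the combiner checked above reduces to the following C++ sketch for a type with user-declared operator< and operator= (the function name is illustrative, not part of the test): operator< selects the winning element, operator= stores it back into the LHS.

// Sketch only: mirrors the call/ternary/call sequence in the CHECK lines.
HasOperatorsOutline &combine_max(HasOperatorsOutline &lhs,
                                 HasOperatorsOutline &rhs) {
  lhs = (lhs < rhs) ? rhs : lhs; // operator< compares, operator= assigns
  return lhs;                    // recipe yields the LHS argument
}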
#pragma acc parallel reduction(min:someVar)
-// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <min> init {
+// CHECK: acc.reduction.recipe @reduction_min__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
@@ -140,7 +146,13 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZltR19HasOperatorsOutlineS0_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[RHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineaSERKS_(%[[LHSARG]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
@@ -630,7 +642,34 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZltR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
@@ -752,7 +791,34 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZltR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
@@ -1527,6 +1593,38 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZltR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
@@ -1604,6 +1702,38 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZltR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
diff --git a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c
index b2d1362..08daa70 100644
--- a/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c
+++ b/clang/test/CIR/CodeGenOpenACC/compute-reduction-clause-unsigned-int.c
@@ -46,7 +46,17 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!u32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!u32i> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!u32i>
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!u32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHSARG]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHSARG]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHSARG]] : !u32i, !cir.ptr<!u32i>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!u32i>
// CHECK-NEXT: }
;
@@ -59,7 +69,17 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!u32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!u32i> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!u32i>
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!u32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHSARG]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHSARG]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHSARG]] : !u32i, !cir.ptr<!u32i>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!u32i>
// CHECK-NEXT: }
;
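Semantically, the two scalar combiners above are max and then min over unsigned; a minimal C++ sketch (names illustrative, not from the test):

// Load both operands, compare with lt, select, store back through LHS.
void combine_max(unsigned *lhs, unsigned *rhs) { *lhs = *lhs < *rhs ? *rhs : *lhs; }
void combine_min(unsigned *lhs, unsigned *rhs) { *lhs = *lhs < *rhs ? *lhs : *rhs; }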
@@ -278,7 +298,38 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!u32i>, !s64i) -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!u32i>, !s64i) -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !u32i, !cir.ptr<!u32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!u32i x 5>>
// CHECK-NEXT: }
;
@@ -309,7 +360,38 @@ void acc_compute() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!u32i>, !s64i) -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!u32i>, !s64i) -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !u32i, !cir.ptr<!u32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!u32i x 5>>
// CHECK-NEXT: }
;
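The array combiners above walk all five elements with an s64 induction variable and apply the same select-and-store per element; roughly, as a C++ sketch (name illustrative):

// Max over each element pair; the min variant swaps the ternary arms.
void combine_max_array(unsigned lhs[5], unsigned rhs[5]) {
  for (long i = 0; i < 5; ++i)
    lhs[i] = lhs[i] < rhs[i] ? rhs[i] : lhs[i];
}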
@@ -698,7 +780,42 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!u32i>, !u64i) -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!u32i>, !u64i) -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !u32i, !cir.ptr<!u32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!u32i x 5>>
// CHECK-NEXT: }
;
@@ -734,7 +851,42 @@ void acc_compute() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!u32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!u32i>, !u64i) -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!u32i x 5>> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!u32i>, !u64i) -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !u32i, !cir.ptr<!u32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!u32i x 5>>
// CHECK-NEXT: }
;
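The bounds variants differ only in the trip range: acc.get_lowerbound and acc.get_upperbound feed the loop instead of the constant extent. A C++ sketch, with lb/ub standing in for the bound queries (assumed, not test source):

// Element-wise max restricted to the [lb, ub) sub-range of the array.
void combine_max_bounds(unsigned lhs[5], unsigned rhs[5],
                        unsigned long lb, unsigned long ub) {
  for (unsigned long i = lb; i < ub; ++i)
    lhs[i] = lhs[i] < rhs[i] ? rhs[i] : lhs[i];
}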
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp
index 349e0fb..1a77c0f 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-default-ops.cpp
@@ -161,7 +161,78 @@ void acc_loop() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: }
for(int i = 0; i < 5; ++i);
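The struct combiner above is member-wise; reading the get_member indices back out of the checks gives this C++ sketch (layout inferred from the CHECK lines, names illustrative):

struct DefaultOperators { int i; unsigned u; float f; double d; bool b; };

// Per-member max; each member follows the same cmp/ternary/store pattern.
void combine_max(DefaultOperators *lhs, DefaultOperators *rhs) {
  lhs->i = lhs->i < rhs->i ? rhs->i : lhs->i;
  lhs->u = lhs->u < rhs->u ? rhs->u : lhs->u;
  lhs->f = lhs->f < rhs->f ? rhs->f : lhs->f;
  lhs->d = lhs->d < rhs->d ? rhs->d : lhs->d;
  lhs->b = lhs->b < rhs->b ? rhs->b : lhs->b; // bools compared after int promotion
}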
@@ -188,7 +259,78 @@ void acc_loop() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_DefaultOperators> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHSARG]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_DefaultOperators>
// CHECK-NEXT: }
for(int i = 0; i < 5; ++i);
@@ -724,7 +866,100 @@ void acc_loop() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
// CHECK-NEXT: }
for(int i = 0; i < 5; ++i);
@@ -826,7 +1061,100 @@ void acc_loop() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !s64i) -> !cir.ptr<!rec_DefaultOperators>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
// CHECK-NEXT: }
for(int i = 0; i < 5; ++i);
@@ -1510,6 +1838,106 @@ void acc_loop() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+//
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
// CHECK-NEXT: }
for(int i = 0; i < 5; ++i);
@@ -1558,6 +1986,106 @@ void acc_loop() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+//
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>> -> !cir.ptr<!rec_DefaultOperators>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_DefaultOperators>, !u64i) -> !cir.ptr<!rec_DefaultOperators>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][0] {name = "i"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !s32i, !cir.ptr<!s32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][1] {name = "u"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!u32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !u32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!u32i>, !u32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !u32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !u32i, !cir.ptr<!u32i>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][2] {name = "f"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.float, !cir.ptr<!cir.float>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][3] {name = "d"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.double>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.double, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.double>, !cir.double
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.double
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.double, !cir.ptr<!cir.double>
+//
+// CHECK-NEXT: %[[GET_MEM_LHS:.*]] = cir.get_member %[[LHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[GET_MEM_RHS:.*]] = cir.get_member %[[RHS_STRIDE]][4] {name = "b"} : !cir.ptr<!rec_DefaultOperators> -> !cir.ptr<!cir.bool>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[LHS_CAST:.*]] = cir.cast bool_to_int %[[LHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: %[[RHS_CAST:.*]] = cir.cast bool_to_int %[[RHS_LOAD]] : !cir.bool -> !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_CAST]], %[[RHS_CAST]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_LHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[GET_MEM_RHS]] : !cir.ptr<!cir.bool>, !cir.bool
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.bool
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[GET_MEM_LHS]] : !cir.bool, !cir.ptr<!cir.bool>
+//
+// CHECK-NEXT: cir.yield
+//
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_DefaultOperators x 5>>
// CHECK-NEXT: }
for(int i = 0; i < 5; ++i);
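A minimal C++ sketch of the per-field operation these combiner regions assert, with lhs/rhs standing in for the %[[LHSARG]]/%[[RHSARG]] block arguments (illustrative only, assuming the DefaultOperators fields named above):

    lhs.i = (lhs.i < rhs.i) ? rhs.i : lhs.i;  // max recipe keeps the larger value
    lhs.i = (lhs.i < rhs.i) ? lhs.i : rhs.i;  // min recipe swaps the ternary arms

The same compare-and-select repeats for the "u", "f", "d", and "b" members; the bool member is first widened with bool_to_int so the comparison runs on !s32i.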
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp
index 8d9269b..7faef71 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-float.cpp
@@ -47,7 +47,17 @@ void acc_loop() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHSARG]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHSARG]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHSARG]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -60,7 +70,17 @@ void acc_loop() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.float> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.float> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHSARG]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHSARG]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHSARG]] : !cir.float, !cir.ptr<!cir.float>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.float>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -228,7 +248,38 @@ void acc_loop() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -259,7 +310,38 @@ void acc_loop() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !s64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -485,7 +567,42 @@ void acc_loop() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -521,7 +638,42 @@ void acc_loop() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!cir.float x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>> -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!cir.float>, !u64i) -> !cir.ptr<!cir.float>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !cir.float, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!cir.float>, !cir.float
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.float
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !cir.float, !cir.ptr<!cir.float>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!cir.float x 5>>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
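The bounded combiners in this file only walk the sub-array described by %[[BOUND1]]; a sketch of the loop they encode, assuming lb/ub come from acc.get_lowerbound/acc.get_upperbound (identifiers illustrative):

    for (unsigned long i = lb; i < ub; ++i)
      lhs[i] = (lhs[i] < rhs[i]) ? rhs[i] : lhs[i];  // max; the min recipe yields lhs[i] instead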
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp
index 1c89515..43c9fbbc 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-inline-ops.cpp
@@ -17,7 +17,7 @@ struct HasOperatorsInline {
bool &operator&&(HasOperatorsInline& other);
bool &operator||(HasOperatorsInline& other);
// For min/max
- HasOperatorsInline &operator<(HasOperatorsInline& other);
+ bool operator<(HasOperatorsInline& other);
HasOperatorsInline &operator=(HasOperatorsInline& other);
};
@@ -110,7 +110,13 @@ void acc_loop() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZN18HasOperatorsInlineltERS_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[RHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineaSERS_(%[[LHSARG]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
@@ -119,7 +125,7 @@ void acc_loop() {
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
#pragma acc loop reduction(min:someVar)
-// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <min> init {
+// CHECK: acc.reduction.recipe @reduction_min__ZTS18HasOperatorsInline : !cir.ptr<!rec_HasOperatorsInline> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsInline, !cir.ptr<!rec_HasOperatorsInline>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsInline> -> !cir.ptr<!s32i>
@@ -141,7 +147,13 @@ void acc_loop() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZN18HasOperatorsInlineltERS_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[RHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineaSERS_(%[[LHSARG]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsInline>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsInline> {{.*}}):
@@ -631,7 +643,34 @@ void acc_loop() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZN18HasOperatorsInlineltERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
@@ -753,7 +792,34 @@ void acc_loop() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !s64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZN18HasOperatorsInlineltERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}):
@@ -1528,6 +1594,38 @@ void acc_loop() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZN18HasOperatorsInlineltERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
@@ -1605,6 +1703,38 @@ void acc_loop() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsInline>, !u64i) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZN18HasOperatorsInlineltERS_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.call @_ZN18HasOperatorsInlineaSERS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsInline>, !cir.ptr<!rec_HasOperatorsInline>) -> !cir.ptr<!rec_HasOperatorsInline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsInline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
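For these record-type recipes the combiner goes through the user-declared operators rather than plain loads and stores; each element combine is roughly the following C++ (a sketch, names illustrative):

    // invokes HasOperatorsInline::operator< (now returning bool) and then operator=
    lhs = (lhs < rhs) ? rhs : lhs;  // max; the min recipe flips the ternary arms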
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp
index 72e9d1f..5353218 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-int.cpp
@@ -47,7 +47,17 @@ void acc_loop() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHSARG]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHSARG]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHSARG]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -60,7 +70,17 @@ void acc_loop() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!s32i> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!s32i> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHSARG]] : !cir.ptr<!s32i>
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHSARG]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHSARG]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHSARG]] : !s32i, !cir.ptr<!s32i>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!s32i>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -280,7 +300,38 @@ void acc_loop() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -311,7 +362,38 @@ void acc_loop() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"]
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[MAX_IDX:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[ITR_CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[MAX_IDX]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[ITR_CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !s64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -700,7 +782,42 @@ void acc_loop() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
@@ -736,7 +853,42 @@ void acc_loop() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"]
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!s32i x 5>> -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!s32i>, !u64i) -> !cir.ptr<!s32i>
+// CHECK-NEXT: %[[LHS_LOAD:.*]] = cir.load {{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[RHS_LOAD:.*]] = cir.load {{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[LHS_LOAD]], %[[RHS_LOAD]]) : !s32i, !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[CMP]], true {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[LHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: %[[RESULT:.*]] = cir.load{{.*}} %[[RHS_STRIDE]] : !cir.ptr<!s32i>, !s32i
+// CHECK-NEXT: cir.yield %[[RESULT]]
+// CHECK-NEXT: }) : (!cir.bool) -> !s32i
+// CHECK-NEXT: cir.store{{.*}} %[[TERNARY]], %[[LHS_STRIDE]] : !s32i, !cir.ptr<!s32i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!s32i x 5>>
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
diff --git a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp
index a36d41c1..e193cfa 100644
--- a/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp
+++ b/clang/test/CIR/CodeGenOpenACC/loop-reduction-clause-outline-ops.cpp
@@ -18,7 +18,7 @@ HasOperatorsOutline &operator^=(HasOperatorsOutline &, HasOperatorsOutline &);
bool &operator&&(HasOperatorsOutline &, HasOperatorsOutline &);
bool &operator||(HasOperatorsOutline &, HasOperatorsOutline &);
// For min/max
-HasOperatorsOutline &operator<(HasOperatorsOutline &, HasOperatorsOutline &);
+bool operator<(HasOperatorsOutline &, HasOperatorsOutline &);
template<typename T>
void acc_loop() {
@@ -109,7 +109,13 @@ void acc_loop() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZltR19HasOperatorsOutlineS0_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[RHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineaSERKS_(%[[LHSARG]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
@@ -118,7 +124,7 @@ void acc_loop() {
// CHECK-NEXT: }
for(int i=0;i < 5; ++i);
#pragma acc loop reduction(min:someVar)
-// CHECK-NEXT: acc.reduction.recipe @reduction_min__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <min> init {
+// CHECK: acc.reduction.recipe @reduction_min__ZTS19HasOperatorsOutline : !cir.ptr<!rec_HasOperatorsOutline> reduction_operator <min> init {
// CHECK-NEXT: ^bb0(%[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline>{{.*}})
// CHECK-NEXT: %[[ALLOCA:.*]] = cir.alloca !rec_HasOperatorsOutline, !cir.ptr<!rec_HasOperatorsOutline>, ["openacc.reduction.init", init]
// CHECK-NEXT: %[[GET_I:.*]] = cir.get_member %[[ALLOCA]][0] {name = "i"} : !cir.ptr<!rec_HasOperatorsOutline> -> !cir.ptr<!s32i>
@@ -140,7 +146,13 @@ void acc_loop() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZltR19HasOperatorsOutlineS0_(%[[LHSARG]], %[[RHSARG]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[RHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineaSERKS_(%[[LHSARG]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!rec_HasOperatorsOutline>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}, %[[ARG:.*]]: !cir.ptr<!rec_HasOperatorsOutline> {{.*}}):
@@ -630,7 +642,34 @@ void acc_loop() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZltR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
@@ -752,7 +791,34 @@ void acc_loop() {
//
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}})
-// TODO OpenACC: Expecting combination operation here
+// CHECK-NEXT: %[[ZERO:.*]] = cir.const #cir.int<0> : !s64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !s64i, !cir.ptr<!s64i>, ["itr"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[ZERO]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[END_VAL:.*]] = cir.const #cir.int<5> : !s64i
+// CHECK-NEXT: %[[CMP:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[END_VAL]]) : !s64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[CMP]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !s64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZltR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!s64i>, !s64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !s64i, !s64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !s64i, !cir.ptr<!s64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}):
@@ -1527,6 +1593,38 @@ void acc_loop() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZltR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
@@ -1604,6 +1702,38 @@ void acc_loop() {
// CHECK-NEXT: acc.yield
// CHECK-NEXT: } combiner {
// CHECK-NEXT: ^bb0(%[[LHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[RHSARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}))
+// CHECK-NEXT: cir.scope {
+// CHECK-NEXT: %[[LB:.*]] = acc.get_lowerbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[LB_CAST:.*]] = builtin.unrealized_conversion_cast %[[LB]] : index to !u64i
+// CHECK-NEXT: %[[UB:.*]] = acc.get_upperbound %[[BOUND1]] : (!acc.data_bounds_ty) -> index
+// CHECK-NEXT: %[[UB_CAST:.*]] = builtin.unrealized_conversion_cast %[[UB]] : index to !u64i
+// CHECK-NEXT: %[[ITR:.*]] = cir.alloca !u64i, !cir.ptr<!u64i>, ["iter"] {alignment = 8 : i64}
+// CHECK-NEXT: cir.store %[[LB_CAST]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.for : cond {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[COND:.*]] = cir.cmp(lt, %[[ITR_LOAD]], %[[UB_CAST]]) : !u64i, !cir.bool
+// CHECK-NEXT: cir.condition(%[[COND]])
+// CHECK-NEXT: } body {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[LHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LHS_STRIDE:.*]] = cir.ptr_stride %[[LHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_DECAY:.*]] = cir.cast array_to_ptrdecay %[[RHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[RHS_STRIDE:.*]] = cir.ptr_stride %[[RHS_DECAY]], %[[ITR_LOAD]] : (!cir.ptr<!rec_HasOperatorsOutline>, !u64i) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: %[[LT:.*]] = cir.call @_ZltR19HasOperatorsOutlineS0_(%[[LHS_STRIDE]], %[[RHS_STRIDE]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.bool
+// CHECK-NEXT: %[[TERNARY:.*]] = cir.ternary(%[[LT]], true {
+// CHECK-NEXT: cir.yield %[[LHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }, false {
+// CHECK-NEXT: cir.yield %[[RHS_STRIDE]] : !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: }) : (!cir.bool) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.call @_ZN19HasOperatorsOutlineaSERKS_(%[[LHS_STRIDE]], %[[TERNARY]]) : (!cir.ptr<!rec_HasOperatorsOutline>, !cir.ptr<!rec_HasOperatorsOutline>) -> !cir.ptr<!rec_HasOperatorsOutline>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: } step {
+// CHECK-NEXT: %[[ITR_LOAD:.*]] = cir.load %[[ITR]] : !cir.ptr<!u64i>, !u64i
+// CHECK-NEXT: %[[INC:.*]] = cir.unary(inc, %[[ITR_LOAD]]) : !u64i, !u64i
+// CHECK-NEXT: cir.store %[[INC]], %[[ITR]] : !u64i, !cir.ptr<!u64i>
+// CHECK-NEXT: cir.yield
+// CHECK-NEXT: }
+// CHECK-NEXT: }
// CHECK-NEXT: acc.yield %[[LHSARG]] : !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>>
// CHECK-NEXT: } destroy {
// CHECK-NEXT: ^bb0(%[[ORIG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[ARG:.*]]: !cir.ptr<!cir.array<!rec_HasOperatorsOutline x 5>> {{.*}}, %[[BOUND1:.*]]: !acc.data_bounds_ty{{.*}}):
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1.c
index 19e5243..1ed59c6c 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1.c
@@ -24,12 +24,12 @@
// CHECK-LABEL: @test_svld1_s8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z13test_svld1_s8u10__SVBool_tPKa(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svld1_s8(svbool_t pg, const int8_t *base) MODE_ATTR
@@ -40,13 +40,13 @@ svint8_t test_svld1_s8(svbool_t pg, const int8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_s16u10__SVBool_tPKs(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svld1_s16(svbool_t pg, const int16_t *base) MODE_ATTR
@@ -57,13 +57,13 @@ svint16_t test_svld1_s16(svbool_t pg, const int16_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_s32u10__SVBool_tPKi(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svld1_s32(svbool_t pg, const int32_t *base) MODE_ATTR
@@ -74,13 +74,13 @@ svint32_t test_svld1_s32(svbool_t pg, const int32_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_s64u10__SVBool_tPKl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svld1_s64(svbool_t pg, const int64_t *base) MODE_ATTR
@@ -90,12 +90,12 @@ svint64_t test_svld1_s64(svbool_t pg, const int64_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z13test_svld1_u8u10__SVBool_tPKh(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svld1_u8(svbool_t pg, const uint8_t *base) MODE_ATTR
@@ -106,13 +106,13 @@ svuint8_t test_svld1_u8(svbool_t pg, const uint8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_u16u10__SVBool_tPKt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svld1_u16(svbool_t pg, const uint16_t *base) MODE_ATTR
@@ -123,13 +123,13 @@ svuint16_t test_svld1_u16(svbool_t pg, const uint16_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_u32u10__SVBool_tPKj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svld1_u32(svbool_t pg, const uint32_t *base) MODE_ATTR
@@ -140,13 +140,13 @@ svuint32_t test_svld1_u32(svbool_t pg, const uint32_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_u64u10__SVBool_tPKm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svld1_u64(svbool_t pg, const uint64_t *base) MODE_ATTR
@@ -157,13 +157,13 @@ svuint64_t test_svld1_u64(svbool_t pg, const uint64_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer)
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_f16u10__SVBool_tPKDh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
//
svfloat16_t test_svld1_f16(svbool_t pg, const float16_t *base) MODE_ATTR
@@ -174,13 +174,13 @@ svfloat16_t test_svld1_f16(svbool_t pg, const float16_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer)
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_f32u10__SVBool_tPKf(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
svfloat32_t test_svld1_f32(svbool_t pg, const float32_t *base) MODE_ATTR
@@ -191,13 +191,13 @@ svfloat32_t test_svld1_f32(svbool_t pg, const float32_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer)
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_f64u10__SVBool_tPKd(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
svfloat64_t test_svld1_f64(svbool_t pg, const float64_t *base) MODE_ATTR
@@ -207,12 +207,12 @@ svfloat64_t test_svld1_f64(svbool_t pg, const float64_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1_mf8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svld1_mf8u10__SVBool_tPKu6__mfp8(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svmfloat8_t test_svld1_mf8(svbool_t pg, const mfloat8_t *base) MODE_ATTR
@@ -226,7 +226,7 @@ svmfloat8_t test_svld1_mf8(svbool_t pg, const mfloat8_t *base) MODE_ATTR
// CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z18test_svld1_vnum_s8u10__SVBool_tPKal(
@@ -235,7 +235,7 @@ svmfloat8_t test_svld1_mf8(svbool_t pg, const mfloat8_t *base) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP3]]
//
svint8_t test_svld1_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum) MODE_ATTR
@@ -250,7 +250,7 @@ svint8_t test_svld1_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum) MODE_
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_s16u10__SVBool_tPKsl(
@@ -260,7 +260,7 @@ svint8_t test_svld1_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum) MODE_
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP4]]
//
svint16_t test_svld1_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum) MODE_ATTR
@@ -275,7 +275,7 @@ svint16_t test_svld1_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum) MO
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_s32u10__SVBool_tPKil(
@@ -285,7 +285,7 @@ svint16_t test_svld1_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum) MO
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
//
svint32_t test_svld1_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum) MODE_ATTR
@@ -300,7 +300,7 @@ svint32_t test_svld1_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum) MO
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_s64u10__SVBool_tPKll(
@@ -310,7 +310,7 @@ svint32_t test_svld1_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum) MO
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP4]]
//
svint64_t test_svld1_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum) MODE_ATTR
@@ -324,7 +324,7 @@ svint64_t test_svld1_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum) MO
// CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z18test_svld1_vnum_u8u10__SVBool_tPKhl(
@@ -333,7 +333,7 @@ svint64_t test_svld1_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum) MO
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP3]]
//
svuint8_t test_svld1_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum) MODE_ATTR
@@ -348,7 +348,7 @@ svuint8_t test_svld1_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum) MOD
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_u16u10__SVBool_tPKtl(
@@ -358,7 +358,7 @@ svuint8_t test_svld1_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum) MOD
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP4]]
//
svuint16_t test_svld1_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum) MODE_ATTR
@@ -373,7 +373,7 @@ svuint16_t test_svld1_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_u32u10__SVBool_tPKjl(
@@ -383,7 +383,7 @@ svuint16_t test_svld1_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
//
svuint32_t test_svld1_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum) MODE_ATTR
@@ -398,7 +398,7 @@ svuint32_t test_svld1_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_u64u10__SVBool_tPKml(
@@ -408,7 +408,7 @@ svuint32_t test_svld1_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP4]]
//
svuint64_t test_svld1_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum) MODE_ATTR
@@ -423,7 +423,7 @@ svuint64_t test_svld1_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer)
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_f16u10__SVBool_tPKDhl(
@@ -433,7 +433,7 @@ svuint64_t test_svld1_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP4]]
//
svfloat16_t test_svld1_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum) MODE_ATTR
@@ -448,7 +448,7 @@ svfloat16_t test_svld1_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer)
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_f32u10__SVBool_tPKfl(
@@ -458,7 +458,7 @@ svfloat16_t test_svld1_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP4]]
//
svfloat32_t test_svld1_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum) MODE_ATTR
@@ -473,7 +473,7 @@ svfloat32_t test_svld1_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer)
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_f64u10__SVBool_tPKdl(
@@ -483,7 +483,7 @@ svfloat32_t test_svld1_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP4]]
//
svfloat64_t test_svld1_vnum_f64(svbool_t pg, const float64_t *base, int64_t vnum) MODE_ATTR
@@ -497,7 +497,7 @@ svfloat64_t test_svld1_vnum_f64(svbool_t pg, const float64_t *base, int64_t vnum
// CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z19test_svld1_vnum_mf8u10__SVBool_tPKu6__mfp8l(
@@ -506,7 +506,7 @@ svfloat64_t test_svld1_vnum_f64(svbool_t pg, const float64_t *base, int64_t vnum
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP3]]
//
svmfloat8_t test_svld1_vnum_mf8(svbool_t pg, const mfloat8_t *base, int64_t vnum) MODE_ATTR
@@ -1205,13 +1205,13 @@ svfloat64_t test_svld1_gather_u64base_index_f64(svbool_t pg, svuint64_t bases, i
// CHECK-LABEL: @test_svld1_bf16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x bfloat> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x bfloat> zeroinitializer)
// CHECK-NEXT: ret <vscale x 8 x bfloat> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z15test_svld1_bf16u10__SVBool_tPKu6__bf16(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x bfloat> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x bfloat> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 8 x bfloat> [[TMP1]]
//
svbfloat16_t test_svld1_bf16(svbool_t pg, const bfloat16_t *base) MODE_ATTR
@@ -1226,7 +1226,7 @@ svbfloat16_t test_svld1_bf16(svbool_t pg, const bfloat16_t *base) MODE_ATTR
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x bfloat> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x bfloat> zeroinitializer)
// CHECK-NEXT: ret <vscale x 8 x bfloat> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z20test_svld1_vnum_bf16u10__SVBool_tPKu6__bf16l(
@@ -1236,7 +1236,7 @@ svbfloat16_t test_svld1_bf16(svbool_t pg, const bfloat16_t *base) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x bfloat> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x bfloat> zeroinitializer)
// CPP-CHECK-NEXT: ret <vscale x 8 x bfloat> [[TMP4]]
//
svbfloat16_t test_svld1_vnum_bf16(svbool_t pg, const bfloat16_t *base, int64_t vnum) MODE_ATTR
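
The pattern in every hunk above and below is the same: the alignment of @llvm.masked.load moves from an explicit i32 immediate argument to an align attribute on the pointer operand, e.g.

    -  @llvm.masked.load.nxv8i8.p0(ptr [[BASE]], i32 1, <vscale x 8 x i1> [[PG]], <vscale x 8 x i8> zeroinitializer)
    +  @llvm.masked.load.nxv8i8.p0(ptr align 1 [[BASE]], <vscale x 8 x i1> [[PG]], <vscale x 8 x i8> zeroinitializer)

The mask and the zeroinitializer passthrough operands are unchanged; only where the alignment is expressed differs.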
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sb.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sb.c
index 2757f28..eb40da2 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sb.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sb.c
@@ -25,14 +25,14 @@
// CHECK-LABEL: @test_svld1sb_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 8 x i8> [[TMP1]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sb_s16u10__SVBool_tPKa(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 8 x i8> [[TMP1]] to <vscale x 8 x i16>
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
@@ -44,14 +44,14 @@ svint16_t test_svld1sb_s16(svbool_t pg, const int8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1sb_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i8> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sb_s32u10__SVBool_tPKa(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i8> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
@@ -63,14 +63,14 @@ svint32_t test_svld1sb_s32(svbool_t pg, const int8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1sb_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i8> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sb_s64u10__SVBool_tPKa(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i8> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -82,14 +82,14 @@ svint64_t test_svld1sb_s64(svbool_t pg, const int8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1sb_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 8 x i8> [[TMP1]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sb_u16u10__SVBool_tPKa(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 8 x i8> [[TMP1]] to <vscale x 8 x i16>
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
@@ -101,14 +101,14 @@ svuint16_t test_svld1sb_u16(svbool_t pg, const int8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1sb_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i8> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sb_u32u10__SVBool_tPKa(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i8> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
@@ -120,14 +120,14 @@ svuint32_t test_svld1sb_u32(svbool_t pg, const int8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1sb_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i8> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sb_u64u10__SVBool_tPKa(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i8> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -143,7 +143,7 @@ svuint64_t test_svld1sb_u64(svbool_t pg, const int8_t *base) MODE_ATTR
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP5]]
//
@@ -154,7 +154,7 @@ svuint64_t test_svld1sb_u64(svbool_t pg, const int8_t *base) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP5]]
//
@@ -170,7 +170,7 @@ svint16_t test_svld1sb_vnum_s16(svbool_t pg, const int8_t *base, int64_t vnum) M
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -181,7 +181,7 @@ svint16_t test_svld1sb_vnum_s16(svbool_t pg, const int8_t *base, int64_t vnum) M
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -197,7 +197,7 @@ svint32_t test_svld1sb_vnum_s32(svbool_t pg, const int8_t *base, int64_t vnum) M
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -208,7 +208,7 @@ svint32_t test_svld1sb_vnum_s32(svbool_t pg, const int8_t *base, int64_t vnum) M
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -224,7 +224,7 @@ svint64_t test_svld1sb_vnum_s64(svbool_t pg, const int8_t *base, int64_t vnum) M
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP5]]
//
@@ -235,7 +235,7 @@ svint64_t test_svld1sb_vnum_s64(svbool_t pg, const int8_t *base, int64_t vnum) M
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP5]]
//
@@ -251,7 +251,7 @@ svuint16_t test_svld1sb_vnum_u16(svbool_t pg, const int8_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -262,7 +262,7 @@ svuint16_t test_svld1sb_vnum_u16(svbool_t pg, const int8_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -278,7 +278,7 @@ svuint32_t test_svld1sb_vnum_u32(svbool_t pg, const int8_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -289,7 +289,7 @@ svuint32_t test_svld1sb_vnum_u32(svbool_t pg, const int8_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
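
For context, the ld1sb intrinsics tested above are sign-extending loads: each active lane reads one int8_t and sign-extends it to the wider element type, which is why every masked load here is paired with a sext. A minimal usage sketch, assuming an SVE-enabled toolchain and arm_sve.h (the function name widen_bytes is illustrative, not from the tests):

    #include <arm_sve.h>

    // Sign-extending load: active lanes read one int8_t each and
    // sign-extend it to int16_t; inactive lanes yield zero.
    svint16_t widen_bytes(svbool_t pg, const int8_t *base) {
      return svld1sb_s16(pg, base);
    }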
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sh.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sh.c
index dbc762f..e1cbb53 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sh.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sh.c
@@ -25,14 +25,14 @@
// CHECK-LABEL: @test_svld1sh_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sh_s32u10__SVBool_tPKs(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
@@ -44,14 +44,14 @@ svint32_t test_svld1sh_s32(svbool_t pg, const int16_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1sh_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sh_s64u10__SVBool_tPKs(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -63,14 +63,14 @@ svint64_t test_svld1sh_s64(svbool_t pg, const int16_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1sh_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sh_u32u10__SVBool_tPKs(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
@@ -82,14 +82,14 @@ svuint32_t test_svld1sh_u32(svbool_t pg, const int16_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1sh_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sh_u64u10__SVBool_tPKs(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -105,7 +105,7 @@ svuint64_t test_svld1sh_u64(svbool_t pg, const int16_t *base) MODE_ATTR
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -116,7 +116,7 @@ svuint64_t test_svld1sh_u64(svbool_t pg, const int16_t *base) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -132,7 +132,7 @@ svint32_t test_svld1sh_vnum_s32(svbool_t pg, const int16_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -143,7 +143,7 @@ svint32_t test_svld1sh_vnum_s32(svbool_t pg, const int16_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -159,7 +159,7 @@ svint64_t test_svld1sh_vnum_s64(svbool_t pg, const int16_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -170,7 +170,7 @@ svint64_t test_svld1sh_vnum_s64(svbool_t pg, const int16_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -186,7 +186,7 @@ svuint32_t test_svld1sh_vnum_u32(svbool_t pg, const int16_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -197,7 +197,7 @@ svuint32_t test_svld1sh_vnum_u32(svbool_t pg, const int16_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sw.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sw.c
index 575d214..14ee095 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sw.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1sw.c
@@ -25,14 +25,14 @@
// CHECK-LABEL: @test_svld1sw_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sw_s64u10__SVBool_tPKi(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -44,14 +44,14 @@ svint64_t test_svld1sw_s64(svbool_t pg, const int32_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1sw_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1sw_u64u10__SVBool_tPKi(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -67,7 +67,7 @@ svuint64_t test_svld1sw_u64(svbool_t pg, const int32_t *base) MODE_ATTR
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -78,7 +78,7 @@ svuint64_t test_svld1sw_u64(svbool_t pg, const int32_t *base) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -94,7 +94,7 @@ svint64_t test_svld1sw_vnum_s64(svbool_t pg, const int32_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -105,7 +105,7 @@ svint64_t test_svld1sw_vnum_s64(svbool_t pg, const int32_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
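
The _vnum variants step through memory one whole (narrow) vector at a time: the shl/mul/getelementptr sequences above compute the byte offset as vnum * (vscale << 3), the size in bytes of one nxv2i32 block, before the masked load. A minimal sketch under the same assumptions (load_block is an illustrative name):

    #include <arm_sve.h>

    // vnum selects which vector-sized block of int32_t to load;
    // the offset lowers to vnum * (vscale << 3) bytes, matching
    // the address arithmetic in the CHECK lines above.
    svint64_t load_block(svbool_t pg, const int32_t *base, int64_t vnum) {
      return svld1sw_vnum_s64(pg, base, vnum);
    }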
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1ub.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1ub.c
index 07e8815..3e0d2827 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1ub.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1ub.c
@@ -25,14 +25,14 @@
// CHECK-LABEL: @test_svld1ub_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 8 x i8> [[TMP1]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1ub_s16u10__SVBool_tPKh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 8 x i8> [[TMP1]] to <vscale x 8 x i16>
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
@@ -44,14 +44,14 @@ svint16_t test_svld1ub_s16(svbool_t pg, const uint8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1ub_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 4 x i8> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1ub_s32u10__SVBool_tPKh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 4 x i8> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
@@ -63,14 +63,14 @@ svint32_t test_svld1ub_s32(svbool_t pg, const uint8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1ub_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i8> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1ub_s64u10__SVBool_tPKh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i8> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -82,14 +82,14 @@ svint64_t test_svld1ub_s64(svbool_t pg, const uint8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1ub_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 8 x i8> [[TMP1]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1ub_u16u10__SVBool_tPKh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 8 x i8> [[TMP1]] to <vscale x 8 x i16>
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
@@ -101,14 +101,14 @@ svuint16_t test_svld1ub_u16(svbool_t pg, const uint8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1ub_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 4 x i8> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1ub_u32u10__SVBool_tPKh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 4 x i8> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
@@ -120,14 +120,14 @@ svuint32_t test_svld1ub_u32(svbool_t pg, const uint8_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1ub_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i8> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1ub_u64u10__SVBool_tPKh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i8> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -143,7 +143,7 @@ svuint64_t test_svld1ub_u64(svbool_t pg, const uint8_t *base) MODE_ATTR
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP5]]
//
@@ -154,7 +154,7 @@ svuint64_t test_svld1ub_u64(svbool_t pg, const uint8_t *base) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP5]]
//
@@ -170,7 +170,7 @@ svint16_t test_svld1ub_vnum_s16(svbool_t pg, const uint8_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -181,7 +181,7 @@ svint16_t test_svld1ub_vnum_s16(svbool_t pg, const uint8_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -197,7 +197,7 @@ svint32_t test_svld1ub_vnum_s32(svbool_t pg, const uint8_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -208,7 +208,7 @@ svint32_t test_svld1ub_vnum_s32(svbool_t pg, const uint8_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -224,7 +224,7 @@ svint64_t test_svld1ub_vnum_s64(svbool_t pg, const uint8_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP5]]
//
@@ -235,7 +235,7 @@ svint64_t test_svld1ub_vnum_s64(svbool_t pg, const uint8_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP5]]
//
@@ -251,7 +251,7 @@ svuint16_t test_svld1ub_vnum_u16(svbool_t pg, const uint8_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -262,7 +262,7 @@ svuint16_t test_svld1ub_vnum_u16(svbool_t pg, const uint8_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -278,7 +278,7 @@ svuint32_t test_svld1ub_vnum_u32(svbool_t pg, const uint8_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -289,7 +289,7 @@ svuint32_t test_svld1ub_vnum_u32(svbool_t pg, const uint8_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uh.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uh.c
index 6d91c1e..18dfc082 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uh.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uh.c
@@ -25,14 +25,14 @@
// CHECK-LABEL: @test_svld1uh_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1uh_s32u10__SVBool_tPKt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
@@ -44,14 +44,14 @@ svint32_t test_svld1uh_s32(svbool_t pg, const uint16_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1uh_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1uh_s64u10__SVBool_tPKt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -63,14 +63,14 @@ svint64_t test_svld1uh_s64(svbool_t pg, const uint16_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1uh_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1uh_u32u10__SVBool_tPKt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
@@ -82,14 +82,14 @@ svuint32_t test_svld1uh_u32(svbool_t pg, const uint16_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1uh_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1uh_u64u10__SVBool_tPKt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -105,7 +105,7 @@ svuint64_t test_svld1uh_u64(svbool_t pg, const uint16_t *base) MODE_ATTR
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -116,7 +116,7 @@ svuint64_t test_svld1uh_u64(svbool_t pg, const uint16_t *base) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -132,7 +132,7 @@ svint32_t test_svld1uh_vnum_s32(svbool_t pg, const uint16_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -143,7 +143,7 @@ svint32_t test_svld1uh_vnum_s32(svbool_t pg, const uint16_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -159,7 +159,7 @@ svint64_t test_svld1uh_vnum_s64(svbool_t pg, const uint16_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -170,7 +170,7 @@ svint64_t test_svld1uh_vnum_s64(svbool_t pg, const uint16_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
//
@@ -186,7 +186,7 @@ svuint32_t test_svld1uh_vnum_u32(svbool_t pg, const uint16_t *base, int64_t vnum
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -197,7 +197,7 @@ svuint32_t test_svld1uh_vnum_u32(svbool_t pg, const uint16_t *base, int64_t vnum
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uw.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uw.c
index 7be2398..62637ff 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uw.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_ld1uw.c
@@ -25,14 +25,14 @@
// CHECK-LABEL: @test_svld1uw_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1uw_s64u10__SVBool_tPKj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -44,14 +44,14 @@ svint64_t test_svld1uw_s64(svbool_t pg, const uint32_t *base) MODE_ATTR
// CHECK-LABEL: @test_svld1uw_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svld1uw_u64u10__SVBool_tPKj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = zext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
@@ -67,7 +67,7 @@ svuint64_t test_svld1uw_u64(svbool_t pg, const uint32_t *base) MODE_ATTR
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -78,7 +78,7 @@ svuint64_t test_svld1uw_u64(svbool_t pg, const uint32_t *base) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -94,7 +94,7 @@ svint64_t test_svld1uw_vnum_s64(svbool_t pg, const uint32_t *base, int64_t vnum)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
@@ -105,7 +105,7 @@ svint64_t test_svld1uw_vnum_s64(svbool_t pg, const uint32_t *base, int64_t vnum)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
// CPP-CHECK-NEXT: [[TMP5:%.*]] = zext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP5]]
//
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1.c
index 56f8c32..4d0005e 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1.c
@@ -24,12 +24,12 @@
// CHECK-LABEL: @test_svst1_s8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z13test_svst1_s8u10__SVBool_tPau10__SVInt8_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_s8(svbool_t pg, int8_t *base, svint8_t data) MODE_ATTR
@@ -40,13 +40,13 @@ void test_svst1_s8(svbool_t pg, int8_t *base, svint8_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_s16u10__SVBool_tPsu11__SVInt16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_s16(svbool_t pg, int16_t *base, svint16_t data) MODE_ATTR
@@ -57,13 +57,13 @@ void test_svst1_s16(svbool_t pg, int16_t *base, svint16_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_s32u10__SVBool_tPiu11__SVInt32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_s32(svbool_t pg, int32_t *base, svint32_t data) MODE_ATTR
@@ -74,13 +74,13 @@ void test_svst1_s32(svbool_t pg, int32_t *base, svint32_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_s64u10__SVBool_tPlu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_s64(svbool_t pg, int64_t *base, svint64_t data) MODE_ATTR
@@ -90,12 +90,12 @@ void test_svst1_s64(svbool_t pg, int64_t *base, svint64_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z13test_svst1_u8u10__SVBool_tPhu11__SVUint8_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_u8(svbool_t pg, uint8_t *base, svuint8_t data) MODE_ATTR
@@ -106,13 +106,13 @@ void test_svst1_u8(svbool_t pg, uint8_t *base, svuint8_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_u16u10__SVBool_tPtu12__SVUint16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_u16(svbool_t pg, uint16_t *base, svuint16_t data) MODE_ATTR
@@ -123,13 +123,13 @@ void test_svst1_u16(svbool_t pg, uint16_t *base, svuint16_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_u32u10__SVBool_tPju12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_u32(svbool_t pg, uint32_t *base, svuint32_t data) MODE_ATTR
@@ -140,13 +140,13 @@ void test_svst1_u32(svbool_t pg, uint32_t *base, svuint32_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_u64u10__SVBool_tPmu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_u64(svbool_t pg, uint64_t *base, svuint64_t data) MODE_ATTR
@@ -157,13 +157,13 @@ void test_svst1_u64(svbool_t pg, uint64_t *base, svuint64_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_f16u10__SVBool_tPDhu13__SVFloat16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_f16(svbool_t pg, float16_t *base, svfloat16_t data) MODE_ATTR
@@ -174,13 +174,13 @@ void test_svst1_f16(svbool_t pg, float16_t *base, svfloat16_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_f32u10__SVBool_tPfu13__SVFloat32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_f32(svbool_t pg, float32_t *base, svfloat32_t data) MODE_ATTR
@@ -191,13 +191,13 @@ void test_svst1_f32(svbool_t pg, float32_t *base, svfloat32_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_f64u10__SVBool_tPdu13__SVFloat64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_f64(svbool_t pg, float64_t *base, svfloat64_t data) MODE_ATTR
@@ -207,12 +207,12 @@ void test_svst1_f64(svbool_t pg, float64_t *base, svfloat64_t data) MODE_ATTR
// CHECK-LABEL: @test_svst1_mf8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z14test_svst1_mf8u10__SVBool_tPu6__mfp8u13__SVMfloat8_t(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_mf8(svbool_t pg, mfloat8_t *base, svmfloat8_t data) MODE_ATTR
@@ -226,7 +226,7 @@ void test_svst1_mf8(svbool_t pg, mfloat8_t *base, svmfloat8_t data) MODE_ATTR
// CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z18test_svst1_vnum_s8u10__SVBool_tPalu10__SVInt8_t(
@@ -235,7 +235,7 @@ void test_svst1_mf8(svbool_t pg, mfloat8_t *base, svmfloat8_t data) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_s8(svbool_t pg, int8_t *base, int64_t vnum, svint8_t data) MODE_ATTR
@@ -250,7 +250,7 @@ void test_svst1_vnum_s8(svbool_t pg, int8_t *base, int64_t vnum, svint8_t data)
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_s16u10__SVBool_tPslu11__SVInt16_t(
@@ -260,7 +260,7 @@ void test_svst1_vnum_s8(svbool_t pg, int8_t *base, int64_t vnum, svint8_t data)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_s16(svbool_t pg, int16_t *base, int64_t vnum, svint16_t data) MODE_ATTR
@@ -275,7 +275,7 @@ void test_svst1_vnum_s16(svbool_t pg, int16_t *base, int64_t vnum, svint16_t dat
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_s32u10__SVBool_tPilu11__SVInt32_t(
@@ -285,7 +285,7 @@ void test_svst1_vnum_s16(svbool_t pg, int16_t *base, int64_t vnum, svint16_t dat
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_s32(svbool_t pg, int32_t *base, int64_t vnum, svint32_t data) MODE_ATTR
@@ -300,7 +300,7 @@ void test_svst1_vnum_s32(svbool_t pg, int32_t *base, int64_t vnum, svint32_t dat
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_s64u10__SVBool_tPllu11__SVInt64_t(
@@ -310,7 +310,7 @@ void test_svst1_vnum_s32(svbool_t pg, int32_t *base, int64_t vnum, svint32_t dat
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_s64(svbool_t pg, int64_t *base, int64_t vnum, svint64_t data) MODE_ATTR
@@ -324,7 +324,7 @@ void test_svst1_vnum_s64(svbool_t pg, int64_t *base, int64_t vnum, svint64_t dat
// CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z18test_svst1_vnum_u8u10__SVBool_tPhlu11__SVUint8_t(
@@ -333,7 +333,7 @@ void test_svst1_vnum_s64(svbool_t pg, int64_t *base, int64_t vnum, svint64_t dat
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_u8(svbool_t pg, uint8_t *base, int64_t vnum, svuint8_t data) MODE_ATTR
@@ -348,7 +348,7 @@ void test_svst1_vnum_u8(svbool_t pg, uint8_t *base, int64_t vnum, svuint8_t data
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_u16u10__SVBool_tPtlu12__SVUint16_t(
@@ -358,7 +358,7 @@ void test_svst1_vnum_u8(svbool_t pg, uint8_t *base, int64_t vnum, svuint8_t data
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_u16(svbool_t pg, uint16_t *base, int64_t vnum, svuint16_t data) MODE_ATTR
@@ -373,7 +373,7 @@ void test_svst1_vnum_u16(svbool_t pg, uint16_t *base, int64_t vnum, svuint16_t d
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_u32u10__SVBool_tPjlu12__SVUint32_t(
@@ -383,7 +383,7 @@ void test_svst1_vnum_u16(svbool_t pg, uint16_t *base, int64_t vnum, svuint16_t d
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_u32(svbool_t pg, uint32_t *base, int64_t vnum, svuint32_t data) MODE_ATTR
@@ -398,7 +398,7 @@ void test_svst1_vnum_u32(svbool_t pg, uint32_t *base, int64_t vnum, svuint32_t d
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_u64u10__SVBool_tPmlu12__SVUint64_t(
@@ -408,7 +408,7 @@ void test_svst1_vnum_u32(svbool_t pg, uint32_t *base, int64_t vnum, svuint32_t d
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_u64(svbool_t pg, uint64_t *base, int64_t vnum, svuint64_t data) MODE_ATTR
@@ -423,7 +423,7 @@ void test_svst1_vnum_u64(svbool_t pg, uint64_t *base, int64_t vnum, svuint64_t d
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_f16u10__SVBool_tPDhlu13__SVFloat16_t(
@@ -433,7 +433,7 @@ void test_svst1_vnum_u64(svbool_t pg, uint64_t *base, int64_t vnum, svuint64_t d
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8f16.p0(<vscale x 8 x half> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_f16(svbool_t pg, float16_t *base, int64_t vnum, svfloat16_t data) MODE_ATTR
@@ -448,7 +448,7 @@ void test_svst1_vnum_f16(svbool_t pg, float16_t *base, int64_t vnum, svfloat16_t
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_f32u10__SVBool_tPflu13__SVFloat32_t(
@@ -458,7 +458,7 @@ void test_svst1_vnum_f16(svbool_t pg, float16_t *base, int64_t vnum, svfloat16_t
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_f32(svbool_t pg, float32_t *base, int64_t vnum, svfloat32_t data) MODE_ATTR
@@ -473,7 +473,7 @@ void test_svst1_vnum_f32(svbool_t pg, float32_t *base, int64_t vnum, svfloat32_t
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_f64u10__SVBool_tPdlu13__SVFloat64_t(
@@ -483,7 +483,7 @@ void test_svst1_vnum_f32(svbool_t pg, float32_t *base, int64_t vnum, svfloat32_t
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_f64(svbool_t pg, float64_t *base, int64_t vnum, svfloat64_t data) MODE_ATTR
@@ -497,7 +497,7 @@ void test_svst1_vnum_f64(svbool_t pg, float64_t *base, int64_t vnum, svfloat64_t
// CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z19test_svst1_vnum_mf8u10__SVBool_tPu6__mfp8lu13__SVMfloat8_t(
@@ -506,7 +506,7 @@ void test_svst1_vnum_f64(svbool_t pg, float64_t *base, int64_t vnum, svfloat64_t
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
// CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr align 1 [[TMP2]], <vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_mf8(svbool_t pg, mfloat8_t *base, int64_t vnum, svmfloat8_t data) MODE_ATTR
@@ -1247,13 +1247,13 @@ void test_svst1_scatter_u64base_index_f64(svbool_t pg, svuint64_t bases, int64_t
// CHECK-LABEL: @test_svst1_bf16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z15test_svst1_bf16u10__SVBool_tPu6__bf16u14__SVBfloat16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[DATA:%.*]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[DATA:%.*]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_bf16(svbool_t pg, bfloat16_t *base, svbfloat16_t data) MODE_ATTR
@@ -1268,7 +1268,7 @@ void test_svst1_bf16(svbool_t pg, bfloat16_t *base, svbfloat16_t data) MODE_ATTR
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z20test_svst1_vnum_bf16u10__SVBool_tPu6__bf16lu14__SVBfloat16_t(
@@ -1278,7 +1278,7 @@ void test_svst1_bf16(svbool_t pg, bfloat16_t *base, svbfloat16_t data) MODE_ATTR
// CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
-// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[DATA:%.*]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CPP-CHECK-NEXT: ret void
//
void test_svst1_vnum_bf16(svbool_t pg, bfloat16_t *base, int64_t vnum, svbfloat16_t data) MODE_ATTR
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1b.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1b.c
index c908bc2..3ac49e2 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1b.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1b.c
@@ -24,7 +24,7 @@
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 8 x i16> [[DATA:%.*]] to <vscale x 8 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_s16(svbool_t pg, int8_t *base, svint16_t data) MODE_ATTR
@@ -36,7 +36,7 @@ void test_svst1b_s16(svbool_t pg, int8_t *base, svint16_t data) MODE_ATTR
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_s32(svbool_t pg, int8_t *base, svint32_t data) MODE_ATTR
@@ -48,7 +48,7 @@ void test_svst1b_s32(svbool_t pg, int8_t *base, svint32_t data) MODE_ATTR
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_s64(svbool_t pg, int8_t *base, svint64_t data) MODE_ATTR
@@ -60,7 +60,7 @@ void test_svst1b_s64(svbool_t pg, int8_t *base, svint64_t data) MODE_ATTR
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 8 x i16> [[DATA:%.*]] to <vscale x 8 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_u16(svbool_t pg, uint8_t *base, svuint16_t data) MODE_ATTR
@@ -72,7 +72,7 @@ void test_svst1b_u16(svbool_t pg, uint8_t *base, svuint16_t data) MODE_ATTR
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_u32(svbool_t pg, uint8_t *base, svuint32_t data) MODE_ATTR
@@ -84,7 +84,7 @@ void test_svst1b_u32(svbool_t pg, uint8_t *base, svuint32_t data) MODE_ATTR
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_u64(svbool_t pg, uint8_t *base, svuint64_t data) MODE_ATTR
@@ -100,7 +100,7 @@ void test_svst1b_u64(svbool_t pg, uint8_t *base, svuint64_t data) MODE_ATTR
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 8 x i16> [[DATA:%.*]] to <vscale x 8 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_vnum_s16(svbool_t pg, int8_t *base, int64_t vnum, svint16_t data) MODE_ATTR
@@ -116,7 +116,7 @@ void test_svst1b_vnum_s16(svbool_t pg, int8_t *base, int64_t vnum, svint16_t dat
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_vnum_s32(svbool_t pg, int8_t *base, int64_t vnum, svint32_t data) MODE_ATTR
@@ -132,7 +132,7 @@ void test_svst1b_vnum_s32(svbool_t pg, int8_t *base, int64_t vnum, svint32_t dat
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_vnum_s64(svbool_t pg, int8_t *base, int64_t vnum, svint64_t data) MODE_ATTR
@@ -148,7 +148,7 @@ void test_svst1b_vnum_s64(svbool_t pg, int8_t *base, int64_t vnum, svint64_t dat
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 8 x i16> [[DATA:%.*]] to <vscale x 8 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_vnum_u16(svbool_t pg, uint8_t *base, int64_t vnum, svuint16_t data) MODE_ATTR
@@ -164,7 +164,7 @@ void test_svst1b_vnum_u16(svbool_t pg, uint8_t *base, int64_t vnum, svuint16_t d
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_vnum_u32(svbool_t pg, uint8_t *base, int64_t vnum, svuint32_t data) MODE_ATTR
@@ -180,7 +180,7 @@ void test_svst1b_vnum_u32(svbool_t pg, uint8_t *base, int64_t vnum, svuint32_t d
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_vnum_u64(svbool_t pg, uint8_t *base, int64_t vnum, svuint64_t data) MODE_ATTR
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1h.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1h.c
index 959b658..1e3e0b2 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1h.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1h.c
@@ -24,7 +24,7 @@
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_s32(svbool_t pg, int16_t *base, svint32_t data) MODE_ATTR
@@ -36,7 +36,7 @@ void test_svst1h_s32(svbool_t pg, int16_t *base, svint32_t data) MODE_ATTR
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_s64(svbool_t pg, int16_t *base, svint64_t data) MODE_ATTR
@@ -48,7 +48,7 @@ void test_svst1h_s64(svbool_t pg, int16_t *base, svint64_t data) MODE_ATTR
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_u32(svbool_t pg, uint16_t *base, svuint32_t data) MODE_ATTR
@@ -60,7 +60,7 @@ void test_svst1h_u32(svbool_t pg, uint16_t *base, svuint32_t data) MODE_ATTR
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_u64(svbool_t pg, uint16_t *base, svuint64_t data) MODE_ATTR
@@ -76,7 +76,7 @@ void test_svst1h_u64(svbool_t pg, uint16_t *base, svuint64_t data) MODE_ATTR
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_vnum_s32(svbool_t pg, int16_t *base, int64_t vnum, svint32_t data) MODE_ATTR
@@ -92,7 +92,7 @@ void test_svst1h_vnum_s32(svbool_t pg, int16_t *base, int64_t vnum, svint32_t da
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_vnum_s64(svbool_t pg, int16_t *base, int64_t vnum, svint64_t data) MODE_ATTR
@@ -108,7 +108,7 @@ void test_svst1h_vnum_s64(svbool_t pg, int16_t *base, int64_t vnum, svint64_t da
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_vnum_u32(svbool_t pg, uint16_t *base, int64_t vnum, svuint32_t data) MODE_ATTR
@@ -124,7 +124,7 @@ void test_svst1h_vnum_u32(svbool_t pg, uint16_t *base, int64_t vnum, svuint32_t
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_vnum_u64(svbool_t pg, uint16_t *base, int64_t vnum, svuint64_t data) MODE_ATTR
diff --git a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1w.c b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1w.c
index 3d9e45bd..1a12412 100644
--- a/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1w.c
+++ b/clang/test/CodeGen/AArch64/sve-intrinsics/acle_sve_st1w.c
@@ -24,7 +24,7 @@
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i32>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1w_s64(svbool_t pg, int32_t *base, svint64_t data) MODE_ATTR
@@ -36,7 +36,7 @@ void test_svst1w_s64(svbool_t pg, int32_t *base, svint64_t data) MODE_ATTR
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i32>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP1]], ptr align 1 [[BASE:%.*]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1w_u64(svbool_t pg, uint32_t *base, svuint64_t data) MODE_ATTR
@@ -52,7 +52,7 @@ void test_svst1w_u64(svbool_t pg, uint32_t *base, svuint64_t data) MODE_ATTR
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i32>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1w_vnum_s64(svbool_t pg, int32_t *base, int64_t vnum, svint64_t data) MODE_ATTR
@@ -68,7 +68,7 @@ void test_svst1w_vnum_s64(svbool_t pg, int32_t *base, int64_t vnum, svint64_t da
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i32>
-// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
+// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP4]], ptr align 1 [[TMP3]], <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1w_vnum_u64(svbool_t pg, uint32_t *base, int64_t vnum, svuint64_t data) MODE_ATTR
diff --git a/clang/test/CodeGen/X86/avx-builtins.c b/clang/test/CodeGen/X86/avx-builtins.c
index bcffd861..26aed772 100644
--- a/clang/test/CodeGen/X86/avx-builtins.c
+++ b/clang/test/CodeGen/X86/avx-builtins.c
@@ -1070,19 +1070,25 @@ __m128d test_mm256_extractf128_pd(__m256d A) {
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 2, i32 3>
return _mm256_extractf128_pd(A, 1);
}
+TEST_CONSTEXPR(match_m128d(_mm256_extractf128_pd(((__m256d){0.0, 1.0, 2.0, 3.0}), 1),
+ 2.0, 3.0));
__m128 test_mm256_extractf128_ps(__m256 A) {
// CHECK-LABEL: test_mm256_extractf128_ps
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
return _mm256_extractf128_ps(A, 1);
}
+TEST_CONSTEXPR(match_m128(_mm256_extractf128_ps(((__m256){0,1,2,3,4,5,6,7}), 1),
+ 4.0f, 5.0f, 6.0f, 7.0f));
__m128i test_mm256_extractf128_si256(__m256i A) {
// CHECK-LABEL: test_mm256_extractf128_si256
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
return _mm256_extractf128_si256(A, 1);
}
-
+TEST_CONSTEXPR(match_m128i(_mm256_extractf128_si256(((__m256i){0ULL, 1ULL, 2ULL, 3ULL}), 1),
+ 2ULL, 3ULL));
+
__m256d test_mm256_floor_pd(__m256d x) {
// CHECK-LABEL: test_mm256_floor_pd
// CHECK: call {{.*}}<4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %{{.*}}, i32 1)
@@ -1348,12 +1354,16 @@ int test_mm256_movemask_pd(__m256d A) {
// CHECK: call {{.*}}i32 @llvm.x86.avx.movmsk.pd.256(<4 x double> %{{.*}})
return _mm256_movemask_pd(A);
}
+TEST_CONSTEXPR(_mm256_movemask_pd((__m256d)(__v4df){-1234.5678901234, 98765.4321098765, 0.000123456789, -3.14159265358979}) == 0x9);
+TEST_CONSTEXPR(_mm256_movemask_pd((__m256d)(__v4df){-0.000000987654321, -99999.999999999, 42.424242424242, 314159.2653589793}) == 0x3);
int test_mm256_movemask_ps(__m256 A) {
// CHECK-LABEL: test_mm256_movemask_ps
// CHECK: call {{.*}}i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %{{.*}})
return _mm256_movemask_ps(A);
}
+TEST_CONSTEXPR(_mm256_movemask_ps((__m256)(__v8sf){-12.3456f, 34.7890f, -0.0001234f, 123456.78f, -987.654f, 0.001234f, 3.14159f, -256.001f}) == 0x95);
+TEST_CONSTEXPR(_mm256_movemask_ps((__m256)(__v8sf){0.333333f, -45.6789f, 999.999f, -0.9999f, 17.234f, -128.512f, 2048.0f, -3.14f}) == 0xAA);
__m256d test_mm256_mul_pd(__m256d A, __m256d B) {
// CHECK-LABEL: test_mm256_mul_pd
diff --git a/clang/test/CodeGen/X86/avx10_2bf16-builtins.c b/clang/test/CodeGen/X86/avx10_2bf16-builtins.c
index c7fea07..f8a4c51 100644
--- a/clang/test/CodeGen/X86/avx10_2bf16-builtins.c
+++ b/clang/test/CodeGen/X86/avx10_2bf16-builtins.c
@@ -274,7 +274,7 @@ __m256bh test_mm256_loadu_pbh(void *p) {
__m128bh test_mm_load_sbh(void const *A) {
// CHECK-LABEL: test_mm_load_sbh
- // CHECK: %{{.*}} = call <8 x bfloat> @llvm.masked.load.v8bf16.p0(ptr %{{.*}}, i32 1, <8 x i1> bitcast (<1 x i8> splat (i8 1) to <8 x i1>), <8 x bfloat> %{{.*}})
+ // CHECK: %{{.*}} = call <8 x bfloat> @llvm.masked.load.v8bf16.p0(ptr align 1 %{{.*}}, <8 x i1> bitcast (<1 x i8> splat (i8 1) to <8 x i1>), <8 x bfloat> %{{.*}})
return _mm_load_sbh(A);
}
@@ -305,7 +305,7 @@ void test_mm_store_sbh(void *A, __m128bh B) {
void test_mm_mask_store_sbh(void *__P, __mmask8 __U, __m128bh __A) {
// CHECK-LABEL: @test_mm_mask_store_sbh
- // CHECK: call void @llvm.masked.store.v8bf16.p0(<8 x bfloat> %{{.*}}, ptr %{{.*}}, i32 1, <8 x i1> %{{.*}})
+ // CHECK: call void @llvm.masked.store.v8bf16.p0(<8 x bfloat> %{{.*}}, ptr align 1 %{{.*}}, <8 x i1> %{{.*}})
_mm_mask_store_sbh(__P, __U, __A);
}
@@ -323,13 +323,13 @@ void test_mm_store_pbh(void *p, __m128bh a) {
__m128bh test_mm_mask_load_sbh(__m128bh __A, __mmask8 __U, const void *__W) {
// CHECK-LABEL: @test_mm_mask_load_sbh
- // CHECK: %{{.*}} = call <8 x bfloat> @llvm.masked.load.v8bf16.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x bfloat> %{{.*}})
+ // CHECK: %{{.*}} = call <8 x bfloat> @llvm.masked.load.v8bf16.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x bfloat> %{{.*}})
return _mm_mask_load_sbh(__A, __U, __W);
}
__m128bh test_mm_maskz_load_sbh(__mmask8 __U, const void *__W) {
// CHECK-LABEL: @test_mm_maskz_load_sbh
- // CHECK: %{{.*}} = call <8 x bfloat> @llvm.masked.load.v8bf16.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x bfloat> %{{.*}})
+ // CHECK: %{{.*}} = call <8 x bfloat> @llvm.masked.load.v8bf16.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x bfloat> %{{.*}})
return _mm_maskz_load_sbh(__U, __W);
}
diff --git a/clang/test/CodeGen/X86/avx2-builtins.c b/clang/test/CodeGen/X86/avx2-builtins.c
index a505d70..03b1bde 100644
--- a/clang/test/CodeGen/X86/avx2-builtins.c
+++ b/clang/test/CodeGen/X86/avx2-builtins.c
@@ -466,6 +466,7 @@ __m128i test0_mm256_extracti128_si256_0(__m256i a) {
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> poison, <2 x i32> <i32 0, i32 1>
return _mm256_extracti128_si256(a, 0);
}
+TEST_CONSTEXPR(match_m128i(_mm256_extracti128_si256(((__m256i){1ULL, 2ULL, 3ULL, 4ULL}), 0),1ULL, 2ULL));
__m128i test1_mm256_extracti128_si256_1(__m256i a) {
// CHECK-LABEL: test1_mm256_extracti128_si256
@@ -992,6 +993,9 @@ int test_mm256_movemask_epi8(__m256i a) {
// CHECK: call {{.*}}i32 @llvm.x86.avx2.pmovmskb(<32 x i8> %{{.*}})
return _mm256_movemask_epi8(a);
}
+TEST_CONSTEXPR(_mm256_movemask_epi8((__m256i)(__v32qu){0x7F,0x80,0x01,0xFF,0x00,0xAA,0x55,0xC3,0x12,0x8E,0x00,0xFE,0x7E,0x81,0xFF,0x01,0xB6,0x00,0x39,0x40,0xD0,0x05,0x80,0x2A,0x7B,0x00,0x90,0xFF,0x01,0x34,0xC0,0x6D}) == 0x4C516AAA);
+TEST_CONSTEXPR(_mm256_movemask_epi8((__m256i)(__v8si){(int)0x80FF00AA,(int)0x7F0183E1,(int)0xDEADBEEF,(int)0xC0000001,(int)0x00000000,(int)0xFFFFFFFF,(int)0x12345678,(int)0x90ABCDEF}) == 0xF0F08F3D);
+TEST_CONSTEXPR(_mm256_movemask_epi8((__m256i)(__v4du){0xFF00000000000080ULL,0x7F010203040506C3ULL,0x8000000000000000ULL,0x0123456789ABCDEFULL}) == 0x0F800181);
__m256i test_mm256_mpsadbw_epu8(__m256i x, __m256i y) {
// CHECK-LABEL: test_mm256_mpsadbw_epu8
diff --git a/clang/test/CodeGen/X86/avx512bw-builtins.c b/clang/test/CodeGen/X86/avx512bw-builtins.c
index 55bf482..d07e40a 100644
--- a/clang/test/CodeGen/X86/avx512bw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512bw-builtins.c
@@ -2414,13 +2414,13 @@ __m512i test_mm512_loadu_epi16 (void *__P)
__m512i test_mm512_mask_loadu_epi16(__m512i __W, __mmask32 __U, void const *__P) {
// CHECK-LABEL: test_mm512_mask_loadu_epi16
- // CHECK: @llvm.masked.load.v32i16.p0(ptr %{{.*}}, i32 1, <32 x i1> %{{.*}}, <32 x i16> %{{.*}})
+ // CHECK: @llvm.masked.load.v32i16.p0(ptr align 1 %{{.*}}, <32 x i1> %{{.*}}, <32 x i16> %{{.*}})
return _mm512_mask_loadu_epi16(__W, __U, __P);
}
__m512i test_mm512_maskz_loadu_epi16(__mmask32 __U, void const *__P) {
// CHECK-LABEL: test_mm512_maskz_loadu_epi16
- // CHECK: @llvm.masked.load.v32i16.p0(ptr %{{.*}}, i32 1, <32 x i1> %{{.*}}, <32 x i16> %{{.*}})
+ // CHECK: @llvm.masked.load.v32i16.p0(ptr align 1 %{{.*}}, <32 x i1> %{{.*}}, <32 x i16> %{{.*}})
return _mm512_maskz_loadu_epi16(__U, __P);
}
@@ -2433,13 +2433,13 @@ __m512i test_mm512_loadu_epi8 (void *__P)
__m512i test_mm512_mask_loadu_epi8(__m512i __W, __mmask64 __U, void const *__P) {
// CHECK-LABEL: test_mm512_mask_loadu_epi8
- // CHECK: @llvm.masked.load.v64i8.p0(ptr %{{.*}}, i32 1, <64 x i1> %{{.*}}, <64 x i8> %{{.*}})
+ // CHECK: @llvm.masked.load.v64i8.p0(ptr align 1 %{{.*}}, <64 x i1> %{{.*}}, <64 x i8> %{{.*}})
return _mm512_mask_loadu_epi8(__W, __U, __P);
}
__m512i test_mm512_maskz_loadu_epi8(__mmask64 __U, void const *__P) {
// CHECK-LABEL: test_mm512_maskz_loadu_epi8
- // CHECK: @llvm.masked.load.v64i8.p0(ptr %{{.*}}, i32 1, <64 x i1> %{{.*}}, <64 x i8> %{{.*}})
+ // CHECK: @llvm.masked.load.v64i8.p0(ptr align 1 %{{.*}}, <64 x i1> %{{.*}}, <64 x i8> %{{.*}})
return _mm512_maskz_loadu_epi8(__U, __P);
}
@@ -2451,7 +2451,7 @@ void test_mm512_storeu_epi16(void *__P, __m512i __A) {
void test_mm512_mask_storeu_epi16(void *__P, __mmask32 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_storeu_epi16
- // CHECK: @llvm.masked.store.v32i16.p0(<32 x i16> %{{.*}}, ptr %{{.*}}, i32 1, <32 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v32i16.p0(<32 x i16> %{{.*}}, ptr align 1 %{{.*}}, <32 x i1> %{{.*}})
return _mm512_mask_storeu_epi16(__P, __U, __A);
}
@@ -2470,7 +2470,7 @@ void test_mm512_storeu_epi8(void *__P, __m512i __A) {
void test_mm512_mask_storeu_epi8(void *__P, __mmask64 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_storeu_epi8
- // CHECK: @llvm.masked.store.v64i8.p0(<64 x i8> %{{.*}}, ptr %{{.*}}, i32 1, <64 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v64i8.p0(<64 x i8> %{{.*}}, ptr align 1 %{{.*}}, <64 x i1> %{{.*}})
return _mm512_mask_storeu_epi8(__P, __U, __A);
}
__mmask64 test_mm512_mask_test_epi8_mask(__mmask64 __U, __m512i __A, __m512i __B) {
diff --git a/clang/test/CodeGen/X86/avx512dq-builtins.c b/clang/test/CodeGen/X86/avx512dq-builtins.c
index 1b09959..9c4ada3 100644
--- a/clang/test/CodeGen/X86/avx512dq-builtins.c
+++ b/clang/test/CodeGen/X86/avx512dq-builtins.c
@@ -1415,6 +1415,7 @@ __m256 test_mm512_extractf32x8_ps(__m512 __A) {
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
return _mm512_extractf32x8_ps(__A, 1);
}
+TEST_CONSTEXPR(match_m256(_mm512_extractf32x8_ps(((__m512){0.0f,1.0f,2.0f,3.0f, 4.0f,5.0f,6.0f,7.0f,8.0f,9.0f,10.0f,11.0f, 12.0f,13.0f,14.0f,15.0f}), 1),8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f));
__m256 test_mm512_mask_extractf32x8_ps(__m256 __W, __mmask8 __U, __m512 __A) {
// CHECK-LABEL: test_mm512_mask_extractf32x8_ps
@@ -1422,6 +1423,7 @@ __m256 test_mm512_mask_extractf32x8_ps(__m256 __W, __mmask8 __U, __m512 __A) {
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_mask_extractf32x8_ps(__W, __U, __A, 1);
}
+TEST_CONSTEXPR(match_m256(_mm512_mask_extractf32x8_ps(((__m256)(__v8sf){0,0,0,0,0,0,0,0}), (__mmask8)0xFF,((__m512)(__v16sf){0.0f,1.0f,2.0f,3.0f,4.0f,5.0f,6.0f,7.0f,8.0f,9.0f,10.0f,11.0f,12.0f,13.0f,14.0f,15.0f}),1),8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f));
__m256 test_mm512_maskz_extractf32x8_ps(__mmask8 __U, __m512 __A) {
// CHECK-LABEL: test_mm512_maskz_extractf32x8_ps
@@ -1429,12 +1431,14 @@ __m256 test_mm512_maskz_extractf32x8_ps(__mmask8 __U, __m512 __A) {
// CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm512_maskz_extractf32x8_ps(__U, __A, 1);
}
+TEST_CONSTEXPR(match_m256(_mm512_maskz_extractf32x8_ps((__mmask8)0x0F, ((__m512)(__v16sf){0.0f,1.0f,2.0f,3.0f,4.0f,5.0f,6.0f,7.0f,8.0f,9.0f,10.0f,11.0f,12.0f,13.0f,14.0f,15.0f}),1),8.0f, 9.0f, 10.0f, 11.0f, 0.0f, 0.0f, 0.0f, 0.0f));
__m128d test_mm512_extractf64x2_pd(__m512d __A) {
// CHECK-LABEL: test_mm512_extractf64x2_pd
// CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> poison, <2 x i32> <i32 6, i32 7>
return _mm512_extractf64x2_pd(__A, 3);
}
+TEST_CONSTEXPR(match_m128d(_mm512_extractf64x2_pd(((__m512d){0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0}), 3),6.0, 7.0));
__m128d test_mm512_mask_extractf64x2_pd(__m128d __W, __mmask8 __U, __m512d __A) {
// CHECK-LABEL: test_mm512_mask_extractf64x2_pd
@@ -1442,6 +1446,7 @@ __m128d test_mm512_mask_extractf64x2_pd(__m128d __W, __mmask8 __U, __m512d __A)
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm512_mask_extractf64x2_pd(__W, __U, __A, 3);
}
+TEST_CONSTEXPR(match_m128d(_mm512_mask_extractf64x2_pd(((__m128d)(__v2df){100.0, 101.0}),(__mmask8)0x1,((__m512d)(__v8df){0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0}),3),6.0, 101.0));
__m128d test_mm512_maskz_extractf64x2_pd(__mmask8 __U, __m512d __A) {
// CHECK-LABEL: test_mm512_maskz_extractf64x2_pd
@@ -1449,12 +1454,14 @@ __m128d test_mm512_maskz_extractf64x2_pd(__mmask8 __U, __m512d __A) {
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm512_maskz_extractf64x2_pd(__U, __A, 3);
}
+TEST_CONSTEXPR(match_m128d(_mm512_maskz_extractf64x2_pd((__mmask8)0x2,((__m512d){0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0}),3),0.0, 7.0));
__m256i test_mm512_extracti32x8_epi32(__m512i __A) {
// CHECK-LABEL: test_mm512_extracti32x8_epi32
// CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
return _mm512_extracti32x8_epi32(__A, 1);
}
+TEST_CONSTEXPR(match_v8si(_mm512_extracti32x8_epi32(((__m512i)(__v16si){0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15}), 1),8, 9,10,11,12,13,14,15));
__m256i test_mm512_mask_extracti32x8_epi32(__m256i __W, __mmask8 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_extracti32x8_epi32
@@ -1462,6 +1469,7 @@ __m256i test_mm512_mask_extracti32x8_epi32(__m256i __W, __mmask8 __U, __m512i __
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm512_mask_extracti32x8_epi32(__W, __U, __A, 1);
}
+TEST_CONSTEXPR(match_v8si(_mm512_mask_extracti32x8_epi32(((__m256i)(__v8si){100,101,102,103,104,105,106,107}), (__mmask8)0xAA,((__m512i)(__v16si){0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15}),1),100, 9, 102, 11, 104, 13, 106, 15));
__m256i test_mm512_maskz_extracti32x8_epi32(__mmask8 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_maskz_extracti32x8_epi32
@@ -1469,12 +1477,14 @@ __m256i test_mm512_maskz_extracti32x8_epi32(__mmask8 __U, __m512i __A) {
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm512_maskz_extracti32x8_epi32(__U, __A, 1);
}
+TEST_CONSTEXPR(match_v8si(_mm512_maskz_extracti32x8_epi32((__mmask8)0x0F,((__m512i)(__v16si){0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15}),1),8, 9, 10, 11, 0, 0, 0, 0));
__m128i test_mm512_extracti64x2_epi64(__m512i __A) {
// CHECK-LABEL: test_mm512_extracti64x2_epi64
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> poison, <2 x i32> <i32 6, i32 7>
return _mm512_extracti64x2_epi64(__A, 3);
}
+TEST_CONSTEXPR(match_m128i(_mm512_extracti64x2_epi64(((__m512i)(__v8di){0ULL,1ULL,2ULL,3ULL, 4ULL,5ULL,6ULL,7ULL}), 3),6ULL, 7ULL));
__m128i test_mm512_mask_extracti64x2_epi64(__m128i __W, __mmask8 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_extracti64x2_epi64
@@ -1482,6 +1492,7 @@ __m128i test_mm512_mask_extracti64x2_epi64(__m128i __W, __mmask8 __U, __m512i __
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm512_mask_extracti64x2_epi64(__W, __U, __A, 3);
}
+TEST_CONSTEXPR(match_m128i(_mm512_mask_extracti64x2_epi64(((__m128i)(__v2di){100ULL, 101ULL}), (__mmask8)0x1,((__m512i)(__v8di){0ULL,1ULL,2ULL,3ULL, 4ULL,5ULL,6ULL,7ULL}),3),6ULL, 101ULL));
__m128i test_mm512_maskz_extracti64x2_epi64(__mmask8 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_maskz_extracti64x2_epi64
@@ -1489,6 +1500,7 @@ __m128i test_mm512_maskz_extracti64x2_epi64(__mmask8 __U, __m512i __A) {
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm512_maskz_extracti64x2_epi64(__U, __A, 3);
}
+TEST_CONSTEXPR(match_m128i(_mm512_maskz_extracti64x2_epi64((__mmask8)0x2,((__m512i)(__v8di){0ULL,1ULL,2ULL,3ULL, 4ULL,5ULL,6ULL,7ULL}),3),0ULL, 7ULL));
__m512 test_mm512_insertf32x8(__m512 __A, __m256 __B) {
// CHECK-LABEL: test_mm512_insertf32x8
diff --git a/clang/test/CodeGen/X86/avx512f-builtins.c b/clang/test/CodeGen/X86/avx512f-builtins.c
index 3deaf8e..122dadd 100644
--- a/clang/test/CodeGen/X86/avx512f-builtins.c
+++ b/clang/test/CodeGen/X86/avx512f-builtins.c
@@ -212,7 +212,7 @@ void test_mm512_storeu_pd(void *p, __m512d a)
void test_mm512_mask_store_ps(void *p, __m512 a, __mmask16 m)
{
// CHECK-LABEL: test_mm512_mask_store_ps
- // CHECK: @llvm.masked.store.v16f32.p0(<16 x float> %{{.*}}, ptr %{{.*}}, i32 64, <16 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v16f32.p0(<16 x float> %{{.*}}, ptr align 64 %{{.*}}, <16 x i1> %{{.*}})
_mm512_mask_store_ps(p, m, a);
}
@@ -260,7 +260,7 @@ void test_mm512_store_pd(void *p, __m512d a)
void test_mm512_mask_store_pd(void *p, __m512d a, __mmask8 m)
{
// CHECK-LABEL: test_mm512_mask_store_pd
- // CHECK: @llvm.masked.store.v8f64.p0(<8 x double> %{{.*}}, ptr %{{.*}}, i32 64, <8 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v8f64.p0(<8 x double> %{{.*}}, ptr align 64 %{{.*}}, <8 x i1> %{{.*}})
_mm512_mask_store_pd(p, m, a);
}
@@ -272,7 +272,7 @@ void test_mm512_storeu_epi32(void *__P, __m512i __A) {
void test_mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_storeu_epi32
- // CHECK: @llvm.masked.store.v16i32.p0(<16 x i32> %{{.*}}, ptr %{{.*}}, i32 1, <16 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v16i32.p0(<16 x i32> %{{.*}}, ptr align 1 %{{.*}}, <16 x i1> %{{.*}})
return _mm512_mask_storeu_epi32(__P, __U, __A);
}
@@ -284,7 +284,7 @@ void test_mm512_storeu_epi64(void *__P, __m512i __A) {
void test_mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_storeu_epi64
- // CHECK: @llvm.masked.store.v8i64.p0(<8 x i64> %{{.*}}, ptr %{{.*}}, i32 1, <8 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v8i64.p0(<8 x i64> %{{.*}}, ptr align 1 %{{.*}}, <8 x i1> %{{.*}})
return _mm512_mask_storeu_epi64(__P, __U, __A);
}
@@ -305,14 +305,14 @@ __m512i test_mm512_loadu_epi32 (void *__P)
__m512i test_mm512_mask_loadu_epi32 (__m512i __W, __mmask16 __U, void *__P)
{
// CHECK-LABEL: test_mm512_mask_loadu_epi32
- // CHECK: @llvm.masked.load.v16i32.p0(ptr %{{.*}}, i32 1, <16 x i1> %{{.*}}, <16 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v16i32.p0(ptr align 1 %{{.*}}, <16 x i1> %{{.*}}, <16 x i32> %{{.*}})
return _mm512_mask_loadu_epi32 (__W,__U, __P);
}
__m512i test_mm512_maskz_loadu_epi32 (__mmask16 __U, void *__P)
{
// CHECK-LABEL: test_mm512_maskz_loadu_epi32
- // CHECK: @llvm.masked.load.v16i32.p0(ptr %{{.*}}, i32 1, <16 x i1> %{{.*}}, <16 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v16i32.p0(ptr align 1 %{{.*}}, <16 x i1> %{{.*}}, <16 x i32> %{{.*}})
return _mm512_maskz_loadu_epi32 (__U, __P);
}
@@ -326,14 +326,14 @@ __m512i test_mm512_loadu_epi64 (void *__P)
__m512i test_mm512_mask_loadu_epi64 (__m512i __W, __mmask8 __U, void *__P)
{
// CHECK-LABEL: test_mm512_mask_loadu_epi64
- // CHECK: @llvm.masked.load.v8i64.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i64.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x i64> %{{.*}})
return _mm512_mask_loadu_epi64 (__W,__U, __P);
}
__m512i test_mm512_maskz_loadu_epi64 (__mmask16 __U, void *__P)
{
// CHECK-LABEL: test_mm512_maskz_loadu_epi64
- // CHECK: @llvm.masked.load.v8i64.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i64.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x i64> %{{.*}})
return _mm512_maskz_loadu_epi64 (__U, __P);
}
@@ -347,7 +347,7 @@ __m512 test_mm512_loadu_ps(void *p)
__m512 test_mm512_mask_loadu_ps (__m512 __W, __mmask16 __U, void *__P)
{
// CHECK-LABEL: test_mm512_mask_loadu_ps
- // CHECK: @llvm.masked.load.v16f32.p0(ptr %{{.*}}, i32 1, <16 x i1> %{{.*}}, <16 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v16f32.p0(ptr align 1 %{{.*}}, <16 x i1> %{{.*}}, <16 x float> %{{.*}})
return _mm512_mask_loadu_ps (__W,__U, __P);
}
@@ -361,7 +361,7 @@ __m512d test_mm512_loadu_pd(void *p)
__m512d test_mm512_mask_loadu_pd (__m512d __W, __mmask8 __U, void *__P)
{
// CHECK-LABEL: test_mm512_mask_loadu_pd
- // CHECK: @llvm.masked.load.v8f64.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v8f64.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x double> %{{.*}})
return _mm512_mask_loadu_pd (__W,__U, __P);
}
@@ -399,14 +399,14 @@ __m512 test_mm512_load_ps(void *p)
__m512 test_mm512_mask_load_ps (__m512 __W, __mmask16 __U, void *__P)
{
// CHECK-LABEL: test_mm512_mask_load_ps
- // CHECK: @llvm.masked.load.v16f32.p0(ptr %{{.*}}, i32 64, <16 x i1> %{{.*}}, <16 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v16f32.p0(ptr align 64 %{{.*}}, <16 x i1> %{{.*}}, <16 x float> %{{.*}})
return _mm512_mask_load_ps (__W,__U, __P);
}
__m512 test_mm512_maskz_load_ps(__mmask16 __U, void *__P)
{
// CHECK-LABEL: test_mm512_maskz_load_ps
- // CHECK: @llvm.masked.load.v16f32.p0(ptr %{{.*}}, i32 64, <16 x i1> %{{.*}}, <16 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v16f32.p0(ptr align 64 %{{.*}}, <16 x i1> %{{.*}}, <16 x float> %{{.*}})
return _mm512_maskz_load_ps(__U, __P);
}
@@ -420,14 +420,14 @@ __m512d test_mm512_load_pd(void *p)
__m512d test_mm512_mask_load_pd (__m512d __W, __mmask8 __U, void *__P)
{
// CHECK-LABEL: test_mm512_mask_load_pd
- // CHECK: @llvm.masked.load.v8f64.p0(ptr %{{.*}}, i32 64, <8 x i1> %{{.*}}, <8 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v8f64.p0(ptr align 64 %{{.*}}, <8 x i1> %{{.*}}, <8 x double> %{{.*}})
return _mm512_mask_load_pd (__W,__U, __P);
}
__m512d test_mm512_maskz_load_pd(__mmask8 __U, void *__P)
{
// CHECK-LABEL: test_mm512_maskz_load_pd
- // CHECK: @llvm.masked.load.v8f64.p0(ptr %{{.*}}, i32 64, <8 x i1> %{{.*}}, <8 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v8f64.p0(ptr align 64 %{{.*}}, <8 x i1> %{{.*}}, <8 x double> %{{.*}})
return _mm512_maskz_load_pd(__U, __P);
}
@@ -2452,6 +2452,7 @@ __m256d test_mm512_extractf64x4_pd(__m512d a)
// CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
return _mm512_extractf64x4_pd(a, 1);
}
+TEST_CONSTEXPR(match_m256d(_mm512_extractf64x4_pd(((__m512d){0.0,1.0,2.0,3.0, 4.0,5.0,6.0,7.0}),1),4.0, 5.0, 6.0, 7.0));
__m256d test_mm512_mask_extractf64x4_pd(__m256d __W,__mmask8 __U,__m512d __A){
// CHECK-LABEL: test_mm512_mask_extractf64x4_pd
@@ -2459,6 +2460,7 @@ __m256d test_mm512_mask_extractf64x4_pd(__m256d __W,__mmask8 __U,__m512d __A){
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm512_mask_extractf64x4_pd( __W, __U, __A, 1);
}
+TEST_CONSTEXPR(match_m256d(_mm512_mask_extractf64x4_pd(((__m256d){100.0,101.0,102.0,103.0}), (__mmask8)0x5,((__m512d){0.0,1.0,2.0,3.0, 4.0,5.0,6.0,7.0}), 1), 4.0, 101.0, 6.0, 103.0));
__m256d test_mm512_maskz_extractf64x4_pd(__mmask8 __U,__m512d __A){
// CHECK-LABEL: test_mm512_maskz_extractf64x4_pd
@@ -2466,6 +2468,7 @@ __m256d test_mm512_maskz_extractf64x4_pd(__mmask8 __U,__m512d __A){
// CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm512_maskz_extractf64x4_pd( __U, __A, 1);
}
+TEST_CONSTEXPR(match_m256d(_mm512_maskz_extractf64x4_pd((__mmask8)0x3,((__m512d){0.0,1.0,2.0,3.0, 4.0,5.0,6.0,7.0}),1),4.0, 5.0, 0.0, 0.0));
__m128 test_mm512_extractf32x4_ps(__m512 a)
{
@@ -2473,6 +2476,7 @@ __m128 test_mm512_extractf32x4_ps(__m512 a)
// CHECK: shufflevector <16 x float> %{{.*}}, <16 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
return _mm512_extractf32x4_ps(a, 1);
}
+TEST_CONSTEXPR(match_m128(_mm512_extractf32x4_ps(((__m512){0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15}),1),4.0f, 5.0f, 6.0f, 7.0f));
__m128 test_mm512_mask_extractf32x4_ps(__m128 __W, __mmask8 __U,__m512 __A){
// CHECK-LABEL: test_mm512_mask_extractf32x4_ps
@@ -2480,6 +2484,7 @@ __m128 test_mm512_mask_extractf32x4_ps(__m128 __W, __mmask8 __U,__m512 __A){
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm512_mask_extractf32x4_ps( __W, __U, __A, 1);
}
+TEST_CONSTEXPR(match_m128(_mm512_mask_extractf32x4_ps(((__m128){100,101,102,103}),(__mmask8)0x5,((__m512){0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15}),1),4.0f, 101.0f, 6.0f, 103.0f));
__m128 test_mm512_maskz_extractf32x4_ps( __mmask8 __U,__m512 __A){
// CHECK-LABEL: test_mm512_maskz_extractf32x4_ps
@@ -2487,6 +2492,7 @@ __m128 test_mm512_maskz_extractf32x4_ps( __mmask8 __U,__m512 __A){
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm512_maskz_extractf32x4_ps(__U, __A, 1);
}
+TEST_CONSTEXPR(match_m128(_mm512_maskz_extractf32x4_ps((__mmask8)0x3,((__m512){0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15}),1),4.0f, 5.0f, 0.0f, 0.0f));
__mmask16 test_mm512_cmpeq_epu32_mask(__m512i __a, __m512i __b) {
// CHECK-LABEL: test_mm512_cmpeq_epu32_mask
@@ -4560,13 +4566,13 @@ __m512i test_mm512_maskz_srli_epi64_2(__mmask8 __U, __m512i __A, unsigned int __
__m512i test_mm512_mask_load_epi32(__m512i __W, __mmask16 __U, void const *__P) {
// CHECK-LABEL: test_mm512_mask_load_epi32
- // CHECK: @llvm.masked.load.v16i32.p0(ptr %{{.*}}, i32 64, <16 x i1> %{{.*}}, <16 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v16i32.p0(ptr align 64 %{{.*}}, <16 x i1> %{{.*}}, <16 x i32> %{{.*}})
return _mm512_mask_load_epi32(__W, __U, __P);
}
__m512i test_mm512_maskz_load_epi32(__mmask16 __U, void const *__P) {
// CHECK-LABEL: test_mm512_maskz_load_epi32
- // CHECK: @llvm.masked.load.v16i32.p0(ptr %{{.*}}, i32 64, <16 x i1> %{{.*}}, <16 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v16i32.p0(ptr align 64 %{{.*}}, <16 x i1> %{{.*}}, <16 x i32> %{{.*}})
return _mm512_maskz_load_epi32(__U, __P);
}
@@ -4596,25 +4602,25 @@ __m512i test_mm512_maskz_mov_epi64(__mmask8 __U, __m512i __A) {
__m512i test_mm512_mask_load_epi64(__m512i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm512_mask_load_epi64
- // CHECK: @llvm.masked.load.v8i64.p0(ptr %{{.*}}, i32 64, <8 x i1> %{{.*}}, <8 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i64.p0(ptr align 64 %{{.*}}, <8 x i1> %{{.*}}, <8 x i64> %{{.*}})
return _mm512_mask_load_epi64(__W, __U, __P);
}
__m512i test_mm512_maskz_load_epi64(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm512_maskz_load_epi64
- // CHECK: @llvm.masked.load.v8i64.p0(ptr %{{.*}}, i32 64, <8 x i1> %{{.*}}, <8 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i64.p0(ptr align 64 %{{.*}}, <8 x i1> %{{.*}}, <8 x i64> %{{.*}})
return _mm512_maskz_load_epi64(__U, __P);
}
void test_mm512_mask_store_epi32(void *__P, __mmask16 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_store_epi32
- // CHECK: @llvm.masked.store.v16i32.p0(<16 x i32> %{{.*}}, ptr %{{.*}}, i32 64, <16 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v16i32.p0(<16 x i32> %{{.*}}, ptr align 64 %{{.*}}, <16 x i1> %{{.*}})
return _mm512_mask_store_epi32(__P, __U, __A);
}
void test_mm512_mask_store_epi64(void *__P, __mmask8 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_store_epi64
- // CHECK: @llvm.masked.store.v8i64.p0(<8 x i64> %{{.*}}, ptr %{{.*}}, i32 64, <8 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v8i64.p0(<8 x i64> %{{.*}}, ptr align 64 %{{.*}}, <8 x i1> %{{.*}})
return _mm512_mask_store_epi64(__P, __U, __A);
}
@@ -7363,6 +7369,7 @@ __m128i test_mm512_extracti32x4_epi32(__m512i __A) {
// CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> poison, <4 x i32> <i32 12, i32 13, i32 14, i32 15>
return _mm512_extracti32x4_epi32(__A, 3);
}
+TEST_CONSTEXPR(match_m128i(_mm512_extracti32x4_epi32(((__m512i)(__v16si){0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15}), 3), 0x0000000D0000000CULL, 0x0000000F0000000EULL));
__m128i test_mm512_mask_extracti32x4_epi32(__m128i __W, __mmask8 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_extracti32x4_epi32
@@ -7370,6 +7377,7 @@ __m128i test_mm512_mask_extracti32x4_epi32(__m128i __W, __mmask8 __U, __m512i __
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm512_mask_extracti32x4_epi32(__W, __U, __A, 3);
}
+TEST_CONSTEXPR(match_m128i(_mm512_mask_extracti32x4_epi32(((__m128i)(__v4si){100,101,102,103}), (__mmask8)0x5, ((__m512i)(__v16si){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}), 3), 0x000000650000000CULL, 0x000000670000000EULL));
__m128i test_mm512_maskz_extracti32x4_epi32(__mmask8 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_maskz_extracti32x4_epi32
@@ -7377,12 +7385,14 @@ __m128i test_mm512_maskz_extracti32x4_epi32(__mmask8 __U, __m512i __A) {
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm512_maskz_extracti32x4_epi32(__U, __A, 3);
}
+TEST_CONSTEXPR(match_m128i(_mm512_maskz_extracti32x4_epi32((__mmask8)0x3, ((__m512i)(__v16si){0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}), 3), 0x0000000D0000000CULL, 0x0000000000000000ULL));
__m256i test_mm512_extracti64x4_epi64(__m512i __A) {
// CHECK-LABEL: test_mm512_extracti64x4_epi64
// CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
return _mm512_extracti64x4_epi64(__A, 1);
}
+TEST_CONSTEXPR(match_m256i(_mm512_extracti64x4_epi64(((__m512i)(__v8di){0,1,2,3,4,5,6,7}), 1), 4ULL, 5ULL, 6ULL, 7ULL));
__m256i test_mm512_mask_extracti64x4_epi64(__m256i __W, __mmask8 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_mask_extracti64x4_epi64
@@ -7390,6 +7400,7 @@ __m256i test_mm512_mask_extracti64x4_epi64(__m256i __W, __mmask8 __U, __m512i __
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm512_mask_extracti64x4_epi64(__W, __U, __A, 1);
}
+TEST_CONSTEXPR(match_m256i(_mm512_mask_extracti64x4_epi64(((__m256i)(__v4di){100ULL,101ULL,102ULL,103ULL}), (__mmask8)0x5, (((__m512i)(__v8di){0ULL,1ULL,2ULL,3ULL, 4ULL,5ULL,6ULL,7ULL})), 1), 4ULL, 101ULL, 6ULL, 103ULL));
__m256i test_mm512_maskz_extracti64x4_epi64(__mmask8 __U, __m512i __A) {
// CHECK-LABEL: test_mm512_maskz_extracti64x4_epi64
@@ -7397,7 +7408,7 @@ __m256i test_mm512_maskz_extracti64x4_epi64(__mmask8 __U, __m512i __A) {
// CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm512_maskz_extracti64x4_epi64(__U, __A, 1);
}
-
+TEST_CONSTEXPR(match_m256i(_mm512_maskz_extracti64x4_epi64((__mmask8)0x3, (((__m512i)(__v8di){0ULL,1ULL,2ULL,3ULL, 4ULL,5ULL,6ULL,7ULL})), 1), 4ULL, 5ULL, 0ULL, 0ULL));
__m512d test_mm512_insertf64x4(__m512d __A, __m256d __B) {
// CHECK-LABEL: test_mm512_insertf64x4
// CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
@@ -11572,42 +11583,42 @@ __m128d test_mm_maskz_move_sd (__mmask8 __U, __m128d __A, __m128d __B)
void test_mm_mask_store_ss(float * __P, __mmask8 __U, __m128 __A)
{
// CHECK-LABEL: test_mm_mask_store_ss
- // CHECK: call void @llvm.masked.store.v4f32.p0(<4 x float> %{{.*}}, ptr %{{.*}}, i32 1, <4 x i1> %{{.*}})
+ // CHECK: call void @llvm.masked.store.v4f32.p0(<4 x float> %{{.*}}, ptr align 1 %{{.*}}, <4 x i1> %{{.*}})
_mm_mask_store_ss(__P, __U, __A);
}
void test_mm_mask_store_sd(double * __P, __mmask8 __U, __m128d __A)
{
// CHECK-LABEL: test_mm_mask_store_sd
- // CHECK: call void @llvm.masked.store.v2f64.p0(<2 x double> %{{.*}}, ptr %{{.*}}, i32 1, <2 x i1> %{{.*}})
+ // CHECK: call void @llvm.masked.store.v2f64.p0(<2 x double> %{{.*}}, ptr align 1 %{{.*}}, <2 x i1> %{{.*}})
_mm_mask_store_sd(__P, __U, __A);
}
__m128 test_mm_mask_load_ss(__m128 __A, __mmask8 __U, const float* __W)
{
// CHECK-LABEL: test_mm_mask_load_ss
- // CHECK: call {{.*}}<4 x float> @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: call {{.*}}<4 x float> @llvm.masked.load.v4f32.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_mask_load_ss(__A, __U, __W);
}
__m128 test_mm_maskz_load_ss (__mmask8 __U, const float * __W)
{
// CHECK-LABEL: test_mm_maskz_load_ss
- // CHECK: call {{.*}}<4 x float> @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: call {{.*}}<4 x float> @llvm.masked.load.v4f32.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_maskz_load_ss (__U, __W);
}
__m128d test_mm_mask_load_sd (__m128d __A, __mmask8 __U, const double * __W)
{
// CHECK-LABEL: test_mm_mask_load_sd
- // CHECK: call {{.*}}<2 x double> @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
+ // CHECK: call {{.*}}<2 x double> @llvm.masked.load.v2f64.p0(ptr align 1 %{{.*}}, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_mask_load_sd (__A, __U, __W);
}
__m128d test_mm_maskz_load_sd (__mmask8 __U, const double * __W)
{
// CHECK-LABEL: test_mm_maskz_load_sd
- // CHECK: call {{.*}}<2 x double> @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
+ // CHECK: call {{.*}}<2 x double> @llvm.masked.load.v2f64.p0(ptr align 1 %{{.*}}, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_maskz_load_sd (__U, __W);
}
diff --git a/clang/test/CodeGen/X86/avx512fp16-builtins.c b/clang/test/CodeGen/X86/avx512fp16-builtins.c
index 2befff0..f0a0a3b 100644
--- a/clang/test/CodeGen/X86/avx512fp16-builtins.c
+++ b/clang/test/CodeGen/X86/avx512fp16-builtins.c
@@ -1505,13 +1505,13 @@ __m128h test_mm_load_sh(void const *A) {
__m128h test_mm_mask_load_sh(__m128h __A, __mmask8 __U, const void *__W) {
// CHECK-LABEL: test_mm_mask_load_sh
- // CHECK: @llvm.masked.load.v8f16.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.masked.load.v8f16.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x half> %{{.*}})
return _mm_mask_load_sh(__A, __U, __W);
}
__m128h test_mm_maskz_load_sh(__mmask8 __U, const void *__W) {
// CHECK-LABEL: test_mm_maskz_load_sh
- // CHECK: @llvm.masked.load.v8f16.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x half> %{{.*}})
+ // CHECK: @llvm.masked.load.v8f16.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x half> %{{.*}})
return _mm_maskz_load_sh(__U, __W);
}
@@ -1560,7 +1560,7 @@ void test_mm_store_sh(void *A, __m128h B) {
void test_mm_mask_store_sh(void *__P, __mmask8 __U, __m128h __A) {
// CHECK-LABEL: test_mm_mask_store_sh
- // CHECK: call void @llvm.masked.store.v8f16.p0(<8 x half> %{{.*}}, ptr %{{.*}}, i32 1, <8 x i1> %{{.*}})
+ // CHECK: call void @llvm.masked.store.v8f16.p0(<8 x half> %{{.*}}, ptr align 1 %{{.*}}, <8 x i1> %{{.*}})
_mm_mask_store_sh(__P, __U, __A);
}
diff --git a/clang/test/CodeGen/X86/avx512vl-builtins.c b/clang/test/CodeGen/X86/avx512vl-builtins.c
index 9b6bfea9..34db764 100644
--- a/clang/test/CodeGen/X86/avx512vl-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vl-builtins.c
@@ -7002,7 +7002,7 @@ void test_mm_store_epi32(void *__P, __m128i __A) {
void test_mm_mask_store_epi32(void *__P, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: test_mm_mask_store_epi32
- // CHECK: @llvm.masked.store.v4i32.p0(<4 x i32> %{{.*}}, ptr %{{.}}, i32 16, <4 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v4i32.p0(<4 x i32> %{{.*}}, ptr align 16 %{{.}}, <4 x i1> %{{.*}})
return _mm_mask_store_epi32(__P, __U, __A);
}
@@ -7014,7 +7014,7 @@ void test_mm256_store_epi32(void *__P, __m256i __A) {
void test_mm256_mask_store_epi32(void *__P, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_store_epi32
- // CHECK: @llvm.masked.store.v8i32.p0(<8 x i32> %{{.*}}, ptr %{{.}}, i32 32, <8 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v8i32.p0(<8 x i32> %{{.*}}, ptr align 32 %{{.}}, <8 x i1> %{{.*}})
return _mm256_mask_store_epi32(__P, __U, __A);
}
@@ -7074,13 +7074,13 @@ __m128i test_mm_load_epi32(void const *__P) {
__m128i test_mm_mask_load_epi32(__m128i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_load_epi32
- // CHECK: @llvm.masked.load.v4i32.p0(ptr %{{.*}}, i32 16, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v4i32.p0(ptr align 16 %{{.*}}, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_mask_load_epi32(__W, __U, __P);
}
__m128i test_mm_maskz_load_epi32(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_load_epi32
- // CHECK: @llvm.masked.load.v4i32.p0(ptr %{{.*}}, i32 16, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v4i32.p0(ptr align 16 %{{.*}}, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_maskz_load_epi32(__U, __P);
}
@@ -7092,13 +7092,13 @@ __m256i test_mm256_load_epi32(void const *__P) {
__m256i test_mm256_mask_load_epi32(__m256i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_load_epi32
- // CHECK: @llvm.masked.load.v8i32.p0(ptr %{{.*}}, i32 32, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i32.p0(ptr align 32 %{{.*}}, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_mask_load_epi32(__W, __U, __P);
}
__m256i test_mm256_maskz_load_epi32(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_load_epi32
- // CHECK: @llvm.masked.load.v8i32.p0(ptr %{{.*}}, i32 32, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i32.p0(ptr align 32 %{{.*}}, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_maskz_load_epi32(__U, __P);
}
@@ -7110,13 +7110,13 @@ __m128i test_mm_load_epi64(void const *__P) {
__m128i test_mm_mask_load_epi64(__m128i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_load_epi64
- // CHECK: @llvm.masked.load.v2i64.p0(ptr %{{.*}}, i32 16, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v2i64.p0(ptr align 16 %{{.*}}, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_mask_load_epi64(__W, __U, __P);
}
__m128i test_mm_maskz_load_epi64(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_load_epi64
- // CHECK: @llvm.masked.load.v2i64.p0(ptr %{{.*}}, i32 16, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v2i64.p0(ptr align 16 %{{.*}}, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_maskz_load_epi64(__U, __P);
}
@@ -7128,13 +7128,13 @@ __m256i test_mm256_load_epi64(void const *__P) {
__m256i test_mm256_mask_load_epi64(__m256i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_load_epi64
- // CHECK: @llvm.masked.load.v4i64.p0(ptr %{{.*}}, i32 32, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v4i64.p0(ptr align 32 %{{.*}}, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_mask_load_epi64(__W, __U, __P);
}
__m256i test_mm256_maskz_load_epi64(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_load_epi64
- // CHECK: @llvm.masked.load.v4i64.p0(ptr %{{.*}}, i32 32, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v4i64.p0(ptr align 32 %{{.*}}, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_maskz_load_epi64(__U, __P);
}
@@ -7146,7 +7146,7 @@ void test_mm_store_epi64(void *__P, __m128i __A) {
void test_mm_mask_store_epi64(void *__P, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: test_mm_mask_store_epi64
- // CHECK: @llvm.masked.store.v2i64.p0(<2 x i64> %{{.*}}, ptr %{{.*}}, i32 16, <2 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v2i64.p0(<2 x i64> %{{.*}}, ptr align 16 %{{.*}}, <2 x i1> %{{.*}})
return _mm_mask_store_epi64(__P, __U, __A);
}
@@ -7158,7 +7158,7 @@ void test_mm256_store_epi64(void *__P, __m256i __A) {
void test_mm256_mask_store_epi64(void *__P, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_store_epi64
- // CHECK: @llvm.masked.store.v4i64.p0(<4 x i64> %{{.*}}, ptr %{{.*}}, i32 32, <4 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v4i64.p0(<4 x i64> %{{.*}}, ptr align 32 %{{.*}}, <4 x i1> %{{.*}})
return _mm256_mask_store_epi64(__P, __U, __A);
}
@@ -7370,49 +7370,49 @@ __m256 test_mm256_maskz_fixupimm_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256
__m128d test_mm_mask_load_pd(__m128d __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_load_pd
- // CHECK: @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 16, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v2f64.p0(ptr align 16 %{{.*}}, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_mask_load_pd(__W, __U, __P);
}
__m128d test_mm_maskz_load_pd(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_load_pd
- // CHECK: @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 16, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v2f64.p0(ptr align 16 %{{.*}}, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_maskz_load_pd(__U, __P);
}
__m256d test_mm256_mask_load_pd(__m256d __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_load_pd
- // CHECK: @llvm.masked.load.v4f64.p0(ptr %{{.*}}, i32 32, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v4f64.p0(ptr align 32 %{{.*}}, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_mask_load_pd(__W, __U, __P);
}
__m256d test_mm256_maskz_load_pd(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_load_pd
- // CHECK: @llvm.masked.load.v4f64.p0(ptr %{{.*}}, i32 32, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v4f64.p0(ptr align 32 %{{.*}}, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_maskz_load_pd(__U, __P);
}
__m128 test_mm_mask_load_ps(__m128 __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_load_ps
- // CHECK: @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 16, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v4f32.p0(ptr align 16 %{{.*}}, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_mask_load_ps(__W, __U, __P);
}
__m128 test_mm_maskz_load_ps(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_load_ps
- // CHECK: @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 16, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v4f32.p0(ptr align 16 %{{.*}}, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_maskz_load_ps(__U, __P);
}
__m256 test_mm256_mask_load_ps(__m256 __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_load_ps
- // CHECK: @llvm.masked.load.v8f32.p0(ptr %{{.*}}, i32 32, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v8f32.p0(ptr align 32 %{{.*}}, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_mask_load_ps(__W, __U, __P);
}
__m256 test_mm256_maskz_load_ps(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_load_ps
- // CHECK: @llvm.masked.load.v8f32.p0(ptr %{{.*}}, i32 32, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v8f32.p0(ptr align 32 %{{.*}}, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_maskz_load_ps(__U, __P);
}
@@ -7424,13 +7424,13 @@ __m128i test_mm_loadu_epi64(void const *__P) {
__m128i test_mm_mask_loadu_epi64(__m128i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_loadu_epi64
- // CHECK: @llvm.masked.load.v2i64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v2i64.p0(ptr align 1 %{{.*}}, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_mask_loadu_epi64(__W, __U, __P);
}
__m128i test_mm_maskz_loadu_epi64(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_loadu_epi64
- // CHECK: @llvm.masked.load.v2i64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v2i64.p0(ptr align 1 %{{.*}}, <2 x i1> %{{.*}}, <2 x i64> %{{.*}})
return _mm_maskz_loadu_epi64(__U, __P);
}
@@ -7442,13 +7442,13 @@ __m256i test_mm256_loadu_epi64(void const *__P) {
__m256i test_mm256_mask_loadu_epi64(__m256i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_loadu_epi64
- // CHECK: @llvm.masked.load.v4i64.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v4i64.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_mask_loadu_epi64(__W, __U, __P);
}
__m256i test_mm256_maskz_loadu_epi64(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_loadu_epi64
- // CHECK: @llvm.masked.load.v4i64.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
+ // CHECK: @llvm.masked.load.v4i64.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x i64> %{{.*}})
return _mm256_maskz_loadu_epi64(__U, __P);
}
@@ -7460,13 +7460,13 @@ __m128i test_mm_loadu_epi32(void const *__P) {
__m128i test_mm_mask_loadu_epi32(__m128i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_loadu_epi32
- // CHECK: @llvm.masked.load.v4i32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v4i32.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_mask_loadu_epi32(__W, __U, __P);
}
__m128i test_mm_maskz_loadu_epi32(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_loadu_epi32
- // CHECK: @llvm.masked.load.v4i32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v4i32.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x i32> %{{.*}})
return _mm_maskz_loadu_epi32(__U, __P);
}
@@ -7478,85 +7478,85 @@ __m256i test_mm256_loadu_epi32(void const *__P) {
__m256i test_mm256_mask_loadu_epi32(__m256i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_loadu_epi32
- // CHECK: @llvm.masked.load.v8i32.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i32.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_mask_loadu_epi32(__W, __U, __P);
}
__m256i test_mm256_maskz_loadu_epi32(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_loadu_epi32
- // CHECK: @llvm.masked.load.v8i32.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i32.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x i32> %{{.*}})
return _mm256_maskz_loadu_epi32(__U, __P);
}
__m128d test_mm_mask_loadu_pd(__m128d __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_loadu_pd
- // CHECK: @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v2f64.p0(ptr align 1 %{{.*}}, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_mask_loadu_pd(__W, __U, __P);
}
__m128d test_mm_maskz_loadu_pd(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_loadu_pd
- // CHECK: @llvm.masked.load.v2f64.p0(ptr %{{.*}}, i32 1, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v2f64.p0(ptr align 1 %{{.*}}, <2 x i1> %{{.*}}, <2 x double> %{{.*}})
return _mm_maskz_loadu_pd(__U, __P);
}
__m256d test_mm256_mask_loadu_pd(__m256d __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_loadu_pd
- // CHECK: @llvm.masked.load.v4f64.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v4f64.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_mask_loadu_pd(__W, __U, __P);
}
__m256d test_mm256_maskz_loadu_pd(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_loadu_pd
- // CHECK: @llvm.masked.load.v4f64.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
+ // CHECK: @llvm.masked.load.v4f64.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x double> %{{.*}})
return _mm256_maskz_loadu_pd(__U, __P);
}
__m128 test_mm_mask_loadu_ps(__m128 __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_loadu_ps
- // CHECK: @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v4f32.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_mask_loadu_ps(__W, __U, __P);
}
__m128 test_mm_maskz_loadu_ps(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_loadu_ps
- // CHECK: @llvm.masked.load.v4f32.p0(ptr %{{.*}}, i32 1, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v4f32.p0(ptr align 1 %{{.*}}, <4 x i1> %{{.*}}, <4 x float> %{{.*}})
return _mm_maskz_loadu_ps(__U, __P);
}
__m256 test_mm256_mask_loadu_ps(__m256 __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_loadu_ps
- // CHECK: @llvm.masked.load.v8f32.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v8f32.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_mask_loadu_ps(__W, __U, __P);
}
__m256 test_mm256_maskz_loadu_ps(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_loadu_ps
- // CHECK: @llvm.masked.load.v8f32.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
+ // CHECK: @llvm.masked.load.v8f32.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x float> %{{.*}})
return _mm256_maskz_loadu_ps(__U, __P);
}
void test_mm_mask_store_pd(void *__P, __mmask8 __U, __m128d __A) {
// CHECK-LABEL: test_mm_mask_store_pd
- // CHECK: @llvm.masked.store.v2f64.p0(<2 x double> %{{.*}}, ptr %{{.*}}, i32 16, <2 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v2f64.p0(<2 x double> %{{.*}}, ptr align 16 %{{.*}}, <2 x i1> %{{.*}})
return _mm_mask_store_pd(__P, __U, __A);
}
void test_mm256_mask_store_pd(void *__P, __mmask8 __U, __m256d __A) {
// CHECK-LABEL: test_mm256_mask_store_pd
- // CHECK: @llvm.masked.store.v4f64.p0(<4 x double> %{{.*}}, ptr %{{.*}}, i32 32, <4 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v4f64.p0(<4 x double> %{{.*}}, ptr align 32 %{{.*}}, <4 x i1> %{{.*}})
return _mm256_mask_store_pd(__P, __U, __A);
}
void test_mm_mask_store_ps(void *__P, __mmask8 __U, __m128 __A) {
// CHECK-LABEL: test_mm_mask_store_ps
- // CHECK: @llvm.masked.store.v4f32.p0(<4 x float> %{{.*}}, ptr %{{.*}}, i32 16, <4 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v4f32.p0(<4 x float> %{{.*}}, ptr align 16 %{{.*}}, <4 x i1> %{{.*}})
return _mm_mask_store_ps(__P, __U, __A);
}
void test_mm256_mask_store_ps(void *__P, __mmask8 __U, __m256 __A) {
// CHECK-LABEL: test_mm256_mask_store_ps
- // CHECK: @llvm.masked.store.v8f32.p0(<8 x float> %{{.*}}, ptr %{{.*}}, i32 32, <8 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v8f32.p0(<8 x float> %{{.*}}, ptr align 32 %{{.*}}, <8 x i1> %{{.*}})
return _mm256_mask_store_ps(__P, __U, __A);
}
@@ -7568,7 +7568,7 @@ void test_mm_storeu_epi64(void *__p, __m128i __a) {
void test_mm_mask_storeu_epi64(void *__P, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: test_mm_mask_storeu_epi64
- // CHECK: @llvm.masked.store.v2i64.p0(<2 x i64> %{{.*}}, ptr %{{.*}}, i32 1, <2 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v2i64.p0(<2 x i64> %{{.*}}, ptr align 1 %{{.*}}, <2 x i1> %{{.*}})
return _mm_mask_storeu_epi64(__P, __U, __A);
}
@@ -7580,7 +7580,7 @@ void test_mm256_storeu_epi64(void *__P, __m256i __A) {
void test_mm256_mask_storeu_epi64(void *__P, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_storeu_epi64
- // CHECK: @llvm.masked.store.v4i64.p0(<4 x i64> %{{.*}}, ptr %{{.*}}, i32 1, <4 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v4i64.p0(<4 x i64> %{{.*}}, ptr align 1 %{{.*}}, <4 x i1> %{{.*}})
return _mm256_mask_storeu_epi64(__P, __U, __A);
}
@@ -7592,7 +7592,7 @@ void test_mm_storeu_epi32(void *__P, __m128i __A) {
void test_mm_mask_storeu_epi32(void *__P, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: test_mm_mask_storeu_epi32
- // CHECK: @llvm.masked.store.v4i32.p0(<4 x i32> %{{.*}}, ptr %{{.*}}, i32 1, <4 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v4i32.p0(<4 x i32> %{{.*}}, ptr align 1 %{{.*}}, <4 x i1> %{{.*}})
return _mm_mask_storeu_epi32(__P, __U, __A);
}
@@ -7604,31 +7604,31 @@ void test_mm256_storeu_epi32(void *__P, __m256i __A) {
void test_mm256_mask_storeu_epi32(void *__P, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_storeu_epi32
- // CHECK: @llvm.masked.store.v8i32.p0(<8 x i32> %{{.*}}, ptr %{{.*}}, i32 1, <8 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v8i32.p0(<8 x i32> %{{.*}}, ptr align 1 %{{.*}}, <8 x i1> %{{.*}})
return _mm256_mask_storeu_epi32(__P, __U, __A);
}
void test_mm_mask_storeu_pd(void *__P, __mmask8 __U, __m128d __A) {
// CHECK-LABEL: test_mm_mask_storeu_pd
- // CHECK: @llvm.masked.store.v2f64.p0(<2 x double> %{{.*}}, ptr %{{.*}}, i32 1, <2 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v2f64.p0(<2 x double> %{{.*}}, ptr align 1 %{{.*}}, <2 x i1> %{{.*}})
return _mm_mask_storeu_pd(__P, __U, __A);
}
void test_mm256_mask_storeu_pd(void *__P, __mmask8 __U, __m256d __A) {
// CHECK-LABEL: test_mm256_mask_storeu_pd
- // CHECK: @llvm.masked.store.v4f64.p0(<4 x double> %{{.*}}, ptr %{{.*}}, i32 1, <4 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v4f64.p0(<4 x double> %{{.*}}, ptr align 1 %{{.*}}, <4 x i1> %{{.*}})
return _mm256_mask_storeu_pd(__P, __U, __A);
}
void test_mm_mask_storeu_ps(void *__P, __mmask8 __U, __m128 __A) {
// CHECK-LABEL: test_mm_mask_storeu_ps
- // CHECK: @llvm.masked.store.v4f32.p0(<4 x float> %{{.*}}, ptr %{{.*}}, i32 1, <4 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v4f32.p0(<4 x float> %{{.*}}, ptr align 1 %{{.*}}, <4 x i1> %{{.*}})
return _mm_mask_storeu_ps(__P, __U, __A);
}
void test_mm256_mask_storeu_ps(void *__P, __mmask8 __U, __m256 __A) {
// CHECK-LABEL: test_mm256_mask_storeu_ps
- // CHECK: @llvm.masked.store.v8f32.p0(<8 x float> %{{.*}}, ptr %{{.*}}, i32 1, <8 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v8f32.p0(<8 x float> %{{.*}}, ptr align 1 %{{.*}}, <8 x i1> %{{.*}})
return _mm256_mask_storeu_ps(__P, __U, __A);
}
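
Every hunk in this run is the same mechanical update: the masked load/store intrinsics no longer take their alignment as a trailing `i32` immediate; it is carried instead as an `align N` parameter attribute on the pointer operand, so each CHECK line drops the `i32 16`/`i32 32`/`i32 1` argument and gains `ptr align N`. As a reminder of what the intrinsics themselves compute, a minimal scalar sketch of the lane-wise semantics (hypothetical helper names, 8 x i16 lanes for concreteness):

/* Hypothetical scalar models of @llvm.masked.load / @llvm.masked.store:
 * a lane participates only when its predicate bit is set; inactive load
 * lanes come from the passthru operand, inactive store lanes leave
 * memory untouched. The align attribute is a promise about base, not
 * part of the computation. */
static void masked_load_epi16(short out[8], const short *base,
                              unsigned char mask, const short passthru[8]) {
  for (int i = 0; i < 8; ++i)
    out[i] = ((mask >> i) & 1) ? base[i] : passthru[i];
}

static void masked_store_epi16(short *base, unsigned char mask,
                               const short v[8]) {
  for (int i = 0; i < 8; ++i)
    if ((mask >> i) & 1)
      base[i] = v[i];
}
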
@@ -9911,6 +9911,7 @@ __m128 test_mm256_extractf32x4_ps(__m256 __A) {
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
return _mm256_extractf32x4_ps(__A, 1);
}
+TEST_CONSTEXPR(match_m128(_mm256_extractf32x4_ps(((__m256){0,1,2,3, 4,5,6,7}), 1), 4.0f, 5.0f, 6.0f, 7.0f));
__m128 test_mm256_mask_extractf32x4_ps(__m128 __W, __mmask8 __U, __m256 __A) {
// CHECK-LABEL: test_mm256_mask_extractf32x4_ps
@@ -9918,6 +9919,7 @@ __m128 test_mm256_mask_extractf32x4_ps(__m128 __W, __mmask8 __U, __m256 __A) {
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm256_mask_extractf32x4_ps(__W, __U, __A, 1);
}
+TEST_CONSTEXPR(match_m128(_mm256_mask_extractf32x4_ps((((__m128){100,101,102,103})), (__mmask8)0x5, (((__m256){0,1,2,3, 4,5,6,7})), 1), 4.0f, 101.0f, 6.0f, 103.0f));
__m128 test_mm256_maskz_extractf32x4_ps(__mmask8 __U, __m256 __A) {
// CHECK-LABEL: test_mm256_maskz_extractf32x4_ps
@@ -9925,12 +9927,14 @@ __m128 test_mm256_maskz_extractf32x4_ps(__mmask8 __U, __m256 __A) {
// CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm256_maskz_extractf32x4_ps(__U, __A, 1);
}
+TEST_CONSTEXPR(match_m128(_mm256_maskz_extractf32x4_ps((__mmask8)0x3, (((__m256){0,1,2,3, 4,5,6,7})), 1), 4.0f, 5.0f, 0.0f, 0.0f));
__m128i test_mm256_extracti32x4_epi32(__m256i __A) {
// CHECK-LABEL: test_mm256_extracti32x4_epi32
// CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
return _mm256_extracti32x4_epi32(__A, 1);
}
+TEST_CONSTEXPR(match_m128i(_mm256_extracti32x4_epi32((((__m256i)(__v8si){0,1,2,3, 4,5,6,7})), 1), 0x0000000500000004ULL, 0x0000000700000006ULL));
__m128i test_mm256_mask_extracti32x4_epi32(__m128i __W, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_extracti32x4_epi32
@@ -9938,6 +9942,7 @@ __m128i test_mm256_mask_extracti32x4_epi32(__m128i __W, __mmask8 __U, __m256i __
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm256_mask_extracti32x4_epi32(__W, __U, __A, 1);
}
+TEST_CONSTEXPR(match_m128i(_mm256_mask_extracti32x4_epi32((((__m128i)(__v4si){100,101,102,103})), (__mmask8)0xA, (((__m256i)(__v8si){0,1,2,3, 4,5,6,7})), 1), 0x0000000500000064ULL, 0x0000000700000066ULL));
__m128i test_mm256_maskz_extracti32x4_epi32(__mmask8 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_maskz_extracti32x4_epi32
@@ -9945,6 +9950,7 @@ __m128i test_mm256_maskz_extracti32x4_epi32(__mmask8 __U, __m256i __A) {
// CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm256_maskz_extracti32x4_epi32(__U, __A, 1);
}
+TEST_CONSTEXPR(match_m128i(_mm256_maskz_extracti32x4_epi32((__mmask8)0x3, (((__m256i)(__v8si){0,1,2,3, 4,5,6,7})), 1), 0x0000000500000004ULL, 0x0000000000000000ULL));
__m256 test_mm256_insertf32x4(__m256 __A, __m128 __B) {
// CHECK-LABEL: test_mm256_insertf32x4
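
The TEST_CONSTEXPR cases added above (and the analogous avx512vldq ones below) pin down the constant-evaluated behaviour of the masked extracts: take 128-bit lane imm, then blend per element with W under the mask, or with zero in the maskz form. A hedged scalar reference built from the values the tests use:

#include <assert.h>

/* Hypothetical reference for _mm256_mask_extractf32x4_ps and its maskz
 * variant: pick 128-bit lane imm, then per element keep the lane value
 * when the mask bit is set, otherwise W[i] (mask form) or 0.0f (maskz,
 * signalled here by W == 0). */
static float lane_blend(const float A[8], int imm, unsigned char U,
                        const float *W, int i) {
  float lane = A[4 * imm + i];    /* the shufflevector step */
  if ((U >> i) & 1)
    return lane;                  /* the select step */
  return W ? W[i] : 0.0f;
}

int main(void) {
  const float A[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  const float W[4] = {100, 101, 102, 103};
  assert(lane_blend(A, 1, 0x5, W, 0) == 4.0f);   /* mask bit set */
  assert(lane_blend(A, 1, 0x5, W, 1) == 101.0f); /* falls back to W */
  assert(lane_blend(A, 1, 0x3, 0, 2) == 0.0f);   /* maskz zeroes */
  return 0;
}
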
diff --git a/clang/test/CodeGen/X86/avx512vlbw-builtins.c b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
index 95e4d40..116d86f 100644
--- a/clang/test/CodeGen/X86/avx512vlbw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
@@ -2776,13 +2776,13 @@ __m128i test_mm_loadu_epi16(void const *__P) {
__m128i test_mm_mask_loadu_epi16(__m128i __W, __mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_loadu_epi16
- // CHECK: @llvm.masked.load.v8i16.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x i16> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i16.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x i16> %{{.*}})
return _mm_mask_loadu_epi16(__W, __U, __P);
}
__m128i test_mm_maskz_loadu_epi16(__mmask8 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_loadu_epi16
- // CHECK: @llvm.masked.load.v8i16.p0(ptr %{{.*}}, i32 1, <8 x i1> %{{.*}}, <8 x i16> %{{.*}})
+ // CHECK: @llvm.masked.load.v8i16.p0(ptr align 1 %{{.*}}, <8 x i1> %{{.*}}, <8 x i16> %{{.*}})
return _mm_maskz_loadu_epi16(__U, __P);
}
@@ -2794,13 +2794,13 @@ __m256i test_mm256_loadu_epi16(void const *__P) {
__m256i test_mm256_mask_loadu_epi16(__m256i __W, __mmask16 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_loadu_epi16
- // CHECK: @llvm.masked.load.v16i16.p0(ptr %{{.*}}, i32 1, <16 x i1> %{{.*}}, <16 x i16> %{{.*}})
+ // CHECK: @llvm.masked.load.v16i16.p0(ptr align 1 %{{.*}}, <16 x i1> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_mask_loadu_epi16(__W, __U, __P);
}
__m256i test_mm256_maskz_loadu_epi16(__mmask16 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_loadu_epi16
- // CHECK: @llvm.masked.load.v16i16.p0(ptr %{{.*}}, i32 1, <16 x i1> %{{.*}}, <16 x i16> %{{.*}})
+ // CHECK: @llvm.masked.load.v16i16.p0(ptr align 1 %{{.*}}, <16 x i1> %{{.*}}, <16 x i16> %{{.*}})
return _mm256_maskz_loadu_epi16(__U, __P);
}
@@ -2812,13 +2812,13 @@ __m128i test_mm_loadu_epi8(void const *__P) {
__m128i test_mm_mask_loadu_epi8(__m128i __W, __mmask16 __U, void const *__P) {
// CHECK-LABEL: test_mm_mask_loadu_epi8
- // CHECK: @llvm.masked.load.v16i8.p0(ptr %{{.*}}, i32 1, <16 x i1> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK: @llvm.masked.load.v16i8.p0(ptr align 1 %{{.*}}, <16 x i1> %{{.*}}, <16 x i8> %{{.*}})
return _mm_mask_loadu_epi8(__W, __U, __P);
}
__m128i test_mm_maskz_loadu_epi8(__mmask16 __U, void const *__P) {
// CHECK-LABEL: test_mm_maskz_loadu_epi8
- // CHECK: @llvm.masked.load.v16i8.p0(ptr %{{.*}}, i32 1, <16 x i1> %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK: @llvm.masked.load.v16i8.p0(ptr align 1 %{{.*}}, <16 x i1> %{{.*}}, <16 x i8> %{{.*}})
return _mm_maskz_loadu_epi8(__U, __P);
}
@@ -2830,13 +2830,13 @@ __m256i test_mm256_loadu_epi8(void const *__P) {
__m256i test_mm256_mask_loadu_epi8(__m256i __W, __mmask32 __U, void const *__P) {
// CHECK-LABEL: test_mm256_mask_loadu_epi8
- // CHECK: @llvm.masked.load.v32i8.p0(ptr %{{.*}}, i32 1, <32 x i1> %{{.*}}, <32 x i8> %{{.*}})
+ // CHECK: @llvm.masked.load.v32i8.p0(ptr align 1 %{{.*}}, <32 x i1> %{{.*}}, <32 x i8> %{{.*}})
return _mm256_mask_loadu_epi8(__W, __U, __P);
}
__m256i test_mm256_maskz_loadu_epi8(__mmask32 __U, void const *__P) {
// CHECK-LABEL: test_mm256_maskz_loadu_epi8
- // CHECK: @llvm.masked.load.v32i8.p0(ptr %{{.*}}, i32 1, <32 x i1> %{{.*}}, <32 x i8> %{{.*}})
+ // CHECK: @llvm.masked.load.v32i8.p0(ptr align 1 %{{.*}}, <32 x i1> %{{.*}}, <32 x i8> %{{.*}})
return _mm256_maskz_loadu_epi8(__U, __P);
}
@@ -2848,7 +2848,7 @@ void test_mm_storeu_epi16(void *__p, __m128i __a) {
void test_mm_mask_storeu_epi16(void *__P, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: test_mm_mask_storeu_epi16
- // CHECK: @llvm.masked.store.v8i16.p0(<8 x i16> %{{.*}}, ptr %{{.*}}, i32 1, <8 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v8i16.p0(<8 x i16> %{{.*}}, ptr align 1 %{{.*}}, <8 x i1> %{{.*}})
return _mm_mask_storeu_epi16(__P, __U, __A);
}
@@ -2860,7 +2860,7 @@ void test_mm256_storeu_epi16(void *__P, __m256i __A) {
void test_mm256_mask_storeu_epi16(void *__P, __mmask16 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_storeu_epi16
- // CHECK: @llvm.masked.store.v16i16.p0(<16 x i16> %{{.*}}, ptr %{{.*}}, i32 1, <16 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v16i16.p0(<16 x i16> %{{.*}}, ptr align 1 %{{.*}}, <16 x i1> %{{.*}})
return _mm256_mask_storeu_epi16(__P, __U, __A);
}
@@ -2872,7 +2872,7 @@ void test_mm_storeu_epi8(void *__p, __m128i __a) {
void test_mm_mask_storeu_epi8(void *__P, __mmask16 __U, __m128i __A) {
// CHECK-LABEL: test_mm_mask_storeu_epi8
- // CHECK: @llvm.masked.store.v16i8.p0(<16 x i8> %{{.*}}, ptr %{{.*}}, i32 1, <16 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v16i8.p0(<16 x i8> %{{.*}}, ptr align 1 %{{.*}}, <16 x i1> %{{.*}})
return _mm_mask_storeu_epi8(__P, __U, __A);
}
@@ -2884,7 +2884,7 @@ void test_mm256_storeu_epi8(void *__P, __m256i __A) {
void test_mm256_mask_storeu_epi8(void *__P, __mmask32 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_storeu_epi8
- // CHECK: @llvm.masked.store.v32i8.p0(<32 x i8> %{{.*}}, ptr %{{.*}}, i32 1, <32 x i1> %{{.*}})
+ // CHECK: @llvm.masked.store.v32i8.p0(<32 x i8> %{{.*}}, ptr align 1 %{{.*}}, <32 x i1> %{{.*}})
return _mm256_mask_storeu_epi8(__P, __U, __A);
}
__mmask16 test_mm_test_epi8_mask(__m128i __A, __m128i __B) {
diff --git a/clang/test/CodeGen/X86/avx512vldq-builtins.c b/clang/test/CodeGen/X86/avx512vldq-builtins.c
index 4773b60..1bfc022 100644
--- a/clang/test/CodeGen/X86/avx512vldq-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vldq-builtins.c
@@ -1101,6 +1101,7 @@ __m128d test_mm256_extractf64x2_pd(__m256d __A) {
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> poison, <2 x i32> <i32 2, i32 3>
return _mm256_extractf64x2_pd(__A, 1);
}
+TEST_CONSTEXPR(match_m128d(_mm256_extractf64x2_pd(((__m256d){0.0,1.0,2.0,3.0}), 1), 2.0, 3.0));
__m128d test_mm256_mask_extractf64x2_pd(__m128d __W, __mmask8 __U, __m256d __A) {
// CHECK-LABEL: test_mm256_mask_extractf64x2_pd
@@ -1108,6 +1109,7 @@ __m128d test_mm256_mask_extractf64x2_pd(__m128d __W, __mmask8 __U, __m256d __A)
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm256_mask_extractf64x2_pd(__W, __U, __A, 1);
}
+TEST_CONSTEXPR(match_m128d(_mm256_mask_extractf64x2_pd((((__m128d){100.0, 101.0})), (__mmask8)0x1, (((__m256d){0.0,1.0,2.0,3.0})), 1), 2.0, 101.0));
__m128d test_mm256_maskz_extractf64x2_pd(__mmask8 __U, __m256d __A) {
// CHECK-LABEL: test_mm256_maskz_extractf64x2_pd
@@ -1115,12 +1117,14 @@ __m128d test_mm256_maskz_extractf64x2_pd(__mmask8 __U, __m256d __A) {
// CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm256_maskz_extractf64x2_pd(__U, __A, 1);
}
+TEST_CONSTEXPR(match_m128d(_mm256_maskz_extractf64x2_pd((__mmask8)0x2, (((__m256d){0.0,1.0,2.0,3.0})), 1), 0.0, 3.0));
__m128i test_mm256_extracti64x2_epi64(__m256i __A) {
// CHECK-LABEL: test_mm256_extracti64x2_epi64
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> poison, <2 x i32> <i32 2, i32 3>
return _mm256_extracti64x2_epi64(__A, 1);
}
+TEST_CONSTEXPR(match_m128i(_mm256_extracti64x2_epi64(((__m256i){0ULL,1ULL,2ULL,3ULL}), 1), 2ULL, 3ULL));
__m128i test_mm256_mask_extracti64x2_epi64(__m128i __W, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_mask_extracti64x2_epi64
@@ -1128,6 +1132,7 @@ __m128i test_mm256_mask_extracti64x2_epi64(__m128i __W, __mmask8 __U, __m256i __
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm256_mask_extracti64x2_epi64(__W, __U, __A, 1);
}
+TEST_CONSTEXPR(match_m128i(_mm256_mask_extracti64x2_epi64((((__m128i){100ULL, 101ULL})), (__mmask8)0x1, (((__m256i){0ULL,1ULL,2ULL,3ULL})), 1), 2ULL, 101ULL));
__m128i test_mm256_maskz_extracti64x2_epi64(__mmask8 __U, __m256i __A) {
// CHECK-LABEL: test_mm256_maskz_extracti64x2_epi64
@@ -1135,6 +1140,7 @@ __m128i test_mm256_maskz_extracti64x2_epi64(__mmask8 __U, __m256i __A) {
// CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm256_maskz_extracti64x2_epi64(__U, __A, 1);
}
+TEST_CONSTEXPR(match_m128i(_mm256_maskz_extracti64x2_epi64((__mmask8)0x2, (((__m256i){0ULL,1ULL,2ULL,3ULL})), 1), 0ULL, 3ULL));
__m256d test_mm256_insertf64x2(__m256d __A, __m128d __B) {
// CHECK-LABEL: test_mm256_insertf64x2
diff --git a/clang/test/CodeGen/X86/mmx-builtins.c b/clang/test/CodeGen/X86/mmx-builtins.c
index c1ac57b..7674255 100644
--- a/clang/test/CodeGen/X86/mmx-builtins.c
+++ b/clang/test/CodeGen/X86/mmx-builtins.c
@@ -409,6 +409,10 @@ int test_mm_movemask_pi8(__m64 a) {
// CHECK: call {{.*}}i32 @llvm.x86.sse2.pmovmskb.128(
return _mm_movemask_pi8(a);
}
+TEST_CONSTEXPR(_mm_movemask_pi8((__m64)((__v8qu){0x7F,0x80,0x01,0xFF,0x00,0xAA,0x55,0xC3})) == 0xAA);
+TEST_CONSTEXPR(_mm_movemask_pi8((__m64)((__v2si){(int)0x80FF00AA,(int)0x7F0183E1})) == 0x3D);
+TEST_CONSTEXPR(_mm_movemask_pi8((__m64)((__v1di){(long long)0xE110837A00924DB0ULL})) == 0xA5);
+
__m64 test_mm_mul_su32(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_mul_su32
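
The _mm_movemask_pi8 constants above are easy to check by hand: the intrinsic packs the top (sign) bit of each of the eight bytes into the low bits of the result. A minimal sketch:

/* Sign-bit gather performed by _mm_movemask_pi8: bit i of the result is
 * the top bit of byte i. */
static int movemask_pi8(const unsigned char b[8]) {
  int m = 0;
  for (int i = 0; i < 8; ++i)
    m |= (b[i] >> 7) << i;
  return m; /* {0x7F,0x80,0x01,0xFF,0x00,0xAA,0x55,0xC3} -> 0xAA */
}
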
diff --git a/clang/test/CodeGen/X86/sse-builtins.c b/clang/test/CodeGen/X86/sse-builtins.c
index 3bad342..f5c1d00 100644
--- a/clang/test/CodeGen/X86/sse-builtins.c
+++ b/clang/test/CodeGen/X86/sse-builtins.c
@@ -561,6 +561,8 @@ int test_mm_movemask_ps(__m128 A) {
// CHECK: call {{.*}}i32 @llvm.x86.sse.movmsk.ps(<4 x float> %{{.*}})
return _mm_movemask_ps(A);
}
+TEST_CONSTEXPR(_mm_movemask_ps((__m128)(__v4sf){-2.0f, 3.0f, -5.5f, -0.0f}) == 0xD);
+TEST_CONSTEXPR(_mm_movemask_ps((__m128)(__v4sf){-7.348215e5, 0.00314159, -12.789, 2.7182818}) == 0x5);
__m128 test_mm_mul_ps(__m128 A, __m128 B) {
// CHECK-LABEL: test_mm_mul_ps
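
The floating-point movemask tests here (and the pd/epi8 ones in sse2-builtins.c below) follow the same sign-bit rule, with one subtlety the chosen inputs exercise: -0.0f has its sign bit set, so it contributes a 1. A sketch under that reading:

#include <math.h>

/* Per-lane sign-bit extraction done by _mm_movemask_ps (two lanes for
 * _mm_movemask_pd); signbit() is true for -0.0f as well. */
static int movemask_ps(const float v[4]) {
  int m = 0;
  for (int i = 0; i < 4; ++i)
    m |= (signbit(v[i]) ? 1 : 0) << i;
  return m; /* {-2.0f, 3.0f, -5.5f, -0.0f} -> 0b1101 = 0xD */
}
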
diff --git a/clang/test/CodeGen/X86/sse2-builtins.c b/clang/test/CodeGen/X86/sse2-builtins.c
index ade7ef3..8e4fb86 100644
--- a/clang/test/CodeGen/X86/sse2-builtins.c
+++ b/clang/test/CodeGen/X86/sse2-builtins.c
@@ -956,12 +956,17 @@ int test_mm_movemask_epi8(__m128i A) {
// CHECK: call {{.*}}i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %{{.*}})
return _mm_movemask_epi8(A);
}
+TEST_CONSTEXPR(_mm_movemask_epi8((__m128i)(__v16qu){0x7F,0x80,0x01,0xFF,0x00,0xAA,0x55,0xC3,0x12,0x8E,0x00,0xFE,0x7E,0x81,0xFF,0x01}) == 0x6AAA);
+TEST_CONSTEXPR(_mm_movemask_epi8((__m128i)(__v4si){(int)0x80FF00AA,(int)0x7F0183E1,(int)0xDEADBEEF,(int)0xC0000001}) == 0x8F3D);
+TEST_CONSTEXPR(_mm_movemask_epi8((__m128i)(__v2du){0xFF00000000000080ULL,0x7F010203040506C3ULL}) == 0x181);
int test_mm_movemask_pd(__m128d A) {
// CHECK-LABEL: test_mm_movemask_pd
// CHECK: call {{.*}}i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %{{.*}})
return _mm_movemask_pd(A);
}
+TEST_CONSTEXPR(_mm_movemask_pd((__m128d)(__v2df){-12345.67890123, 4567.89012345}) == 0x1);
+TEST_CONSTEXPR(_mm_movemask_pd((__m128d)(__v2df){0.0000987654321, 09876.5432109876}) == 0x0);
__m128i test_mm_mul_epu32(__m128i A, __m128i B) {
// CHECK-LABEL: test_mm_mul_epu32
diff --git a/clang/test/CodeGen/X86/sse41-builtins.c b/clang/test/CodeGen/X86/sse41-builtins.c
index 89a7ac2..62cd392 100644
--- a/clang/test/CodeGen/X86/sse41-builtins.c
+++ b/clang/test/CodeGen/X86/sse41-builtins.c
@@ -376,6 +376,16 @@ __m128i test_mm_minpos_epu16(__m128i x) {
// CHECK: call <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16> %{{.*}})
return _mm_minpos_epu16(x);
}
+TEST_CONSTEXPR(match_v8hu(_mm_minpos_epu16((__m128i)(__v8hu){0,0,0,0, 0,0,0,0}), 0,0,0,0, 0,0,0,0));
+TEST_CONSTEXPR(match_v8hu(_mm_minpos_epu16((__m128i)(__v8hu){1,0,0,0, 0,0,0,0}), 0,1,0,0, 0,0,0,0));
+TEST_CONSTEXPR(match_v8hu(_mm_minpos_epu16((__m128i)(__v8hu){65535,65535,65535,65535,65535,65535,65535,65535}), 65535,0,0,0, 0,0,0,0));
+TEST_CONSTEXPR(match_v8hu(_mm_minpos_epu16((__m128i)(__v8hu){9,8,7,6,5,4,3,2}), 2,7,0,0, 0,0,0,0));
+TEST_CONSTEXPR(match_v8hu(_mm_minpos_epu16((__m128i)(__v8hu){5,5,5,5,5,5,5,5}), 5,0,0,0, 0,0,0,0));
+TEST_CONSTEXPR(match_v8hu(_mm_minpos_epu16((__m128i)(__v8hu){5,7,9,4,10,4,11,12}), 4,3,0,0, 0,0,0,0));
+TEST_CONSTEXPR(match_v8hu(_mm_minpos_epu16((__m128i)(__v8hu){6,0,0,0,0,0,0,0}), 0,1,0,0, 0,0,0,0));
+TEST_CONSTEXPR(match_v8hu(_mm_minpos_epu16((__m128i)(__v8hu){1000,2000,3000,4000,5000,6000,7000,1}), 1,7,0,0, 0,0,0,0));
+TEST_CONSTEXPR(match_v8hu(_mm_minpos_epu16((__m128i)(__v8hu){1234,5678,42,9999,65535,0,4242,42}), 0,5,0,0, 0,0,0,0));
+TEST_CONSTEXPR(match_v8hu(_mm_minpos_epu16((__m128i)(__v8hu){400,500,12,600,12,700,800,900}), 12,2,0,0, 0,0,0,0));
__m128i test_mm_mpsadbw_epu8(__m128i x, __m128i y) {
// CHECK-LABEL: test_mm_mpsadbw_epu8
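
The ten _mm_minpos_epu16 cases above all encode the same contract: lane 0 receives the minimum of the eight unsigned words, lane 1 the index of its first occurrence (ties resolve to the lowest index, which the all-fives and duplicate-12 vectors check), and lanes 2-7 are zeroed. A scalar reference assuming that contract:

/* Reference for _mm_minpos_epu16: out[0] = minimum, out[1] = first
 * index at which it occurs, out[2..7] = 0. Strict < keeps the earliest
 * index on ties. */
static void minpos_epu16(unsigned short out[8], const unsigned short v[8]) {
  unsigned short best = v[0], idx = 0;
  for (unsigned short i = 1; i < 8; ++i)
    if (v[i] < best) { best = v[i]; idx = i; }
  out[0] = best;
  out[1] = idx;
  for (int i = 2; i < 8; ++i)
    out[i] = 0;
  /* {400,500,12,600,12,700,800,900} -> {12,2,0,0,0,0,0,0} */
}
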
diff --git a/clang/test/CodeGen/arm-mve-intrinsics/cplusplus.cpp b/clang/test/CodeGen/arm-mve-intrinsics/cplusplus.cpp
index 2971961..4e3d0ce 100644
--- a/clang/test/CodeGen/arm-mve-intrinsics/cplusplus.cpp
+++ b/clang/test/CodeGen/arm-mve-intrinsics/cplusplus.cpp
@@ -114,7 +114,7 @@ uint16x8_t test_vld1q_u16(const uint16_t *base)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p)
diff --git a/clang/test/CodeGen/arm-mve-intrinsics/dup.c b/clang/test/CodeGen/arm-mve-intrinsics/dup.c
index c2c7a9c..e7113fd 100644
--- a/clang/test/CodeGen/arm-mve-intrinsics/dup.c
+++ b/clang/test/CodeGen/arm-mve-intrinsics/dup.c
@@ -244,8 +244,7 @@ uint32x4_t test_vdupq_m_n_u32(uint32x4_t inactive, uint32_t a, mve_pred16_t p)
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
-// CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x half> [[DOTSPLAT]], <8 x half> undef
-// CHECK-NEXT: ret <8 x half> [[TMP2]]
+// CHECK-NEXT: ret <8 x half> [[DOTSPLAT]]
//
float16x8_t test_vdupq_x_n_f16(float16_t a, mve_pred16_t p)
{
@@ -258,8 +257,7 @@ float16x8_t test_vdupq_x_n_f16(float16_t a, mve_pred16_t p)
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[A:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
-// CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[TMP1]], <4 x float> [[DOTSPLAT]], <4 x float> undef
-// CHECK-NEXT: ret <4 x float> [[TMP2]]
+// CHECK-NEXT: ret <4 x float> [[DOTSPLAT]]
//
float32x4_t test_vdupq_x_n_f32(float32_t a, mve_pred16_t p)
{
@@ -272,8 +270,7 @@ float32x4_t test_vdupq_x_n_f32(float32_t a, mve_pred16_t p)
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[A:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
-// CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x i8> [[DOTSPLAT]], <16 x i8> undef
-// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+// CHECK-NEXT: ret <16 x i8> [[DOTSPLAT]]
//
int8x16_t test_vdupq_x_n_s8(int8_t a, mve_pred16_t p)
{
@@ -286,8 +283,7 @@ int8x16_t test_vdupq_x_n_s8(int8_t a, mve_pred16_t p)
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[A:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
-// CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i16> [[DOTSPLAT]], <8 x i16> undef
-// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+// CHECK-NEXT: ret <8 x i16> [[DOTSPLAT]]
//
int16x8_t test_vdupq_x_n_s16(int16_t a, mve_pred16_t p)
{
@@ -300,8 +296,7 @@ int16x8_t test_vdupq_x_n_s16(int16_t a, mve_pred16_t p)
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[A:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-// CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[DOTSPLAT]], <4 x i32> undef
-// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+// CHECK-NEXT: ret <4 x i32> [[DOTSPLAT]]
//
int32x4_t test_vdupq_x_n_s32(int32_t a, mve_pred16_t p)
{
@@ -314,8 +309,7 @@ int32x4_t test_vdupq_x_n_s32(int32_t a, mve_pred16_t p)
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[A:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
-// CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP1]], <16 x i8> [[DOTSPLAT]], <16 x i8> undef
-// CHECK-NEXT: ret <16 x i8> [[TMP2]]
+// CHECK-NEXT: ret <16 x i8> [[DOTSPLAT]]
//
uint8x16_t test_vdupq_x_n_u8(uint8_t a, mve_pred16_t p)
{
@@ -328,8 +322,7 @@ uint8x16_t test_vdupq_x_n_u8(uint8_t a, mve_pred16_t p)
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[A:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
-// CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i16> [[DOTSPLAT]], <8 x i16> undef
-// CHECK-NEXT: ret <8 x i16> [[TMP2]]
+// CHECK-NEXT: ret <8 x i16> [[DOTSPLAT]]
//
uint16x8_t test_vdupq_x_n_u16(uint16_t a, mve_pred16_t p)
{
@@ -342,8 +335,7 @@ uint16x8_t test_vdupq_x_n_u16(uint16_t a, mve_pred16_t p)
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[A:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-// CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[DOTSPLAT]], <4 x i32> undef
-// CHECK-NEXT: ret <4 x i32> [[TMP2]]
+// CHECK-NEXT: ret <4 x i32> [[DOTSPLAT]]
//
uint32x4_t test_vdupq_x_n_u32(uint32_t a, mve_pred16_t p)
{
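
The dup.c updates are a fold rather than an interface change: the _x ("don't care") predicated forms used to select between the splat and undef, and a select whose false arm is undef may legitimately collapse to its true arm, so the CHECK lines now expect the splat returned directly. A hedged scalar model of why that is allowed:

/* Scalar model of vdupq_x_n_s32: inactive lanes of an _x intrinsic are
 * unspecified, so filling every lane with the splat value is a
 * conforming result and the predicate need not be consulted at all. */
static void vdupq_x_n_s32(int out[4], int a, unsigned short p) {
  (void)p; /* only constrains lanes the caller must not rely on */
  for (int i = 0; i < 4; ++i)
    out[i] = a;
}
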
diff --git a/clang/test/CodeGen/arm-mve-intrinsics/load-store.c b/clang/test/CodeGen/arm-mve-intrinsics/load-store.c
index 50f7011..ede2e95 100644
--- a/clang/test/CodeGen/arm-mve-intrinsics/load-store.c
+++ b/clang/test/CodeGen/arm-mve-intrinsics/load-store.c
@@ -122,7 +122,7 @@ uint32x4_t test_vld1q_u32(const uint32_t *base)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.masked.load.v8f16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x half> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.masked.load.v8f16.p0(ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]], <8 x half> zeroinitializer)
// CHECK-NEXT: ret <8 x half> [[TMP2]]
//
float16x8_t test_vld1q_z_f16(const float16_t *base, mve_pred16_t p)
@@ -138,7 +138,7 @@ float16x8_t test_vld1q_z_f16(const float16_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x float> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x float> zeroinitializer)
// CHECK-NEXT: ret <4 x float> [[TMP2]]
//
float32x4_t test_vld1q_z_f32(const float32_t *base, mve_pred16_t p)
@@ -154,7 +154,7 @@ float32x4_t test_vld1q_z_f32(const float32_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr align 1 [[BASE:%.*]], <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vld1q_z_s8(const int8_t *base, mve_pred16_t p)
@@ -170,7 +170,7 @@ int8x16_t test_vld1q_z_s8(const int8_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vld1q_z_s16(const int16_t *base, mve_pred16_t p)
@@ -186,7 +186,7 @@ int16x8_t test_vld1q_z_s16(const int16_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vld1q_z_s32(const int32_t *base, mve_pred16_t p)
@@ -202,7 +202,7 @@ int32x4_t test_vld1q_z_s32(const int32_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr align 1 [[BASE:%.*]], <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vld1q_z_u8(const uint8_t *base, mve_pred16_t p)
@@ -218,7 +218,7 @@ uint8x16_t test_vld1q_z_u8(const uint8_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vld1q_z_u16(const uint16_t *base, mve_pred16_t p)
@@ -234,7 +234,7 @@ uint16x8_t test_vld1q_z_u16(const uint16_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vld1q_z_u32(const uint32_t *base, mve_pred16_t p)
@@ -314,7 +314,7 @@ uint32x4_t test_vldrbq_u32(const uint8_t *base)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr align 1 [[BASE:%.*]], <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vldrbq_z_s8(const int8_t *base, mve_pred16_t p)
@@ -326,7 +326,7 @@ int8x16_t test_vldrbq_z_s8(const int8_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP1]], <8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr align 1 [[BASE:%.*]], <8 x i1> [[TMP1]], <8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP3:%.*]] = sext <8 x i8> [[TMP2]] to <8 x i16>
// CHECK-NEXT: ret <8 x i16> [[TMP3]]
//
@@ -339,7 +339,7 @@ int16x8_t test_vldrbq_z_s16(const int8_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP1]], <4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr align 1 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i8> [[TMP2]] to <4 x i32>
// CHECK-NEXT: ret <4 x i32> [[TMP3]]
//
@@ -352,7 +352,7 @@ int32x4_t test_vldrbq_z_s32(const int8_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr align 1 [[BASE:%.*]], <16 x i1> [[TMP1]], <16 x i8> zeroinitializer)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vldrbq_z_u8(const uint8_t *base, mve_pred16_t p)
@@ -364,7 +364,7 @@ uint8x16_t test_vldrbq_z_u8(const uint8_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP1]], <8 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr align 1 [[BASE:%.*]], <8 x i1> [[TMP1]], <8 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[TMP2]] to <8 x i16>
// CHECK-NEXT: ret <8 x i16> [[TMP3]]
//
@@ -377,7 +377,7 @@ uint16x8_t test_vldrbq_z_u16(const uint8_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP1]], <4 x i8> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i8> @llvm.masked.load.v4i8.p0(ptr align 1 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x i8> zeroinitializer)
// CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[TMP2]] to <4 x i32>
// CHECK-NEXT: ret <4 x i32> [[TMP3]]
//
@@ -442,7 +442,7 @@ uint32x4_t test_vldrhq_u32(const uint16_t *base)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.masked.load.v8f16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x half> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.masked.load.v8f16.p0(ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]], <8 x half> zeroinitializer)
// CHECK-NEXT: ret <8 x half> [[TMP2]]
//
float16x8_t test_vldrhq_z_f16(const float16_t *base, mve_pred16_t p)
@@ -454,7 +454,7 @@ float16x8_t test_vldrhq_z_f16(const float16_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vldrhq_z_s16(const int16_t *base, mve_pred16_t p)
@@ -466,7 +466,7 @@ int16x8_t test_vldrhq_z_s16(const int16_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP1]], <4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr align 2 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i16> [[TMP2]] to <4 x i32>
// CHECK-NEXT: ret <4 x i32> [[TMP3]]
//
@@ -479,7 +479,7 @@ int32x4_t test_vldrhq_z_s32(const int16_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]], <8 x i16> zeroinitializer)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vldrhq_z_u16(const uint16_t *base, mve_pred16_t p)
@@ -491,7 +491,7 @@ uint16x8_t test_vldrhq_z_u16(const uint16_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP1]], <4 x i16> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr align 2 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x i16> zeroinitializer)
// CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i16> [[TMP2]] to <4 x i32>
// CHECK-NEXT: ret <4 x i32> [[TMP3]]
//
@@ -534,7 +534,7 @@ uint32x4_t test_vldrwq_u32(const uint32_t *base)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x float> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x float> zeroinitializer)
// CHECK-NEXT: ret <4 x float> [[TMP2]]
//
float32x4_t test_vldrwq_z_f32(const float32_t *base, mve_pred16_t p)
@@ -546,7 +546,7 @@ float32x4_t test_vldrwq_z_f32(const float32_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vldrwq_z_s32(const int32_t *base, mve_pred16_t p)
@@ -558,7 +558,7 @@ int32x4_t test_vldrwq_z_s32(const int32_t *base, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]], <4 x i32> zeroinitializer)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vldrwq_z_u32(const uint32_t *base, mve_pred16_t p)
@@ -682,7 +682,7 @@ void test_vst1q_u32(uint32_t *base, uint32x4_t value)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v8f16.p0(<8 x half> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8f16.p0(<8 x half> [[VALUE:%.*]], ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_f16(float16_t *base, float16x8_t value, mve_pred16_t p)
@@ -698,7 +698,7 @@ void test_vst1q_p_f16(float16_t *base, float16x8_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[VALUE:%.*]], ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_f32(float32_t *base, float32x4_t value, mve_pred16_t p)
@@ -714,7 +714,7 @@ void test_vst1q_p_f32(float32_t *base, float32x4_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr align 1 [[BASE:%.*]], <16 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_s8(int8_t *base, int8x16_t value, mve_pred16_t p)
@@ -730,7 +730,7 @@ void test_vst1q_p_s8(int8_t *base, int8x16_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_s16(int16_t *base, int16x8_t value, mve_pred16_t p)
@@ -746,7 +746,7 @@ void test_vst1q_p_s16(int16_t *base, int16x8_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p)
@@ -762,7 +762,7 @@ void test_vst1q_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr align 1 [[BASE:%.*]], <16 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_u8(uint8_t *base, uint8x16_t value, mve_pred16_t p)
@@ -778,7 +778,7 @@ void test_vst1q_p_u8(uint8_t *base, uint8x16_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_u16(uint16_t *base, uint16x8_t value, mve_pred16_t p)
@@ -794,7 +794,7 @@ void test_vst1q_p_u16(uint16_t *base, uint16x8_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vst1q_p_u32(uint32_t *base, uint32x4_t value, mve_pred16_t p)
@@ -898,7 +898,7 @@ void test_vstrbq_u32(uint8_t *base, uint32x4_t value)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr align 1 [[BASE:%.*]], <16 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrbq_p_s8(int8_t *base, int8x16_t value, mve_pred16_t p)
@@ -915,7 +915,7 @@ void test_vstrbq_p_s8(int8_t *base, int8x16_t value, mve_pred16_t p)
// CHECK-NEXT: [[TMP0:%.*]] = trunc <8 x i16> [[VALUE:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v8i8.p0(<8 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i8.p0(<8 x i8> [[TMP0]], ptr align 1 [[BASE:%.*]], <8 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_vstrbq_p_s16(int8_t *base, int16x8_t value, mve_pred16_t p)
@@ -932,7 +932,7 @@ void test_vstrbq_p_s16(int8_t *base, int16x8_t value, mve_pred16_t p)
// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i8.p0(<4 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i8.p0(<4 x i8> [[TMP0]], ptr align 1 [[BASE:%.*]], <4 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_vstrbq_p_s32(int8_t *base, int32x4_t value, mve_pred16_t p)
@@ -948,7 +948,7 @@ void test_vstrbq_p_s32(int8_t *base, int32x4_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 1, <16 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v16i8.p0(<16 x i8> [[VALUE:%.*]], ptr align 1 [[BASE:%.*]], <16 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrbq_p_u8(uint8_t *base, uint8x16_t value, mve_pred16_t p)
@@ -965,7 +965,7 @@ void test_vstrbq_p_u8(uint8_t *base, uint8x16_t value, mve_pred16_t p)
// CHECK-NEXT: [[TMP0:%.*]] = trunc <8 x i16> [[VALUE:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v8i8.p0(<8 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <8 x i1> [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i8.p0(<8 x i8> [[TMP0]], ptr align 1 [[BASE:%.*]], <8 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_vstrbq_p_u16(uint8_t *base, uint16x8_t value, mve_pred16_t p)
@@ -982,7 +982,7 @@ void test_vstrbq_p_u16(uint8_t *base, uint16x8_t value, mve_pred16_t p)
// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i8.p0(<4 x i8> [[TMP0]], ptr [[BASE:%.*]], i32 1, <4 x i1> [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i8.p0(<4 x i8> [[TMP0]], ptr align 1 [[BASE:%.*]], <4 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_vstrbq_p_u32(uint8_t *base, uint32x4_t value, mve_pred16_t p)
@@ -1070,7 +1070,7 @@ void test_vstrhq_u32(uint16_t *base, uint32x4_t value)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v8f16.p0(<8 x half> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8f16.p0(<8 x half> [[VALUE:%.*]], ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrhq_p_f16(float16_t *base, float16x8_t value, mve_pred16_t p)
@@ -1086,7 +1086,7 @@ void test_vstrhq_p_f16(float16_t *base, float16x8_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrhq_p_s16(int16_t *base, int16x8_t value, mve_pred16_t p)
@@ -1103,7 +1103,7 @@ void test_vstrhq_p_s16(int16_t *base, int16x8_t value, mve_pred16_t p)
// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i16.p0(<4 x i16> [[TMP0]], ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i16.p0(<4 x i16> [[TMP0]], ptr align 2 [[BASE:%.*]], <4 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_vstrhq_p_s32(int16_t *base, int32x4_t value, mve_pred16_t p)
@@ -1119,7 +1119,7 @@ void test_vstrhq_p_s32(int16_t *base, int32x4_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 2, <8 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i16.p0(<8 x i16> [[VALUE:%.*]], ptr align 2 [[BASE:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrhq_p_u16(uint16_t *base, uint16x8_t value, mve_pred16_t p)
@@ -1136,7 +1136,7 @@ void test_vstrhq_p_u16(uint16_t *base, uint16x8_t value, mve_pred16_t p)
// CHECK-NEXT: [[TMP0:%.*]] = trunc <4 x i32> [[VALUE:%.*]] to <4 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP1]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i16.p0(<4 x i16> [[TMP0]], ptr [[BASE:%.*]], i32 2, <4 x i1> [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i16.p0(<4 x i16> [[TMP0]], ptr align 2 [[BASE:%.*]], <4 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_vstrhq_p_u32(uint16_t *base, uint32x4_t value, mve_pred16_t p)
@@ -1194,7 +1194,7 @@ void test_vstrwq_u32(uint32_t *base, uint32x4_t value)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[VALUE:%.*]], ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrwq_p_f32(float32_t *base, float32x4_t value, mve_pred16_t p)
@@ -1210,7 +1210,7 @@ void test_vstrwq_p_f32(float32_t *base, float32x4_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrwq_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p)
@@ -1226,7 +1226,7 @@ void test_vstrwq_p_s32(int32_t *base, int32x4_t value, mve_pred16_t p)
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr [[BASE:%.*]], i32 4, <4 x i1> [[TMP1]])
+// CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[VALUE:%.*]], ptr align 4 [[BASE:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret void
//
void test_vstrwq_p_u32(uint32_t *base, uint32x4_t value, mve_pred16_t p)
diff --git a/clang/test/CodeGen/builtin-masked.c b/clang/test/CodeGen/builtin-masked.c
index e2b5e09..28b94b7 100644
--- a/clang/test/CodeGen/builtin-masked.c
+++ b/clang/test/CodeGen/builtin-masked.c
@@ -19,7 +19,7 @@ typedef _Bool v8b __attribute__((ext_vector_type(8)));
// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[M_ADDR]], align 1
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[P_ADDR]], align 8
-// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr [[TMP2]], i32 4, <8 x i1> [[TMP1]], <8 x i32> poison)
+// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr align 4 [[TMP2]], <8 x i1> [[TMP1]], <8 x i32> poison)
// CHECK-NEXT: ret <8 x i32> [[MASKED_LOAD]]
//
v8i test_load(v8b m, int *p) {
@@ -45,7 +45,7 @@ v8i test_load(v8b m, int *p) {
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[P_ADDR]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr [[T_ADDR]], align 32
-// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr [[TMP3]], i32 4, <8 x i1> [[TMP2]], <8 x i32> [[TMP4]])
+// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr align 4 [[TMP3]], <8 x i1> [[TMP2]], <8 x i32> [[TMP4]])
// CHECK-NEXT: ret <8 x i32> [[MASKED_LOAD]]
//
v8i test_load_passthru(v8b m, int *p, v8i t) {
@@ -97,7 +97,7 @@ v8i test_load_expand(v8b m, int *p, v8i t) {
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr [[V_ADDR]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[P_ADDR]], align 8
-// CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP3]], ptr [[TMP4]], i32 4, <8 x i1> [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[TMP3]], ptr align 4 [[TMP4]], <8 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_store(v8b m, v8i v, int *p) {
@@ -150,7 +150,7 @@ void test_compress_store(v8b m, v8i v, int *p) {
// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr [[IDX_ADDR]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[TMP4]], <8 x i32> [[TMP3]]
-// CHECK-NEXT: [[MASKED_GATHER:%.*]] = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> [[TMP5]], i32 4, <8 x i1> [[TMP2]], <8 x i32> poison)
+// CHECK-NEXT: [[MASKED_GATHER:%.*]] = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> align 4 [[TMP5]], <8 x i1> [[TMP2]], <8 x i32> poison)
// CHECK-NEXT: ret <8 x i32> [[MASKED_GATHER]]
//
v8i test_gather(v8b mask, v8i idx, int *ptr) {
@@ -181,7 +181,7 @@ v8i test_gather(v8b mask, v8i idx, int *ptr) {
// CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr [[IDX_ADDR]], align 32
// CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[TMP6]], <8 x i32> [[TMP4]]
-// CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[TMP5]], <8 x ptr> [[TMP7]], i32 4, <8 x i1> [[TMP3]])
+// CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[TMP5]], <8 x ptr> align 4 [[TMP7]], <8 x i1> [[TMP3]])
// CHECK-NEXT: ret void
//
void test_scatter(v8b mask, v8i val, v8i idx, int *ptr) {
@@ -203,7 +203,7 @@ void test_scatter(v8b mask, v8i val, v8i idx, int *ptr) {
// CHECK-NEXT: [[LOAD_BITS2:%.*]] = load i8, ptr [[MASK_ADDR]], align 1
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
// CHECK-NEXT: [[TMP2:%.*]] = load ptr addrspace(42), ptr [[PTR_ADDR]], align 8
-// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p42(ptr addrspace(42) [[TMP2]], i32 4, <8 x i1> [[TMP1]], <8 x i32> poison)
+// CHECK-NEXT: [[MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p42(ptr addrspace(42) align 4 [[TMP2]], <8 x i1> [[TMP1]], <8 x i32> poison)
// CHECK-NEXT: ret <8 x i32> [[MASKED_LOAD]]
//
v8i test_load_as(v8b mask, int __attribute__((address_space(42))) * ptr) {
@@ -229,7 +229,7 @@ v8i test_load_as(v8b mask, int __attribute__((address_space(42))) * ptr) {
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 [[LOAD_BITS2]] to <8 x i1>
// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr [[V_ADDR]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(42), ptr [[P_ADDR]], align 8
-// CHECK-NEXT: call void @llvm.masked.store.v8i32.p42(<8 x i32> [[TMP3]], ptr addrspace(42) [[TMP4]], i32 4, <8 x i1> [[TMP2]])
+// CHECK-NEXT: call void @llvm.masked.store.v8i32.p42(<8 x i32> [[TMP3]], ptr addrspace(42) align 4 [[TMP4]], <8 x i1> [[TMP2]])
// CHECK-NEXT: ret void
//
void test_store_as(v8b m, v8i v, int __attribute__((address_space(42))) *p) {
@@ -256,7 +256,7 @@ void test_store_as(v8b m, v8i v, int __attribute__((address_space(42))) *p) {
// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr [[IDX_ADDR]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(42), ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr addrspace(42) [[TMP4]], <8 x i32> [[TMP3]]
-// CHECK-NEXT: [[MASKED_GATHER:%.*]] = call <8 x i32> @llvm.masked.gather.v8i32.v8p42(<8 x ptr addrspace(42)> [[TMP5]], i32 4, <8 x i1> [[TMP2]], <8 x i32> poison)
+// CHECK-NEXT: [[MASKED_GATHER:%.*]] = call <8 x i32> @llvm.masked.gather.v8i32.v8p42(<8 x ptr addrspace(42)> align 4 [[TMP5]], <8 x i1> [[TMP2]], <8 x i32> poison)
// CHECK-NEXT: ret <8 x i32> [[MASKED_GATHER]]
//
v8i test_gather_as(v8b mask, v8i idx, int __attribute__((address_space(42))) *ptr) {
@@ -287,7 +287,7 @@ v8i test_gather_as(v8b mask, v8i idx, int __attribute__((address_space(42))) *pt
// CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr [[IDX_ADDR]], align 32
// CHECK-NEXT: [[TMP6:%.*]] = load ptr addrspace(42), ptr [[PTR_ADDR]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr addrspace(42) [[TMP6]], <8 x i32> [[TMP4]]
-// CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p42(<8 x i32> [[TMP5]], <8 x ptr addrspace(42)> [[TMP7]], i32 4, <8 x i1> [[TMP3]])
+// CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p42(<8 x i32> [[TMP5]], <8 x ptr addrspace(42)> align 4 [[TMP7]], <8 x i1> [[TMP3]])
// CHECK-NEXT: ret void
//
void test_scatter_as(v8b mask, v8i val, v8i idx, int __attribute__((address_space(42))) *ptr) {
diff --git a/clang/test/CodeGen/c11atomics-ios.c b/clang/test/CodeGen/c11atomics-ios.c
index 8310270..34843ef 100644
--- a/clang/test/CodeGen/c11atomics-ios.c
+++ b/clang/test/CodeGen/c11atomics-ios.c
@@ -235,6 +235,7 @@ _Bool test_promoted_cmpxchg(_Atomic(PS) *addr, PS *desired, PS *new) {
// CHECK: [[ATOMIC_DESIRED:%.*]] = alloca { %struct.PS, [2 x i8] }, align 8
// CHECK: [[ATOMIC_NEW:%.*]] = alloca { %struct.PS, [2 x i8] }, align 8
// CHECK: [[RES_ADDR:%.*]] = alloca i8, align 1
+ // CHECK: [[OLD_TMP:%.*]] = alloca i64, align 8
// CHECK: store ptr %addr, ptr [[ADDR_ARG]], align 4
// CHECK: store ptr %desired, ptr [[DESIRED_ARG]], align 4
// CHECK: store ptr %new, ptr [[NEW_ARG]], align 4
@@ -251,7 +252,8 @@ _Bool test_promoted_cmpxchg(_Atomic(PS) *addr, PS *desired, PS *new) {
// CHECK: [[RES_BOOL:%.*]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[RES_BOOL]], label {{%.*}}, label {{%.*}}
- // CHECK: store i64 [[RES_VAL64]], ptr [[ATOMIC_DESIRED]], align 8
+ // CHECK: store i64 [[RES_VAL64]], ptr [[OLD_TMP]], align 8
+ // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 2 [[DESIRED_ARG:%.*]], ptr align 8 [[OLD_TMP]], i64 6, i1 false)
// CHECK: br label {{%.*}}
// CHECK: [[RES_BOOL8:%.*]] = zext i1 [[RES_BOOL]] to i8
diff --git a/clang/test/CodeGenCXX/builtin-atomic-compare_exchange.cpp b/clang/test/CodeGenCXX/builtin-atomic-compare_exchange.cpp
new file mode 100644
index 0000000..4f1fe98
--- /dev/null
+++ b/clang/test/CodeGenCXX/builtin-atomic-compare_exchange.cpp
@@ -0,0 +1,130 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 -std=c++20 -triple=x86_64-linux-gnu -emit-llvm -o - %s | FileCheck %s
+
+
+template <unsigned Size>
+struct S {
+ char data[Size];
+};
+
+// CHECK-LABEL: define dso_local noundef zeroext i1 @_Z21test_compare_exchangePU7_Atomic1SILj3EEPS0_S0_(
+// CHECK-SAME: ptr noundef [[A:%.*]], ptr noundef [[EXPECTED:%.*]], i24 [[DESIRED_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[DESIRED:%.*]] = alloca [[STRUCT_S:%.*]], align 1
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[EXPECTED_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_S]], align 1
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca { [[STRUCT_S]], [1 x i8] }, align 4
+// CHECK-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca { [[STRUCT_S]], [1 x i8] }, align 4
+// CHECK-NEXT: [[CMPXCHG_BOOL:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[OLD_TMP:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_S]], ptr [[DESIRED]], i32 0, i32 0
+// CHECK-NEXT: store i24 [[DESIRED_COERCE]], ptr [[COERCE_DIVE]], align 1
+// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
+// CHECK-NEXT: store ptr [[EXPECTED]], ptr [[EXPECTED_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[EXPECTED_ADDR]], align 8
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DOTATOMICTMP]], ptr align 1 [[DESIRED]], i64 3, i1 false)
+// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP]], i8 0, i64 4, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ATOMIC_TEMP]], ptr align 1 [[TMP1]], i64 3, i1 false)
+// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP1]], i8 0, i64 4, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ATOMIC_TEMP1]], ptr align 1 [[DOTATOMICTMP]], i64 3, i1 false)
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP1]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[TMP0]], i32 [[TMP2]], i32 [[TMP3]] monotonic monotonic, align 4
+// CHECK-NEXT: [[TMP5:%.*]] = extractvalue { i32, i1 } [[TMP4]], 0
+// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+// CHECK-NEXT: br i1 [[TMP6]], label %[[CMPXCHG_CONTINUE:.*]], label %[[CMPXCHG_STORE_EXPECTED:.*]]
+// CHECK: [[CMPXCHG_STORE_EXPECTED]]:
+// CHECK-NEXT: store i32 [[TMP5]], ptr [[OLD_TMP]], align 4
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[TMP1]], ptr align 4 [[OLD_TMP]], i64 3, i1 false)
+// CHECK-NEXT: br label %[[CMPXCHG_CONTINUE]]
+// CHECK: [[CMPXCHG_CONTINUE]]:
+// CHECK-NEXT: [[STOREDV:%.*]] = zext i1 [[TMP6]] to i8
+// CHECK-NEXT: store i8 [[STOREDV]], ptr [[CMPXCHG_BOOL]], align 1
+// CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[CMPXCHG_BOOL]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP7]] to i1
+// CHECK-NEXT: ret i1 [[LOADEDV]]
+//
+bool test_compare_exchange(_Atomic(S<3>)* a, S<3>* expected, S<3> desired) {
+ return __c11_atomic_compare_exchange_strong(a, expected, desired, 0, 0);
+}
+
+
+// CHECK-LABEL: define dso_local noundef zeroext i1 @_Z21test_compare_exchangePU7_Atomic1SILj4EEPS0_S0_(
+// CHECK-SAME: ptr noundef [[A:%.*]], ptr noundef [[EXPECTED:%.*]], i32 [[DESIRED_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[DESIRED:%.*]] = alloca [[STRUCT_S_0:%.*]], align 1
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[EXPECTED_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_S_0]], align 1
+// CHECK-NEXT: [[CMPXCHG_BOOL:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_S_0]], ptr [[DESIRED]], i32 0, i32 0
+// CHECK-NEXT: store i32 [[DESIRED_COERCE]], ptr [[COERCE_DIVE]], align 1
+// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
+// CHECK-NEXT: store ptr [[EXPECTED]], ptr [[EXPECTED_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[EXPECTED_ADDR]], align 8
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DOTATOMICTMP]], ptr align 1 [[DESIRED]], i64 4, i1 false)
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 1
+// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 1
+// CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[TMP0]], i32 [[TMP2]], i32 [[TMP3]] monotonic monotonic, align 4
+// CHECK-NEXT: [[TMP5:%.*]] = extractvalue { i32, i1 } [[TMP4]], 0
+// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+// CHECK-NEXT: br i1 [[TMP6]], label %[[CMPXCHG_CONTINUE:.*]], label %[[CMPXCHG_STORE_EXPECTED:.*]]
+// CHECK: [[CMPXCHG_STORE_EXPECTED]]:
+// CHECK-NEXT: store i32 [[TMP5]], ptr [[TMP1]], align 1
+// CHECK-NEXT: br label %[[CMPXCHG_CONTINUE]]
+// CHECK: [[CMPXCHG_CONTINUE]]:
+// CHECK-NEXT: [[STOREDV:%.*]] = zext i1 [[TMP6]] to i8
+// CHECK-NEXT: store i8 [[STOREDV]], ptr [[CMPXCHG_BOOL]], align 1
+// CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[CMPXCHG_BOOL]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP7]] to i1
+// CHECK-NEXT: ret i1 [[LOADEDV]]
+//
+bool test_compare_exchange(_Atomic(S<4>)* a, S<4>* expected, S<4> desired) {
+ return __c11_atomic_compare_exchange_strong(a, expected, desired, 0, 0);
+}
+
+// CHECK-LABEL: define dso_local noundef zeroext i1 @_Z21test_compare_exchangePU7_Atomic1SILj6EEPS0_S0_(
+// CHECK-SAME: ptr noundef [[A:%.*]], ptr noundef [[EXPECTED:%.*]], i48 [[DESIRED_COERCE:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[DESIRED:%.*]] = alloca [[STRUCT_S_1:%.*]], align 1
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[EXPECTED_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_S_1]], align 1
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca { [[STRUCT_S_1]], [2 x i8] }, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca { [[STRUCT_S_1]], [2 x i8] }, align 8
+// CHECK-NEXT: [[CMPXCHG_BOOL:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[OLD_TMP:%.*]] = alloca i64, align 8
+// CHECK-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds nuw [[STRUCT_S_1]], ptr [[DESIRED]], i32 0, i32 0
+// CHECK-NEXT: store i48 [[DESIRED_COERCE]], ptr [[COERCE_DIVE]], align 1
+// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
+// CHECK-NEXT: store ptr [[EXPECTED]], ptr [[EXPECTED_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[EXPECTED_ADDR]], align 8
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[DOTATOMICTMP]], ptr align 1 [[DESIRED]], i64 6, i1 false)
+// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[ATOMIC_TEMP]], i8 0, i64 8, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[ATOMIC_TEMP]], ptr align 1 [[TMP1]], i64 6, i1 false)
+// CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[ATOMIC_TEMP1]], i8 0, i64 8, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[ATOMIC_TEMP1]], ptr align 1 [[DOTATOMICTMP]], i64 6, i1 false)
+// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[ATOMIC_TEMP]], align 8
+// CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[ATOMIC_TEMP1]], align 8
+// CHECK-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[TMP0]], i64 [[TMP2]], i64 [[TMP3]] monotonic monotonic, align 8
+// CHECK-NEXT: [[TMP5:%.*]] = extractvalue { i64, i1 } [[TMP4]], 0
+// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
+// CHECK-NEXT: br i1 [[TMP6]], label %[[CMPXCHG_CONTINUE:.*]], label %[[CMPXCHG_STORE_EXPECTED:.*]]
+// CHECK: [[CMPXCHG_STORE_EXPECTED]]:
+// CHECK-NEXT: store i64 [[TMP5]], ptr [[OLD_TMP]], align 8
+// CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[TMP1]], ptr align 8 [[OLD_TMP]], i64 6, i1 false)
+// CHECK-NEXT: br label %[[CMPXCHG_CONTINUE]]
+// CHECK: [[CMPXCHG_CONTINUE]]:
+// CHECK-NEXT: [[STOREDV:%.*]] = zext i1 [[TMP6]] to i8
+// CHECK-NEXT: store i8 [[STOREDV]], ptr [[CMPXCHG_BOOL]], align 1
+// CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[CMPXCHG_BOOL]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP7]] to i1
+// CHECK-NEXT: ret i1 [[LOADEDV]]
+//
+bool test_compare_exchange(_Atomic(S<6>)* a, S<6>* expected, S<6> desired) {
+ return __c11_atomic_compare_exchange_strong(a, expected, desired, 0, 0);
+}
diff --git a/clang/test/CodeGenOpenCL/amdgpu-cluster-dims.cl b/clang/test/CodeGenOpenCL/amdgpu-cluster-dims.cl
new file mode 100644
index 0000000..be822a6
--- /dev/null
+++ b/clang/test/CodeGenOpenCL/amdgpu-cluster-dims.cl
@@ -0,0 +1,47 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --check-attributes --check-globals all --include-generated-funcs --prefix-filecheck-ir-name VAR --version 5
+// RUN: %clang_cc1 -cl-std=CL2.0 -triple amdgcn-amd-amdhsa -target-cpu gfx1250 -disable-llvm-passes -fno-ident -emit-llvm %s -o - | FileCheck %s
+
+kernel void foo(global int *p) { *p = 1; }
+// CHECK: Function Attrs: convergent norecurse nounwind
+// CHECK-LABEL: define dso_local amdgpu_kernel void @foo(
+// CHECK-SAME: ptr addrspace(1) noundef align 4 [[P:%.*]]) #[[ATTR0:[0-9]+]] !kernel_arg_addr_space [[META3:![0-9]+]] !kernel_arg_access_qual [[META4:![0-9]+]] !kernel_arg_type [[META5:![0-9]+]] !kernel_arg_base_type [[META5]] !kernel_arg_type_qual [[META6:![0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[P_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[P_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[P]], ptr [[P_ADDR_ASCAST]], align 8, !tbaa [[TBAA7:![0-9]+]]
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr addrspace(1), ptr [[P_ADDR_ASCAST]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT: call void @__clang_ocl_kern_imp_foo(ptr addrspace(1) noundef align 4 [[TMP0]]) #[[ATTR2:[0-9]+]]
+// CHECK-NEXT: ret void
+//
+//
+// CHECK: Function Attrs: alwaysinline convergent norecurse nounwind
+// CHECK-LABEL: define dso_local void @__clang_ocl_kern_imp_foo(
+// CHECK-SAME: ptr addrspace(1) noundef align 4 [[P:%.*]]) #[[ATTR1:[0-9]+]] !kernel_arg_addr_space [[META3]] !kernel_arg_access_qual [[META4]] !kernel_arg_type [[META5]] !kernel_arg_base_type [[META5]] !kernel_arg_type_qual [[META6]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[P_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[P_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[P]], ptr [[P_ADDR_ASCAST]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr addrspace(1), ptr [[P_ADDR_ASCAST]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT: store i32 1, ptr addrspace(1) [[TMP0]], align 4, !tbaa [[TBAA12:![0-9]+]]
+// CHECK-NEXT: ret void
+//
+//.
+// CHECK: attributes #[[ATTR0]] = { convergent norecurse nounwind "amdgpu-cluster-dims"="0,0,0" "amdgpu-flat-work-group-size"="1,256" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="gfx1250" "target-features"="+16-bit-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-fmin-fmax-global-f64,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-pk-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+clusters,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+tensor-cvt-lut-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32" "uniform-work-group-size"="false" }
+// CHECK: attributes #[[ATTR1]] = { alwaysinline convergent norecurse nounwind "amdgpu-cluster-dims"="0,0,0" "amdgpu-flat-work-group-size"="1,256" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="gfx1250" "target-features"="+16-bit-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-fmin-fmax-global-f32,+atomic-fmin-fmax-global-f64,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-pk-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+clusters,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+tensor-cvt-lut-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32" }
+// CHECK: attributes #[[ATTR2]] = { convergent nounwind }
+//.
+// CHECK: [[META0:![0-9]+]] = !{i32 1, !"amdhsa_code_object_version", i32 600}
+// CHECK: [[META1:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
+// CHECK: [[META2:![0-9]+]] = !{i32 2, i32 0}
+// CHECK: [[META3]] = !{i32 1}
+// CHECK: [[META4]] = !{!"none"}
+// CHECK: [[META5]] = !{!"int*"}
+// CHECK: [[META6]] = !{!""}
+// CHECK: [[TBAA7]] = !{[[META8:![0-9]+]], [[META8]], i64 0}
+// CHECK: [[META8]] = !{!"p1 int", [[META9:![0-9]+]], i64 0}
+// CHECK: [[META9]] = !{!"any pointer", [[META10:![0-9]+]], i64 0}
+// CHECK: [[META10]] = !{!"omnipotent char", [[META11:![0-9]+]], i64 0}
+// CHECK: [[META11]] = !{!"Simple C/C++ TBAA"}
+// CHECK: [[TBAA12]] = !{[[META13:![0-9]+]], [[META13]], i64 0}
+// CHECK: [[META13]] = !{!"int", [[META10]], i64 0}
+//.
diff --git a/clang/test/Frontend/rewrite-includes-bom.c b/clang/test/Frontend/rewrite-includes-bom.c
index caa431a..27bf470 100644
--- a/clang/test/Frontend/rewrite-includes-bom.c
+++ b/clang/test/Frontend/rewrite-includes-bom.c
@@ -1,8 +1,8 @@
-// RUN: grep -q $'^\xEF\xBB\xBF' %S/Inputs/rewrite-includes-bom.h
+// RUN: od -t x1 %S/Inputs/rewrite-includes-bom.h | grep -q 'ef\s*bb\s*bf'
// RUN: %clang_cc1 -E -frewrite-includes -I %S/Inputs %s -o %t.c
-// RUN: ! grep -q $'\xEF\xBB\xBF' %t.c
+// RUN: od -t x1 %t.c | not grep -q 'ef\s*bb\s*bf'
// RUN: %clang_cc1 -fsyntax-only -verify %t.c
// expected-no-diagnostics
-// REQUIRES: shell
+// UNSUPPORTED: system-windows
#include "rewrite-includes-bom.h"
diff --git a/clang/test/Headers/wasm.c b/clang/test/Headers/wasm.c
index 7f427ca..03f20c5 100644
--- a/clang/test/Headers/wasm.c
+++ b/clang/test/Headers/wasm.c
@@ -612,7 +612,7 @@ v128_t test_f64x2_const_splat(void) {
return wasm_f64x2_const_splat(42);
}
-// CHECK-LABEL: define hidden <4 x i32> @test_i8x16_splat(
+// CHECK-LABEL: define hidden noundef <4 x i32> @test_i8x16_splat(
// CHECK-SAME: i8 noundef signext [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <16 x i8> poison, i8 [[A]], i64 0
@@ -624,7 +624,7 @@ v128_t test_i8x16_splat(int8_t a) {
return wasm_i8x16_splat(a);
}
-// CHECK-LABEL: define hidden <4 x i32> @test_u8x16_splat(
+// CHECK-LABEL: define hidden noundef <4 x i32> @test_u8x16_splat(
// CHECK-SAME: i8 noundef zeroext [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <16 x i8> poison, i8 [[A]], i64 0
@@ -682,7 +682,7 @@ v128_t test_u8x16_replace_lane(v128_t a, uint8_t b) {
return wasm_u8x16_replace_lane(a, 15, b);
}
-// CHECK-LABEL: define hidden <4 x i32> @test_i16x8_splat(
+// CHECK-LABEL: define hidden noundef <4 x i32> @test_i16x8_splat(
// CHECK-SAME: i16 noundef signext [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <8 x i16> poison, i16 [[A]], i64 0
@@ -694,7 +694,7 @@ v128_t test_i16x8_splat(int16_t a) {
return wasm_i16x8_splat(a);
}
-// CHECK-LABEL: define hidden <4 x i32> @test_u16x8_splat(
+// CHECK-LABEL: define hidden noundef <4 x i32> @test_u16x8_splat(
// CHECK-SAME: i16 noundef zeroext [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <8 x i16> poison, i16 [[A]], i64 0
@@ -752,7 +752,7 @@ v128_t test_u16x8_replace_lane(v128_t a, uint16_t b) {
return wasm_u16x8_replace_lane(a, 7, b);
}
-// CHECK-LABEL: define hidden <4 x i32> @test_i32x4_splat(
+// CHECK-LABEL: define hidden noundef <4 x i32> @test_i32x4_splat(
// CHECK-SAME: i32 noundef [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <4 x i32> poison, i32 [[A]], i64 0
@@ -763,7 +763,7 @@ v128_t test_i32x4_splat(int32_t a) {
return wasm_i32x4_splat(a);
}
-// CHECK-LABEL: define hidden <4 x i32> @test_u32x4_splat(
+// CHECK-LABEL: define hidden noundef <4 x i32> @test_u32x4_splat(
// CHECK-SAME: i32 noundef [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <4 x i32> poison, i32 [[A]], i64 0
@@ -814,7 +814,7 @@ v128_t test_u32x4_replace_lane(v128_t a, uint32_t b) {
return wasm_u32x4_replace_lane(a, 3, b);
}
-// CHECK-LABEL: define hidden <4 x i32> @test_i64x2_splat(
+// CHECK-LABEL: define hidden noundef <4 x i32> @test_i64x2_splat(
// CHECK-SAME: i64 noundef [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <2 x i64> poison, i64 [[A]], i64 0
@@ -826,7 +826,7 @@ v128_t test_i64x2_splat(int64_t a) {
return wasm_i64x2_splat(a);
}
-// CHECK-LABEL: define hidden <4 x i32> @test_u64x2_splat(
+// CHECK-LABEL: define hidden noundef <4 x i32> @test_u64x2_splat(
// CHECK-SAME: i64 noundef [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <2 x i64> poison, i64 [[A]], i64 0
@@ -919,7 +919,7 @@ v128_t test_f32x4_replace_lane(v128_t a, float b) {
return wasm_f32x4_replace_lane(a, 3, b);
}
-// CHECK-LABEL: define hidden <4 x i32> @test_f64x2_splat(
+// CHECK-LABEL: define hidden noundef <4 x i32> @test_f64x2_splat(
// CHECK-SAME: double noundef [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <2 x double> poison, double [[A]], i64 0
diff --git a/clang/test/Lexer/minimize_source_to_dependency_directives_utf8bom.c b/clang/test/Lexer/minimize_source_to_dependency_directives_utf8bom.c
index 46aba91..6f574ac 100644
--- a/clang/test/Lexer/minimize_source_to_dependency_directives_utf8bom.c
+++ b/clang/test/Lexer/minimize_source_to_dependency_directives_utf8bom.c
@@ -1,5 +1,5 @@
// Test UTF8 BOM at start of file
-// RUN: printf '\xef\xbb\xbf' > %t.c
+// RUN: printf '\357\273\277' > %t.c
// RUN: echo '#ifdef TEST\n' >> %t.c
// RUN: echo '#include <string>' >> %t.c
// RUN: echo '#endif' >> %t.c
diff --git a/clang/test/Modules/crash-vfs-relative-incdir.m b/clang/test/Modules/crash-vfs-relative-incdir.m
index c0407f7..46c3413c 100644
--- a/clang/test/Modules/crash-vfs-relative-incdir.m
+++ b/clang/test/Modules/crash-vfs-relative-incdir.m
@@ -53,4 +53,4 @@
// RUN: cd %t
// RUN: chmod 755 crash-vfs-*.sh
-// RUN: ./crash-vfs-*.sh
+// RUN: bash ./crash-vfs-*.sh
diff --git a/clang/test/Modules/crash-vfs-run-reproducer.m b/clang/test/Modules/crash-vfs-run-reproducer.m
index fd861fe..fa06fd9 100644
--- a/clang/test/Modules/crash-vfs-run-reproducer.m
+++ b/clang/test/Modules/crash-vfs-run-reproducer.m
@@ -53,4 +53,4 @@
// RUN: cd %t
// RUN: chmod 755 crash-vfs-*.sh
-// RUN: ./crash-vfs-*.sh
+// RUN: bash ./crash-vfs-*.sh
diff --git a/clang/test/Preprocessor/predefined-arch-macros.c b/clang/test/Preprocessor/predefined-arch-macros.c
index dac3649..a3c3697 100644
--- a/clang/test/Preprocessor/predefined-arch-macros.c
+++ b/clang/test/Preprocessor/predefined-arch-macros.c
@@ -1911,7 +1911,6 @@
// CHECK_GNR_M32: #define __TSXLDTRK__ 1
// CHECK_GNR_M32: #define __UINTR__ 1
// CHECK_GNR_M32-NOT: #define __USERMSR__ 1
-// CHECK_DMR_M32: #define __USERMSR__ 1
// CHECK_GNR_M32: #define __VAES__ 1
// CHECK_GNR_M32: #define __VPCLMULQDQ__ 1
// CHECK_GNR_M32: #define __WAITPKG__ 1
@@ -2018,7 +2017,6 @@
// CHECK_GNR_M64: #define __TSXLDTRK__ 1
// CHECK_GNR_M64: #define __UINTR__ 1
// CHECK_GNR_M64-NOT: #define __USERMSR__ 1
-// CHECK_DMR_M64: #define __USERMSR__ 1
// CHECK_GNR_M64: #define __VAES__ 1
// CHECK_GNR_M64: #define __VPCLMULQDQ__ 1
// CHECK_GNR_M64: #define __WAITPKG__ 1
diff --git a/clang/test/SemaCXX/libstdcxx_pair_swap_hack.cpp b/clang/test/SemaCXX/libstdcxx_pair_swap_hack.cpp
index 6b8ca4f..a477571 100644
--- a/clang/test/SemaCXX/libstdcxx_pair_swap_hack.cpp
+++ b/clang/test/SemaCXX/libstdcxx_pair_swap_hack.cpp
@@ -82,13 +82,14 @@ namespace sad {
template<typename T> void swap(T &, T &);
template<typename A, typename B> struct CLASS {
- void swap(CLASS &other) noexcept(noexcept(swap(*this, other))); // expected-error {{too many arguments}} expected-note {{declared here}}
- // expected-error@-1{{uses itself}} expected-note@-1{{in instantiation of}}
+ void swap(CLASS &other) // expected-note {{declared here}}
+ noexcept(noexcept(swap(*this, other))); // expected-error {{too many arguments}}
+ // expected-error@-1{{uses itself}}
};
CLASS<int, int> pi;
- static_assert(!noexcept(pi.swap(pi)), ""); // expected-note 2{{in instantiation of exception specification for 'swap'}}
+ static_assert(!noexcept(pi.swap(pi)), ""); // expected-note {{in instantiation of exception specification for 'swap'}}
}
#endif
diff --git a/clang/test/SemaCXX/warn-implicit-unicode-conversions.cpp b/clang/test/SemaCXX/warn-implicit-unicode-conversions.cpp
index fcff006..f17f20c 100644
--- a/clang/test/SemaCXX/warn-implicit-unicode-conversions.cpp
+++ b/clang/test/SemaCXX/warn-implicit-unicode-conversions.cpp
@@ -14,7 +14,7 @@ void test(char8_t u8, char16_t u16, char32_t u32) {
c16(u32); // expected-warning {{implicit conversion from 'char32_t' to 'char16_t' may lose precision and change the meaning of the represented code unit}}
c32(u8); // expected-warning {{implicit conversion from 'char8_t' to 'char32_t' may change the meaning of the represented code unit}}
- c32(u16); // expected-warning {{implicit conversion from 'char16_t' to 'char32_t' may change the meaning of the represented code unit}}
+ c32(u16);
c32(u32);
@@ -30,7 +30,7 @@ void test(char8_t u8, char16_t u16, char32_t u32) {
c16(char32_t(0x7f));
c16(char32_t(0x80));
c16(char32_t(0xD7FF));
- c16(char32_t(0xD800)); // expected-warning {{implicit conversion from 'char32_t' to 'char16_t' changes the meaning of the code unit '<0xD800>'}}
+ c16(char32_t(0xD800));
c16(char32_t(0xE000));
c16(char32_t(U'🐉')); // expected-warning {{implicit conversion from 'char32_t' to 'char16_t' changes the meaning of the code point '🐉'}}
@@ -44,8 +44,8 @@ void test(char8_t u8, char16_t u16, char32_t u32) {
c32(char16_t(0x80));
c32(char16_t(0xD7FF));
- c32(char16_t(0xD800)); // expected-warning {{implicit conversion from 'char16_t' to 'char32_t' changes the meaning of the code unit '<0xD800>'}}
- c32(char16_t(0xDFFF)); // expected-warning {{implicit conversion from 'char16_t' to 'char32_t' changes the meaning of the code unit '<0xDFFF>'}}
+ c32(char16_t(0xD800));
+ c32(char16_t(0xDFFF));
c32(char16_t(0xE000));
c32(char16_t(u'☕'));
diff --git a/clang/test/SemaOpenACC/combined-construct-reduction-clause.cpp b/clang/test/SemaOpenACC/combined-construct-reduction-clause.cpp
index 1b50336..c406644 100644
--- a/clang/test/SemaOpenACC/combined-construct-reduction-clause.cpp
+++ b/clang/test/SemaOpenACC/combined-construct-reduction-clause.cpp
@@ -31,10 +31,12 @@ void uses(unsigned Parm) {
#pragma acc serial loop reduction(&: CoS, I, F)
// expected-error@-1{{variable of type 'float' referenced in OpenACC 'reduction' clause does not have a valid operation available}}
- // expected-error@-2{{invalid operands to binary expression ('float' and 'float')}}
- // expected-error@-3{{variable of type 'float' referenced in OpenACC 'reduction' clause does not have a valid operation available}}
+ // expected-note@-2{{while forming binary operator '&='}}
+ // expected-error@-3{{invalid operands to binary expression ('float' and 'float')}}
+ // expected-error@-4{{variable of type 'float' referenced in OpenACC 'reduction' clause does not have a valid operation available}}
// expected-note@#COS_FLOAT{{while forming combiner for compound type 'CompositeOfScalars'}}
- // expected-error@-5{{invalid operands to binary expression ('float' and 'float')}}
+ // expected-note@-6{{while forming binary operator '&='}}
+ // expected-error@-7{{invalid operands to binary expression ('float' and 'float')}}
for(int i = 0; i < 5; ++i);
#pragma acc kernels loop reduction(min: CoS, Array[I], Array[0:I])
diff --git a/clang/test/SemaOpenACC/compute-construct-reduction-clause.c b/clang/test/SemaOpenACC/compute-construct-reduction-clause.c
index 96c01d0..cb490ed 100644
--- a/clang/test/SemaOpenACC/compute-construct-reduction-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-reduction-clause.c
@@ -59,10 +59,12 @@ void uses(unsigned Parm) {
  // Vars in a reduction must be scalars or composites of scalars.
#pragma acc parallel reduction(&: CoS, I, F)
// expected-error@-1{{variable of type 'float' referenced in OpenACC 'reduction' clause does not have a valid operation available}}
- // expected-error@-2{{invalid operands to binary expression ('float' and 'float')}}
- // expected-error@-3{{variable of type 'float' referenced in OpenACC 'reduction' clause does not have a valid operation available}}
+ // expected-note@-2{{while forming binary operator '&='}}
+ // expected-error@-3{{invalid operands to binary expression ('float' and 'float')}}
+ // expected-error@-4{{variable of type 'float' referenced in OpenACC 'reduction' clause does not have a valid operation available}}
// expected-note@#COS_FLOAT{{while forming combiner for compound type 'CompositeOfScalars'}}
- // expected-error@-5{{invalid operands to binary expression ('float' and 'float')}}
+ // expected-note@-6{{while forming binary operator '&='}}
+ // expected-error@-7{{invalid operands to binary expression ('float' and 'float')}}
while (1);
// expected-error@+3{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
// expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
@@ -76,15 +78,17 @@ void uses(unsigned Parm) {
#pragma acc parallel reduction(&: CoS, Array[I], Array[0:I])
// expected-error@-1{{variable of type 'float' referenced in OpenACC 'reduction' clause does not have a valid operation available}}
// expected-note@#COS_FLOAT{{while forming combiner for compound type 'CompositeOfScalars'}}
- // expected-error@-3{{invalid operands to binary expression ('float' and 'float')}}
+ // expected-note@-3{{while forming binary operator '&='}}
+ // expected-error@-4{{invalid operands to binary expression ('float' and 'float')}}
while (1);
struct CompositeHasComposite ChCArray[5];
- // expected-error@+6{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
+ // expected-error@+7{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
// expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
- // expected-note@+4{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
- // expected-error@+3{{variable of type 'float' referenced in OpenACC 'reduction' clause does not have a valid operation available}}
+ // expected-note@+5{{OpenACC 'reduction' variable reference must be a scalar variable or a composite of scalars, or an array, sub-array, or element of scalar types}}
+ // expected-error@+4{{variable of type 'float' referenced in OpenACC 'reduction' clause does not have a valid operation available}}
// expected-note@#COS_FLOAT{{while forming combiner for compound type 'CompositeOfScalars'}}
+ // expected-note@+2{{while forming binary operator '&='}}
// expected-error@+1{{invalid operands to binary expression ('float' and 'float')}}
#pragma acc parallel reduction(&: CoS, Array[I], ChCArray[0:I])
while (1);
diff --git a/clang/test/SemaOpenACC/compute-construct-reduction-clause.cpp b/clang/test/SemaOpenACC/compute-construct-reduction-clause.cpp
index e3a487a..70dc3d6d 100644
--- a/clang/test/SemaOpenACC/compute-construct-reduction-clause.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-reduction-clause.cpp
@@ -62,10 +62,12 @@ void uses(unsigned Parm) {
  // Vars in a reduction must be scalars or composites of scalars.
#pragma acc parallel reduction(&: CoS, I, F)
// expected-error@-1{{variable of type 'float' referenced in OpenACC 'reduction' clause does not have a valid operation available}}
- // expected-error@-2{{invalid operands to binary expression ('float' and 'float')}}
- // expected-error@-3{{variable of type 'float' referenced in OpenACC 'reduction' clause does not have a valid operation available}}
+ // expected-note@-2{{while forming binary operator '&='}}
+ // expected-error@-3{{invalid operands to binary expression ('float' and 'float')}}
+ // expected-error@-4{{variable of type 'float' referenced in OpenACC 'reduction' clause does not have a valid operation available}}
// expected-note@#COS_FLOAT{{while forming combiner for compound type 'CompositeOfScalars'}}
- // expected-error@-5{{invalid operands to binary expression ('float' and 'float')}}
+ // expected-note@-6{{while forming binary operator '&='}}
+ // expected-error@-7{{invalid operands to binary expression ('float' and 'float')}}
while (1);
// expected-error@+3{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
// expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
@@ -78,7 +80,8 @@ void uses(unsigned Parm) {
#pragma acc parallel reduction(&: CoS, Array[I], Array[0:I])
// expected-error@-1{{variable of type 'float' referenced in OpenACC 'reduction' clause does not have a valid operation available}}
// expected-note@#COS_FLOAT{{while forming combiner for compound type 'CompositeOfScalars'}}
- // expected-error@-3{{invalid operands to binary expression ('float' and 'float')}}
+ // expected-note@-3{{while forming binary operator '&='}}
+ // expected-error@-4{{invalid operands to binary expression ('float' and 'float')}}
while (1);
// expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
@@ -221,7 +224,8 @@ void TemplUses(T Parm, U CoS, V ChC) {
#pragma acc parallel reduction(&: CoS, Var, Parm)
// expected-error@-1{{variable of type 'float' referenced in OpenACC 'reduction' clause does not have a valid operation available}}
// expected-note@#COS_FLOAT{{while forming combiner for compound type 'CompositeOfScalars'}}
- // expected-error@-3{{invalid operands to binary expression ('float' and 'float')}}
+ // expected-note@-3{{while forming binary operator '&='}}
+ // expected-error@-4{{invalid operands to binary expression ('float' and 'float')}}
while (1);
// expected-error@+3{{invalid type 'struct CompositeOfScalars' used in OpenACC 'reduction' variable reference; type is not a scalar value}}
// expected-note@#COS_FIELD{{used as field 'COS' of composite 'CompositeHasComposite'}}
@@ -236,7 +240,8 @@ void TemplUses(T Parm, U CoS, V ChC) {
#pragma acc parallel reduction(&: CoS, Array[Var], Array[0:Var])
// expected-error@-1{{variable of type 'float' referenced in OpenACC 'reduction' clause does not have a valid operation available}}
// expected-note@#COS_FLOAT{{while forming combiner for compound type 'CompositeOfScalars'}}
- // expected-error@-3{{invalid operands to binary expression ('float' and 'float')}}
+ // expected-note@-3{{while forming binary operator '&='}}
+ // expected-error@-4{{invalid operands to binary expression ('float' and 'float')}}
while (1);
// expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
diff --git a/clang/test/SemaTemplate/GH164330.cpp b/clang/test/SemaTemplate/GH164330.cpp
new file mode 100644
index 0000000..767420d
--- /dev/null
+++ b/clang/test/SemaTemplate/GH164330.cpp
@@ -0,0 +1,309 @@
+// RUN: %clang_cc1 -std=c++20 -verify %s
+// expected-no-diagnostics
+
+template <int __v> struct integral_constant {
+ static constexpr int value = __v;
+};
+template <bool _Val> using _BoolConstant = integral_constant<_Val>;
+template <int, class> struct tuple_element;
+template <class...> class tuple;
+template <int _Ip, class... _Tp> struct tuple_element<_Ip, tuple<_Tp...>> {
+ using type = __type_pack_element<_Ip, _Tp...>;
+};
+template <class> struct tuple_size;
+template <bool> using __enable_if_t = int;
+template <template <class> class _BaseType, class _Tp, _Tp _SequenceSize>
+using __make_integer_sequence_impl =
+ __make_integer_seq<_BaseType, _Tp, _SequenceSize>;
+template <class _Tp, _Tp...> struct __integer_sequence;
+template <int... _Indices>
+using __index_sequence = __integer_sequence<int, _Indices...>;
+template <int _SequenceSize>
+using __make_index_sequence =
+ __make_integer_sequence_impl<__integer_sequence, int, _SequenceSize>;
+template <class _Tp, _Tp...> struct integer_sequence {};
+template <int... _Ip> using index_sequence = integer_sequence<int, _Ip...>;
+template <class _Tp, _Tp _Ep>
+using make_integer_sequence =
+ __make_integer_sequence_impl<integer_sequence, _Tp, _Ep>;
+template <int _Np> using make_index_sequence = make_integer_sequence<int, _Np>;
+enum __element_count : int;
+constexpr void __constexpr_memmove(char *__dest, const char *__src,
+ __element_count __n) {
+ __builtin_memmove(__dest, __src, __n);
+}
+template <class _Tp> using __underlying_type_t = __underlying_type(_Tp);
+template <class _Tp> using underlying_type_t = __underlying_type_t<_Tp>;
+template <class _Tp, class> using __enable_if_tuple_size_imp = _Tp;
+template <class _Tp>
+struct tuple_size<__enable_if_tuple_size_imp<
+ const _Tp, __enable_if_t<_BoolConstant<__is_volatile(int)>::value>>>
+ : integral_constant<tuple_size<_Tp>::value> {};
+template <class... _Tp>
+struct tuple_size<tuple<_Tp...>> : integral_constant<sizeof...(_Tp)> {};
+template <class _Tp> constexpr int tuple_size_v = tuple_size<_Tp>::value;
+template <class _T1, class _T2> struct pair {
+ _T1 first;
+ _T2 second;
+};
+template <class _T1> constexpr pair<_T1, char *> make_pair(_T1, char *__t2) {
+ return pair(_T1(), __t2);
+}
+template <int, class _Hp> struct __tuple_leaf {
+ _Hp __value_;
+ constexpr const _Hp &get() const { return __value_; }
+};
+template <class...> struct __tuple_impl;
+template <int... _Indx, class... _Tp>
+struct __tuple_impl<__index_sequence<_Indx...>, _Tp...>
+ : __tuple_leaf<_Indx, _Tp>... {
+ template <class... _Args>
+ constexpr __tuple_impl(int, _Args... __args)
+ : __tuple_leaf<_Indx, _Tp>(__args)... {}
+};
+template <class... _Tp> struct tuple {
+ __tuple_impl<__make_index_sequence<sizeof...(_Tp)>, _Tp...> __base_;
+ template <class... _Up> constexpr tuple(_Up... __u) : __base_({}, __u...) {}
+};
+template <int _Ip, class... _Tp>
+constexpr const tuple_element<_Ip, tuple<_Tp...>>::type &
+get(const tuple<_Tp...> &__t) noexcept {
+ return static_cast<const __tuple_leaf<
+ _Ip, typename tuple_element<_Ip, tuple<_Tp...>>::type> &>(__t.__base_)
+ .get();
+}
+template <class... _Tp> constexpr tuple<_Tp...> make_tuple(_Tp... __t) {
+ return tuple<_Tp...>(__t...);
+}
+constexpr int __char_traits_length_checked(const char *__s) {
+ return __builtin_strlen(__s);
+}
+struct basic_string_view {
+ constexpr basic_string_view() {}
+ constexpr basic_string_view(const char *__s)
+ : __data_(__s), __size_(__char_traits_length_checked(__s)) {}
+ constexpr const char *begin() { return __data_; }
+ constexpr const char *end() {
+ return __data_ + __size_;
+ }
+ const char *__data_;
+ int __size_;
+};
+template <class _Algorithm>
+constexpr pair<const char *, char *>
+__copy_move_unwrap_iters(const char *__first, const char *__last,
+ char *__out_first) {
+ pair<const char *, const char *> __range = {__first, __last};
+ auto __result = _Algorithm()(__range.first, __range.second, __out_first);
+ return make_pair(__result.first, __result.second);
+}
+struct __copy_impl {
+ constexpr pair<const char *, char *>
+ operator()(const char *__first, const char *__last, char *__result) {
+ const int __n(__last - __first);
+ __constexpr_memmove(__result, __first, __element_count(__n));
+ return make_pair(__last, __result);
+ }
+};
+constexpr char *copy(const char *__first, const char *__last, char *__result) {
+ return __copy_move_unwrap_iters<__copy_impl>(__first, __last, __result).second;
+}
+constexpr char *copy_n(const char *__first, int __orig_n, char *__result) {
+ return copy(__first, __first + __orig_n, __result);
+}
+template <int _Size> struct array {
+ basic_string_view __elems_[_Size];
+ constexpr basic_string_view &operator[](int __n) { return __elems_[__n]; }
+ constexpr basic_string_view operator[](int __n) const {
+ return __elems_[__n];
+ }
+};
+
+template <typename> struct FieldId;
+
+template <FieldId field> constexpr auto FieldIdToInnerValue() {
+ return field.template ToInnerValue<field>();
+}
+struct FieldNameEnum {
+ enum class type;
+};
+template <int N> using FieldName = FieldNameEnum::type;
+template <typename, auto> struct GetParentMessageAtIndexImpl;
+template <typename, auto> struct FieldInfoHelper;
+template <FieldId...> struct PathImpl;
+template <int N> struct LongPathLiteral {
+ consteval LongPathLiteral(const char (&s)[N]) {
+ copy_n(s, N, long_path)[N] = field_count = long_path_size = 1;
+ }
+ consteval basic_string_view to_string_view() const { return long_path; }
+ char long_path[N + 1];
+ int long_path_size;
+ int field_count;
+};
+template <LongPathLiteral kLongPath> consteval auto get_field_components() {
+ basic_string_view long_path(kLongPath.to_string_view());
+ array<kLongPath.field_count> ret;
+ for (int i = 0; i < kLongPath.field_count; ++i)
+ ret[i] = long_path;
+ return ret;
+}
+template <LongPathLiteral kLongPath>
+constexpr auto kFieldComponents = get_field_components<kLongPath>();
+template <LongPathLiteral kLongPath> struct LongPathHelper {
+ template <int... I>
+ static PathImpl<kFieldComponents<kLongPath>[I]...>
+ PathForLongPath(index_sequence<I...>);
+ using type =
+ decltype(PathForLongPath(make_index_sequence<kLongPath.field_count>{}));
+};
+template <typename T> struct PathFieldId {
+ template <typename Arg> constexpr PathFieldId(Arg &arg) : value(arg) {}
+ T value;
+};
+template <PathFieldId...> constexpr auto PathImplHelper();
+
+template <int N> using FieldName = FieldName<N>;
+enum class FieldNumber;
+template <PathFieldId... fields>
+constexpr auto Path = PathImplHelper<fields...>();
+template <typename Proto, FieldId field>
+using FieldInfo =
+ FieldInfoHelper<Proto, FieldIdToInnerValue<field>()>::type;
+template <> struct FieldId<FieldNameEnum::type> {
+ constexpr FieldId(basic_string_view);
+ int size;
+ long hash;
+ template <auto field> static constexpr auto ToInnerValue() {
+ return static_cast<FieldNameEnum::type>(field.hash);
+ }
+};
+FieldId(basic_string_view) -> FieldId<FieldNameEnum::type>;
+template <typename Proto, FieldId field, int index>
+using GetParentMessageAtIndex = GetParentMessageAtIndexImpl<
+ Proto, FieldIdToInnerValue<field>()>::type;
+
+template <typename T>
+PathFieldId(T &t) -> PathFieldId<decltype(LongPathLiteral(t))>;
+template <FieldId... fields1, FieldId... fields2>
+constexpr PathImpl<fields1..., fields2...> *ConcatPath(PathImpl<fields1...> *,
+ PathImpl<fields2...> *) {
+ return nullptr;
+}
+template <LongPathLiteral long_path_literal>
+constexpr LongPathHelper<long_path_literal>::type *SinglePath() {
+ return nullptr;
+}
+template <PathFieldId... fields> constexpr auto PathImplHelper() {
+ return ConcatPath(SinglePath<fields.value>()...);
+}
+template <auto hash_prime, auto offset_bias>
+constexpr auto Fnv1a(basic_string_view str) {
+ auto hash = offset_bias;
+ for (char c : str) {
+ hash ^= c;
+ hash *= hash_prime;
+ }
+ return hash;
+}
+constexpr auto HashField(basic_string_view str) {
+ return Fnv1a<1099511628211u, 1039346656037>(str);
+}
+template <typename FI> struct FieldInfoValueTypeAlias : FI {};
+template <typename Proto, auto field> struct FieldInfoHelperBase {
+ static constexpr auto MaskFieldNameHash() {
+ using FieldEnum = decltype(field);
+ return FieldEnum{static_cast<underlying_type_t<FieldEnum>>(field) & 31};
+ }
+ using internal_type =
+ Proto::template FieldInfoImpl<decltype(field), MaskFieldNameHash()>;
+};
+template <typename Proto, auto field> struct FieldInfoHelper {
+ using type = FieldInfoValueTypeAlias<
+ typename FieldInfoHelperBase<Proto, field>::internal_type>;
+};
+
+template <auto... fields>
+struct FieldId<const PathImpl<fields...> *> {
+ constexpr FieldId(PathImpl<fields...> *) : path() {}
+ template <auto field> static constexpr auto ToInnerValue() {
+ return field.path;
+ }
+ const PathImpl<fields...> *path;
+};
+template <auto... fields>
+FieldId(PathImpl<fields...> *)
+ -> FieldId<const PathImpl<fields...> *>;
+
+template <auto> struct UnpackedField {
+ static constexpr bool is_path = false;
+};
+template <auto... fields, const PathImpl<fields...> *path>
+struct UnpackedField<path> {
+ static constexpr auto value = make_tuple(fields...);
+ static constexpr bool is_path = true;
+};
+template <typename Proto, FieldId... fields, const PathImpl<fields...> *path>
+struct GetParentMessageAtIndexImpl<Proto, path> {
+ using type = Proto;
+};
+
+constexpr FieldId<FieldNameEnum::type>::FieldId(basic_string_view str)
+ : size(), hash(HashField(str)) {}
+template <FieldId field> constexpr bool IsPath() {
+ return UnpackedField<
+ FieldIdToInnerValue<field>()>::is_path;
+}
+template <FieldId field> constexpr auto UnpackFieldToTuple() {
+ return UnpackedField<FieldIdToInnerValue<field>()>::value;
+}
+template <int> struct CompileTimeString {
+ consteval CompileTimeString(basic_string_view &v) : internal_view_(v) {}
+ basic_string_view &internal_view_;
+};
+CompileTimeString(basic_string_view) -> CompileTimeString<0>;
+
+template <CompileTimeString... parts> struct NameJoiner {
+ template <CompileTimeString... after>
+ NameJoiner<parts...> operator+(NameJoiner<after...>);
+};
+template <FieldId> struct FieldNameBuilder;
+template <FieldId field>
+ requires(!IsPath<field>())
+struct FieldNameBuilder<field> {
+ template <typename Proto> static auto Get() {
+ return NameJoiner<FieldInfo<Proto, field>::name>();
+ }
+};
+template <FieldId field>
+ requires(IsPath<field>())
+struct FieldNameBuilder<field> {
+ static constexpr auto kTuple = UnpackFieldToTuple<field>();
+ static constexpr int kTupleSize = tuple_size_v<decltype(kTuple)>;
+ template <typename Proto, int... Is> static void Get(index_sequence<Is...>) {
+ (FieldNameBuilder<get<Is>(
+ kTuple)>::template Get<GetParentMessageAtIndex<Proto, field, Is>>() +
+ ...);
+ }
+ template <typename Proto> static void Get() {
+ Get<Proto>(make_index_sequence<kTupleSize>());
+ }
+};
+
+struct T {
+ template <typename FieldType, FieldType> struct FieldInfoImpl;
+};
+void AddPathsToFieldMask() {
+ FieldNameBuilder<Path<"message_field", "int32_field">>::Get<T>();
+}
+template <> struct T::FieldInfoImpl<FieldNumber, FieldNumber{1}> {
+ static basic_string_view name;
+};
+template <>
+struct T::FieldInfoImpl<FieldName<1>, FieldName<1>{12}>
+ : FieldInfoImpl<FieldNumber, FieldNumber{1}> {};
+template <> struct T::FieldInfoImpl<FieldNumber, FieldNumber{10}> {
+ static basic_string_view name;
+};
+template <>
+struct T::FieldInfoImpl<FieldName<3>, FieldName<3>{11}>
+ : FieldInfoImpl<FieldNumber, FieldNumber{10}> {};
diff --git a/clang/test/SemaTemplate/concepts.cpp b/clang/test/SemaTemplate/concepts.cpp
index a54bc02..5b0f3d3 100644
--- a/clang/test/SemaTemplate/concepts.cpp
+++ b/clang/test/SemaTemplate/concepts.cpp
@@ -1441,6 +1441,24 @@ void main() { Feeder<int>{}.feed<Cat<int>>(); }
}
+namespace case9 {
+
+template <typename>
+concept a = requires { requires true; };
+template <typename T>
+concept b = a<typename T::EntitySpec>;
+template <typename T>
+concept c = requires { b<T>; };
+template <typename T>
+ requires c<T>
+struct s;
+template <typename> constexpr bool f() { return true; }
+template <typename T> constexpr bool d = f<T>();
+struct s2;
+static_assert(d<s<s2>>);
+
+}
+
}
namespace GH162125 {
diff --git a/clang/test/SemaTemplate/instantiate-self.cpp b/clang/test/SemaTemplate/instantiate-self.cpp
index 4999a4a..1cdf2c6 100644
--- a/clang/test/SemaTemplate/instantiate-self.cpp
+++ b/clang/test/SemaTemplate/instantiate-self.cpp
@@ -86,7 +86,7 @@ namespace test7 {
namespace test8 {
template<typename T> struct A {
- int n = A{}.n; // expected-error {{default member initializer for 'n' uses itself}} expected-note {{instantiation of default member init}}
+ int n = A{}.n; // expected-error {{default member initializer for 'n' uses itself}}
};
A<int> ai = {}; // expected-note {{instantiation of default member init}}
}
@@ -100,7 +100,7 @@ namespace test9 {
namespace test10 {
template<typename T> struct A {
- void f() noexcept(noexcept(f())); // expected-error {{exception specification of 'f' uses itself}} expected-note {{instantiation of}}
+ void f() noexcept(noexcept(f())); // expected-error {{exception specification of 'f' uses itself}}
};
bool b = noexcept(A<int>().f()); // expected-note {{instantiation of}}
}
@@ -125,7 +125,7 @@ namespace test11 {
}
namespace test12 {
- template<typename T> int f(T t, int = f(T())) {} // expected-error {{recursive evaluation of default argument}} expected-note {{instantiation of}}
+ template<typename T> int f(T t, int = f(T())) {} // expected-error {{recursive evaluation of default argument}}
struct X {};
int q = f(X()); // expected-note {{instantiation of}}
}
@@ -171,3 +171,25 @@ namespace test13 {
A::Z<A> aza;
#endif
}
+
+namespace test14 {
+ template <class> void f();
+ template <class T, decltype(new (f<void>()) T)> T x;
+}
+
+namespace test15 {
+ template <class V> void __overload(V);
+
+ template <class, class> struct __invoke_result_impl;
+ template <class _Arg>
+ struct __invoke_result_impl<decltype(__overload(*(_Arg*)0)),
+ _Arg>;
+ struct variant {
+ template <class _Arg,
+ class = typename __invoke_result_impl<void, _Arg>::type>
+ variant(_Arg);
+ };
+ struct Matcher {
+ Matcher(variant);
+ } vec(vec); // expected-warning {{uninitialized}}
+} // namespace test15
diff --git a/clang/test/Unit/lit.cfg.py b/clang/test/Unit/lit.cfg.py
index 37e91d0..ebe35a1 100644
--- a/clang/test/Unit/lit.cfg.py
+++ b/clang/test/Unit/lit.cfg.py
@@ -51,7 +51,7 @@ def find_shlibpath_var():
yield "LD_LIBRARY_PATH"
elif platform.system() == "Darwin":
yield "DYLD_LIBRARY_PATH"
- elif platform.system() == "Windows":
+ elif platform.system() == "Windows" or sys.platform == "cygwin":
yield "PATH"
elif platform.system() == "AIX":
yield "LIBPATH"
diff --git a/clang/unittests/Analysis/ExprMutationAnalyzerTest.cpp b/clang/unittests/Analysis/ExprMutationAnalyzerTest.cpp
index 95f8ae2..ef22960 100644
--- a/clang/unittests/Analysis/ExprMutationAnalyzerTest.cpp
+++ b/clang/unittests/Analysis/ExprMutationAnalyzerTest.cpp
@@ -2038,4 +2038,42 @@ TEST(ExprMutationAnalyzerTest, PointeeMutatedByConditionOperator) {
EXPECT_TRUE(isPointeeMutated(Results, AST.get()));
}
+TEST(ExprMutationAnalyzerTest, PointeeMutatedByReturn) {
+ {
+ const std::string Code = R"(
+ int * f() {
+ int *const x = nullptr;
+ return x;
+ })";
+ auto AST = buildASTFromCodeWithArgs(Code, {"-Wno-everything"});
+ auto Results =
+ match(withEnclosingCompound(declRefTo("x")), AST->getASTContext());
+ EXPECT_TRUE(isPointeeMutated(Results, AST.get()));
+ }
+ {
+ const std::string Code = R"(
+ int * f() {
+ int *const x = nullptr;
+ return x;
+ })";
+    // In C++23, the AST will contain a NoOp cast.
+ auto AST =
+ buildASTFromCodeWithArgs(Code, {"-Wno-everything", "-std=c++23"});
+ auto Results =
+ match(withEnclosingCompound(declRefTo("x")), AST->getASTContext());
+ EXPECT_TRUE(isPointeeMutated(Results, AST.get()));
+ }
+ {
+ const std::string Code = R"(
+ int const* f() {
+ int *const x = nullptr;
+ return x;
+ })";
+ auto AST = buildASTFromCodeWithArgs(Code, {"-Wno-everything"});
+ auto Results =
+ match(withEnclosingCompound(declRefTo("x")), AST->getASTContext());
+ EXPECT_FALSE(isPointeeMutated(Results, AST.get()));
+ }
+}
+
} // namespace clang
diff --git a/clang/unittests/Analysis/FlowSensitive/CMakeLists.txt b/clang/unittests/Analysis/FlowSensitive/CMakeLists.txt
index 3508238..1d932ec 100644
--- a/clang/unittests/Analysis/FlowSensitive/CMakeLists.txt
+++ b/clang/unittests/Analysis/FlowSensitive/CMakeLists.txt
@@ -25,6 +25,8 @@ add_clang_unittest(ClangAnalysisFlowSensitiveTests
TransferTest.cpp
TypeErasedDataflowAnalysisTest.cpp
UncheckedOptionalAccessModelTest.cpp
+ UncheckedStatusOrAccessModelTest.cpp
+ UncheckedStatusOrAccessModelTestFixture.cpp
ValueTest.cpp
WatchedLiteralsSolverTest.cpp
CLANG_LIBS
diff --git a/clang/unittests/Analysis/FlowSensitive/MockHeaders.cpp b/clang/unittests/Analysis/FlowSensitive/MockHeaders.cpp
index d3dee58..d875542 100644
--- a/clang/unittests/Analysis/FlowSensitive/MockHeaders.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/MockHeaders.cpp
@@ -459,6 +459,10 @@ struct is_scalar
template <>
struct is_scalar<nullptr_t> : public true_type {};
+struct in_place_t {};
+
+constexpr in_place_t in_place;
+
} // namespace std
#endif // STD_TYPE_TRAITS_H
@@ -511,9 +515,8 @@ using remove_reference_t = typename std::remove_reference<T>::type;
template <typename T>
using decay_t = typename std::decay<T>::type;
-struct in_place_t {};
-
-constexpr in_place_t in_place;
+using std::in_place;
+using std::in_place_t;
} // namespace absl
#endif // ABSL_TYPE_TRAITS_H
@@ -589,9 +592,6 @@ static constexpr char StdOptionalHeader[] = R"(
namespace std {
-struct in_place_t {};
-constexpr in_place_t in_place;
-
struct nullopt_t {
constexpr explicit nullopt_t() {}
};
diff --git a/clang/unittests/Analysis/FlowSensitive/UncheckedStatusOrAccessModelTest.cpp b/clang/unittests/Analysis/FlowSensitive/UncheckedStatusOrAccessModelTest.cpp
new file mode 100644
index 0000000..20630a4c
--- /dev/null
+++ b/clang/unittests/Analysis/FlowSensitive/UncheckedStatusOrAccessModelTest.cpp
@@ -0,0 +1,34 @@
+//===- UncheckedStatusOrAccessModelTest.cpp -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <utility>
+
+#include "UncheckedStatusOrAccessModelTestFixture.h"
+#include "clang/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.h"
+#include "gtest/gtest.h"
+
+namespace clang::dataflow::statusor_model {
+namespace {
+
+auto Executor = std::make_unique<
+ UncheckedStatusOrAccessModelTestExecutor<UncheckedStatusOrAccessModel>>();
+
+INSTANTIATE_TEST_SUITE_P(
+ UncheckedStatusOrAccessModelTest, UncheckedStatusOrAccessModelTest,
+ testing::Values(
+ std::make_pair(Executor.get(),
+ UncheckedStatusOrAccessModelTestAliasKind::kUnaliased),
+ std::make_pair(
+ Executor.get(),
+ UncheckedStatusOrAccessModelTestAliasKind::kPartiallyAliased),
+ std::make_pair(
+ Executor.get(),
+ UncheckedStatusOrAccessModelTestAliasKind::kFullyAliased)));
+} // namespace
+
+} // namespace clang::dataflow::statusor_model
diff --git a/clang/unittests/Analysis/FlowSensitive/UncheckedStatusOrAccessModelTestFixture.cpp b/clang/unittests/Analysis/FlowSensitive/UncheckedStatusOrAccessModelTestFixture.cpp
new file mode 100644
index 0000000..4827cc1
--- /dev/null
+++ b/clang/unittests/Analysis/FlowSensitive/UncheckedStatusOrAccessModelTestFixture.cpp
@@ -0,0 +1,2518 @@
+//===- UncheckedStatusOrAccessModelTestFixture.cpp ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "UncheckedStatusOrAccessModelTestFixture.h"
+#include "MockHeaders.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "gtest/gtest.h"
+
+namespace clang::dataflow::statusor_model {
+namespace {
+
+TEST_P(UncheckedStatusOrAccessModelTest, NoStatusOrMention) {
+ ExpectDiagnosticsFor(R"cc(
+ void target() { "nop"; }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest,
+ UnwrapWithoutCheck_Lvalue_CallToValue) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, NonExplicitInitialization) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+ STATUSOR_INT target() {
+ STATUSOR_INT x = Make<STATUSOR_INT>();
+ return x.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest,
+ UnwrapWithoutCheck_Lvalue_CallToValue_NewLine) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ sor. // force newline
+ value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest,
+ UnwrapWithoutCheck_Rvalue_CallToValue) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ std::move(sor).value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest,
+ UnwrapWithoutCheck_Lvalue_CallToValueOrDie) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ sor.ValueOrDie(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest,
+ UnwrapWithoutCheck_Rvalue_CallToValueOrDie) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ std::move(sor).ValueOrDie(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest,
+ UnwrapWithoutCheck_Lvalue_CallToOperatorStar) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ *sor; // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest,
+ UnwrapWithoutCheck_Lvalue_CallToOperatorStarSeparateLine) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ * // [[unsafe]]
+ sor;
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest,
+ UnwrapWithoutCheck_Rvalue_CallToOperatorStar) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ *std::move(sor); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest,
+ UnwrapWithoutCheck_Lvalue_CallToOperatorArrow) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ struct Foo {
+ void foo();
+ };
+
+ void target(absl::StatusOr<Foo> sor) {
+ sor->foo(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest,
+ UnwrapWithoutCheck_Rvalue_CallToOperatorArrow) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ struct Foo {
+ void foo();
+ };
+
+ void target(absl::StatusOr<Foo> sor) {
+ std::move(sor)->foo(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, UnwrapRvalueWithCheck) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ if (sor.ok()) std::move(sor).value();
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, ParensInDeclInitExpr) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ auto sor = (Make<STATUSOR_INT>());
+ if (sor.ok()) sor.value();
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, ReferenceInDeclInitExpr) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ struct Foo {
+ const STATUSOR_INT& GetStatusOrInt() const;
+ };
+
+ void target(Foo foo) {
+ auto sor = foo.GetStatusOrInt();
+ if (sor.ok()) sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ struct Foo {
+ STATUSOR_INT& GetStatusOrInt();
+ };
+
+ void target(Foo foo) {
+ auto sor = foo.GetStatusOrInt();
+ if (sor.ok()) sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ struct Foo {
+ STATUSOR_INT&& GetStatusOrInt() &&;
+ };
+
+ void target(Foo foo) {
+ auto sor = std::move(foo).GetStatusOrInt();
+ if (sor.ok()) sor.value();
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, IfThenElse) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ if (sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ if (auto sor = Make<STATUSOR_INT>(); sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, JoinSafeSafe) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor, bool b) {
+ if (sor.ok()) {
+ if (b)
+ sor.value();
+ else
+ sor.value();
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, JoinUnsafeUnsafe) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor, bool b) {
+ if (b)
+ sor.value(); // [[unsafe]]
+ else
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, InversedIfThenElse) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ if (!sor.ok())
+ sor.value(); // [[unsafe]]
+ else
+ sor.value();
+
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, DoubleInversedIfThenElse) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ if (!!sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, TripleInversedIfThenElse) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ if (!!!sor.ok())
+ sor.value(); // [[unsafe]]
+ else
+ sor.value();
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, IfThenElse_LhsAndRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ if (x.ok() && y.ok()) {
+ x.value();
+
+ y.value();
+ } else {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, IfThenElse_NotLhsAndRhs) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ if (!x.ok() && y.ok()) {
+ y.value();
+
+ x.value(); // [[unsafe]]
+ } else {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, IfThenElse_LhsAndNotRhs) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ if (x.ok() && !y.ok()) {
+ x.value();
+
+ y.value(); // [[unsafe]]
+ } else {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, IfThenElse_NotLhsAndNotRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ if (!x.ok() && !y.ok()) {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ } else {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, IfThenElse_Not_LhsAndRhs) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ if (!(x.ok() && y.ok())) {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ } else {
+ x.value();
+
+ y.value();
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, IfThenElse_Not_NotLhsAndRhs) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ if (!(!x.ok() && y.ok())) {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ } else {
+ y.value();
+
+ x.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, IfThenElse_Not_LhsAndNotRhs) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ if (!(x.ok() && !y.ok())) {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ } else {
+ x.value();
+
+ y.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, IfThenElse_Not_NotLhsAndNotRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ if (!(!x.ok() && !y.ok())) {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ } else {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, IfThenElse_LhsOrRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ if (x.ok() || y.ok()) {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ } else {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, IfThenElse_NotLhsOrRhs) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ if (!x.ok() || y.ok()) {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ } else {
+ x.value();
+
+ y.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, IfThenElse_LhsOrNotRhs) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ if (x.ok() || !y.ok()) {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ } else {
+ y.value();
+
+ x.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, IfThenElse_NotLhsOrNotRhs) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ if (!x.ok() || !y.ok()) {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ } else {
+ x.value();
+
+ y.value();
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, IfThenElse_Not_LhsOrRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ if (!(x.ok() || y.ok())) {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ } else {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, IfThenElse_Not_NotLhsOrRhs) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ if (!(!x.ok() || y.ok())) {
+ x.value();
+
+ y.value(); // [[unsafe]]
+ } else {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, IfThenElse_Not_LhsOrNotRhs) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ if (!(x.ok() || !y.ok())) {
+ y.value();
+
+ x.value(); // [[unsafe]]
+ } else {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, IfThenElse_Not_NotLhsOrNotRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ if (!(!x.ok() || !y.ok())) {
+ x.value();
+
+ y.value();
+ } else {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, TerminatingIfThenBranch) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ if (!sor.ok()) return;
+
+ sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ if (sor.ok()) return;
+
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ if (!x.ok() || !y.ok()) return;
+
+ x.value();
+
+ y.value();
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, TerminatingIfElseBranch) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ if (sor.ok()) {
+ } else {
+ return;
+ }
+
+ sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ if (!sor.ok()) {
+ } else {
+ return;
+ }
+
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, TerminatingIfThenBranchInLoop) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ while (Make<bool>()) {
+ if (!sor.ok()) continue;
+
+ sor.value();
+ }
+
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ while (Make<bool>()) {
+ if (!sor.ok()) break;
+
+ sor.value();
+ }
+
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, TernaryConditionalOperator) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ sor.ok() ? sor.value() : 21;
+
+ sor.ok() ? 21 : sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ !sor.ok() ? 21 : sor.value();
+
+ !sor.ok() ? sor.value() : 21; // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor1, STATUSOR_INT sor2) {
+ !((__builtin_expect(false || (!(sor1.ok() && sor2.ok())), false)))
+ ? (void)0
+ : (void)1;
+ do {
+ sor1.value(); // [[unsafe]]
+ sor2.value(); // [[unsafe]]
+ } while (true);
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ while (Make<bool>()) sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ while (sor.ok()) sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ while (!sor.ok()) sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ while (!!sor.ok()) sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ while (!!!sor.ok()) sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_LhsAndRhs) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (x.ok() && y.ok()) {
+ x.value();
+
+ y.value();
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_NotLhsAndRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!x.ok() && y.ok()) x.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!x.ok() && y.ok()) y.value();
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_LhsAndNotRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (x.ok() && !y.ok()) x.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (x.ok() && !y.ok()) y.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_NotLhsAndNotRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!x.ok() && !y.ok()) x.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!x.ok() && !y.ok()) y.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_Not_LhsAndRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!(x.ok() && y.ok())) x.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!(x.ok() && y.ok())) y.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_Not_NotLhsAndRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!(!x.ok() && y.ok())) x.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!(!x.ok() && y.ok())) y.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_Not_LhsAndNotRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!(x.ok() && !y.ok())) x.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!(x.ok() && !y.ok())) y.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_Not_NotLhsAndNotRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!(!x.ok() && !y.ok())) x.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!(!x.ok() && !y.ok())) y.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_LhsOrRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (x.ok() || y.ok()) x.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (x.ok() || y.ok()) y.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_NotLhsOrRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!x.ok() || y.ok()) x.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!x.ok() || y.ok()) y.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_LhsOrNotRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (x.ok() || !y.ok()) x.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (x.ok() || !y.ok()) y.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_NotLhsOrNotRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!x.ok() || !y.ok()) x.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!x.ok() || !y.ok()) y.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_Not_LhsOrRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!(x.ok() || y.ok())) x.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!(x.ok() || y.ok())) y.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_Not_NotLhsOrRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!(!x.ok() || y.ok())) x.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!(!x.ok() || y.ok())) y.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_Not_LhsOrNotRhs) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!(x.ok() || !y.ok())) x.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!(x.ok() || !y.ok())) y.value();
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_Not_NotLhsOrNotRhs) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (!(!x.ok() || !y.ok())) {
+ x.value();
+
+ y.value();
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_AccessAfterStmt) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ while (sor.ok()) {
+ }
+
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ while (!sor.ok()) {
+ }
+
+ sor.value();
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_TerminatingBranch_Return) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ while (!sor.ok()) return;
+
+ sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ while (sor.ok()) return;
+
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, While_NestedIfWithBinaryCondition) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (Make<bool>()) {
+ if (x.ok() && y.ok()) {
+ x.value();
+
+ y.value();
+ }
+ }
+ }
+ )cc");
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ while (Make<bool>()) {
+ if (!(!x.ok() || !y.ok())) {
+ x.value();
+
+ y.value();
+ }
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, BuiltinExpect) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT x, STATUSOR_INT y) {
+ if (!__builtin_expect(!x.ok() || __builtin_expect(!y.ok(), true), false)) {
+ x.value();
+
+ y.value();
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, CopyAssignment) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ STATUSOR_INT sor = Make<STATUSOR_INT>();
+ if (sor.ok()) {
+ sor = Make<STATUSOR_INT>();
+ sor.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ STATUSOR_INT sor = Make<STATUSOR_INT>();
+ if (!sor.ok()) return;
+
+ sor = Make<STATUSOR_INT>();
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ STATUSOR_INT x = Make<STATUSOR_INT>();
+ if (x.ok()) {
+ STATUSOR_INT y = x;
+ x = Make<STATUSOR_INT>();
+
+ y.value();
+
+ x.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ STATUSOR_INT x = Make<STATUSOR_INT>();
+ STATUSOR_INT y = x;
+ if (!y.ok()) return;
+
+ x.value();
+
+ y = Make<STATUSOR_INT>();
+ x.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ struct Foo {
+ STATUSOR_INT bar;
+ };
+
+ void target(Foo foo) {
+ foo.bar = Make<STATUSOR_INT>();
+ if (foo.bar.ok()) {
+ foo.bar.value();
+
+ foo.bar = Make<STATUSOR_INT>();
+ foo.bar.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, ShortCircuitingBinaryOperators) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_BOOL sor) {
+ bool b = sor.ok() & sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_BOOL sor) {
+ bool b = sor.ok() && sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_BOOL sor) {
+ bool b = !sor.ok() && sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_BOOL sor) {
+ bool b = sor.ok() || sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_BOOL sor) {
+ bool b = !sor.ok() || sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(bool b, STATUSOR_INT sor) {
+ if (b || sor.ok()) {
+ do {
+ sor.value(); // [[unsafe]]
+ } while (true);
+ }
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(bool b, STATUSOR_INT sor) {
+ if (__builtin_expect(b || sor.ok(), false)) {
+ do {
+ sor.value(); // [[unsafe]]
+ } while (false);
+ }
+ }
+ )cc");
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor1, STATUSOR_INT sor2) {
+ while (sor1.ok() && sor2.ok()) sor1.value();
+ while (sor1.ok() && sor2.ok()) sor2.value();
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, References) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ STATUSOR_INT x = Make<STATUSOR_INT>();
+ STATUSOR_INT& y = x;
+ if (x.ok()) {
+ x.value();
+
+ y.value();
+ } else {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ STATUSOR_INT x = Make<STATUSOR_INT>();
+ STATUSOR_INT& y = x;
+ if (y.ok()) {
+ x.value();
+
+ y.value();
+ } else {
+ x.value(); // [[unsafe]]
+
+ y.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ STATUSOR_INT x = Make<STATUSOR_INT>();
+ STATUSOR_INT& y = x;
+ if (!y.ok()) return;
+
+ x.value();
+
+ y = Make<STATUSOR_INT>();
+ x.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ STATUSOR_INT x = Make<STATUSOR_INT>();
+ const STATUSOR_INT& y = x;
+ if (!y.ok()) return;
+
+ y.value();
+
+ x = Make<STATUSOR_INT>();
+ y.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, NoReturnAttribute) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ __attribute__((noreturn)) void f();
+
+ void target(STATUSOR_INT sor) {
+ if (!sor.ok()) f();
+
+ sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void f();
+
+ void target(STATUSOR_INT sor) {
+ if (!sor.ok()) f();
+
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ struct Foo {
+ __attribute__((noreturn)) ~Foo();
+ void Bar();
+ };
+
+ void target(STATUSOR_INT sor) {
+ if (!sor.ok()) Foo().Bar();
+
+ sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ struct Foo {
+ ~Foo();
+ void Bar();
+ };
+
+ void target(STATUSOR_INT sor) {
+ if (!sor.ok()) Foo().Bar();
+
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void f();
+ __attribute__((noreturn)) void g();
+
+ void target(STATUSOR_INT sor) {
+ sor.ok() ? f() : g();
+
+ sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ __attribute__((noreturn)) void f();
+ void g();
+
+ void target(STATUSOR_INT sor) {
+ !sor.ok() ? f() : g();
+
+ sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void f();
+ void g();
+
+ void target(STATUSOR_INT sor) {
+ sor.ok() ? f() : g();
+
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void terminate() __attribute__((noreturn));
+
+ void target(STATUSOR_INT sor) {
+ sor.value(); // [[unsafe]]
+ terminate();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void terminate() __attribute__((noreturn));
+
+ void target(STATUSOR_INT sor) {
+ if (sor.ok()) sor.value();
+ terminate();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void terminate() __attribute__((noreturn));
+
+ struct Foo {
+ ~Foo() __attribute__((noreturn));
+ };
+
+ void target() {
+ auto sor = Make<absl::StatusOr<Foo>>();
+ !(false || !(sor.ok())) ? (void)0 : terminate();
+ sor.value();
+ terminate();
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, DeclInLoop) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ while (auto ok = sor.ok()) sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ using BoolAlias = bool;
+
+ void target(STATUSOR_INT sor) {
+ while (BoolAlias ok = sor.ok()) sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ while (Make<bool>()) {
+ STATUSOR_INT sor = Make<STATUSOR_INT>();
+ sor.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ using StatusOrInt = STATUSOR_INT;
+
+ void target() {
+ while (Make<bool>()) {
+ StatusOrInt sor = Make<STATUSOR_INT>();
+ sor.value(); // [[unsafe]]
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, NonEvaluatedExprInCondition) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ bool unknown();
+
+ void target(STATUSOR_INT sor) {
+ if (unknown() && sor.ok()) sor.value();
+ if (sor.ok() && unknown()) sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ bool unknown();
+
+ void target(STATUSOR_INT sor) {
+ if (!(!unknown() || !sor.ok())) sor.value();
+ if (!(!sor.ok() || !unknown())) sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ bool unknown();
+
+ void target(STATUSOR_INT sor) {
+ if (unknown() || sor.ok()) sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ bool unknown();
+
+ void target(STATUSOR_INT sor) {
+ if (sor.ok() || unknown()) sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, CorrelatedBranches) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(bool b, STATUSOR_INT sor) {
+ if (b || sor.ok()) {
+ if (!b) sor.value();
+ }
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(bool b, STATUSOR_INT sor) {
+ if (b && !sor.ok()) return;
+ if (b) sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(bool b, STATUSOR_INT sor) {
+ if (sor.ok()) b = true;
+ if (b) sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(bool b, STATUSOR_INT sor) {
+ if (b) return;
+ if (sor.ok()) b = true;
+ if (b) sor.value();
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, ConditionWithInitStmt) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ if (STATUSOR_INT sor = Make<STATUSOR_INT>(); sor.ok()) sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ if (STATUSOR_INT sor = Make<STATUSOR_INT>(); !sor.ok())
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, DeadCode) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ bool b = false;
+ if (b) sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ bool b;
+ b = false;
+ if (b) sor.value();
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, TemporaryDestructors) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ sor.ok() ? sor.value() : Fatal().value();
+
+ sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ !sor.ok() ? Fatal().value() : sor.value();
+
+ sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(bool b, STATUSOR_INT sor) {
+ b ? 0 : sor.ok() ? sor.value() : Fatal().value();
+
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(bool b, STATUSOR_INT sor) {
+ for (int i = 0; i < 10; i++) {
+ (b && sor.ok()) ? sor.value() : Fatal().value();
+
+ if (b) sor.value();
+ }
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(bool b, STATUSOR_INT sor) {
+ for (int i = 0; i < 10; i++) {
+ (b || !sor.ok()) ? Fatal().value() : 0;
+
+ if (!b) sor.value();
+ }
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(bool b, STATUSOR_INT sor) {
+ for (int i = 0; i < 10; i++) {
+ (false || !(b && sor.ok())) ? Fatal().value() : 0;
+
+ do {
+ sor.value();
+ } while (b);
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, CheckMacro) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ CHECK(sor.ok());
+ sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ CHECK(!sor.ok());
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, QcheckMacro) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ QCHECK(sor.ok());
+ sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ QCHECK(!sor.ok());
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, CheckNeMacro) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ CHECK_NE(sor.status(), absl::OkStatus());
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, QcheckNeMacro) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ QCHECK_NE(sor.status(), absl::OkStatus());
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, GlobalVars) {
+  // The following examples are not sound, as opaque calls between the ok()
+  // and value() calls could change the StatusOr value.
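+  // A hypothetical illustration of the tolerated unsoundness:
+  //   if (sor.ok()) {
+  //     SomeOpaqueCall();  // could reassign the global sor
+  //     sor.value();       // the analysis still treats this access as safe
+  //   }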
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ static STATUSOR_INT sor;
+
+ void target() {
+ if (sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ static STATUSOR_INT sor;
+ if (sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ struct Foo {
+ static STATUSOR_INT sor;
+ };
+
+ void target(Foo foo) {
+ if (foo.sor.ok())
+ foo.sor.value();
+ else
+ foo.sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ struct Foo {
+ static STATUSOR_INT sor;
+ };
+
+ void target() {
+ if (Foo::sor.ok())
+ Foo::sor.value();
+ else
+ Foo::sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ struct Foo {
+ static STATUSOR_INT sor;
+
+ static void target() {
+ if (sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }
+ };
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ struct Foo {
+ static STATUSOR_INT sor;
+
+ void target() {
+ if (sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }
+ };
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ struct S {
+ static const int x = -1;
+ };
+
+ int target(S s) { return s.x; }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, ReferenceReceivers) {
+  // The following examples are not sound, as opaque calls between the ok()
+  // and value() calls could change the StatusOr value. However, this is the
+  // behavior that users expect, so it is here to stay.
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT& sor) {
+ if (sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ struct Foo {
+ STATUSOR_INT& sor;
+ };
+
+ void target(Foo foo) {
+ if (foo.sor.ok())
+ foo.sor.value();
+ else
+ foo.sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ struct Bar {
+ STATUSOR_INT sor;
+ };
+
+ struct Foo {
+ Bar& bar;
+ };
+
+ void target(Foo foo) {
+ if (foo.bar.sor.ok())
+ foo.bar.sor.value();
+ else
+ foo.bar.sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ struct Foo {
+ STATUSOR_INT& sor;
+ };
+
+ void target(Foo& foo) {
+ if (foo.sor.ok())
+ foo.sor.value();
+ else
+ foo.sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, Lambdas) {
+ ExpectDiagnosticsForLambda(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ [](STATUSOR_INT sor) {
+ if (sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }(Make<STATUSOR_INT>());
+ }
+ )cc");
+ ExpectDiagnosticsForLambda(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ [sor]() {
+ if (sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }();
+ }
+ )cc");
+ ExpectDiagnosticsForLambda(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ [&sor]() {
+ if (sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }();
+ }
+ )cc");
+ ExpectDiagnosticsForLambda(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ [sor2 = sor]() {
+ if (sor2.ok())
+ sor2.value();
+ else
+ sor2.value(); // [[unsafe]]
+ }();
+ }
+ )cc");
+ ExpectDiagnosticsForLambda(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ [&]() {
+ if (sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }();
+ }
+ )cc");
+ ExpectDiagnosticsForLambda(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ [=]() {
+ if (sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }();
+ }
+ )cc");
+ ExpectDiagnosticsForLambda(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ struct Foo {
+ STATUSOR_INT sor;
+
+ void target() {
+ [this]() {
+ if (sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }();
+ }
+ };
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, GoodLambda) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ int target() {
+ STATUSOR_INT sor = Make<STATUSOR_INT>();
+ if (sor.ok()) return [&s = sor.value()] { return s; }();
+ return 0;
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, Status) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void foo();
+
+ void target(STATUS s) {
+ if (s.ok()) foo();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void foo();
+
+ void target() {
+ STATUS s = Make<STATUSOR_INT>().status();
+ if (s.ok()) foo();
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, ExpectThatMacro) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ EXPECT_THAT(sor, testing::status::IsOk());
+
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ EXPECT_THAT(sor.status(), testing::status::IsOk());
+
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ STATUSOR_INT sor = Make<STATUSOR_INT>();
+ EXPECT_THAT(sor, testing::status::IsOk());
+
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, ExpectOkMacro) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ EXPECT_OK(sor);
+
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ EXPECT_OK(sor.status());
+
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ STATUSOR_INT sor = Make<STATUSOR_INT>();
+ EXPECT_OK(sor);
+
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, BreadthFirstBlockTraversalLoop) {
+  // Evaluating the CFG blocks of the code below in breadth-first order results
+  // in an infinite loop. Each iteration of the while loop assigns a new value
+  // to the storage location of sor1. However, under a breadth-first order of
+  // evaluation, downstream blocks join environments from different generations
+  // of predecessor blocks that hold distinct values for the storage location
+  // of sor1, resulting in no value being assigned to that storage location in
+  // successors. As the analysis iterates, the state of the environment flips
+  // between having and not having a value assigned to the storage location of
+  // sor1. Since evaluating the copy constructor expression in bar(sor1)
+  // depends on a value being assigned to sor1, the state of the environment
+  // likewise flips between having and not having a storage location assigned
+  // to the bar(sor1) expression. This leads to an infinite loop, as the
+  // environment can never stabilize.
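+  // An informal sketch of the flip-flop (values are illustrative only):
+  //   iteration N:   sor1 holds value V1 -> bar(sor1) gets a storage location
+  //   iteration N+1: join(V1, V2) assigns no value -> bar(sor1) loses it
+  //   iteration N+2: a fresh value is assigned again, and so on.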
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void foo(int, int);
+ STATUSOR_INT bar(STATUSOR_INT);
+ void baz(int);
+
+ void target() {
+ while (true) {
+ STATUSOR_INT sor1 = Make<STATUSOR_INT>();
+ if (sor1.ok()) {
+ STATUSOR_INT sor2 = Make<STATUSOR_INT>();
+ if (sor2.ok()) foo(sor1.value(), sor2.value());
+ }
+
+ STATUSOR_INT sor3 = bar(sor1);
+ for (int i = 0; i < 5; i++) sor3 = bar(sor1);
+
+ baz(sor3.value()); // [[unsafe]]
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, ReturnValue) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ STATUSOR_INT sor = Make<STATUSOR_INT>();
+ if (sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, Goto) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ label:
+ if (sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ goto label;
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ label:
+ if (!sor.ok()) goto label;
+ sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ if (!sor.ok()) return;
+ goto label;
+ label:
+ sor.value();
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, JoinDistinctValues) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(bool b) {
+ STATUSOR_INT sor;
+ if (b)
+ sor = Make<STATUSOR_INT>();
+ else
+ sor = Make<STATUSOR_INT>();
+
+ if (sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(bool b) {
+ STATUSOR_INT sor;
+ if (b) {
+ sor = Make<STATUSOR_INT>();
+ if (!sor.ok()) return;
+ } else {
+ sor = Make<STATUSOR_INT>();
+ if (!sor.ok()) return;
+ }
+ sor.value();
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(bool b) {
+ STATUSOR_INT sor;
+ if (b) {
+ sor = Make<STATUSOR_INT>();
+ if (!sor.ok()) return;
+ } else {
+ sor = Make<STATUSOR_INT>();
+ }
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, VarDeclInitExprFromPairAccess) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ auto sor = Make<std::pair<int, STATUSOR_INT>>().second;
+ if (sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ const auto& sor = Make<std::pair<int, STATUSOR_INT>>().second;
+ if (sor.ok())
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, LValueToRValueCastOfChangingValue) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ bool foo();
+
+ void target(bool b1) {
+ STATUSOR_INT sor;
+ if (b1)
+ sor = Make<STATUSOR_INT>();
+ else
+ sor = Make<STATUSOR_INT>();
+
+ do {
+ const auto& b2 = foo();
+ if (b2) break;
+
+ sor.value(); // [[unsafe]]
+ } while (true);
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, ConstructorInitializer) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ class target {
+ target() : foo_(Make<STATUSOR_INT>().value()) { // [[unsafe]]
+ }
+ int foo_;
+ };
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, AssignStatusToBoolVar) {
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ bool ok = sor.ok();
+ if (ok)
+ sor.value();
+ else
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor) {
+ bool not_ok = !sor.ok();
+ if (not_ok)
+ sor.value(); // [[unsafe]]
+ else
+ sor.value();
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, StructuredBindings) {
+ // Binding to a pair (which is actually a struct in the mock header).
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ const auto [sor, x] = Make<std::pair<STATUSOR_INT, int>>();
+ if (sor.ok()) sor.value();
+ }
+ )cc");
+
+ // Unsafe case.
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ const auto [sor, x] = Make<std::pair<STATUSOR_INT, int>>();
+ sor.value(); // [[unsafe]]
+ }
+ )cc");
+
+ // As a reference.
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ const auto& [sor, x] = Make<std::pair<STATUSOR_INT, int>>();
+ if (sor.ok()) sor.value();
+ }
+ )cc");
+
+ // Binding to a ref in a struct.
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ struct S {
+ STATUSOR_INT& sor;
+ int i;
+ };
+
+ void target() {
+ const auto& [sor, i] = Make<S>();
+ if (sor.ok()) sor.value();
+ }
+ )cc");
+
+ // In a loop.
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ auto vals = Make<std::vector<std::pair<int, STATUSOR_INT>>>();
+ for (const auto& [x, sor] : vals)
+ if (sor.ok()) sor.value();
+ }
+ )cc");
+
+  // Similar to the above, but the InitExpr's storage is already initialized,
+  // and the bindings refer to it.
+ ExpectDiagnosticsFor(R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target() {
+ auto vals = Make<std::vector<std::pair<int, STATUSOR_INT>>>();
+ for (const auto& p : vals) {
+ const auto& [i, sor] = p;
+ if (sor.ok()) sor.value();
+ }
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, AssignCompositeLogicExprToVar) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor, bool b) {
+ bool c = sor.ok() && b;
+ if (c) sor.value();
+ }
+ )cc");
+
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ void target(STATUSOR_INT sor, bool b) {
+ bool c = !(!sor.ok() || !b);
+ if (c) sor.value();
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, Subclass) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ class Foo : public STATUSOR_INT {};
+
+ void target(Foo opt) {
+ opt.value(); // [[unsafe]]
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, SubclassStatus) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ class Foo : public STATUS {};
+
+ void target(Foo opt) { opt.ok(); }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, SubclassOk) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ class Foo : public STATUSOR_INT {};
+
+ void target(Foo opt) {
+ if (opt.ok()) opt.value();
+ }
+ )cc");
+}
+
+TEST_P(UncheckedStatusOrAccessModelTest, SubclassOperator) {
+ ExpectDiagnosticsFor(
+ R"cc(
+#include "unchecked_statusor_access_test_defs.h"
+
+ class Foo : public STATUSOR_INT {};
+
+ void target(Foo opt) {
+ *opt; // [[unsafe]]
+ }
+ )cc");
+}
+
+} // namespace
+
+std::string
+GetAliasMacros(UncheckedStatusOrAccessModelTestAliasKind AliasKind) {
+ switch (AliasKind) {
+ case UncheckedStatusOrAccessModelTestAliasKind::kUnaliased:
+ return R"cc(
+#define STATUSOR_INT ::absl::StatusOr<int>
+#define STATUSOR_BOOL ::absl::StatusOr<bool>
+#define STATUSOR_VOIDPTR ::absl::StatusOr<void*>
+#define STATUS ::absl::Status
+ )cc";
+ case UncheckedStatusOrAccessModelTestAliasKind::kPartiallyAliased:
+ return R"cc(
+ template <typename T>
+ using StatusOrAlias = ::absl::StatusOr<T>;
+#define STATUSOR_INT StatusOrAlias<int>
+#define STATUSOR_BOOL StatusOrAlias<bool>
+#define STATUSOR_VOIDPTR StatusOrAlias<void*>
+#define STATUS ::absl::Status
+ )cc";
+ case UncheckedStatusOrAccessModelTestAliasKind::kFullyAliased:
+ return R"cc(
+ using StatusOrIntAlias = ::absl::StatusOr<int>;
+#define STATUSOR_INT StatusOrIntAlias
+ using StatusOrBoolAlias = ::absl::StatusOr<bool>;
+#define STATUSOR_BOOL StatusOrBoolAlias
+ using StatusOrVoidPtrAlias = ::absl::StatusOr<void*>;
+#define STATUSOR_VOIDPTR StatusOrVoidPtrAlias
+ using StatusAlias = ::absl::Status;
+#define STATUS StatusAlias
+ )cc";
+ }
+ llvm_unreachable("Unknown alias kind.");
+}
+
+std::vector<std::pair<std::string, std::string>>
+GetHeaders(UncheckedStatusOrAccessModelTestAliasKind AliasKind) {
+ auto Headers = test::getMockHeaders();
+
+ Headers.emplace_back("unchecked_statusor_access_test_defs.h",
+ R"cc(
+#include "cstddef.h"
+#include "statusor_defs.h"
+#include "std_optional.h"
+#include "std_vector.h"
+#include "std_pair.h"
+#include "absl_log.h"
+#include "testing_defs.h"
+
+ template <typename T>
+ T Make();
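+ // Declared without a definition: tests only need an arbitrary value of
+ // type T, and the code is only parsed (-fsyntax-only), never linked.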
+
+ class Fatal {
+ public:
+ ~Fatal() __attribute__((noreturn));
+ int value();
+ };
+ )cc" +
+ GetAliasMacros(AliasKind));
+ return Headers;
+}
+} // namespace clang::dataflow::statusor_model
diff --git a/clang/unittests/Analysis/FlowSensitive/UncheckedStatusOrAccessModelTestFixture.h b/clang/unittests/Analysis/FlowSensitive/UncheckedStatusOrAccessModelTestFixture.h
new file mode 100644
index 0000000..92dcd93
--- /dev/null
+++ b/clang/unittests/Analysis/FlowSensitive/UncheckedStatusOrAccessModelTestFixture.h
@@ -0,0 +1,159 @@
+//===- UncheckedStatusOrAccessModelTestFixture.h --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOW_SENSITIVE_UNCHECKEDSTATUSORACCESSMODELTESTFIXTURE_H_
+#define LLVM_CLANG_ANALYSIS_FLOW_SENSITIVE_UNCHECKEDSTATUSORACCESSMODELTESTFIXTURE_H_
+
+#include <algorithm>
+#include <iterator>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "TestingSupport.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/MatchSwitch.h"
+#include "clang/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Tooling/Tooling.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Error.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace clang::dataflow::statusor_model {
+
+enum class UncheckedStatusOrAccessModelTestAliasKind {
+ kUnaliased = 0, // no alias
+ kPartiallyAliased = 1, // template<typename T> using Alias = absl::StatusOr<T>;
+ kFullyAliased = 2, // using Alias = absl::StatusOr<int>;
+};
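+// For example, with kPartiallyAliased the STATUSOR_INT test macro expands to
+// StatusOrAlias<int>, where StatusOrAlias is declared as
+//   template <typename T> using StatusOrAlias = ::absl::StatusOr<T>;
+// (see GetAliasMacros below).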
+
+// Base class for the test executors. It abstracts away the template parameter
+// of UncheckedStatusOrAccessModelTestExecutor so that
+// UncheckedStatusOrAccessModelTest can hold a plain
+// UncheckedStatusOrAccessModelTestExecutorBase*.
+class UncheckedStatusOrAccessModelTestExecutorBase {
+public:
+ virtual void
+ ExpectDiagnosticsFor(std::string SourceCode,
+ UncheckedStatusOrAccessModelTestAliasKind) const = 0;
+ virtual void ExpectDiagnosticsForLambda(
+ std::string SourceCode,
+ UncheckedStatusOrAccessModelTestAliasKind) const = 0;
+ virtual ~UncheckedStatusOrAccessModelTestExecutorBase() = default;
+};
+
+// Returns the definitions of the following macros, chosen according to the
+// alias kind:
+// - STATUS
+// - STATUSOR_INT
+// - STATUSOR_BOOL
+// - STATUSOR_VOIDPTR
+// Tests should use these macros instead of e.g. absl::StatusOr<int> directly,
+// to ensure the model is insensitive to whether StatusOr is aliased or not.
+std::string GetAliasMacros(UncheckedStatusOrAccessModelTestAliasKind AliasKind);
+
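+// The returned mapping contains the shared mock headers plus
+// "unchecked_statusor_access_test_defs.h", which provides Make<T>() and the
+// macros from GetAliasMacros().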
+std::vector<std::pair<std::string, std::string>>
+GetHeaders(UncheckedStatusOrAccessModelTestAliasKind AliasKind);
+
+// Runs the same test suite against multiple models, so that vendors can model
+// internal APIs in an extension of the base model and verify that these tests
+// still pass.
+template <typename Model>
+class UncheckedStatusOrAccessModelTestExecutor
+ : public UncheckedStatusOrAccessModelTestExecutorBase {
+public:
+ void ExpectDiagnosticsFor(
+ std::string SourceCode,
+ UncheckedStatusOrAccessModelTestAliasKind AliasKind) const override {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ ExpectDiagnosticsFor(SourceCode, hasName("target"), AliasKind);
+ }
+
+ void ExpectDiagnosticsForLambda(
+ std::string SourceCode,
+ UncheckedStatusOrAccessModelTestAliasKind AliasKind) const override {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ ExpectDiagnosticsFor(SourceCode,
+ allOf(hasOverloadedOperatorName("()"),
+ hasDeclContext(cxxRecordDecl(isLambda()))),
+ AliasKind);
+ }
+
+ template <typename FuncDeclMatcher>
+ void ExpectDiagnosticsFor(
+ std::string SourceCode, FuncDeclMatcher FuncMatcher,
+ UncheckedStatusOrAccessModelTestAliasKind AliasKind) const {
+ std::vector<std::pair<std::string, std::string>> Headers =
+ GetHeaders(AliasKind);
+
+ UncheckedStatusOrAccessModelOptions Options{};
+ std::vector<SourceLocation> Diagnostics;
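+ // Run the analysis over the matched function, record the location of
+ // every diagnostic emitted by the diagnoser, and then check that the set
+ // of diagnosed lines exactly matches the lines annotated [[unsafe]].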
+ llvm::Error Error = test::checkDataflow<Model>(
+ test::AnalysisInputs<Model>(
+ SourceCode, std::move(FuncMatcher),
+ [](ASTContext &Ctx, Environment &Env) { return Model(Ctx, Env); })
+ .withPostVisitCFG(
+ [&Diagnostics,
+ Diagnoser = UncheckedStatusOrAccessDiagnoser(Options)](
+ ASTContext &Ctx, const CFGElement &Elt,
+ const TransferStateForDiagnostics<
+ UncheckedStatusOrAccessModel::Lattice> &State) mutable {
+ auto EltDiagnostics = Diagnoser(Elt, Ctx, State);
+ llvm::move(EltDiagnostics, std::back_inserter(Diagnostics));
+ })
+ .withASTBuildArgs(
+ {"-fsyntax-only", "-std=c++17", "-Wno-undefined-inline"})
+ .withASTBuildVirtualMappedFiles(
+ tooling::FileContentMappings(Headers.begin(), Headers.end())),
+ /*VerifyResults=*/[&Diagnostics, SourceCode](
+ const llvm::DenseMap<unsigned, std::string>
+ &Annotations,
+ const test::AnalysisOutputs &AO) {
+ llvm::DenseSet<unsigned> AnnotationLines;
+ for (const auto &[Line, _] : Annotations)
+ AnnotationLines.insert(Line);
+ auto &SrcMgr = AO.ASTCtx.getSourceManager();
+ llvm::DenseSet<unsigned> DiagnosticLines;
+ for (SourceLocation &Loc : Diagnostics)
+ DiagnosticLines.insert(SrcMgr.getPresumedLineNumber(Loc));
+
+ EXPECT_THAT(DiagnosticLines, testing::ContainerEq(AnnotationLines))
+ << "\nFailing code:\n"
+ << SourceCode;
+ });
+ if (Error)
+ FAIL() << llvm::toString(std::move(Error));
+ }
+
+ ~UncheckedStatusOrAccessModelTestExecutor() override = default;
+};
+
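+// Test fixture parameterized over (executor, alias kind): the executor knows
+// the concrete model type to run, and the alias kind selects how StatusOr is
+// spelled in the generated headers.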
+class UncheckedStatusOrAccessModelTest
+ : public ::testing::TestWithParam<
+ std::pair<UncheckedStatusOrAccessModelTestExecutorBase *,
+ UncheckedStatusOrAccessModelTestAliasKind>> {
+protected:
+ void ExpectDiagnosticsFor(std::string SourceCode) {
+ GetParam().first->ExpectDiagnosticsFor(SourceCode, GetParam().second);
+ }
+
+ void ExpectDiagnosticsForLambda(std::string SourceCode) {
+ GetParam().first->ExpectDiagnosticsForLambda(SourceCode, GetParam().second);
+ }
+};
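+// A typical registration in a test .cpp might look as follows. This is an
+// illustrative sketch only; the instantiation and executor names here are
+// hypothetical, not part of this header:
+//
+//   static UncheckedStatusOrAccessModelTestExecutor<
+//       UncheckedStatusOrAccessModel>
+//       Executor; // hypothetical executor instance
+//   INSTANTIATE_TEST_SUITE_P(
+//       UncheckedStatusOrAccessModelTests, UncheckedStatusOrAccessModelTest,
+//       ::testing::Values(std::make_pair(
+//           &Executor,
+//           UncheckedStatusOrAccessModelTestAliasKind::kUnaliased)));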
+
+} // namespace clang::dataflow::statusor_model
+
+#endif // LLVM_CLANG_ANALYSIS_FLOW_SENSITIVE_UNCHECKEDSTATUSORACCESSMODELTESTFIXTURE_H_
diff --git a/clang/unittests/Driver/MultilibBuilderTest.cpp b/clang/unittests/Driver/MultilibBuilderTest.cpp
index 38c9344..0c1e806 100644
--- a/clang/unittests/Driver/MultilibBuilderTest.cpp
+++ b/clang/unittests/Driver/MultilibBuilderTest.cpp
@@ -91,7 +91,7 @@ TEST(MultilibBuilderTest, SetConstruction2) {
ASSERT_TRUE(MS.size() == 4);
for (MultilibSet::const_iterator I = MS.begin(), E = MS.end(); I != E; ++I) {
ASSERT_TRUE(llvm::StringSwitch<bool>(I->gccSuffix())
- .Cases("", "/sof", "/el", "/sof/el", true)
+ .Cases({"", "/sof", "/el", "/sof/el"}, true)
.Default(false))
<< "Multilib " << *I << " wasn't expected";
ASSERT_TRUE(llvm::StringSwitch<bool>(I->gccSuffix())
diff --git a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp
index 0fb8139..ce68f91 100644
--- a/clang/unittests/Format/FormatTest.cpp
+++ b/clang/unittests/Format/FormatTest.cpp
@@ -18559,6 +18559,11 @@ TEST_F(FormatTest, AlignConsecutiveMacros) {
"#define bbbb 4\n"
"#define ccc (5)",
Style);
+
+ Style.ColumnLimit = 30;
+ verifyFormat("#define MY_FUNC(x) callMe(X)\n"
+ "#define MY_LONG_CONSTANT 17",
+ Style);
}
TEST_F(FormatTest, AlignConsecutiveAssignmentsAcrossEmptyLines) {
@@ -20772,6 +20777,30 @@ TEST_F(FormatTest, AlignWithLineBreaks) {
"}",
Style);
// clang-format on
+
+ Style = getLLVMStyleWithColumns(70);
+ Style.AlignConsecutiveDeclarations.Enabled = true;
+ verifyFormat(
+ "ReturnType\n"
+ "MyFancyIntefaceFunction(Context *context,\n"
+ " ALongTypeName *response) noexcept override;\n"
+ "ReturnType func();",
+ Style);
+
+ verifyFormat(
+ "ReturnType\n"
+ "MyFancyIntefaceFunction(B<int> *context,\n"
+ " decltype(AFunc) *response) noexcept override;\n"
+ "ReturnType func();",
+ Style);
+
+ Style.AlignConsecutiveAssignments.Enabled = true;
+ Style.ColumnLimit = 15;
+ verifyFormat("int i1 = 1;\n"
+ "k = bar(\n"
+ " argument1,\n"
+ " argument2);",
+ Style);
}
TEST_F(FormatTest, AlignWithInitializerPeriods) {
diff --git a/clang/unittests/Format/TokenAnnotatorTest.cpp b/clang/unittests/Format/TokenAnnotatorTest.cpp
index 1002515..ca99940 100644
--- a/clang/unittests/Format/TokenAnnotatorTest.cpp
+++ b/clang/unittests/Format/TokenAnnotatorTest.cpp
@@ -4237,6 +4237,29 @@ TEST_F(TokenAnnotatorTest, QtProperty) {
EXPECT_TOKEN(Tokens[12], tok::identifier, TT_QtProperty);
}
+TEST_F(TokenAnnotatorTest, AttributeSquares) {
+ auto Tokens = annotate("[[maybe_unused]] const int i;");
+ ASSERT_EQ(Tokens.size(), 10u) << Tokens;
+ EXPECT_TOKEN(Tokens[0], tok::l_square, TT_AttributeLSquare);
+ EXPECT_TOKEN(Tokens[1], tok::l_square, TT_Unknown);
+ EXPECT_TOKEN(Tokens[3], tok::r_square, TT_Unknown);
+ EXPECT_TOKEN(Tokens[4], tok::r_square, TT_AttributeRSquare);
+ EXPECT_TRUE(Tokens[4]->EndsCppAttributeGroup);
+
+ Tokens = annotate("[[foo([[]])]] [[maybe_unused]] int j;");
+ ASSERT_EQ(Tokens.size(), 20u) << Tokens;
+ EXPECT_TOKEN(Tokens[0], tok::l_square, TT_AttributeLSquare);
+ EXPECT_TOKEN(Tokens[1], tok::l_square, TT_Unknown);
+ EXPECT_TOKEN(Tokens[9], tok::r_square, TT_Unknown);
+ EXPECT_TOKEN(Tokens[10], tok::r_square, TT_AttributeRSquare);
+ EXPECT_FALSE(Tokens[10]->EndsCppAttributeGroup);
+ EXPECT_TOKEN(Tokens[11], tok::l_square, TT_AttributeLSquare);
+ EXPECT_TOKEN(Tokens[12], tok::l_square, TT_Unknown);
+ EXPECT_TOKEN(Tokens[14], tok::r_square, TT_Unknown);
+ EXPECT_TOKEN(Tokens[15], tok::r_square, TT_AttributeRSquare);
+ EXPECT_TRUE(Tokens[15]->EndsCppAttributeGroup);
+}
+
} // namespace
} // namespace format
} // namespace clang
diff --git a/clang/utils/TableGen/ClangOptionDocEmitter.cpp b/clang/utils/TableGen/ClangOptionDocEmitter.cpp
index 8a31686..d779c84 100644
--- a/clang/utils/TableGen/ClangOptionDocEmitter.cpp
+++ b/clang/utils/TableGen/ClangOptionDocEmitter.cpp
@@ -173,11 +173,11 @@ Documentation extractDocumentation(const RecordKeeper &Records,
// Get the first and successive separators to use for an OptionKind.
std::pair<StringRef,StringRef> getSeparatorsForKind(const Record *OptionKind) {
return StringSwitch<std::pair<StringRef, StringRef>>(OptionKind->getName())
- .Cases("KIND_JOINED", "KIND_JOINED_OR_SEPARATE",
- "KIND_JOINED_AND_SEPARATE",
- "KIND_REMAINING_ARGS_JOINED", {"", " "})
- .Case("KIND_COMMAJOINED", {"", ","})
- .Default({" ", " "});
+ .Cases({"KIND_JOINED", "KIND_JOINED_OR_SEPARATE",
+ "KIND_JOINED_AND_SEPARATE", "KIND_REMAINING_ARGS_JOINED"},
+ {"", " "})
+ .Case("KIND_COMMAJOINED", {"", ","})
+ .Default({" ", " "});
}
const unsigned UnlimitedArgs = unsigned(-1);
@@ -186,12 +186,13 @@ const unsigned UnlimitedArgs = unsigned(-1);
// arguments are accepted.
unsigned getNumArgsForKind(const Record *OptionKind, const Record *Option) {
return StringSwitch<unsigned>(OptionKind->getName())
- .Cases("KIND_JOINED", "KIND_JOINED_OR_SEPARATE", "KIND_SEPARATE", 1)
- .Cases("KIND_REMAINING_ARGS", "KIND_REMAINING_ARGS_JOINED",
- "KIND_COMMAJOINED", UnlimitedArgs)
- .Case("KIND_JOINED_AND_SEPARATE", 2)
- .Case("KIND_MULTIARG", Option->getValueAsInt("NumArgs"))
- .Default(0);
+ .Cases({"KIND_JOINED", "KIND_JOINED_OR_SEPARATE", "KIND_SEPARATE"}, 1)
+ .Cases({"KIND_REMAINING_ARGS", "KIND_REMAINING_ARGS_JOINED",
+ "KIND_COMMAJOINED"},
+ UnlimitedArgs)
+ .Case("KIND_JOINED_AND_SEPARATE", 2)
+ .Case("KIND_MULTIARG", Option->getValueAsInt("NumArgs"))
+ .Default(0);
}
std::string escapeRST(StringRef Str) {
diff --git a/clang/www/cxx_status.html b/clang/www/cxx_status.html
index a35e501..2618ff9 100755
--- a/clang/www/cxx_status.html
+++ b/clang/www/cxx_status.html
@@ -926,7 +926,14 @@ C++23, informally referred to as C++26.</p>
</tr>
<tr>
<td><a href="https://wg21.link/p1857r3">P1857R3</a></td>
- <td class="none" align="center">No</td>
+ <td class="partial" align="center">
+ <details>
+ <summary>Clang 21 (Partial)</summary>
+ The restriction that "[a] module directive may only appear
+ as the first preprocessing tokens in a file" is enforced
+ starting in Clang 21.
+ </details>
+ </td>
</tr>
<tr>
<td><a href="https://wg21.link/p2115r0">P2115R0</a></td>